--- a/xen/arch/x86/hvm/svm/avic.c
+++ b/xen/arch/x86/hvm/svm/avic.c
@@ -45,6 +45,95 @@ avic_get_phy_ait_entry(struct vcpu *v, int index)
}
/***************************************************************
+ * AVIC VCPU SCHEDULING
+ */
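+/*
+ * Mark the vCPU as running on this processor: publish the host
+ * physical APIC ID and set is_running so that the hardware can
+ * signal doorbells directly to this core.
+ */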
+static void avic_vcpu_load(struct vcpu *v)
+{
+    struct arch_svm_struct *s = &v->arch.hvm_svm;
+    int h_phy_apic_id;
+    struct svm_avic_phy_ait_entry entry;
+
+    if ( !svm_avic || !s->avic_phy_id_cache )
+        return;
+
+    if ( test_bit(_VPF_blocked, &v->pause_flags) )
+        return;
+
+    /*
+     * Note: APIC ID = 0xff is used for broadcast.
+     * APIC ID > 0xff is reserved.
+     */
+    h_phy_apic_id = cpu_data[v->processor].apicid;
+    if ( h_phy_apic_id >= AVIC_PHY_APIC_ID_MAX )
+        return;
+
+    entry = *(s->avic_phy_id_cache);
+    smp_rmb();
+    entry.host_phy_apic_id = h_phy_apic_id;
+    entry.is_running = 1;
+    *(s->avic_phy_id_cache) = entry;
+    smp_wmb();
+}
+
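+/* Mark the vCPU as not running while it is switched out. */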
+static void avic_vcpu_put(struct vcpu *v)
+{
+    struct arch_svm_struct *s = &v->arch.hvm_svm;
+    struct svm_avic_phy_ait_entry entry;
+
+    if ( !svm_avic || !s->avic_phy_id_cache )
+        return;
+
+    entry = *(s->avic_phy_id_cache);
+    smp_rmb();
+    entry.is_running = 0;
+    *(s->avic_phy_id_cache) = entry;
+    smp_wmb();
+}
+
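+/* Scheduler resume hook: the vCPU is about to run again. */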
+static void avic_vcpu_resume(struct vcpu *v)
+{
+    struct svm_avic_phy_ait_entry entry;
+    struct arch_svm_struct *s = &v->arch.hvm_svm;
+
+    if ( !svm_avic_vcpu_enabled(v) || !s->avic_phy_id_cache )
+        return;
+
+    ASSERT(!test_bit(_VPF_blocked, &v->pause_flags));
+
+    entry = *(s->avic_phy_id_cache);
+    smp_rmb();
+    entry.is_running = 1;
+    *(s->avic_phy_id_cache) = entry;
+    smp_wmb();
+}
+
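+/*
+ * Scheduler block hook: clear is_running so that interrupts aimed at
+ * the blocked vCPU are handled by the hypervisor, not by a doorbell.
+ */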
+static void avic_vcpu_block(struct vcpu *v)
+{
+    struct svm_avic_phy_ait_entry entry;
+    struct arch_svm_struct *s = &v->arch.hvm_svm;
+
+    if ( !svm_avic_vcpu_enabled(v) || !s->avic_phy_id_cache )
+        return;
+
+    entry = *(s->avic_phy_id_cache);
+    smp_rmb();
+    entry.is_running = 0;
+    *(s->avic_phy_id_cache) = entry;
+    smp_wmb();
+}
+
+/***************************************************************
* AVIC APIs
*/
int svm_avic_dom_init(struct domain *d)
@@ -97,6 +186,11 @@ int svm_avic_dom_init(struct domain *d)
    clear_domain_page(_mfn(mfn));
    d->arch.hvm_domain.svm.avic_phy_ait_mfn = mfn;
+    d->arch.hvm_domain.pi_ops.pi_switch_to = avic_vcpu_load;
+    d->arch.hvm_domain.pi_ops.pi_switch_from = avic_vcpu_put;
+    d->arch.hvm_domain.pi_ops.vcpu_block = avic_vcpu_block;
+    d->arch.hvm_domain.pi_ops.pi_do_resume = avic_vcpu_resume;
+
    return ret;
 err_out:
    svm_avic_dom_destroy(d);
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1011,6 +1011,10 @@ static void svm_ctxt_switch_from(struct vcpu *v)
    svm_tsc_ratio_save(v);
    svm_sync_vmcb(v);
+
+    if ( v->domain->arch.hvm_domain.pi_ops.pi_switch_from )
+        v->domain->arch.hvm_domain.pi_ops.pi_switch_from(v);
+
    svm_vmload(per_cpu(root_vmcb, cpu));
    /* Resume use of ISTs now that the host TR is reinstated. */
@@ -1050,6 +1054,9 @@ static void svm_ctxt_switch_to(struct vcpu *v)
    svm_lwp_load(v);
    svm_tsc_ratio_load(v);
+    if ( v->domain->arch.hvm_domain.pi_ops.pi_switch_to )
+        v->domain->arch.hvm_domain.pi_ops.pi_switch_to(v);
+
    if ( cpu_has_rdtscp )
        wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
}
@@ -1095,6 +1102,9 @@ static void noreturn svm_do_resume(struct vcpu *v)
        vmcb_set_vintr(vmcb, intr);
    }
+    if ( v->domain->arch.hvm_domain.pi_ops.pi_do_resume )
+        v->domain->arch.hvm_domain.pi_ops.pi_do_resume(v);
+
    hvm_do_resume(v);
    reset_stack_and_jump(svm_asm_do_resume);
Add hooks to manage AVIC data structures during vcpu scheduling.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
 xen/arch/x86/hvm/svm/avic.c | 94 +++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/svm/svm.c  | 10 ++++++
 2 files changed, 104 insertions(+)
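
For reference, a minimal sketch of the per-domain hook table the
assignments above assume. The 'struct hvm_pi_ops' name and exact
layout are assumptions for illustration; the field names are taken
from the hunks above, and the real definition in the Xen headers may
carry additional members:

/*
 * Hypothetical, simplified model of the hook table populated in
 * svm_avic_dom_init() above; not the authoritative definition.
 */
struct vcpu;

struct hvm_pi_ops {
    void (*vcpu_block)(struct vcpu *v);      /* vCPU blocks:  avic_vcpu_block  */
    void (*pi_switch_from)(struct vcpu *v);  /* descheduled:  avic_vcpu_put    */
    void (*pi_switch_to)(struct vcpu *v);    /* scheduled in: avic_vcpu_load   */
    void (*pi_do_resume)(struct vcpu *v);    /* VMRUN resume: avic_vcpu_resume */
};

Each call site above checks the pointer for NULL first, so domains
that never register the hooks only pay for a predictable branch.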