@@ -204,12 +204,12 @@ void vmx_pi_hooks_assign(struct domain *d)
if ( !iommu_intpost || !has_hvm_container_domain(d) )
return;
- ASSERT(!d->arch.hvm_domain.vmx.vcpu_block);
+ ASSERT(!d->arch.hvm_domain.pi_ops.vcpu_block);
- d->arch.hvm_domain.vmx.vcpu_block = vmx_vcpu_block;
- d->arch.hvm_domain.vmx.pi_switch_from = vmx_pi_switch_from;
- d->arch.hvm_domain.vmx.pi_switch_to = vmx_pi_switch_to;
- d->arch.hvm_domain.vmx.pi_do_resume = vmx_pi_do_resume;
+ d->arch.hvm_domain.pi_ops.vcpu_block = vmx_vcpu_block;
+ d->arch.hvm_domain.pi_ops.pi_switch_from = vmx_pi_switch_from;
+ d->arch.hvm_domain.pi_ops.pi_switch_to = vmx_pi_switch_to;
+ d->arch.hvm_domain.pi_ops.pi_do_resume = vmx_pi_do_resume;
}
/* This function is called when pcidevs_lock is held */
@@ -218,12 +218,12 @@ void vmx_pi_hooks_deassign(struct domain *d)
if ( !iommu_intpost || !has_hvm_container_domain(d) )
return;
- ASSERT(d->arch.hvm_domain.vmx.vcpu_block);
+ ASSERT(d->arch.hvm_domain.pi_ops.vcpu_block);
- d->arch.hvm_domain.vmx.vcpu_block = NULL;
- d->arch.hvm_domain.vmx.pi_switch_from = NULL;
- d->arch.hvm_domain.vmx.pi_switch_to = NULL;
- d->arch.hvm_domain.vmx.pi_do_resume = NULL;
+ d->arch.hvm_domain.pi_ops.vcpu_block = NULL;
+ d->arch.hvm_domain.pi_ops.pi_switch_from = NULL;
+ d->arch.hvm_domain.pi_ops.pi_switch_to = NULL;
+ d->arch.hvm_domain.pi_ops.pi_do_resume = NULL;
}
static int vmx_domain_initialise(struct domain *d)
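
For context: the point of moving these hooks out of struct vmx_domain is that
an SVM AVIC series can later install its own callbacks through the same
structure. A minimal sketch of such a counterpart follows; every svm_avic_*
name below is hypothetical, nothing here is introduced by this patch:

/* Hypothetical SVM AVIC handlers; illustration only. */
static void svm_avic_vcpu_block(struct vcpu *v);
static void svm_avic_switch_from(struct vcpu *v);
static void svm_avic_switch_to(struct vcpu *v);
static void svm_avic_do_resume(struct vcpu *v);

/* Hypothetical SVM counterpart of vmx_pi_hooks_assign(). */
void svm_pi_hooks_assign(struct domain *d)
{
    if ( !has_hvm_container_domain(d) )
        return;

    ASSERT(!d->arch.hvm_domain.pi_ops.vcpu_block);

    d->arch.hvm_domain.pi_ops.vcpu_block = svm_avic_vcpu_block;
    d->arch.hvm_domain.pi_ops.pi_switch_from = svm_avic_switch_from;
    d->arch.hvm_domain.pi_ops.pi_switch_to = svm_avic_switch_to;
    d->arch.hvm_domain.pi_ops.pi_do_resume = svm_avic_do_resume;
}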
@@ -901,8 +901,8 @@ static void vmx_ctxt_switch_from(struct vcpu *v)
vmx_restore_host_msrs();
vmx_save_dr(v);
- if ( v->domain->arch.hvm_domain.vmx.pi_switch_from )
- v->domain->arch.hvm_domain.vmx.pi_switch_from(v);
+ if ( v->domain->arch.hvm_domain.pi_ops.pi_switch_from )
+ v->domain->arch.hvm_domain.pi_ops.pi_switch_from(v);
}
static void vmx_ctxt_switch_to(struct vcpu *v)
@@ -916,8 +916,8 @@ static void vmx_ctxt_switch_to(struct vcpu *v)
vmx_restore_guest_msrs(v);
vmx_restore_dr(v);
- if ( v->domain->arch.hvm_domain.vmx.pi_switch_to )
- v->domain->arch.hvm_domain.vmx.pi_switch_to(v);
+ if ( v->domain->arch.hvm_domain.pi_ops.pi_switch_to )
+ v->domain->arch.hvm_domain.pi_ops.pi_switch_to(v);
}
@@ -3914,8 +3914,8 @@ void vmx_vmenter_helper(const struct cpu_user_regs *regs)
struct hvm_vcpu_asid *p_asid;
bool_t need_flush;
- if ( curr->domain->arch.hvm_domain.vmx.pi_do_resume )
- curr->domain->arch.hvm_domain.vmx.pi_do_resume(curr);
+ if ( curr->domain->arch.hvm_domain.pi_ops.pi_do_resume )
+ curr->domain->arch.hvm_domain.pi_ops.pi_do_resume(curr);
if ( !cpu_has_vmx_vpid )
goto out;
@@ -72,6 +72,67 @@ struct hvm_ioreq_server {
bool_t bufioreq_atomic;
};
+struct hvm_pi_ops {
+ /*
+ * To handle posted interrupts correctly, we need to set the following
+ * state:
+ *
+ * * The PI notification vector (NV)
+ * * The PI notification destination processor (NDST)
+ * * The PI "suppress notification" bit (SN)
+ * * The vcpu pi "blocked" list
+ *
+ * If a VM is currently running, we want the PI delivered to the guest vcpu
+ * on the proper pcpu (NDST = v->processor, SN clear).
+ *
+ * If the VM is blocked, we want the PI delivered to Xen so that it can
+ * wake it up (SN clear, NV = pi_wakeup_vector, vcpu on block list).
+ *
+ * If the VM is currently either preempted or offline (i.e., not running
+ * because of some reason other than blocking waiting for an interrupt),
+ * there's nothing Xen can do -- we want the interrupt pending bit set in
+ * the guest, but we don't want to bother Xen with an interrupt (SN set).
+ *
+ * There's a brief window of time between vmx_intr_assist() and checking
+ * softirqs where if an interrupt comes in it may be lost; so we need Xen
+ * to get an interrupt and raise a softirq so that it will go through the
+ * vmx_intr_assist() path again (SN clear, NV = posted_intr_vector).
+ *
+ * The way we implement this now is by looking at what needs to happen on
+ * the following runstate transitions:
+ *
+ * A: runnable -> running
+ * - SN = 0
+ * - NDST = v->processor
+ * B: running -> runnable
+ * - SN = 1
+ * C: running -> blocked
+ * - NV = pi_wakeup_vector
+ * - Add vcpu to blocked list
+ * D: blocked -> runnable
+ * - NV = posted_intr_vector
+ * - Take vcpu off blocked list
+ *
+ * For transitions A and B, we add hooks into vmx_ctxt_switch_{from,to}
+ * paths.
+ *
+ * For transition C, we add a new arch hook, arch_vcpu_block(), which is
+ * called from vcpu_block() and vcpu_do_poll().
+ *
+ * For transition D, rather than add an extra arch hook on vcpu_wake, we
+ * add a hook on the vmentry path which checks to see if either of the two
+ * actions needs to be taken.
+ *
+ * These hooks only need to be called when the domain in question actually
+ * has a physical device assigned to it, so we set and clear the callbacks
+ * as appropriate when device assignment changes.
+ */
+ void (*vcpu_block) (struct vcpu *);
+ void (*pi_switch_from) (struct vcpu *v);
+ void (*pi_switch_to) (struct vcpu *v);
+ void (*pi_do_resume) (struct vcpu *v);
+};
+
struct hvm_domain {
/* Guest page range used for non-default ioreq servers */
struct {
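
With the ops table in struct hvm_domain rather than struct vmx_domain, every
call site reduces to the same NULL-checked pattern. A hypothetical inline
wrapper (not introduced by this patch) spells that pattern out for one hook:

/* Hypothetical wrapper; mirrors the checks at each call site in vmx.c above. */
static inline void hvm_pi_do_resume(struct vcpu *v)
{
    if ( v->domain->arch.hvm_domain.pi_ops.pi_do_resume )
        v->domain->arch.hvm_domain.pi_ops.pi_do_resume(v);
}

The NULL check doubles as the "device assigned" test, since the hooks are only
populated while the domain has a physical device assigned.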
@@ -148,6 +209,8 @@ struct hvm_domain {
struct list_head list;
} write_map;
+ struct hvm_pi_ops pi_ops;
+
union {
struct vmx_domain vmx;
struct svm_domain svm;
@@ -621,8 +621,8 @@ unsigned long hvm_cr4_guest_reserved_bits(const struct vcpu *v, bool_t restore);
struct vcpu *v_ = (v); \
struct domain *d_ = v_->domain; \
if ( has_hvm_container_domain(d_) && \
- (cpu_has_vmx && d_->arch.hvm_domain.vmx.vcpu_block) ) \
- d_->arch.hvm_domain.vmx.vcpu_block(v_); \
+ d_->arch.hvm_domain.pi_ops.vcpu_block ) \
+ d_->arch.hvm_domain.pi_ops.vcpu_block(v_); \
})
#endif /* __ASM_X86_HVM_HVM_H__ */
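
For reference, transition C reaches the arch_vcpu_block() macro above from
common code. A paraphrased sketch of the caller, loosely based on vcpu_block()
in xen/common/schedule.c (the exact body in any given tree may differ):

void vcpu_block(void)
{
    struct vcpu *v = current;

    set_bit(_VPF_blocked, &v->pause_flags);

    arch_vcpu_block(v);  /* transition C: NV = pi_wakeup_vector */

    /* Check for events /after/ blocking, to avoid missing a wakeup. */
    if ( local_events_need_delivery() )
        clear_bit(_VPF_blocked, &v->pause_flags);
    else
        raise_softirq(SCHEDULE_SOFTIRQ);
}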
@@ -77,65 +77,6 @@ struct vmx_domain {
unsigned long apic_access_mfn;
/* VMX_DOMAIN_* */
unsigned int status;
-
- /*
- * To handle posted interrupts correctly, we need to set the following
- * state:
- *
- * * The PI notification vector (NV)
- * * The PI notification destination processor (NDST)
- * * The PI "suppress notification" bit (SN)
- * * The vcpu pi "blocked" list
- *
- * If a VM is currently running, we want the PI delivered to the guest vcpu
- * on the proper pcpu (NDST = v->processor, SN clear).
- *
- * If the vm is blocked, we want the PI delivered to Xen so that it can
- * wake it up (SN clear, NV = pi_wakeup_vector, vcpu on block list).
- *
- * If the VM is currently either preempted or offline (i.e., not running
- * because of some reason other than blocking waiting for an interrupt),
- * there's nothing Xen can do -- we want the interrupt pending bit set in
- * the guest, but we don't want to bother Xen with an interrupt (SN set).
- *
- * There's a brief window of time between vmx_intr_assist() and checking
- * softirqs where if an interrupt comes in it may be lost; so we need Xen
- * to get an interrupt and raise a softirq so that it will go through the
- * vmx_intr_assist() path again (SN clear, NV = posted_interrupt).
- *
- * The way we implement this now is by looking at what needs to happen on
- * the following runstate transitions:
- *
- * A: runnable -> running
- * - SN = 0
- * - NDST = v->processor
- * B: running -> runnable
- * - SN = 1
- * C: running -> blocked
- * - NV = pi_wakeup_vector
- * - Add vcpu to blocked list
- * D: blocked -> runnable
- * - NV = posted_intr_vector
- * - Take vcpu off blocked list
- *
- * For transitions A and B, we add hooks into vmx_ctxt_switch_{from,to}
- * paths.
- *
- * For transition C, we add a new arch hook, arch_vcpu_block(), which is
- * called from vcpu_block() and vcpu_do_poll().
- *
- * For transition D, rather than add an extra arch hook on vcpu_wake, we
- * add a hook on the vmentry path which checks to see if either of the two
- * actions need to be taken.
- *
- * These hooks only need to be called when the domain in question actually
- * has a physical device assigned to it, so we set and clear the callbacks
- * as appropriate when device assignment changes.
- */
- void (*vcpu_block) (struct vcpu *);
- void (*pi_switch_from) (struct vcpu *v);
- void (*pi_switch_to) (struct vcpu *v);
- void (*pi_do_resume) (struct vcpu *v);
};
struct pi_desc {

The function pointers currently used to manage HVM posted interrupts can also
be used by SVM AVIC. Therefore, this patch introduces struct hvm_pi_ops in
struct hvm_domain to hold them.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
 xen/arch/x86/hvm/vmx/vmx.c         | 32 +++++++++----------
 xen/include/asm-x86/hvm/domain.h   | 63 ++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/hvm/hvm.h      |  4 +--
 xen/include/asm-x86/hvm/vmx/vmcs.h | 59 -----------------------------------
 4 files changed, 81 insertions(+), 77 deletions(-)