[Xen-devel,v8,05/10] xen/arm: physical irq follow virtual irq

Message ID: 1405016003-19131-5-git-send-email-stefano.stabellini@eu.citrix.com
State: New

Commit Message

Stefano Stabellini July 10, 2014, 6:13 p.m. UTC
Migrate physical irqs to the same physical cpu that is running the vcpu
expected to receive the irqs. That is done when enabling irqs, when the
guest writes to GICD_ITARGETSR and when Xen migrates a vcpu to a
different pcpu.
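
The retargeting itself boils down to a single helper call at each of those
points. Below is a minimal, standalone sketch of that pattern (not part of
the patch; the struct, the cpumask representation and gicd_write_target are
simplified stand-ins), including the NULL guard that the real
irq_set_affinity() in xen/arch/arm/irq.c applies for virqs that have no
physical counterpart:

    /* Simplified, standalone model of the "physical irq follows virtual
     * irq" retargeting pattern.  All types are illustrative stand-ins. */
    #include <stdio.h>
    #include <stddef.h>

    typedef unsigned long cpumask_t;          /* stand-in: one bit per pcpu */
    #define cpumask_of(cpu) (1UL << (cpu))

    struct irq_desc {
        unsigned int irq;
        void (*set_affinity)(struct irq_desc *, cpumask_t);
    };

    /* Mirrors the guard in the patch: a virq with no physical counterpart
     * (desc == NULL) is simply left alone. */
    static void irq_set_affinity(struct irq_desc *desc, cpumask_t mask)
    {
        if ( desc != NULL )
            desc->set_affinity(desc, mask);
    }

    static void gicd_write_target(struct irq_desc *desc, cpumask_t mask)
    {
        printf("irq %u -> pcpu mask 0x%lx\n", desc->irq, mask);
    }

    int main(void)
    {
        struct irq_desc spi = { .irq = 34, .set_affinity = gicd_write_target };
        unsigned int target_pcpu = 1;   /* pcpu running the target vcpu */

        /* Called when the virq is enabled, when the guest writes
         * GICD_ITARGETSR, and when the scheduler moves the vcpu. */
        irq_set_affinity(&spi, cpumask_of(target_pcpu));
        irq_set_affinity(NULL, cpumask_of(target_pcpu)); /* pure virq: no-op */
        return 0;
    }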

In case of virq migration, if the virq is inflight and in a GICH_LR
register already, delay migrating the corresponding physical irq until
the virq is EOIed by the guest and the MIGRATING flag has been cleared.
This way we make sure that the pcpu running the old vcpu gets
interrupted with a new irq of the same kind, clearing the GICH_LR sooner.
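
The deferral decision reduces to a small state check, modelled by the
standalone sketch below (again not part of the patch; pending_irq, its
states and the helpers are simplified stand-ins for the real vgic
structures). Only a virq that is already loaded in a GICH_LR has its
physical affinity change postponed until the guest EOIs it:

    /* Standalone model of the deferral logic in vgic_migrate_irq(): the
     * physical affinity change is applied immediately unless the virq is
     * already sitting in a GICH_LR, in which case it is postponed until
     * the guest EOIs the old instance. */
    #include <stdio.h>
    #include <stdbool.h>

    enum virq_state {
        VIRQ_IDLE,      /* not inflight at all                        */
        VIRQ_QUEUED,    /* inflight but still on the lr_pending queue */
        VIRQ_IN_LR,     /* inflight and already loaded in a GICH_LR   */
    };

    struct pending_irq {
        enum virq_state state;
        bool migrating; /* models the GIC_IRQ_GUEST_MIGRATING flag    */
    };

    static void set_phys_affinity(unsigned int pcpu)
    {
        printf("physical irq retargeted to pcpu %u\n", pcpu);
    }

    static void migrate_virq(struct pending_irq *p, unsigned int new_pcpu)
    {
        switch ( p->state )
        {
        case VIRQ_IDLE:
        case VIRQ_QUEUED:
            /* Not in a GICH_LR yet: retarget right away (the QUEUED case
             * is additionally re-injected on the new vcpu in the patch). */
            set_phys_affinity(new_pcpu);
            break;
        case VIRQ_IN_LR:
            /* Already in a GICH_LR: mark it and defer.  In the patch,
             * gic_update_one_lr() performs the retarget once the guest
             * EOIs the virq and the MIGRATING flag is cleared. */
            p->migrating = true;
            printf("deferred until EOI\n");
            break;
        }
    }

    int main(void)
    {
        struct pending_irq idle = { VIRQ_IDLE, false };
        struct pending_irq in_lr = { VIRQ_IN_LR, false };

        migrate_virq(&idle, 2);   /* immediate retarget            */
        migrate_virq(&in_lr, 2);  /* deferred until the guest EOIs */
        return 0;
    }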

Introduce a new arch specific function, arch_move_irqs, that is empty on
x86 and implements the vgic irq migration code on ARM.
arch_move_irqs is going to be called from sched.c.
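
A rough standalone model of the two implementations is shown below (the SPI
loop starting at 32 and the MIGRATING check follow the patch; the vcpu and
SPI bookkeeping structures are simplified stand-ins): x86 gets an empty
stub, while ARM walks the domain's SPIs and retargets only those routed to
the moved vcpu that are not already mid-migration.

    /* Rough model of arch_move_irqs(): a no-op on x86, an SPI walk on ARM.
     * The vcpu/SPI structures here are simplified stand-ins. */
    #include <stdio.h>
    #include <stdbool.h>

    #define NR_LINES 64          /* illustrative: 32 SGIs/PPIs + 32 SPIs */

    struct vcpu { unsigned int processor; };

    struct spi_info {
        struct vcpu *target;     /* vcpu the virq is routed to          */
        bool migrating;          /* GIC_IRQ_GUEST_MIGRATING analogue    */
    };

    static struct spi_info spis[NR_LINES];

    static void irq_set_affinity(unsigned int irq, unsigned int pcpu)
    {
        printf("irq %u -> pcpu %u\n", irq, pcpu);
    }

    /* ARM: make the physical irqs follow the vcpu to its new pcpu,
     * skipping irqs whose migration is handled by the EOI path. */
    static void arch_move_irqs_arm(struct vcpu *v)
    {
        unsigned int i;

        for ( i = 32; i < NR_LINES; i++ )
            if ( spis[i].target == v && !spis[i].migrating )
                irq_set_affinity(i, v->processor);
    }

    /* x86: physical irq affinity is managed elsewhere, so the hook is
     * an empty stub. */
    static void arch_move_irqs_x86(struct vcpu *v) { (void)v; }

    int main(void)
    {
        struct vcpu v = { .processor = 3 };

        spis[34].target = &v;           /* routed to v: retargeted      */
        spis[35].target = &v;
        spis[35].migrating = true;      /* mid-migration: skipped       */

        arch_move_irqs_arm(&v);
        arch_move_irqs_x86(&v);
        return 0;
    }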

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>

---

Changes in v7:
- remove checks at the top of gic_irq_set_affinity, add assert instead;
- move irq_set_affinity to irq.c;
- delay setting the affinity of the physical irq when the virq is
MIGRATING until the virq is EOIed by the guest;
- do not set the affinity of MIGRATING irqs from arch_move_irqs.

Changes in v6:
- use vgic_get_target_vcpu instead of _vgic_get_target_vcpu in
arch_move_irqs.

Changes in v5:
- prettify vgic_move_irqs;
- rename vgic_move_irqs to arch_move_irqs;
- introduce helper function irq_set_affinity.
---
 xen/arch/arm/gic-v2.c     |   15 +++++++++++++--
 xen/arch/arm/gic.c        |    1 +
 xen/arch/arm/irq.c        |    6 ++++++
 xen/arch/arm/vgic.c       |   21 +++++++++++++++++++++
 xen/include/asm-arm/gic.h |    1 +
 xen/include/asm-arm/irq.h |    2 ++
 xen/include/asm-x86/irq.h |    2 ++
 7 files changed, 46 insertions(+), 2 deletions(-)

Comments

Julien Grall July 11, 2014, 1:07 p.m. UTC | #1
On 07/10/2014 07:13 PM, Stefano Stabellini wrote:
>  void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
>  {
>      const unsigned long mask = r;
> @@ -277,6 +297,7 @@ void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
>          }
>          if ( p->desc != NULL )
>          {
> +            irq_set_affinity(p->desc, cpumask_of(v_target->processor));
>              spin_lock_irqsave(&p->desc->lock, flags);
>              p->desc->handler->enable(p->desc);
>              spin_unlock_irqrestore(&p->desc->lock, flags);
> diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
> index 6410280..8d93ccd 100644
> --- a/xen/include/asm-arm/gic.h
> +++ b/xen/include/asm-arm/gic.h
> @@ -321,6 +321,7 @@ struct gic_hw_operations {
>  void register_gic_ops(const struct gic_hw_operations *ops);
>  
>  struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int irq);
> +void arch_move_irqs(struct vcpu *v);

I would stay consistent with x86 by moving this declaration into irq.h.
Stefano Stabellini July 23, 2014, 3 p.m. UTC | #2
On Fri, 11 Jul 2014, Julien Grall wrote:
> On 07/10/2014 07:13 PM, Stefano Stabellini wrote:
> >  void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
> >  {
> >      const unsigned long mask = r;
> > @@ -277,6 +297,7 @@ void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
> >          }
> >          if ( p->desc != NULL )
> >          {
> > +            irq_set_affinity(p->desc, cpumask_of(v_target->processor));
> >              spin_lock_irqsave(&p->desc->lock, flags);
> >              p->desc->handler->enable(p->desc);
> >              spin_unlock_irqrestore(&p->desc->lock, flags);
> > diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
> > index 6410280..8d93ccd 100644
> > --- a/xen/include/asm-arm/gic.h
> > +++ b/xen/include/asm-arm/gic.h
> > @@ -321,6 +321,7 @@ struct gic_hw_operations {
> >  void register_gic_ops(const struct gic_hw_operations *ops);
> >  
> >  struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int irq);
> > +void arch_move_irqs(struct vcpu *v);
> 
> I would stay consistent with x86 by moving this declaration into irq.h.

OK

Patch

diff --git a/xen/arch/arm/gic-v2.c b/xen/arch/arm/gic-v2.c
index 843f5a1..ae95178 100644
--- a/xen/arch/arm/gic-v2.c
+++ b/xen/arch/arm/gic-v2.c
@@ -572,9 +572,20 @@  static void gicv2_guest_irq_end(struct irq_desc *desc)
     /* Deactivation happens in maintenance interrupt / via GICV */
 }
 
-static void gicv2_irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
+static void gicv2_irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask)
 {
-    BUG();
+    unsigned int mask;
+
+    ASSERT(!cpumask_empty(cpu_mask));
+
+    spin_lock(&gicv2.lock);
+
+    mask = gicv2_cpu_mask(cpu_mask);
+
+    /* Set target CPU mask (RAZ/WI on uniprocessor) */
+    writeb_gicd(mask, GICD_ITARGETSR + desc->irq);
+
+    spin_unlock(&gicv2.lock);
 }
 
 /* XXX different for level vs edge */
diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index 884661c..d7e1882 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -400,6 +400,7 @@  static void gic_update_one_lr(struct vcpu *v, int i)
                 /* vgic_get_target_vcpu takes the rank lock, ensuring
                  * consistency with other itarget changes. */
                 v_target = vgic_get_target_vcpu(v, irq);
+                irq_set_affinity(p->desc, cpumask_of(v_target->processor));
                 vgic_vcpu_inject_irq(v_target, irq);
                 spin_lock(&v->arch.vgic.lock);
             }
diff --git a/xen/arch/arm/irq.c b/xen/arch/arm/irq.c
index 49ca467..7150c7a 100644
--- a/xen/arch/arm/irq.c
+++ b/xen/arch/arm/irq.c
@@ -134,6 +134,12 @@  static inline struct domain *irq_get_domain(struct irq_desc *desc)
     return desc->action->dev_id;
 }
 
+void irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask)
+{
+    if ( desc != NULL )
+        desc->handler->set_affinity(desc, cpu_mask);
+}
+
 int request_irq(unsigned int irq, unsigned int irqflags,
                 void (*handler)(int, void *, struct cpu_user_regs *),
                 const char *devname, void *dev_id)
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 5445a7b..704eaaf 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -199,6 +199,7 @@  void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
 
     if ( list_empty(&p->inflight) )
     {
+        irq_set_affinity(p->desc, cpumask_of(new->processor));
         spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
         return;
     }
@@ -207,6 +208,7 @@  void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
     {
         list_del_init(&p->lr_queue);
         list_del_init(&p->inflight);
+        irq_set_affinity(p->desc, cpumask_of(new->processor));
         spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
         vgic_vcpu_inject_irq(new, irq);
         return;
@@ -222,6 +224,24 @@  void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
     spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
 }
 
+void arch_move_irqs(struct vcpu *v)
+{
+    const cpumask_t *cpu_mask = cpumask_of(v->processor);
+    struct domain *d = v->domain;
+    struct pending_irq *p;
+    struct vcpu *v_target;
+    int i;
+
+    for ( i = 32; i < d->arch.vgic.nr_lines; i++ )
+    {
+        v_target = vgic_get_target_vcpu(v, i);
+        p = irq_to_pending(v_target, i);
+
+        if ( v_target == v && !test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
+            irq_set_affinity(p->desc, cpu_mask);
+    }
+}
+
 void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
 {
     const unsigned long mask = r;
@@ -277,6 +297,7 @@  void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
         }
         if ( p->desc != NULL )
         {
+            irq_set_affinity(p->desc, cpumask_of(v_target->processor));
             spin_lock_irqsave(&p->desc->lock, flags);
             p->desc->handler->enable(p->desc);
             spin_unlock_irqrestore(&p->desc->lock, flags);
diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
index 6410280..8d93ccd 100644
--- a/xen/include/asm-arm/gic.h
+++ b/xen/include/asm-arm/gic.h
@@ -321,6 +321,7 @@  struct gic_hw_operations {
 void register_gic_ops(const struct gic_hw_operations *ops);
 
 struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int irq);
+void arch_move_irqs(struct vcpu *v);
 
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/xen/include/asm-arm/irq.h b/xen/include/asm-arm/irq.h
index e567f71..dc282f0 100644
--- a/xen/include/asm-arm/irq.h
+++ b/xen/include/asm-arm/irq.h
@@ -48,6 +48,8 @@  int irq_set_spi_type(unsigned int spi, unsigned int type);
 
 int platform_get_irq(const struct dt_device_node *device, int index);
 
+void irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask);
+
 #endif /* _ASM_HW_IRQ_H */
 /*
  * Local variables:
diff --git a/xen/include/asm-x86/irq.h b/xen/include/asm-x86/irq.h
index 9066d38..d3c55f3 100644
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -197,4 +197,6 @@  void cleanup_domain_irq_mapping(struct domain *);
 
 bool_t cpu_has_pending_apic_eoi(void);
 
+static inline void arch_move_irqs(struct vcpu *v) { }
+
 #endif /* _ASM_HW_IRQ_H */