[Xen-devel,v5,6/6] xen/arm: physical irq follow virtual irq

Message ID 1402504032-13267-6-git-send-email-stefano.stabellini@eu.citrix.com
State New

Commit Message

Stefano Stabellini June 11, 2014, 4:27 p.m. UTC
Migrate physical irqs to the same physical cpu that is running the vcpu
expected to receive them. This is done when irqs are enabled, when the
guest writes to GICD_ITARGETSR, and when Xen migrates a vcpu to a
different pcpu.

Introduce a new arch-specific function, arch_move_irqs, which is an
empty stub on x86 and implements the vgic irq migration code on ARM.
arch_move_irqs is called by evtchn_move_pirqs.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
CC: JBeulich@suse.com

---

Changes in v5:
- prettify vgic_move_irqs;
- rename vgic_move_irqs to arch_move_irqs;
- introduce helper function irq_set_affinity.
---
 xen/arch/arm/gic.c         |   18 ++++++++++++++++--
 xen/arch/arm/vgic.c        |   42 ++++++++++++++++++++++++++++++++++++------
 xen/common/event_channel.c |    2 ++
 xen/include/asm-arm/gic.h  |    1 +
 xen/include/asm-x86/irq.h  |    2 ++
 5 files changed, 57 insertions(+), 8 deletions(-)
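
At a glance, the call path the patch puts in place (a sketch read off
the hunks below, not itself part of the diff):

    evtchn_move_pirqs(v)                    /* xen/common/event_channel.c */
      -> arch_move_irqs(v)                  /* empty inline stub on x86 */
           /* ARM: for every SPI whose target vcpu is v */
           -> irq_set_affinity(p->desc, cpumask_of(v->processor))
                -> desc->handler->set_affinity == gic_irq_set_affinity
                     /* rewrites the irq's GICD_ITARGETSR byte */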

Comments

Ian Campbell June 18, 2014, 11:15 a.m. UTC | #1
On Wed, 2014-06-11 at 17:27 +0100, Stefano Stabellini wrote:
> +struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int irq)
> +{
> +    struct vcpu *v_target;
> +    struct vgic_irq_rank *rank = vgic_rank_irq(v, irq);
> +
> +    vgic_lock_rank(v, rank);
> +    v_target = _vgic_get_target_vcpu(v, irq);
> +    vgic_unlock_rank(v, rank);
> +    return v_target;
> +}

Looks like you just moved this? Did it also change? Please can you
either introduce it in the right place in an earlier patch or leave it
where it is in this one.

> @@ -678,6 +706,8 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
>                  v_target = v->domain->vcpu[target];
>                  v_old = v->domain->vcpu[old_target];
>                  vgic_migrate_irq(v_old, v_target, irq);
> +                p = irq_to_pending(v_target, irq);
> +                irq_set_affinity(p->desc, cpumask_of(v_target->processor));

I think vgic_migrate_irq should take care of this stuff too. Any reason
not to?
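
One possible shape (a sketch only, assuming vgic_migrate_irq keeps its
current signature; irq_set_affinity already tolerates a NULL desc):

    static void vgic_migrate_irq(struct vcpu *old, struct vcpu *new,
                                 unsigned int irq)
    {
        struct pending_irq *p = irq_to_pending(old, irq);

        /* ... existing migration of the virtual pending state ... */

        /* Retarget the physical irq here, so callers such as the
         * GICD_ITARGETSR write handler don't have to do it themselves. */
        irq_set_affinity(p->desc, cpumask_of(new->processor));
    }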

> diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
> index 6853842..383057c 100644
> --- a/xen/common/event_channel.c
> +++ b/xen/common/event_channel.c
> @@ -1319,6 +1319,8 @@ void evtchn_move_pirqs(struct vcpu *v)
>      unsigned int port;
>      struct evtchn *chn;
>  
> +    arch_move_irqs(v);

It seems odd to do this from event_channel.c

I suggest adding sched_move_irqs to scheduler.c and having it call
evtchn_move_pirqs and arch_move_irqs and then replace all the existing
calls to evtchn_move_pirqs with it.

Stefano Stabellini June 20, 2014, 12:39 p.m. UTC | #2
On Wed, 18 Jun 2014, Ian Campbell wrote:
> On Wed, 2014-06-11 at 17:27 +0100, Stefano Stabellini wrote:
> > +struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int irq)
> > +{
> > +    struct vcpu *v_target;
> > +    struct vgic_irq_rank *rank = vgic_rank_irq(v, irq);
> > +
> > +    vgic_lock_rank(v, rank);
> > +    v_target = _vgic_get_target_vcpu(v, irq);
> > +    vgic_unlock_rank(v, rank);
> > +    return v_target;
> > +}
> 
> Looks like you just moved this? Did it also change? Please can you
> either introduce it in the right place in an earlier patch or leave it
> where it is in this one.

Strange, this chunk shouldn't be here at all.


> > @@ -678,6 +706,8 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
> >                  v_target = v->domain->vcpu[target];
> >                  v_old = v->domain->vcpu[old_target];
> >                  vgic_migrate_irq(v_old, v_target, irq);
> > +                p = irq_to_pending(v_target, irq);
> > +                irq_set_affinity(p->desc, cpumask_of(v_target->processor));
> 
> I think vgic_migrate_irq should take care of this stuff too. Any reason
> not to?

This covers a different case: if the guest moves an irq from vcpu0 to
vcpu1, we want to also move the corresponding physical irq from the pcpu
that is running vcpu0 to the pcpu that is running vcpu1.
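
For example (numbers made up): if SPI 40 currently targets vcpu0 running
on pcpu1, and the guest rewrites the irq's GICD_ITARGETSR byte to target
vcpu1 running on pcpu3, then vgic_migrate_irq moves the virtual pending
state from vcpu0 to vcpu1, and the irq_set_affinity call above reprograms
the physical GICD_ITARGETSR byte so the hardware delivers irq 40 to pcpu3.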


> > diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
> > index 6853842..383057c 100644
> > --- a/xen/common/event_channel.c
> > +++ b/xen/common/event_channel.c
> > @@ -1319,6 +1319,8 @@ void evtchn_move_pirqs(struct vcpu *v)
> >      unsigned int port;
> >      struct evtchn *chn;
> >  
> > +    arch_move_irqs(v);
> 
> It seems odd to do this from event_channel.c
> 
> I suggest adding sched_move_irqs to scheduler.c and having it call
> evtchn_move_pirqs and arch_move_irqs and then replace all the existing
> calls to evtchn_move_pirqs with it.
 
I am OK with that.
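
Something along these lines (a sketch of the suggestion above; the helper
doesn't exist yet, so the name and placement are only Ian's proposal):

    /* xen/common/schedule.c */
    static void sched_move_irqs(struct vcpu *v)
    {
        arch_move_irqs(v);     /* empty stub on x86, vgic walk on ARM */
        evtchn_move_pirqs(v);  /* the existing pirq event channel rebind */
    }

with the current evtchn_move_pirqs() call sites in schedule.c switched
over to sched_move_irqs(), and the arch_move_irqs() call dropped from
evtchn_move_pirqs() again.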

Patch

diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index 82a0be4..a4422fd 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -192,9 +192,23 @@  static void gic_guest_irq_end(struct irq_desc *desc)
     /* Deactivation happens in maintenance interrupt / via GICV */
 }
 
-static void gic_irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
+static void gic_irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask)
 {
-    BUG();
+    volatile unsigned char *bytereg;
+    unsigned int mask;
+
+    if ( desc == NULL || cpumask_empty(cpu_mask) )
+        return;
+
+    spin_lock(&gic.lock);
+
+    mask = gic_cpu_mask(cpu_mask);
+
+    /* Set target CPU mask (RAZ/WI on uniprocessor) */
+    bytereg = (unsigned char *) (GICD + GICD_ITARGETSR);
+    bytereg[desc->irq] = mask;
+
+    spin_unlock(&gic.lock);
 }
 
 /* XXX different for level vs edge */
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 2192a8c..696f9f4 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -395,6 +395,17 @@  static struct vcpu *_vgic_get_target_vcpu(struct vcpu *v, unsigned int irq)
     return v_target;
 }
 
+struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int irq)
+{
+    struct vcpu *v_target;
+    struct vgic_irq_rank *rank = vgic_rank_irq(v, irq);
+
+    vgic_lock_rank(v, rank);
+    v_target = _vgic_get_target_vcpu(v, irq);
+    vgic_unlock_rank(v, rank);
+    return v_target;
+}
+
 static void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
 {
     unsigned long flags;
@@ -414,15 +425,30 @@  static void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int ir
     spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
 }
 
-struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int irq)
+static inline void irq_set_affinity(struct irq_desc *desc,
+                                    const cpumask_t *cpu_mask)
 {
+    if ( desc != NULL )
+        desc->handler->set_affinity(desc, cpu_mask);
+}
+
+void arch_move_irqs(struct vcpu *v)
+{
+    const cpumask_t *cpu_mask = cpumask_of(v->processor);
+    struct domain *d = v->domain;
+    struct pending_irq *p;
     struct vcpu *v_target;
-    struct vgic_irq_rank *rank = vgic_rank_irq(v, irq);
+    int i;
 
-    vgic_lock_rank(v, rank);
-    v_target = _vgic_get_target_vcpu(v, irq);
-    vgic_unlock_rank(v, rank);
-    return v_target;
+    for ( i = 32; i < d->arch.vgic.nr_lines; i++ )
+    {
+        v_target = _vgic_get_target_vcpu(v, i);
+        if ( v_target == v )
+        {
+            p = irq_to_pending(v, i);
+            irq_set_affinity(p->desc, cpu_mask);
+        }
+    }
 }
 
 static void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
@@ -480,6 +506,7 @@  static void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
         }
         if ( p->desc != NULL )
         {
+            irq_set_affinity(p->desc, cpumask_of(v_target->processor));
             spin_lock_irqsave(&p->desc->lock, flags);
             p->desc->handler->enable(p->desc);
             spin_unlock_irqrestore(&p->desc->lock, flags);
@@ -667,6 +694,7 @@  static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
             unsigned int irq, target, old_target;
             unsigned long old_target_mask;
             struct vcpu *v_target, *v_old;
+            struct pending_irq *p;
 
             target = i % 8;
             old_target_mask = byte_read(rank->itargets[REG_RANK_INDEX(8, gicd_reg - GICD_ITARGETSR)], 0, i/8);
@@ -678,6 +706,8 @@  static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
                 v_target = v->domain->vcpu[target];
                 v_old = v->domain->vcpu[old_target];
                 vgic_migrate_irq(v_old, v_target, irq);
+                p = irq_to_pending(v_target, irq);
+                irq_set_affinity(p->desc, cpumask_of(v_target->processor));
             }
             i += 8 - target;
         }
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 6853842..383057c 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -1319,6 +1319,8 @@  void evtchn_move_pirqs(struct vcpu *v)
     unsigned int port;
     struct evtchn *chn;
 
+    arch_move_irqs(v);
+
     spin_lock(&d->event_lock);
     for ( port = v->pirq_evtchn_head; port; port = chn->u.pirq.next_port )
     {
diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
index 8f09933..ce18cfe 100644
--- a/xen/include/asm-arm/gic.h
+++ b/xen/include/asm-arm/gic.h
@@ -229,6 +229,7 @@  int gic_irq_xlate(const u32 *intspec, unsigned int intsize,
 void gic_clear_lrs(struct vcpu *v);
 
 struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int irq);
+void arch_move_irqs(struct vcpu *v);
 
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/xen/include/asm-x86/irq.h b/xen/include/asm-x86/irq.h
index 9066d38..d3c55f3 100644
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -197,4 +197,6 @@  void cleanup_domain_irq_mapping(struct domain *);
 
 bool_t cpu_has_pending_apic_eoi(void);
 
+static inline void arch_move_irqs(struct vcpu *v) { }
+
 #endif /* _ASM_HW_IRQ_H */