
[Xen-devel,v3,24/39] ARM: new VGIC: Add TARGET registers handlers

Message ID 20180321163235.12529-25-andre.przywara@linaro.org
State New
Series New VGIC(-v2) implementation

Commit Message

Andre Przywara March 21, 2018, 4:32 p.m. UTC
The target register handlers are specific to the v2 emulation, so their
implementation lives entirely in vgic-mmio-v2.c.
We copy the old VGIC's behaviour of assigning an IRQ to the first VCPU
set in the target mask, instead of making it possibly pending on
multiple VCPUs.
We update the physical affinity of a hardware-mapped vIRQ along the way.

This is based on Linux commit 2c234d6f1826, written by Andre Przywara.

Signed-off-by: Andre Przywara <andre.przywara@linaro.org>
Reviewed-by: Julien Grall <julien.grall@arm.com>
---
 xen/arch/arm/vgic/vgic-mmio-v2.c | 59 +++++++++++++++++++++++++++++++++++++++-
 1 file changed, 58 insertions(+), 1 deletion(-)
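
To make the target-byte semantics concrete, here is a minimal standalone
sketch (not Xen code: GENMASK() is redefined locally so the example is
self-contained, and the VCPU count and written value are made up) of how
one ITARGETSR byte written by the guest is reduced to a single target
VCPU, mirroring the patch's use of GENMASK() and ffs():

    /* Illustrative sketch only, not Xen code. */
    #include <stdint.h>
    #include <stdio.h>
    #include <strings.h>                /* ffs() */

    #define GENMASK(h, l)  ((~0U >> (31 - (h))) & (~0U << (l)))

    int main(void)
    {
        unsigned int max_vcpus = 4;                    /* made-up domain size */
        uint8_t cpu_mask = GENMASK(max_vcpus - 1, 0);  /* 0x0f for 4 VCPUs */
        uint8_t written = 0x06;                        /* guest asks for VCPU1|VCPU2 */
        uint8_t targets = written & cpu_mask;

        if ( targets )
            /* As in the patch: route to the first VCPU set in the mask. */
            printf("IRQ routed to VCPU%d\n", ffs(targets) - 1);  /* -> VCPU1 */
        else
            printf("empty mask: the IRQ gets no target VCPU\n");

        return 0;
    }

A write of 0x06 thus ends up targeting VCPU1 only, matching the
pick-the-first-bit behaviour described above.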

Comments

Stefano Stabellini March 27, 2018, 12:24 a.m. UTC | #1
On Wed, 21 Mar 2018, Andre Przywara wrote:
> The target register handlers are specific to the v2 emulation, so their
> implementation lives entirely in vgic-mmio-v2.c.
> We copy the old VGIC's behaviour of assigning an IRQ to the first VCPU
> set in the target mask, instead of making it possibly pending on
> multiple VCPUs.
> We update the physical affinity of a hardware-mapped vIRQ along the way.
> 
> This is based on Linux commit 2c234d6f1826, written by Andre Przywara.
> 
> Signed-off-by: Andre Przywara <andre.przywara@linaro.org>
> Reviewed-by: Julien Grall <julien.grall@arm.com>

Acked-by: Stefano Stabellini <sstabellini@kernel.org>



Patch

diff --git a/xen/arch/arm/vgic/vgic-mmio-v2.c b/xen/arch/arm/vgic/vgic-mmio-v2.c
index a28d0e459b..b333de9ed7 100644
--- a/xen/arch/arm/vgic/vgic-mmio-v2.c
+++ b/xen/arch/arm/vgic/vgic-mmio-v2.c
@@ -81,6 +81,63 @@ static void vgic_mmio_write_v2_misc(struct vcpu *vcpu,
     }
 }
 
+static unsigned long vgic_mmio_read_target(struct vcpu *vcpu,
+                                           paddr_t addr, unsigned int len)
+{
+    uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8);
+    uint32_t val = 0;
+    unsigned int i;
+
+    for ( i = 0; i < len; i++ )
+    {
+        struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
+
+        val |= (uint32_t)irq->targets << (i * 8);
+
+        vgic_put_irq(vcpu->domain, irq);
+    }
+
+    return val;
+}
+
+static void vgic_mmio_write_target(struct vcpu *vcpu,
+                                   paddr_t addr, unsigned int len,
+                                   unsigned long val)
+{
+    uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8);
+    uint8_t cpu_mask = GENMASK(vcpu->domain->max_vcpus - 1, 0);
+    unsigned int i;
+    unsigned long flags;
+
+    /* GICD_ITARGETSR[0-7] are read-only */
+    if ( intid < VGIC_NR_PRIVATE_IRQS )
+        return;
+
+    for ( i = 0; i < len; i++ )
+    {
+        struct vgic_irq *irq = vgic_get_irq(vcpu->domain, NULL, intid + i);
+
+        spin_lock_irqsave(&irq->irq_lock, flags);
+
+        irq->targets = (val >> (i * 8)) & cpu_mask;
+        if ( irq->targets )
+        {
+            irq->target_vcpu = vcpu->domain->vcpu[ffs(irq->targets) - 1];
+            if ( irq->hw )
+            {
+                struct irq_desc *desc = irq_to_desc(irq->hwintid);
+
+                irq_set_affinity(desc, cpumask_of(irq->target_vcpu->processor));
+            }
+        }
+        else
+            irq->target_vcpu = NULL;
+
+        spin_unlock_irqrestore(&irq->irq_lock, flags);
+        vgic_put_irq(vcpu->domain, irq);
+    }
+}
+
 static const struct vgic_register_region vgic_v2_dist_registers[] = {
     REGISTER_DESC_WITH_LENGTH(GICD_CTLR,
         vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
@@ -110,7 +167,7 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
         vgic_mmio_read_priority, vgic_mmio_write_priority, 8,
         VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
     REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ITARGETSR,
-        vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
+        vgic_mmio_read_target, vgic_mmio_write_target, 8,
         VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
     REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICFGR,
         vgic_mmio_read_config, vgic_mmio_write_config, 2,
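
For reference, the offset-to-INTID math behind VGIC_ADDR_TO_INTID(addr, 8)
works out as sketched below. This is a simplified illustration of the
assumed convention (GICD_ITARGETSR lives at distributor offset 0x800 per
the GICv2 spec, and with 8 bits per interrupt each byte describes exactly
one INTID); the addr_to_intid() helper is hypothetical and not the actual
macro from the Xen vgic headers:

    /* Simplified sketch of the assumed byte-per-IRQ offset math. */
    #include <stdio.h>

    #define GICD_ITARGETSR  0x800U   /* distributor offset, GICv2 spec */
    #define BITS_PER_IRQ    8U       /* ITARGETSR: one byte per interrupt */

    /* Hypothetical helper, not the real VGIC_ADDR_TO_INTID() macro. */
    static unsigned int addr_to_intid(unsigned int dist_offset)
    {
        return (dist_offset - GICD_ITARGETSR) * 8U / BITS_PER_IRQ;
    }

    int main(void)
    {
        /* A 32-bit access at GICD_ITARGETSR + 0x20 covers INTIDs 32..35. */
        unsigned int first = addr_to_intid(GICD_ITARGETSR + 0x20);

        printf("access covers INTIDs %u..%u\n", first, first + 3);

        /*
         * INTIDs below 32 (SGIs and PPIs) fall into the read-only
         * ITARGETSR0..7 bytes, which is why vgic_mmio_write_target()
         * returns early for intid < VGIC_NR_PRIVATE_IRQS.
         */
        return 0;
    }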