
[Xen-devel,v3,20/39] ARM: new VGIC: Add PENDING registers handlers

Message ID 20180321163235.12529-21-andre.przywara@linaro.org
State New
Series New VGIC(-v2) implementation

Commit Message

Andre Przywara March 21, 2018, 4:32 p.m. UTC
The pending register handlers are shared between the v2 and v3
emulation, so their implementation goes into vgic-mmio.c, to be easily
referenced from the v3 emulation as well later.
For level triggered interrupts the real line level is unaffected by
this write, so we keep this state separate and combine it with the
device's level to get the actual pending state.
Hardware mapped IRQs need some special handling, as their hardware state
has to be coordinated with the virtual pending bit to avoid hanging
or masked interrupts.

This is based on Linux commit 96b298000db4, written by Andre Przywara.

Signed-off-by: Andre Przywara <andre.przywara@linaro.org>
Reviewed-by: Julien Grall <julien.grall@arm.com>
---
 xen/arch/arm/vgic/vgic-mmio-v2.c |   4 +-
 xen/arch/arm/vgic/vgic-mmio.c    | 125 +++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/vgic/vgic-mmio.h    |  11 ++++
 3 files changed, 138 insertions(+), 2 deletions(-)
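
For reference while reading the handlers below: they rely on a helper along
the lines of irq_is_pending(), introduced earlier in this series, which
combines the write-settable pending latch with the device's line level. A
minimal sketch of what such a helper looks like, assuming the new VGIC's
struct vgic_irq fields (config, pending_latch, line_level); it is not part
of this diff:

    static inline bool irq_is_pending(struct vgic_irq *irq)
    {
        /*
         * Edge interrupts only report the write-settable latch; level
         * interrupts combine the latch with the device's line level.
         */
        if ( irq->config == VGIC_CONFIG_EDGE )
            return irq->pending_latch;
        else
            return irq->pending_latch || irq->line_level;
    }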

Comments

Stefano Stabellini March 27, 2018, 9:14 p.m. UTC | #1
On Wed, 21 Mar 2018, Andre Przywara wrote:
> The pending register handlers are shared between the v2 and v3
> emulation, so their implementation goes into vgic-mmio.c, to be easily
> referenced from the v3 emulation as well later.
> For level triggered interrupts the real line level is unaffected by
> this write, so we keep this state separate and combine it with the
> device's level to get the actual pending state.
> Hardware mapped IRQs need some special handling, as their hardware state
> has to be coordinated with the virtual pending bit to avoid hanging
> or masked interrupts.
> 
> This is based on Linux commit 96b298000db4, written by Andre Przywara.
> 
> Signed-off-by: Andre Przywara <andre.przywara@linaro.org>
> Reviewed-by: Julien Grall <julien.grall@arm.com>
> ---
>  xen/arch/arm/vgic/vgic-mmio-v2.c |   4 +-
>  xen/arch/arm/vgic/vgic-mmio.c    | 125 +++++++++++++++++++++++++++++++++++++++
>  xen/arch/arm/vgic/vgic-mmio.h    |  11 ++++
>  3 files changed, 138 insertions(+), 2 deletions(-)
> 
> diff --git a/xen/arch/arm/vgic/vgic-mmio-v2.c b/xen/arch/arm/vgic/vgic-mmio-v2.c
> index 7efd1c4eb4..a48c554040 100644
> --- a/xen/arch/arm/vgic/vgic-mmio-v2.c
> +++ b/xen/arch/arm/vgic/vgic-mmio-v2.c
> @@ -95,10 +95,10 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
>          vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
>          VGIC_ACCESS_32bit),
>      REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISPENDR,
> -        vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
> +        vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
>          VGIC_ACCESS_32bit),
>      REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICPENDR,
> -        vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
> +        vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
>          VGIC_ACCESS_32bit),
>      REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISACTIVER,
>          vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
> diff --git a/xen/arch/arm/vgic/vgic-mmio.c b/xen/arch/arm/vgic/vgic-mmio.c
> index f219b7c509..53b8978c02 100644
> --- a/xen/arch/arm/vgic/vgic-mmio.c
> +++ b/xen/arch/arm/vgic/vgic-mmio.c
> @@ -156,6 +156,131 @@ void vgic_mmio_write_cenable(struct vcpu *vcpu,
>      }
>  }
>  
> +unsigned long vgic_mmio_read_pending(struct vcpu *vcpu,
> +                                     paddr_t addr, unsigned int len)
> +{
> +    uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
> +    uint32_t value = 0;
> +    unsigned int i;
> +
> +    /* Loop over all IRQs affected by this read */
> +    for ( i = 0; i < len * 8; i++ )
> +    {
> +        struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
> +
> +        if ( irq_is_pending(irq) )
> +            value |= (1U << i);

Same question: shouldn't we take the irq->irq_lock?


> +        vgic_put_irq(vcpu->domain, irq);
> +    }
> +
> +    return value;
> +}
> +
> +void vgic_mmio_write_spending(struct vcpu *vcpu,
> +                              paddr_t addr, unsigned int len,
> +                              unsigned long val)
> +{
> +    uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
> +    unsigned int i;
> +    unsigned long flags;
> +    irq_desc_t *desc;
> +
> +    for_each_set_bit( i, &val, len * 8 )
> +    {
> +        struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
> +
> +        spin_lock_irqsave(&irq->irq_lock, flags);
> +        irq->pending_latch = true;
> +
> +        /* To observe the locking order, just take the irq_desc pointer here. */
> +        if ( irq->hw )
> +            desc = irq_to_desc(irq->hwintid);
> +        else
> +            desc = NULL;
> +
> +        vgic_queue_irq_unlock(vcpu->domain, irq, flags);
> +
> +        /*
> +         * When the VM sets the pending state for a HW interrupt on the virtual
> +         * distributor we set the active state on the physical distributor,
> +         * because the virtual interrupt can become active and then the guest
> +         * can deactivate it.
> +         */
> +        if ( desc )
> +        {
> +            spin_lock_irqsave(&desc->lock, flags);
> +            spin_lock(&irq->irq_lock);
> +
> +            /* This h/w IRQ should still be assigned to the virtual IRQ. */
> +            ASSERT(irq->hw && desc->irq == irq->hwintid);
> +
> +            gic_set_active_state(desc, true);
> +
> +            spin_unlock(&irq->irq_lock);
> +            spin_unlock_irqrestore(&desc->lock, flags);
> +        }
> +
> +        vgic_put_irq(vcpu->domain, irq);
> +    }
> +}
> +
> +void vgic_mmio_write_cpending(struct vcpu *vcpu,
> +                              paddr_t addr, unsigned int len,
> +                              unsigned long val)
> +{
> +    uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
> +    unsigned int i;
> +    unsigned long flags;
> +    irq_desc_t *desc;
> +
> +    for_each_set_bit( i, &val, len * 8 )
> +    {
> +        struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
> +
> +        spin_lock_irqsave(&irq->irq_lock, flags);
> +        irq->pending_latch = false;
> +
> +        /* To observe the locking order, just take the irq_desc pointer here. */
> +        if ( irq->hw )
> +            desc = irq_to_desc(irq->hwintid);
> +        else
> +            desc = NULL;
> +
> +        spin_unlock_irqrestore(&irq->irq_lock, flags);
> +
> +        /*
> +         * We don't want the guest to effectively mask the physical
> +         * interrupt by doing a write to SPENDR followed by a write to
> +         * CPENDR for HW interrupts, so we clear the active state on
> +         * the physical side if the virtual interrupt is not active.
> +         * This may lead to taking an additional interrupt on the
> +         * host, but that should not be a problem as the worst that
> +         * can happen is an additional vgic injection.  We also clear
> +         * the pending state to maintain proper semantics for edge HW
> +         * interrupts.
> +         */
> +        if ( desc )
> +        {
> +            spin_lock_irqsave(&desc->lock, flags);
> +            spin_lock(&irq->irq_lock);
> +
> +            /* This h/w IRQ should still be assigned to the virtual IRQ. */
> +            ASSERT(irq->hw && desc->irq == irq->hwintid);
> +
> +            gic_set_pending_state(desc, false);

Should we check irq_is_pending before calling gic_set_pending_state? I
am asking because I think it is possible to race against another
concurrent change that could come in after releasing irq_lock and before
taking desc->lock and irq->irq_lock again. If we check what's the latest
about the pending state we should be safe against the race, similarly to
what you did in the previous patch in vgic_sync_hardware_irq.


> +            if (!irq->active)
> +                gic_set_active_state(desc, false);
> +
> +            spin_unlock(&irq->irq_lock);
> +            spin_unlock_irqrestore(&desc->lock, flags);
> +        }
> +
> +
> +        vgic_put_irq(vcpu->domain, irq);
> +    }
> +}
> +
>  static int match_region(const void *key, const void *elt)
>  {
>      const unsigned int offset = (unsigned long)key;
> diff --git a/xen/arch/arm/vgic/vgic-mmio.h b/xen/arch/arm/vgic/vgic-mmio.h
> index a2cebd77f4..5c927f28b0 100644
> --- a/xen/arch/arm/vgic/vgic-mmio.h
> +++ b/xen/arch/arm/vgic/vgic-mmio.h
> @@ -97,6 +97,17 @@ void vgic_mmio_write_cenable(struct vcpu *vcpu,
>                               paddr_t addr, unsigned int len,
>                               unsigned long val);
>  
> +unsigned long vgic_mmio_read_pending(struct vcpu *vcpu,
> +                                     paddr_t addr, unsigned int len);
> +
> +void vgic_mmio_write_spending(struct vcpu *vcpu,
> +                              paddr_t addr, unsigned int len,
> +                              unsigned long val);
> +
> +void vgic_mmio_write_cpending(struct vcpu *vcpu,
> +                              paddr_t addr, unsigned int len,
> +                              unsigned long val);
> +
>  unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);
>  
>  #endif
> -- 
> 2.14.1
>
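
The irq_lock question above, applied to the read side, would roughly mean
taking irq->irq_lock around the pending check so that the latch and the line
level are sampled consistently. A sketch of the loop body only, not the
posted code:

        struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
        unsigned long flags;

        /* Sample the pending state under the per-IRQ lock. */
        spin_lock_irqsave(&irq->irq_lock, flags);
        if ( irq_is_pending(irq) )
            value |= (1U << i);
        spin_unlock_irqrestore(&irq->irq_lock, flags);

        vgic_put_irq(vcpu->domain, irq);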
Andre Przywara March 28, 2018, 2:10 p.m. UTC | #2
Hi,

On 27/03/18 22:14, Stefano Stabellini wrote:
> On Wed, 21 Mar 2018, Andre Przywara wrote:
>> The pending register handlers are shared between the v2 and v3
>> emulation, so their implementation goes into vgic-mmio.c, to be easily
>> referenced from the v3 emulation as well later.
>> For level triggered interrupts the real line level is unaffected by
>> this write, so we keep this state separate and combine it with the
>> device's level to get the actual pending state.
>> Hardware mapped IRQs need some special handling, as their hardware state
>> has to be coordinated with the virtual pending bit to avoid hanging
>> or masked interrupts.
>>
>> This is based on Linux commit 96b298000db4, written by Andre Przywara.
>>
>> Signed-off-by: Andre Przywara <andre.przywara@linaro.org>
>> Reviewed-by: Julien Grall <julien.grall@arm.com>
>> ---
>>  xen/arch/arm/vgic/vgic-mmio-v2.c |   4 +-
>>  xen/arch/arm/vgic/vgic-mmio.c    | 125 +++++++++++++++++++++++++++++++++++++++
>>  xen/arch/arm/vgic/vgic-mmio.h    |  11 ++++
>>  3 files changed, 138 insertions(+), 2 deletions(-)
>>
>> diff --git a/xen/arch/arm/vgic/vgic-mmio-v2.c b/xen/arch/arm/vgic/vgic-mmio-v2.c
>> index 7efd1c4eb4..a48c554040 100644
>> --- a/xen/arch/arm/vgic/vgic-mmio-v2.c
>> +++ b/xen/arch/arm/vgic/vgic-mmio-v2.c
>> @@ -95,10 +95,10 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
>>          vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
>>          VGIC_ACCESS_32bit),
>>      REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISPENDR,
>> -        vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
>> +        vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
>>          VGIC_ACCESS_32bit),
>>      REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICPENDR,
>> -        vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
>> +        vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
>>          VGIC_ACCESS_32bit),
>>      REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISACTIVER,
>>          vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
>> diff --git a/xen/arch/arm/vgic/vgic-mmio.c b/xen/arch/arm/vgic/vgic-mmio.c
>> index f219b7c509..53b8978c02 100644
>> --- a/xen/arch/arm/vgic/vgic-mmio.c
>> +++ b/xen/arch/arm/vgic/vgic-mmio.c
>> @@ -156,6 +156,131 @@ void vgic_mmio_write_cenable(struct vcpu *vcpu,
>>      }
>>  }
>>  
>> +unsigned long vgic_mmio_read_pending(struct vcpu *vcpu,
>> +                                     paddr_t addr, unsigned int len)
>> +{
>> +    uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
>> +    uint32_t value = 0;
>> +    unsigned int i;
>> +
>> +    /* Loop over all IRQs affected by this read */
>> +    for ( i = 0; i < len * 8; i++ )
>> +    {
>> +        struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
>> +
>> +        if ( irq_is_pending(irq) )
>> +            value |= (1U << i);
> 
> Same question: shouldn't we take the irq->irq_lock?
> 
> 
>> +        vgic_put_irq(vcpu->domain, irq);
>> +    }
>> +
>> +    return value;
>> +}
>> +
>> +void vgic_mmio_write_spending(struct vcpu *vcpu,
>> +                              paddr_t addr, unsigned int len,
>> +                              unsigned long val)
>> +{
>> +    uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
>> +    unsigned int i;
>> +    unsigned long flags;
>> +    irq_desc_t *desc;
>> +
>> +    for_each_set_bit( i, &val, len * 8 )
>> +    {
>> +        struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
>> +
>> +        spin_lock_irqsave(&irq->irq_lock, flags);
>> +        irq->pending_latch = true;
>> +
>> +        /* To observe the locking order, just take the irq_desc pointer here. */
>> +        if ( irq->hw )
>> +            desc = irq_to_desc(irq->hwintid);
>> +        else
>> +            desc = NULL;
>> +
>> +        vgic_queue_irq_unlock(vcpu->domain, irq, flags);
>> +
>> +        /*
>> +         * When the VM sets the pending state for a HW interrupt on the virtual
>> +         * distributor we set the active state on the physical distributor,
>> +         * because the virtual interrupt can become active and then the guest
>> +         * can deactivate it.
>> +         */
>> +        if ( desc )
>> +        {
>> +            spin_lock_irqsave(&desc->lock, flags);
>> +            spin_lock(&irq->irq_lock);
>> +
>> +            /* This h/w IRQ should still be assigned to the virtual IRQ. */
>> +            ASSERT(irq->hw && desc->irq == irq->hwintid);
>> +
>> +            gic_set_active_state(desc, true);
>> +
>> +            spin_unlock(&irq->irq_lock);
>> +            spin_unlock_irqrestore(&desc->lock, flags);
>> +        }
>> +
>> +        vgic_put_irq(vcpu->domain, irq);
>> +    }
>> +}
>> +
>> +void vgic_mmio_write_cpending(struct vcpu *vcpu,
>> +                              paddr_t addr, unsigned int len,
>> +                              unsigned long val)
>> +{
>> +    uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
>> +    unsigned int i;
>> +    unsigned long flags;
>> +    irq_desc_t *desc;
>> +
>> +    for_each_set_bit( i, &val, len * 8 )
>> +    {
>> +        struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
>> +
>> +        spin_lock_irqsave(&irq->irq_lock, flags);
>> +        irq->pending_latch = false;
>> +
>> +        /* To observe the locking order, just take the irq_desc pointer here. */
>> +        if ( irq->hw )
>> +            desc = irq_to_desc(irq->hwintid);
>> +        else
>> +            desc = NULL;
>> +
>> +        spin_unlock_irqrestore(&irq->irq_lock, flags);
>> +
>> +        /*
>> +         * We don't want the guest to effectively mask the physical
>> +         * interrupt by doing a write to SPENDR followed by a write to
>> +         * CPENDR for HW interrupts, so we clear the active state on
>> +         * the physical side if the virtual interrupt is not active.
>> +         * This may lead to taking an additional interrupt on the
>> +         * host, but that should not be a problem as the worst that
>> +         * can happen is an additional vgic injection.  We also clear
>> +         * the pending state to maintain proper semantics for edge HW
>> +         * interrupts.
>> +         */
>> +        if ( desc )
>> +        {
>> +            spin_lock_irqsave(&desc->lock, flags);
>> +            spin_lock(&irq->irq_lock);
>> +
>> +            /* This h/w IRQ should still be assigned to the virtual IRQ. */
>> +            ASSERT(irq->hw && desc->irq == irq->hwintid);
>> +
>> +            gic_set_pending_state(desc, false);
> 
> Should we check irq_is_pending before calling gic_set_pending_state?
> I am asking because I think it is possible to race against another
> concurrent change that could come in after releasing irq_lock and before
> taking desc->lock and irq->irq_lock again. If we check what's the latest
> about the pending state we should be safe against the race, similarly to
> what you did in the previous patch in vgic_sync_hardware_irq.

Yeah, that's a good point, that belongs to the condition of "nothing
(critical) has changed meanwhile".

Cheers,
Andre.

>> +            if (!irq->active)
>> +                gic_set_active_state(desc, false);
>> +
>> +            spin_unlock(&irq->irq_lock);
>> +            spin_unlock_irqrestore(&desc->lock, flags);
>> +        }
>> +
>> +
>> +        vgic_put_irq(vcpu->domain, irq);
>> +    }
>> +}
>> +
>>  static int match_region(const void *key, const void *elt)
>>  {
>>      const unsigned int offset = (unsigned long)key;
>> diff --git a/xen/arch/arm/vgic/vgic-mmio.h b/xen/arch/arm/vgic/vgic-mmio.h
>> index a2cebd77f4..5c927f28b0 100644
>> --- a/xen/arch/arm/vgic/vgic-mmio.h
>> +++ b/xen/arch/arm/vgic/vgic-mmio.h
>> @@ -97,6 +97,17 @@ void vgic_mmio_write_cenable(struct vcpu *vcpu,
>>                               paddr_t addr, unsigned int len,
>>                               unsigned long val);
>>  
>> +unsigned long vgic_mmio_read_pending(struct vcpu *vcpu,
>> +                                     paddr_t addr, unsigned int len);
>> +
>> +void vgic_mmio_write_spending(struct vcpu *vcpu,
>> +                              paddr_t addr, unsigned int len,
>> +                              unsigned long val);
>> +
>> +void vgic_mmio_write_cpending(struct vcpu *vcpu,
>> +                              paddr_t addr, unsigned int len,
>> +                              unsigned long val);
>> +
>>  unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);
>>  
>>  #endif
>> -- 
>> 2.14.1
>>
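
Concretely, the re-check Stefano suggests (and Andre agrees with above) would
mean re-evaluating the pending state once both desc->lock and irq->irq_lock
are held, before touching the physical distributor. A sketch of how the tail
of vgic_mmio_write_cpending() could look, using only the helpers already
visible in this patch; this is the suggested direction, not the code as
posted:

        if ( desc )
        {
            spin_lock_irqsave(&desc->lock, flags);
            spin_lock(&irq->irq_lock);

            /* This h/w IRQ should still be assigned to the virtual IRQ. */
            ASSERT(irq->hw && desc->irq == irq->hwintid);

            /*
             * Re-check under both locks: another CPU may have set the
             * pending state again between dropping irq_lock above and
             * re-acquiring it here.
             */
            if ( !irq_is_pending(irq) )
                gic_set_pending_state(desc, false);

            if ( !irq->active )
                gic_set_active_state(desc, false);

            spin_unlock(&irq->irq_lock);
            spin_unlock_irqrestore(&desc->lock, flags);
        }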

Patch

diff --git a/xen/arch/arm/vgic/vgic-mmio-v2.c b/xen/arch/arm/vgic/vgic-mmio-v2.c
index 7efd1c4eb4..a48c554040 100644
--- a/xen/arch/arm/vgic/vgic-mmio-v2.c
+++ b/xen/arch/arm/vgic/vgic-mmio-v2.c
@@ -95,10 +95,10 @@  static const struct vgic_register_region vgic_v2_dist_registers[] = {
         vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
         VGIC_ACCESS_32bit),
     REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISPENDR,
-        vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
+        vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
         VGIC_ACCESS_32bit),
     REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICPENDR,
-        vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
+        vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
         VGIC_ACCESS_32bit),
     REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISACTIVER,
         vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
diff --git a/xen/arch/arm/vgic/vgic-mmio.c b/xen/arch/arm/vgic/vgic-mmio.c
index f219b7c509..53b8978c02 100644
--- a/xen/arch/arm/vgic/vgic-mmio.c
+++ b/xen/arch/arm/vgic/vgic-mmio.c
@@ -156,6 +156,131 @@  void vgic_mmio_write_cenable(struct vcpu *vcpu,
     }
 }
 
+unsigned long vgic_mmio_read_pending(struct vcpu *vcpu,
+                                     paddr_t addr, unsigned int len)
+{
+    uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
+    uint32_t value = 0;
+    unsigned int i;
+
+    /* Loop over all IRQs affected by this read */
+    for ( i = 0; i < len * 8; i++ )
+    {
+        struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
+
+        if ( irq_is_pending(irq) )
+            value |= (1U << i);
+
+        vgic_put_irq(vcpu->domain, irq);
+    }
+
+    return value;
+}
+
+void vgic_mmio_write_spending(struct vcpu *vcpu,
+                              paddr_t addr, unsigned int len,
+                              unsigned long val)
+{
+    uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
+    unsigned int i;
+    unsigned long flags;
+    irq_desc_t *desc;
+
+    for_each_set_bit( i, &val, len * 8 )
+    {
+        struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
+
+        spin_lock_irqsave(&irq->irq_lock, flags);
+        irq->pending_latch = true;
+
+        /* To observe the locking order, just take the irq_desc pointer here. */
+        if ( irq->hw )
+            desc = irq_to_desc(irq->hwintid);
+        else
+            desc = NULL;
+
+        vgic_queue_irq_unlock(vcpu->domain, irq, flags);
+
+        /*
+         * When the VM sets the pending state for a HW interrupt on the virtual
+         * distributor we set the active state on the physical distributor,
+         * because the virtual interrupt can become active and then the guest
+         * can deactivate it.
+         */
+        if ( desc )
+        {
+            spin_lock_irqsave(&desc->lock, flags);
+            spin_lock(&irq->irq_lock);
+
+            /* This h/w IRQ should still be assigned to the virtual IRQ. */
+            ASSERT(irq->hw && desc->irq == irq->hwintid);
+
+            gic_set_active_state(desc, true);
+
+            spin_unlock(&irq->irq_lock);
+            spin_unlock_irqrestore(&desc->lock, flags);
+        }
+
+        vgic_put_irq(vcpu->domain, irq);
+    }
+}
+
+void vgic_mmio_write_cpending(struct vcpu *vcpu,
+                              paddr_t addr, unsigned int len,
+                              unsigned long val)
+{
+    uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
+    unsigned int i;
+    unsigned long flags;
+    irq_desc_t *desc;
+
+    for_each_set_bit( i, &val, len * 8 )
+    {
+        struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
+
+        spin_lock_irqsave(&irq->irq_lock, flags);
+        irq->pending_latch = false;
+
+        /* To observe the locking order, just take the irq_desc pointer here. */
+        if ( irq->hw )
+            desc = irq_to_desc(irq->hwintid);
+        else
+            desc = NULL;
+
+        spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+        /*
+         * We don't want the guest to effectively mask the physical
+         * interrupt by doing a write to SPENDR followed by a write to
+         * CPENDR for HW interrupts, so we clear the active state on
+         * the physical side if the virtual interrupt is not active.
+         * This may lead to taking an additional interrupt on the
+         * host, but that should not be a problem as the worst that
+         * can happen is an additional vgic injection.  We also clear
+         * the pending state to maintain proper semantics for edge HW
+         * interrupts.
+         */
+        if ( desc )
+        {
+            spin_lock_irqsave(&desc->lock, flags);
+            spin_lock(&irq->irq_lock);
+
+            /* This h/w IRQ should still be assigned to the virtual IRQ. */
+            ASSERT(irq->hw && desc->irq == irq->hwintid);
+
+            gic_set_pending_state(desc, false);
+            if (!irq->active)
+                gic_set_active_state(desc, false);
+
+            spin_unlock(&irq->irq_lock);
+            spin_unlock_irqrestore(&desc->lock, flags);
+        }
+
+
+        vgic_put_irq(vcpu->domain, irq);
+    }
+}
+
 static int match_region(const void *key, const void *elt)
 {
     const unsigned int offset = (unsigned long)key;
diff --git a/xen/arch/arm/vgic/vgic-mmio.h b/xen/arch/arm/vgic/vgic-mmio.h
index a2cebd77f4..5c927f28b0 100644
--- a/xen/arch/arm/vgic/vgic-mmio.h
+++ b/xen/arch/arm/vgic/vgic-mmio.h
@@ -97,6 +97,17 @@  void vgic_mmio_write_cenable(struct vcpu *vcpu,
                              paddr_t addr, unsigned int len,
                              unsigned long val);
 
+unsigned long vgic_mmio_read_pending(struct vcpu *vcpu,
+                                     paddr_t addr, unsigned int len);
+
+void vgic_mmio_write_spending(struct vcpu *vcpu,
+                              paddr_t addr, unsigned int len,
+                              unsigned long val);
+
+void vgic_mmio_write_cpending(struct vcpu *vcpu,
+                              paddr_t addr, unsigned int len,
+                              unsigned long val);
+
 unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);
 
 #endif
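
A side note on the register decoding used by all three handlers: GICD_ISPENDR
and GICD_ICPENDR allocate one bit per interrupt, so VGIC_ADDR_TO_INTID(addr, 1)
resolves a byte offset within the register frame to the first of the eight
INTIDs covered by that byte. A rough illustration with a hypothetical helper
(the real macro lives in vgic-mmio.h and also handles other bits-per-IRQ
encodings):

    /* One bit per interrupt: each byte of ISPENDR/ICPENDR covers 8 INTIDs. */
    static unsigned int pending_offset_to_intid(unsigned int byte_offset)
    {
        return byte_offset * 8;
    }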