diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
--- a/xen/arch/arm/Makefile
+++ b/xen/arch/arm/Makefile
@@ -16,6 +16,7 @@ obj-y += domain_build.o
obj-y += domctl.o
obj-$(EARLY_PRINTK) += early_printk.o
obj-y += gic.o
+obj-y += gic-vgic.o
obj-y += gic-v2.o
obj-$(CONFIG_HAS_GICV3) += gic-v3.o
obj-$(CONFIG_HAS_ITS) += gic-v3-its.o
diff --git a/xen/arch/arm/gic-vgic.c b/xen/arch/arm/gic-vgic.c
new file mode 100644
--- /dev/null
+++ b/xen/arch/arm/gic-vgic.c
@@ -0,0 +1,457 @@
+/*
+ * xen/arch/arm/gic-vgic.c
+ *
+ * ARM Generic Interrupt Controller virtualization support
+ *
+ * Tim Deegan <tim@xen.org>
+ * Copyright (c) 2011 Citrix Systems.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <xen/lib.h>
+#include <xen/init.h>
+#include <xen/mm.h>
+#include <xen/irq.h>
+#include <xen/sched.h>
+#include <xen/errno.h>
+#include <xen/softirq.h>
+#include <xen/list.h>
+#include <xen/device_tree.h>
+#include <xen/acpi.h>
+#include <asm/p2m.h>
+#include <asm/domain.h>
+#include <asm/platform.h>
+#include <asm/device.h>
+#include <asm/io.h>
+#include <asm/gic.h>
+#include <asm/vgic.h>
+#include <asm/acpi.h>
+
+/* The per-pCPU bitmask of in-use LRs; both are defined in gic.c. */
+DECLARE_PER_CPU(uint64_t, lr_mask);
+extern const struct gic_hw_operations *gic_hw_ops;
+
+#define lr_all_full() (this_cpu(lr_mask) == ((1 << gic_hw_ops->info->nr_lrs) - 1))
+
+#undef GIC_DEBUG
+
+static void gic_update_one_lr(struct vcpu *v, int i);
+
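+/*
+ * Program LR @lr with the interrupt described by @p in the given @state.
+ * The IRQ becomes VISIBLE to the guest, is no longer QUEUED, and its
+ * pending_irq remembers which LR it occupies.
+ */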
+static inline void gic_set_lr(int lr, struct pending_irq *p,
+ unsigned int state)
+{
+ ASSERT(!local_irq_is_enabled());
+
+ clear_bit(GIC_IRQ_GUEST_PRISTINE_LPI, &p->status);
+
+ gic_hw_ops->update_lr(lr, p, state);
+
+ set_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
+ clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status);
+ p->lr = lr;
+}
+
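+/*
+ * Queue @n on @v's list of interrupts waiting for a free LR, keeping the
+ * list sorted by priority (lowest priority value, i.e. highest priority,
+ * first). Does nothing if @n is already queued.
+ */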
+static inline void gic_add_to_lr_pending(struct vcpu *v, struct pending_irq *n)
+{
+ struct pending_irq *iter;
+
+ ASSERT(spin_is_locked(&v->arch.vgic.lock));
+
+ if ( !list_empty(&n->lr_queue) )
+ return;
+
+ list_for_each_entry ( iter, &v->arch.vgic.lr_pending, lr_queue )
+ {
+ if ( iter->priority > n->priority )
+ {
+ list_add_tail(&n->lr_queue, &iter->lr_queue);
+ return;
+ }
+ }
+ list_add_tail(&n->lr_queue, &v->arch.vgic.lr_pending);
+}
+
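+/* Dequeue @p from @v's list of interrupts waiting for a free LR. */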
+void gic_remove_from_lr_pending(struct vcpu *v, struct pending_irq *p)
+{
+ ASSERT(spin_is_locked(&v->arch.vgic.lock));
+
+ list_del_init(&p->lr_queue);
+}
+
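+/*
+ * A new instance of an interrupt that is already inflight for @v has
+ * become pending: if it is enabled and sits in an LR of the running vCPU,
+ * re-evaluate that LR so the guest sees the interrupt as pending again.
+ */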
+void gic_raise_inflight_irq(struct vcpu *v, unsigned int virtual_irq)
+{
+ struct pending_irq *n = irq_to_pending(v, virtual_irq);
+
+ /* If an LPI has been removed meanwhile, there is nothing left to raise. */
+ if ( unlikely(!n) )
+ return;
+
+ ASSERT(spin_is_locked(&v->arch.vgic.lock));
+
+ /* Don't try to update the LR if the interrupt is disabled */
+ if ( !test_bit(GIC_IRQ_GUEST_ENABLED, &n->status) )
+ return;
+
+ if ( list_empty(&n->lr_queue) )
+ {
+ if ( v == current )
+ gic_update_one_lr(v, n->lr);
+ }
+#ifdef GIC_DEBUG
+ else
+ gdprintk(XENLOG_DEBUG, "trying to inject irq=%u into d%dv%d, when it is still lr_pending\n",
+ virtual_irq, v->domain->domain_id, v->vcpu_id);
+#endif
+}
+
+/*
+ * Find an unused LR to insert an IRQ into, starting with the LR given
+ * by @lr. If this new interrupt is a PRISTINE LPI, scan the other LRs to
+ * avoid inserting the same IRQ twice. This situation can occur when an
+ * event gets discarded while the LPI is in an LR, and a new LPI with the
+ * same number gets mapped quickly afterwards.
+ */
+static unsigned int gic_find_unused_lr(struct vcpu *v,
+ struct pending_irq *p,
+ unsigned int lr)
+{
+ unsigned int nr_lrs = gic_hw_ops->info->nr_lrs;
+ unsigned long *lr_mask = (unsigned long *) &this_cpu(lr_mask);
+ struct gic_lr lr_val;
+
+ ASSERT(spin_is_locked(&v->arch.vgic.lock));
+
+ if ( unlikely(test_bit(GIC_IRQ_GUEST_PRISTINE_LPI, &p->status)) )
+ {
+ unsigned int used_lr;
+
+ for_each_set_bit(used_lr, lr_mask, nr_lrs)
+ {
+ gic_hw_ops->read_lr(used_lr, &lr_val);
+ if ( lr_val.virq == p->irq )
+ return used_lr;
+ }
+ }
+
+ lr = find_next_zero_bit(lr_mask, nr_lrs, lr);
+
+ return lr;
+}
+
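+/*
+ * Raise @virtual_irq for @v: inject it straight into a free LR if @v is
+ * the running vCPU and no other interrupt is waiting for an LR, otherwise
+ * queue it on the lr_pending list until an LR becomes available.
+ */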
+void gic_raise_guest_irq(struct vcpu *v, unsigned int virtual_irq,
+ unsigned int priority)
+{
+ int i;
+ unsigned int nr_lrs = gic_hw_ops->info->nr_lrs;
+ struct pending_irq *p = irq_to_pending(v, virtual_irq);
+
+ ASSERT(spin_is_locked(&v->arch.vgic.lock));
+
+ if ( unlikely(!p) )
+ /* An unmapped LPI does not need to be raised. */
+ return;
+
+ if ( v == current && list_empty(&v->arch.vgic.lr_pending) )
+ {
+ i = gic_find_unused_lr(v, p, 0);
+
+        if ( i < nr_lrs )
+        {
+            set_bit(i, &this_cpu(lr_mask));
+            gic_set_lr(i, p, GICH_LR_PENDING);
+            return;
+        }
+ }
+
+ gic_add_to_lr_pending(v, p);
+}
+
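+/*
+ * Synchronise the software view of @v's interrupt in LR @i with what the
+ * guest did to that LR: turn a QUEUED interrupt that went active into
+ * active-and-pending, or retire an invalid LR, completing a pending
+ * migration and re-raising the interrupt if it became pending again.
+ */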
+static void gic_update_one_lr(struct vcpu *v, int i)
+{
+ struct pending_irq *p;
+ int irq;
+ struct gic_lr lr_val;
+
+ ASSERT(spin_is_locked(&v->arch.vgic.lock));
+ ASSERT(!local_irq_is_enabled());
+
+ gic_hw_ops->read_lr(i, &lr_val);
+ irq = lr_val.virq;
+ p = irq_to_pending(v, irq);
+ /*
+ * An LPI might have been unmapped, in which case we just clean up here.
+ * If that LPI is marked as PRISTINE, the information in the LR is bogus,
+ * as it belongs to a previous, already unmapped LPI. So we discard it
+ * here as well.
+ */
+ if ( unlikely(!p ||
+ test_and_clear_bit(GIC_IRQ_GUEST_PRISTINE_LPI, &p->status)) )
+ {
+ ASSERT(is_lpi(irq));
+
+ gic_hw_ops->clear_lr(i);
+ clear_bit(i, &this_cpu(lr_mask));
+
+ return;
+ }
+
+ if ( lr_val.state & GICH_LR_ACTIVE )
+ {
+ set_bit(GIC_IRQ_GUEST_ACTIVE, &p->status);
+ if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) &&
+ test_and_clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status) )
+ {
+ if ( p->desc == NULL )
+ {
+ lr_val.state |= GICH_LR_PENDING;
+ gic_hw_ops->write_lr(i, &lr_val);
+ }
+ else
+ gdprintk(XENLOG_WARNING, "unable to inject hw irq=%d into d%dv%d: already active in LR%d\n",
+ irq, v->domain->domain_id, v->vcpu_id, i);
+ }
+ }
+ else if ( lr_val.state & GICH_LR_PENDING )
+ {
+ int q __attribute__ ((unused)) = test_and_clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status);
+#ifdef GIC_DEBUG
+ if ( q )
+ gdprintk(XENLOG_DEBUG, "trying to inject irq=%d into d%dv%d, when it is already pending in LR%d\n",
+ irq, v->domain->domain_id, v->vcpu_id, i);
+#endif
+ }
+ else
+ {
+ gic_hw_ops->clear_lr(i);
+ clear_bit(i, &this_cpu(lr_mask));
+
+ if ( p->desc != NULL )
+ clear_bit(_IRQ_INPROGRESS, &p->desc->status);
+ clear_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
+ clear_bit(GIC_IRQ_GUEST_ACTIVE, &p->status);
+ p->lr = GIC_INVALID_LR;
+ if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) &&
+ test_bit(GIC_IRQ_GUEST_QUEUED, &p->status) &&
+ !test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
+ gic_raise_guest_irq(v, irq, p->priority);
+        else
+        {
+ list_del_init(&p->inflight);
+ /*
+ * Remove from inflight, then change physical affinity. It
+ * makes sure that when a new interrupt is received on the
+ * next pcpu, inflight is already cleared. No concurrent
+ * accesses to inflight.
+ */
+ smp_wmb();
+ if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
+ {
+ struct vcpu *v_target = vgic_get_target_vcpu(v, irq);
+ irq_set_affinity(p->desc, cpumask_of(v_target->processor));
+ clear_bit(GIC_IRQ_GUEST_MIGRATING, &p->status);
+ }
+ }
+ }
+}
+
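+/* Sync the state of every in-use LR on this pCPU back into the vGIC. */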
+void gic_clear_lrs(struct vcpu *v)
+{
+ int i = 0;
+ unsigned long flags;
+ unsigned int nr_lrs = gic_hw_ops->info->nr_lrs;
+
+    /* The idle domain has no LRs to be cleared. Since gic_restore_state
+     * doesn't write any LR registers for the idle domain, they could be
+     * non-zero. */
+ if ( is_idle_vcpu(v) )
+ return;
+
+ gic_hw_ops->update_hcr_status(GICH_HCR_UIE, false);
+
+ spin_lock_irqsave(&v->arch.vgic.lock, flags);
+
+    while ( (i = find_next_bit((const unsigned long *) &this_cpu(lr_mask),
+                               nr_lrs, i)) < nr_lrs )
+    {
+        gic_update_one_lr(v, i);
+        i++;
+    }
+
+ spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
+}
+
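+/*
+ * Move interrupts from @v's lr_pending queue into free LRs, in priority
+ * order. If all LRs are in use, try to evict an interrupt of strictly
+ * lower priority that is pending in an LR but not yet active.
+ */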
+static void gic_restore_pending_irqs(struct vcpu *v)
+{
+ int lr = 0;
+ struct pending_irq *p, *t, *p_r;
+ struct list_head *inflight_r;
+ unsigned long flags;
+ unsigned int nr_lrs = gic_hw_ops->info->nr_lrs;
+ int lrs = nr_lrs;
+
+ spin_lock_irqsave(&v->arch.vgic.lock, flags);
+
+ if ( list_empty(&v->arch.vgic.lr_pending) )
+ goto out;
+
+ inflight_r = &v->arch.vgic.inflight_irqs;
+ list_for_each_entry_safe ( p, t, &v->arch.vgic.lr_pending, lr_queue )
+ {
+ lr = gic_find_unused_lr(v, p, lr);
+ if ( lr >= nr_lrs )
+ {
+ /* No more free LRs: find a lower priority irq to evict */
+ list_for_each_entry_reverse( p_r, inflight_r, inflight )
+ {
+ if ( p_r->priority == p->priority )
+ goto out;
+ if ( test_bit(GIC_IRQ_GUEST_VISIBLE, &p_r->status) &&
+ !test_bit(GIC_IRQ_GUEST_ACTIVE, &p_r->status) )
+ goto found;
+ }
+ /* We didn't find a victim this time, and we won't next
+ * time, so quit */
+ goto out;
+
+found:
+ lr = p_r->lr;
+ p_r->lr = GIC_INVALID_LR;
+ set_bit(GIC_IRQ_GUEST_QUEUED, &p_r->status);
+ clear_bit(GIC_IRQ_GUEST_VISIBLE, &p_r->status);
+ gic_add_to_lr_pending(v, p_r);
+ inflight_r = &p_r->inflight;
+ }
+
+ gic_set_lr(lr, p, GICH_LR_PENDING);
+ list_del_init(&p->lr_queue);
+ set_bit(lr, &this_cpu(lr_mask));
+
+ /* We can only evict nr_lrs entries */
+ lrs--;
+ if ( lrs == 0 )
+ break;
+ }
+
+out:
+ spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
+}
+
+void gic_clear_pending_irqs(struct vcpu *v)
+{
+ struct pending_irq *p, *t;
+
+ ASSERT(spin_is_locked(&v->arch.vgic.lock));
+
+ v->arch.lr_mask = 0;
+ list_for_each_entry_safe ( p, t, &v->arch.vgic.lr_pending, lr_queue )
+ gic_remove_from_lr_pending(v, p);
+}
+
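+/*
+ * Return whether the current vCPU has an enabled inflight interrupt of
+ * higher priority than both the guest's priority mask and its highest
+ * priority active interrupt, i.e. one it would take right away.
+ */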
+int gic_events_need_delivery(void)
+{
+ struct vcpu *v = current;
+ struct pending_irq *p;
+ unsigned long flags;
+ const unsigned long apr = gic_hw_ops->read_apr(0);
+ int mask_priority;
+ int active_priority;
+ int rc = 0;
+
+ mask_priority = gic_hw_ops->read_vmcr_priority();
+ active_priority = find_next_bit(&apr, 32, 0);
+
+ spin_lock_irqsave(&v->arch.vgic.lock, flags);
+
+ /* TODO: We order the guest irqs by priority, but we don't change
+ * the priority of host irqs. */
+
+    /* Find the first enabled, non-active IRQ; the queue is already
+     * ordered by priority. */
+ list_for_each_entry( p, &v->arch.vgic.inflight_irqs, inflight )
+ {
+ if ( GIC_PRI_TO_GUEST(p->priority) >= mask_priority )
+ goto out;
+ if ( GIC_PRI_TO_GUEST(p->priority) >= active_priority )
+ goto out;
+ if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) )
+ {
+ rc = 1;
+ goto out;
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
+ return rc;
+}
+
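+/*
+ * Called on the way back into the guest with interrupts disabled: load
+ * pending interrupts into free LRs, and request a maintenance interrupt
+ * on LR underflow (GICH_HCR_UIE) if more are waiting than fit in LRs.
+ */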
+void gic_inject(void)
+{
+ ASSERT(!local_irq_is_enabled());
+
+ gic_restore_pending_irqs(current);
+
+    if ( !list_empty(&current->arch.vgic.lr_pending) && lr_all_full() )
+ gic_hw_ops->update_hcr_status(GICH_HCR_UIE, true);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -36,15 +36,11 @@
 #include <asm/vgic.h>
 #include <asm/acpi.h>
 
-static DEFINE_PER_CPU(uint64_t, lr_mask);
-
-#define lr_all_full() (this_cpu(lr_mask) == ((1 << gic_hw_ops->info->nr_lrs) - 1))
+DEFINE_PER_CPU(uint64_t, lr_mask);
 
 #undef GIC_DEBUG
 
-static void gic_update_one_lr(struct vcpu *v, int i);
-
-static const struct gic_hw_operations *gic_hw_ops;
+const struct gic_hw_operations *gic_hw_ops;
 
 void register_gic_ops(const struct gic_hw_operations *ops)
 {
@@ -366,361 +362,6 @@ void gic_disable_cpu(void)
gic_hw_ops->disable_interface();
 }
 
-static inline void gic_set_lr(int lr, struct pending_irq *p,
- unsigned int state)
-{
- ASSERT(!local_irq_is_enabled());
-
- clear_bit(GIC_IRQ_GUEST_PRISTINE_LPI, &p->status);
-
- gic_hw_ops->update_lr(lr, p, state);
-
- set_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
- clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status);
- p->lr = lr;
-}
-
-static inline void gic_add_to_lr_pending(struct vcpu *v, struct pending_irq *n)
-{
- struct pending_irq *iter;
-
- ASSERT(spin_is_locked(&v->arch.vgic.lock));
-
- if ( !list_empty(&n->lr_queue) )
- return;
-
- list_for_each_entry ( iter, &v->arch.vgic.lr_pending, lr_queue )
- {
- if ( iter->priority > n->priority )
- {
- list_add_tail(&n->lr_queue, &iter->lr_queue);
- return;
- }
- }
- list_add_tail(&n->lr_queue, &v->arch.vgic.lr_pending);
-}
-
-void gic_remove_from_lr_pending(struct vcpu *v, struct pending_irq *p)
-{
- ASSERT(spin_is_locked(&v->arch.vgic.lock));
-
- list_del_init(&p->lr_queue);
-}
-
-void gic_raise_inflight_irq(struct vcpu *v, unsigned int virtual_irq)
-{
- struct pending_irq *n = irq_to_pending(v, virtual_irq);
-
- /* If an LPI has been removed meanwhile, there is nothing left to raise. */
- if ( unlikely(!n) )
- return;
-
- ASSERT(spin_is_locked(&v->arch.vgic.lock));
-
- /* Don't try to update the LR if the interrupt is disabled */
- if ( !test_bit(GIC_IRQ_GUEST_ENABLED, &n->status) )
- return;
-
- if ( list_empty(&n->lr_queue) )
- {
- if ( v == current )
- gic_update_one_lr(v, n->lr);
- }
-#ifdef GIC_DEBUG
- else
- gdprintk(XENLOG_DEBUG, "trying to inject irq=%u into d%dv%d, when it is still lr_pending\n",
- virtual_irq, v->domain->domain_id, v->vcpu_id);
-#endif
-}
-
-/*
- * Find an unused LR to insert an IRQ into, starting with the LR given
- * by @lr. If this new interrupt is a PRISTINE LPI, scan the other LRs to
- * avoid inserting the same IRQ twice. This situation can occur when an
- * event gets discarded while the LPI is in an LR, and a new LPI with the
- * same number gets mapped quickly afterwards.
- */
-static unsigned int gic_find_unused_lr(struct vcpu *v,
- struct pending_irq *p,
- unsigned int lr)
-{
- unsigned int nr_lrs = gic_hw_ops->info->nr_lrs;
- unsigned long *lr_mask = (unsigned long *) &this_cpu(lr_mask);
- struct gic_lr lr_val;
-
- ASSERT(spin_is_locked(&v->arch.vgic.lock));
-
- if ( unlikely(test_bit(GIC_IRQ_GUEST_PRISTINE_LPI, &p->status)) )
- {
- unsigned int used_lr;
-
- for_each_set_bit(used_lr, lr_mask, nr_lrs)
- {
- gic_hw_ops->read_lr(used_lr, &lr_val);
- if ( lr_val.virq == p->irq )
- return used_lr;
- }
- }
-
- lr = find_next_zero_bit(lr_mask, nr_lrs, lr);
-
- return lr;
-}
-
-void gic_raise_guest_irq(struct vcpu *v, unsigned int virtual_irq,
- unsigned int priority)
-{
- int i;
- unsigned int nr_lrs = gic_hw_ops->info->nr_lrs;
- struct pending_irq *p = irq_to_pending(v, virtual_irq);
-
- ASSERT(spin_is_locked(&v->arch.vgic.lock));
-
- if ( unlikely(!p) )
- /* An unmapped LPI does not need to be raised. */
- return;
-
- if ( v == current && list_empty(&v->arch.vgic.lr_pending) )
- {
- i = gic_find_unused_lr(v, p, 0);
-
- if (i < nr_lrs) {
- set_bit(i, &this_cpu(lr_mask));
- gic_set_lr(i, p, GICH_LR_PENDING);
- return;
- }
- }
-
- gic_add_to_lr_pending(v, p);
-}
-
-static void gic_update_one_lr(struct vcpu *v, int i)
-{
- struct pending_irq *p;
- int irq;
- struct gic_lr lr_val;
-
- ASSERT(spin_is_locked(&v->arch.vgic.lock));
- ASSERT(!local_irq_is_enabled());
-
- gic_hw_ops->read_lr(i, &lr_val);
- irq = lr_val.virq;
- p = irq_to_pending(v, irq);
- /*
- * An LPI might have been unmapped, in which case we just clean up here.
- * If that LPI is marked as PRISTINE, the information in the LR is bogus,
- * as it belongs to a previous, already unmapped LPI. So we discard it
- * here as well.
- */
- if ( unlikely(!p ||
- test_and_clear_bit(GIC_IRQ_GUEST_PRISTINE_LPI, &p->status)) )
- {
- ASSERT(is_lpi(irq));
-
- gic_hw_ops->clear_lr(i);
- clear_bit(i, &this_cpu(lr_mask));
-
- return;
- }
-
- if ( lr_val.state & GICH_LR_ACTIVE )
- {
- set_bit(GIC_IRQ_GUEST_ACTIVE, &p->status);
- if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) &&
- test_and_clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status) )
- {
- if ( p->desc == NULL )
- {
- lr_val.state |= GICH_LR_PENDING;
- gic_hw_ops->write_lr(i, &lr_val);
- }
- else
- gdprintk(XENLOG_WARNING, "unable to inject hw irq=%d into d%dv%d: already active in LR%d\n",
- irq, v->domain->domain_id, v->vcpu_id, i);
- }
- }
- else if ( lr_val.state & GICH_LR_PENDING )
- {
- int q __attribute__ ((unused)) = test_and_clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status);
-#ifdef GIC_DEBUG
- if ( q )
- gdprintk(XENLOG_DEBUG, "trying to inject irq=%d into d%dv%d, when it is already pending in LR%d\n",
- irq, v->domain->domain_id, v->vcpu_id, i);
-#endif
- }
- else
- {
- gic_hw_ops->clear_lr(i);
- clear_bit(i, &this_cpu(lr_mask));
-
- if ( p->desc != NULL )
- clear_bit(_IRQ_INPROGRESS, &p->desc->status);
- clear_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
- clear_bit(GIC_IRQ_GUEST_ACTIVE, &p->status);
- p->lr = GIC_INVALID_LR;
- if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) &&
- test_bit(GIC_IRQ_GUEST_QUEUED, &p->status) &&
- !test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
- gic_raise_guest_irq(v, irq, p->priority);
- else {
- list_del_init(&p->inflight);
- /*
- * Remove from inflight, then change physical affinity. It
- * makes sure that when a new interrupt is received on the
- * next pcpu, inflight is already cleared. No concurrent
- * accesses to inflight.
- */
- smp_wmb();
- if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
- {
- struct vcpu *v_target = vgic_get_target_vcpu(v, irq);
- irq_set_affinity(p->desc, cpumask_of(v_target->processor));
- clear_bit(GIC_IRQ_GUEST_MIGRATING, &p->status);
- }
- }
- }
-}
-
-void gic_clear_lrs(struct vcpu *v)
-{
- int i = 0;
- unsigned long flags;
- unsigned int nr_lrs = gic_hw_ops->info->nr_lrs;
-
- /* The idle domain has no LRs to be cleared. Since gic_restore_state
- * doesn't write any LR registers for the idle domain they could be
- * non-zero. */
- if ( is_idle_vcpu(v) )
- return;
-
- gic_hw_ops->update_hcr_status(GICH_HCR_UIE, false);
-
- spin_lock_irqsave(&v->arch.vgic.lock, flags);
-
- while ((i = find_next_bit((const unsigned long *) &this_cpu(lr_mask),
- nr_lrs, i)) < nr_lrs ) {
- gic_update_one_lr(v, i);
- i++;
- }
-
- spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
-}
-
-static void gic_restore_pending_irqs(struct vcpu *v)
-{
- int lr = 0;
- struct pending_irq *p, *t, *p_r;
- struct list_head *inflight_r;
- unsigned long flags;
- unsigned int nr_lrs = gic_hw_ops->info->nr_lrs;
- int lrs = nr_lrs;
-
- spin_lock_irqsave(&v->arch.vgic.lock, flags);
-
- if ( list_empty(&v->arch.vgic.lr_pending) )
- goto out;
-
- inflight_r = &v->arch.vgic.inflight_irqs;
- list_for_each_entry_safe ( p, t, &v->arch.vgic.lr_pending, lr_queue )
- {
- lr = gic_find_unused_lr(v, p, lr);
- if ( lr >= nr_lrs )
- {
- /* No more free LRs: find a lower priority irq to evict */
- list_for_each_entry_reverse( p_r, inflight_r, inflight )
- {
- if ( p_r->priority == p->priority )
- goto out;
- if ( test_bit(GIC_IRQ_GUEST_VISIBLE, &p_r->status) &&
- !test_bit(GIC_IRQ_GUEST_ACTIVE, &p_r->status) )
- goto found;
- }
- /* We didn't find a victim this time, and we won't next
- * time, so quit */
- goto out;
-
-found:
- lr = p_r->lr;
- p_r->lr = GIC_INVALID_LR;
- set_bit(GIC_IRQ_GUEST_QUEUED, &p_r->status);
- clear_bit(GIC_IRQ_GUEST_VISIBLE, &p_r->status);
- gic_add_to_lr_pending(v, p_r);
- inflight_r = &p_r->inflight;
- }
-
- gic_set_lr(lr, p, GICH_LR_PENDING);
- list_del_init(&p->lr_queue);
- set_bit(lr, &this_cpu(lr_mask));
-
- /* We can only evict nr_lrs entries */
- lrs--;
- if ( lrs == 0 )
- break;
- }
-
-out:
- spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
-}
-
-void gic_clear_pending_irqs(struct vcpu *v)
-{
- struct pending_irq *p, *t;
-
- ASSERT(spin_is_locked(&v->arch.vgic.lock));
-
- v->arch.lr_mask = 0;
- list_for_each_entry_safe ( p, t, &v->arch.vgic.lr_pending, lr_queue )
- gic_remove_from_lr_pending(v, p);
-}
-
-int gic_events_need_delivery(void)
-{
- struct vcpu *v = current;
- struct pending_irq *p;
- unsigned long flags;
- const unsigned long apr = gic_hw_ops->read_apr(0);
- int mask_priority;
- int active_priority;
- int rc = 0;
-
- mask_priority = gic_hw_ops->read_vmcr_priority();
- active_priority = find_next_bit(&apr, 32, 0);
-
- spin_lock_irqsave(&v->arch.vgic.lock, flags);
-
- /* TODO: We order the guest irqs by priority, but we don't change
- * the priority of host irqs. */
-
- /* find the first enabled non-active irq, the queue is already
- * ordered by priority */
- list_for_each_entry( p, &v->arch.vgic.inflight_irqs, inflight )
- {
- if ( GIC_PRI_TO_GUEST(p->priority) >= mask_priority )
- goto out;
- if ( GIC_PRI_TO_GUEST(p->priority) >= active_priority )
- goto out;
- if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) )
- {
- rc = 1;
- goto out;
- }
- }
-
-out:
- spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
- return rc;
-}
-
-void gic_inject(void)
-{
- ASSERT(!local_irq_is_enabled());
-
- gic_restore_pending_irqs(current);
-
-    if ( !list_empty(&current->arch.vgic.lr_pending) && lr_all_full() )
- gic_hw_ops->update_hcr_status(GICH_HCR_UIE, true);
-}
-
static void do_sgi(struct cpu_user_regs *regs, enum gic_sgi sgi)
{
/* Lower the priority */