@@ -432,8 +432,6 @@ static int gicv2v_setup(struct domain *d)
d->arch.vgic.cbase = GUEST_GICC_BASE;
}
- d->arch.vgic.nr_lines = 0;
-
/*
* Map the gic virtual cpu interface in the gic cpu interface
* region of the guest.
@@ -922,7 +922,7 @@ static int gicv_v3_init(struct domain *d)
d->arch.vgic.rbase_size[0] = GUEST_GICV3_GICR0_SIZE;
}
- d->arch.vgic.nr_lines = 0;
+ d->arch.vgic.nr_spis = 0;
return 0;
}
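
Throughout, the renamed field keeps the old value's meaning: it counts SPIs only, whereas the hardware driver's line count includes the 32 per-CPU interrupts (SGIs 0-15, PPIs 16-31). A minimal sketch (not part of the patch) of that relationship:

    /* Illustrative only: gic_number_lines(), used by domain_vgic_init()
     * below, reports every line the hardware implements (SGIs, PPIs and
     * SPIs), while the renamed vGIC field counts SPIs alone. */
    static unsigned int spis_from_lines(unsigned int nr_lines)
    {
        return nr_lines - 32; /* strip the 32 per-CPU SGIs/PPIs */
    }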
@@ -54,7 +54,7 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
/* No secure world support for guests. */
vgic_lock(v);
*r = ( (v->domain->max_vcpus << 5) & GICD_TYPE_CPUS )
- |( ((v->domain->arch.vgic.nr_lines / 32)) & GICD_TYPE_LINES );
+ |( (v->domain->arch.vgic.nr_spis / 32) & GICD_TYPE_LINES );
vgic_unlock(v);
return 1;
case GICD_IIDR:
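
Why dividing the SPI count by 32 yields the right GICD_TYPER value: the ITLinesNumber field encodes 32 * (N + 1) implemented lines, and a domain exposes nr_spis + 32 of them, so N = (nr_spis + 32) / 32 - 1 = nr_spis / 32. A standalone check of that identity (illustrative only; the mask value mirrors the GICD_TYPE_LINES used above):

    #include <assert.h>

    #define GICD_TYPE_LINES 0x1f /* ITLinesNumber mask */

    /* 32 * (N + 1) lines are implemented for field value N. */
    static unsigned int itlines_field(unsigned int nr_spis)
    {
        return ((nr_spis + 32) / 32 - 1) & GICD_TYPE_LINES;
    }

    int main(void)
    {
        /* e.g. 224 SPIs => 256 lines => ITLinesNumber == 7 == 224 / 32 */
        assert(itlines_field(224) == 224 / 32);
        return 0;
    }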
@@ -668,7 +668,7 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
if ( dabt.size != DABT_WORD ) goto bad_width;
/* No secure world support for guests. */
*r = (((v->domain->max_vcpus << 5) & GICD_TYPE_CPUS ) |
- ((v->domain->arch.vgic.nr_lines / 32) & GICD_TYPE_LINES));
+ ((v->domain->arch.vgic.nr_spis / 32) & GICD_TYPE_LINES));
return 1;
case GICD_STATUSR:
/*
@@ -66,13 +66,10 @@ int domain_vgic_init(struct domain *d)
d->arch.vgic.ctlr = 0;
- /* Currently nr_lines in vgic and gic doesn't have the same meanings
- * Here nr_lines = number of SPIs
- */
if ( is_hardware_domain(d) )
- d->arch.vgic.nr_lines = gic_number_lines() - 32;
+ d->arch.vgic.nr_spis = gic_number_lines() - 32;
else
- d->arch.vgic.nr_lines = 0; /* We don't need SPIs for the guest */
+ d->arch.vgic.nr_spis = 0; /* We don't need SPIs for the guest */
switch ( gic_hw_version() )
{
@@ -96,11 +93,11 @@ int domain_vgic_init(struct domain *d)
return -ENOMEM;
d->arch.vgic.pending_irqs =
- xzalloc_array(struct pending_irq, d->arch.vgic.nr_lines);
+ xzalloc_array(struct pending_irq, d->arch.vgic.nr_spis);
if ( d->arch.vgic.pending_irqs == NULL )
return -ENOMEM;
- for (i=0; i<d->arch.vgic.nr_lines; i++)
+ for ( i = 0; i < d->arch.vgic.nr_spis; i++ )
{
INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].inflight);
INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].lr_queue);
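
Sizing the array with nr_spis (rather than nr_spis + 32) is deliberate: this is the domain-global array and only SPIs are domain-global; SGIs and PPIs get per-VCPU storage, as irq_to_pending() below spells out. Slot i of this array therefore backs interrupt number 32 + i. A hypothetical helper making that mapping explicit (spi_to_slot is not a Xen function):

    /* Hypothetical: SPI number irq (32 <= irq < vgic_num_irqs(d))
     * lives at slot irq - 32 of the per-domain pending_irqs array. */
    static inline unsigned int spi_to_slot(unsigned int irq)
    {
        return irq - 32;
    }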
@@ -218,7 +215,7 @@ void arch_move_irqs(struct vcpu *v)
struct vcpu *v_target;
int i;
- for ( i = 32; i < (d->arch.vgic.nr_lines + 32); i++ )
+ for ( i = 32; i < vgic_num_irqs(d); i++ )
{
v_target = vgic_get_target_vcpu(v, i);
p = irq_to_pending(v_target, i);
@@ -344,7 +341,7 @@ int vgic_to_sgi(struct vcpu *v, register_t sgir, enum gic_sgi_mode irqmode, int
struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq)
{
struct pending_irq *n;
- /* Pending irqs allocation strategy: the first vgic.nr_lines irqs
- * are used for SPIs; the rests are used for per cpu irqs */
+ /* Pending irqs allocation strategy: the first vgic.nr_spis irqs
+ * are used for SPIs; the rest are used for per-CPU irqs */
if ( irq < 32 )
n = &v->arch.vgic.pending_irqs[irq];
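
A self-contained sketch of the two-array strategy the comment describes (simplified types and names, not the Xen code; the SPI branch falls outside this hunk):

    struct pending_irq { int irq; /* stand-in for the real struct */ };

    /* Local interrupts (IDs 0-31) come from a per-VCPU array of 32
     * entries; SPIs come from the per-domain array of nr_spis entries. */
    static struct pending_irq *
    strategy_lookup(struct pending_irq per_vcpu[32],
                    struct pending_irq per_domain[/* nr_spis */],
                    unsigned int irq)
    {
        return irq < 32 ? &per_vcpu[irq] : &per_domain[irq - 32];
    }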
@@ -89,7 +89,7 @@ struct arch_domain
*/
spinlock_t lock;
int ctlr;
- int nr_lines; /* Number of SPIs */
+ int nr_spis; /* Number of SPIs */
struct vgic_irq_rank *shared_irqs;
/*
* SPIs are domain global, SGIs and PPIs are per-VCPU and stored in
@@ -113,7 +113,7 @@ struct vgic_ops {
};
/* Number of ranks of interrupt registers for a domain */
-#define DOMAIN_NR_RANKS(d) (((d)->arch.vgic.nr_lines+31)/32)
+#define DOMAIN_NR_RANKS(d) (((d)->arch.vgic.nr_spis + 31) / 32)
#define vgic_lock(v) spin_lock_irq(&(v)->domain->arch.vgic.lock)
#define vgic_unlock(v) spin_unlock_irq(&(v)->domain->arch.vgic.lock)
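
DOMAIN_NR_RANKS rounds up so a partial bank of SPIs still gets a full rank of 32 emulated register slots. A standalone check (illustrative only):

    #include <assert.h>

    /* Mirror of the macro above, for a self-contained check. */
    static unsigned int nr_ranks(unsigned int nr_spis)
    {
        return (nr_spis + 31) / 32;
    }

    int main(void)
    {
        assert(nr_ranks(0)  == 0); /* guests start with no SPIs  */
        assert(nr_ranks(32) == 1); /* exactly one full rank      */
        assert(nr_ranks(33) == 2); /* partial bank => extra rank */
        return 0;
    }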
@@ -175,6 +175,8 @@ enum gic_sgi_mode;
*/
#define REG_RANK_INDEX(b, n, s) ((((n) >> s) & ((b)-1)) % 32)
+#define vgic_num_irqs(d) ((d)->arch.vgic.nr_spis + 32)
+
extern int domain_vgic_init(struct domain *d);
extern void domain_vgic_free(struct domain *d);
extern int vcpu_vgic_init(struct vcpu *v);
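
The new vgic_num_irqs() macro gives the total number of interrupt IDs the emulated GIC handles, i.e. the 32 per-CPU interrupts plus nr_spis shared ones; arch_move_irqs() above already uses it as a loop bound. A hypothetical consumer in the same spirit (vgic_irq_in_range is not a Xen function):

    /* Hypothetical, assuming the macro above: an interrupt ID is
     * handled by d's vGIC iff it is below vgic_num_irqs(d). */
    static inline int vgic_irq_in_range(const struct domain *d,
                                        unsigned int irq)
    {
        return irq < vgic_num_irqs(d);
    }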