[Xen-devel,RFC,v3,2/6] xen/arm: Add save/restore support for ARM GIC V2

Message ID 1399583908-21755-3-git-send-email-w1.huang@samsung.com
State New

Commit Message

Wei Huang May 8, 2014, 9:18 p.m. UTC
This patch implements a save/restore support for
ARM guest GIC. Two types of GIC V2 states are saved seperately:
1) VGICD_* contains the GIC distributor state from
guest VM's view; 2) GICH_* is the GIC virtual control
state from hypervisor's persepctive.

Signed-off-by: Evgeny Fedotov <e.fedotov@samsung.com>
Signed-off-by: Wei Huang <w1.huang@samsung.com>
---
 xen/arch/arm/vgic.c                    |  171 ++++++++++++++++++++++++++++++++
 xen/include/public/arch-arm/hvm/save.h |   34 ++++++-
 2 files changed, 204 insertions(+), 1 deletion(-)

Comments

Andrew Cooper May 8, 2014, 10:47 p.m. UTC | #1
On 08/05/2014 22:18, Wei Huang wrote:
> This patch implements a save/restore support for
> ARM guest GIC. Two types of GIC V2 states are saved seperately:
> 1) VGICD_* contains the GIC distributor state from
> guest VM's view; 2) GICH_* is the GIC virtual control
> state from hypervisor's persepctive.
>
> Signed-off-by: Evgeny Fedotov <e.fedotov@samsung.com>
> Signed-off-by: Wei Huang <w1.huang@samsung.com>
> ---
>  xen/arch/arm/vgic.c                    |  171 ++++++++++++++++++++++++++++++++
>  xen/include/public/arch-arm/hvm/save.h |   34 ++++++-
>  2 files changed, 204 insertions(+), 1 deletion(-)
>
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index 4cf6470..505e944 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -24,6 +24,7 @@
>  #include <xen/softirq.h>
>  #include <xen/irq.h>
>  #include <xen/sched.h>
> +#include <xen/hvm/save.h>
>  
>  #include <asm/current.h>
>  
> @@ -73,6 +74,110 @@ static struct vgic_irq_rank *vgic_irq_rank(struct vcpu *v, int b, int n)
>          return NULL;
>  }
>  
> +/* Save guest VM's distributor info into a context to support domains

Small nit, but Xen style would be:

/*
 * start of comment
 * ...
 */

for multiline comments.

> + * save/restore. Such info represents guest VM's view of its GIC
> + * distributor (GICD_*).
> + */
> +static int hvm_vgicd_save(struct domain *d, hvm_domain_context_t *h)
> +{
> +    struct hvm_arm_vgicd_v2 ctxt;
> +    struct vcpu *v;
> +    struct vgic_irq_rank *rank;
> +    int rc = 0;
> +
> +    /* Save the state for each VCPU */
> +    for_each_vcpu( d, v )
> +    {
> +        rank = &v->arch.vgic.private_irqs;
> +
> +        /* IENABLE, IACTIVE, IPEND,  PENDSGI */
> +        ctxt.ienable = rank->ienable;
> +        ctxt.iactive = rank->iactive;
> +        ctxt.ipend = rank->ipend;
> +        ctxt.pendsgi = rank->pendsgi;
> +
> +        /* ICFG */
> +        ctxt.icfg[0] = rank->icfg[0];
> +        ctxt.icfg[1] = rank->icfg[1];
> +
> +        /* IPRIORITY */
> +        BUILD_BUG_ON(sizeof(rank->ipriority) != sizeof (ctxt.ipriority));
> +        memcpy(ctxt.ipriority, rank->ipriority, sizeof(rank->ipriority));

Can you be consistent with a space (or lack of one) with the sizeof
operator?  Eyeballing a grep of the codebase, Xen's prevailing style
would appear to be without the space.
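For instance, under that convention the check above would read:

    BUILD_BUG_ON(sizeof(rank->ipriority) != sizeof(ctxt.ipriority));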

> +
> +        /* ITARGETS */
> +        BUILD_BUG_ON(sizeof(rank->itargets) != sizeof (ctxt.itargets));
> +        memcpy(ctxt.itargets, rank->itargets, sizeof(rank->itargets));
> +
> +        if ( (rc = hvm_save_entry(VGICD_V2, v->vcpu_id, h, &ctxt)) != 0 )
> +            return rc;
> +    }
> +
> +    return rc;
> +}
> +
> +/* Load guest VM's distributor info from a context to support domain
> + * save/restore. The info is loaded into vgic_irq_rank.
> + */
> +static int hvm_vgicd_load(struct domain *d, hvm_domain_context_t *h)
> +{
> +    struct hvm_arm_vgicd_v2 ctxt;
> +    struct vgic_irq_rank *rank;
> +    struct vcpu *v;
> +    int vcpuid;

unsigned int. (and later on as well)

Can be combined with the 'irq' declaration.
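i.e. something along the lines of:

    unsigned int vcpuid, irq = 0;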

> +    unsigned long enable_bits;
> +    struct pending_irq *p;
> +    unsigned int irq = 0;
> +    int rc = 0;
> +
> +    /* Which vcpu is this? */
> +    vcpuid = hvm_load_instance(h);
> +    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
> +    {
> +        dprintk(XENLOG_ERR, "HVM restore: dom%u has no vcpu%u\n",
> +                d->domain_id, vcpuid);
> +        return -EINVAL;
> +    }
> +
> +    if ( (rc = hvm_load_entry(VGICD_V2, h, &ctxt)) != 0 )
> +        return rc;
> +
> +    /* Restore PPI states */
> +    rank = &v->arch.vgic.private_irqs;
> +
> +    /* IENABLE, IACTIVE, IPEND, PENDSGI */
> +    rank->ienable = ctxt.ienable;
> +    rank->iactive = ctxt.iactive;
> +    rank->ipend = ctxt.ipend;
> +    rank->pendsgi = ctxt.pendsgi;
> +
> +    /* ICFG */
> +    rank->icfg[0] = ctxt.icfg[0];
> +    rank->icfg[1] = ctxt.icfg[1];
> +
> +    /* IPRIORITY */
> +    BUILD_BUG_ON(sizeof(rank->ipriority) != sizeof (ctxt.ipriority));
> +    memcpy(rank->ipriority, ctxt.ipriority, sizeof(rank->ipriority));
> +
> +    /* ITARGETS */
> +    BUILD_BUG_ON(sizeof(rank->itargets) != sizeof (ctxt.itargets));
> +    memcpy(rank->itargets, ctxt.itargets, sizeof(rank->itargets));
> +
> +    /* Set IRQ status as enabled by iterating through rank->ienable register.
> +     * This step is required otherwise events won't be received by the VM
> +     * after restore. */
> +    enable_bits = ctxt.ienable;
> +    while ( (irq = find_next_bit(&enable_bits, 32, irq)) < 32 )
> +    {
> +        p = irq_to_pending(v, irq);
> +        set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
> +        irq++;
> +    }
> +
> +    return 0;
> +}
> +HVM_REGISTER_SAVE_RESTORE(VGICD_V2, hvm_vgicd_save, hvm_vgicd_load,
> +                          1, HVMSR_PER_VCPU);
> +
>  int domain_vgic_init(struct domain *d)
>  {
>      int i;
> @@ -759,6 +864,72 @@ out:
>          smp_send_event_check_mask(cpumask_of(v->processor));
>  }
>  
> +/* Save GIC virtual control state into a context to support save/restore. 
> + * The info reprsents most of GICH_* registers. */
> +static int hvm_gich_save(struct domain *d, hvm_domain_context_t *h)
> +{
> +    struct hvm_arm_gich_v2 ctxt;
> +    struct vcpu *v;
> +    int rc = 0;
> +
> +    /* Save the state of GICs */
> +    for_each_vcpu( d, v )
> +    {
> +        ctxt.gic_hcr = v->arch.gic_hcr;
> +        ctxt.gic_vmcr = v->arch.gic_vmcr;
> +        ctxt.gic_apr = v->arch.gic_apr;
> +
> +        /* Save list registers and masks */
> +        BUILD_BUG_ON(sizeof(v->arch.gic_lr) > sizeof (ctxt.gic_lr));
> +        memcpy(ctxt.gic_lr, v->arch.gic_lr, sizeof(v->arch.gic_lr));
> +
> +        ctxt.lr_mask = v->arch.lr_mask;
> +        ctxt.event_mask = v->arch.event_mask;
> +
> +        if ( (rc = hvm_save_entry(GICH_V2, v->vcpu_id, h, &ctxt)) != 0 )
> +            return rc;
> +    }
> +
> +    return rc;
> +}
> +
> +/* Restore GIC virtual control state from a context to support save/restore */
> +static int hvm_gich_load(struct domain *d, hvm_domain_context_t *h)
> +{
> +    int vcpuid;
> +    struct hvm_arm_gich_v2 ctxt;
> +    struct vcpu *v;
> +    int rc = 0;
> +
> +    /* Which vcpu is this? */
> +    vcpuid = hvm_load_instance(h);
> +    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
> +    {
> +        dprintk(XENLOG_ERR, "HVM restore: dom%u has no vcpu%u\n", d->domain_id,
> +                vcpuid);
> +        return -EINVAL;
> +    }
> +
> +    if ( (rc = hvm_load_entry(GICH_V2, h, &ctxt)) != 0 )
> +        return rc;
> +
> +    v->arch.gic_hcr = ctxt.gic_hcr;
> +    v->arch.gic_vmcr = ctxt.gic_vmcr;
> +    v->arch.gic_apr = ctxt.gic_apr;
> +
> +    /* Restore list registers and masks */
> +    BUILD_BUG_ON(sizeof(v->arch.gic_lr) > sizeof (ctxt.gic_lr));
> +    memcpy(v->arch.gic_lr, ctxt.gic_lr, sizeof(v->arch.gic_lr));
> +
> +    v->arch.lr_mask = ctxt.lr_mask;
> +    v->arch.event_mask = ctxt.event_mask;
> +
> +    return rc;
> +}
> +
> +HVM_REGISTER_SAVE_RESTORE(GICH_V2, hvm_gich_save, hvm_gich_load, 1,
> +                          HVMSR_PER_VCPU);
> +
>  /*
>   * Local variables:
>   * mode: C
> diff --git a/xen/include/public/arch-arm/hvm/save.h b/xen/include/public/arch-arm/hvm/save.h
> index 8312e7b..421a6f6 100644
> --- a/xen/include/public/arch-arm/hvm/save.h
> +++ b/xen/include/public/arch-arm/hvm/save.h
> @@ -40,10 +40,42 @@ struct hvm_save_header
>  };
>  DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
>  
> +/* Guest's view of GIC distributor (per-vcpu)
> + *   - Based on GICv2 (see "struct vgic_irq_rank")
> + *   - Store guest's view of GIC distributor
> + *   - Only support SGI and PPI for DomU (DomU doesn't handle SPI)
> + */
> +struct hvm_arm_vgicd_v2
> +{
> +    uint32_t ienable;
> +    uint32_t iactive;
> +    uint32_t ipend;
> +    uint32_t pendsgi;
> +    uint32_t icfg[2];
> +    uint32_t ipriority[8];
> +    uint32_t itargets[8];
> +};
> +DECLARE_HVM_SAVE_TYPE(VGICD_V2, 2, struct hvm_arm_vgicd_v2);
> +
> +/* Info for hypervisor to manage guests (per-vcpu)
> + *   - Based on GICv2
> + *   - Mainly store registers of GICH_*
> + */
> +struct hvm_arm_gich_v2
> +{
> +    uint32_t gic_hcr;
> +    uint32_t gic_vmcr;
> +    uint32_t gic_apr;
> +    uint32_t gic_lr[64];
> +    uint64_t event_mask;
> +    uint64_t lr_mask;

This has an odd number of uint32_t.  I suspect it will end up with a
different structure size between a 32 and 64 bit build of Xen.

> +};
> +DECLARE_HVM_SAVE_TYPE(GICH_V2, 3, struct hvm_arm_gich_v2);
> +
>  /*
>   * Largest type-code in use
>   */
> -#define HVM_SAVE_CODE_MAX 1
> +#define HVM_SAVE_CODE_MAX 3
>  
>  #endif
>  

On x86, we require that HVM save records only contain architectural
state.  Not knowing arm myself, it is not clear from your comments
whether this is the case or not.  Can you confirm whether it is or not?

~Andrew
Julien Grall May 9, 2014, 9:17 a.m. UTC | #2
Hi Wei,

(Adding Vijay in CC).

Vijay is working on GICv3 support in Xen. His patch series and this 
patch will clash sooner or later.

I think you should work together to avoid a big rework later.

On 08/05/14 22:18, Wei Huang wrote:
> This patch implements a save/restore support for
> ARM guest GIC. Two types of GIC V2 states are saved seperately:

separately

> 1) VGICD_* contains the GIC distributor state from
> guest VM's view; 2) GICH_* is the GIC virtual control

I would add a newline before "2)"; we don't care about long commit 
messages :).

> state from hypervisor's persepctive.

perspective

>
> Signed-off-by: Evgeny Fedotov <e.fedotov@samsung.com>
> Signed-off-by: Wei Huang <w1.huang@samsung.com>
> ---
>   xen/arch/arm/vgic.c                    |  171 ++++++++++++++++++++++++++++++++
>   xen/include/public/arch-arm/hvm/save.h |   34 ++++++-
>   2 files changed, 204 insertions(+), 1 deletion(-)
>
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index 4cf6470..505e944 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -24,6 +24,7 @@
>   #include <xen/softirq.h>
>   #include <xen/irq.h>
>   #include <xen/sched.h>
> +#include <xen/hvm/save.h>
>
>   #include <asm/current.h>
>
> @@ -73,6 +74,110 @@ static struct vgic_irq_rank *vgic_irq_rank(struct vcpu *v, int b, int n)
>           return NULL;
>   }
>
> +/* Save guest VM's distributor info into a context to support domains
> + * save/restore. Such info represents guest VM's view of its GIC
> + * distributor (GICD_*).
> + */
> +static int hvm_vgicd_save(struct domain *d, hvm_domain_context_t *h)
> +{
> +    struct hvm_arm_vgicd_v2 ctxt;
> +    struct vcpu *v;
> +    struct vgic_irq_rank *rank;
> +    int rc = 0;
> +
> +    /* Save the state for each VCPU */
> +    for_each_vcpu( d, v )
> +    {
> +        rank = &v->arch.vgic.private_irqs;
> +
> +        /* IENABLE, IACTIVE, IPEND,  PENDSGI */
> +        ctxt.ienable = rank->ienable;
> +        ctxt.iactive = rank->iactive;
> +        ctxt.ipend = rank->ipend;
> +        ctxt.pendsgi = rank->pendsgi;
> +
> +        /* ICFG */
> +        ctxt.icfg[0] = rank->icfg[0];
> +        ctxt.icfg[1] = rank->icfg[1];

I would use the same pattern as IPRIORITY and ITARGETS.
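i.e., roughly:

    /* ICFG */
    BUILD_BUG_ON(sizeof(rank->icfg) != sizeof(ctxt.icfg));
    memcpy(ctxt.icfg, rank->icfg, sizeof(rank->icfg));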

[..]

> +/* Load guest VM's distributor info from a context to support domain
> + * save/restore. The info is loaded into vgic_irq_rank.
> + */
> +static int hvm_vgicd_load(struct domain *d, hvm_domain_context_t *h)
> +{

[..]

> +    /* ICFG */
> +    rank->icfg[0] = ctxt.icfg[0];
> +    rank->icfg[1] = ctxt.icfg[1];

Same remark here.

[..]

> +/* Save GIC virtual control state into a context to support save/restore.
> + * The info reprsents most of GICH_* registers. */

represents

Regards,
Wei Huang May 9, 2014, 2:12 p.m. UTC | #3
On 05/08/2014 05:47 PM, Andrew Cooper wrote:
>> +DECLARE_HVM_SAVE_TYPE(GICH_V2, 3, struct hvm_arm_gich_v2);
>> +
>>   /*
>>    * Largest type-code in use
>>    */
>> -#define HVM_SAVE_CODE_MAX 1
>> +#define HVM_SAVE_CODE_MAX 3
>>
>>   #endif
>>
>
> On x86, we require that HVM save records only contain architectural
> state.  Not knowing arm myself, it is not clear from your comments
> whether this is the case or not.  Can you confirm whether it is or not?
Most states are guest architecture states which include core registers, 
arch timer, memory. GIC states are arguable, given that Xen uses data 
structures (e.g. struct vgic_irq_rank) to represent GIC states internally.
>
> ~Andrew
>
Ian Campbell May 9, 2014, 2:24 p.m. UTC | #4
On Fri, 2014-05-09 at 09:12 -0500, Wei Huang wrote:
> On 05/08/2014 05:47 PM, Andrew Cooper wrote:
> >> +DECLARE_HVM_SAVE_TYPE(GICH_V2, 3, struct hvm_arm_gich_v2);
> >> +
> >>   /*
> >>    * Largest type-code in use
> >>    */
> >> -#define HVM_SAVE_CODE_MAX 1
> >> +#define HVM_SAVE_CODE_MAX 3
> >>
> >>   #endif
> >>
> >
> > On x86, we require that HVM save records only contain architectural
> > state.  Not knowing arm myself, it is not clear from your comments
> > whether this is the case or not.  Can you confirm whether it is or not?
> Most states are guest architecture states which include core registers, 
> arch timer, memory. GIC states are arguable, given that Xen uses data 
> structures (e.g. struct vgic_irq_rank) to represent GIC states internally.

(note: I've not looked at this series for ages; I plan to look at this
new version next week)

The contents of vgic_irq_rank are still a set of architectural registers,
I think (the rank thing is just to account for the fact that some
registers use 1 bit to describe 32 registers, some use 2 bits to
describe 16, etc.).

If there is something in a vgic rank which needs saving which isn't in
an architectural form then it needs synthesizing to/from the
architectural state on save/restore.

IOW ARM has the same requirements as x86 here.

Ian.
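
To illustrate the 1-bit/2-bit point, a sketch using the usual GICv2
register encodings (the macro names are made up for illustration, not
taken from vgic.c):

    /* ISENABLER/ICENABLER: 1 bit per IRQ, so one 32-bit register covers 32 IRQs */
    #define ENABLE_REG(irq)    ((irq) / 32)
    #define ENABLE_BIT(irq)    (1u << ((irq) % 32))

    /* ICFGR: 2 bits per IRQ, so one 32-bit register covers 16 IRQs */
    #define ICFG_REG(irq)      ((irq) / 16)
    #define ICFG_SHIFT(irq)    (((irq) % 16) * 2)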
Julien Grall May 11, 2014, 4:15 p.m. UTC | #5
Hi,

On 09/05/14 15:24, Ian Campbell wrote:
> On Fri, 2014-05-09 at 09:12 -0500, Wei Huang wrote:
>> On 05/08/2014 05:47 PM, Andrew Cooper wrote:
>>>> +DECLARE_HVM_SAVE_TYPE(GICH_V2, 3, struct hvm_arm_gich_v2);
>>>> +
>>>>    /*
>>>>     * Largest type-code in use
>>>>     */
>>>> -#define HVM_SAVE_CODE_MAX 1
>>>> +#define HVM_SAVE_CODE_MAX 3
>>>>
>>>>    #endif
>>>>
>>>
>>> On x86, we require that HVM save records only contain architectural
>>> state.  Not knowing arm myself, it is not clear from your comments
>>> whether this is the case or not.  Can you confirm whether it is or not?
>> Most states are guest architecture states which include core registers,
>> arch timer, memory. GIC states are arguable, given that Xen uses data
>> structures (e.g. struct vgic_irq_rank) to represent GIC states internally.
>
> (note: I've not looked at this series for ages, I plan to look at this
> new version next week)
>
> The contents of vgic_irq_rank is still a set of architectural register,
> I think (the rank thing is just to account for the fact that some
> registers use 1 bit to describe 32-registers, some use 2 bits to
> describe 16, etc).

Correct, the vgic_irq_rank should be saved entirely. It represents the 
guest view of the GIC state (such as the priorities, the routing... of 
an IRQ).

Regards,
Wei Huang May 13, 2014, 2:53 p.m. UTC | #6
>> +
>> +/* Info for hypervisor to manage guests (per-vcpu)
>> + *   - Based on GICv2
>> + *   - Mainly store registers of GICH_*
>> + */
>> +struct hvm_arm_gich_v2
>> +{
>> +    uint32_t gic_hcr;
>> +    uint32_t gic_vmcr;
>> +    uint32_t gic_apr;
>> +    uint32_t gic_lr[64];
>> +    uint64_t event_mask;
>> +    uint64_t lr_mask;
>
> This has an odd number of uint32_t.  I suspect it will end up with a
> different structure size between a 32 and 64 bit build of Xen.
>
I will add a padding field to make all structures 64-bit aligned. Let me 
know if this isn't what you want.

-Wei
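
A minimal sketch of that kind of padding (the field name is an assumption
for illustration; the point is that an explicit pad keeps the record layout
independent of compiler-inserted padding on 32-bit vs 64-bit builds):

    struct hvm_arm_gich_v2
    {
        uint32_t gic_hcr;
        uint32_t gic_vmcr;
        uint32_t gic_apr;
        uint32_t _pad;        /* explicit padding: keep the 64-bit fields aligned */
        uint32_t gic_lr[64];
        uint64_t event_mask;
        uint64_t lr_mask;
    };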
Ian Campbell May 14, 2014, 11:07 a.m. UTC | #7
On Thu, 2014-05-08 at 16:18 -0500, Wei Huang wrote:
> diff --git a/xen/include/public/arch-arm/hvm/save.h b/xen/include/public/arch-arm/hvm/save.h
> index 8312e7b..421a6f6 100644
> --- a/xen/include/public/arch-arm/hvm/save.h
> +++ b/xen/include/public/arch-arm/hvm/save.h
> @@ -40,10 +40,42 @@ struct hvm_save_header
>  };
>  DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
>  
> +/* Guest's view of GIC distributor (per-vcpu)
> + *   - Based on GICv2 (see "struct vgic_irq_rank")
> + *   - Store guest's view of GIC distributor
> + *   - Only support SGI and PPI for DomU (DomU doesn't handle SPI)
> + */
> +struct hvm_arm_vgicd_v2
> +{
> +    uint32_t ienable;
> +    uint32_t iactive;
> +    uint32_t ipend;
> +    uint32_t pendsgi;
> +    uint32_t icfg[2];
> +    uint32_t ipriority[8];
> +    uint32_t itargets[8];
> +};
> +DECLARE_HVM_SAVE_TYPE(VGICD_V2, 2, struct hvm_arm_vgicd_v2);

This is the state of 32 interrupts. How do you propose to handle more
interrupts than that?

I think it would be sensible to split the domain global state, the
distributor and cpu interface base addresses and sizes and the states of
any SPIs in here and have a separate per-vcpu set of state for the
per-cpu GICD state (SGIs and PPIs mainly).

For the SPI I think you either want to put the above set of state into
an array of size NR_GUEST_INTERRUPTS/32 or better make each of the above
an array based on NR_GUEST_INTERRUPTS.
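
For illustration, the second option could look something like this (the
constant's value and the struct name are assumptions, not taken from the
series):

    #define NR_GUEST_INTERRUPTS 160   /* assumed per-guest interrupt count */

    struct hvm_arm_vgicd_v2_irqs
    {
        uint32_t ienable[NR_GUEST_INTERRUPTS / 32];     /* 1 bit per IRQ */
        uint32_t iactive[NR_GUEST_INTERRUPTS / 32];
        uint32_t ipend[NR_GUEST_INTERRUPTS / 32];
        uint32_t icfg[NR_GUEST_INTERRUPTS / 16];        /* 2 bits per IRQ */
        uint32_t ipriority[NR_GUEST_INTERRUPTS / 4];    /* 8 bits per IRQ */
        uint32_t itargets[NR_GUEST_INTERRUPTS / 4];     /* 8 bits per IRQ */
    };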

> +
> +/* Info for hypervisor to manage guests (per-vcpu)
> + *   - Based on GICv2
> + *   - Mainly store registers of GICH_*
> + */
> +struct hvm_arm_gich_v2
> +{
> +    uint32_t gic_hcr;
> +    uint32_t gic_vmcr;
> +    uint32_t gic_apr;
> +    uint32_t gic_lr[64];
> +    uint64_t event_mask;
> +    uint64_t lr_mask;

I don't think you should be saving any GICH state at all. What should be
saved is the corresponding GICC state, i.e. "architectural state" that
is observed by the guest. This might mean pickling stuff from the GICH
state into a GICC form. (I said this wrt the LRs in a previous round of
review)

Ian.
Julien Grall May 14, 2014, 12:05 p.m. UTC | #8
On 05/14/2014 12:07 PM, Ian Campbell wrote:
> On Thu, 2014-05-08 at 16:18 -0500, Wei Huang wrote:
>> diff --git a/xen/include/public/arch-arm/hvm/save.h b/xen/include/public/arch-arm/hvm/save.h
>> index 8312e7b..421a6f6 100644
>> --- a/xen/include/public/arch-arm/hvm/save.h
>> +++ b/xen/include/public/arch-arm/hvm/save.h
>> @@ -40,10 +40,42 @@ struct hvm_save_header
>>  };
>>  DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
>>  
>> +/* Guest's view of GIC distributor (per-vcpu)
>> + *   - Based on GICv2 (see "struct vgic_irq_rank")
>> + *   - Store guest's view of GIC distributor
>> + *   - Only support SGI and PPI for DomU (DomU doesn't handle SPI)
>> + */
>> +struct hvm_arm_vgicd_v2
>> +{
>> +    uint32_t ienable;
>> +    uint32_t iactive;
>> +    uint32_t ipend;
>> +    uint32_t pendsgi;
>> +    uint32_t icfg[2];
>> +    uint32_t ipriority[8];
>> +    uint32_t itargets[8];
>> +};
>> +DECLARE_HVM_SAVE_TYPE(VGICD_V2, 2, struct hvm_arm_vgicd_v2);
> 
> This is the state of 32 interrupts. How do you propose to handle more
> interrupts than that?
> 
> I think it would be sensible to split the domain global state, the
> distributor and cpu interface base addresses and sizes and the states of
> any SPIs in here and have a separate per-vpcu set of state for the
> per-cpu GICD state (SPIs and PPIs mainly).
> 
> For the SPI I think you either want to put the above set of state into
> an array of size NR_GUEST_INTERRUPTS/32 or better make each of the above
> an array based on NR_GUEST_INTERRUPTS.
> 
>> +
>> +/* Info for hypervisor to manage guests (per-vcpu)
>> + *   - Based on GICv2
>> + *   - Mainly store registers of GICH_*
>> + */
>> +struct hvm_arm_gich_v2
>> +{
>> +    uint32_t gic_hcr;
>> +    uint32_t gic_vmcr;
>> +    uint32_t gic_apr;
>> +    uint32_t gic_lr[64];
>> +    uint64_t event_mask;
>> +    uint64_t lr_mask;
> 
> I don't think you should be saving any GICH state at all. What should be
> saved is the corresponding GICC state, i.e. "architectural state" that
> is observed by the guest. This might mean pickling stuff from the GICH
> state into a GICC form. (I said this wrt the LRs in a previous round of
> review)

What is the advantage of saving the GICC state rather than GICH?

IIRC, the GICH state gives you a representation of the important bits of
the GICC. Most of GICC can't be restored without translation and
writing into GICH (see gic_vmcr, which is a collection of multiple GICC
registers). It seems easier to use the GICH state during migration.

Regards,
Tim Deegan May 14, 2014, 12:23 p.m. UTC | #9
At 13:05 +0100 on 14 May (1400069153), Julien Grall wrote:
> On 05/14/2014 12:07 PM, Ian Campbell wrote:
> > On Thu, 2014-05-08 at 16:18 -0500, Wei Huang wrote:
> >> diff --git a/xen/include/public/arch-arm/hvm/save.h b/xen/include/public/arch-arm/hvm/save.h
> >> index 8312e7b..421a6f6 100644
> >> --- a/xen/include/public/arch-arm/hvm/save.h
> >> +++ b/xen/include/public/arch-arm/hvm/save.h
> >> @@ -40,10 +40,42 @@ struct hvm_save_header
> >>  };
> >>  DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
> >>  
> >> +/* Guest's view of GIC distributor (per-vcpu)
> >> + *   - Based on GICv2 (see "struct vgic_irq_rank")
> >> + *   - Store guest's view of GIC distributor
> >> + *   - Only support SGI and PPI for DomU (DomU doesn't handle SPI)
> >> + */
> >> +struct hvm_arm_vgicd_v2
> >> +{
> >> +    uint32_t ienable;
> >> +    uint32_t iactive;
> >> +    uint32_t ipend;
> >> +    uint32_t pendsgi;
> >> +    uint32_t icfg[2];
> >> +    uint32_t ipriority[8];
> >> +    uint32_t itargets[8];
> >> +};
> >> +DECLARE_HVM_SAVE_TYPE(VGICD_V2, 2, struct hvm_arm_vgicd_v2);
> > 
> > This is the state of 32 interrupts. How do you propose to handle more
> > interrupts than that?
> > 
> > I think it would be sensible to split the domain global state, the
> > distributor and cpu interface base addresses and sizes and the states of
> > any SPIs in here and have a separate per-vpcu set of state for the
> > per-cpu GICD state (SPIs and PPIs mainly).
> > 
> > For the SPI I think you either want to put the above set of state into
> > an array of size NR_GUEST_INTERRUPTS/32 or better make each of the above
> > an array based on NR_GUEST_INTERRUPTS.
> > 
> >> +
> >> +/* Info for hypervisor to manage guests (per-vcpu)
> >> + *   - Based on GICv2
> >> + *   - Mainly store registers of GICH_*
> >> + */
> >> +struct hvm_arm_gich_v2
> >> +{
> >> +    uint32_t gic_hcr;
> >> +    uint32_t gic_vmcr;
> >> +    uint32_t gic_apr;
> >> +    uint32_t gic_lr[64];
> >> +    uint64_t event_mask;
> >> +    uint64_t lr_mask;
> > 
> > I don't think you should be saving any GICH state at all. What should be
> > saved is the corresponding GICC state, i.e. "architectural state" that
> > is observed by the guest. This might mean pickling stuff from the GICH
> > state into a GICC form. (I said this wrt the LRs in a previous round of
> > review)
> 
> What are the advantage to save the GICC state rather than GICH?
> 
> IIRC, the GICH state gives you a representation of the important bits of
> the GICC. Most of GICC can't be restore without any translation and
> writing in GICH (see gic_vmcr that is a collection of multiple GICC
> registers). It seems easier to use GICH state during migration.

The GICC state is the architectural state of the virtual machine;
the GICH state is an implementation detail of how that's achieved by Xen.
We prefer always to put clean architectural state into the save record.
That way if we for any reason change how the VMM is implemented, the
save record format won't be affected by that.

Tim.
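
As an illustration of the "pickling" Ian mentions (folding an in-flight list
register back into architectural pending/active state before writing the save
record), a sketch only: bit positions follow the GICv2 GICH_LR layout, but the
helper name and the restriction to the 32 private IRQs are assumptions.

    static void lr_to_arch_state(uint32_t lr, struct hvm_arm_vgicd_v2 *ctxt)
    {
        unsigned int virq  = lr & 0x3ff;        /* GICH_LR.VirtualID */
        unsigned int state = (lr >> 28) & 0x3;  /* GICH_LR.State */

        if ( virq >= 32 )                       /* sketch: private IRQs only */
            return;

        if ( state & 0x1 )                      /* pending */
            ctxt->ipend |= 1u << virq;
        if ( state & 0x2 )                      /* active */
            ctxt->iactive |= 1u << virq;
    }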
Ian Campbell May 14, 2014, 1:24 p.m. UTC | #10
On Wed, 2014-05-14 at 14:23 +0200, Tim Deegan wrote:
> > >> +
> > >> +/* Info for hypervisor to manage guests (per-vcpu)
> > >> + *   - Based on GICv2
> > >> + *   - Mainly store registers of GICH_*
> > >> + */
> > >> +struct hvm_arm_gich_v2
> > >> +{
> > >> +    uint32_t gic_hcr;
> > >> +    uint32_t gic_vmcr;
> > >> +    uint32_t gic_apr;
> > >> +    uint32_t gic_lr[64];
> > >> +    uint64_t event_mask;
> > >> +    uint64_t lr_mask;
> > > 
> > > I don't think you should be saving any GICH state at all. What should be
> > > saved is the corresponding GICC state, i.e. "architectural state" that
> > > is observed by the guest. This might mean pickling stuff from the GICH
> > > state into a GICC form. (I said this wrt the LRs in a previous round of
> > > review)
> > 
> > What are the advantage to save the GICC state rather than GICH?
> > 
> > IIRC, the GICH state gives you a representation of the important bits of
> > the GICC. Most of GICC can't be restore without any translation and
> > writing in GICH (see gic_vmcr that is a collection of multiple GICC
> > registers). It seems easier to use GICH state during migration.
> 
> The GICC state is the architectural state of the virtual machine;
> the GICH state is an implementation detail of how that's achieved by Xen.
> We prefer always to put clean architectural state into the save record.
> That way if we for any reason change how the VMM is implemented, the
> save record format won't be affected by that.

Ack.

> 
> Tim.
Julien Grall May 15, 2014, 5:15 p.m. UTC | #11
Hi Wei,

On 05/08/2014 10:18 PM, Wei Huang wrote:
> +struct hvm_arm_gich_v2
> +{
> +    uint32_t gic_hcr;
> +    uint32_t gic_vmcr;
> +    uint32_t gic_apr;
> +    uint32_t gic_lr[64];
> +    uint64_t event_mask;

FYI, the field event_mask has been dropped in xen upstream [1]

So you don't need to save it.

[1] http://xenbits.xen.org/gitweb/?p=xen.git;a=commit;h=6fedf29bf3ff8a2391eef7c45244406ec4900f88

Regards,
Ian Campbell May 16, 2014, 7:36 a.m. UTC | #12
On Thu, 2014-05-15 at 18:15 +0100, Julien Grall wrote:
> Hi Wei,
> 
> On 05/08/2014 10:18 PM, Wei Huang wrote:
> > +struct hvm_arm_gich_v2
> > +{
> > +    uint32_t gic_hcr;
> > +    uint32_t gic_vmcr;
> > +    uint32_t gic_apr;
> > +    uint32_t gic_lr[64];
> > +    uint64_t event_mask;
> 
> FYI, the field event_mask as been dropped in xen upstream [1]
> 
> So you don't need to save it.

Quite apart from its existence in upstream Xen, it is not and never was
an architectural field and so shouldn't ever be saved. Its presence or
absence in some struct inside Xen has no impact on that.

Ian.

Patch

diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 4cf6470..505e944 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -24,6 +24,7 @@ 
 #include <xen/softirq.h>
 #include <xen/irq.h>
 #include <xen/sched.h>
+#include <xen/hvm/save.h>
 
 #include <asm/current.h>
 
@@ -73,6 +74,110 @@  static struct vgic_irq_rank *vgic_irq_rank(struct vcpu *v, int b, int n)
         return NULL;
 }
 
+/* Save guest VM's distributor info into a context to support domains
+ * save/restore. Such info represents guest VM's view of its GIC
+ * distributor (GICD_*).
+ */
+static int hvm_vgicd_save(struct domain *d, hvm_domain_context_t *h)
+{
+    struct hvm_arm_vgicd_v2 ctxt;
+    struct vcpu *v;
+    struct vgic_irq_rank *rank;
+    int rc = 0;
+
+    /* Save the state for each VCPU */
+    for_each_vcpu( d, v )
+    {
+        rank = &v->arch.vgic.private_irqs;
+
+        /* IENABLE, IACTIVE, IPEND,  PENDSGI */
+        ctxt.ienable = rank->ienable;
+        ctxt.iactive = rank->iactive;
+        ctxt.ipend = rank->ipend;
+        ctxt.pendsgi = rank->pendsgi;
+
+        /* ICFG */
+        ctxt.icfg[0] = rank->icfg[0];
+        ctxt.icfg[1] = rank->icfg[1];
+
+        /* IPRIORITY */
+        BUILD_BUG_ON(sizeof(rank->ipriority) != sizeof (ctxt.ipriority));
+        memcpy(ctxt.ipriority, rank->ipriority, sizeof(rank->ipriority));
+
+        /* ITARGETS */
+        BUILD_BUG_ON(sizeof(rank->itargets) != sizeof (ctxt.itargets));
+        memcpy(ctxt.itargets, rank->itargets, sizeof(rank->itargets));
+
+        if ( (rc = hvm_save_entry(VGICD_V2, v->vcpu_id, h, &ctxt)) != 0 )
+            return rc;
+    }
+
+    return rc;
+}
+
+/* Load guest VM's distributor info from a context to support domain
+ * save/restore. The info is loaded into vgic_irq_rank.
+ */
+static int hvm_vgicd_load(struct domain *d, hvm_domain_context_t *h)
+{
+    struct hvm_arm_vgicd_v2 ctxt;
+    struct vgic_irq_rank *rank;
+    struct vcpu *v;
+    int vcpuid;
+    unsigned long enable_bits;
+    struct pending_irq *p;
+    unsigned int irq = 0;
+    int rc = 0;
+
+    /* Which vcpu is this? */
+    vcpuid = hvm_load_instance(h);
+    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
+    {
+        dprintk(XENLOG_ERR, "HVM restore: dom%u has no vcpu%u\n",
+                d->domain_id, vcpuid);
+        return -EINVAL;
+    }
+
+    if ( (rc = hvm_load_entry(VGICD_V2, h, &ctxt)) != 0 )
+        return rc;
+
+    /* Restore PPI states */
+    rank = &v->arch.vgic.private_irqs;
+
+    /* IENABLE, IACTIVE, IPEND, PENDSGI */
+    rank->ienable = ctxt.ienable;
+    rank->iactive = ctxt.iactive;
+    rank->ipend = ctxt.ipend;
+    rank->pendsgi = ctxt.pendsgi;
+
+    /* ICFG */
+    rank->icfg[0] = ctxt.icfg[0];
+    rank->icfg[1] = ctxt.icfg[1];
+
+    /* IPRIORITY */
+    BUILD_BUG_ON(sizeof(rank->ipriority) != sizeof (ctxt.ipriority));
+    memcpy(rank->ipriority, ctxt.ipriority, sizeof(rank->ipriority));
+
+    /* ITARGETS */
+    BUILD_BUG_ON(sizeof(rank->itargets) != sizeof (ctxt.itargets));
+    memcpy(rank->itargets, ctxt.itargets, sizeof(rank->itargets));
+
+    /* Set IRQ status as enabled by iterating through rank->ienable register.
+     * This step is required otherwise events won't be received by the VM
+     * after restore. */
+    enable_bits = ctxt.ienable;
+    while ( (irq = find_next_bit(&enable_bits, 32, irq)) < 32 )
+    {
+        p = irq_to_pending(v, irq);
+        set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
+        irq++;
+    }
+
+    return 0;
+}
+HVM_REGISTER_SAVE_RESTORE(VGICD_V2, hvm_vgicd_save, hvm_vgicd_load,
+                          1, HVMSR_PER_VCPU);
+
 int domain_vgic_init(struct domain *d)
 {
     int i;
@@ -759,6 +864,72 @@  out:
         smp_send_event_check_mask(cpumask_of(v->processor));
 }
 
+/* Save GIC virtual control state into a context to support save/restore. 
+ * The info reprsents most of GICH_* registers. */
+static int hvm_gich_save(struct domain *d, hvm_domain_context_t *h)
+{
+    struct hvm_arm_gich_v2 ctxt;
+    struct vcpu *v;
+    int rc = 0;
+
+    /* Save the state of GICs */
+    for_each_vcpu( d, v )
+    {
+        ctxt.gic_hcr = v->arch.gic_hcr;
+        ctxt.gic_vmcr = v->arch.gic_vmcr;
+        ctxt.gic_apr = v->arch.gic_apr;
+
+        /* Save list registers and masks */
+        BUILD_BUG_ON(sizeof(v->arch.gic_lr) > sizeof (ctxt.gic_lr));
+        memcpy(ctxt.gic_lr, v->arch.gic_lr, sizeof(v->arch.gic_lr));
+
+        ctxt.lr_mask = v->arch.lr_mask;
+        ctxt.event_mask = v->arch.event_mask;
+
+        if ( (rc = hvm_save_entry(GICH_V2, v->vcpu_id, h, &ctxt)) != 0 )
+            return rc;
+    }
+
+    return rc;
+}
+
+/* Restore GIC virtual control state from a context to support save/restore */
+static int hvm_gich_load(struct domain *d, hvm_domain_context_t *h)
+{
+    int vcpuid;
+    struct hvm_arm_gich_v2 ctxt;
+    struct vcpu *v;
+    int rc = 0;
+
+    /* Which vcpu is this? */
+    vcpuid = hvm_load_instance(h);
+    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
+    {
+        dprintk(XENLOG_ERR, "HVM restore: dom%u has no vcpu%u\n", d->domain_id,
+                vcpuid);
+        return -EINVAL;
+    }
+
+    if ( (rc = hvm_load_entry(GICH_V2, h, &ctxt)) != 0 )
+        return rc;
+
+    v->arch.gic_hcr = ctxt.gic_hcr;
+    v->arch.gic_vmcr = ctxt.gic_vmcr;
+    v->arch.gic_apr = ctxt.gic_apr;
+
+    /* Restore list registers and masks */
+    BUILD_BUG_ON(sizeof(v->arch.gic_lr) > sizeof (ctxt.gic_lr));
+    memcpy(v->arch.gic_lr, ctxt.gic_lr, sizeof(v->arch.gic_lr));
+
+    v->arch.lr_mask = ctxt.lr_mask;
+    v->arch.event_mask = ctxt.event_mask;
+
+    return rc;
+}
+
+HVM_REGISTER_SAVE_RESTORE(GICH_V2, hvm_gich_save, hvm_gich_load, 1,
+                          HVMSR_PER_VCPU);
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/public/arch-arm/hvm/save.h b/xen/include/public/arch-arm/hvm/save.h
index 8312e7b..421a6f6 100644
--- a/xen/include/public/arch-arm/hvm/save.h
+++ b/xen/include/public/arch-arm/hvm/save.h
@@ -40,10 +40,42 @@  struct hvm_save_header
 };
 DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
 
+/* Guest's view of GIC distributor (per-vcpu)
+ *   - Based on GICv2 (see "struct vgic_irq_rank")
+ *   - Store guest's view of GIC distributor
+ *   - Only support SGI and PPI for DomU (DomU doesn't handle SPI)
+ */
+struct hvm_arm_vgicd_v2
+{
+    uint32_t ienable;
+    uint32_t iactive;
+    uint32_t ipend;
+    uint32_t pendsgi;
+    uint32_t icfg[2];
+    uint32_t ipriority[8];
+    uint32_t itargets[8];
+};
+DECLARE_HVM_SAVE_TYPE(VGICD_V2, 2, struct hvm_arm_vgicd_v2);
+
+/* Info for hypervisor to manage guests (per-vcpu)
+ *   - Based on GICv2
+ *   - Mainly store registers of GICH_*
+ */
+struct hvm_arm_gich_v2
+{
+    uint32_t gic_hcr;
+    uint32_t gic_vmcr;
+    uint32_t gic_apr;
+    uint32_t gic_lr[64];
+    uint64_t event_mask;
+    uint64_t lr_mask;
+};
+DECLARE_HVM_SAVE_TYPE(GICH_V2, 3, struct hvm_arm_gich_v2);
+
 /*
  * Largest type-code in use
  */
-#define HVM_SAVE_CODE_MAX 1
+#define HVM_SAVE_CODE_MAX 3
 
 #endif