diff mbox series

[v4,1/2] target/arm: kvm: Handle DABT with no valid ISS

Message ID 20200323113227.3169-2-beata.michalska@linaro.org
State New
Headers show
Series target/arm: kvm: Support for KVM DABT with no valid ISS | expand

Commit Message

Beata Michalska March 23, 2020, 11:32 a.m. UTC
On ARMv7 & ARMv8 some load/store instructions might trigger a data abort
exception with no valid ISS info to be decoded. The lack of decode info
makes it at least tricky to emulate those instructions, which is one of the
(many) reasons why KVM will not even try to do so.

Add support for handling those by requesting KVM to inject an external
dabt into the guest.

Signed-off-by: Beata Michalska <beata.michalska@linaro.org>

---
 target/arm/cpu.h     |  2 ++
 target/arm/kvm.c     | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 target/arm/kvm_arm.h | 11 +++++++++++
 3 files changed, 67 insertions(+)

-- 
2.7.4

Comments

Andrew Jones March 23, 2020, 12:44 p.m. UTC | #1
On Mon, Mar 23, 2020 at 11:32:26AM +0000, Beata Michalska wrote:
> On ARMv7 & ARMv8 some load/store instructions might trigger a data abort

> exception with no valid ISS info to be decoded. The lack of decode info

> makes it at least tricky to emulate those instruction which is one of the

> (many) reasons why KVM will not even try to do so.

> 

> Add support for handling those by requesting KVM to inject external

> dabt into the quest.

> 

> Signed-off-by: Beata Michalska <beata.michalska@linaro.org>

> ---

>  target/arm/cpu.h     |  2 ++

>  target/arm/kvm.c     | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++

>  target/arm/kvm_arm.h | 11 +++++++++++

>  3 files changed, 67 insertions(+)

> 

> diff --git a/target/arm/cpu.h b/target/arm/cpu.h

> index 4ffd991..4f834c1 100644

> --- a/target/arm/cpu.h

> +++ b/target/arm/cpu.h

> @@ -560,6 +560,8 @@ typedef struct CPUARMState {

>          uint64_t esr;

>      } serror;

>  

> +    uint8_t ext_dabt_pending; /* Request for injecting ext DABT */

> +

>      /* State of our input IRQ/FIQ/VIRQ/VFIQ lines */

>      uint32_t irq_line_state;

>  

> diff --git a/target/arm/kvm.c b/target/arm/kvm.c

> index 85860e6..c088589 100644

> --- a/target/arm/kvm.c

> +++ b/target/arm/kvm.c

> @@ -39,6 +39,7 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {

>  

>  static bool cap_has_mp_state;

>  static bool cap_has_inject_serror_esr;

> +static bool cap_has_inject_ext_dabt;

>  

>  static ARMHostCPUFeatures arm_host_cpu_features;

>  

> @@ -244,6 +245,16 @@ int kvm_arch_init(MachineState *ms, KVMState *s)

>          ret = -EINVAL;

>      }

>  

> +    if (kvm_check_extension(s, KVM_CAP_ARM_NISV_TO_USER)) {

> +        if (kvm_vm_enable_cap(s, KVM_CAP_ARM_NISV_TO_USER, 0)) {

> +            warn_report("Failed to enable DABT NISV cap");


Shouldn't this be an error? If KVM says it has KVM_CAP_ARM_NISV_TO_USER,
then I think it should always work to enable it, unless userspace passes
the wrong flags. Currently flags must be zero, but if they were to change
then we'll need to add the flags to vmstate and fail migration when they
aren't compatible, and I guess that failure would occur here.

> +        } else {

> +            /* Set status for supporting the external dabt injection */

> +            cap_has_inject_ext_dabt = kvm_check_extension(s,

> +                                    KVM_CAP_ARM_INJECT_EXT_DABT);

> +        }

> +    }

> +

>      return ret;

>  }

>  

> @@ -703,9 +714,16 @@ int kvm_put_vcpu_events(ARMCPU *cpu)

>          events.exception.serror_esr = env->serror.esr;

>      }

>  

> +    if (cap_has_inject_ext_dabt) {

> +        events.exception.ext_dabt_pending = env->ext_dabt_pending;

> +    }

> +

>      ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);

>      if (ret) {

>          error_report("failed to put vcpu events");

> +    } else {

> +        /* Clear instantly if the call was successful */

> +        env->ext_dabt_pending = 0;

>      }

>  

>      return ret;

> @@ -819,6 +837,11 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)

>              ret = EXCP_DEBUG;

>          } /* otherwise return to guest */

>          break;

> +    case KVM_EXIT_ARM_NISV:

> +        /* External DABT with no valid iss to decode */

> +        ret = kvm_arm_handle_dabt_nisv(cs, run->arm_nisv.esr_iss,

> +                                       run->arm_nisv.fault_ipa);

> +        break;

>      default:

>          qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",

>                        __func__, run->exit_reason);

> @@ -953,3 +976,34 @@ int kvm_arch_msi_data_to_gsi(uint32_t data)

>  {

>      return (data - 32) & 0xffff;

>  }

> +

> +int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss,

> +                             uint64_t fault_ipa)

> +{

> +    ARMCPU *cpu = ARM_CPU(cs);

> +    CPUARMState *env = &cpu->env;

> +

> +   /*

> +    * ISS [23:14] is invalid so there is a limited info

> +    * on what has just happened so the only *useful* thing that can

> +    * be retrieved from ISS is WnR & DFSC (though in some cases WnR

> +    * might be less of a value as well)

> +    */

> +

> +    /*

> +     * Set pending ext dabt and trigger SET_EVENTS so that

> +     * KVM can inject the abort

> +     */

> +    if (cap_has_inject_ext_dabt) {

> +        kvm_cpu_synchronize_state(cs);


I guess this is just an expensive 'vcpu_dirty=true', which the comment
above justifies, but not super clearly. Can you try to clarify the
comment some more?  I also wonder if we shouldn't add a KVM function
that just marks a vcpu dirty, rather than fetching all registers.

> +        env->ext_dabt_pending = 1;

> +    } else {

> +        error_report("Data abort exception triggered by guest memory access "

> +                     "at physical address: 0x"  TARGET_FMT_lx,

> +                     (target_ulong)fault_ipa);

> +        error_printf("KVM unable to emulate faulting instruction.\n");

> +        return -1;

> +    }

> +

> +    return 0;

> +}

> diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h

> index ae9e075..39472d5 100644

> --- a/target/arm/kvm_arm.h

> +++ b/target/arm/kvm_arm.h

> @@ -450,6 +450,17 @@ struct kvm_guest_debug_arch;

>  void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr);

>  

>  /**

> + * kvm_arm_handle_dabt_nisv:

> + * @cs: CPUState

> + * @esr_iss: ISS encoding (limited) for the exception from Data Abort

> + *           ISV bit set to '0b0' -> no valid instruction syndrome

> + * @fault_ipa: faulting address for the synch data abort

> + *

> + * Returns: 0 if the exception has been handled, < 0 otherwise

> + */

> +int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss,

> +                             uint64_t fault_ipa);

> +/**

>   * its_class_name:

>   *

>   * Return the ITS class name to use depending on whether KVM acceleration

> -- 

> 2.7.4

>


Thanks,
drew
Beata Michalska March 25, 2020, 3:15 p.m. UTC | #2
Hi,

On Mon, 23 Mar 2020 at 12:44, Andrew Jones <drjones@redhat.com> wrote:
>

> On Mon, Mar 23, 2020 at 11:32:26AM +0000, Beata Michalska wrote:

> > On ARMv7 & ARMv8 some load/store instructions might trigger a data abort

> > exception with no valid ISS info to be decoded. The lack of decode info

> > makes it at least tricky to emulate those instruction which is one of the

> > (many) reasons why KVM will not even try to do so.

> >

> > Add support for handling those by requesting KVM to inject external

> > dabt into the quest.

> >

> > Signed-off-by: Beata Michalska <beata.michalska@linaro.org>

> > ---

> >  target/arm/cpu.h     |  2 ++

> >  target/arm/kvm.c     | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++

> >  target/arm/kvm_arm.h | 11 +++++++++++

> >  3 files changed, 67 insertions(+)

> >

> > diff --git a/target/arm/cpu.h b/target/arm/cpu.h

> > index 4ffd991..4f834c1 100644

> > --- a/target/arm/cpu.h

> > +++ b/target/arm/cpu.h

> > @@ -560,6 +560,8 @@ typedef struct CPUARMState {

> >          uint64_t esr;

> >      } serror;

> >

> > +    uint8_t ext_dabt_pending; /* Request for injecting ext DABT */

> > +

> >      /* State of our input IRQ/FIQ/VIRQ/VFIQ lines */

> >      uint32_t irq_line_state;

> >

> > diff --git a/target/arm/kvm.c b/target/arm/kvm.c

> > index 85860e6..c088589 100644

> > --- a/target/arm/kvm.c

> > +++ b/target/arm/kvm.c

> > @@ -39,6 +39,7 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {

> >

> >  static bool cap_has_mp_state;

> >  static bool cap_has_inject_serror_esr;

> > +static bool cap_has_inject_ext_dabt;

> >

> >  static ARMHostCPUFeatures arm_host_cpu_features;

> >

> > @@ -244,6 +245,16 @@ int kvm_arch_init(MachineState *ms, KVMState *s)

> >          ret = -EINVAL;

> >      }

> >

> > +    if (kvm_check_extension(s, KVM_CAP_ARM_NISV_TO_USER)) {

> > +        if (kvm_vm_enable_cap(s, KVM_CAP_ARM_NISV_TO_USER, 0)) {

> > +            warn_report("Failed to enable DABT NISV cap");

>

> Shouldn't this be an error? If KVM says it has KVM_CAP_ARM_NISV_TO_USER,

> then I think it should always work to enable it, unless userspace passes

> the wrong flags. Currently flags must be zero, but if they were to change

> then we'll need to add the flags to vmstate and fail migration when they

> aren't compatible, and I guess that failure would occur here.

>

That's a fair point. From the kernel point of view this one is pretty
straightforward, so it should not fail. I haven't used an error here as
the lack of this cap is not really critical for the guest, but indeed it
might be worth having it here.

> > +        } else {

> > +            /* Set status for supporting the external dabt injection */

> > +            cap_has_inject_ext_dabt = kvm_check_extension(s,

> > +                                    KVM_CAP_ARM_INJECT_EXT_DABT);

> > +        }

> > +    }

> > +

> >      return ret;

> >  }

> >

> > @@ -703,9 +714,16 @@ int kvm_put_vcpu_events(ARMCPU *cpu)

> >          events.exception.serror_esr = env->serror.esr;

> >      }

> >

> > +    if (cap_has_inject_ext_dabt) {

> > +        events.exception.ext_dabt_pending = env->ext_dabt_pending;

> > +    }

> > +

> >      ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);

> >      if (ret) {

> >          error_report("failed to put vcpu events");

> > +    } else {

> > +        /* Clear instantly if the call was successful */

> > +        env->ext_dabt_pending = 0;

> >      }

> >

> >      return ret;

> > @@ -819,6 +837,11 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)

> >              ret = EXCP_DEBUG;

> >          } /* otherwise return to guest */

> >          break;

> > +    case KVM_EXIT_ARM_NISV:

> > +        /* External DABT with no valid iss to decode */

> > +        ret = kvm_arm_handle_dabt_nisv(cs, run->arm_nisv.esr_iss,

> > +                                       run->arm_nisv.fault_ipa);

> > +        break;

> >      default:

> >          qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",

> >                        __func__, run->exit_reason);

> > @@ -953,3 +976,34 @@ int kvm_arch_msi_data_to_gsi(uint32_t data)

> >  {

> >      return (data - 32) & 0xffff;

> >  }

> > +

> > +int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss,

> > +                             uint64_t fault_ipa)

> > +{

> > +    ARMCPU *cpu = ARM_CPU(cs);

> > +    CPUARMState *env = &cpu->env;

> > +

> > +   /*

> > +    * ISS [23:14] is invalid so there is a limited info

> > +    * on what has just happened so the only *useful* thing that can

> > +    * be retrieved from ISS is WnR & DFSC (though in some cases WnR

> > +    * might be less of a value as well)

> > +    */

> > +

> > +    /*

> > +     * Set pending ext dabt and trigger SET_EVENTS so that

> > +     * KVM can inject the abort

> > +     */

> > +    if (cap_has_inject_ext_dabt) {

> > +        kvm_cpu_synchronize_state(cs);

>

> I guess this is just an expensive 'vcpu_dirty=true', which the comment

> above justifies, but not super clearly. Can you try to clarify the

> comment some more?  I also wonder if we shouldn't add a KVM function

> that just marks a vcpu dirty, rather than fetching all registers.

>

I can definitely adjust the comments here to explain the case more
clearly. And I would definitely vote for moving the flag setting/clearing
outside the sync so that it can be triggered separately without involving
the whole series of potentially unnecessary ioctls. Will draft that in
the next iteration.

Thanks
Beata


> > +        env->ext_dabt_pending = 1;

> > +    } else {

> > +        error_report("Data abort exception triggered by guest memory access "

> > +                     "at physical address: 0x"  TARGET_FMT_lx,

> > +                     (target_ulong)fault_ipa);

> > +        error_printf("KVM unable to emulate faulting instruction.\n");

> > +        return -1;

> > +    }

> > +

> > +    return 0;

> > +}

> > diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h

> > index ae9e075..39472d5 100644

> > --- a/target/arm/kvm_arm.h

> > +++ b/target/arm/kvm_arm.h

> > @@ -450,6 +450,17 @@ struct kvm_guest_debug_arch;

> >  void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr);

> >

> >  /**

> > + * kvm_arm_handle_dabt_nisv:

> > + * @cs: CPUState

> > + * @esr_iss: ISS encoding (limited) for the exception from Data Abort

> > + *           ISV bit set to '0b0' -> no valid instruction syndrome

> > + * @fault_ipa: faulting address for the synch data abort

> > + *

> > + * Returns: 0 if the exception has been handled, < 0 otherwise

> > + */

> > +int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss,

> > +                             uint64_t fault_ipa);

> > +/**

> >   * its_class_name:

> >   *

> >   * Return the ITS class name to use depending on whether KVM acceleration

> > --

> > 2.7.4

> >

>

> Thanks,

> drew

>
Peter Maydell April 17, 2020, 10:39 a.m. UTC | #3
On Mon, 23 Mar 2020 at 11:32, Beata Michalska
<beata.michalska@linaro.org> wrote:
>

> On ARMv7 & ARMv8 some load/store instructions might trigger a data abort

> exception with no valid ISS info to be decoded. The lack of decode info

> makes it at least tricky to emulate those instruction which is one of the

> (many) reasons why KVM will not even try to do so.

>

> Add support for handling those by requesting KVM to inject external

> dabt into the quest.

>

> Signed-off-by: Beata Michalska <beata.michalska@linaro.org>

> ---

>  target/arm/cpu.h     |  2 ++

>  target/arm/kvm.c     | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++

>  target/arm/kvm_arm.h | 11 +++++++++++

>  3 files changed, 67 insertions(+)

>

> diff --git a/target/arm/cpu.h b/target/arm/cpu.h

> index 4ffd991..4f834c1 100644

> --- a/target/arm/cpu.h

> +++ b/target/arm/cpu.h

> @@ -560,6 +560,8 @@ typedef struct CPUARMState {

>          uint64_t esr;

>      } serror;

>

> +    uint8_t ext_dabt_pending; /* Request for injecting ext DABT */


I was trying to work out whether we need to migrate this state,
and I'm not sure. Andrew, do you know? I think this comes down
to "at what points in QEMU's kvm run loop can migration kick in",
and specifically if we get a KVM_EXIT_ARM_NISV do we definitely
go round the loop and KVM_RUN again without ever checking
to see if we should do a migration ?

thanks
-- PMM
Andrew Jones April 17, 2020, 1:10 p.m. UTC | #4
On Fri, Apr 17, 2020 at 11:39:25AM +0100, Peter Maydell wrote:
> On Mon, 23 Mar 2020 at 11:32, Beata Michalska

> <beata.michalska@linaro.org> wrote:

> >

> > On ARMv7 & ARMv8 some load/store instructions might trigger a data abort

> > exception with no valid ISS info to be decoded. The lack of decode info

> > makes it at least tricky to emulate those instruction which is one of the

> > (many) reasons why KVM will not even try to do so.

> >

> > Add support for handling those by requesting KVM to inject external

> > dabt into the quest.

> >

> > Signed-off-by: Beata Michalska <beata.michalska@linaro.org>

> > ---

> >  target/arm/cpu.h     |  2 ++

> >  target/arm/kvm.c     | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++

> >  target/arm/kvm_arm.h | 11 +++++++++++

> >  3 files changed, 67 insertions(+)

> >

> > diff --git a/target/arm/cpu.h b/target/arm/cpu.h

> > index 4ffd991..4f834c1 100644

> > --- a/target/arm/cpu.h

> > +++ b/target/arm/cpu.h

> > @@ -560,6 +560,8 @@ typedef struct CPUARMState {

> >          uint64_t esr;

> >      } serror;

> >

> > +    uint8_t ext_dabt_pending; /* Request for injecting ext DABT */

> 

> I was trying to work out whether we need to migrate this state,

> and I'm not sure. Andrew, do you know? I think this comes down

> to "at what points in QEMU's kvm run loop can migration kick in",

> and specifically if we get a KVM_EXIT_ARM_NISV do we definitely

> go round the loop and KVM_RUN again without ever checking

> to see if we should do a migration ?

>


I'd prefer a migration expert confirm this, so I've CC'ed David and Juan,
but afaict there's no way to break out of the KVM_RUN loop after a
successful (ret=0) call to kvm_arch_handle_exit() until after the next
KVM_RUN ioctl. This is because even if migration kicks the vcpus between
kvm_arch_handle_exit() and the next run, the signal won't do anything
other than prepare the vcpu for an immediate exit.

Thanks,
drew
Beata Michalska April 18, 2020, 10:56 p.m. UTC | #5
On Fri, 17 Apr 2020 at 14:10, Andrew Jones <drjones@redhat.com> wrote:
>

> On Fri, Apr 17, 2020 at 11:39:25AM +0100, Peter Maydell wrote:

> > On Mon, 23 Mar 2020 at 11:32, Beata Michalska

> > <beata.michalska@linaro.org> wrote:

> > >

> > > On ARMv7 & ARMv8 some load/store instructions might trigger a data abort

> > > exception with no valid ISS info to be decoded. The lack of decode info

> > > makes it at least tricky to emulate those instruction which is one of the

> > > (many) reasons why KVM will not even try to do so.

> > >

> > > Add support for handling those by requesting KVM to inject external

> > > dabt into the quest.

> > >

> > > Signed-off-by: Beata Michalska <beata.michalska@linaro.org>

> > > ---

> > >  target/arm/cpu.h     |  2 ++

> > >  target/arm/kvm.c     | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++

> > >  target/arm/kvm_arm.h | 11 +++++++++++

> > >  3 files changed, 67 insertions(+)

> > >

> > > diff --git a/target/arm/cpu.h b/target/arm/cpu.h

> > > index 4ffd991..4f834c1 100644

> > > --- a/target/arm/cpu.h

> > > +++ b/target/arm/cpu.h

> > > @@ -560,6 +560,8 @@ typedef struct CPUARMState {

> > >          uint64_t esr;

> > >      } serror;

> > >

> > > +    uint8_t ext_dabt_pending; /* Request for injecting ext DABT */

> >

> > I was trying to work out whether we need to migrate this state,

> > and I'm not sure. Andrew, do you know? I think this comes down

> > to "at what points in QEMU's kvm run loop can migration kick in",

> > and specifically if we get a KVM_EXIT_ARM_NISV do we definitely

> > go round the loop and KVM_RUN again without ever checking

> > to see if we should do a migration ?

> >

>

> I'd prefer a migration expert confirm this, so I've CC'ed David and Juan,

> but afaict there's no way to break out of the KVM_RUN loop after a

> successful (ret=0) call to kvm_arch_handle_exit() until after the next

> KVM_RUN ioctl. This is because even if migration kicks the vcpus between

> kvm_arch_handle_exit() and the next run, the signal won't do anything

> other than prepare the vcpu for an immediate exit.

>

I am definitely not an expert on that one, but if I got things right,
by the time the 'exit_request' gets verified, the external abort
should already be set up, the pending status cleared (through
KVM_SET_VCPU_EVENTS), and the reg content verified (kvm_arch_pre_run),
as all of it is triggered prior to checking the exit request. So this
should not need a dedicated migration state.

I will hold on with sending the new version though to get the confirmation
whether that is the case.

Thanks,

BR
Beata
>

> Thanks,

> drew

>
Dr. David Alan Gilbert April 24, 2020, 12:16 p.m. UTC | #6
* Andrew Jones (drjones@redhat.com) wrote:
> On Fri, Apr 17, 2020 at 11:39:25AM +0100, Peter Maydell wrote:

> > On Mon, 23 Mar 2020 at 11:32, Beata Michalska

> > <beata.michalska@linaro.org> wrote:

> > >

> > > On ARMv7 & ARMv8 some load/store instructions might trigger a data abort

> > > exception with no valid ISS info to be decoded. The lack of decode info

> > > makes it at least tricky to emulate those instruction which is one of the

> > > (many) reasons why KVM will not even try to do so.

> > >

> > > Add support for handling those by requesting KVM to inject external

> > > dabt into the quest.

> > >

> > > Signed-off-by: Beata Michalska <beata.michalska@linaro.org>

> > > ---

> > >  target/arm/cpu.h     |  2 ++

> > >  target/arm/kvm.c     | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++

> > >  target/arm/kvm_arm.h | 11 +++++++++++

> > >  3 files changed, 67 insertions(+)

> > >

> > > diff --git a/target/arm/cpu.h b/target/arm/cpu.h

> > > index 4ffd991..4f834c1 100644

> > > --- a/target/arm/cpu.h

> > > +++ b/target/arm/cpu.h

> > > @@ -560,6 +560,8 @@ typedef struct CPUARMState {

> > >          uint64_t esr;

> > >      } serror;

> > >

> > > +    uint8_t ext_dabt_pending; /* Request for injecting ext DABT */

> > 

> > I was trying to work out whether we need to migrate this state,

> > and I'm not sure. Andrew, do you know? I think this comes down

> > to "at what points in QEMU's kvm run loop can migration kick in",

> > and specifically if we get a KVM_EXIT_ARM_NISV do we definitely

> > go round the loop and KVM_RUN again without ever checking

> > to see if we should do a migration ?

> >

> 

> I'd prefer a migration expert confirm this, so I've CC'ed David and Juan,

> but afaict there's no way to break out of the KVM_RUN loop after a

> successful (ret=0) call to kvm_arch_handle_exit() until after the next

> KVM_RUN ioctl. This is because even if migration kicks the vcpus between

> kvm_arch_handle_exit() and the next run, the signal won't do anything

> other than prepare the vcpu for an immediate exit.


This is a level I've not looked at, I'm afraid.
However, the point at which we're saving the vCPU state is when the
vCPUs are stopped; so as long as the state that you save is everything
you need to restart, and you migrate that, then you should be OK; but in
the end, from the migration point of view, we just stop the VM and ask for
each device's state.

Dave

> Thanks,

> drew 

--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
Peter Maydell April 24, 2020, 12:51 p.m. UTC | #7
On Fri, 24 Apr 2020 at 13:17, Dr. David Alan Gilbert
<dgilbert@redhat.com> wrote:
>

> * Andrew Jones (drjones@redhat.com) wrote:

> > On Fri, Apr 17, 2020 at 11:39:25AM +0100, Peter Maydell wrote:

> > > I was trying to work out whether we need to migrate this state,

> > > and I'm not sure. Andrew, do you know? I think this comes down

> > > to "at what points in QEMU's kvm run loop can migration kick in",

> > > and specifically if we get a KVM_EXIT_ARM_NISV do we definitely

> > > go round the loop and KVM_RUN again without ever checking

> > > to see if we should do a migration ?

> > >

> >

> > I'd prefer a migration expert confirm this, so I've CC'ed David and Juan,

> > but afaict there's no way to break out of the KVM_RUN loop after a

> > successful (ret=0) call to kvm_arch_handle_exit() until after the next

> > KVM_RUN ioctl. This is because even if migration kicks the vcpus between

> > kvm_arch_handle_exit() and the next run, the signal won't do anything

> > other than prepare the vcpu for an immediate exit.

>

> This is a level I've not looked at I'm afraid.

> However, the point at which we're saving the vCPU state is when the

> vCPUs are stopped; so as long as your state that you save is everything

> you need to restart and you migrate that then you should be OK; but in

> the end fromt he migration point of view we just stop the VM and ask for

> each devices state.


Yeah, but I think the question is "at what point in the main loop
do we check 'is the VM stopping'". It sounds from what Andrew
says that the answer is that it can't happen at this point in
time, though.

thanks
-- PMM
Paolo Bonzini April 25, 2020, 9:24 a.m. UTC | #8
On 24/04/20 14:16, Dr. David Alan Gilbert wrote:
>>> I was trying to work out whether we need to migrate this state,

>>> and I'm not sure. Andrew, do you know? I think this comes down

>>> to "at what points in QEMU's kvm run loop can migration kick in",

>>> and specifically if we get a KVM_EXIT_ARM_NISV do we definitely

>>> go round the loop and KVM_RUN again without ever checking

>>> to see if we should do a migration ?

>>>

>> I'd prefer a migration expert confirm this, so I've CC'ed David and Juan,

>> but afaict there's no way to break out of the KVM_RUN loop after a

>> successful (ret=0) call to kvm_arch_handle_exit() until after the next

>> KVM_RUN ioctl. This is because even if migration kicks the vcpus between

>> kvm_arch_handle_exit() and the next run, the signal won't do anything

>> other than prepare the vcpu for an immediate exit.


As far as QEMU is concerned, this should be enough for Beata's patch to
be safe.  If the signal causes KVM to exit before KVM_EXIT_ARM_NISV,
it's of course okay.  If you get a KVM_EXIT_ARM_NISV, however, KVM_RUN
will exit with return code 0 and kvm_cpu_exec will:

- set env->ext_dabt_pending

- go round the loop again

- notice cpu->exit_request and schedule an immediate exit

- call kvm_arch_put_registers

- call KVM_RUN again, which will exit with -EINTR

- exit the loop and allow migration to proceed

However, I'm not sure that it's a good idea to

+        /* Clear instantly if the call was successful */
+        env->ext_dabt_pending = 0;

Rather, this should be done by the next kvm_arch_get_registers when it
calls KVM_GET_VCPU_EVENTS.  It's also possible to add an assertion in
kvm_get_vcpu_events that it you always get zero, to justify that the
field is not migrated.

Thanks,

Paolo
Andrew Jones April 27, 2020, 6:18 a.m. UTC | #9
On Sat, Apr 25, 2020 at 11:24:14AM +0200, Paolo Bonzini wrote:
> On 24/04/20 14:16, Dr. David Alan Gilbert wrote:

> >>> I was trying to work out whether we need to migrate this state,

> >>> and I'm not sure. Andrew, do you know? I think this comes down

> >>> to "at what points in QEMU's kvm run loop can migration kick in",

> >>> and specifically if we get a KVM_EXIT_ARM_NISV do we definitely

> >>> go round the loop and KVM_RUN again without ever checking

> >>> to see if we should do a migration ?

> >>>

> >> I'd prefer a migration expert confirm this, so I've CC'ed David and Juan,

> >> but afaict there's no way to break out of the KVM_RUN loop after a

> >> successful (ret=0) call to kvm_arch_handle_exit() until after the next

> >> KVM_RUN ioctl. This is because even if migration kicks the vcpus between

> >> kvm_arch_handle_exit() and the next run, the signal won't do anything

> >> other than prepare the vcpu for an immediate exit.

> 

> As far as QEMU is concerned, this should be enough for Beata's patch to

> be safe.  If the signal causes KVM to exit before KVM_EXIT_ARM_NISV,

> it's of course okay.  If you get a KVM_EXIT_ARM_NISV, however, KVM_RUN

> will exit with return code 0 and kvm_cpu_exec will:

> 

> - set env->ext_dabt_pending

> 

> - go round the loop again

> 

> - notice cpu->exit_request and schedule an immediate exit

> 

> - call kvm_arch_put_registers

> 

> - call KVM_RUN again, which will exit with -EINTR

> 

> - exit the loop and allow migration to proceed


This was my understanding as well. Thanks for the confirmation.

> 

> However, I'm not sure that it's a good idea to

> 

> +        /* Clear instantly if the call was successful */

> +        env->ext_dabt_pending = 0;

> 

> Rather, this should be done by the next kvm_arch_get_registers when it

> calls KVM_GET_VCPU_EVENTS.  It's also possible to add an assertion in

> kvm_get_vcpu_events that it you always get zero, to justify that the

> field is not migrated.


I like the idea of a balanced API; keeping the 'set' abort pending
until the next 'get', but this event doesn't work that way.
Documentation/virt/kvm/api.rst states:

  It is not possible to read back a pending external abort (injected via
  KVM_SET_VCPU_EVENTS or otherwise) because such an exception is always
  delivered directly to the virtual CPU).

I think clearing the userspace copy instantly after a successful
KVM_SET_VCPU_EVENTS implements that correctly.

Thanks,
drew
diff mbox series

Patch

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 4ffd991..4f834c1 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -560,6 +560,8 @@  typedef struct CPUARMState {
         uint64_t esr;
     } serror;
 
+    uint8_t ext_dabt_pending; /* Request for injecting ext DABT */
+
     /* State of our input IRQ/FIQ/VIRQ/VFIQ lines */
     uint32_t irq_line_state;
 
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index 85860e6..c088589 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -39,6 +39,7 @@  const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
 
 static bool cap_has_mp_state;
 static bool cap_has_inject_serror_esr;
+static bool cap_has_inject_ext_dabt;
 
 static ARMHostCPUFeatures arm_host_cpu_features;
 
@@ -244,6 +245,16 @@  int kvm_arch_init(MachineState *ms, KVMState *s)
         ret = -EINVAL;
     }
 
+    if (kvm_check_extension(s, KVM_CAP_ARM_NISV_TO_USER)) {
+        if (kvm_vm_enable_cap(s, KVM_CAP_ARM_NISV_TO_USER, 0)) {
+            warn_report("Failed to enable DABT NISV cap");
+        } else {
+            /* Set status for supporting the external dabt injection */
+            cap_has_inject_ext_dabt = kvm_check_extension(s,
+                                    KVM_CAP_ARM_INJECT_EXT_DABT);
+        }
+    }
+
     return ret;
 }
 
@@ -703,9 +714,16 @@  int kvm_put_vcpu_events(ARMCPU *cpu)
         events.exception.serror_esr = env->serror.esr;
     }
 
+    if (cap_has_inject_ext_dabt) {
+        events.exception.ext_dabt_pending = env->ext_dabt_pending;
+    }
+
     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
     if (ret) {
         error_report("failed to put vcpu events");
+    } else {
+        /* Clear instantly if the call was successful */
+        env->ext_dabt_pending = 0;
     }
 
     return ret;
@@ -819,6 +837,11 @@  int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
             ret = EXCP_DEBUG;
         } /* otherwise return to guest */
         break;
+    case KVM_EXIT_ARM_NISV:
+        /* External DABT with no valid iss to decode */
+        ret = kvm_arm_handle_dabt_nisv(cs, run->arm_nisv.esr_iss,
+                                       run->arm_nisv.fault_ipa);
+        break;
     default:
         qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
                       __func__, run->exit_reason);
@@ -953,3 +976,34 @@  int kvm_arch_msi_data_to_gsi(uint32_t data)
 {
     return (data - 32) & 0xffff;
 }
+
+int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss,
+                             uint64_t fault_ipa)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+
+   /*
+    * ISS [23:14] is invalid so there is a limited info
+    * on what has just happened so the only *useful* thing that can
+    * be retrieved from ISS is WnR & DFSC (though in some cases WnR
+    * might be less of a value as well)
+    */
+
+    /*
+     * Set pending ext dabt and trigger SET_EVENTS so that
+     * KVM can inject the abort
+     */
+    if (cap_has_inject_ext_dabt) {
+        kvm_cpu_synchronize_state(cs);
+        env->ext_dabt_pending = 1;
+    } else {
+        error_report("Data abort exception triggered by guest memory access "
+                     "at physical address: 0x"  TARGET_FMT_lx,
+                     (target_ulong)fault_ipa);
+        error_printf("KVM unable to emulate faulting instruction.\n");
+        return -1;
+    }
+
+    return 0;
+}
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index ae9e075..39472d5 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -450,6 +450,17 @@  struct kvm_guest_debug_arch;
 void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr);
 
 /**
+ * kvm_arm_handle_dabt_nisv:
+ * @cs: CPUState
+ * @esr_iss: ISS encoding (limited) for the exception from Data Abort
+ *           ISV bit set to '0b0' -> no valid instruction syndrome
+ * @fault_ipa: faulting address for the synch data abort
+ *
+ * Returns: 0 if the exception has been handled, < 0 otherwise
+ */
+int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss,
+                             uint64_t fault_ipa);
+/**
  * its_class_name:
  *
  * Return the ITS class name to use depending on whether KVM acceleration