
[v2,09/18] arm64: KVM: enable conditional save/restore full SPE profiling buffer controls

Message ID 20191220143025.33853-10-andrew.murray@arm.com
State New
Series [v2,01/18] dt-bindings: ARM SPE: highlight the need for PPI partitions on heterogeneous systems

Commit Message

Andrew Murray Dec. 20, 2019, 2:30 p.m. UTC
From: Sudeep Holla <sudeep.holla@arm.com>


Now that we can save/restore the full SPE controls, we can enable this
if SPE is set up and ready to use in KVM. It is supported in KVM only if
all the CPUs in the system support SPE.

However, to support heterogeneous systems, we need to move the check for
whether the host supports SPE and do only a partial save/restore.

Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>

Signed-off-by: Andrew Murray <andrew.murray@arm.com>

---
 arch/arm64/kvm/hyp/debug-sr.c | 33 ++++++++++++++++-----------------
 include/kvm/arm_spe.h         |  6 ++++++
 2 files changed, 22 insertions(+), 17 deletions(-)

-- 
2.21.0

Comments

Mark Rutland Dec. 20, 2019, 6:06 p.m. UTC | #1
On Fri, Dec 20, 2019 at 02:30:16PM +0000, Andrew Murray wrote:
> From: Sudeep Holla <sudeep.holla@arm.com>
> 
> Now that we can save/restore the full SPE controls, we can enable it
> if SPE is setup and ready to use in KVM. It's supported in KVM only if
> all the CPUs in the system supports SPE.
> 
> However to support heterogenous systems, we need to move the check if
> host supports SPE and do a partial save/restore.

I don't think that it makes sense to support this for heterogeneous
systems, given their SPE capabilities and IMP DEF details will differ.

Is there some way we can limit this to homogeneous systems?
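
For example (a sketch only, not part of this series), the availability
check could be keyed off the system-wide sanitised copy of
ID_AA64DFR0_EL1 rather than the per-CPU register, so that SPE is only
advertised to guests when every CPU implements it; kvm_host_supports_spe()
is a hypothetical helper name:

/*
 * Sketch: gate KVM's SPE support on the sanitised (system-wide) view of
 * ID_AA64DFR0_EL1.  kvm_host_supports_spe() is hypothetical and not part
 * of this series.
 */
static bool kvm_host_supports_spe(void)
{
	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);

	return !!cpuid_feature_extract_unsigned_field(dfr0,
						      ID_AA64DFR0_PMSVER_SHIFT);
}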

Thanks,
Mark.

Marc Zyngier Dec. 21, 2019, 2:13 p.m. UTC | #2
On Fri, 20 Dec 2019 14:30:16 +0000
Andrew Murray <andrew.murray@arm.com> wrote:

[somehow managed not to do a reply all, re-sending]

> From: Sudeep Holla <sudeep.holla@arm.com>
> 
> Now that we can save/restore the full SPE controls, we can enable it
> if SPE is setup and ready to use in KVM. It's supported in KVM only if
> all the CPUs in the system supports SPE.
> 
> However to support heterogenous systems, we need to move the check if
> host supports SPE and do a partial save/restore.

No. Let's just not go down that path. For now, KVM on heterogeneous
systems does not get SPE. If SPE has been enabled on a guest and a CPU
comes up without SPE, this CPU should fail to boot (same as exposing a
feature to userspace).
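
One way to get that behaviour (a sketch only, reusing the existing arm64
cpufeature machinery; ARM64_HAS_SPE is a hypothetical capability number)
would be a system-wide capability entry, so that a late CPU without SPE
is refused once the capability has been established at boot:

/*
 * Sketch: a possible entry for arm64_features[] in
 * arch/arm64/kernel/cpufeature.c.  ARM64_HAS_SPE is hypothetical; the
 * SYSTEM_FEATURE type means a late CPU lacking the feature is parked
 * rather than brought online once the capability is set.
 */
{
	.desc = "Statistical Profiling Extension (SPE)",
	.capability = ARM64_HAS_SPE,
	.type = ARM64_CPUCAP_SYSTEM_FEATURE,
	.matches = has_cpuid_feature,
	.sys_reg = SYS_ID_AA64DFR0_EL1,
	.field_pos = ID_AA64DFR0_PMSVER_SHIFT,
	.sign = FTR_UNSIGNED,
	.min_field_value = 1,
},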

> 
> Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
> Signed-off-by: Andrew Murray <andrew.murray@arm.com>
> ---
>  arch/arm64/kvm/hyp/debug-sr.c | 33 ++++++++++++++++-----------------
>  include/kvm/arm_spe.h         |  6 ++++++
>  2 files changed, 22 insertions(+), 17 deletions(-)
> 
> diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
> index 12429b212a3a..d8d857067e6d 100644
> --- a/arch/arm64/kvm/hyp/debug-sr.c
> +++ b/arch/arm64/kvm/hyp/debug-sr.c
> @@ -86,18 +86,13 @@
>  	}
>  
>  static void __hyp_text
> -__debug_save_spe_nvhe(struct kvm_cpu_context *ctxt, bool full_ctxt)
> +__debug_save_spe_context(struct kvm_cpu_context *ctxt, bool full_ctxt)
>  {
>  	u64 reg;
>  
>  	/* Clear pmscr in case of early return */
>  	ctxt->sys_regs[PMSCR_EL1] = 0;
>  
> -	/* SPE present on this CPU? */
> -	if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
> -						  ID_AA64DFR0_PMSVER_SHIFT))
> -		return;
> -
>  	/* Yes; is it owned by higher EL? */
>  	reg = read_sysreg_s(SYS_PMBIDR_EL1);
>  	if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT))
> @@ -142,7 +137,7 @@ __debug_save_spe_nvhe(struct kvm_cpu_context *ctxt, bool full_ctxt)
>  }
>  
>  static void __hyp_text
> -__debug_restore_spe_nvhe(struct kvm_cpu_context *ctxt, bool full_ctxt)
> +__debug_restore_spe_context(struct kvm_cpu_context *ctxt, bool full_ctxt)
>  {
>  	if (!ctxt->sys_regs[PMSCR_EL1])
>  		return;
> @@ -210,11 +205,14 @@ void __hyp_text __debug_restore_guest_context(struct kvm_vcpu *vcpu)
>  	struct kvm_guest_debug_arch *host_dbg;
>  	struct kvm_guest_debug_arch *guest_dbg;
>  
> +	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
> +	guest_ctxt = &vcpu->arch.ctxt;
> +
> +	__debug_restore_spe_context(guest_ctxt, kvm_arm_spe_v1_ready(vcpu));
> +
>  	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
>  		return;
>  
> -	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
> -	guest_ctxt = &vcpu->arch.ctxt;
>  	host_dbg = &vcpu->arch.host_debug_state.regs;
>  	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
>  
> @@ -232,8 +230,7 @@ void __hyp_text __debug_restore_host_context(struct kvm_vcpu *vcpu)
>  	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
>  	guest_ctxt = &vcpu->arch.ctxt;
>  
> -	if (!has_vhe())
> -		__debug_restore_spe_nvhe(host_ctxt, false);
> +	__debug_restore_spe_context(host_ctxt, kvm_arm_spe_v1_ready(vcpu));

So you now do an unconditional save/restore on the exit path for VHE as
well? Even if the host isn't using the SPE HW? That's not acceptable
as, in most cases, only the host /or/ the guest will use SPE. Here, you
put a measurable overhead on each exit.

If the host is not using SPE, then the restore/save should happen in
vcpu_load/vcpu_put. Only if the host is using SPE should you do
something in the run loop. Of course, this only applies to VHE and
non-VHE must switch eagerly.
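
Roughly along these lines (a sketch only; host_is_using_spe() and the
load/put hooks are hypothetical and not part of this series), the VHE
case could then switch the guest SPE context lazily outside the run
loop whenever the host profiler is idle, keeping the extra sysreg
traffic off the hot exit path:

/*
 * Sketch: lazy VHE switching of the guest SPE context at vcpu_load/put
 * time.  host_is_using_spe() is a hypothetical predicate; when the host
 * is profiling, the switch has to stay in the run loop instead, and
 * non-VHE always switches eagerly at EL2.
 */
static void kvm_vcpu_load_spe(struct kvm_vcpu *vcpu)
{
	if (!has_vhe() || !kvm_arm_spe_v1_ready(vcpu) || host_is_using_spe())
		return;

	__debug_restore_spe_context(&vcpu->arch.ctxt, true);
}

static void kvm_vcpu_put_spe(struct kvm_vcpu *vcpu)
{
	if (!has_vhe() || !kvm_arm_spe_v1_ready(vcpu) || host_is_using_spe())
		return;

	__debug_save_spe_context(&vcpu->arch.ctxt, true);
}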

Thanks,

	M.
-- 
Jazz is not dead. It just smells funny...
Andrew Murray Dec. 24, 2019, 12:15 p.m. UTC | #3
On Fri, Dec 20, 2019 at 06:06:58PM +0000, Mark Rutland wrote:
> On Fri, Dec 20, 2019 at 02:30:16PM +0000, Andrew Murray wrote:
> > From: Sudeep Holla <sudeep.holla@arm.com>
> > 
> > Now that we can save/restore the full SPE controls, we can enable it
> > if SPE is setup and ready to use in KVM. It's supported in KVM only if
> > all the CPUs in the system supports SPE.
> > 
> > However to support heterogenous systems, we need to move the check if
> > host supports SPE and do a partial save/restore.
> 
> I don't think that it makes sense to support this for heterogeneous
> systems, given their SPE capabilities and IMP DEF details will differ.
> 
> Is there some way we can limit this to homogeneous systems?

No problem, I'll see how to limit this.

Thanks,

Andrew Murray

Patch

diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index 12429b212a3a..d8d857067e6d 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -86,18 +86,13 @@ 
 	}
 
 static void __hyp_text
-__debug_save_spe_nvhe(struct kvm_cpu_context *ctxt, bool full_ctxt)
+__debug_save_spe_context(struct kvm_cpu_context *ctxt, bool full_ctxt)
 {
 	u64 reg;
 
 	/* Clear pmscr in case of early return */
 	ctxt->sys_regs[PMSCR_EL1] = 0;
 
-	/* SPE present on this CPU? */
-	if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
-						  ID_AA64DFR0_PMSVER_SHIFT))
-		return;
-
 	/* Yes; is it owned by higher EL? */
 	reg = read_sysreg_s(SYS_PMBIDR_EL1);
 	if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT))
@@ -142,7 +137,7 @@  __debug_save_spe_nvhe(struct kvm_cpu_context *ctxt, bool full_ctxt)
 }
 
 static void __hyp_text
-__debug_restore_spe_nvhe(struct kvm_cpu_context *ctxt, bool full_ctxt)
+__debug_restore_spe_context(struct kvm_cpu_context *ctxt, bool full_ctxt)
 {
 	if (!ctxt->sys_regs[PMSCR_EL1])
 		return;
@@ -210,11 +205,14 @@  void __hyp_text __debug_restore_guest_context(struct kvm_vcpu *vcpu)
 	struct kvm_guest_debug_arch *host_dbg;
 	struct kvm_guest_debug_arch *guest_dbg;
 
+	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+	guest_ctxt = &vcpu->arch.ctxt;
+
+	__debug_restore_spe_context(guest_ctxt, kvm_arm_spe_v1_ready(vcpu));
+
 	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
 		return;
 
-	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
-	guest_ctxt = &vcpu->arch.ctxt;
 	host_dbg = &vcpu->arch.host_debug_state.regs;
 	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
 
@@ -232,8 +230,7 @@  void __hyp_text __debug_restore_host_context(struct kvm_vcpu *vcpu)
 	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
 	guest_ctxt = &vcpu->arch.ctxt;
 
-	if (!has_vhe())
-		__debug_restore_spe_nvhe(host_ctxt, false);
+	__debug_restore_spe_context(host_ctxt, kvm_arm_spe_v1_ready(vcpu));
 
 	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
 		return;
@@ -249,19 +246,21 @@  void __hyp_text __debug_restore_host_context(struct kvm_vcpu *vcpu)
 
 void __hyp_text __debug_save_host_context(struct kvm_vcpu *vcpu)
 {
-	/*
-	 * Non-VHE: Disable and flush SPE data generation
-	 * VHE: The vcpu can run, but it can't hide.
-	 */
 	struct kvm_cpu_context *host_ctxt;
 
 	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
-	if (!has_vhe())
-		__debug_save_spe_nvhe(host_ctxt, false);
+	if (cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
+						 ID_AA64DFR0_PMSVER_SHIFT))
+		__debug_save_spe_context(host_ctxt, kvm_arm_spe_v1_ready(vcpu));
 }
 
 void __hyp_text __debug_save_guest_context(struct kvm_vcpu *vcpu)
 {
+	bool kvm_spe_ready = kvm_arm_spe_v1_ready(vcpu);
+
+	/* SPE present on this vCPU? */
+	if (kvm_spe_ready)
+		__debug_save_spe_context(&vcpu->arch.ctxt, kvm_spe_ready);
 }
 
 u32 __hyp_text __kvm_get_mdcr_el2(void)
diff --git a/include/kvm/arm_spe.h b/include/kvm/arm_spe.h
index 48d118fdb174..30c40b1bc385 100644
--- a/include/kvm/arm_spe.h
+++ b/include/kvm/arm_spe.h
@@ -16,4 +16,10 @@  struct kvm_spe {
 	bool irq_level;
 };
 
+#ifdef CONFIG_KVM_ARM_SPE
+#define kvm_arm_spe_v1_ready(v)		((v)->arch.spe.ready)
+#else
+#define kvm_arm_spe_v1_ready(v)		(false)
+#endif /* CONFIG_KVM_ARM_SPE */
+
 #endif /* __ASM_ARM_KVM_SPE_H */