
[v6,3/6] arm64/kvm: context-switch ptrauth registers

Message ID 1550568271-5319-4-git-send-email-amit.kachhap@arm.com
State New
Series [v6,1/6] arm64/kvm: preserve host HCR_EL2 value

Commit Message

Amit Kachhap Feb. 19, 2019, 9:24 a.m. UTC
From: Mark Rutland <mark.rutland@arm.com>


When pointer authentication is supported, a guest may wish to use it.
This patch adds the necessary KVM infrastructure for this to work, with
a semi-lazy context switch of the pointer auth state.

The pointer authentication feature is only enabled when VHE is built
into the kernel and present in the CPU implementation, so only VHE code
paths are modified.

When we schedule a vcpu, we disable guest usage of pointer
authentication instructions and accesses to the keys. While these are
disabled, we avoid context-switching the keys. When we trap the guest
trying to use pointer authentication functionality, we change to eagerly
context-switching the keys, and enable the feature. The next time the
vcpu is scheduled out/in, we start again. However, the host key registers
are saved at vcpu load time, as they remain constant for each vcpu
schedule.
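
To illustrate, the lazy flow described above reduces to roughly the
following (a condensed sketch built from the helpers added later in this
patch; the body of kvm_arm_vcpu_ptrauth_trap() is inferred from this
description rather than quoted from the diff):

	/* vcpu_load(): trap guest key/instruction use, skip key switching. */
	void kvm_arm_vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
	{
		vcpu->arch.ctxt.hcr_el2 &= ~(HCR_API | HCR_APK);
	}

	/* First trapped use: flip to eagerly context-switching the keys. */
	void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
	{
		if (kvm_supports_ptrauth())
			kvm_arm_vcpu_ptrauth_enable(vcpu); /* sets HCR_API | HCR_APK */
	}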

Pointer authentication consists of address authentication and generic
authentication, and CPUs in a system might have varied support for
either. Where support for either feature is not uniform, it is hidden
from guests via ID register emulation, as a result of the cpufeature
framework in the host.

Unfortunately, address authentication and generic authentication cannot
be trapped separately, as the architecture provides a single EL2 trap
covering both. If we wish to expose one without the other, we cannot
prevent a (badly-written) guest from intermittently using a feature
which is not uniformly supported (when scheduled on a physical CPU which
supports the relevant feature). Hence, this patch expects both types of
authentication to be present in a CPU.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>

[Only VHE, key switch from assembly, kvm_supports_ptrauth
checks, save host key in vcpu_load]
Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>

Reviewed-by: Julien Thierry <julien.thierry@arm.com>

Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Christoffer Dall <christoffer.dall@arm.com>
Cc: kvmarm@lists.cs.columbia.edu
---
 arch/arm/include/asm/kvm_host.h   |   1 +
 arch/arm64/include/asm/kvm_host.h |  23 +++++++++
 arch/arm64/include/asm/kvm_hyp.h  |   7 +++
 arch/arm64/kernel/traps.c         |   1 +
 arch/arm64/kvm/handle_exit.c      |  21 +++++---
 arch/arm64/kvm/hyp/Makefile       |   1 +
 arch/arm64/kvm/hyp/entry.S        |  17 +++++++
 arch/arm64/kvm/hyp/ptrauth-sr.c   | 101 ++++++++++++++++++++++++++++++++++++++
 arch/arm64/kvm/sys_regs.c         |  37 +++++++++++++-
 virt/kvm/arm/arm.c                |   2 +
 10 files changed, 201 insertions(+), 10 deletions(-)
 create mode 100644 arch/arm64/kvm/hyp/ptrauth-sr.c

-- 
2.7.4

Comments

Mark Rutland Feb. 21, 2019, 12:29 p.m. UTC | #1
On Tue, Feb 19, 2019 at 02:54:28PM +0530, Amit Daniel Kachhap wrote:
> From: Mark Rutland <mark.rutland@arm.com>

> 

> When pointer authentication is supported, a guest may wish to use it.

> This patch adds the necessary KVM infrastructure for this to work, with

> a semi-lazy context switch of the pointer auth state.

> 

> Pointer authentication feature is only enabled when VHE is built

> in the kernel and present into CPU implementation so only VHE code

> paths are modified.


Nit: s/into/in the/

> 

> When we schedule a vcpu, we disable guest usage of pointer

> authentication instructions and accesses to the keys. While these are

> disabled, we avoid context-switching the keys. When we trap the guest

> trying to use pointer authentication functionality, we change to eagerly

> context-switching the keys, and enable the feature. The next time the

> vcpu is scheduled out/in, we start again. However the host key registers

> are saved in vcpu load stage as they remain constant for each vcpu

> schedule.

> 

> Pointer authentication consists of address authentication and generic

> authentication, and CPUs in a system might have varied support for

> either. Where support for either feature is not uniform, it is hidden

> from guests via ID register emulation, as a result of the cpufeature

> framework in the host.

> 

> Unfortunately, address authentication and generic authentication cannot

> be trapped separately, as the architecture provides a single EL2 trap

> covering both. If we wish to expose one without the other, we cannot

> prevent a (badly-written) guest from intermittently using a feature

> which is not uniformly supported (when scheduled on a physical CPU which

> supports the relevant feature). Hence, this patch expects both type of

> authentication to be present in a cpu.

> 

> Signed-off-by: Mark Rutland <mark.rutland@arm.com>

> [Only VHE, key switch from from assembly, kvm_supports_ptrauth

> checks, save host key in vcpu_load]


Hmm, why do we need to do the key switch in assembly, given it's not
used in-kernel right now?

Is that in preparation for in-kernel pointer auth usage? If so, please
call that out in the commit message.

[...]

> diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c

> index 4e2fb87..5cac605 100644

> --- a/arch/arm64/kernel/traps.c

> +++ b/arch/arm64/kernel/traps.c

> @@ -749,6 +749,7 @@ static const char *esr_class_str[] = {

>  	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",

>  	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",

>  	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",

> +	[ESR_ELx_EC_PAC]		= "Pointer authentication trap",


For consistency with the other strings, can we please make this "PAC"?

[...]

> diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile

> index 82d1904..17cec99 100644

> --- a/arch/arm64/kvm/hyp/Makefile

> +++ b/arch/arm64/kvm/hyp/Makefile

> @@ -19,6 +19,7 @@ obj-$(CONFIG_KVM_ARM_HOST) += switch.o

>  obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o

>  obj-$(CONFIG_KVM_ARM_HOST) += tlb.o

>  obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o

> +obj-$(CONFIG_KVM_ARM_HOST) += ptrauth-sr.o


Huh, so we're actually doing the switch in C code...

>  # KVM code is run at a different exception code with a different map, so

>  # compiler instrumentation that inserts callbacks or checks into the code may

> diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S

> index 675fdc1..b78cc15 100644

> --- a/arch/arm64/kvm/hyp/entry.S

> +++ b/arch/arm64/kvm/hyp/entry.S

> @@ -64,6 +64,12 @@ ENTRY(__guest_enter)

>  

>  	add	x18, x0, #VCPU_CONTEXT

>  

> +#ifdef	CONFIG_ARM64_PTR_AUTH

> +	// Prepare parameter for __ptrauth_switch_to_guest(vcpu, host, guest).

> +	mov	x2, x18

> +	bl	__ptrauth_switch_to_guest

> +#endif


... and conditionally *calling* that switch code from assembly ...

> +

>  	// Restore guest regs x0-x17

>  	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]

>  	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]

> @@ -118,6 +124,17 @@ ENTRY(__guest_exit)

>  

>  	get_host_ctxt	x2, x3

>  

> +#ifdef	CONFIG_ARM64_PTR_AUTH

> +	// Prepare parameter for __ptrauth_switch_to_host(vcpu, guest, host).

> +	// Save x0, x2 which are used later in callee saved registers.

> +	mov	x19, x0

> +	mov	x20, x2

> +	sub	x0, x1, #VCPU_CONTEXT

> +	ldr	x29, [x2, #CPU_XREG_OFFSET(29)]

> +	bl	__ptrauth_switch_to_host

> +	mov	x0, x19

> +	mov	x2, x20

> +#endif


... which adds a load of boilerplate for no immediate gain.

Do we really need to do this in assembly today?

Thanks,
Mark.
Dave Martin Feb. 21, 2019, 3:51 p.m. UTC | #2
On Thu, Feb 21, 2019 at 12:29:42PM +0000, Mark Rutland wrote:
> On Tue, Feb 19, 2019 at 02:54:28PM +0530, Amit Daniel Kachhap wrote:

> > From: Mark Rutland <mark.rutland@arm.com>

> > 

> > When pointer authentication is supported, a guest may wish to use it.

> > This patch adds the necessary KVM infrastructure for this to work, with

> > a semi-lazy context switch of the pointer auth state.

> > 

> > Pointer authentication feature is only enabled when VHE is built

> > in the kernel and present into CPU implementation so only VHE code

> > paths are modified.

> 

> Nit: s/into/in the/

> 

> > 

> > When we schedule a vcpu, we disable guest usage of pointer

> > authentication instructions and accesses to the keys. While these are

> > disabled, we avoid context-switching the keys. When we trap the guest

> > trying to use pointer authentication functionality, we change to eagerly

> > context-switching the keys, and enable the feature. The next time the

> > vcpu is scheduled out/in, we start again. However the host key registers

> > are saved in vcpu load stage as they remain constant for each vcpu

> > schedule.

> > 

> > Pointer authentication consists of address authentication and generic

> > authentication, and CPUs in a system might have varied support for

> > either. Where support for either feature is not uniform, it is hidden

> > from guests via ID register emulation, as a result of the cpufeature

> > framework in the host.

> > 

> > Unfortunately, address authentication and generic authentication cannot

> > be trapped separately, as the architecture provides a single EL2 trap

> > covering both. If we wish to expose one without the other, we cannot

> > prevent a (badly-written) guest from intermittently using a feature

> > which is not uniformly supported (when scheduled on a physical CPU which

> > supports the relevant feature). Hence, this patch expects both type of

> > authentication to be present in a cpu.

> > 

> > Signed-off-by: Mark Rutland <mark.rutland@arm.com>

> > [Only VHE, key switch from from assembly, kvm_supports_ptrauth

> > checks, save host key in vcpu_load]

> 

> Hmm, why do we need to do the key switch in assembly, given it's not

> used in-kernel right now?

> 

> Is that in preparation for in-kernel pointer auth usage? If so, please

> call that out in the commit message.


[...]

> Huh, so we're actually doing the switch in C code...

> 

> >  # KVM code is run at a different exception code with a different map, so

> >  # compiler instrumentation that inserts callbacks or checks into the code may

> > diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S

> > index 675fdc1..b78cc15 100644

> > --- a/arch/arm64/kvm/hyp/entry.S

> > +++ b/arch/arm64/kvm/hyp/entry.S

> > @@ -64,6 +64,12 @@ ENTRY(__guest_enter)

> >  

> >  	add	x18, x0, #VCPU_CONTEXT

> >  

> > +#ifdef	CONFIG_ARM64_PTR_AUTH

> > +	// Prepare parameter for __ptrauth_switch_to_guest(vcpu, host, guest).

> > +	mov	x2, x18

> > +	bl	__ptrauth_switch_to_guest

> > +#endif

> 

> ... and conditionally *calling* that switch code from assembly ...

> 

> > +

> >  	// Restore guest regs x0-x17

> >  	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]

> >  	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]

> > @@ -118,6 +124,17 @@ ENTRY(__guest_exit)

> >  

> >  	get_host_ctxt	x2, x3

> >  

> > +#ifdef	CONFIG_ARM64_PTR_AUTH

> > +	// Prepare parameter for __ptrauth_switch_to_host(vcpu, guest, host).

> > +	// Save x0, x2 which are used later in callee saved registers.

> > +	mov	x19, x0

> > +	mov	x20, x2

> > +	sub	x0, x1, #VCPU_CONTEXT

> > +	ldr	x29, [x2, #CPU_XREG_OFFSET(29)]

> > +	bl	__ptrauth_switch_to_host

> > +	mov	x0, x19

> > +	mov	x2, x20

> > +#endif

> 

> ... which adds a load of boilerplate for no immediate gain.

> 

> Do we really need to do this in assembly today?


If we will need to move this to assembly when we add in-kernel ptrauth
support, it may be best to have it in assembly from the start, to reduce
unnecessary churn.

But having a mix of C and assembly is likely to make things more
complicated: we should go with one or the other IMHO.

Cheers
---Dave
Dave Martin Feb. 21, 2019, 3:53 p.m. UTC | #3
On Tue, Feb 19, 2019 at 02:54:28PM +0530, Amit Daniel Kachhap wrote:
> From: Mark Rutland <mark.rutland@arm.com>

> 

> When pointer authentication is supported, a guest may wish to use it.

> This patch adds the necessary KVM infrastructure for this to work, with

> a semi-lazy context switch of the pointer auth state.

> 

> Pointer authentication feature is only enabled when VHE is built

> in the kernel and present into CPU implementation so only VHE code

> paths are modified.

> 

> When we schedule a vcpu, we disable guest usage of pointer

> authentication instructions and accesses to the keys. While these are

> disabled, we avoid context-switching the keys. When we trap the guest

> trying to use pointer authentication functionality, we change to eagerly

> context-switching the keys, and enable the feature. The next time the

> vcpu is scheduled out/in, we start again. However the host key registers

> are saved in vcpu load stage as they remain constant for each vcpu

> schedule.

> 

> Pointer authentication consists of address authentication and generic

> authentication, and CPUs in a system might have varied support for

> either. Where support for either feature is not uniform, it is hidden

> from guests via ID register emulation, as a result of the cpufeature

> framework in the host.

> 

> Unfortunately, address authentication and generic authentication cannot

> be trapped separately, as the architecture provides a single EL2 trap

> covering both. If we wish to expose one without the other, we cannot

> prevent a (badly-written) guest from intermittently using a feature

> which is not uniformly supported (when scheduled on a physical CPU which

> supports the relevant feature). Hence, this patch expects both type of

> authentication to be present in a cpu.

> 

> Signed-off-by: Mark Rutland <mark.rutland@arm.com>

> [Only VHE, key switch from from assembly, kvm_supports_ptrauth

> checks, save host key in vcpu_load]

> Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>

> Reviewed-by: Julien Thierry <julien.thierry@arm.com>

> Cc: Marc Zyngier <marc.zyngier@arm.com>

> Cc: Christoffer Dall <christoffer.dall@arm.com>

> Cc: kvmarm@lists.cs.columbia.edu

> ---

>  arch/arm/include/asm/kvm_host.h   |   1 +

>  arch/arm64/include/asm/kvm_host.h |  23 +++++++++

>  arch/arm64/include/asm/kvm_hyp.h  |   7 +++

>  arch/arm64/kernel/traps.c         |   1 +

>  arch/arm64/kvm/handle_exit.c      |  21 +++++---

>  arch/arm64/kvm/hyp/Makefile       |   1 +

>  arch/arm64/kvm/hyp/entry.S        |  17 +++++++

>  arch/arm64/kvm/hyp/ptrauth-sr.c   | 101 ++++++++++++++++++++++++++++++++++++++

>  arch/arm64/kvm/sys_regs.c         |  37 +++++++++++++-

>  virt/kvm/arm/arm.c                |   2 +

>  10 files changed, 201 insertions(+), 10 deletions(-)

>  create mode 100644 arch/arm64/kvm/hyp/ptrauth-sr.c


[...]

> diff --git a/arch/arm64/kvm/hyp/ptrauth-sr.c b/arch/arm64/kvm/hyp/ptrauth-sr.c

> new file mode 100644

> index 0000000..528ee6e

> --- /dev/null

> +++ b/arch/arm64/kvm/hyp/ptrauth-sr.c

> @@ -0,0 +1,101 @@

> +// SPDX-License-Identifier: GPL-2.0

> +/*

> + * arch/arm64/kvm/hyp/ptrauth-sr.c: Guest/host ptrauth save/restore

> + *

> + * Copyright 2018 Arm Limited

> + * Author: Mark Rutland <mark.rutland@arm.com>

> + *         Amit Daniel Kachhap <amit.kachhap@arm.com>

> + */

> +#include <linux/compiler.h>

> +#include <linux/kvm_host.h>

> +

> +#include <asm/cpucaps.h>

> +#include <asm/cpufeature.h>

> +#include <asm/kvm_asm.h>

> +#include <asm/kvm_hyp.h>

> +#include <asm/pointer_auth.h>

> +

> +static __always_inline bool __ptrauth_is_enabled(struct kvm_vcpu *vcpu)

> +{

> +	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&

> +			vcpu->arch.ctxt.hcr_el2 & (HCR_API | HCR_APK);

> +}

> +

> +#define __ptrauth_save_key(regs, key)						\

> +({										\

> +	regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\

> +	regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\

> +})

> +

> +static __always_inline void __ptrauth_save_state(struct kvm_cpu_context *ctxt)


Why __always_inline?

> +{

> +	__ptrauth_save_key(ctxt->sys_regs, APIA);

> +	__ptrauth_save_key(ctxt->sys_regs, APIB);

> +	__ptrauth_save_key(ctxt->sys_regs, APDA);

> +	__ptrauth_save_key(ctxt->sys_regs, APDB);

> +	__ptrauth_save_key(ctxt->sys_regs, APGA);

> +}

> +

> +#define __ptrauth_restore_key(regs, key) 					\

> +({										\

> +	write_sysreg_s(regs[key ## KEYLO_EL1], SYS_ ## key ## KEYLO_EL1);	\

> +	write_sysreg_s(regs[key ## KEYHI_EL1], SYS_ ## key ## KEYHI_EL1);	\

> +})

> +

> +static __always_inline void __ptrauth_restore_state(struct kvm_cpu_context *ctxt)


Same here.  I would hope these just need to be marked with the correct
function attribute to disable ptrauth by the compiler.  I don't see why
it makes a difference whether it's inline or not.

If the compiler semantics are not sufficiently clear, make it a macro.

(Bikeshedding here, so if you feel this has already been discussed to
death I'm happy for this to stay as-is.)
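
For illustration, the kind of annotation being discussed could look like
this (a sketch only; it assumes a toolchain that accepts the AArch64
"branch-protection" target attribute, e.g. GCC 9 or a recent clang; with
older compilers the whole file would instead need to be built with
-mbranch-protection=none):

	/*
	 * Hypothetical __noptrauth marker to stop the compiler emitting
	 * pointer-auth instructions in functions that swap the keys.
	 */
	#define __noptrauth __attribute__((target("branch-protection=none")))

	void __noptrauth __ptrauth_switch_to_host(struct kvm_vcpu *vcpu,
						  struct kvm_cpu_context *guest_ctxt,
						  struct kvm_cpu_context *host_ctxt);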

> +{

> +	__ptrauth_restore_key(ctxt->sys_regs, APIA);

> +	__ptrauth_restore_key(ctxt->sys_regs, APIB);

> +	__ptrauth_restore_key(ctxt->sys_regs, APDA);

> +	__ptrauth_restore_key(ctxt->sys_regs, APDB);

> +	__ptrauth_restore_key(ctxt->sys_regs, APGA);

> +}

> +

> +/**

> + * This function changes the key so assign Pointer Authentication safe

> + * GCC attribute if protected by it.

> + */


(I'd have preferred to keep __noptrauth here and define it do nothing for
now.  But I'll defer to others on that, since this has already been
discussed...)

> +void __ptrauth_switch_to_guest(struct kvm_vcpu *vcpu,

> +				  struct kvm_cpu_context *host_ctxt,

> +				  struct kvm_cpu_context *guest_ctxt)

> +{

> +	if (!__ptrauth_is_enabled(vcpu))

> +		return;

> +

> +	__ptrauth_restore_state(guest_ctxt);

> +}

> +

> +/**

> + * This function changes the key so assign Pointer Authentication safe

> + * GCC attribute if protected by it.

> + */

> +void __ptrauth_switch_to_host(struct kvm_vcpu *vcpu,

> +				 struct kvm_cpu_context *guest_ctxt,

> +				 struct kvm_cpu_context *host_ctxt)

> +{

> +	if (!__ptrauth_is_enabled(vcpu))

> +		return;

> +

> +	__ptrauth_save_state(guest_ctxt);

> +	__ptrauth_restore_state(host_ctxt);

> +}

> +

> +/**

> + * kvm_arm_vcpu_ptrauth_reset - resets ptrauth for vcpu schedule

> + *

> + * @vcpu: The VCPU pointer

> + *

> + * This function may be used to disable ptrauth and use it in a lazy context

> + * via traps. However host key registers are saved here as they dont change

> + * during host/guest switch.

> + */

> +void kvm_arm_vcpu_ptrauth_reset(struct kvm_vcpu *vcpu)


I feel this is not a good name.  It sounds too much like it resets the
registers as part of vcpu reset, whereas really it's doing something
completely different.

(Do you reset the regs anywhere btw?  I may have missed it...)

> +{

> +	struct kvm_cpu_context *host_ctxt;

> +

> +	if (kvm_supports_ptrauth()) {

> +		kvm_arm_vcpu_ptrauth_disable(vcpu);

> +		host_ctxt = vcpu->arch.host_cpu_context;

> +		__ptrauth_save_state(host_ctxt);

> +	}

> +}

> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c

> index a6c9381..12529df 100644

> --- a/arch/arm64/kvm/sys_regs.c

> +++ b/arch/arm64/kvm/sys_regs.c

> @@ -986,6 +986,32 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,

>  	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),					\

>  	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }

>  

> +

> +void kvm_arm_vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)

> +{

> +	vcpu->arch.ctxt.hcr_el2 |= (HCR_API | HCR_APK);


Pedantic nit: surplus ().

(Although opinions differ, and keeping them looks more symmetric with
kvm_arm_vcpu_ptrauth_disable() -- either way, the code can stay as-is if
you prefer.)

> +}

> +

> +void kvm_arm_vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)

> +{

> +	vcpu->arch.ctxt.hcr_el2 &= ~(HCR_API | HCR_APK);

> +}

> +

> +static bool trap_ptrauth(struct kvm_vcpu *vcpu,

> +			 struct sys_reg_params *p,

> +			 const struct sys_reg_desc *rd)

> +{

> +	kvm_arm_vcpu_ptrauth_trap(vcpu);

> +	return false;


Can we ever get here?  Won't PAC traps always be handled via
handle_exit()?

Or can we also take sysreg access traps when the guest tries to access
the ptrauth key registers?

(I'm now wondering how this works for SVE.)

> +}

> +

> +#define __PTRAUTH_KEY(k)						\

> +	{ SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k }

> +

> +#define PTRAUTH_KEY(k)							\

> +	__PTRAUTH_KEY(k ## KEYLO_EL1),					\

> +	__PTRAUTH_KEY(k ## KEYHI_EL1)

> +

>  static bool access_cntp_tval(struct kvm_vcpu *vcpu,

>  		struct sys_reg_params *p,

>  		const struct sys_reg_desc *r)

> @@ -1045,9 +1071,10 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)

>  					 (0xfUL << ID_AA64ISAR1_API_SHIFT) |

>  					 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |

>  					 (0xfUL << ID_AA64ISAR1_GPI_SHIFT);

> -		if (val & ptrauth_mask)

> +		if (!kvm_supports_ptrauth()) {


Don't we now always print this when ptrauth is not supported?

Previously we only printed a message in the interesting case, i.e.,
where the host supports ptrauth but we cannot offer it to the guest.

>  			kvm_debug("ptrauth unsupported for guests, suppressing\n");

> -		val &= ~ptrauth_mask;

> +			val &= ~ptrauth_mask;

> +		}

>  	} else if (id == SYS_ID_AA64MMFR1_EL1) {

>  		if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))

>  			kvm_debug("LORegions unsupported for guests, suppressing\n");

> @@ -1316,6 +1343,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {

>  	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },

>  	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

>  

> +	PTRAUTH_KEY(APIA),

> +	PTRAUTH_KEY(APIB),

> +	PTRAUTH_KEY(APDA),

> +	PTRAUTH_KEY(APDB),

> +	PTRAUTH_KEY(APGA),

> +

>  	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },

>  	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },

>  	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

> diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c

> index 2032a66..d7e003f 100644

> --- a/virt/kvm/arm/arm.c

> +++ b/virt/kvm/arm/arm.c

> @@ -388,6 +388,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

>  		vcpu_clear_wfe_traps(vcpu);

>  	else

>  		vcpu_set_wfe_traps(vcpu);

> +

> +	kvm_arm_vcpu_ptrauth_reset(vcpu);

>  }

>  

>  void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)

> -- 

> 2.7.4

> 

> _______________________________________________

> kvmarm mailing list

> kvmarm@lists.cs.columbia.edu

> https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
James Morse Feb. 26, 2019, 6:31 p.m. UTC | #4
Hi Amit,

On 19/02/2019 09:24, Amit Daniel Kachhap wrote:
> From: Mark Rutland <mark.rutland@arm.com>

> 

> When pointer authentication is supported, a guest may wish to use it.

> This patch adds the necessary KVM infrastructure for this to work, with

> a semi-lazy context switch of the pointer auth state.

> 

> Pointer authentication feature is only enabled when VHE is built

> in the kernel and present into CPU implementation so only VHE code

> paths are modified.


> When we schedule a vcpu, we disable guest usage of pointer

> authentication instructions and accesses to the keys. While these are

> disabled, we avoid context-switching the keys. When we trap the guest

> trying to use pointer authentication functionality, we change to eagerly

> context-switching the keys, and enable the feature. The next time the

> vcpu is scheduled out/in, we start again.


> However the host key registers

> are saved in vcpu load stage as they remain constant for each vcpu

> schedule.


(I think we can get away with doing this later ... with the hope of doing it never!)


> Pointer authentication consists of address authentication and generic

> authentication, and CPUs in a system might have varied support for

> either. Where support for either feature is not uniform, it is hidden

> from guests via ID register emulation, as a result of the cpufeature

> framework in the host.

> 

> Unfortunately, address authentication and generic authentication cannot

> be trapped separately, as the architecture provides a single EL2 trap

> covering both. If we wish to expose one without the other, we cannot

> prevent a (badly-written) guest from intermittently using a feature

> which is not uniformly supported (when scheduled on a physical CPU which

> supports the relevant feature). Hence, this patch expects both type of

> authentication to be present in a cpu.



> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h

> index 2f1bb86..1bacf78 100644

> --- a/arch/arm64/include/asm/kvm_host.h

> +++ b/arch/arm64/include/asm/kvm_host.h

> @@ -146,6 +146,18 @@ enum vcpu_sysreg {


> +static inline bool kvm_supports_ptrauth(void)

> +{

> +	return has_vhe() && system_supports_address_auth() &&

> +				system_supports_generic_auth();

> +}


Do we intend to support the implementation defined algorithm? I'd assumed not.

system_supports_address_auth() is defined as:
| static inline bool system_supports_address_auth(void)
| {
| 	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
| 		(cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
| 		cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF));
| }


So we could return true from kvm_supports_ptrauth() even if we only support the imp-def
algorithm.

I think we should hide the imp-def ptrauth support as KVM hides all other imp-def
features. This lets us avoid trying to migrate values that have been signed with the
imp-def algorithm.

I'm worried that it could include some value that we can't migrate (e.g. the SoC serial
number). Does the ARM-ARM say this can't happen?

All I can find is D5.1.5 "Pointer authentication in AArch64 state" of DDI0487D.a which has
this clause for the imp-def algorithm:
| For a set of arguments passed to the function, must give the same result for all PEs
| that a thread of execution could migrate between.

... with KVM we've extended the scope of migration significantly.


Could we check the cpus_have_const_cap() values for the two architected algorithms directly?
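
Something like the following, perhaps (a sketch of that suggestion; it
assumes the generic-auth capabilities follow the same naming as the
address-auth ones, and it deliberately ignores the IMP_DEF variants):

	static inline bool kvm_supports_ptrauth(void)
	{
		return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) && has_vhe() &&
			cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) &&
			cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_ARCH);
	}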


> diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h

> index 6e65cad..09e061a 100644

> --- a/arch/arm64/include/asm/kvm_hyp.h

> +++ b/arch/arm64/include/asm/kvm_hyp.h

> @@ -153,6 +153,13 @@ bool __fpsimd_enabled(void);

>  void activate_traps_vhe_load(struct kvm_vcpu *vcpu);

>  void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);

>  

> +void __ptrauth_switch_to_guest(struct kvm_vcpu *vcpu,

> +			       struct kvm_cpu_context *host_ctxt,

> +			       struct kvm_cpu_context *guest_ctxt);

> +void __ptrauth_switch_to_host(struct kvm_vcpu *vcpu,

> +			      struct kvm_cpu_context *guest_ctxt,

> +			      struct kvm_cpu_context *host_ctxt);



Why do you need the vcpu and the guest_ctxt?
Would it be possible for these to just take the vcpu, and to pull the host context from
the per-cpu variable?
This would avoid any future bugs where the ctxts are the wrong way round; taking two is
unusual in KVM, but necessary here.

As you're calling these from asm you want the compiler to do as much of the type mangling
as possible.
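
A sketch of the single-parameter variant (the host-side twin would pull
its context from vcpu->arch.host_cpu_context, as the patch already does
in kvm_arm_vcpu_ptrauth_reset(), so only the vcpu pointer need be passed
from assembly):

	void __ptrauth_switch_to_guest(struct kvm_vcpu *vcpu)
	{
		if (!__ptrauth_is_enabled(vcpu))
			return;

		__ptrauth_restore_state(&vcpu->arch.ctxt);
	}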


> diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c

> index 4e2fb87..5cac605 100644

> --- a/arch/arm64/kernel/traps.c

> +++ b/arch/arm64/kernel/traps.c

> @@ -749,6 +749,7 @@ static const char *esr_class_str[] = {

>  	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",

>  	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",

>  	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",

> +	[ESR_ELx_EC_PAC]		= "Pointer authentication trap",

>  	[ESR_ELx_EC_CP14_64]		= "CP14 MCRR/MRRC",

>  	[ESR_ELx_EC_ILL]		= "PSTATE.IL",

>  	[ESR_ELx_EC_SVC32]		= "SVC (AArch32)",


Is this needed? Or was it just missing from the parts already merged? (Should it be a
separate patch for the arch code?)

It looks like KVM only prints it from kvm_handle_unknown_ec(), which would never happen as
arm_exit_handlers[] has an entry for ESR_ELx_EC_PAC.

Is it true that the host could never take this trap either, as it can only be taken when
HCR_EL2.TGE is clear?
(breadcrumbs from the ESR_ELx definition to "Trap to EL2 of EL0 accesses to Pointer
authentication instructions")


> diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S

> index 675fdc1..b78cc15 100644

> --- a/arch/arm64/kvm/hyp/entry.S

> +++ b/arch/arm64/kvm/hyp/entry.S

> @@ -64,6 +64,12 @@ ENTRY(__guest_enter)

>  

>  	add	x18, x0, #VCPU_CONTEXT

>  

> +#ifdef	CONFIG_ARM64_PTR_AUTH

> +	// Prepare parameter for __ptrauth_switch_to_guest(vcpu, host, guest).

> +	mov	x2, x18

> +	bl	__ptrauth_switch_to_guest

> +#endif


This calls back into C code with x18 clobbered... is that allowed?
x18 has this weird platform-register/temporary-register behaviour that depends on the
compiler. The PCS[0] has a note that 'hand-coded assembler should avoid it entirely'!

Can we assume that compiler-generated code is using it for something and depends on that
value, which means we need to preserve or save/restore it when calling into C?


The upshot? Could you use one of the callee saved registers instead of x18, then move it
after your C call.


> @@ -118,6 +124,17 @@ ENTRY(__guest_exit)

>  

>  	get_host_ctxt	x2, x3

>  

> +#ifdef	CONFIG_ARM64_PTR_AUTH

> +	// Prepare parameter for __ptrauth_switch_to_host(vcpu, guest, host).

> +	// Save x0, x2 which are used later in callee saved registers.

> +	mov	x19, x0

> +	mov	x20, x2

> +	sub	x0, x1, #VCPU_CONTEXT


> +	ldr	x29, [x2, #CPU_XREG_OFFSET(29)]


Is this to make the stack-trace look plausible?


> +	bl	__ptrauth_switch_to_host

> +	mov	x0, x19

> +	mov	x2, x20

> +#endif


(ditching the host_ctxt would let you move this above get_host_ctxt and the need to
preserve its result)


> diff --git a/arch/arm64/kvm/hyp/ptrauth-sr.c b/arch/arm64/kvm/hyp/ptrauth-sr.c

> new file mode 100644

> index 0000000..528ee6e

> --- /dev/null

> +++ b/arch/arm64/kvm/hyp/ptrauth-sr.c

> @@ -0,0 +1,101 @@


> +static __always_inline bool __ptrauth_is_enabled(struct kvm_vcpu *vcpu)


This __always_inline still looks weird! You said it might be needed to make it function
pointer safe. If it is, could you add a comment explaining why.

(alternatives would be making it an #ifdef, disabling ptrauth for the whole file, or
annotating this function too)


> +#define __ptrauth_save_key(regs, key)						\

> +({										\

> +	regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\

> +	regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\

> +})

> +

> +static __always_inline void __ptrauth_save_state(struct kvm_cpu_context *ctxt)

> +{

> +	__ptrauth_save_key(ctxt->sys_regs, APIA);

> +	__ptrauth_save_key(ctxt->sys_regs, APIB);

> +	__ptrauth_save_key(ctxt->sys_regs, APDA);

> +	__ptrauth_save_key(ctxt->sys_regs, APDB);

> +	__ptrauth_save_key(ctxt->sys_regs, APGA);

> +}

> +

> +#define __ptrauth_restore_key(regs, key) 					\

> +({										\

> +	write_sysreg_s(regs[key ## KEYLO_EL1], SYS_ ## key ## KEYLO_EL1);	\

> +	write_sysreg_s(regs[key ## KEYHI_EL1], SYS_ ## key ## KEYHI_EL1);	\

> +})

> +

> +static __always_inline void __ptrauth_restore_state(struct kvm_cpu_context *ctxt)

> +{

> +	__ptrauth_restore_key(ctxt->sys_regs, APIA);

> +	__ptrauth_restore_key(ctxt->sys_regs, APIB);

> +	__ptrauth_restore_key(ctxt->sys_regs, APDA);

> +	__ptrauth_restore_key(ctxt->sys_regs, APDB);

> +	__ptrauth_restore_key(ctxt->sys_regs, APGA);


Are writes to these registers self synchronising? I'd assume not, as they come as a pair.

I think this means we need an isb() here to ensure that when restoring the host registers
the next host authentication attempt uses the key we wrote here? We don't need it for the
guest, so we could put it at the end of __ptrauth_switch_to_host().
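
That would be a one-line addition on the exit path, something like this
(sketch):

	void __ptrauth_switch_to_host(struct kvm_vcpu *vcpu,
				      struct kvm_cpu_context *guest_ctxt,
				      struct kvm_cpu_context *host_ctxt)
	{
		if (!__ptrauth_is_enabled(vcpu))
			return;

		__ptrauth_save_state(guest_ctxt);
		__ptrauth_restore_state(host_ctxt);
		isb();	/* ensure later host key use sees the restored values */
	}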


> +/**

> + * This function changes the key so assign Pointer Authentication safe

> + * GCC attribute if protected by it.

> + */


... this comment is the reminder for 'once we have host kernel ptrauth support'? Could we
add something to say that kernel support is when the attribute would be needed? Otherwise
it reads like we're waiting for GCC support.

(I assume LLVM has a similar attribute ... is it exactly the same?)


> +void __ptrauth_switch_to_guest(struct kvm_vcpu *vcpu,

> +				  struct kvm_cpu_context *host_ctxt,

> +				  struct kvm_cpu_context *guest_ctxt)

> +{


> +}


> +void __ptrauth_switch_to_host(struct kvm_vcpu *vcpu,

> +				 struct kvm_cpu_context *guest_ctxt,

> +				 struct kvm_cpu_context *host_ctxt)

> +{


> +}



Could you add NOKPROBE_SYMBOL(symbol_name) for these? This adds them to the kprobe
blacklist, as they aren't part of the __hyp_text (and don't need to be, as it's VHE only).
Without this, you can patch a software breakpoint in here, which KVM won't handle as it's
already switched VBAR for entry to the guest.

Details in 7d82602909ed ("KVM: arm64: Forbid kprobing of the VHE world-switch code")

(... this turned up in a kernel version later than you based on ...)
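
i.e., something like this after the function definitions in
ptrauth-sr.c (sketch; NOKPROBE_SYMBOL needs <linux/kprobes.h>):

	/* Blacklist the VHE switch helpers from kprobes. */
	NOKPROBE_SYMBOL(__ptrauth_switch_to_guest);
	NOKPROBE_SYMBOL(__ptrauth_switch_to_host);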


> +/**

> + * kvm_arm_vcpu_ptrauth_reset - resets ptrauth for vcpu schedule

> + *

> + * @vcpu: The VCPU pointer

> + *

> + * This function may be used to disable ptrauth and use it in a lazy context

> + * via traps. However host key registers are saved here as they dont change

> + * during host/guest switch.

> + */

> +void kvm_arm_vcpu_ptrauth_reset(struct kvm_vcpu *vcpu)

> +{

> +	struct kvm_cpu_context *host_ctxt;

> +

> +	if (kvm_supports_ptrauth()) {

> +		kvm_arm_vcpu_ptrauth_disable(vcpu);

> +		host_ctxt = vcpu->arch.host_cpu_context;


> +		__ptrauth_save_state(host_ctxt);


Could you equally do this host-save in kvm_arm_vcpu_ptrauth_trap() before
kvm_arm_vcpu_ptrauth_enable()? This would avoid saving the keys if the guest never gets
the opportunity to change them. At the moment we do it on every vcpu_load().
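
That reshuffle might look like this (sketch; the body of
kvm_arm_vcpu_ptrauth_trap() is inferred from the commit message, and the
vcpu_load() hook would then only call kvm_arm_vcpu_ptrauth_disable()):

	void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
	{
		if (kvm_supports_ptrauth()) {
			/* Save the host keys only on the first trapped use. */
			__ptrauth_save_state(vcpu->arch.host_cpu_context);
			kvm_arm_vcpu_ptrauth_enable(vcpu);
		}
	}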


As kvm_arm_vcpu_ptrauth_reset() isn't used as part of the world-switch, could we keep it
outside the 'hyp' directory? The Makefile for that directory expects to be building the
hyp text, so it disables KASAN, KCOV and friends.
kvm_arm_vcpu_ptrauth_reset() is safe for all of these, and its good for it to be covered
by this debug infrastructure. Could you move it to guest.c?


> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c

> index a6c9381..12529df 100644

> --- a/arch/arm64/kvm/sys_regs.c

> +++ b/arch/arm64/kvm/sys_regs.c

> @@ -1045,9 +1071,10 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)

>  					 (0xfUL << ID_AA64ISAR1_API_SHIFT) |

>  					 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |

>  					 (0xfUL << ID_AA64ISAR1_GPI_SHIFT);

> -		if (val & ptrauth_mask)

> +		if (!kvm_supports_ptrauth()) {

>  			kvm_debug("ptrauth unsupported for guests, suppressing\n");

> -		val &= ~ptrauth_mask;

> +			val &= ~ptrauth_mask;

> +		}


This means the debug message gets printed even on systems that don't support ptrauth in
hardware. (val & ptrauth_mask) used to cut them out; now kvm_supports_ptrauth() fails if the
static keys are false, and we end up printing this message.
Now that KVM supports pointer auth, I don't think the debug message is useful; can we
remove it? (It would now mean 'you didn't ask for ptrauth to be turned on'.)


Could we always mask out the imp-def bits?
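
i.e., in read_id_reg(), something along these lines (sketch using the
field names from the hunk above, plus the corresponding APA shift):

	/* Always hide the imp-def algorithms ... */
	val &= ~((0xfUL << ID_AA64ISAR1_API_SHIFT) |
		 (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
	/* ... and the architected ones when ptrauth isn't offered. */
	if (!kvm_supports_ptrauth())
		val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_GPA_SHIFT));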


This patch needs to be merged together with 4 & 5 so the user-abi is as it should be. This
means the check_present/restrictions thing needs sorting so they're ready together.


Thanks,

James


[0] http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf
Amit Kachhap Feb. 28, 2019, 9:07 a.m. UTC | #5
Hi Mark,

On 2/21/19 5:59 PM, Mark Rutland wrote:
> On Tue, Feb 19, 2019 at 02:54:28PM +0530, Amit Daniel Kachhap wrote:

>> From: Mark Rutland <mark.rutland@arm.com>

>>

>> When pointer authentication is supported, a guest may wish to use it.

>> This patch adds the necessary KVM infrastructure for this to work, with

>> a semi-lazy context switch of the pointer auth state.

>>

>> Pointer authentication feature is only enabled when VHE is built

>> in the kernel and present into CPU implementation so only VHE code

>> paths are modified.

> 

> Nit: s/into/in the/

ok.
> 

>>

>> When we schedule a vcpu, we disable guest usage of pointer

>> authentication instructions and accesses to the keys. While these are

>> disabled, we avoid context-switching the keys. When we trap the guest

>> trying to use pointer authentication functionality, we change to eagerly

>> context-switching the keys, and enable the feature. The next time the

>> vcpu is scheduled out/in, we start again. However the host key registers

>> are saved in vcpu load stage as they remain constant for each vcpu

>> schedule.

>>

>> Pointer authentication consists of address authentication and generic

>> authentication, and CPUs in a system might have varied support for

>> either. Where support for either feature is not uniform, it is hidden

>> from guests via ID register emulation, as a result of the cpufeature

>> framework in the host.

>>

>> Unfortunately, address authentication and generic authentication cannot

>> be trapped separately, as the architecture provides a single EL2 trap

>> covering both. If we wish to expose one without the other, we cannot

>> prevent a (badly-written) guest from intermittently using a feature

>> which is not uniformly supported (when scheduled on a physical CPU which

>> supports the relevant feature). Hence, this patch expects both type of

>> authentication to be present in a cpu.

>>

>> Signed-off-by: Mark Rutland <mark.rutland@arm.com>

>> [Only VHE, key switch from from assembly, kvm_supports_ptrauth

>> checks, save host key in vcpu_load]

> 

> Hmm, why do we need to do the key switch in assembly, given it's not

> used in-kernel right now?

> 

> Is that in preparation for in-kernel pointer auth usage? If so, please

> call that out in the commit message.

ok sure.
> 

> [...]

> 

>> diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c

>> index 4e2fb87..5cac605 100644

>> --- a/arch/arm64/kernel/traps.c

>> +++ b/arch/arm64/kernel/traps.c

>> @@ -749,6 +749,7 @@ static const char *esr_class_str[] = {

>>   	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",

>>   	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",

>>   	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",

>> +	[ESR_ELx_EC_PAC]		= "Pointer authentication trap",

> 

> For consistency with the other strings, can we please make this "PAC"?

ok. It makes sense.
> 

> [...]

> 

>> diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile

>> index 82d1904..17cec99 100644

>> --- a/arch/arm64/kvm/hyp/Makefile

>> +++ b/arch/arm64/kvm/hyp/Makefile

>> @@ -19,6 +19,7 @@ obj-$(CONFIG_KVM_ARM_HOST) += switch.o

>>   obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o

>>   obj-$(CONFIG_KVM_ARM_HOST) += tlb.o

>>   obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o

>> +obj-$(CONFIG_KVM_ARM_HOST) += ptrauth-sr.o

> 

> Huh, so we're actually doing the switch in C code...

> 

>>   # KVM code is run at a different exception code with a different map, so

>>   # compiler instrumentation that inserts callbacks or checks into the code may

>> diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S

>> index 675fdc1..b78cc15 100644

>> --- a/arch/arm64/kvm/hyp/entry.S

>> +++ b/arch/arm64/kvm/hyp/entry.S

>> @@ -64,6 +64,12 @@ ENTRY(__guest_enter)

>>   

>>   	add	x18, x0, #VCPU_CONTEXT

>>   

>> +#ifdef	CONFIG_ARM64_PTR_AUTH

>> +	// Prepare parameter for __ptrauth_switch_to_guest(vcpu, host, guest).

>> +	mov	x2, x18

>> +	bl	__ptrauth_switch_to_guest

>> +#endif

> 

> ... and conditionally *calling* that switch code from assembly ...

> 

>> +

>>   	// Restore guest regs x0-x17

>>   	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]

>>   	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]

>> @@ -118,6 +124,17 @@ ENTRY(__guest_exit)

>>   

>>   	get_host_ctxt	x2, x3

>>   

>> +#ifdef	CONFIG_ARM64_PTR_AUTH

>> +	// Prepare parameter for __ptrauth_switch_to_host(vcpu, guest, host).

>> +	// Save x0, x2 which are used later in callee saved registers.

>> +	mov	x19, x0

>> +	mov	x20, x2

>> +	sub	x0, x1, #VCPU_CONTEXT

>> +	ldr	x29, [x2, #CPU_XREG_OFFSET(29)]

>> +	bl	__ptrauth_switch_to_host

>> +	mov	x0, x19

>> +	mov	x2, x20

>> +#endif

> 

> ... which adds a load of boilerplate for no immediate gain.

Some parameter optimization may be possible here, as the guest and host
contexts can be derived from the vcpu itself, as James suggested in other
review comments. I thought about doing all the save/restore in assembly,
but since the host keys are now saved at the vcpu_load stage in C as an
optimization, those C routines are reused here as well.

Again, all of this code is beneficial with in-kernel ptrauth, so in case
of strong objection I may revert to the old way.
> 

> Do we really need to do this in assembly today?

During the last patchset review [1], James provided a lot of supporting 
arguments to have these switch routines called from assembly due to 
function outlining between kvm_vcpu_run_vhe() and __kvm_vcpu_run_nvhe().

[1]: https://lkml.org/lkml/2019/1/31/662

Thanks,
Amit D
> 

> Thanks,

> Mark.

>
Amit Kachhap March 1, 2019, 6:17 a.m. UTC | #6
On 2/21/19 9:21 PM, Dave Martin wrote:
> On Thu, Feb 21, 2019 at 12:29:42PM +0000, Mark Rutland wrote:

>> On Tue, Feb 19, 2019 at 02:54:28PM +0530, Amit Daniel Kachhap wrote:

>>> From: Mark Rutland <mark.rutland@arm.com>

>>>

>>> When pointer authentication is supported, a guest may wish to use it.

>>> This patch adds the necessary KVM infrastructure for this to work, with

>>> a semi-lazy context switch of the pointer auth state.

>>>

>>> Pointer authentication feature is only enabled when VHE is built

>>> in the kernel and present into CPU implementation so only VHE code

>>> paths are modified.

>>

>> Nit: s/into/in the/

>>

>>>

>>> When we schedule a vcpu, we disable guest usage of pointer

>>> authentication instructions and accesses to the keys. While these are

>>> disabled, we avoid context-switching the keys. When we trap the guest

>>> trying to use pointer authentication functionality, we change to eagerly

>>> context-switching the keys, and enable the feature. The next time the

>>> vcpu is scheduled out/in, we start again. However the host key registers

>>> are saved in vcpu load stage as they remain constant for each vcpu

>>> schedule.

>>>

>>> Pointer authentication consists of address authentication and generic

>>> authentication, and CPUs in a system might have varied support for

>>> either. Where support for either feature is not uniform, it is hidden

>>> from guests via ID register emulation, as a result of the cpufeature

>>> framework in the host.

>>>

>>> Unfortunately, address authentication and generic authentication cannot

>>> be trapped separately, as the architecture provides a single EL2 trap

>>> covering both. If we wish to expose one without the other, we cannot

>>> prevent a (badly-written) guest from intermittently using a feature

>>> which is not uniformly supported (when scheduled on a physical CPU which

>>> supports the relevant feature). Hence, this patch expects both type of

>>> authentication to be present in a cpu.

>>>

>>> Signed-off-by: Mark Rutland <mark.rutland@arm.com>

>>> [Only VHE, key switch from from assembly, kvm_supports_ptrauth

>>> checks, save host key in vcpu_load]

>>

>> Hmm, why do we need to do the key switch in assembly, given it's not

>> used in-kernel right now?

>>

>> Is that in preparation for in-kernel pointer auth usage? If so, please

>> call that out in the commit message.

> 

> [...]

> 

>> Huh, so we're actually doing the switch in C code...

>>

>>>   # KVM code is run at a different exception code with a different map, so

>>>   # compiler instrumentation that inserts callbacks or checks into the code may

>>> diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S

>>> index 675fdc1..b78cc15 100644

>>> --- a/arch/arm64/kvm/hyp/entry.S

>>> +++ b/arch/arm64/kvm/hyp/entry.S

>>> @@ -64,6 +64,12 @@ ENTRY(__guest_enter)

>>>   

>>>   	add	x18, x0, #VCPU_CONTEXT

>>>   

>>> +#ifdef	CONFIG_ARM64_PTR_AUTH

>>> +	// Prepare parameter for __ptrauth_switch_to_guest(vcpu, host, guest).

>>> +	mov	x2, x18

>>> +	bl	__ptrauth_switch_to_guest

>>> +#endif

>>

>> ... and conditionally *calling* that switch code from assembly ...

>>

>>> +

>>>   	// Restore guest regs x0-x17

>>>   	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]

>>>   	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]

>>> @@ -118,6 +124,17 @@ ENTRY(__guest_exit)

>>>   

>>>   	get_host_ctxt	x2, x3

>>>   

>>> +#ifdef	CONFIG_ARM64_PTR_AUTH

>>> +	// Prepare parameter for __ptrauth_switch_to_host(vcpu, guest, host).

>>> +	// Save x0, x2 which are used later in callee saved registers.

>>> +	mov	x19, x0

>>> +	mov	x20, x2

>>> +	sub	x0, x1, #VCPU_CONTEXT

>>> +	ldr	x29, [x2, #CPU_XREG_OFFSET(29)]

>>> +	bl	__ptrauth_switch_to_host

>>> +	mov	x0, x19

>>> +	mov	x2, x20

>>> +#endif

>>

>> ... which adds a load of boilerplate for no immediate gain.

>>

>> Do we really need to do this in assembly today?

> 

> If we will need to move this to assembly when we add in-kernel ptrauth

> support, it may be best to have it in assembly from the start, to reduce

> unnecessary churn.

> 

> But having a mix of C and assembly is likely to make things more

> complicated: we should go with one or the other IMHO.

ok, I will check on this.

Thanks,
Amit D
> 

> Cheers

> ---Dave

>
Amit Kachhap March 1, 2019, 9:35 a.m. UTC | #7
Hi,

On 2/21/19 9:23 PM, Dave Martin wrote:
> On Tue, Feb 19, 2019 at 02:54:28PM +0530, Amit Daniel Kachhap wrote:

>> From: Mark Rutland <mark.rutland@arm.com>

>>

>> When pointer authentication is supported, a guest may wish to use it.

>> This patch adds the necessary KVM infrastructure for this to work, with

>> a semi-lazy context switch of the pointer auth state.

>>

>> Pointer authentication feature is only enabled when VHE is built

>> in the kernel and present into CPU implementation so only VHE code

>> paths are modified.

>>

>> When we schedule a vcpu, we disable guest usage of pointer

>> authentication instructions and accesses to the keys. While these are

>> disabled, we avoid context-switching the keys. When we trap the guest

>> trying to use pointer authentication functionality, we change to eagerly

>> context-switching the keys, and enable the feature. The next time the

>> vcpu is scheduled out/in, we start again. However the host key registers

>> are saved in vcpu load stage as they remain constant for each vcpu

>> schedule.

>>

>> Pointer authentication consists of address authentication and generic

>> authentication, and CPUs in a system might have varied support for

>> either. Where support for either feature is not uniform, it is hidden

>> from guests via ID register emulation, as a result of the cpufeature

>> framework in the host.

>>

>> Unfortunately, address authentication and generic authentication cannot

>> be trapped separately, as the architecture provides a single EL2 trap

>> covering both. If we wish to expose one without the other, we cannot

>> prevent a (badly-written) guest from intermittently using a feature

>> which is not uniformly supported (when scheduled on a physical CPU which

>> supports the relevant feature). Hence, this patch expects both type of

>> authentication to be present in a cpu.

>>

>> Signed-off-by: Mark Rutland <mark.rutland@arm.com>

>> [Only VHE, key switch from from assembly, kvm_supports_ptrauth

>> checks, save host key in vcpu_load]

>> Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>

>> Reviewed-by: Julien Thierry <julien.thierry@arm.com>

>> Cc: Marc Zyngier <marc.zyngier@arm.com>

>> Cc: Christoffer Dall <christoffer.dall@arm.com>

>> Cc: kvmarm@lists.cs.columbia.edu

>> ---

>>   arch/arm/include/asm/kvm_host.h   |   1 +

>>   arch/arm64/include/asm/kvm_host.h |  23 +++++++++

>>   arch/arm64/include/asm/kvm_hyp.h  |   7 +++

>>   arch/arm64/kernel/traps.c         |   1 +

>>   arch/arm64/kvm/handle_exit.c      |  21 +++++---

>>   arch/arm64/kvm/hyp/Makefile       |   1 +

>>   arch/arm64/kvm/hyp/entry.S        |  17 +++++++

>>   arch/arm64/kvm/hyp/ptrauth-sr.c   | 101 ++++++++++++++++++++++++++++++++++++++

>>   arch/arm64/kvm/sys_regs.c         |  37 +++++++++++++-

>>   virt/kvm/arm/arm.c                |   2 +

>>   10 files changed, 201 insertions(+), 10 deletions(-)

>>   create mode 100644 arch/arm64/kvm/hyp/ptrauth-sr.c

> 

> [...]

> 

>> diff --git a/arch/arm64/kvm/hyp/ptrauth-sr.c b/arch/arm64/kvm/hyp/ptrauth-sr.c

>> new file mode 100644

>> index 0000000..528ee6e

>> --- /dev/null

>> +++ b/arch/arm64/kvm/hyp/ptrauth-sr.c

>> @@ -0,0 +1,101 @@

>> +// SPDX-License-Identifier: GPL-2.0

>> +/*

>> + * arch/arm64/kvm/hyp/ptrauth-sr.c: Guest/host ptrauth save/restore

>> + *

>> + * Copyright 2018 Arm Limited

>> + * Author: Mark Rutland <mark.rutland@arm.com>

>> + *         Amit Daniel Kachhap <amit.kachhap@arm.com>

>> + */

>> +#include <linux/compiler.h>

>> +#include <linux/kvm_host.h>

>> +

>> +#include <asm/cpucaps.h>

>> +#include <asm/cpufeature.h>

>> +#include <asm/kvm_asm.h>

>> +#include <asm/kvm_hyp.h>

>> +#include <asm/pointer_auth.h>

>> +

>> +static __always_inline bool __ptrauth_is_enabled(struct kvm_vcpu *vcpu)

>> +{

>> +	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&

>> +			vcpu->arch.ctxt.hcr_el2 & (HCR_API | HCR_APK);

>> +}

>> +

>> +#define __ptrauth_save_key(regs, key)						\

>> +({										\

>> +	regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\

>> +	regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\

>> +})

>> +

>> +static __always_inline void __ptrauth_save_state(struct kvm_cpu_context *ctxt)

> 

> Why __always_inline?

> 

>> +{

>> +	__ptrauth_save_key(ctxt->sys_regs, APIA);

>> +	__ptrauth_save_key(ctxt->sys_regs, APIB);

>> +	__ptrauth_save_key(ctxt->sys_regs, APDA);

>> +	__ptrauth_save_key(ctxt->sys_regs, APDB);

>> +	__ptrauth_save_key(ctxt->sys_regs, APGA);

>> +}

>> +

>> +#define __ptrauth_restore_key(regs, key) 					\

>> +({										\

>> +	write_sysreg_s(regs[key ## KEYLO_EL1], SYS_ ## key ## KEYLO_EL1);	\

>> +	write_sysreg_s(regs[key ## KEYHI_EL1], SYS_ ## key ## KEYHI_EL1);	\

>> +})

>> +

>> +static __always_inline void __ptrauth_restore_state(struct kvm_cpu_context *ctxt)

> 

> Same here.  I would hope these just need to be marked with the correct

> function attribute to disable ptrauth by the compiler.  I don't see why

> it makes a difference whether it's inline or not.

> 

> If the compiler semantics are not sufficiently clear, make it a macro.

ok.
> 

> (Bikeshedding here, so it you feel this has already been discussed to

> death I'm happy for this to stay as-is.)

> 

>> +{

>> +	__ptrauth_restore_key(ctxt->sys_regs, APIA);

>> +	__ptrauth_restore_key(ctxt->sys_regs, APIB);

>> +	__ptrauth_restore_key(ctxt->sys_regs, APDA);

>> +	__ptrauth_restore_key(ctxt->sys_regs, APDB);

>> +	__ptrauth_restore_key(ctxt->sys_regs, APGA);

>> +}

>> +

>> +/**

>> + * This function changes the key so assign Pointer Authentication safe

>> + * GCC attribute if protected by it.

>> + */

> 

> (I'd have preferred to keep __noptrauth here and define it do nothing for

> now.  But I'll defer to others on that, since this has already been

> discussed...)


OK, the __noptrauth annotation will make it clear. I will add it to all
error-prone C functions in the next iteration.
> 

>> +void __ptrauth_switch_to_guest(struct kvm_vcpu *vcpu,

>> +				  struct kvm_cpu_context *host_ctxt,

>> +				  struct kvm_cpu_context *guest_ctxt)

>> +{

>> +	if (!__ptrauth_is_enabled(vcpu))

>> +		return;

>> +

>> +	__ptrauth_restore_state(guest_ctxt);

>> +}

>> +

>> +/**

>> + * This function changes the key so assign Pointer Authentication safe

>> + * GCC attribute if protected by it.

>> + */

>> +void __ptrauth_switch_to_host(struct kvm_vcpu *vcpu,

>> +				 struct kvm_cpu_context *guest_ctxt,

>> +				 struct kvm_cpu_context *host_ctxt)

>> +{

>> +	if (!__ptrauth_is_enabled(vcpu))

>> +		return;

>> +

>> +	__ptrauth_save_state(guest_ctxt);

>> +	__ptrauth_restore_state(host_ctxt);

>> +}

>> +

>> +/**

>> + * kvm_arm_vcpu_ptrauth_reset - resets ptrauth for vcpu schedule

>> + *

>> + * @vcpu: The VCPU pointer

>> + *

>> + * This function may be used to disable ptrauth and use it in a lazy context

>> + * via traps. However host key registers are saved here as they dont change

>> + * during host/guest switch.

>> + */

>> +void kvm_arm_vcpu_ptrauth_reset(struct kvm_vcpu *vcpu)

> 

> I feel this is not a good name.  It sounds too much like it resets the

> registers as part of vcpu reset, whereas really it's doing something

> completely different.

> 

> (Do you reset the regs anywhere btw?  I may have missed it...)

No, there is no reset of the registers. Maybe a name like
kvm_arm_vcpu_ptrauth_setup_lazy would be better.
> 

>> +{

>> +	struct kvm_cpu_context *host_ctxt;

>> +

>> +	if (kvm_supports_ptrauth()) {

>> +		kvm_arm_vcpu_ptrauth_disable(vcpu);

>> +		host_ctxt = vcpu->arch.host_cpu_context;

>> +		__ptrauth_save_state(host_ctxt);

>> +	}

>> +}

>> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c

>> index a6c9381..12529df 100644

>> --- a/arch/arm64/kvm/sys_regs.c

>> +++ b/arch/arm64/kvm/sys_regs.c

>> @@ -986,6 +986,32 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,

>>   	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),					\

>>   	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }

>>   

>> +

>> +void kvm_arm_vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)

>> +{

>> +	vcpu->arch.ctxt.hcr_el2 |= (HCR_API | HCR_APK);

> 

> Pedantic nit: surplus ().

> 

> (Although opinions differ, and keeping them looks more symmetric with

> kvm_arm_vcpu_ptrauth_disable() -- either way, the code can stay as-is if

> you prefer.)

ok.
> 

>> +}

>> +

>> +void kvm_arm_vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)

>> +{

>> +	vcpu->arch.ctxt.hcr_el2 &= ~(HCR_API | HCR_APK);

>> +}

>> +

>> +static bool trap_ptrauth(struct kvm_vcpu *vcpu,

>> +			 struct sys_reg_params *p,

>> +			 const struct sys_reg_desc *rd)

>> +{

>> +	kvm_arm_vcpu_ptrauth_trap(vcpu);

>> +	return false;

> 

> Can we ever get here?  Won't PAC traps always be handled via

> handle_exit()?

> 

> Or can we also take sysreg access traps when the guest tries to access

> the ptrauth key registers?


When the guest kernel forks a thread, the key registers are accessed to
fill them with ptrauth keys, and HCR_EL2 (the APK bit) is not
appropriately set at that moment. This causes a trap to EL2, and the
above function is invoked.
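
For reference, the guest-side sequence that trips this trap is just a
pair of key-register writes, roughly as below (a sketch; the helper and
field names are assumed, while the sysreg names come from the
architecture):

	/* Guest EL1 side, e.g. on fork: install a freshly generated key. */
	static void ptrauth_keys_install(struct ptrauth_keys *keys)
	{
		/* These writes trap to EL2 while HCR_EL2.APK is clear. */
		write_sysreg_s(keys->apia.lo, SYS_APIAKEYLO_EL1);
		write_sysreg_s(keys->apia.hi, SYS_APIAKEYHI_EL1);
		isb();
	}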
> 

> (I'm now wondering how this works for SVE.)

Not sure. Need to check.
> 

>> +}

>> +

>> +#define __PTRAUTH_KEY(k)						\

>> +	{ SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k }

>> +

>> +#define PTRAUTH_KEY(k)							\

>> +	__PTRAUTH_KEY(k ## KEYLO_EL1),					\

>> +	__PTRAUTH_KEY(k ## KEYHI_EL1)

>> +

>>   static bool access_cntp_tval(struct kvm_vcpu *vcpu,

>>   		struct sys_reg_params *p,

>>   		const struct sys_reg_desc *r)

>> @@ -1045,9 +1071,10 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)

>>   					 (0xfUL << ID_AA64ISAR1_API_SHIFT) |

>>   					 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |

>>   					 (0xfUL << ID_AA64ISAR1_GPI_SHIFT);

>> -		if (val & ptrauth_mask)

>> +		if (!kvm_supports_ptrauth()) {

> 

> Don't we now always print this when ptrauth is not supported?

> 

> Previously we only printed a message in the interesting case, i.e.,

> where the host supports ptrauth but we cannot offer it to the guest.

Yes, agreed. I will add proper checks here to skip the prints for
non-ptrauth hosts.

Thanks,
Amit D
> 

>>   			kvm_debug("ptrauth unsupported for guests, suppressing\n");

>> -		val &= ~ptrauth_mask;

>> +			val &= ~ptrauth_mask;

>> +		}

>>   	} else if (id == SYS_ID_AA64MMFR1_EL1) {

>>   		if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))

>>   			kvm_debug("LORegions unsupported for guests, suppressing\n");

>> @@ -1316,6 +1343,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {

>>   	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },

>>   	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

>>   

>> +	PTRAUTH_KEY(APIA),

>> +	PTRAUTH_KEY(APIB),

>> +	PTRAUTH_KEY(APDA),

>> +	PTRAUTH_KEY(APDB),

>> +	PTRAUTH_KEY(APGA),

>> +

>>   	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },

>>   	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },

>>   	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

>> diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c

>> index 2032a66..d7e003f 100644

>> --- a/virt/kvm/arm/arm.c

>> +++ b/virt/kvm/arm/arm.c

>> @@ -388,6 +388,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

>>   		vcpu_clear_wfe_traps(vcpu);

>>   	else

>>   		vcpu_set_wfe_traps(vcpu);

>> +

>> +	kvm_arm_vcpu_ptrauth_reset(vcpu);

>>   }

>>   

>>   void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)

>> -- 

>> 2.7.4

>>

>> _______________________________________________

>> kvmarm mailing list

>> kvmarm@lists.cs.columbia.edu

>> https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
Amit Kachhap March 4, 2019, 10:51 a.m. UTC | #8
Hi James,

On 2/27/19 12:01 AM, James Morse wrote:
> Hi Amit,

> 

> On 19/02/2019 09:24, Amit Daniel Kachhap wrote:

>> From: Mark Rutland <mark.rutland@arm.com>

>>

>> When pointer authentication is supported, a guest may wish to use it.

>> This patch adds the necessary KVM infrastructure for this to work, with

>> a semi-lazy context switch of the pointer auth state.

>>

>> Pointer authentication feature is only enabled when VHE is built

>> in the kernel and present into CPU implementation so only VHE code

>> paths are modified.

> 

>> When we schedule a vcpu, we disable guest usage of pointer

>> authentication instructions and accesses to the keys. While these are

>> disabled, we avoid context-switching the keys. When we trap the guest

>> trying to use pointer authentication functionality, we change to eagerly

>> context-switching the keys, and enable the feature. The next time the

>> vcpu is scheduled out/in, we start again.

> 

>> However the host key registers

>> are saved in vcpu load stage as they remain constant for each vcpu

>> schedule.

> 

> (I think we can get away with doing this later ... with the hope of doing it never!)

> 

> 

>> Pointer authentication consists of address authentication and generic

>> authentication, and CPUs in a system might have varied support for

>> either. Where support for either feature is not uniform, it is hidden

>> from guests via ID register emulation, as a result of the cpufeature

>> framework in the host.

>>

>> Unfortunately, address authentication and generic authentication cannot

>> be trapped separately, as the architecture provides a single EL2 trap

>> covering both. If we wish to expose one without the other, we cannot

>> prevent a (badly-written) guest from intermittently using a feature

>> which is not uniformly supported (when scheduled on a physical CPU which

>> supports the relevant feature). Hence, this patch expects both type of

>> authentication to be present in a cpu.

> 

> 

>> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h

>> index 2f1bb86..1bacf78 100644

>> --- a/arch/arm64/include/asm/kvm_host.h

>> +++ b/arch/arm64/include/asm/kvm_host.h

>> @@ -146,6 +146,18 @@ enum vcpu_sysreg {

> 

>> +static inline bool kvm_supports_ptrauth(void)

>> +{

>> +	return has_vhe() && system_supports_address_auth() &&

>> +				system_supports_generic_auth();

>> +}

> 

> Do we intend to support the implementation defined algorithm? I'd assumed not.

> 

> system_supports_address_auth() is defined as:

> | static inline bool system_supports_address_auth(void)

> | {

> | 	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&

> | 		(cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||

> | 		cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF));

> | }

> 

> 

> So we could return true from kvm_supports_ptrauth() even if we only support the imp-def

> algorithm.

> 

> I think we should hide the imp-def ptrauth support as KVM hides all other imp-def

> features. This lets us avoid trying to migrate values that have been signed with the

> imp-def algorithm.

I suppose the imp-def algorithm should not make any difference in the
migration case, even if the two systems use different imp-def
algorithms. As the LR PAC field generation happens at runtime, the only
things that matter are the key values and SP, which are taken care of.
Also, the model on which I am testing uses the imp-def algorithm. Or am
I missing something?
> 

> I'm worried that it could include some value that we can't migrate (e.g. the SoC serial

> number). Does the ARM-ARM say this can't happen?

> 

> All I can find is D5.1.5 "Pointer authentication in AArch64 state" of DDI0487D.a which has

> this clause for the imp-def algorithm:

> | For a set of arguments passed to the function, must give the same result for all PEs

> | that a thread of execution could migrate between.

> 

> ... with KVM we've extended the scope of migration significantly.

> 

> 

> Could we check the cpus_have_const_cap() values for the two architected algorithms directly?

> 
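
If the imp-def algorithms do end up being hidden, a minimal sketch of
the check suggested above (testing the architected-algorithm cpucaps
directly) could be:

        static inline bool kvm_supports_ptrauth(void)
        {
                return has_vhe() &&
                       cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) &&
                       cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_ARCH);
        }
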

> 

>> diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h

>> index 6e65cad..09e061a 100644

>> --- a/arch/arm64/include/asm/kvm_hyp.h

>> +++ b/arch/arm64/include/asm/kvm_hyp.h

>> @@ -153,6 +153,13 @@ bool __fpsimd_enabled(void);

>>   void activate_traps_vhe_load(struct kvm_vcpu *vcpu);

>>   void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);

>>   

>> +void __ptrauth_switch_to_guest(struct kvm_vcpu *vcpu,

>> +			       struct kvm_cpu_context *host_ctxt,

>> +			       struct kvm_cpu_context *guest_ctxt);

>> +void __ptrauth_switch_to_host(struct kvm_vcpu *vcpu,

>> +			      struct kvm_cpu_context *guest_ctxt,

>> +			      struct kvm_cpu_context *host_ctxt);

> 

> 

> Why do you need the vcpu and the guest_ctxt?

> Would it be possible for these to just take the vcpu, and to pull the host context from

> the per-cpu variable?

> This would avoid any future bugs where the ctxt's are the wrong way round, taking two is

> unusual in KVM, but necessary here.

> 

> As you're calling these from asm you want the compiler to do as much of the type mangling

> as possible.

Yes, it is possible. I will implement this in my upcoming version.
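
A sketch of the guest-side helper under that scheme: only the vcpu is
passed, the guest context comes from the vcpu itself, and the host side
could likewise use vcpu->arch.host_cpu_context:

        void __ptrauth_switch_to_guest(struct kvm_vcpu *vcpu)
        {
                if (!__ptrauth_is_enabled(vcpu))
                        return;

                __ptrauth_restore_state(&vcpu->arch.ctxt);
        }
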
> 

> 

>> diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c

>> index 4e2fb87..5cac605 100644

>> --- a/arch/arm64/kernel/traps.c

>> +++ b/arch/arm64/kernel/traps.c

>> @@ -749,6 +749,7 @@ static const char *esr_class_str[] = {

>>   	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",

>>   	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",

>>   	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",

>> +	[ESR_ELx_EC_PAC]		= "Pointer authentication trap",

>>   	[ESR_ELx_EC_CP14_64]		= "CP14 MCRR/MRRC",

>>   	[ESR_ELx_EC_ILL]		= "PSTATE.IL",

>>   	[ESR_ELx_EC_SVC32]		= "SVC (AArch32)",

> 

> Is this needed? Or was it just missing from the parts already merged. (should it be a

> separate patch for the arch code)

Yes, you are right; it looks like this entry was missed in commit
aa6eece8ec5095e479. I suppose it can be posted as a separate patch.
> 

> It looks like KVM only prints it from kvm_handle_unknown_ec(), which would never happen as

> arm_exit_handlers[] has an entry for ESR_ELx_EC_PAC.

yes.
> 

> Is it true that the host could never take this trap either?, as it can only be taken when

> HCR_EL2.TGE is clear.

> (breadcrumbs from the ESR_ELx definition to "Trap to EL2 of EL0 accesses to Pointer

> authentication instructions")

>

Yes, most of the ptrauth instructions are treated as NOPs, but some
instructions like PACGA and XPAC [0] are always enabled and may trap if
CONFIG_ARM64_PTR_AUTH is disabled. In VHE mode this does not trap, as
HCR_EL2.TGE is set. But in NVHE mode this causes a hang instead of a
trap when tested with HCR_EL2.API=0. I am checking further.

[0]: DDI0487D_a_arm (page: D5-2390)

>> diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S

>> index 675fdc1..b78cc15 100644

>> --- a/arch/arm64/kvm/hyp/entry.S

>> +++ b/arch/arm64/kvm/hyp/entry.S

>> @@ -64,6 +64,12 @@ ENTRY(__guest_enter)

>>   

>>   	add	x18, x0, #VCPU_CONTEXT

>>   

>> +#ifdef	CONFIG_ARM64_PTR_AUTH

>> +	// Prepare parameter for __ptrauth_switch_to_guest(vcpu, host, guest).

>> +	mov	x2, x18

>> +	bl	__ptrauth_switch_to_guest

>> +#endif

> 

> This calls back into C code with x18 clobbered... is that allowed?

> x18 has this weird platform-register/temporary-register behaviour, that depends on the

> compiler. The PCS[0] has a note that 'hand-coded assembler should avoid it entirely'!

Yes, I agree with you that x18 may get clobbered.
> 

> Can we assume that compiler-generated code is using it for something, and depends on that

> value, which means we need to preserve or save/restore it when calling into C.

> 

> 

> The upshot? Could you use one of the callee saved registers instead of x18, then move it

> after your C call.

Yes, using a callee-saved register is an option.
> 

> 

>> @@ -118,6 +124,17 @@ ENTRY(__guest_exit)

>>   

>>   	get_host_ctxt	x2, x3

>>   

>> +#ifdef	CONFIG_ARM64_PTR_AUTH

>> +	// Prepare parameter for __ptrauth_switch_to_host(vcpu, guest, host).

>> +	// Save x0, x2 which are used later in callee saved registers.

>> +	mov	x19, x0

>> +	mov	x20, x2

>> +	sub	x0, x1, #VCPU_CONTEXT

> 

>> +	ldr	x29, [x2, #CPU_XREG_OFFSET(29)]

> 

> Is this to make the stack-trace look plausible?

> 

> 

>> +	bl	__ptrauth_switch_to_host

>> +	mov	x0, x19

>> +	mov	x2, x20

>> +#endif

> 

> (ditching the host_ctxt would let you move this above get_host_ctxt and the need to

> preserve its result)

ok.
> 

> 

>> diff --git a/arch/arm64/kvm/hyp/ptrauth-sr.c b/arch/arm64/kvm/hyp/ptrauth-sr.c

>> new file mode 100644

>> index 0000000..528ee6e

>> --- /dev/null

>> +++ b/arch/arm64/kvm/hyp/ptrauth-sr.c

>> @@ -0,0 +1,101 @@

> 

>> +static __always_inline bool __ptrauth_is_enabled(struct kvm_vcpu *vcpu)

> 

> This __always_inline still looks weird! You said it might be needed to make it function

> pointer safe. If it is, could you add a comment explaining why.

> 

> (alternatives would be making it an #ifdef, disabling ptrauth for the whole file, or

> annotating this function too)

OK, a __noptrauth annotation may be better, as some functions in this
file are already using it.
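
A hypothetical sketch of such an annotation, assuming a toolchain whose
aarch64 target attribute understands branch-protection (the __noptrauth
name itself is illustrative):

        /* hypothetical: build a helper without PAC protection once the
         * kernel itself is built with pointer authentication */
        #define __noptrauth __attribute__((target("branch-protection=none")))

        static __noptrauth bool __ptrauth_is_enabled(struct kvm_vcpu *vcpu);
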
> 

> 

>> +#define __ptrauth_save_key(regs, key)						\

>> +({										\

>> +	regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\

>> +	regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\

>> +})

>> +

>> +static __always_inline void __ptrauth_save_state(struct kvm_cpu_context *ctxt)

>> +{

>> +	__ptrauth_save_key(ctxt->sys_regs, APIA);

>> +	__ptrauth_save_key(ctxt->sys_regs, APIB);

>> +	__ptrauth_save_key(ctxt->sys_regs, APDA);

>> +	__ptrauth_save_key(ctxt->sys_regs, APDB);

>> +	__ptrauth_save_key(ctxt->sys_regs, APGA);

>> +}

>> +

>> +#define __ptrauth_restore_key(regs, key) 					\

>> +({										\

>> +	write_sysreg_s(regs[key ## KEYLO_EL1], SYS_ ## key ## KEYLO_EL1);	\

>> +	write_sysreg_s(regs[key ## KEYHI_EL1], SYS_ ## key ## KEYHI_EL1);	\

>> +})

>> +

>> +static __always_inline void __ptrauth_restore_state(struct kvm_cpu_context *ctxt)

>> +{

>> +	__ptrauth_restore_key(ctxt->sys_regs, APIA);

>> +	__ptrauth_restore_key(ctxt->sys_regs, APIB);

>> +	__ptrauth_restore_key(ctxt->sys_regs, APDA);

>> +	__ptrauth_restore_key(ctxt->sys_regs, APDB);

>> +	__ptrauth_restore_key(ctxt->sys_regs, APGA);

> 

> Are writes to these registers self synchronising? I'd assume not, as they come as a pair.

> 

> I think this means we need an isb() here to ensure that when restoring the host registers

> the next host authentication attempt uses the key we wrote here? We don't need it for the

> guest, so we could put it at the end of __ptrauth_switch_to_host().

Yes, an isb() is required.
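
That is, the host-side switch would end with something like the
following (a sketch; the guest path should not need it, as the ERET
into the guest is itself a context synchronization event):

        void __ptrauth_switch_to_host(struct kvm_vcpu *vcpu,
                                      struct kvm_cpu_context *guest_ctxt,
                                      struct kvm_cpu_context *host_ctxt)
        {
                if (!__ptrauth_is_enabled(vcpu))
                        return;

                __ptrauth_save_state(guest_ctxt);
                __ptrauth_restore_state(host_ctxt);
                isb();  /* make the restored host keys visible */
        }
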
> 

> 

>> +/**

>> + * This function changes the key so assign Pointer Authentication safe

>> + * GCC attribute if protected by it.

>> + */

> 

> ... this comment is the reminder for 'once we have host kernel ptrauth support'? could we

> add something to say that kernel support is when the attribute would be needed. Otherwise

> it reads like we're waiting for GCC support.

ok.
> 

> (I assume LLVM has a similar attribute ... is it exactly the same?)

> 

> 

>> +void __ptrauth_switch_to_guest(struct kvm_vcpu *vcpu,

>> +				  struct kvm_cpu_context *host_ctxt,

>> +				  struct kvm_cpu_context *guest_ctxt)

>> +{

> 

>> +}

> 

>> +void __ptrauth_switch_to_host(struct kvm_vcpu *vcpu,

>> +				 struct kvm_cpu_context *guest_ctxt,

>> +				 struct kvm_cpu_context *host_ctxt)

>> +{

> 

>> +}

> 

> 

> Could you add NOKPROBE_SYMBOL(symbol_name) for these. This adds them to the kprobe

> blacklist as they aren't part of the __hyp_text. (and don't need to be as its VHE only).

> Without this, you can patch a software-breakpoint in here, which KVM won't handle as its

> already switched VBAR for entry to the guest.

ok.
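
Following the pattern from that commit, a sketch:

        #include <linux/kprobes.h>

        NOKPROBE_SYMBOL(__ptrauth_switch_to_guest);
        NOKPROBE_SYMBOL(__ptrauth_switch_to_host);
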
> 

> Details in 7d82602909ed ("KVM: arm64: Forbid kprobing of the VHE world-switch code")

> 

> (... this turned up in a kernel version later than you based on ...)

> 

> 

>> +/**

>> + * kvm_arm_vcpu_ptrauth_reset - resets ptrauth for vcpu schedule

>> + *

>> + * @vcpu: The VCPU pointer

>> + *

>> + * This function may be used to disable ptrauth and use it in a lazy context

>> + * via traps. However, host key registers are saved here as they don't change

>> + * during host/guest switch.

>> + */

>> +void kvm_arm_vcpu_ptrauth_reset(struct kvm_vcpu *vcpu)

>> +{

>> +	struct kvm_cpu_context *host_ctxt;

>> +

>> +	if (kvm_supports_ptrauth()) {

>> +		kvm_arm_vcpu_ptrauth_disable(vcpu);

>> +		host_ctxt = vcpu->arch.host_cpu_context;

> 

>> +		__ptrauth_save_state(host_ctxt);

> 

> Could you equally do this host-save in kvm_arm_vcpu_ptrauth_trap() before

> kvm_arm_vcpu_ptrauth_enable()? This would avoid saving the keys if the guest never gets

> the opportunity to change them. At the moment we do it on every vcpu_load().

OK, nice suggestion. It works fine in kvm_arm_vcpu_ptrauth_trap().
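
A sketch of the trap handler with the host save folded in, so the host
keys are only saved once a guest actually uses ptrauth (assuming
__ptrauth_save_state is made reachable from here):

        void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
        {
                if (kvm_supports_ptrauth()) {
                        __ptrauth_save_state(vcpu->arch.host_cpu_context);
                        kvm_arm_vcpu_ptrauth_enable(vcpu);
                } else {
                        kvm_inject_undefined(vcpu);
                }
        }
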
> 

> 

> As kvm_arm_vcpu_ptrauth_reset() isn't used as part of the world-switch, could we keep it

> outside the 'hyp' directory? The Makefile for that directory expects to be building the

> hyp text, so it disables KASAN, KCOV and friends.

> kvm_arm_vcpu_ptrauth_reset() is safe for all of these, and it's good for it to be covered

> by this debug infrastructure. Could you move it to guest.c?

ok.
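
With the host key save moved to trap time as discussed above, the reset
shrinks to a sketch like:

        /* in arch/arm64/kvm/guest.c */
        void kvm_arm_vcpu_ptrauth_reset(struct kvm_vcpu *vcpu)
        {
                if (kvm_supports_ptrauth())
                        kvm_arm_vcpu_ptrauth_disable(vcpu);
        }
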
> 

> 

>> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c

>> index a6c9381..12529df 100644

>> --- a/arch/arm64/kvm/sys_regs.c

>> +++ b/arch/arm64/kvm/sys_regs.c> @@ -1045,9 +1071,10 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)

>>   					 (0xfUL << ID_AA64ISAR1_API_SHIFT) |

>>   					 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |

>>   					 (0xfUL << ID_AA64ISAR1_GPI_SHIFT);

>> -		if (val & ptrauth_mask)

>> +		if (!kvm_supports_ptrauth()) {

>>   			kvm_debug("ptrauth unsupported for guests, suppressing\n");

>> -		val &= ~ptrauth_mask;

>> +			val &= ~ptrauth_mask;

>> +		}

> 

> This means that debug message gets printed even on systems that don't support ptrauth in

> hardware. (val&ptrauth_mask) used to cut them out, now kvm_supports_ptrauth() fails if the

> static keys are false, and we end up printing this message.

> Now that KVM supports pointer-auth, I don't think the debug message is useful, can we

> remove it? (it would now mean 'you didn't ask for ptrauth to be turned on')

ok.
> 

> 

> Could we always mask out the imp-def bits?

I guess not, as explained before.
> 

> 

> This patch needs to be merged together with 4 & 5 so the user-abi is as it should be. This

> means the check_present/restrictions thing needs sorting so they're ready together.

ok.

Thanks,
Amit D
> 

> Thanks,

> 

> James

> 

> 

> [0] http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf

>
diff mbox series

Patch

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 704667e..b200c14 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -345,6 +345,7 @@  static inline int kvm_arm_have_ssbd(void)
 
 static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arm_vcpu_ptrauth_reset(struct kvm_vcpu *vcpu) {}
 
 #define __KVM_HAVE_ARCH_VM_ALLOC
 struct kvm *kvm_arch_alloc_vm(void);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2f1bb86..1bacf78 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -146,6 +146,18 @@  enum vcpu_sysreg {
 	PMSWINC_EL0,	/* Software Increment Register */
 	PMUSERENR_EL0,	/* User Enable Register */
 
+	/* Pointer Authentication Registers */
+	APIAKEYLO_EL1,
+	APIAKEYHI_EL1,
+	APIBKEYLO_EL1,
+	APIBKEYHI_EL1,
+	APDAKEYLO_EL1,
+	APDAKEYHI_EL1,
+	APDBKEYLO_EL1,
+	APDBKEYHI_EL1,
+	APGAKEYLO_EL1,
+	APGAKEYHI_EL1,
+
 	/* 32bit specific registers. Keep them at the end of the range */
 	DACR32_EL2,	/* Domain Access Control Register */
 	IFSR32_EL2,	/* Instruction Fault Status Register */
@@ -439,6 +451,17 @@  static inline bool kvm_arch_requires_vhe(void)
 	return false;
 }
 
+static inline bool kvm_supports_ptrauth(void)
+{
+	return has_vhe() && system_supports_address_auth() &&
+				system_supports_generic_auth();
+}
+
+void kvm_arm_vcpu_ptrauth_enable(struct kvm_vcpu *vcpu);
+void kvm_arm_vcpu_ptrauth_disable(struct kvm_vcpu *vcpu);
+void kvm_arm_vcpu_ptrauth_reset(struct kvm_vcpu *vcpu);
+void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
+
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 6e65cad..09e061a 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -153,6 +153,13 @@  bool __fpsimd_enabled(void);
 void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
 void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);
 
+void __ptrauth_switch_to_guest(struct kvm_vcpu *vcpu,
+			       struct kvm_cpu_context *host_ctxt,
+			       struct kvm_cpu_context *guest_ctxt);
+void __ptrauth_switch_to_host(struct kvm_vcpu *vcpu,
+			      struct kvm_cpu_context *guest_ctxt,
+			      struct kvm_cpu_context *host_ctxt);
+
 u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
 void __noreturn __hyp_do_panic(unsigned long, ...);
 
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 4e2fb87..5cac605 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -749,6 +749,7 @@  static const char *esr_class_str[] = {
 	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",
 	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",
 	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",
+	[ESR_ELx_EC_PAC]		= "Pointer authentication trap",
 	[ESR_ELx_EC_CP14_64]		= "CP14 MCRR/MRRC",
 	[ESR_ELx_EC_ILL]		= "PSTATE.IL",
 	[ESR_ELx_EC_SVC32]		= "SVC (AArch32)",
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 0b79834..7622ab3 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -174,19 +174,24 @@  static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
 }
 
 /*
+ * Handle the guest trying to use a ptrauth instruction, or trying to access a
+ * ptrauth register.
+ */
+void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
+{
+	if (kvm_supports_ptrauth())
+		kvm_arm_vcpu_ptrauth_enable(vcpu);
+	else
+		kvm_inject_undefined(vcpu);
+}
+
+/*
  * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
  * a NOP).
  */
 static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	/*
-	 * We don't currently support ptrauth in a guest, and we mask the ID
-	 * registers to prevent well-behaved guests from trying to make use of
-	 * it.
-	 *
-	 * Inject an UNDEF, as if the feature really isn't present.
-	 */
-	kvm_inject_undefined(vcpu);
+	kvm_arm_vcpu_ptrauth_trap(vcpu);
 	return 1;
 }
 
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 82d1904..17cec99 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -19,6 +19,7 @@  obj-$(CONFIG_KVM_ARM_HOST) += switch.o
 obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
 obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
 obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
+obj-$(CONFIG_KVM_ARM_HOST) += ptrauth-sr.o
 
 # KVM code is run at a different exception code with a different map, so
 # compiler instrumentation that inserts callbacks or checks into the code may
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 675fdc1..b78cc15 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -64,6 +64,12 @@  ENTRY(__guest_enter)
 
 	add	x18, x0, #VCPU_CONTEXT
 
+#ifdef	CONFIG_ARM64_PTR_AUTH
+	// Prepare parameter for __ptrauth_switch_to_guest(vcpu, host, guest).
+	mov	x2, x18
+	bl	__ptrauth_switch_to_guest
+#endif
+
 	// Restore guest regs x0-x17
 	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
 	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
@@ -118,6 +124,17 @@  ENTRY(__guest_exit)
 
 	get_host_ctxt	x2, x3
 
+#ifdef	CONFIG_ARM64_PTR_AUTH
+	// Prepare parameter for __ptrauth_switch_to_host(vcpu, guest, host).
+	// Save x0, x2 which are used later in callee saved registers.
+	mov	x19, x0
+	mov	x20, x2
+	sub	x0, x1, #VCPU_CONTEXT
+	ldr	x29, [x2, #CPU_XREG_OFFSET(29)]
+	bl	__ptrauth_switch_to_host
+	mov	x0, x19
+	mov	x2, x20
+#endif
 	// Now restore the host regs
 	restore_callee_saved_regs x2
 
diff --git a/arch/arm64/kvm/hyp/ptrauth-sr.c b/arch/arm64/kvm/hyp/ptrauth-sr.c
new file mode 100644
index 0000000..528ee6e
--- /dev/null
+++ b/arch/arm64/kvm/hyp/ptrauth-sr.c
@@ -0,0 +1,101 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * arch/arm64/kvm/hyp/ptrauth-sr.c: Guest/host ptrauth save/restore
+ *
+ * Copyright 2018 Arm Limited
+ * Author: Mark Rutland <mark.rutland@arm.com>
+ *         Amit Daniel Kachhap <amit.kachhap@arm.com>
+ */
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+
+#include <asm/cpucaps.h>
+#include <asm/cpufeature.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_hyp.h>
+#include <asm/pointer_auth.h>
+
+static __always_inline bool __ptrauth_is_enabled(struct kvm_vcpu *vcpu)
+{
+	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
+			vcpu->arch.ctxt.hcr_el2 & (HCR_API | HCR_APK);
+}
+
+#define __ptrauth_save_key(regs, key)						\
+({										\
+	regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
+	regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
+})
+
+static __always_inline void __ptrauth_save_state(struct kvm_cpu_context *ctxt)
+{
+	__ptrauth_save_key(ctxt->sys_regs, APIA);
+	__ptrauth_save_key(ctxt->sys_regs, APIB);
+	__ptrauth_save_key(ctxt->sys_regs, APDA);
+	__ptrauth_save_key(ctxt->sys_regs, APDB);
+	__ptrauth_save_key(ctxt->sys_regs, APGA);
+}
+
+#define __ptrauth_restore_key(regs, key) 					\
+({										\
+	write_sysreg_s(regs[key ## KEYLO_EL1], SYS_ ## key ## KEYLO_EL1);	\
+	write_sysreg_s(regs[key ## KEYHI_EL1], SYS_ ## key ## KEYHI_EL1);	\
+})
+
+static __always_inline void __ptrauth_restore_state(struct kvm_cpu_context *ctxt)
+{
+	__ptrauth_restore_key(ctxt->sys_regs, APIA);
+	__ptrauth_restore_key(ctxt->sys_regs, APIB);
+	__ptrauth_restore_key(ctxt->sys_regs, APDA);
+	__ptrauth_restore_key(ctxt->sys_regs, APDB);
+	__ptrauth_restore_key(ctxt->sys_regs, APGA);
+}
+
+/**
+ * This function changes the key so assign Pointer Authentication safe
+ * GCC attribute if protected by it.
+ */
+void __ptrauth_switch_to_guest(struct kvm_vcpu *vcpu,
+				  struct kvm_cpu_context *host_ctxt,
+				  struct kvm_cpu_context *guest_ctxt)
+{
+	if (!__ptrauth_is_enabled(vcpu))
+		return;
+
+	__ptrauth_restore_state(guest_ctxt);
+}
+
+/**
+ * This function changes the key so assign Pointer Authentication safe
+ * GCC attribute if protected by it.
+ */
+void __ptrauth_switch_to_host(struct kvm_vcpu *vcpu,
+				 struct kvm_cpu_context *guest_ctxt,
+				 struct kvm_cpu_context *host_ctxt)
+{
+	if (!__ptrauth_is_enabled(vcpu))
+		return;
+
+	__ptrauth_save_state(guest_ctxt);
+	__ptrauth_restore_state(host_ctxt);
+}
+
+/**
+ * kvm_arm_vcpu_ptrauth_reset - resets ptrauth for vcpu schedule
+ *
+ * @vcpu: The VCPU pointer
+ *
+ * This function may be used to disable ptrauth and use it in a lazy context
+ * via traps. However, host key registers are saved here as they don't change
+ * during host/guest switch.
+ */
+void kvm_arm_vcpu_ptrauth_reset(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpu_context *host_ctxt;
+
+	if (kvm_supports_ptrauth()) {
+		kvm_arm_vcpu_ptrauth_disable(vcpu);
+		host_ctxt = vcpu->arch.host_cpu_context;
+		__ptrauth_save_state(host_ctxt);
+	}
+}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index a6c9381..12529df 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -986,6 +986,32 @@  static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),					\
 	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
 
+
+void kvm_arm_vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.ctxt.hcr_el2 |= (HCR_API | HCR_APK);
+}
+
+void kvm_arm_vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.ctxt.hcr_el2 &= ~(HCR_API | HCR_APK);
+}
+
+static bool trap_ptrauth(struct kvm_vcpu *vcpu,
+			 struct sys_reg_params *p,
+			 const struct sys_reg_desc *rd)
+{
+	kvm_arm_vcpu_ptrauth_trap(vcpu);
+	return false;
+}
+
+#define __PTRAUTH_KEY(k)						\
+	{ SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k }
+
+#define PTRAUTH_KEY(k)							\
+	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
+	__PTRAUTH_KEY(k ## KEYHI_EL1)
+
 static bool access_cntp_tval(struct kvm_vcpu *vcpu,
 		struct sys_reg_params *p,
 		const struct sys_reg_desc *r)
@@ -1045,9 +1071,10 @@  static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
 					 (0xfUL << ID_AA64ISAR1_API_SHIFT) |
 					 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
 					 (0xfUL << ID_AA64ISAR1_GPI_SHIFT);
-		if (val & ptrauth_mask)
+		if (!kvm_supports_ptrauth()) {
 			kvm_debug("ptrauth unsupported for guests, suppressing\n");
-		val &= ~ptrauth_mask;
+			val &= ~ptrauth_mask;
+		}
 	} else if (id == SYS_ID_AA64MMFR1_EL1) {
 		if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
 			kvm_debug("LORegions unsupported for guests, suppressing\n");
@@ -1316,6 +1343,12 @@  static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
 	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
 
+	PTRAUTH_KEY(APIA),
+	PTRAUTH_KEY(APIB),
+	PTRAUTH_KEY(APDA),
+	PTRAUTH_KEY(APDB),
+	PTRAUTH_KEY(APGA),
+
 	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
 	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
 	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 2032a66..d7e003f 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -388,6 +388,8 @@  void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		vcpu_clear_wfe_traps(vcpu);
 	else
 		vcpu_set_wfe_traps(vcpu);
+
+	kvm_arm_vcpu_ptrauth_reset(vcpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)