
[PATCHv2,10/12] arm64/kvm: context-switch ptrauth registers

Message ID 20171127163806.31435-11-mark.rutland@arm.com
State New
Series ARMv8.3 pointer authentication userspace support

Commit Message

Mark Rutland Nov. 27, 2017, 4:38 p.m. UTC
When pointer authentication is supported, a guest may wish to use it.
This patch adds the necessary KVM infrastructure for this to work, with
a semi-lazy context switch of the pointer auth state.

When we schedule a vcpu, we disable guest usage of pointer
authentication instructions and accesses to the keys. While these are
disabled, we avoid context-switching the keys. When we trap the guest
trying to use pointer authentication functionality, we change to eagerly
context-switching the keys, and enable the feature. The next time the
vcpu is scheduled out/in, we start again.

Pointer authentication consists of address authentication and generic
authentication, and CPUs in a system might have varied support for
either. Where support for either feature is not uniform, it is hidden
from guests via ID register emulation, as a result of the cpufeature
framework in the host.

Unfortunately, address authentication and generic authentication cannot
be trapped separately, as the architecture provides a single EL2 trap
covering both. If we wish to expose one without the other, we cannot
prevent a (badly-written) guest from intermittently using a feature
which is not uniformly supported (when scheduled on a physical CPU which
supports the relevant feature). When the guest is scheduled on a
physical CPU lacking the feature, these attempts will result in an UNDEF
being taken by the guest.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>

Cc: Christoffer Dall <cdall@linaro.org>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: kvmarm@lists.cs.columbia.edu
---
 arch/arm64/include/asm/kvm_host.h | 23 +++++++++-
 arch/arm64/include/asm/kvm_hyp.h  |  7 +++
 arch/arm64/kvm/handle_exit.c      | 21 +++++++++
 arch/arm64/kvm/hyp/Makefile       |  1 +
 arch/arm64/kvm/hyp/ptrauth-sr.c   | 91 +++++++++++++++++++++++++++++++++++++++
 arch/arm64/kvm/hyp/switch.c       |  4 ++
 arch/arm64/kvm/sys_regs.c         | 32 ++++++++++++++
 7 files changed, 178 insertions(+), 1 deletion(-)
 create mode 100644 arch/arm64/kvm/hyp/ptrauth-sr.c

-- 
2.11.0

Comments

Christoffer Dall Feb. 6, 2018, 12:38 p.m. UTC | #1
On Mon, Nov 27, 2017 at 04:38:04PM +0000, Mark Rutland wrote:
> When pointer authentication is supported, a guest may wish to use it.
> This patch adds the necessary KVM infrastructure for this to work, with
> a semi-lazy context switch of the pointer auth state.
>
> When we schedule a vcpu,

That's not quite what the code does, the code only does this when we
schedule back a preempted or blocked vcpu thread.

> we disable guest usage of pointer
> authentication instructions and accesses to the keys. While these are
> disabled, we avoid context-switching the keys. When we trap the guest
> trying to use pointer authentication functionality, we change to eagerly
> context-switching the keys, and enable the feature. The next time the
> vcpu is scheduled out/in, we start again.
>
> Pointer authentication consists of address authentication and generic
> authentication, and CPUs in a system might have varied support for
> either. Where support for either feature is not uniform, it is hidden
> from guests via ID register emulation, as a result of the cpufeature
> framework in the host.
>
> Unfortunately, address authentication and generic authentication cannot
> be trapped separately, as the architecture provides a single EL2 trap
> covering both. If we wish to expose one without the other, we cannot
> prevent a (badly-written) guest from intermittently using a feature
> which is not uniformly supported (when scheduled on a physical CPU which
> supports the relevant feature).

We could choose to always trap and emulate in software in this case,
couldn't we?  (not saying we should though).

Also, this patch doesn't let userspace decide if we should hide or
expose the feature to guests, and will expose new system registers to
userspace.  That means that on hardware supporting pointer
authentication, with this patch, it's not possible to migrate to a
machine which doesn't have the support.  That's probably a good thing
(false sense of security etc.), but I wonder if we should have a
mechanism for userspace to ask for pointer authentication in the guest
and only if that's enabled, do we expose the feature to the guest and in
the system register list to user space as well?

> When the guest is scheduled on a
> physical CPU lacking the feature, these atetmps will result in an UNDEF

attempts

> being taken by the guest.
>
> Signed-off-by: Mark Rutland <mark.rutland@arm.com>
> Cc: Christoffer Dall <cdall@linaro.org>
> Cc: Marc Zyngier <marc.zyngier@arm.com>
> Cc: kvmarm@lists.cs.columbia.edu

> ---
>  arch/arm64/include/asm/kvm_host.h | 23 +++++++++-
>  arch/arm64/include/asm/kvm_hyp.h  |  7 +++
>  arch/arm64/kvm/handle_exit.c      | 21 +++++++++
>  arch/arm64/kvm/hyp/Makefile       |  1 +
>  arch/arm64/kvm/hyp/ptrauth-sr.c   | 91 +++++++++++++++++++++++++++++++++++++++
>  arch/arm64/kvm/hyp/switch.c       |  4 ++
>  arch/arm64/kvm/sys_regs.c         | 32 ++++++++++++++
>  7 files changed, 178 insertions(+), 1 deletion(-)
>  create mode 100644 arch/arm64/kvm/hyp/ptrauth-sr.c
>
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 39184aa3e2f2..2fc21a2a75a7 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -136,6 +136,18 @@ enum vcpu_sysreg {
>  	PMSWINC_EL0,	/* Software Increment Register */
>  	PMUSERENR_EL0,	/* User Enable Register */
>  
> +	/* Pointer Authentication Registers */
> +	APIAKEYLO_EL1,
> +	APIAKEYHI_EL1,
> +	APIBKEYLO_EL1,
> +	APIBKEYHI_EL1,
> +	APDAKEYLO_EL1,
> +	APDAKEYHI_EL1,
> +	APDBKEYLO_EL1,
> +	APDBKEYHI_EL1,
> +	APGAKEYLO_EL1,
> +	APGAKEYHI_EL1,
> +
>  	/* 32bit specific registers. Keep them at the end of the range */
>  	DACR32_EL2,	/* Domain Access Control Register */
>  	IFSR32_EL2,	/* Instruction Fault Status Register */
> @@ -363,10 +375,19 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
>  	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
>  }
>  
> +void kvm_arm_vcpu_ptrauth_enable(struct kvm_vcpu *vcpu);
> +void kvm_arm_vcpu_ptrauth_disable(struct kvm_vcpu *vcpu);
> +void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
> +
>  static inline void kvm_arch_hardware_unsetup(void) {}
>  static inline void kvm_arch_sync_events(struct kvm *kvm) {}
>  static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
> -static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
> +
> +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
> +{
> +	kvm_arm_vcpu_ptrauth_disable(vcpu);
> +}
> +

I still find this decision to begin trapping again quite arbitrary, and
would at least prefer this to be in vcpu_load (which would make the
behavior match the commit text as well).

My expectation would be that if a guest is running software with pointer
authentication enabled, then it's likely to either keep using the
feature, or not use it at all, so I would make this a one-time flag.

Thanks,
-Christoffer

Mark Rutland March 9, 2018, 2:28 p.m. UTC | #2
On Tue, Feb 06, 2018 at 01:38:47PM +0100, Christoffer Dall wrote:
> On Mon, Nov 27, 2017 at 04:38:04PM +0000, Mark Rutland wrote:
> > When pointer authentication is supported, a guest may wish to use it.
> > This patch adds the necessary KVM infrastructure for this to work, with
> > a semi-lazy context switch of the pointer auth state.
> >
> > When we schedule a vcpu,
>
> That's not quite what the code does, the code only does this when we
> schedule back a preempted or blocked vcpu thread.

Does that only leave the case of the vCPU being scheduled for the first
time? Or am I missing something else?

[...]

> > we disable guest usage of pointer
> > authentication instructions and accesses to the keys. While these are
> > disabled, we avoid context-switching the keys. When we trap the guest
> > trying to use pointer authentication functionality, we change to eagerly
> > context-switching the keys, and enable the feature. The next time the
> > vcpu is scheduled out/in, we start again.
> >
> > Pointer authentication consists of address authentication and generic
> > authentication, and CPUs in a system might have varied support for
> > either. Where support for either feature is not uniform, it is hidden
> > from guests via ID register emulation, as a result of the cpufeature
> > framework in the host.
> >
> > Unfortunately, address authentication and generic authentication cannot
> > be trapped separately, as the architecture provides a single EL2 trap
> > covering both. If we wish to expose one without the other, we cannot
> > prevent a (badly-written) guest from intermittently using a feature
> > which is not uniformly supported (when scheduled on a physical CPU which
> > supports the relevant feature).
>
> We could choose to always trap and emulate in software in this case,
> couldn't we?  (not saying we should though).

Practically speaking, we cannot. Emulating the feature would be so
detrimental to performance as to render the feature useless -- every
function prologue/epilogue in a guest would end up trapping to hyp.

Additionally, if the algorithm is IMP DEF, we simply don't know how to
emulate it.

[...]

> Also, this patch doesn't let userspace decide if we should hide or
> expose the feature to guests, and will expose new system registers to
> userspace.  That means that on hardware supporting pointer
> authentication, with this patch, it's not possible to migrate to a
> machine which doesn't have the support.  That's probably a good thing
> (false sense of security etc.), but I wonder if we should have a
> mechanism for userspace to ask for pointer authentication in the guest
> and only if that's enabled, do we expose the feature to the guest and in
> the system register list to user space as well?

Making this an opt-in makes sense to me, given it affects migration.
I'll take a look at what we do for SVE.

[...]

> > When the guest is scheduled on a
> > physical CPU lacking the feature, these atetmps will result in an UNDEF
>
> attempts

Whoops. Fixed.

[...]

> > +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
> > +{
> > +	kvm_arm_vcpu_ptrauth_disable(vcpu);
> > +}
> > +
>
> I still find this decision to begin trapping again quite arbitrary, and
> would at least prefer this to be in vcpu_load (which would make the
> behavior match the commit text as well).

Sure, done.

> My expectation would be that if a guest is running software with pointer
> authentication enabled, then it's likely to either keep using the
> feature, or not use it at all, so I would make this a one-time flag.

I think it's likely that some applications will use ptrauth while others
do not. Even if the guest OS supports ptrauth, KVM may repeatedly preempt
an application that doesn't use it, and we'd win in that case.

There are also some rarer cases, like kexec in a guest from a
ptrauth-aware kernel to a ptrauth-oblivious one.

I don't have strong feelings either way, and I have no data.

Thanks,
Mark.
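
For concreteness, the opt-in Mark agrees to look into might take the
shape of a vcpu feature bit, along the lines of other optional vcpu
features. The sketch below is an illustration only: KVM_ARM_VCPU_PTRAUTH
is an assumed name, not something this series defines, and hiding the
key registers from the userspace sysreg list is not shown.

/*
 * Hypothetical sketch of a userspace opt-in. KVM_ARM_VCPU_PTRAUTH is an
 * assumed feature-bit name; the helpers it calls are the ones added by
 * this patch.
 */
static bool kvm_arm_vcpu_ptrauth_allowed(struct kvm_vcpu *vcpu)
{
	return test_bit(KVM_ARM_VCPU_PTRAUTH, vcpu->arch.features) &&
	       (cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH) ||
		cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH));
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
{
	/* Only switch to the eager-switching path when userspace opted in. */
	if (kvm_arm_vcpu_ptrauth_allowed(vcpu))
		kvm_arm_vcpu_ptrauth_enable(vcpu);
	else
		kvm_inject_undefined(vcpu);
}
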
Christoffer Dall April 9, 2018, 12:58 p.m. UTC | #3
Hi Mark,

[Sorry for late reply]

On Fri, Mar 09, 2018 at 02:28:38PM +0000, Mark Rutland wrote:
> On Tue, Feb 06, 2018 at 01:38:47PM +0100, Christoffer Dall wrote:
> > On Mon, Nov 27, 2017 at 04:38:04PM +0000, Mark Rutland wrote:
> > > When pointer authentication is supported, a guest may wish to use it.
> > > This patch adds the necessary KVM infrastructure for this to work, with
> > > a semi-lazy context switch of the pointer auth state.
> > >
> > > When we schedule a vcpu,
> >
> > That's not quite what the code does, the code only does this when we
> > schedule back a preempted or blocked vcpu thread.
>
> Does that only leave the case of the vCPU being scheduled for the first
> time? Or am I missing something else?
>
> [...]

In the current patch, you're only calling kvm_arm_vcpu_ptrauth_disable()
from kvm_arch_sched_in(), which is only called on the preempt notifier
path; that leaves out every time we enter the guest from userspace, and
therefore also the initial run of the vCPU (assuming there's no
preemption in the kernel prior to running the first time).

vcpu_load() takes care of all the cases.

[...]

> > I still find this decision to begin trapping again quite arbitrary, and
> > would at least prefer this to be in vcpu_load (which would make the
> > behavior match the commit text as well).
>
> Sure, done.
>
> > My expectation would be that if a guest is running software with pointer
> > authentication enabled, then it's likely to either keep using the
> > feature, or not use it at all, so I would make this a one-time flag.
>
> I think it's likely that some applications will use ptrauth while others
> do not. Even if the guest OS supports ptrauth, KVM may repeatedly preempt
> an application that doesn't use it, and we'd win in that case.
>
> There are also some rarer cases, like kexec in a guest from a
> ptrauth-aware kernel to a ptrauth-oblivious one.
>
> I don't have strong feelings either way, and I have no data.

I think your intuition sounds sane, and let's reset the flag on every
vcpu_load, and we can always revisit when we have hardware and data if
someone reports a performance issue.

Thanks,
-Christoffer
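
For context on why vcpu_load() covers all the cases, the generic KVM
entry path of this era looks roughly like the following (a simplified
sketch, not verbatim kernel source): vcpu_load() runs on every vcpu
ioctl entry, including the first KVM_RUN, whereas kvm_arch_sched_in()
only runs from the preempt notifier when a preempted vcpu thread is
scheduled back in.

/* Simplified sketch of the generic vcpu_load() path (not verbatim). */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu = get_cpu();

	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);	/* arch hook: runs on every entry */
	put_cpu();
}
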
Mark Rutland April 9, 2018, 2:37 p.m. UTC | #4
On Mon, Apr 09, 2018 at 02:58:18PM +0200, Christoffer Dall wrote:
> Hi Mark,
>
> [Sorry for late reply]
>
> On Fri, Mar 09, 2018 at 02:28:38PM +0000, Mark Rutland wrote:
> > On Tue, Feb 06, 2018 at 01:38:47PM +0100, Christoffer Dall wrote:
> > > On Mon, Nov 27, 2017 at 04:38:04PM +0000, Mark Rutland wrote:
> > > > When pointer authentication is supported, a guest may wish to use it.
> > > > This patch adds the necessary KVM infrastructure for this to work, with
> > > > a semi-lazy context switch of the pointer auth state.
> > > >
> > > > When we schedule a vcpu,
> > >
> > > That's not quite what the code does, the code only does this when we
> > > schedule back a preempted or blocked vcpu thread.
> >
> > Does that only leave the case of the vCPU being scheduled for the first
> > time? Or am I missing something else?
> >
> > [...]
>
> In the current patch, you're only calling kvm_arm_vcpu_ptrauth_disable()
> from kvm_arch_sched_in(), which is only called on the preempt notifier
> path; that leaves out every time we enter the guest from userspace, and
> therefore also the initial run of the vCPU (assuming there's no
> preemption in the kernel prior to running the first time).
>
> vcpu_load() takes care of all the cases.

I see.

> > > I still find this decision to begin trapping again quite arbitrary, and
> > > would at least prefer this to be in vcpu_load (which would make the
> > > behavior match the commit text as well).
> >
> > Sure, done.
> >
> > > My expectation would be that if a guest is running software with pointer
> > > authentication enabled, then it's likely to either keep using the
> > > feature, or not use it at all, so I would make this a one-time flag.
> >
> > I think it's likely that some applications will use ptrauth while others
> > do not. Even if the guest OS supports ptrauth, KVM may repeatedly preempt
> > an application that doesn't use it, and we'd win in that case.
> >
> > There are also some rarer cases, like kexec in a guest from a
> > ptrauth-aware kernel to a ptrauth-oblivious one.
> >
> > I don't have strong feelings either way, and I have no data.
>
> I think your intuition sounds sane, and let's reset the flag on every
> vcpu_load, and we can always revisit when we have hardware and data if
> someone reports a performance issue.

Cool. I've switched to vcpu_load() locally, and will use that in v3.

Thanks,
Mark.
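
A minimal sketch of the direction agreed above, assuming the reset
simply moves into the arm64 kvm_arch_vcpu_load() implementation (the
exact placement in v3 may differ):

/* Sketch only: disable ptrauth trapping on every vcpu_load(). */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* ... existing load-time setup (vgic, timers, etc.) ... */

	kvm_arm_vcpu_ptrauth_disable(vcpu);
}

/* kvm_arch_sched_in() can then go back to being a no-op: */
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
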

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 39184aa3e2f2..2fc21a2a75a7 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -136,6 +136,18 @@ enum vcpu_sysreg {
 	PMSWINC_EL0,	/* Software Increment Register */
 	PMUSERENR_EL0,	/* User Enable Register */
 
+	/* Pointer Authentication Registers */
+	APIAKEYLO_EL1,
+	APIAKEYHI_EL1,
+	APIBKEYLO_EL1,
+	APIBKEYHI_EL1,
+	APDAKEYLO_EL1,
+	APDAKEYHI_EL1,
+	APDBKEYLO_EL1,
+	APDBKEYHI_EL1,
+	APGAKEYLO_EL1,
+	APGAKEYHI_EL1,
+
 	/* 32bit specific registers. Keep them at the end of the range */
 	DACR32_EL2,	/* Domain Access Control Register */
 	IFSR32_EL2,	/* Instruction Fault Status Register */
@@ -363,10 +375,19 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
 }
 
+void kvm_arm_vcpu_ptrauth_enable(struct kvm_vcpu *vcpu);
+void kvm_arm_vcpu_ptrauth_disable(struct kvm_vcpu *vcpu);
+void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
+
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
+{
+	kvm_arm_vcpu_ptrauth_disable(vcpu);
+}
+
 static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
 void kvm_arm_init_debug(void);
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 08d3bb66c8b7..d0dd924cb175 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -152,6 +152,13 @@ void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
 void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
 bool __fpsimd_enabled(void);
 
+void __ptrauth_switch_to_guest(struct kvm_vcpu *vcpu,
+			       struct kvm_cpu_context *host_ctxt,
+			       struct kvm_cpu_context *guest_ctxt);
+void __ptrauth_switch_to_host(struct kvm_vcpu *vcpu,
+			      struct kvm_cpu_context *host_ctxt,
+			      struct kvm_cpu_context *guest_ctxt);
+
 u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
 void __noreturn __hyp_do_panic(unsigned long, ...);
 
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index b71247995469..d9aff3c86551 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -136,6 +136,26 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return ret;
 }
 
+/*
+ * Handle the guest trying to use a ptrauth instruction, or trying to access a
+ * ptrauth register.
+ */
+void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
+{
+	if (cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH) ||
+	    cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH)) {
+		kvm_arm_vcpu_ptrauth_enable(vcpu);
+	} else {
+		kvm_inject_undefined(vcpu);
+	}
+}
+
+static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	kvm_arm_vcpu_ptrauth_trap(vcpu);
+	return 1;
+}
+
 static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
@@ -176,6 +196,7 @@ static exit_handle_fn arm_exit_handlers[] = {
 	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
 	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
 	[ESR_ELx_EC_FP_ASIMD]	= handle_no_fpsimd,
+	[ESR_ELx_EC_PAC]	= kvm_handle_ptrauth,
 };
 
 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index f04400d494b7..2c2c3bd90cc0 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
 obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
 obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
+obj-$(CONFIG_KVM_ARM_HOST) += ptrauth-sr.o
 
 # KVM code is run at a different exception code with a different map, so
 # compiler instrumentation that inserts callbacks or checks into the code may
diff --git a/arch/arm64/kvm/hyp/ptrauth-sr.c b/arch/arm64/kvm/hyp/ptrauth-sr.c
new file mode 100644
index 000000000000..2784fb373296
--- /dev/null
+++ b/arch/arm64/kvm/hyp/ptrauth-sr.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 ARM Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+
+#include <asm/cpucaps.h>
+#include <asm/cpufeature.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_hyp.h>
+
+static bool __hyp_text __ptrauth_is_enabled(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.hcr_el2 & (HCR_API | HCR_APK);
+}
+
+#define __ptrauth_save_key(regs, key)						\
+({										\
+	regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
+	regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
+})
+
+static void __hyp_text __ptrauth_save_state(struct kvm_cpu_context *ctxt)
+{
+	if (cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH)) {
+		__ptrauth_save_key(ctxt->sys_regs, APIA);
+		__ptrauth_save_key(ctxt->sys_regs, APIB);
+		__ptrauth_save_key(ctxt->sys_regs, APDA);
+		__ptrauth_save_key(ctxt->sys_regs, APDB);
+	}
+
+	if (cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH)) {
+		__ptrauth_save_key(ctxt->sys_regs, APGA);
+	}
+}
+
+#define __ptrauth_restore_key(regs, key) 					\
+({										\
+	write_sysreg_s(regs[key ## KEYLO_EL1], SYS_ ## key ## KEYLO_EL1);	\
+	write_sysreg_s(regs[key ## KEYHI_EL1], SYS_ ## key ## KEYHI_EL1);	\
+})
+
+static void __hyp_text __ptrauth_restore_state(struct kvm_cpu_context *ctxt)
+{
+
+	if (cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH)) {
+		__ptrauth_restore_key(ctxt->sys_regs, APIA);
+		__ptrauth_restore_key(ctxt->sys_regs, APIB);
+		__ptrauth_restore_key(ctxt->sys_regs, APDA);
+		__ptrauth_restore_key(ctxt->sys_regs, APDB);
+	}
+
+	if (cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH)) {
+		__ptrauth_restore_key(ctxt->sys_regs, APGA);
+	}
+}
+
+void __hyp_text __ptrauth_switch_to_guest(struct kvm_vcpu *vcpu,
+					  struct kvm_cpu_context *host_ctxt,
+					  struct kvm_cpu_context *guest_ctxt)
+{
+	if (!__ptrauth_is_enabled(vcpu))
+		return;
+
+	__ptrauth_save_state(host_ctxt);
+	__ptrauth_restore_state(guest_ctxt);
+}
+
+void __hyp_text __ptrauth_switch_to_host(struct kvm_vcpu *vcpu,
+					 struct kvm_cpu_context *host_ctxt,
+					 struct kvm_cpu_context *guest_ctxt)
+{
+	if (!__ptrauth_is_enabled(vcpu))
+		return;
+
+	__ptrauth_save_state(guest_ctxt);
+	__ptrauth_restore_state(host_ctxt);
+}
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 2205f0be3ced..d9be2762ac1a 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -315,6 +315,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__sysreg_restore_guest_state(guest_ctxt);
 	__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
 
+	__ptrauth_switch_to_guest(vcpu, host_ctxt, guest_ctxt);
+
 	/* Jump in the fire! */
 again:
 	exit_code = __guest_enter(vcpu, host_ctxt);
@@ -373,6 +375,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	fp_enabled = __fpsimd_enabled();
 
+	__ptrauth_switch_to_host(vcpu, host_ctxt, guest_ctxt);
+
 	__sysreg_save_guest_state(guest_ctxt);
 	__sysreg32_save_state(vcpu);
 	__timer_disable_traps(vcpu);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 1830ebc227d1..5fe3b2588bec 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -838,6 +838,32 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),					\
 	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
 
+
+void kvm_arm_vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
+}
+
+void kvm_arm_vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
+}
+
+static bool trap_ptrauth(struct kvm_vcpu *vcpu,
+			 struct sys_reg_params *p,
+			 const struct sys_reg_desc *rd)
+{
+	kvm_arm_vcpu_ptrauth_trap(vcpu);
+	return false;
+}
+
+#define __PTRAUTH_KEY(k)						\
+	{ SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k }
+
+#define PTRAUTH_KEY(k)							\
+	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
+	__PTRAUTH_KEY(k ## KEYHI_EL1)
+
 static bool access_cntp_tval(struct kvm_vcpu *vcpu,
 		struct sys_reg_params *p,
 		const struct sys_reg_desc *r)
@@ -1156,6 +1182,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
 	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
 
+	PTRAUTH_KEY(APIA),
+	PTRAUTH_KEY(APIB),
+	PTRAUTH_KEY(APDA),
+	PTRAUTH_KEY(APDB),
+	PTRAUTH_KEY(APGA),
+
 	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
 	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
 	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },