
[RFC,v2,19/22] KVM: arm64: Provide userspace access to ZT0

Message ID 20231222-kvm-arm64-sme-v2-19-da226cb180bb@kernel.org
State New
Series KVM: arm64: Implement support for SME in non-protected guests

Commit Message

Mark Brown Dec. 22, 2023, 4:21 p.m. UTC
ZT0 is a single register with a refreshingly fixed size of 512 bits which,
like ZA, is architecturally accessible only when PSTATE.ZA is set. Add
support for it to the userspace API. As with ZA we allow the register to be
read or written regardless of the state of PSTATE.ZA in order to simplify
userspace usage. The value will be reset to 0 whenever PSTATE.ZA changes
from 0 to 1; userspace can read stale values but these are not observable
by the guest without manipulation of PSTATE.ZA by userspace.

While there is currently only one ZT register, the naming as ZT0 and the
instruction encoding clearly leave room for future extensions adding more
ZT registers. The register ID encoding used here can readily support such
an extension if one is introduced.
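
For illustration, a userspace read of ZT0 through the one-reg interface
might look like the sketch below. This is not part of the patch: it assumes
the KVM_REG_ARM64_SME_ZT_BASE ID introduced earlier in this series, and
read_zt0() is a hypothetical helper.

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>
  #include <asm/kvm.h>

  /* Hypothetical helper, not part of this patch */
  static int read_zt0(int vcpu_fd, uint8_t zt0[KVM_REG_ARM64_SME_ZTREG_SIZE])
  {
  	struct kvm_one_reg reg = {
  		.id   = KVM_REG_ARM64_SME_ZT_BASE,
  		.addr = (uint64_t)(unsigned long)zt0,
  	};

  	/* Readable regardless of the guest's PSTATE.ZA, as described above */
  	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
  }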

Signed-off-by: Mark Brown <broonie@kernel.org>
---
 arch/arm64/include/asm/kvm_host.h |  2 ++
 arch/arm64/include/uapi/asm/kvm.h |  2 ++
 arch/arm64/kvm/guest.c            | 13 +++++++++++--
 3 files changed, 15 insertions(+), 2 deletions(-)

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index a1aa9471084d..6a5002ab8042 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -855,6 +855,8 @@  void vcpu_fp_guest_to_user(struct kvm_vcpu *vcpu);
 	} else {							\
 		__vcpu_vq = vcpu_sme_max_vq(vcpu);			\
 		__size_ret = ZA_SIG_REGS_SIZE(__vcpu_vq);		\
+		if (system_supports_sme2())				\
+			__size_ret += ZT_SIG_REG_SIZE;			\
 	}								\
 									\
 	__size_ret;							\
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 00fb2ea4c057..58640aeb88e4 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -371,6 +371,8 @@  struct kvm_arm_counter_offset {
 	 (((n) & (KVM_ARM64_SME_MAX_ZAHREG - 1)) << 5) |		\
 	 ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
 
+#define KVM_REG_ARM64_SME_ZTREG_SIZE	(512 / 8)
+
 /* Vector lengths pseudo-register: */
 #define KVM_REG_ARM64_SME_VLS		(KVM_REG_ARM64 | KVM_REG_ARM64_SME | \
 					 KVM_REG_SIZE_U512 | 0xffff)
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index cb38af891387..fba5ff377b8b 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -587,7 +587,6 @@  static int sme_reg_to_region(struct vec_state_reg_region *region,
 	const u64 zah_id_min = KVM_REG_ARM64_SME_ZAHREG(0, 0);
 	const u64 zah_id_max = KVM_REG_ARM64_SME_ZAHREG(za_h_max - 1,
 						       SVE_NUM_SLICES - 1);
-
 	unsigned int reg_num;
 
 	unsigned int reqoffset, reqlen; /* User-requested offset and length */
@@ -598,14 +597,24 @@  static int sme_reg_to_region(struct vec_state_reg_region *region,
 	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;
 
 	if (reg->id >= zah_id_min && reg->id <= zah_id_max) {
-		/* ZA is exposed as SVE vectors ZA.H[n] */
 		if (!vcpu_has_sme(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
 			return -ENOENT;
 
+		/* ZA is exposed as SVE vectors ZA.H[n] */
 		reqoffset = ZA_SIG_ZAV_OFFSET(vq, reg_num) -
 			ZA_SIG_REGS_OFFSET;
 		reqlen = KVM_SVE_ZREG_SIZE;
 		maxlen = SVE_SIG_ZREG_SIZE(vq);
+	} else if (reg->id == KVM_REG_ARM64_SME_ZT_BASE) {
+		/* ZT0 is a single 512 bit register */
+		if (!vcpu_has_sme2(vcpu) ||
+		    (reg->id & SVE_REG_SLICE_MASK) > 0 ||
+		    reg_num > 0)
+			return -ENOENT;
+
+		/* ZT0 is stored after ZA */
+		reqlen = KVM_REG_ARM64_SME_ZTREG_SIZE;
+		maxlen = KVM_REG_ARM64_SME_ZTREG_SIZE;
 	} else {
 		return -EINVAL;
 	}
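
As a side note, the transfer size used here should agree with the size
field of the one-reg ID encoding. The check below is only a sketch and not
part of the patch; it assumes, as the U512 VLS pseudo-register above
suggests, that ZT0 is encoded with KVM_REG_SIZE_U512.

  /* Hypothetical consistency check, assuming a U512 one-reg encoding for ZT0 */
  _Static_assert(KVM_REG_ARM64_SME_ZTREG_SIZE == 512 / 8,
  	       "ZT0 transfers 64 bytes");
  _Static_assert(KVM_REG_SIZE(KVM_REG_ARM64 | KVM_REG_SIZE_U512) ==
  	       KVM_REG_ARM64_SME_ZTREG_SIZE,
  	       "U512 one-reg size matches the ZT0 transfer size");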