[RFC,v2,20/22] KVM: arm64: Support SME version configuration via ID registers

Message ID 20231222-kvm-arm64-sme-v2-20-da226cb180bb@kernel.org
State New
Series KVM: arm64: Implement support for SME in non-protected guests

Commit Message

Mark Brown Dec. 22, 2023, 4:21 p.m. UTC
As well as a substantial set of features which provide additional
instructions, there are also two current extensions which add new
architectural state: SME2 (which adds the ZT0 register) and FA64 (which
makes FFR valid in streaming mode SVE). Allow all of these to be
configured through writes to the ID registers.
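
As an illustration (not part of this patch), a VMM could use the
existing one-reg interface to constrain the SME version advertised to a
guest. The sketch below assumes an already initialised vCPU fd and uses
illustrative PFR1_SME_* constants for the ID_AA64PFR1_EL1.SME field
(bits [27:24]); it is a sketch of the intended usage, not code from
this series.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* ID_AA64PFR1_EL1 is op0=3, op1=0, CRn=0, CRm=4, op2=1 */
#define PFR1_REG	ARM64_SYS_REG(3, 0, 0, 4, 1)
/* Illustrative field definitions, not taken from the kernel headers */
#define PFR1_SME_SHIFT	24
#define PFR1_SME_MASK	(0xfULL << PFR1_SME_SHIFT)
#define PFR1_SME_IMP	1ULL		/* SME only, no SME2 */

/* Clamp the advertised SME version to plain SME for this vCPU. */
static int limit_guest_to_sme1(int vcpu_fd)
{
	uint64_t val;
	struct kvm_one_reg reg = {
		.id   = PFR1_REG,
		.addr = (uint64_t)&val,
	};

	/* Fetch KVM's default (sanitised) view of ID_AA64PFR1_EL1. */
	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
		return -1;

	/* Downgrade SME2 or later to plain SME. */
	if (((val & PFR1_SME_MASK) >> PFR1_SME_SHIFT) > PFR1_SME_IMP) {
		val &= ~PFR1_SME_MASK;
		val |= PFR1_SME_IMP << PFR1_SME_SHIFT;
	}

	/* Write it back; KVM rejects values above what the host supports. */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}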

At present the guest support for SME2 and FA64 does not use the values
configured here, pending clarity on the approach to be taken generally
with regard to parsing supported features from ID registers.

We always allocate storage for the new architectural state whenever the
host supports it, regardless of whether the guest configuration enables
it. In the case of FFR this simplifies the already fiddly allocation
logic and the storage is needed anyway when SVE is also supported. In
the case of ZT0 the register is 64 bytes, which is not completely
trivial (though small relative to the rest of the SME state), but
SME1-only guests are not expected to be common enough to justify the
complication of handling them specially. If this proves to be a problem
we can improve things incrementally.

Signed-off-by: Mark Brown <broonie@kernel.org>
---
 arch/arm64/kvm/sys_regs.c | 30 ++++++++++++++++++++++--------
 1 file changed, 22 insertions(+), 8 deletions(-)

Patch

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index f908aa3fb606..1ea658615467 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1412,13 +1412,6 @@  static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
 	val = read_sanitised_ftr_reg(id);
 
 	switch (id) {
-	case SYS_ID_AA64PFR1_EL1:
-		if (!kvm_has_mte(vcpu->kvm))
-			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
-
-		if (!vcpu_has_sme(vcpu))
-			val &= ~ID_AA64PFR1_EL1_SME_MASK;
-		break;
 	case SYS_ID_AA64ISAR1_EL1:
 		if (!vcpu_has_ptrauth(vcpu))
 			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
@@ -1582,6 +1575,20 @@  static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
 	return val;
 }
 
+static u64 read_sanitised_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
+					  const struct sys_reg_desc *rd)
+{
+	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
+
+	if (!vcpu_has_sme(vcpu))
+		val &= ~ID_AA64PFR1_EL1_SME_MASK;
+
+	if (!kvm_has_mte(vcpu->kvm))
+		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
+
+	return val;
+}
+
 #define ID_REG_LIMIT_FIELD_ENUM(val, reg, field, limit)			       \
 ({									       \
 	u64 __f_val = FIELD_GET(reg##_##field##_MASK, val);		       \
@@ -2164,7 +2171,14 @@  static const struct sys_reg_desc sys_reg_descs[] = {
 		   ID_AA64PFR0_EL1_GIC |
 		   ID_AA64PFR0_EL1_AdvSIMD |
 		   ID_AA64PFR0_EL1_FP), },
-	ID_SANITISED(ID_AA64PFR1_EL1),
+	{ SYS_DESC(SYS_ID_AA64PFR1_EL1),
+	  .access = access_id_reg,
+	  .get_user = get_id_reg,
+	  .set_user = set_id_reg,
+	  .reset = read_sanitised_id_aa64pfr1_el1,
+	  .val = ~(ID_AA64PFR1_EL1_MPAM_frac |
+		   ID_AA64PFR1_EL1_RAS_frac |
+		   ID_AA64PFR1_EL1_MTE), },
 	ID_UNALLOCATED(4,2),
 	ID_UNALLOCATED(4,3),
 	ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),