[v9,40/40] squash traps

Message ID 20220126151120.3811248-41-broonie@kernel.org
State New
Series arm64/sme: Initial support for the Scalable Matrix Extension

Commit Message

Mark Brown Jan. 26, 2022, 3:11 p.m. UTC
---
 arch/arm64/kvm/fpsimd.c          | 22 ++++++++++------------
 arch/arm64/kvm/hyp/nvhe/switch.c | 20 ++++++++++----------
 arch/arm64/kvm/hyp/vhe/switch.c  |  4 ++--
 3 files changed, 22 insertions(+), 24 deletions(-)

Patch

diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index cecaddb644ce..1c585553d74f 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -153,18 +153,16 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 	 * If we have VHE then the Hyp code will reset CPACR_EL1 to
 	 * CPACR_EL1_DEFAULT and we need to reenable SME.
 	 */
-	if (has_vhe()) {
-		if (system_supports_sme()) {
-			/* Also restore EL0 state seen on entry */
-			if (vcpu->arch.flags & KVM_ARM64_HOST_SME_ENABLED)
-				sysreg_clear_set(CPACR_EL1, 0,
-						 CPACR_EL1_SMEN_EL0EN |
-						 CPACR_EL1_SMEN_EL1EN);
-			else
-				sysreg_clear_set(CPACR_EL1,
-						 CPACR_EL1_SMEN_EL0EN,
-						 CPACR_EL1_SMEN_EL1EN);
-		}
+	if (has_vhe() && system_supports_sme()) {
+		/* Also restore EL0 state seen on entry */
+		if (vcpu->arch.flags & KVM_ARM64_HOST_SME_ENABLED)
+			sysreg_clear_set(CPACR_EL1, 0,
+					 CPACR_EL1_SMEN_EL0EN |
+					 CPACR_EL1_SMEN_EL1EN);
+		else
+			sysreg_clear_set(CPACR_EL1,
+					 CPACR_EL1_SMEN_EL0EN,
+					 CPACR_EL1_SMEN_EL1EN);
 	}
 
 	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 184bf6bd79b9..caace61ea459 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -47,21 +47,20 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 		val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
 		__activate_traps_fpsimd32(vcpu);
 	}
-	if (IS_ENABLED(CONFIG_ARM64_SME) && cpus_have_final_cap(ARM64_SME))
+	if (cpus_have_final_cap(ARM64_SME))
 		val |= CPTR_EL2_TSM;
 
 	write_sysreg(val, cptr_el2);
 	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
 
-	if (IS_ENABLED(CONFIG_ARM64_SME) && cpus_have_final_cap(ARM64_SME) &&
-	    cpus_have_final_cap(ARM64_HAS_FGT)) {
+	if (cpus_have_final_cap(ARM64_SME)) {
 		val = read_sysreg_s(SYS_HFGRTR_EL2);
-		val &= ~(HFGxTR_EL2_nTPIDR_EL0_MASK |
+		val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
 			 HFGxTR_EL2_nSMPRI_EL1_MASK);
 		write_sysreg_s(val, SYS_HFGRTR_EL2);
 
 		val = read_sysreg_s(SYS_HFGWTR_EL2);
-		val &= ~(HFGxTR_EL2_nTPIDR_EL0_MASK |
+		val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
 			 HFGxTR_EL2_nSMPRI_EL1_MASK);
 		write_sysreg_s(val, SYS_HFGWTR_EL2);
 	}
@@ -109,23 +108,24 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 
 	write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
 
-	if (IS_ENABLED(CONFIG_ARM64_SME) && cpus_have_final_cap(ARM64_SME) &&
-	    cpus_have_final_cap(ARM64_HAS_FGT)) {
+	if (cpus_have_final_cap(ARM64_SME)) {
 		u64 val;
 
 		val = read_sysreg_s(SYS_HFGRTR_EL2);
-		val |= HFGxTR_EL2_nTPIDR_EL0_MASK | HFGxTR_EL2_nSMPRI_EL1_MASK;
+		val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
+			HFGxTR_EL2_nSMPRI_EL1_MASK;
 		write_sysreg_s(val, SYS_HFGRTR_EL2);
 
 		val = read_sysreg_s(SYS_HFGWTR_EL2);
-		val |= HFGxTR_EL2_nTPIDR_EL0_MASK | HFGxTR_EL2_nSMPRI_EL1_MASK;
+		val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
+			HFGxTR_EL2_nSMPRI_EL1_MASK;
 		write_sysreg_s(val, SYS_HFGWTR_EL2);
 	}
 
 	cptr = CPTR_EL2_DEFAULT;
 	if (vcpu_has_sve(vcpu) && (vcpu->arch.flags & KVM_ARM64_FP_ENABLED))
 		cptr |= CPTR_EL2_TZ;
-	if (IS_ENABLED(CONFIG_ARM64_SME) && cpus_have_final_cap(ARM64_SME))
+	if (cpus_have_final_cap(ARM64_SME))
 		cptr &= ~CPTR_EL2_TSM;
 
 	write_sysreg(cptr, cptr_el2);
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 21217485514d..a4d2fb5c9710 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -60,7 +60,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 		__activate_traps_fpsimd32(vcpu);
 	}
 
-	if (IS_ENABLED(CONFIG_ARM64_SME) && cpus_have_final_cap(ARM64_SME))
+	if (cpus_have_final_cap(ARM64_SME))
 		write_sysreg(read_sysreg(sctlr_el2) & ~SCTLR_ELx_ENTP2,
 			     sctlr_el2);
 
@@ -85,7 +85,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 	 */
 	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 
-	if (IS_ENABLED(CONFIG_ARM64_SME) && cpus_have_final_cap(ARM64_SME))
+	if (cpus_have_final_cap(ARM64_SME))
 		write_sysreg(read_sysreg(sctlr_el2) | SCTLR_ELx_ENTP2,
 			     sctlr_el2);
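
For context on the fpsimd.c hunk: sysreg_clear_set() is the kernel's read-modify-write helper for system registers; it clears one bitmask, sets another, and skips the register write when the value is unchanged. Below is a minimal standalone C sketch of that pattern, not kernel code: the register is modelled as a plain variable (the real helper uses MRS/MSR accessors), and host_sme_enabled is a hypothetical stand-in for the KVM_ARM64_HOST_SME_ENABLED vcpu flag.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* CPACR_EL1.SMEN occupies bits [25:24]; names mirror the kernel's defines. */
#define CPACR_EL1_SMEN_EL1EN	(UINT64_C(1) << 24)
#define CPACR_EL1_SMEN_EL0EN	(UINT64_C(1) << 25)

/* Simulated system register; the kernel operates on the real CPACR_EL1. */
static uint64_t cpacr_el1;

/*
 * Same read-modify-write shape as the kernel's sysreg_clear_set():
 * clear 'clr', set 'set', and only write back when the value changed.
 */
static void clear_set(uint64_t *reg, uint64_t clr, uint64_t set)
{
	uint64_t old = *reg;
	uint64_t new = (old & ~clr) | set;

	if (new != old)
		*reg = new;
}

int main(void)
{
	int host_sme_enabled = 0;	/* hypothetical stand-in for the vcpu flag */

	/* Mirrors the restore path in kvm_arch_vcpu_put_fp() above. */
	if (host_sme_enabled)
		clear_set(&cpacr_el1, 0,
			  CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN);
	else
		clear_set(&cpacr_el1, CPACR_EL1_SMEN_EL0EN,
			  CPACR_EL1_SMEN_EL1EN);

	printf("CPACR_EL1 = 0x%" PRIx64 "\n", cpacr_el1);
	return 0;
}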
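
A note on polarity for the nVHE hunks: the FEAT_FGT nXXX bits in HFGRTR_EL2/HFGWTR_EL2 are negative traps, meaning a set bit leaves the access untrapped. Clearing HFGxTR_EL2_nTPIDR2_EL0_MASK and HFGxTR_EL2_nSMPRI_EL1_MASK in __activate_traps() therefore enables trapping of guest TPIDR2_EL0 and SMPRI_EL1 accesses, while setting the same bits back in __deactivate_traps() restores untrapped access for the host.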