
[20/71] target/arm: Add ID_AA64SMFR0_EL1

Message ID 20220602214853.496211-21-richard.henderson@linaro.org
State New
Series target/arm: Scalable Matrix Extension

Commit Message

Richard Henderson June 2, 2022, 9:48 p.m. UTC
This register is allocated from the existing block of id registers,
so it is already RES0 for cpus that do not implement SME.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/cpu.h    | 25 +++++++++++++++++++++++++
 target/arm/helper.c |  4 ++--
 target/arm/kvm64.c  |  9 +++++----
 3 files changed, 32 insertions(+), 6 deletions(-)
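
For context, a minimal sketch (not part of the patch) of how the new feature-test
helpers added to cpu.h below are typically consumed, via QEMU's cpu_isar_feature()
macro; the wrapper function name here is hypothetical:

/* Hypothetical caller, for illustration only.  cpu_isar_feature() expands to
 * isar_feature_aa64_sme_f64f64(&cpu->isar), which extracts the F64F64 field
 * from id_aa64smfr0; since the register is RES0 on CPUs that do not implement
 * SME, the check is false there without any extra special-casing.
 */
static bool sme_f64f64_supported(ARMCPU *cpu)
{
    return cpu_isar_feature(aa64_sme_f64f64, cpu);
}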

Comments

Peter Maydell June 6, 2022, 1:05 p.m. UTC | #1
On Thu, 2 Jun 2022 at 23:17, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> This register is allocated from the existing block of id registers,
> so it is already RES0 for cpus that do not implement SME.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---

> --- a/target/arm/kvm64.c
> +++ b/target/arm/kvm64.c
> @@ -682,13 +682,14 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
>          ahcf->isar.id_aa64pfr0 = t;
>
>          /*
> -         * Before v5.1, KVM did not support SVE and did not expose
> -         * ID_AA64ZFR0_EL1 even as RAZ.  After v5.1, KVM still does
> -         * not expose the register to "user" requests like this
> -         * unless the host supports SVE.
> +         * KVM began exposing the unallocated ID registers as RAZ in 4.15.
> +         * Using SVE supported is an easy way to tell if these registers
> +         * are exposed, since both of these depend on SVE anyway.
>           */

This slightly loses the context described in the old comment, though the
old comment isn't quite correct either. Between kernel commits 73433762fcae
(part of the initial implementation of SVE support) and f81cb2c3ad41 (which
fixed this bug), the kernel did indeed not expose ID_AA64ZFR0_EL1 as RAZ if
SVE was not implemented. So there's a range of kernels that had this
bug and for which we need to guard the access to ID_AA64ZFR0_EL1.
This isn't the case for ID_AA64SMFR0_EL1, though, which all kernels
should handle correctly (ignoring the pre-4.15 case).

So I think:
 (1) we should read ID_AA64SMFR0_EL1 further up in the same code
block where we read all the other ID registers like id_aa64mmfr0 etc.

 (2) separately, we should update this comment to read something like:

/*
 * There is a range of kernels between kernel commit 73433762fcae
 * and f81cb2c3ad41 which have a bug where the kernel doesn't expose
 * SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has enabled
 * SVE support, so we only read it here, rather than together with all
 * the other ID registers earlier.
 */

Otherwise
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>

thanks
-- PMM
Richard Henderson June 6, 2022, 4:19 p.m. UTC | #2
On 6/6/22 06:05, Peter Maydell wrote:
> /*
>   * There is a range of kernels between kernel commit 73433762fcae
>   * and f81cb2c3ad41 which have a bug where the kernel doesn't expose
>   * SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has enabled
>   * SVE support, so we only read it here, rather than together with all
>   * the other ID registers earlier.
>   */

Thanks for the wording.  And looking at those two commits makes it all make sense.


r~
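
For reference, a rough sketch of how the kvm64.c hunk might look with both of
Peter's suggestions applied.  This is a fragment of kvm_arm_get_host_cpu_features()
reusing the names already present in that function (err, fdarray, ahcf,
sve_supported); the exact position of the ID_AA64SMFR0_EL1 read among the other
ID-register reads is an assumption, not taken from a posted revision:

        /* Sketch: read ID_AA64SMFR0_EL1 together with the other ID registers,
         * since released kernels do not hide it behind SVE being enabled.
         */
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0,
                              ARM64_SYS_REG(3, 0, 0, 4, 5));

        if (sve_supported) {
            /*
             * There is a range of kernels between kernel commit 73433762fcae
             * and f81cb2c3ad41 which have a bug where the kernel doesn't expose
             * SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has enabled
             * SVE support, so we only read it here, rather than together with
             * all the other ID registers earlier.
             */
            err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
                                  ARM64_SYS_REG(3, 0, 0, 4, 4));
        }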

Patch

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index f6d114aad7..24c5266f35 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -966,6 +966,7 @@  struct ArchCPU {
         uint64_t id_aa64dfr0;
         uint64_t id_aa64dfr1;
         uint64_t id_aa64zfr0;
+        uint64_t id_aa64smfr0;
         uint64_t reset_pmcr_el0;
     } isar;
     uint64_t midr;
@@ -2190,6 +2191,15 @@  FIELD(ID_AA64ZFR0, I8MM, 44, 4)
 FIELD(ID_AA64ZFR0, F32MM, 52, 4)
 FIELD(ID_AA64ZFR0, F64MM, 56, 4)
 
+FIELD(ID_AA64SMFR0, F32F32, 32, 1)
+FIELD(ID_AA64SMFR0, B16F32, 34, 1)
+FIELD(ID_AA64SMFR0, F16F32, 35, 1)
+FIELD(ID_AA64SMFR0, I8I32, 36, 4)
+FIELD(ID_AA64SMFR0, F64F64, 48, 1)
+FIELD(ID_AA64SMFR0, I16I64, 52, 4)
+FIELD(ID_AA64SMFR0, SMEVER, 56, 4)
+FIELD(ID_AA64SMFR0, FA64, 63, 1)
+
 FIELD(ID_DFR0, COPDBG, 0, 4)
 FIELD(ID_DFR0, COPSDBG, 4, 4)
 FIELD(ID_DFR0, MMAPDBG, 8, 4)
@@ -4190,6 +4200,21 @@  static inline bool isar_feature_aa64_sve_f64mm(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F64MM) != 0;
 }
 
+static inline bool isar_feature_aa64_sme_f64f64(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, F64F64);
+}
+
+static inline bool isar_feature_aa64_sme_i16i64(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, I16I64) == 0xf;
+}
+
+static inline bool isar_feature_aa64_sme_fa64(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, FA64);
+}
+
 /*
  * Feature tests for "does this exist in either 32-bit or 64-bit?"
  */
diff --git a/target/arm/helper.c b/target/arm/helper.c
index cb44d528c0..48534db0bd 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -7732,11 +7732,11 @@  void register_cp_regs_for_features(ARMCPU *cpu)
               .access = PL1_R, .type = ARM_CP_CONST,
               .accessfn = access_aa64_tid3,
               .resetvalue = cpu->isar.id_aa64zfr0 },
-            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+            { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
               .access = PL1_R, .type = ARM_CP_CONST,
               .accessfn = access_aa64_tid3,
-              .resetvalue = 0 },
+              .resetvalue = cpu->isar.id_aa64smfr0 },
             { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
               .access = PL1_R, .type = ARM_CP_CONST,
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index b3f635fc95..28001643c6 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -682,13 +682,14 @@  bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
         ahcf->isar.id_aa64pfr0 = t;
 
         /*
-         * Before v5.1, KVM did not support SVE and did not expose
-         * ID_AA64ZFR0_EL1 even as RAZ.  After v5.1, KVM still does
-         * not expose the register to "user" requests like this
-         * unless the host supports SVE.
+         * KVM began exposing the unallocated ID registers as RAZ in 4.15.
+         * Using SVE supported is an easy way to tell if these registers
+         * are exposed, since both of these depend on SVE anyway.
          */
         err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
                               ARM64_SYS_REG(3, 0, 0, 4, 4));
+        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0,
+                              ARM64_SYS_REG(3, 0, 0, 4, 5));
     }
 
     kvm_arm_destroy_scratch_host_vcpu(fdarray);
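
As a standalone illustration (not QEMU code) of why the new feature tests differ
in form, the sketch below mimics FIELD_EX64 with a plain bit-extraction helper:
the single-bit F64F64 and FA64 fields are simply tested for non-zero, while the
4-bit I16I64 field is architecturally all-ones (0xf) when the SME I16I64
instructions are implemented, hence the comparison against 0xf:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for QEMU's FIELD_EX64: extract 'len' bits starting at 'start'. */
static uint64_t extract_bits(uint64_t reg, unsigned start, unsigned len)
{
    return (reg >> start) & ((1ull << len) - 1);
}

int main(void)
{
    /* Hypothetical ID_AA64SMFR0_EL1 value with FA64, I16I64 and F64F64 set. */
    uint64_t id_aa64smfr0 = (1ull << 63)     /* FA64   */
                          | (0xfull << 52)   /* I16I64 */
                          | (1ull << 48);    /* F64F64 */

    bool f64f64 = extract_bits(id_aa64smfr0, 48, 1) != 0;
    bool i16i64 = extract_bits(id_aa64smfr0, 52, 4) == 0xf;
    bool fa64   = extract_bits(id_aa64smfr0, 63, 1) != 0;

    printf("f64f64=%d i16i64=%d fa64=%d\n", f64f64, i16i64, fa64);
    return 0;
}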