@@ -48,7 +48,10 @@
#define ARM64_HAS_CACHE_IDC 27
#define ARM64_HAS_CACHE_DIC 28
#define ARM64_HW_DBM 29
+#define ARM64_HAS_ADDRESS_AUTH_ARCH 30
+#define ARM64_HAS_ADDRESS_AUTH_IMP_DEF 31
+#define ARM64_HAS_ADDRESS_AUTH 32

-#define ARM64_NCAPS 30
+#define ARM64_NCAPS 33

#endif /* __ASM_CPUCAPS_H */
@@ -142,6 +142,10 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -1025,6 +1029,22 @@ static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
}
#endif

+#ifdef CONFIG_ARM64_PTR_AUTH
+static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
+ int __unused)
+{
+ u64 isar1 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
+ bool api, apa;
+
+ apa = cpuid_feature_extract_unsigned_field(isar1,
+ ID_AA64ISAR1_APA_SHIFT) > 0;
+ api = cpuid_feature_extract_unsigned_field(isar1,
+ ID_AA64ISAR1_API_SHIFT) > 0;
+
+ return apa || api;
+}
+#endif /* CONFIG_ARM64_PTR_AUTH */
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -1201,6 +1221,33 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_enable_hw_dbm,
},
#endif
+#ifdef CONFIG_ARM64_PTR_AUTH
+ {
+ .desc = "Address authentication (architected algorithm)",
+ .capability = ARM64_HAS_ADDRESS_AUTH_ARCH,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_APA_SHIFT,
+ .min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
+ .matches = has_cpuid_feature,
+ },
+ {
+ .desc = "Address authentication (IMP DEF algorithm)",
+ .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_API_SHIFT,
+ .min_field_value = ID_AA64ISAR1_API_IMP_DEF,
+ .matches = has_cpuid_feature,
+ },
+ {
+ .capability = ARM64_HAS_ADDRESS_AUTH,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_address_auth,
+ },
+#endif /* CONFIG_ARM64_PTR_AUTH */
{},
};
So that we can dynamically handle the presence of pointer authentication
functionality, wire up probing code in cpufeature.c.

From ARMv8.3 onwards, ID_AA64ISAR1 is no longer entirely RES0, and now has
four fields describing the presence of pointer authentication functionality:

* APA - address authentication present, using an architected algorithm
* API - address authentication present, using an IMP DEF algorithm
* GPA - generic authentication present, using an architected algorithm
* GPI - generic authentication present, using an IMP DEF algorithm

For the moment we only care about address authentication, so we only need
to check APA and API. It is assumed that if all CPUs support an IMP DEF
algorithm, the same algorithm is used across all CPUs.

Note that when we implement KVM support, we will also need to ensure that
CPUs have uniform support for GPA and GPI.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
---
 arch/arm64/include/asm/cpucaps.h |  5 ++++-
 arch/arm64/kernel/cpufeature.c   | 47 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 51 insertions(+), 1 deletion(-)

-- 
2.11.0
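
[Editorial note: the sketch below is not part of the patch. It illustrates, under stated assumptions, how the detection above composes: has_address_auth() reports address authentication if either the APA or API field of the sanitised ID_AA64ISAR1_EL1 value is non-zero, and later kernel code can then test the combined ARM64_HAS_ADDRESS_AUTH capability. The helper names ptr_auth_supported() and ptr_auth_algorithm_is_architected() are hypothetical; cpus_have_const_cap(), read_sanitised_ftr_reg() and cpuid_feature_extract_unsigned_field() are existing kernel helpers also used by the patch.]

/*
 * Illustrative sketch only, not part of the patch. Hypothetical helpers
 * showing how a consumer might use the capabilities added above.
 */
#include <asm/cpufeature.h>
#include <asm/sysreg.h>

static bool ptr_auth_supported(void)
{
	/*
	 * ARM64_HAS_ADDRESS_AUTH is set once every CPU implements either
	 * the architected (APA) or an IMP DEF (API) address auth algorithm,
	 * as established by has_address_auth() in the patch.
	 */
	return cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
}

static bool ptr_auth_algorithm_is_architected(void)
{
	u64 isar1 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);

	/* APA is a 4-bit unsigned field; any non-zero value means present. */
	return cpuid_feature_extract_unsigned_field(isar1,
						    ID_AA64ISAR1_APA_SHIFT) > 0;
}

The combined ARM64_HAS_ADDRESS_AUTH entry has no .sys_reg/.field_pos members because it is not matched against a single ID register field; it uses the custom has_address_auth() matcher to express "APA or API", which is why the two per-algorithm capabilities are defined separately.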