Add code to track whether all the cores in the machine are vulnerable,
and whether all the vulnerable cores have been mitigated. Once we have
that information, we can add the sysfs stub and provide an accurate
view of what is known about the machine.

Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
---
 arch/arm64/kernel/cpu_errata.c | 61 +++++++++++++++++++++++++++++++---
 1 file changed, 56 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -111,6 +111,11 @@ atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

uint arm64_requested_vuln_attrs = VULN_SPECTREV1;

+#if defined(CONFIG_HARDEN_BRANCH_PREDICTOR) || defined(CONFIG_GENERIC_CPU_VULNERABILITIES)
+/* Track overall mitigation state. We are only mitigated if all cores are ok */
+static bool __hardenbp_enab = true;
+#endif
+
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
@@ -233,15 +238,19 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
if (!entry->matches(entry, SCOPE_LOCAL_CPU))
return;

- if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+ if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
+ __hardenbp_enab = false;
return;
+ }

switch (psci_ops.conduit) {
case PSCI_CONDUIT_HVC:
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
- if ((int)res.a0 < 0)
+ if ((int)res.a0 < 0) {
+ __hardenbp_enab = false;
return;
+ }
cb = call_hvc_arch_workaround_1;
/* This is a guest, no need to patch KVM vectors */
smccc_start = NULL;
@@ -251,14 +260,17 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
case PSCI_CONDUIT_SMC:
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
- if ((int)res.a0 < 0)
+ if ((int)res.a0 < 0) {
+ __hardenbp_enab = false;
return;
+ }
cb = call_smc_arch_workaround_1;
smccc_start = __smccc_workaround_1_smc_start;
smccc_end = __smccc_workaround_1_smc_end;
break;

default:
+ __hardenbp_enab = false;
return;
}

@@ -509,7 +521,32 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
CAP_MIDR_RANGE_LIST(midr_list)

-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#if defined(CONFIG_HARDEN_BRANCH_PREDICTOR) || \
+ defined(CONFIG_GENERIC_CPU_VULNERABILITIES)
+
+static bool __spectrev2_safe = true;
+
+/*
+ * Track overall bp hardening for all heterogeneous cores in the machine.
+ * We are only considered "safe" if all booted cores are known safe.
+ */
+static bool __maybe_unused
+check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ bool is_vul;
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+ is_vul = is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
+
+ if (is_vul)
+ __spectrev2_safe = false;
+
+ arm64_requested_vuln_attrs |= VULN_SPECTREV2;
+
+ return is_vul;
+}
+
/*
* List of CPUs where we need to issue a psci call to
* harden the branch predictor.
@@ -707,7 +744,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
.cpu_enable = enable_smccc_arch_workaround_1,
- ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = check_branch_predictor,
+ .midr_range_list = arm64_bp_harden_smccc_cpus,
},
#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS
@@ -758,4 +797,16 @@ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ if (__spectrev2_safe)
+ return sprintf(buf, "Not affected\n");
+
+ if (__hardenbp_enab)
+ return sprintf(buf, "Mitigation: Branch predictor hardening\n");
+
+ return sprintf(buf, "Vulnerable\n");
+}
+
#endif
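
For context: as of the mainline base this series builds on, the
CONFIG_GENERIC_CPU_VULNERABILITIES code in drivers/base/cpu.c provides a
weak "Not affected" default for each vulnerabilities/* attribute, which
the cpu_show_spectre_v2() added above overrides; the VULN_SPECTREV2 bit
OR'd into arm64_requested_vuln_attrs asks the earlier patches in this
series to register the spectre_v2 attribute at all. A sketch of the
generic side (shown only for reference, not part of this patch):

	#include <linux/cpu.h>
	#include <linux/device.h>

	/* Weak default; architectures that know better override it. */
	ssize_t __weak cpu_show_spectre_v2(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
	{
		return sprintf(buf, "Not affected\n");
	}

	static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);

On a vulnerable machine where every booted core took the workaround,
userspace should then see:

	$ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
	Mitigation: Branch predictor hardening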
-- 
2.17.2