
[v5,09/10] arm64: add sysfs vulnerability show for speculative store bypass

Message ID 20190227010544.597579-10-jeremy.linton@arm.com
State Superseded
Series arm64: add system vulnerability sysfs entries

Commit Message

Jeremy Linton Feb. 27, 2019, 1:05 a.m. UTC
Return status based on ssbd_state and the arm64 SSBS feature. If
the mitigation is disabled, or the firmware isn't responding, then
return the expected machine state based on a new blacklist of known
vulnerable cores.

Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>

---
 arch/arm64/kernel/cpu_errata.c | 43 ++++++++++++++++++++++++++++++++++
 1 file changed, 43 insertions(+)

-- 
2.20.1

Comments

Andre Przywara March 1, 2019, 7:02 a.m. UTC | #1
Hi,

On 2/26/19 7:05 PM, Jeremy Linton wrote:
> Return status based on ssbd_state and the arm64 SSBS feature. If
> the mitigation is disabled, or the firmware isn't responding then
> return the expected machine state based on a new blacklist of known
> vulnerable cores.
> 
> Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
> ---
>   arch/arm64/kernel/cpu_errata.c | 43 ++++++++++++++++++++++++++++++++++
>   1 file changed, 43 insertions(+)
> 
> diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
> index 5f5611d17dc1..e1b03f643799 100644
> --- a/arch/arm64/kernel/cpu_errata.c
> +++ b/arch/arm64/kernel/cpu_errata.c
> @@ -279,6 +279,7 @@ static int detect_harden_bp_fw(void)
>   DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
>   
>   int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
> +static bool __ssb_safe = true;
>   
>   static const struct ssbd_options {
>   	const char	*str;
> @@ -387,6 +388,9 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
>   
>   	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
>   
> +	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
> +		__ssb_safe = false;

Is that the only place where we set it to false?
What about if firmware reports that (at least one core) is vulnerable?

> +
>   	if (this_cpu_has_cap(ARM64_SSBS)) {
>   		required = false;
>   		goto out_printmsg;
> @@ -420,6 +424,7 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
>   		ssbd_state = ARM64_SSBD_UNKNOWN;
>   		return false;
>   
> +	/* machines with mixed mitigation requirements must not return this */
>   	case SMCCC_RET_NOT_REQUIRED:
>   		pr_info_once("%s mitigation not required\n", entry->desc);
>   		ssbd_state = ARM64_SSBD_MITIGATED;
> @@ -475,6 +480,16 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
>   	return required;
>   }
>   
> +/* known vulnerable cores */
> +static const struct midr_range arm64_ssb_cpus[] = {
> +	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
> +	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
> +	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
> +	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
> +	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
> +	{},
> +};
> +
>   static void __maybe_unused
>   cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
>   {
> @@ -770,6 +785,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
>   		.capability = ARM64_SSBD,
>   		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
>   		.matches = has_ssbd_mitigation,
> +		.midr_range_list = arm64_ssb_cpus,
>   	},
>   #ifdef CONFIG_ARM64_ERRATUM_1188873
>   	{
> @@ -808,3 +824,30 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
>   
>   	return sprintf(buf, "Vulnerable\n");
>   }
> +
> +ssize_t cpu_show_spec_store_bypass(struct device *dev,
> +		struct device_attribute *attr, char *buf)
> +{
> +	/*
> +	 *  Two assumptions: First, ssbd_state reflects the worse case
> +	 *  for hetrogenous machines, and that if SSBS is supported its

                 heterogeneous

Cheers,
Andre.

Jeremy Linton March 1, 2019, 4:41 p.m. UTC | #2
Hi,

On 3/1/19 1:02 AM, Andre Przywara wrote:
> Hi,
> 
> On 2/26/19 7:05 PM, Jeremy Linton wrote:
>> Return status based on ssbd_state and the arm64 SSBS feature. If
>> the mitigation is disabled, or the firmware isn't responding then
>> return the expected machine state based on a new blacklist of known
>> vulnerable cores.
>>
>> Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
>> ---
>>   arch/arm64/kernel/cpu_errata.c | 43 ++++++++++++++++++++++++++++++++++
>>   1 file changed, 43 insertions(+)
>>
>> diff --git a/arch/arm64/kernel/cpu_errata.c 
>> b/arch/arm64/kernel/cpu_errata.c
>> index 5f5611d17dc1..e1b03f643799 100644
>> --- a/arch/arm64/kernel/cpu_errata.c
>> +++ b/arch/arm64/kernel/cpu_errata.c
>> @@ -279,6 +279,7 @@ static int detect_harden_bp_fw(void)
>>   DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
>>   int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
>> +static bool __ssb_safe = true;
>>   static const struct ssbd_options {
>>       const char    *str;
>> @@ -387,6 +388,9 @@ static bool has_ssbd_mitigation(const struct 
>> arm64_cpu_capabilities *entry,
>>       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
>> +    if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
>> +        __ssb_safe = false;
> 
> Is that the only place where we set it to false?
> What about if firmware reports that (at least one core) is vulnerable?

Maybe. Normally, if the firmware is functional enough to report the core
state, then I would expect the kernel mitigation to be enabled. But you're
right: if the mitigation is disabled, it's possible for us to miss the
blacklist and report the machine safe even though the firmware reports it
vulnerable.

The core problem, though, is that the blacklist isn't complete, because we
also report an incorrect state if the firmware fails to respond. That said,
there are still some other interesting paths here which might fall into the
"unknown" case if you get creative enough (e.g. force-disabling the
mitigation on an SSBS-mitigated machine).

Anyway, it's probably worth flagging the machine vulnerable if we get
SMCCC_RET_SUCCESS, to cover cases that miss the blacklist.
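
Roughly along these lines, in the SMCCC_RET_SUCCESS arm of the switch in
has_ssbd_mitigation() (an untested sketch of the idea, not part of this
posting):

	case SMCCC_RET_SUCCESS:
		/* firmware says at least this core needs the mitigation */
		__ssb_safe = false;
		required = true;
		break;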



Patch

diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 5f5611d17dc1..e1b03f643799 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -279,6 +279,7 @@  static int detect_harden_bp_fw(void)
 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 
 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
+static bool __ssb_safe = true;
 
 static const struct ssbd_options {
 	const char	*str;
@@ -387,6 +388,9 @@  static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 
 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 
+	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
+		__ssb_safe = false;
+
 	if (this_cpu_has_cap(ARM64_SSBS)) {
 		required = false;
 		goto out_printmsg;
@@ -420,6 +424,7 @@  static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 		ssbd_state = ARM64_SSBD_UNKNOWN;
 		return false;
 
+	/* machines with mixed mitigation requirements must not return this */
 	case SMCCC_RET_NOT_REQUIRED:
 		pr_info_once("%s mitigation not required\n", entry->desc);
 		ssbd_state = ARM64_SSBD_MITIGATED;
@@ -475,6 +480,16 @@  static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 	return required;
 }
 
+/* known vulnerable cores */
+static const struct midr_range arm64_ssb_cpus[] = {
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+	{},
+};
+
 static void __maybe_unused
 cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
 {
@@ -770,6 +785,7 @@  const struct arm64_cpu_capabilities arm64_errata[] = {
 		.capability = ARM64_SSBD,
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 		.matches = has_ssbd_mitigation,
+		.midr_range_list = arm64_ssb_cpus,
 	},
 #ifdef CONFIG_ARM64_ERRATUM_1188873
 	{
@@ -808,3 +824,30 @@  ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
 
 	return sprintf(buf, "Vulnerable\n");
 }
+
+ssize_t cpu_show_spec_store_bypass(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	/*
+	 *  Two assumptions: First, ssbd_state reflects the worse case
+	 *  for hetrogenous machines, and that if SSBS is supported its
+	 *  supported by all cores.
+	 */
+	switch (ssbd_state) {
+	case ARM64_SSBD_MITIGATED:
+		return sprintf(buf, "Not affected\n");
+
+	case ARM64_SSBD_KERNEL:
+	case ARM64_SSBD_FORCE_ENABLE:
+		if (cpus_have_cap(ARM64_SSBS))
+			return sprintf(buf, "Not affected\n");
+		if (IS_ENABLED(CONFIG_ARM64_SSBD))
+			return sprintf(buf,
+			    "Mitigation: Speculative Store Bypass disabled\n");
+	}
+
+	if (__ssb_safe)
+		return sprintf(buf, "Not affected\n");
+
+	return sprintf(buf, "Vulnerable\n");
+}
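
For reference (not part of the patch): this show function backs the generic
spec_store_bypass attribute registered by drivers/base/cpu.c, so the result
is reported through sysfs roughly like this (the exact string depends on
ssbd_state, SSBS support, and the blacklist above):

    $ cat /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
    Mitigation: Speculative Store Bypass disabled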