[v3,6/7] arm64: add sysfs vulnerability show for speculative store bypass

Message ID 20190109235544.2992426-7-jeremy.linton@arm.com
State Superseded
Series arm64: add system vulnerability sysfs entries

Commit Message

Jeremy Linton Jan. 9, 2019, 11:55 p.m. UTC
Return status based on ssbd_state and the arm64 SSBS feature. If
the mitigation is disabled, or the firmware isn't responding, then
return the expected machine state based on a new blacklist of known
vulnerable cores.

Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>

---
 arch/arm64/kernel/cpu_errata.c | 48 ++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

-- 
2.17.2

Comments

Marc Zyngier Jan. 14, 2019, 10:15 a.m. UTC | #1
On 09/01/2019 23:55, Jeremy Linton wrote:
> Return status based on ssbd_state and the arm64 SSBS feature. If
> the mitigation is disabled, or the firmware isn't responding, then
> return the expected machine state based on a new blacklist of known
> vulnerable cores.
>
> Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>

[...]

> +	if (__ssb_safe)
> +		return sprintf(buf, "Not affected\n");

The kbuild robot reports that this fails if CONFIG_ARM64_SSBD is not
selected. What should we print in this case? "Vulnerable"? Or "Unknown"?

> +
> +	return sprintf(buf, "Vulnerable\n");
> +}
> +
>  #endif


Thanks,

	M.
-- 
Jazz is not dead. It just smells funny...
Jeremy Linton Jan. 14, 2019, 4:37 p.m. UTC | #2
Hi,

On 01/14/2019 04:15 AM, Marc Zyngier wrote:
> On 09/01/2019 23:55, Jeremy Linton wrote:
>> Return status based on ssbd_state and the arm64 SSBS feature. If
>> the mitigation is disabled, or the firmware isn't responding, then
>> return the expected machine state based on a new blacklist of known
>> vulnerable cores.

[...]

>> +	if (__ssb_safe)
>> +		return sprintf(buf, "Not affected\n");
>
> The kbuild robot reports that this fails if CONFIG_ARM64_SSBD is not
> selected. What should we print in this case? "Vulnerable"? Or "Unknown"?


The immediate fix is to move the __ssb_safe variable into its own
conditional block guarded by CONFIG_GENERIC_CPU_VULNERABILITIES ||
CONFIG_ARM64_SSBD. If the mitigation isn't built in, this code won't be
run anyway because the sysfs entry won't be populated.
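
Roughly this shape (an untested sketch, not the code from the posted
patch):

#if defined(CONFIG_GENERIC_CPU_VULNERABILITIES) || \
    defined(CONFIG_ARM64_SSBD)
/* Cleared when any CPU matches the SSB blacklist; read by the sysfs show. */
static bool __ssb_safe = true;
#endif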


But these CONFIG_ conditionals are less than ideal (and would be even
uglier if they were made more efficient). My own opinion at this point
is that we should really remove the compile-time configs and leave the
mitigation built in all the time. The raw code is fairly small, and we
could add nospectre_v2-style command line options so that users can
choose to disable them at runtime. That would also remove the need to
modify the core cpu vulnerabilities sysfs code.
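
For illustration, the runtime switch could be parsed along these lines
(a hypothetical sketch; the option name and placement are assumptions
for discussion, not part of this series):

/* Parse "nospectre_v2" from the kernel command line at early boot. */
static bool nospectre_v2;

static int __init parse_nospectre_v2(char *str)
{
	nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);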
Marc Zyngier Jan. 14, 2019, 5:05 p.m. UTC | #3
On 14/01/2019 16:37, Jeremy Linton wrote:
> Hi,
>
> On 01/14/2019 04:15 AM, Marc Zyngier wrote:
>> On 09/01/2019 23:55, Jeremy Linton wrote:
>>> Return status based on ssbd_state and the arm64 SSBS feature. If
>>> the mitigation is disabled, or the firmware isn't responding, then
>>> return the expected machine state based on a new blacklist of known
>>> vulnerable cores.

[...]

>>> +	if (__ssb_safe)
>>> +		return sprintf(buf, "Not affected\n");
>>
>> The kbuild robot reports that this fails if CONFIG_ARM64_SSBD is not
>> selected. What should we print in this case? "Vulnerable"? Or "Unknown"?
>
> The immediate fix is to move the __ssb_safe variable into its own
> conditional block guarded by CONFIG_GENERIC_CPU_VULNERABILITIES ||
> CONFIG_ARM64_SSBD. If the mitigation isn't built in, this code won't be
> run anyway because the sysfs entry won't be populated.


But in that case, we should probably assume that the system is
vulnerable, and use a different default value for __ssb_safe.
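
Something like this, say (sketch only):

#ifdef CONFIG_ARM64_SSBD
/* Assume safe until a blacklisted core is seen. */
static bool __ssb_safe = true;
#else
/* No mitigation support built in: assume vulnerable. */
static bool __ssb_safe = false;
#endif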

> But these CONFIG_ conditionals are less than ideal (and would be even
> uglier if they were made more efficient). My own opinion at this point
> is that we should really remove the compile-time configs and leave the
> mitigation built in all the time. The raw code is fairly small, and we
> could add nospectre_v2-style command line options so that users can
> choose to disable them at runtime. That would also remove the need to
> modify the core cpu vulnerabilities sysfs code.


That'd work for me. The whole thing is now an intractable mess, and I'd
welcome some level of simplification.

Thanks,

	M.
-- 
Jazz is not dead. It just smells funny...

Patch

diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index ee286d606d9b..c8ff96158b94 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -288,6 +288,7 @@  enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 
 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
+static bool __ssb_safe = true;
 
 static const struct ssbd_options {
 	const char	*str;
@@ -385,10 +386,18 @@  static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 {
 	struct arm_smccc_res res;
 	bool required = true;
+	bool is_vul;
 	s32 val;
 
 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 
+	is_vul = is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
+
+	if (is_vul)
+		__ssb_safe = false;
+
+	arm64_requested_vuln_attrs |= VULN_SSB;
+
 	if (this_cpu_has_cap(ARM64_SSBS)) {
 		required = false;
 		goto out_printmsg;
@@ -422,6 +431,7 @@  static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 		ssbd_state = ARM64_SSBD_UNKNOWN;
 		return false;
 
+	/* machines with mixed mitigation requirements must not return this */
 	case SMCCC_RET_NOT_REQUIRED:
 		pr_info_once("%s mitigation not required\n", entry->desc);
 		ssbd_state = ARM64_SSBD_MITIGATED;
@@ -476,6 +486,17 @@  static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 
 	return required;
 }
+
+/* known vulnerable cores */
+static const struct midr_range arm64_ssb_cpus[] = {
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+	{},
+};
+
 #endif	/* CONFIG_ARM64_SSBD */
 
 static void __maybe_unused
@@ -762,6 +783,7 @@  const struct arm64_cpu_capabilities arm64_errata[] = {
 		.capability = ARM64_SSBD,
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 		.matches = has_ssbd_mitigation,
+		.midr_range_list = arm64_ssb_cpus,
 	},
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_1188873
@@ -809,4 +831,30 @@  ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
 	return sprintf(buf, "Vulnerable\n");
 }
 
+ssize_t cpu_show_spec_store_bypass(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	/*
+	 *  Two assumptions: First, get_ssbd_state() reflects the worst case
+	 *  for heterogeneous machines; second, if SSBS is supported, it's
+	 *  supported by all cores.
+	 */
+	switch (arm64_get_ssbd_state()) {
+	case ARM64_SSBD_MITIGATED:
+		return sprintf(buf, "Not affected\n");
+
+	case ARM64_SSBD_KERNEL:
+	case ARM64_SSBD_FORCE_ENABLE:
+		if (cpus_have_cap(ARM64_SSBS))
+			return sprintf(buf, "Not affected\n");
+		return sprintf(buf,
+			"Mitigation: Speculative Store Bypass disabled\n");
+	}
+
+	if (__ssb_safe)
+		return sprintf(buf, "Not affected\n");
+
+	return sprintf(buf, "Vulnerable\n");
+}
+
 #endif