diff mbox series

[v5,08/10] arm64: Always enable ssb vulnerability detection

Message ID 20190227010544.597579-9-jeremy.linton@arm.com
State Superseded
Headers show
Series arm64: add system vulnerability sysfs entries | expand

Commit Message

Jeremy Linton Feb. 27, 2019, 1:05 a.m. UTC
The ssb detection logic is necessary regardless of whether
the vulnerability mitigation code is built into the kernel.
Break it out so that the CONFIG option only controls the
mitigation logic and not the vulnerability detection.

Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>

---
 arch/arm64/include/asm/cpufeature.h |  4 ----
 arch/arm64/kernel/cpu_errata.c      | 11 +++++++----
 2 files changed, 7 insertions(+), 8 deletions(-)

-- 
2.20.1

Comments

Andre Przywara March 1, 2019, 7:02 a.m. UTC | #1
Hi,

On 2/26/19 7:05 PM, Jeremy Linton wrote:
> The ssb detection logic is necessary regardless of whether

> the vulnerability mitigation code is built into the kernel.

> Break it out so that the CONFIG option only controls the

> mitigation logic and not the vulnerability detection.

> 

> Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>

> ---

>   arch/arm64/include/asm/cpufeature.h |  4 ----

>   arch/arm64/kernel/cpu_errata.c      | 11 +++++++----

>   2 files changed, 7 insertions(+), 8 deletions(-)

> 

> diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h

> index dfcfba725d72..c2b60a021437 100644

> --- a/arch/arm64/include/asm/cpufeature.h

> +++ b/arch/arm64/include/asm/cpufeature.h

> @@ -628,11 +628,7 @@ static inline int arm64_get_ssbd_state(void)

>   #endif

>   }

>   

> -#ifdef CONFIG_ARM64_SSBD

>   void arm64_set_ssbd_mitigation(bool state);

> -#else

> -static inline void arm64_set_ssbd_mitigation(bool state) {}

> -#endif

>   

>   extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);

>   

> diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c

> index 0f6e8f5d67bc..5f5611d17dc1 100644

> --- a/arch/arm64/kernel/cpu_errata.c

> +++ b/arch/arm64/kernel/cpu_errata.c

> @@ -276,7 +276,6 @@ static int detect_harden_bp_fw(void)

>   	return 1;

>   }

>   

> -#ifdef CONFIG_ARM64_SSBD

>   DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

>   

>   int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;

> @@ -347,6 +346,7 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,

>   		*updptr = cpu_to_le32(aarch64_insn_gen_nop());

>   }

>   

> +#ifdef CONFIG_ARM64_SSBD

>   void arm64_set_ssbd_mitigation(bool state)

>   {

>   	if (this_cpu_has_cap(ARM64_SSBS)) {

> @@ -371,6 +371,12 @@ void arm64_set_ssbd_mitigation(bool state)

>   		break;

>   	}

>   }

> +#else

> +void arm64_set_ssbd_mitigation(bool state)

> +{

> +	pr_info_once("SSBD, disabled by kernel configuration\n");


Is there a stray comma, or is it the continuation of some previous printout?

Regardless of that it looks good and compiles with both 
CONFIG_ARM64_SSBD defined or not:

Reviewed-by: Andre Przywara <andre.przywara@arm.com>


Cheers,
Andre.

> +}

> +#endif	/* CONFIG_ARM64_SSBD */

>   

>   static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,

>   				    int scope)

> @@ -468,7 +474,6 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,

>   

>   	return required;

>   }

> -#endif	/* CONFIG_ARM64_SSBD */

>   

>   static void __maybe_unused

>   cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)

> @@ -760,14 +765,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {

>   		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),

>   	},

>   #endif

> -#ifdef CONFIG_ARM64_SSBD

>   	{

>   		.desc = "Speculative Store Bypass Disable",

>   		.capability = ARM64_SSBD,

>   		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,

>   		.matches = has_ssbd_mitigation,

>   	},

> -#endif

>   #ifdef CONFIG_ARM64_ERRATUM_1188873

>   	{

>   		/* Cortex-A76 r0p0 to r2p0 */

>
Jeremy Linton March 1, 2019, 4:16 p.m. UTC | #2
On 3/1/19 1:02 AM, Andre Przywara wrote:
> Hi,

> 

> On 2/26/19 7:05 PM, Jeremy Linton wrote:

>> The ssb detection logic is necessary regardless of whether

>> the vulnerability mitigation code is built into the kernel.

>> Break it out so that the CONFIG option only controls the

>> mitigation logic and not the vulnerability detection.

>>

>> Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>

>> ---

>>   arch/arm64/include/asm/cpufeature.h |  4 ----

>>   arch/arm64/kernel/cpu_errata.c      | 11 +++++++----

>>   2 files changed, 7 insertions(+), 8 deletions(-)

>>

>> diff --git a/arch/arm64/include/asm/cpufeature.h 

>> b/arch/arm64/include/asm/cpufeature.h

>> index dfcfba725d72..c2b60a021437 100644

>> --- a/arch/arm64/include/asm/cpufeature.h

>> +++ b/arch/arm64/include/asm/cpufeature.h

>> @@ -628,11 +628,7 @@ static inline int arm64_get_ssbd_state(void)

>>   #endif

>>   }

>> -#ifdef CONFIG_ARM64_SSBD

>>   void arm64_set_ssbd_mitigation(bool state);

>> -#else

>> -static inline void arm64_set_ssbd_mitigation(bool state) {}

>> -#endif

>>   extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);

>> diff --git a/arch/arm64/kernel/cpu_errata.c 

>> b/arch/arm64/kernel/cpu_errata.c

>> index 0f6e8f5d67bc..5f5611d17dc1 100644

>> --- a/arch/arm64/kernel/cpu_errata.c

>> +++ b/arch/arm64/kernel/cpu_errata.c

>> @@ -276,7 +276,6 @@ static int detect_harden_bp_fw(void)

>>       return 1;

>>   }

>> -#ifdef CONFIG_ARM64_SSBD

>>   DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

>>   int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;

>> @@ -347,6 +346,7 @@ void __init arm64_enable_wa2_handling(struct 

>> alt_instr *alt,

>>           *updptr = cpu_to_le32(aarch64_insn_gen_nop());

>>   }

>> +#ifdef CONFIG_ARM64_SSBD

>>   void arm64_set_ssbd_mitigation(bool state)

>>   {

>>       if (this_cpu_has_cap(ARM64_SSBS)) {

>> @@ -371,6 +371,12 @@ void arm64_set_ssbd_mitigation(bool state)

>>           break;

>>       }

>>   }

>> +#else

>> +void arm64_set_ssbd_mitigation(bool state)

>> +{

>> +    pr_info_once("SSBD, disabled by kernel configuration\n");

> 

> Is there a stray comma, or is it the continuation of some previous printout?


This is on purpose: I didn't like the way it read if you expanded 
the acronym. I still don't; maybe a ":" would be more appropriate.


> 

> Regardless of that it looks good and compiles with both 

> CONFIG_ARM64_SSBD defined or not:

> 

> Reviewed-by: Andre Przywara <andre.przywara@arm.com>

> 

> Cheers,

> Andre.

> 

>> +}

>> +#endif    /* CONFIG_ARM64_SSBD */

>>   static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities 

>> *entry,

>>                       int scope)

>> @@ -468,7 +474,6 @@ static bool has_ssbd_mitigation(const struct 

>> arm64_cpu_capabilities *entry,

>>       return required;

>>   }

>> -#endif    /* CONFIG_ARM64_SSBD */

>>   static void __maybe_unused

>>   cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities 

>> *__unused)

>> @@ -760,14 +765,12 @@ const struct arm64_cpu_capabilities 

>> arm64_errata[] = {

>>           ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),

>>       },

>>   #endif

>> -#ifdef CONFIG_ARM64_SSBD

>>       {

>>           .desc = "Speculative Store Bypass Disable",

>>           .capability = ARM64_SSBD,

>>           .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,

>>           .matches = has_ssbd_mitigation,

>>       },

>> -#endif

>>   #ifdef CONFIG_ARM64_ERRATUM_1188873

>>       {

>>           /* Cortex-A76 r0p0 to r2p0 */

>>
diff mbox series

Patch

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index dfcfba725d72..c2b60a021437 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -628,11 +628,7 @@  static inline int arm64_get_ssbd_state(void)
 #endif
 }
 
-#ifdef CONFIG_ARM64_SSBD
 void arm64_set_ssbd_mitigation(bool state);
-#else
-static inline void arm64_set_ssbd_mitigation(bool state) {}
-#endif
 
 extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 0f6e8f5d67bc..5f5611d17dc1 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -276,7 +276,6 @@  static int detect_harden_bp_fw(void)
 	return 1;
 }
 
-#ifdef CONFIG_ARM64_SSBD
 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 
 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
@@ -347,6 +346,7 @@  void __init arm64_enable_wa2_handling(struct alt_instr *alt,
 		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
 }
 
+#ifdef CONFIG_ARM64_SSBD
 void arm64_set_ssbd_mitigation(bool state)
 {
 	if (this_cpu_has_cap(ARM64_SSBS)) {
@@ -371,6 +371,12 @@  void arm64_set_ssbd_mitigation(bool state)
 		break;
 	}
 }
+#else
+void arm64_set_ssbd_mitigation(bool state)
+{
+	pr_info_once("SSBD, disabled by kernel configuration\n");
+}
+#endif	/* CONFIG_ARM64_SSBD */
 
 static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 				    int scope)
@@ -468,7 +474,6 @@  static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 
 	return required;
 }
-#endif	/* CONFIG_ARM64_SSBD */
 
 static void __maybe_unused
 cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
@@ -760,14 +765,12 @@  const struct arm64_cpu_capabilities arm64_errata[] = {
 		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
 	},
 #endif
-#ifdef CONFIG_ARM64_SSBD
 	{
 		.desc = "Speculative Store Bypass Disable",
 		.capability = ARM64_SSBD,
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 		.matches = has_ssbd_mitigation,
 	},
-#endif
 #ifdef CONFIG_ARM64_ERRATUM_1188873
 	{
 		/* Cortex-A76 r0p0 to r2p0 */