diff mbox series

[v3,11/13] arm64: Implement branch predictor hardening for affected Cortex-A CPUs

Message ID 1515432758-26440-12-git-send-email-will.deacon@arm.com
State Accepted
Commit aa6acde65e03186b5add8151e1ffe36c3c62639b
Headers show
Series arm64 kpti hardening and variant 2 workarounds | expand

Commit Message

Will Deacon Jan. 8, 2018, 5:32 p.m. UTC
Cortex-A57, A72, A73 and A75 are susceptible to branch predictor aliasing
and can theoretically be attacked by malicious code.

This patch implements a PSCI-based mitigation for these CPUs when available.
The call into firmware will invalidate the branch predictor state, preventing
any malicious entries from affecting other victim contexts.

Co-developed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>

---
 arch/arm64/kernel/bpi.S        | 24 ++++++++++++++++++++++++
 arch/arm64/kernel/cpu_errata.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 66 insertions(+)

-- 
2.1.4

Comments

Suzuki K Poulose Jan. 9, 2018, 4:12 p.m. UTC | #1
On 08/01/18 17:32, Will Deacon wrote:
> Cortex-A57, A72, A73 and A75 are susceptible to branch predictor aliasing

> and can theoretically be attacked by malicious code.

> 

> This patch implements a PSCI-based mitigation for these CPUs when available.

> The call into firmware will invalidate the branch predictor state, preventing

> any malicious entries from affecting other victim contexts.

> 

> Co-developed-by: Marc Zyngier <marc.zyngier@arm.com>

> Signed-off-by: Will Deacon <will.deacon@arm.com>


Will, Marc,

> +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR

> +	{

> +		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,

> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),

> +		.enable = enable_psci_bp_hardening,

> +	},

> +	{

> +		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,

> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),

> +		.enable = enable_psci_bp_hardening,

> +	},

> +	{

> +		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,

> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),

> +		.enable = enable_psci_bp_hardening,

> +	},

> +	{

> +		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,

> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),

> +		.enable = enable_psci_bp_hardening,

> +	},

> +#endif


The introduction of multiple entries for the same capability breaks
some assumptions in this_cpu_has_cap() and verify_local_cpu_features()
as they all stop at the first entry matching the "capability" and could
return wrong results. We need something like the following to make this
work, should someone add a duplicate feature entry or use
this_cpu_has_cap() on one of the errata.

---8>---

arm64: capabilities: Handle duplicate entries for a capability

Sometimes a single capability could be listed multiple times with
differing matches(), e.g, CPU errata for different MIDR versions.
This breaks verify_local_cpu_features() and this_cpu_has_cap() as
we stop checking for a capability on a CPU with the first
entry in the given table, which is not sufficient. Make sure we
run the checks for all entries of the same capability. We do
this by fixing __this_cpu_has_cap() to run through all the
entries in the given table for a match and reuse it for
verify_local_cpu_features().

Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>

---
 arch/arm64/kernel/cpufeature.c | 44 ++++++++++++++++++++++--------------------
 1 file changed, 23 insertions(+), 21 deletions(-)

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 862a417ca0e2..0c43447f7406 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1120,6 +1120,26 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
 			cap_set_elf_hwcap(hwcaps);
 }
 
+/*
+ * Check if the current CPU has a given feature capability.
+ * Should be called from non-preemptible context.
+ */
+static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
+			       unsigned int cap)
+{
+	const struct arm64_cpu_capabilities *caps;
+
+	if (WARN_ON(preemptible()))
+		return false;
+
+	for (caps = cap_array; caps->desc; caps++)
+		if (caps->capability == cap &&
+		    caps->matches &&
+		    caps->matches(caps, SCOPE_LOCAL_CPU))
+			return true;
+	return false;
+}
+
 void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 			    const char *info)
 {
@@ -1183,8 +1203,9 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
 }
 
 static void
-verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
+verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
 {
+	const struct arm64_cpu_capabilities *caps = caps_list;
 	for (; caps->matches; caps++) {
 		if (!cpus_have_cap(caps->capability))
 			continue;
@@ -1192,7 +1213,7 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
 		 * If the new CPU misses an advertised feature, we cannot proceed
 		 * further, park the cpu.
 		 */
-		if (!caps->matches(caps, SCOPE_LOCAL_CPU)) {
+		if (!__this_cpu_has_cap(caps_list, caps->capability)) {
 			pr_crit("CPU%d: missing feature: %s\n",
 					smp_processor_id(), caps->desc);
 			cpu_die_early();
@@ -1274,25 +1295,6 @@ static void __init mark_const_caps_ready(void)
 	static_branch_enable(&arm64_const_caps_ready);
 }
 
-/*
- * Check if the current CPU has a given feature capability.
- * Should be called from non-preemptible context.
- */
-static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
-			       unsigned int cap)
-{
-	const struct arm64_cpu_capabilities *caps;
-
-	if (WARN_ON(preemptible()))
-		return false;
-
-	for (caps = cap_array; caps->desc; caps++)
-		if (caps->capability == cap && caps->matches)
-			return caps->matches(caps, SCOPE_LOCAL_CPU);
-
-	return false;
-}
-
 extern const struct arm64_cpu_capabilities arm64_errata[];
 
 bool this_cpu_has_cap(unsigned int cap)
-- 
2.13.6
Marc Zyngier Jan. 15, 2018, 11:51 a.m. UTC | #2
Hi Suzuki,

On 09/01/18 16:12, Suzuki K Poulose wrote:
> On 08/01/18 17:32, Will Deacon wrote:

>> Cortex-A57, A72, A73 and A75 are susceptible to branch predictor aliasing

>> and can theoretically be attacked by malicious code.

>>

>> This patch implements a PSCI-based mitigation for these CPUs when available.

>> The call into firmware will invalidate the branch predictor state, preventing

>> any malicious entries from affecting other victim contexts.

>>

>> Co-developed-by: Marc Zyngier <marc.zyngier@arm.com>

>> Signed-off-by: Will Deacon <will.deacon@arm.com>

> 

> Will, Marc,

> 

>> +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR

>> +	{

>> +		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,

>> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),

>> +		.enable = enable_psci_bp_hardening,

>> +	},

>> +	{

>> +		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,

>> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),

>> +		.enable = enable_psci_bp_hardening,

>> +	},

>> +	{

>> +		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,

>> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),

>> +		.enable = enable_psci_bp_hardening,

>> +	},

>> +	{

>> +		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,

>> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),

>> +		.enable = enable_psci_bp_hardening,

>> +	},

>> +#endif

> 

> The introduction of multiple entries for the same capability breaks

> some assumptions in this_cpu_has_cap() and verify_local_cpu_features()

> as they all stop at the first entry matching the "capability" and could

> return wrong results. We need something like the following to make this

> work, should someone add a duplicate feature entry or use

> this_cpu_has_cap() on one of the errata.

> 

> ---8>---

> 

> arm64: capabilities: Handle duplicate entries for a capability

> 

> Sometimes a single capability could be listed multiple times with

> differing matches(), e.g, CPU errata for different MIDR versions.

> This breaks verify_local_cpu_features() and this_cpu_has_cap() as

> we stop checking for a capability on a CPU with the first

> entry in the given table, which is not sufficient. Make sure we

> run the checks for all entries of the same capability. We do

> this by fixing __this_cpu_has_cap() to run through all the

> entries in the given table for a match and reuse it for

> verify_local_cpu_features().

> 

> Cc: Mark Rutland <mark.rutland@arm.com>

> Cc: Will Deacon <will.deacon@arm.com>

> Cc: Marc Zyngier <marc.zyngier@arm.com>

> Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>

> ---

>  arch/arm64/kernel/cpufeature.c | 44 ++++++++++++++++++++++--------------------

>  1 file changed, 23 insertions(+), 21 deletions(-)

> 

> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c

> index 862a417ca0e2..0c43447f7406 100644

> --- a/arch/arm64/kernel/cpufeature.c

> +++ b/arch/arm64/kernel/cpufeature.c

> @@ -1120,6 +1120,26 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)

>  			cap_set_elf_hwcap(hwcaps);

>  }

>  

> +/*

> + * Check if the current CPU has a given feature capability.

> + * Should be called from non-preemptible context.

> + */

> +static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,

> +			       unsigned int cap)

> +{

> +	const struct arm64_cpu_capabilities *caps;

> +

> +	if (WARN_ON(preemptible()))

> +		return false;

> +

> +	for (caps = cap_array; caps->desc; caps++)

> +		if (caps->capability == cap &&

> +		    caps->matches &&

> +		    caps->matches(caps, SCOPE_LOCAL_CPU))

> +			return true;

> +	return false;

> +}

> +

>  void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,

>  			    const char *info)

>  {

> @@ -1183,8 +1203,9 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)

>  }

>  

>  static void

> -verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)

> +verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)

>  {

> +	const struct arm64_cpu_capabilities *caps = caps_list;

>  	for (; caps->matches; caps++) {

>  		if (!cpus_have_cap(caps->capability))

>  			continue;

> @@ -1192,7 +1213,7 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)

>  		 * If the new CPU misses an advertised feature, we cannot proceed

>  		 * further, park the cpu.

>  		 */

> -		if (!caps->matches(caps, SCOPE_LOCAL_CPU)) {

> +		if (!__this_cpu_has_cap(caps_list, caps->capability)) {

>  			pr_crit("CPU%d: missing feature: %s\n",

>  					smp_processor_id(), caps->desc);

>  			cpu_die_early();

> @@ -1274,25 +1295,6 @@ static void __init mark_const_caps_ready(void)

>  	static_branch_enable(&arm64_const_caps_ready);

>  }

>  

> -/*

> - * Check if the current CPU has a given feature capability.

> - * Should be called from non-preemptible context.

> - */

> -static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,

> -			       unsigned int cap)

> -{

> -	const struct arm64_cpu_capabilities *caps;

> -

> -	if (WARN_ON(preemptible()))

> -		return false;

> -

> -	for (caps = cap_array; caps->desc; caps++)

> -		if (caps->capability == cap && caps->matches)

> -			return caps->matches(caps, SCOPE_LOCAL_CPU);

> -

> -	return false;

> -}

> -

>  extern const struct arm64_cpu_capabilities arm64_errata[];

>  

>  bool this_cpu_has_cap(unsigned int cap)

> 


This looks sensible to me.

Acked-by: Marc Zyngier <marc.zyngier@arm.com>


	M.
-- 
Jazz is not dead. It just smells funny...
Catalin Marinas Jan. 15, 2018, 6:01 p.m. UTC | #3
On Tue, Jan 09, 2018 at 04:12:18PM +0000, Suzuki K. Poulose wrote:
> arm64: capabilities: Handle duplicate entries for a capability

> 

> Sometimes a single capability could be listed multiple times with

> differing matches(), e.g, CPU errata for different MIDR versions.

> This breaks verify_local_cpu_features() and this_cpu_has_cap() as

> we stop checking for a capability on a CPU with the first

> entry in the given table, which is not sufficient. Make sure we

> run the checks for all entries of the same capability. We do

> this by fixing __this_cpu_has_cap() to run through all the

> entries in the given table for a match and reuse it for

> verify_local_cpu_features().

> 

> Cc: Mark Rutland <mark.rutland@arm.com>

> Cc: Will Deacon <will.deacon@arm.com>

> Cc: Marc Zyngier <marc.zyngier@arm.com>

> Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>

> ---

>  arch/arm64/kernel/cpufeature.c | 44 ++++++++++++++++++++++--------------------

>  1 file changed, 23 insertions(+), 21 deletions(-)


Applied. Thanks.

-- 
Catalin
diff mbox series

Patch

diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
index 06a931eb2673..dec95bd82e31 100644
--- a/arch/arm64/kernel/bpi.S
+++ b/arch/arm64/kernel/bpi.S
@@ -53,3 +53,27 @@  ENTRY(__bp_harden_hyp_vecs_start)
 	vectors __kvm_hyp_vector
 	.endr
 ENTRY(__bp_harden_hyp_vecs_end)
+ENTRY(__psci_hyp_bp_inval_start)
+	sub	sp, sp, #(8 * 18)
+	stp	x16, x17, [sp, #(16 * 0)]
+	stp	x14, x15, [sp, #(16 * 1)]
+	stp	x12, x13, [sp, #(16 * 2)]
+	stp	x10, x11, [sp, #(16 * 3)]
+	stp	x8, x9, [sp, #(16 * 4)]
+	stp	x6, x7, [sp, #(16 * 5)]
+	stp	x4, x5, [sp, #(16 * 6)]
+	stp	x2, x3, [sp, #(16 * 7)]
+	stp	x0, x1, [sp, #(16 * 8)]
+	mov	x0, #0x84000000
+	smc	#0
+	ldp	x16, x17, [sp, #(16 * 0)]
+	ldp	x14, x15, [sp, #(16 * 1)]
+	ldp	x12, x13, [sp, #(16 * 2)]
+	ldp	x10, x11, [sp, #(16 * 3)]
+	ldp	x8, x9, [sp, #(16 * 4)]
+	ldp	x6, x7, [sp, #(16 * 5)]
+	ldp	x4, x5, [sp, #(16 * 6)]
+	ldp	x2, x3, [sp, #(16 * 7)]
+	ldp	x0, x1, [sp, #(16 * 8)]
+	add	sp, sp, #(8 * 18)
+ENTRY(__psci_hyp_bp_inval_end)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 16ea5c6f314e..cb0fb3796bb8 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -53,6 +53,8 @@  static int cpu_enable_trap_ctr_access(void *__unused)
 DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
 #ifdef CONFIG_KVM
+extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
+
 static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
 				const char *hyp_vecs_end)
 {
@@ -94,6 +96,9 @@  static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 	spin_unlock(&bp_lock);
 }
 #else
+#define __psci_hyp_bp_inval_start	NULL
+#define __psci_hyp_bp_inval_end		NULL
+
 static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 				      const char *hyp_vecs_start,
 				      const char *hyp_vecs_end)
@@ -118,6 +123,21 @@  static void  install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
 
 	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
 }
+
+#include <linux/psci.h>
+
+static int enable_psci_bp_hardening(void *data)
+{
+	const struct arm64_cpu_capabilities *entry = data;
+
+	if (psci_ops.get_version)
+		install_bp_hardening_cb(entry,
+				       (bp_hardening_cb_t)psci_ops.get_version,
+				       __psci_hyp_bp_inval_start,
+				       __psci_hyp_bp_inval_end);
+
+	return 0;
+}
 #endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
 #define MIDR_RANGE(model, min, max) \
@@ -261,6 +281,28 @@  const struct arm64_cpu_capabilities arm64_errata[] = {
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
 	},
 #endif
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+		.enable = enable_psci_bp_hardening,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+		.enable = enable_psci_bp_hardening,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+		.enable = enable_psci_bp_hardening,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+		.enable = enable_psci_bp_hardening,
+	},
+#endif
 	{
 	}
 };