
[1/6] arm64: kpti: move check for non-vulnerable CPUs to a function

Message ID: 20181206234408.1287689-2-jeremy.linton@arm.com
State: Superseded
Series: add system vulnerability sysfs entries

Commit Message

Jeremy Linton Dec. 6, 2018, 11:44 p.m. UTC
From: Mian Yousaf Kaukab <ykaukab@suse.de>


Add is_cpu_meltdown_safe(), which checks a whitelist of known safe cores.

Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>

[Moved location of function]
Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>

---
 arch/arm64/kernel/cpufeature.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

-- 
2.17.2
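
For reference, the helper this patch introduces reads roughly as follows once
the hunks below are applied (whitelist entries hidden by the hunk context are
left elided rather than guessed at):

	static bool is_cpu_meltdown_safe(void)
	{
		/* List of CPUs that are not vulnerable and don't need KPTI */
		static const struct midr_range kpti_safe_list[] = {
			/* ... earlier whitelisted cores elided ... */
			MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
			{ /* sentinel */ }
		};

		/* Safe if this core's MIDR matches an entry in the whitelist */
		if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
			return true;

		return false;
	}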

Comments

Jeremy Linton Dec. 12, 2018, 2:36 p.m. UTC | #1
Hi Julien,

Thanks for looking at this,

On 12/13/2018 03:13 AM, Julien Thierry wrote:
> Hi,
> 
> On 06/12/2018 23:44, Jeremy Linton wrote:
>> From: Mian Yousaf Kaukab <ykaukab@suse.de>
>>
>> Add is_cpu_meltdown_safe(), which checks a whitelist of known safe cores.
>>
>> Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
>> [Moved location of function]
>> Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
>> ---
>>   arch/arm64/kernel/cpufeature.c | 16 ++++++++++++----
>>   1 file changed, 12 insertions(+), 4 deletions(-)
>>
>> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
>> index aec5ecb85737..242898395f68 100644
>> --- a/arch/arm64/kernel/cpufeature.c
>> +++ b/arch/arm64/kernel/cpufeature.c
>> @@ -908,8 +908,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
>>   #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>>   static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
>>   
>> -static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
>> -				int scope)
>> +static bool is_cpu_meltdown_safe(void)
>>   {
>>   	/* List of CPUs that are not vulnerable and don't need KPTI */
>>   	static const struct midr_range kpti_safe_list[] = {
>> @@ -917,6 +916,16 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
>>   		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
>>   		{ /* sentinel */ }
>>   	};
>> +	/* Don't force KPTI for CPUs that are not vulnerable */
> 
> This is really a nit, but that comment would make more sense where
> is_cpu_meltdown_safe() is called, since unmap_kernel_at_el0() is the one
> deciding whether to apply KPTI; is_cpu_meltdown_safe() just states
> whether the core is safe or not.

That is a good point, thanks.

> 
> Otherwise:
> 
> Reviewed-by: Julien Thierry <julien.thierry@arm.com>
> 
> Cheers,
> 
> Julien
> 
>> +	if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
>> +		return true;
>> +
>> +	return false;
>> +}
>> +
>> +static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
>> +				int scope)
>> +{
>>   	char const *str = "command line option";
>>   
>>   	/*
>> @@ -940,8 +949,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
>>   	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
>>   		return true;
>>   
>> -	/* Don't force KPTI for CPUs that are not vulnerable */
>> -	if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
>> +	if (is_cpu_meltdown_safe())
>>   		return false;
>>   
>>   	/* Defer to CPU feature registers */
>>
> 
Julien Thierry Dec. 13, 2018, 9:13 a.m. UTC | #2
Hi,

On 06/12/2018 23:44, Jeremy Linton wrote:
> From: Mian Yousaf Kaukab <ykaukab@suse.de>
> 
> Add is_cpu_meltdown_safe(), which checks a whitelist of known safe cores.
> 
> Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
> [Moved location of function]
> Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
> ---
>  arch/arm64/kernel/cpufeature.c | 16 ++++++++++++----
>  1 file changed, 12 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index aec5ecb85737..242898395f68 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -908,8 +908,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
>  #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>  static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
>  
> -static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
> -				int scope)
> +static bool is_cpu_meltdown_safe(void)
>  {
>  	/* List of CPUs that are not vulnerable and don't need KPTI */
>  	static const struct midr_range kpti_safe_list[] = {
> @@ -917,6 +916,16 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
>  		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
>  		{ /* sentinel */ }
>  	};
> +	/* Don't force KPTI for CPUs that are not vulnerable */

This is really a nit, but that comment would make more sense where
is_cpu_meltdown_safe() is called, since unmap_kernel_at_el0() is the one
deciding whether to apply KPTI; is_cpu_meltdown_safe() just states
whether the core is safe or not.
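
A minimal sketch of that suggestion, assuming only the comment moves from the
helper to its call site in unmap_kernel_at_el0() (call site taken from the
hunk quoted further down):

	/* Don't force KPTI for CPUs that are not vulnerable */
	if (is_cpu_meltdown_safe())
		return false;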

Otherwise:

Reviewed-by: Julien Thierry <julien.thierry@arm.com>


Cheers,

Julien

> +	if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
> +		return true;
> +
> +	return false;
> +}
> +
> +static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
> +				int scope)
> +{
>  	char const *str = "command line option";
>  
>  	/*
> @@ -940,8 +949,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
>  	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
>  		return true;
>  
> -	/* Don't force KPTI for CPUs that are not vulnerable */
> -	if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
> +	if (is_cpu_meltdown_safe())
>  		return false;
>  
>  	/* Defer to CPU feature registers */
> 

-- 
Julien Thierry

Patch

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index aec5ecb85737..242898395f68 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -908,8 +908,7 @@  has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
 
-static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
-				int scope)
+static bool is_cpu_meltdown_safe(void)
 {
 	/* List of CPUs that are not vulnerable and don't need KPTI */
 	static const struct midr_range kpti_safe_list[] = {
@@ -917,6 +916,16 @@  static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
 		{ /* sentinel */ }
 	};
+	/* Don't force KPTI for CPUs that are not vulnerable */
+	if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+		return true;
+
+	return false;
+}
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+				int scope)
+{
 	char const *str = "command line option";
 
 	/*
@@ -940,8 +949,7 @@  static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
 		return true;
 
-	/* Don't force KPTI for CPUs that are not vulnerable */
-	if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+	if (is_cpu_meltdown_safe())
 		return false;
 
 	/* Defer to CPU feature registers */