
[v2,03/11] arm64: Take into account ID_AA64PFR0_EL1.CSV3

Message ID 1515157961-20963-4-git-send-email-will.deacon@arm.com
State Superseded
Series arm64 kpti hardening and variant 2 workarounds

Commit Message

Will Deacon Jan. 5, 2018, 1:12 p.m. UTC
For non-KASLR kernels where the KPTI behaviour has not been overridden
on the command line we can use ID_AA64PFR0_EL1.CSV3 to determine whether
or not we should unmap the kernel whilst running at EL0.

Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>

Signed-off-by: Will Deacon <will.deacon@arm.com>

---
 arch/arm64/include/asm/sysreg.h | 1 +
 arch/arm64/kernel/cpufeature.c  | 8 +++++++-
 2 files changed, 8 insertions(+), 1 deletion(-)

-- 
2.1.4
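
For reference, the check the patch adds boils down to reading the sanitised
ID_AA64PFR0_EL1 value and extracting the 4-bit CSV3 field at bit 60; a
non-zero value means the CPU advertises that it is not vulnerable to the
Meltdown-style attack. A minimal sketch of that extraction (not the patch
itself, which uses cpuid_feature_extract_unsigned_field(); the field position
and width are taken from the hunks below):

	/* Minimal sketch: extract CSV3 from an ID_AA64PFR0_EL1 value. */
	#define ID_AA64PFR0_CSV3_SHIFT	60

	static inline bool csv3_says_kpti_not_needed(u64 pfr0)
	{
		/* CSV3 is a 4-bit field at bits [63:60]; non-zero => not vulnerable */
		return ((pfr0 >> ID_AA64PFR0_CSV3_SHIFT) & 0xf) != 0;
	}
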

Comments

Jayachandran C Jan. 8, 2018, 7:24 a.m. UTC | #1
On Fri, Jan 05, 2018 at 01:12:33PM +0000, Will Deacon wrote:
> For non-KASLR kernels where the KPTI behaviour has not been overridden

> on the command line we can use ID_AA64PFR0_EL1.CSV3 to determine whether

> or not we should unmap the kernel whilst running at EL0.

> 

> Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>

> Signed-off-by: Will Deacon <will.deacon@arm.com>

> ---

>  arch/arm64/include/asm/sysreg.h | 1 +

>  arch/arm64/kernel/cpufeature.c  | 8 +++++++-

>  2 files changed, 8 insertions(+), 1 deletion(-)

> 

> diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h

> index 08cc88574659..ae519bbd3f9e 100644

> --- a/arch/arm64/include/asm/sysreg.h

> +++ b/arch/arm64/include/asm/sysreg.h

> @@ -437,6 +437,7 @@

>  #define ID_AA64ISAR1_DPB_SHIFT		0

>  

>  /* id_aa64pfr0 */

> +#define ID_AA64PFR0_CSV3_SHIFT		60

>  #define ID_AA64PFR0_SVE_SHIFT		32

>  #define ID_AA64PFR0_GIC_SHIFT		24

>  #define ID_AA64PFR0_ASIMD_SHIFT		20

> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c

> index 9f0545dfe497..d723fc071f39 100644

> --- a/arch/arm64/kernel/cpufeature.c

> +++ b/arch/arm64/kernel/cpufeature.c

> @@ -145,6 +145,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {

>  };

>  

>  static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {

> +	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),

>  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),

>  	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),

>  	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),

> @@ -851,6 +852,8 @@ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */

>  static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

>  				int __unused)

>  {

> +	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

> +

>  	/* Forced on command line? */

>  	if (__kpti_forced) {

>  		pr_info_once("kernel page table isolation forced %s by command line option\n",

> @@ -862,7 +865,9 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

>  	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))

>  		return true;

>  

> -	return false;

> +	/* Defer to CPU feature registers */

> +	return !cpuid_feature_extract_unsigned_field(pfr0,

> +						     ID_AA64PFR0_CSV3_SHIFT);


If I read this correctly, this enables KPTI on all processors that do not have
CSV3 set (which seems to be a capability only future CPUs will advertise).

Turning on KPTI has a small but significant overhead, so I think we should turn
it off on processors that are not vulnerable to CVE-2017-5754. Can we add something
like this:

--->8
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 19ed09b..202b037 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -862,6 +862,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
                return __kpti_forced > 0;
        }
 
+       /* Don't force KPTI for CPUs that are not vulnerable */
+       switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {
+               case MIDR_CAVIUM_THUNDERX2:
+               case MIDR_BRCM_VULCAN:
+                       return false;
+       }
+
        /* Useful for KASLR robustness */
        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
                return true;
-- 
JC
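
For context, read_cpuid_id() returns MIDR_EL1, and masking with
MIDR_CPU_MODEL_MASK keeps only the fields that identify the CPU model
(implementer, architecture and part number, assuming the usual definition of
that mask), so the whitelist above matches every variant and revision of the
listed parts. A rough equivalent of the comparison:

	static bool cpu_matches_model(u32 model)
	{
		/* Assumes MIDR_CPU_MODEL_MASK clears the variant/revision fields */
		return (read_cpuid_id() & MIDR_CPU_MODEL_MASK) == model;
	}
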
Marc Zyngier Jan. 8, 2018, 9:20 a.m. UTC | #2
On 08/01/18 07:24, Jayachandran C wrote:
> On Fri, Jan 05, 2018 at 01:12:33PM +0000, Will Deacon wrote:

>> For non-KASLR kernels where the KPTI behaviour has not been overridden

>> on the command line we can use ID_AA64PFR0_EL1.CSV3 to determine whether

>> or not we should unmap the kernel whilst running at EL0.

>>

>> Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>

>> Signed-off-by: Will Deacon <will.deacon@arm.com>

>> ---

>>  arch/arm64/include/asm/sysreg.h | 1 +

>>  arch/arm64/kernel/cpufeature.c  | 8 +++++++-

>>  2 files changed, 8 insertions(+), 1 deletion(-)

>>

>> diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h

>> index 08cc88574659..ae519bbd3f9e 100644

>> --- a/arch/arm64/include/asm/sysreg.h

>> +++ b/arch/arm64/include/asm/sysreg.h

>> @@ -437,6 +437,7 @@

>>  #define ID_AA64ISAR1_DPB_SHIFT		0

>>  

>>  /* id_aa64pfr0 */

>> +#define ID_AA64PFR0_CSV3_SHIFT		60

>>  #define ID_AA64PFR0_SVE_SHIFT		32

>>  #define ID_AA64PFR0_GIC_SHIFT		24

>>  #define ID_AA64PFR0_ASIMD_SHIFT		20

>> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c

>> index 9f0545dfe497..d723fc071f39 100644

>> --- a/arch/arm64/kernel/cpufeature.c

>> +++ b/arch/arm64/kernel/cpufeature.c

>> @@ -145,6 +145,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {

>>  };

>>  

>>  static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {

>> +	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),

>>  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),

>>  	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),

>>  	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),

>> @@ -851,6 +852,8 @@ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */

>>  static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

>>  				int __unused)

>>  {

>> +	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

>> +

>>  	/* Forced on command line? */

>>  	if (__kpti_forced) {

>>  		pr_info_once("kernel page table isolation forced %s by command line option\n",

>> @@ -862,7 +865,9 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

>>  	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))

>>  		return true;

>>  

>> -	return false;

>> +	/* Defer to CPU feature registers */

>> +	return !cpuid_feature_extract_unsigned_field(pfr0,

>> +						     ID_AA64PFR0_CSV3_SHIFT);

> 

> If I read this correctly, this enables KPTI on all processors without the CSV3

> set (which seems to be a future capability).

> 

> Turning on KPTI has a small but significant overhead, so I think we should turn

> it off on processors that are not vulnerable to CVE-2017-5754. Can we add something

> like  this:

> 

> --->8

> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c

> index 19ed09b..202b037 100644

> --- a/arch/arm64/kernel/cpufeature.c

> +++ b/arch/arm64/kernel/cpufeature.c

> @@ -862,6 +862,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

>                 return __kpti_forced > 0;

>         }

>  

> +       /* Don't force KPTI for CPUs that are not vulnerable */

> +       switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {

> +               case MIDR_CAVIUM_THUNDERX2:

> +               case MIDR_BRCM_VULCAN:

> +                       return false;

> +       }

> +

>         /* Useful for KASLR robustness */

>         if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))

>                 return true;

> 


KPTI is also an improvement for KASLR. Why would you deprive a user of
the choice to further secure their system?

Thanks,

	M.
-- 
Jazz is not dead. It just smells funny...
Will Deacon Jan. 8, 2018, 5:06 p.m. UTC | #3
On Sun, Jan 07, 2018 at 11:24:02PM -0800, Jayachandran C wrote:
> On Fri, Jan 05, 2018 at 01:12:33PM +0000, Will Deacon wrote:

> > For non-KASLR kernels where the KPTI behaviour has not been overridden

> > on the command line we can use ID_AA64PFR0_EL1.CSV3 to determine whether

> > or not we should unmap the kernel whilst running at EL0.

> > 

> > Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>

> > Signed-off-by: Will Deacon <will.deacon@arm.com>

> > ---

> >  arch/arm64/include/asm/sysreg.h | 1 +

> >  arch/arm64/kernel/cpufeature.c  | 8 +++++++-

> >  2 files changed, 8 insertions(+), 1 deletion(-)

> > 

> > diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h

> > index 08cc88574659..ae519bbd3f9e 100644

> > --- a/arch/arm64/include/asm/sysreg.h

> > +++ b/arch/arm64/include/asm/sysreg.h

> > @@ -437,6 +437,7 @@

> >  #define ID_AA64ISAR1_DPB_SHIFT		0

> >  

> >  /* id_aa64pfr0 */

> > +#define ID_AA64PFR0_CSV3_SHIFT		60

> >  #define ID_AA64PFR0_SVE_SHIFT		32

> >  #define ID_AA64PFR0_GIC_SHIFT		24

> >  #define ID_AA64PFR0_ASIMD_SHIFT		20

> > diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c

> > index 9f0545dfe497..d723fc071f39 100644

> > --- a/arch/arm64/kernel/cpufeature.c

> > +++ b/arch/arm64/kernel/cpufeature.c

> > @@ -145,6 +145,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {

> >  };

> >  

> >  static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {

> > +	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),

> >  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),

> >  	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),

> >  	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),

> > @@ -851,6 +852,8 @@ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */

> >  static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

> >  				int __unused)

> >  {

> > +	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

> > +

> >  	/* Forced on command line? */

> >  	if (__kpti_forced) {

> >  		pr_info_once("kernel page table isolation forced %s by command line option\n",

> > @@ -862,7 +865,9 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

> >  	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))

> >  		return true;

> >  

> > -	return false;

> > +	/* Defer to CPU feature registers */

> > +	return !cpuid_feature_extract_unsigned_field(pfr0,

> > +						     ID_AA64PFR0_CSV3_SHIFT);

> 

> If I read this correctly, this enables KPTI on all processors without the CSV3

> set (which seems to be a future capability).

> 

> Turning on KPTI has a small but significant overhead, so I think we should turn

> it off on processors that are not vulnerable to CVE-2017-5754. Can we add something

> like  this:

> 

> --->8

> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c

> index 19ed09b..202b037 100644

> --- a/arch/arm64/kernel/cpufeature.c

> +++ b/arch/arm64/kernel/cpufeature.c

> @@ -862,6 +862,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

>                 return __kpti_forced > 0;

>         }

>  

> +       /* Don't force KPTI for CPUs that are not vulnerable */

> +       switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {

> +               case MIDR_CAVIUM_THUNDERX2:

> +               case MIDR_BRCM_VULCAN:

> +                       return false;

> +       }

> +


KASLR aside (I agree with Marc on that), I did consider an MIDR whitelist,
but it gets nasty for big.LITTLE systems if maxcpus= is used and we see a
non-whitelisted CPU after we've booted. At this point, we can't actually
bring the thing online.

You could make the argument that if you're passing maxcpus= then you can just
as easily pass kpti= as well, but I wasn't sure.

Will
Jayachandran C Jan. 8, 2018, 5:40 p.m. UTC | #4
On Mon, Jan 08, 2018 at 09:20:09AM +0000, Marc Zyngier wrote:
> On 08/01/18 07:24, Jayachandran C wrote:

> > On Fri, Jan 05, 2018 at 01:12:33PM +0000, Will Deacon wrote:

> >> For non-KASLR kernels where the KPTI behaviour has not been overridden

> >> on the command line we can use ID_AA64PFR0_EL1.CSV3 to determine whether

> >> or not we should unmap the kernel whilst running at EL0.

> >>

> >> Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>

> >> Signed-off-by: Will Deacon <will.deacon@arm.com>

> >> ---

> >>  arch/arm64/include/asm/sysreg.h | 1 +

> >>  arch/arm64/kernel/cpufeature.c  | 8 +++++++-

> >>  2 files changed, 8 insertions(+), 1 deletion(-)

> >>

> >> diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h

> >> index 08cc88574659..ae519bbd3f9e 100644

> >> --- a/arch/arm64/include/asm/sysreg.h

> >> +++ b/arch/arm64/include/asm/sysreg.h

> >> @@ -437,6 +437,7 @@

> >>  #define ID_AA64ISAR1_DPB_SHIFT		0

> >>  

> >>  /* id_aa64pfr0 */

> >> +#define ID_AA64PFR0_CSV3_SHIFT		60

> >>  #define ID_AA64PFR0_SVE_SHIFT		32

> >>  #define ID_AA64PFR0_GIC_SHIFT		24

> >>  #define ID_AA64PFR0_ASIMD_SHIFT		20

> >> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c

> >> index 9f0545dfe497..d723fc071f39 100644

> >> --- a/arch/arm64/kernel/cpufeature.c

> >> +++ b/arch/arm64/kernel/cpufeature.c

> >> @@ -145,6 +145,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {

> >>  };

> >>  

> >>  static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {

> >> +	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),

> >>  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),

> >>  	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),

> >>  	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),

> >> @@ -851,6 +852,8 @@ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */

> >>  static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

> >>  				int __unused)

> >>  {

> >> +	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

> >> +

> >>  	/* Forced on command line? */

> >>  	if (__kpti_forced) {

> >>  		pr_info_once("kernel page table isolation forced %s by command line option\n",

> >> @@ -862,7 +865,9 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

> >>  	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))

> >>  		return true;

> >>  

> >> -	return false;

> >> +	/* Defer to CPU feature registers */

> >> +	return !cpuid_feature_extract_unsigned_field(pfr0,

> >> +						     ID_AA64PFR0_CSV3_SHIFT);

> > 

> > If I read this correctly, this enables KPTI on all processors without the CSV3

> > set (which seems to be a future capability).

> > 

> > Turning on KPTI has a small but significant overhead, so I think we should turn

> > it off on processors that are not vulnerable to CVE-2017-5754. Can we add something

> > like  this:

> > 

> > --->8

> > diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c

> > index 19ed09b..202b037 100644

> > --- a/arch/arm64/kernel/cpufeature.c

> > +++ b/arch/arm64/kernel/cpufeature.c

> > @@ -862,6 +862,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

> >                 return __kpti_forced > 0;

> >         }

> >  

> > +       /* Don't force KPTI for CPUs that are not vulnerable */

> > +       switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {

> > +               case MIDR_CAVIUM_THUNDERX2:

> > +               case MIDR_BRCM_VULCAN:

> > +                       return false;

> > +       }

> > +

> >         /* Useful for KASLR robustness */

> >         if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))

> >                 return true;

> > 

> 

> KPTI is also an improvement for KASLR. Why would you deprive a user of

> the choice to further secure their system?


The user has a choice with kpti= at the kernel command line, so we are
not depriving the user of a choice. KASLR is expected to be enabled by
distributions, and KPTI will be enabled by default as well.

On systems that are not vulnerable to variant 3, this is an unnecessary
overhead.

JC
Jayachandran C Jan. 8, 2018, 5:50 p.m. UTC | #5
On Mon, Jan 08, 2018 at 05:06:24PM +0000, Will Deacon wrote:
> On Sun, Jan 07, 2018 at 11:24:02PM -0800, Jayachandran C wrote:

> > On Fri, Jan 05, 2018 at 01:12:33PM +0000, Will Deacon wrote:

> > > For non-KASLR kernels where the KPTI behaviour has not been overridden

> > > on the command line we can use ID_AA64PFR0_EL1.CSV3 to determine whether

> > > or not we should unmap the kernel whilst running at EL0.

> > > 

> > > Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>

> > > Signed-off-by: Will Deacon <will.deacon@arm.com>

> > > ---

> > >  arch/arm64/include/asm/sysreg.h | 1 +

> > >  arch/arm64/kernel/cpufeature.c  | 8 +++++++-

> > >  2 files changed, 8 insertions(+), 1 deletion(-)

> > > 

> > > diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h

> > > index 08cc88574659..ae519bbd3f9e 100644

> > > --- a/arch/arm64/include/asm/sysreg.h

> > > +++ b/arch/arm64/include/asm/sysreg.h

> > > @@ -437,6 +437,7 @@

> > >  #define ID_AA64ISAR1_DPB_SHIFT		0

> > >  

> > >  /* id_aa64pfr0 */

> > > +#define ID_AA64PFR0_CSV3_SHIFT		60

> > >  #define ID_AA64PFR0_SVE_SHIFT		32

> > >  #define ID_AA64PFR0_GIC_SHIFT		24

> > >  #define ID_AA64PFR0_ASIMD_SHIFT		20

> > > diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c

> > > index 9f0545dfe497..d723fc071f39 100644

> > > --- a/arch/arm64/kernel/cpufeature.c

> > > +++ b/arch/arm64/kernel/cpufeature.c

> > > @@ -145,6 +145,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {

> > >  };

> > >  

> > >  static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {

> > > +	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),

> > >  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),

> > >  	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),

> > >  	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),

> > > @@ -851,6 +852,8 @@ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */

> > >  static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

> > >  				int __unused)

> > >  {

> > > +	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

> > > +

> > >  	/* Forced on command line? */

> > >  	if (__kpti_forced) {

> > >  		pr_info_once("kernel page table isolation forced %s by command line option\n",

> > > @@ -862,7 +865,9 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

> > >  	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))

> > >  		return true;

> > >  

> > > -	return false;

> > > +	/* Defer to CPU feature registers */

> > > +	return !cpuid_feature_extract_unsigned_field(pfr0,

> > > +						     ID_AA64PFR0_CSV3_SHIFT);

> > 

> > If I read this correctly, this enables KPTI on all processors without the CSV3

> > set (which seems to be a future capability).

> > 

> > Turning on KPTI has a small but significant overhead, so I think we should turn

> > it off on processors that are not vulnerable to CVE-2017-5754. Can we add something

> > like  this:

> > 

> > --->8

> > diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c

> > index 19ed09b..202b037 100644

> > --- a/arch/arm64/kernel/cpufeature.c

> > +++ b/arch/arm64/kernel/cpufeature.c

> > @@ -862,6 +862,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

> >                 return __kpti_forced > 0;

> >         }

> >  

> > +       /* Don't force KPTI for CPUs that are not vulnerable */

> > +       switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {

> > +               case MIDR_CAVIUM_THUNDERX2:

> > +               case MIDR_BRCM_VULCAN:

> > +                       return false;

> > +       }

> > +

> 

> KASLR aside (I agree with Marc on that), I did consider an MIDR whitelist,

> but it gets nasty for big.LITTLE systems if maxcpus= is used and we see a

> non-whitelisted CPU after we've booted. At this point, we can't actually

> bring the thing online.

> 

> You could make the argument that if you're passing maxcpus= then you can just

> easily pass kpti= as well, but I wasn't sure.


The code above should be a reasonable addition for getting the default right.
If by any chance these CPUs are shown to be vulnerable to timing attacks that
can bypass KASLR, we will need to move the change below the line:

  	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
  		return true;

The current code, which is purely based on a future capability, does not handle
CPUs without the vulnerability properly. Can we fix this?

Thanks,
JC
Will Deacon Jan. 8, 2018, 5:51 p.m. UTC | #6
On Mon, Jan 08, 2018 at 09:40:17AM -0800, Jayachandran C wrote:
> On Mon, Jan 08, 2018 at 09:20:09AM +0000, Marc Zyngier wrote:

> > On 08/01/18 07:24, Jayachandran C wrote:

> > > diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c

> > > index 19ed09b..202b037 100644

> > > --- a/arch/arm64/kernel/cpufeature.c

> > > +++ b/arch/arm64/kernel/cpufeature.c

> > > @@ -862,6 +862,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

> > >                 return __kpti_forced > 0;

> > >         }

> > >  

> > > +       /* Don't force KPTI for CPUs that are not vulnerable */

> > > +       switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {

> > > +               case MIDR_CAVIUM_THUNDERX2:

> > > +               case MIDR_BRCM_VULCAN:

> > > +                       return false;

> > > +       }

> > > +

> > >         /* Useful for KASLR robustness */

> > >         if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))

> > >                 return true;

> > > 

> > 

> > KPTI is also an improvement for KASLR. Why would you deprive a user of

> > the choice to further secure their system?

> 

> The user has a choice with kpti= at the kernel command line, so we are

> not depriving the user of a choice. KASLR is expected to be enabled by

> distributions, and KPTI will be enabled by default as well.

> 

> On systems that are not vulnerable to variant 3, this is an unnecessary

> overhead.


KASLR can be bypassed on CPUs that are not vulnerable to variant 3 simply
by timing how long accesses to kernel addresses from EL0 take -- please read
the original KAISER paper for details about that attack on x86. kpti
mitigates that. If you don't care about KASLR, don't enable it (arguably
it's useless without kpti).

Will
Marc Zyngier Jan. 8, 2018, 5:52 p.m. UTC | #7
On 08/01/18 17:40, Jayachandran C wrote:
> On Mon, Jan 08, 2018 at 09:20:09AM +0000, Marc Zyngier wrote:

>> On 08/01/18 07:24, Jayachandran C wrote:

>>> On Fri, Jan 05, 2018 at 01:12:33PM +0000, Will Deacon wrote:

>>>> For non-KASLR kernels where the KPTI behaviour has not been overridden

>>>> on the command line we can use ID_AA64PFR0_EL1.CSV3 to determine whether

>>>> or not we should unmap the kernel whilst running at EL0.

>>>>

>>>> Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>

>>>> Signed-off-by: Will Deacon <will.deacon@arm.com>

>>>> ---

>>>>  arch/arm64/include/asm/sysreg.h | 1 +

>>>>  arch/arm64/kernel/cpufeature.c  | 8 +++++++-

>>>>  2 files changed, 8 insertions(+), 1 deletion(-)

>>>>

>>>> diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h

>>>> index 08cc88574659..ae519bbd3f9e 100644

>>>> --- a/arch/arm64/include/asm/sysreg.h

>>>> +++ b/arch/arm64/include/asm/sysreg.h

>>>> @@ -437,6 +437,7 @@

>>>>  #define ID_AA64ISAR1_DPB_SHIFT		0

>>>>  

>>>>  /* id_aa64pfr0 */

>>>> +#define ID_AA64PFR0_CSV3_SHIFT		60

>>>>  #define ID_AA64PFR0_SVE_SHIFT		32

>>>>  #define ID_AA64PFR0_GIC_SHIFT		24

>>>>  #define ID_AA64PFR0_ASIMD_SHIFT		20

>>>> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c

>>>> index 9f0545dfe497..d723fc071f39 100644

>>>> --- a/arch/arm64/kernel/cpufeature.c

>>>> +++ b/arch/arm64/kernel/cpufeature.c

>>>> @@ -145,6 +145,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {

>>>>  };

>>>>  

>>>>  static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {

>>>> +	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),

>>>>  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),

>>>>  	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),

>>>>  	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),

>>>> @@ -851,6 +852,8 @@ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */

>>>>  static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

>>>>  				int __unused)

>>>>  {

>>>> +	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

>>>> +

>>>>  	/* Forced on command line? */

>>>>  	if (__kpti_forced) {

>>>>  		pr_info_once("kernel page table isolation forced %s by command line option\n",

>>>> @@ -862,7 +865,9 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

>>>>  	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))

>>>>  		return true;

>>>>  

>>>> -	return false;

>>>> +	/* Defer to CPU feature registers */

>>>> +	return !cpuid_feature_extract_unsigned_field(pfr0,

>>>> +						     ID_AA64PFR0_CSV3_SHIFT);

>>>

>>> If I read this correctly, this enables KPTI on all processors without the CSV3

>>> set (which seems to be a future capability).

>>>

>>> Turning on KPTI has a small but significant overhead, so I think we should turn

>>> it off on processors that are not vulnerable to CVE-2017-5754. Can we add something

>>> like  this:

>>>

>>> --->8

>>> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c

>>> index 19ed09b..202b037 100644

>>> --- a/arch/arm64/kernel/cpufeature.c

>>> +++ b/arch/arm64/kernel/cpufeature.c

>>> @@ -862,6 +862,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

>>>                 return __kpti_forced > 0;

>>>         }

>>>  

>>> +       /* Don't force KPTI for CPUs that are not vulnerable */

>>> +       switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {

>>> +               case MIDR_CAVIUM_THUNDERX2:

>>> +               case MIDR_BRCM_VULCAN:

>>> +                       return false;

>>> +       }

>>> +

>>>         /* Useful for KASLR robustness */

>>>         if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))

>>>                 return true;

>>>

>>

>> KPTI is also an improvement for KASLR. Why would you deprive a user of

>> the choice to further secure their system?

> 

> The user has a choice with kpti= at the kernel command line, so we are

> not depriving the user of a choice. KASLR is expected to be enabled by

> distributions, and KPTI will be enabled by default as well.

> 

> On systems that are not vulnerable to variant 3, this is an unnecessary

> overhead.


KASLR can be defeated if you don't have KPTI enabled. The original
KAISER paper is quite clear about that.

Thanks,

	M.
-- 
Jazz is not dead. It just smells funny...
Alan Cox Jan. 8, 2018, 6:22 p.m. UTC | #8
> > On systems that are not vulnerable to variant 3, this is an unnecessary

> > overhead.  

> 

> KASLR can be bypassed on CPUs that are not vulnerable to variant 3 simply

> by timing how long accesses to kernel addresses from EL0 take -- please read

> the original KAISER paper for details about that attack on x86. kpti

> mitigates that. If you don't care about KASLR, don't enable it (arguably

> it's useless without kpti).


KASLR is primarily of value for remote protection.

Alan
Jayachandran C Jan. 9, 2018, 4:06 a.m. UTC | #9
On Mon, Jan 08, 2018 at 05:51:00PM +0000, Will Deacon wrote:
> On Mon, Jan 08, 2018 at 09:40:17AM -0800, Jayachandran C wrote:

> > On Mon, Jan 08, 2018 at 09:20:09AM +0000, Marc Zyngier wrote:

> > > On 08/01/18 07:24, Jayachandran C wrote:

> > > > diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c

> > > > index 19ed09b..202b037 100644

> > > > --- a/arch/arm64/kernel/cpufeature.c

> > > > +++ b/arch/arm64/kernel/cpufeature.c

> > > > @@ -862,6 +862,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

> > > >                 return __kpti_forced > 0;

> > > >         }

> > > >  

> > > > +       /* Don't force KPTI for CPUs that are not vulnerable */

> > > > +       switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {

> > > > +               case MIDR_CAVIUM_THUNDERX2:

> > > > +               case MIDR_BRCM_VULCAN:

> > > > +                       return false;

> > > > +       }

> > > > +

> > > >         /* Useful for KASLR robustness */

> > > >         if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))

> > > >                 return true;

> > > > 

> > > 

> > > KPTI is also an improvement for KASLR. Why would you deprive a user of

> > > the choice to further secure their system?

> > 

> > The user has a choice with kpti= at the kernel command line, so we are

> > not depriving the user of a choice. KASLR is expected to be enabled by

> > distributions, and KPTI will be enabled by default as well.

> > 

> > On systems that are not vulnerable to variant 3, this is an unnecessary

> > overhead.

> 

> KASLR can be bypassed on CPUs that are not vulnerable to variant 3 simply

> by timing how long accesses to kernel addresses from EL0 take -- please read

> the original KAISER paper for details about that attack on x86. kpti

> mitigates that. If you don't care about KASLR, don't enable it (arguably

> it's useless without kpti).


The code above assumes that all ARM CPUs (now and future) will be vulnerable
to timing attacks that can bypass KASLR. I don't think that is a correct
assumption to make.

If ThunderX2 is shown to be vulnerable to any timing-based attack we can
certainly move the MIDR check after the CONFIG_RANDOMIZE_BASE check.
But I don't think that is the case now; if you have any PoC code to check
this, I can run it on the processor and make the change.

It is pretty clear that we need a whitelist check either before or after the
CONFIG_RANDOMIZE_BASE check.

The KAISER paper seems to say that the ARM TTBR0/1 split made it more immune, and the
prefetch paper (if I understand it correctly) showed that prefetch on some ARM
cores can be used for a timing attack. This is probably an area where you will
have better information, so any specific pointers would be appreciated -
especially ones showing that all ARM CPUs are susceptible.

Thanks,
JC.
Will Deacon Jan. 9, 2018, 10 a.m. UTC | #10
On Mon, Jan 08, 2018 at 08:06:27PM -0800, Jayachandran C wrote:
> On Mon, Jan 08, 2018 at 05:51:00PM +0000, Will Deacon wrote:

> > On Mon, Jan 08, 2018 at 09:40:17AM -0800, Jayachandran C wrote:

> > > On Mon, Jan 08, 2018 at 09:20:09AM +0000, Marc Zyngier wrote:

> > > > On 08/01/18 07:24, Jayachandran C wrote:

> > > > > diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c

> > > > > index 19ed09b..202b037 100644

> > > > > --- a/arch/arm64/kernel/cpufeature.c

> > > > > +++ b/arch/arm64/kernel/cpufeature.c

> > > > > @@ -862,6 +862,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

> > > > >                 return __kpti_forced > 0;

> > > > >         }

> > > > >  

> > > > > +       /* Don't force KPTI for CPUs that are not vulnerable */

> > > > > +       switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {

> > > > > +               case MIDR_CAVIUM_THUNDERX2:

> > > > > +               case MIDR_BRCM_VULCAN:

> > > > > +                       return false;

> > > > > +       }

> > > > > +

> > > > >         /* Useful for KASLR robustness */

> > > > >         if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))

> > > > >                 return true;

> > > > > 

> > > > 

> > > > KPTI is also an improvement for KASLR. Why would you deprive a user of

> > > > the choice to further secure their system?

> > > 

> > > The user has a choice with kpti= at the kernel command line, so we are

> > > not depriving the user of a choice. KASLR is expected to be enabled by

> > > distributions, and KPTI will be enabled by default as well.

> > > 

> > > On systems that are not vulnerable to variant 3, this is an unnecessary

> > > overhead.

> > 

> > KASLR can be bypassed on CPUs that are not vulnerable to variant 3 simply

> > by timing how long accesses to kernel addresses from EL0 take -- please read

> > the original KAISER paper for details about that attack on x86. kpti

> > mitigates that. If you don't care about KASLR, don't enable it (arguably

> > it's useless without kpti).

> 

> The code above assumes that all ARM CPUs (now and future) will be vulnerable

> to timing attacks that can bypass KASLR. I don't think that is a correct

> assumption to make.


Well, the code is assuming that the difference between a TLB hit and a miss
can be measured and that permission-faulting entries can be cached in the
TLB. I think that's a safe assumption for the moment. You can also disable
kaslr on the command line and at compile time if you don't want to use it,
and the same thing applies to kpti. I really see this more as a user
preference, rather than something that should be keyed off the MIDR, and we
already provide those controls via the command line.

To be clear: I'll take the MIDR whitelisting, but only after the KASLR check
above.

> If ThunderX2 is shown to be vulnerable to any timing based attack we can

> certainly move the MIDR check after the check for the CONFIG_RANDOMIZE_BASE.

> But I don't think that is the case now, if you have any PoC code to check

> this I can run on the processor and make the change.


I haven't tried, but if you have a TLB worth its salt, I suspect you can
defeat kaslr by timing prefetches or faulting loads to kernel addresses.

> It is pretty clear that we need a whitelist check either before or after the

> CONFIG_RANDOMIZE_BASE check.


Please send a patch implementing this after the check.

> The kaiser paper seems to say that ARM TTBR0/1 made it more immune, and the

> prefetch paper(if I understand correctly) showed that prefetch on some ARM

> cores can be used for timing attack. This is probably and area where you will

> have better information, so any specific pointers would be appreciated - 

> especially ones showing that all ARM CPUs are susceptible.


Pretty much all the stuff specific to ARM (and there's not much) in the
paper is incorrect, but the basic premise of the timing attacks is sound.

Will
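
To make the requested placement concrete, here is a rough sketch (not a
merged patch) of unmap_kernel_at_el0() with JC's whitelist moved below the
KASLR check, combining only code already shown in this thread; the body of
the forced-on-command-line branch is reconstructed and should be treated as
an assumption:

	static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
					int __unused)
	{
		u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

		/* Forced on command line? */
		if (__kpti_forced) {
			pr_info_once("kernel page table isolation forced %s by command line option\n",
				     __kpti_forced > 0 ? "on" : "off");
			return __kpti_forced > 0;
		}

		/* Useful for KASLR robustness */
		if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
			return true;

		/* Don't force KPTI for CPUs that are not vulnerable */
		switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {
		case MIDR_CAVIUM_THUNDERX2:
		case MIDR_BRCM_VULCAN:
			return false;
		}

		/* Defer to CPU feature registers */
		return !cpuid_feature_extract_unsigned_field(pfr0,
							     ID_AA64PFR0_CSV3_SHIFT);
	}
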
Jon Masters Jan. 19, 2018, 1 a.m. UTC | #11
On 01/09/2018 05:00 AM, Will Deacon wrote:
> On Mon, Jan 08, 2018 at 08:06:27PM -0800, Jayachandran C wrote:

>> On Mon, Jan 08, 2018 at 05:51:00PM +0000, Will Deacon wrote:

>>> On Mon, Jan 08, 2018 at 09:40:17AM -0800, Jayachandran C wrote:

>>>> On Mon, Jan 08, 2018 at 09:20:09AM +0000, Marc Zyngier wrote:

>>>>> On 08/01/18 07:24, Jayachandran C wrote:

>>>>>> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c

>>>>>> index 19ed09b..202b037 100644

>>>>>> --- a/arch/arm64/kernel/cpufeature.c

>>>>>> +++ b/arch/arm64/kernel/cpufeature.c

>>>>>> @@ -862,6 +862,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

>>>>>>                 return __kpti_forced > 0;

>>>>>>         }

>>>>>>  

>>>>>> +       /* Don't force KPTI for CPUs that are not vulnerable */

>>>>>> +       switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {

>>>>>> +               case MIDR_CAVIUM_THUNDERX2:

>>>>>> +               case MIDR_BRCM_VULCAN:

>>>>>> +                       return false;

>>>>>> +       }

>>>>>> +

>>>>>>         /* Useful for KASLR robustness */

>>>>>>         if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))

>>>>>>                 return true;

>>>>>>

>>>>>

>>>>> KPTI is also an improvement for KASLR. Why would you deprive a user of

>>>>> the choice to further secure their system?

>>>>

>>>> The user has a choice with kpti= at the kernel command line, so we are

>>>> not depriving the user of a choice. KASLR is expected to be enabled by

>>>> distributions, and KPTI will be enabled by default as well.

>>>>

>>>> On systems that are not vulnerable to variant 3, this is an unnecessary

>>>> overhead.

>>>

>>> KASLR can be bypassed on CPUs that are not vulnerable to variant 3 simply

>>> by timing how long accesses to kernel addresses from EL0 take -- please read

>>> the original KAISER paper for details about that attack on x86. kpti

>>> mitigates that. If you don't care about KASLR, don't enable it (arguably

>>> it's useless without kpti).

>>

>> The code above assumes that all ARM CPUs (now and future) will be vulnerable

>> to timing attacks that can bypass KASLR. I don't think that is a correct

>> assumption to make.

> 

> Well, the code is assuming that the difference between a TLB hit and a miss

> can be measured and that permission faulting entries can be cached in the

> TLB. I think that's a safe assumption for the moment. You can also disable

> kaslr on the command line and at compile-time if you don't want to use it,

> and the same thing applies to kpti. I really see this more as user

> preference, rather than something that should be keyed off the MIDR and we

> already provide those controls via the command line.

> 

> To be clear: I'll take the MIDR whitelisting, but only after the KASLR check

> above.

> 

>> If ThunderX2 is shown to be vulnerable to any timing based attack we can

>> certainly move the MIDR check after the check for the CONFIG_RANDOMIZE_BASE.

>> But I don't think that is the case now, if you have any PoC code to check

>> this I can run on the processor and make the change.

> 

> I haven't tried, but if you have a TLB worth its salt, I suspect you can

> defeat kaslr by timing prefetches or faulting loads to kernel addresses.

> 

>> It is pretty clear that we need a whitelist check either before or after the

>> CONFIG_RANDOMIZE_BASE check.

> 

> Please send a patch implementing this after the check.


JC: what's the plan here from Cavium? I didn't see such a patch (but
might have missed it). Within Red Hat I've asked that we follow the same
logic as on x86 and default to disabling (k)pti on hardware known not
to be vulnerable to that explicit attack. Sure, KASLR bypass is "not
good"(TM) and there are ton(ne)s of new ways to do that found all the
time, but the performance hit is non-zero, there is a difference
between breaking randomization and leaking cache data, and HPC folks are
among those who are going to come asking why they need to turn off PTI
all over the place. The equivalent on x86 would be one vendor always
having PTI enabled and the other enabling it only if the user explicitly
requests that it be turned on at boot time.

Jon.

Patch

diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 08cc88574659..ae519bbd3f9e 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -437,6 +437,7 @@ 
 #define ID_AA64ISAR1_DPB_SHIFT		0
 
 /* id_aa64pfr0 */
+#define ID_AA64PFR0_CSV3_SHIFT		60
 #define ID_AA64PFR0_SVE_SHIFT		32
 #define ID_AA64PFR0_GIC_SHIFT		24
 #define ID_AA64PFR0_ASIMD_SHIFT		20
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 9f0545dfe497..d723fc071f39 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -145,6 +145,7 @@  static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
 	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
@@ -851,6 +852,8 @@  static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 				int __unused)
 {
+	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
 	/* Forced on command line? */
 	if (__kpti_forced) {
 		pr_info_once("kernel page table isolation forced %s by command line option\n",
@@ -862,7 +865,9 @@  static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
 		return true;
 
-	return false;
+	/* Defer to CPU feature registers */
+	return !cpuid_feature_extract_unsigned_field(pfr0,
+						     ID_AA64PFR0_CSV3_SHIFT);
 }
 
 static int __init parse_kpti(char *str)
@@ -967,6 +972,7 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 	},
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 	{
+		.desc = "Kernel page table isolation (KPTI)",
 		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
 		.def_scope = SCOPE_SYSTEM,
 		.matches = unmap_kernel_at_el0,