| Message ID | 20180215150248.28922-11-julien.grall@arm.com |
|---|---|
| State | Superseded |
| Series | xen/arm: PSCI 1.1 and SMCCC-1.1 support and XSA-254 variant 2 update |
Julien,

On 15.02.18 17:02, Julien Grall wrote:
> Add the detection and runtime code for ARM_SMCCC_ARCH_WORKAROUND_1.
>
> Signed-off-by: Julien Grall <julien.grall@arm.com>

Reviewed-by: Volodymyr Babchuk <volodymyr_babchuk@epam.com>

> [...]
On Thu, 15 Feb 2018, Julien Grall wrote:
> Add the detection and runtime code for ARM_SMCCC_ARCH_WORKAROUND_1.
>
> Signed-off-by: Julien Grall <julien.grall@arm.com>
> [...]
> +    /*
> +     * Enable callbacks are called on every CPU based on the
> +     * capabilities. So double-check whether the CPU matches the
> +     * entry.
> +     */
> +    if ( !entry->matches(entry) )
> +        return false;

I think this should be return true?

> +    if ( smccc_ver < SMCCC_VERSION(1, 1) )
> +        return false;
> [...]
Hi Stefano,

On 21/02/2018 00:35, Stefano Stabellini wrote:
> On Thu, 15 Feb 2018, Julien Grall wrote:
>> Add the detection and runtime code for ARM_SMCCC_ARCH_WORKAROUND_1.
>> [...]
>> +    if ( !entry->matches(entry) )
>> +        return false;
>
> I think this should be return true?

Both are valid. It depends how you consider the workflow here. If you return:
    - true: you say that this helper already took care of that CPU, so there is
      no need to continue further.
    - false: this CPU does not match, so fall back to a different method. That
      method will bail out later (see install_bp_hardening_vec).

I chose the latter because the SMCCC workaround is considered an alternative
method, so we want to fall back to the other one if it does not work, at the
cost of a few extra instructions. But that is boot code and is going to be
reworked in patch #11. Indeed, this is just a temporary solution to plumb the
new hardening method before we kill the PSCI_GET_VERSION one.

Cheers,
Hi,

On 15/02/18 15:02, Julien Grall wrote:
> Add the detection and runtime code for ARM_SMCCC_ARCH_WORKAROUND_1.
>
> Signed-off-by: Julien Grall <julien.grall@arm.com>
> [...]
> +ENTRY(__smccc_workaround_1_smc_start)
> +    sub     sp, sp, #(8 * 4)
> +    stp     x2, x3, [sp, #(8 * 0)]
> +    stp     x0, x1, [sp, #(8 * 2)]
> +    mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_1_FID
> +    smc     #0
> +    ldp     x2, x3, [sp, #(8 * 0)]
> +    ldp     x0, x1, [sp, #(8 * 2)]

I was expecting the restore to *mirror* the saving order, so x0, x1 first,
then x2, x3. The code you have is correct, but somewhat surprising. I wonder
if you could just swap those two lines.

Or even better: you swap the store instructions above, so that they match
what a push sequence would look like (higher addresses first).

> +    add     sp, sp, #(8 * 4)
> +ENTRY(__smccc_workaround_1_smc_end)
> [...]
> +    if ( !entry->matches(entry) )
> +        return false;
> +
> +    if ( smccc_ver < SMCCC_VERSION(1, 1) )
> +        return false;
> +

I guess we are calling the actual workaround function here to ultimately know
whether it is implemented? And we know that this function isn't harmful to
call in any case? Can you add a comment stating this here? Otherwise it's
slightly confusing to see a function called check_... actually making the
call and installing the workaround.

Cheers,
Andre.

> [...]
On Wed, 21 Feb 2018, Julien Grall wrote:
> Hi Stefano,
>
> On 21/02/2018 00:35, Stefano Stabellini wrote:
>> On Thu, 15 Feb 2018, Julien Grall wrote:
>>> [...]
>>> +    if ( !entry->matches(entry) )
>>> +        return false;
>>
>> I think this should be return true?
>
> Both are valid. It depends how you consider the workflow here. If you return:
>     - true: you say that this helper already took care of that CPU, so there
>       is no need to continue further.
>     - false: this CPU does not match, so fall back to a different method.
>       That method will bail out later (see install_bp_hardening_vec).
>
> I chose the latter because the SMCCC workaround is considered an alternative
> method, so we want to fall back to the other one if it does not work, at the
> cost of a few extra instructions. But that is boot code and is going to be
> reworked in patch #11. Indeed, this is just a temporary solution to plumb
> the new hardening method before we kill the PSCI_GET_VERSION one.

Yeah, I noticed that this is moot given the next patches in the series.
Given that you are already resending the series, I would also change this to
return true because I think it makes more sense, but it is unimportant, so
either way:

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
On 21/02/18 16:07, Andre Przywara wrote:
> Hi,

Hi Andre,

> On 15/02/18 15:02, Julien Grall wrote:
>> [...]
>> +ENTRY(__smccc_workaround_1_smc_start)
>> +    sub     sp, sp, #(8 * 4)
>> +    stp     x2, x3, [sp, #(8 * 0)]
>> +    stp     x0, x1, [sp, #(8 * 2)]
>> +    mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_1_FID
>> +    smc     #0
>> +    ldp     x2, x3, [sp, #(8 * 0)]
>> +    ldp     x0, x1, [sp, #(8 * 2)]
>
> I was expecting the restore to *mirror* the saving order, so x0, x1 first,
> then x2, x3. The code you have is correct, but somewhat surprising. I wonder
> if you could just swap those two lines.
>
> Or even better: you swap the store instructions above, so that they match
> what a push sequence would look like (higher addresses first).

I will choose this solution.

>> +    if ( !entry->matches(entry) )
>> +        return false;
>> +
>> +    if ( smccc_ver < SMCCC_VERSION(1, 1) )
>> +        return false;
>> +
>
> I guess we are calling the actual workaround function here to ultimately
> know whether it is implemented? And we know that this function isn't harmful
> to call in any case?

What do you mean? This is very similar to what we do in
enable_psci_bp_hardening. Except here we say the platform does not have
SMCCC 1.1, so fall back to another solution.

> Can you add a comment stating this here?

Stating what? It is clear enough that you can't call arm_smccc_1_1_smc if the
SMCCC version is not 1.1 (or later).

> Otherwise it's slightly confusing to see a function called check_...
> actually making the call and installing the workaround.

Please see the follow-up patch. The current naming makes sense because we
will fall back to the PSCI one if this one does not work.

Cheers,
Hi,

On 21/02/18 17:35, Stefano Stabellini wrote:
> On Wed, 21 Feb 2018, Julien Grall wrote:
>> [...]
>> I chose the latter because the SMCCC workaround is considered an
>> alternative method, so we want to fall back to the other one if it does
>> not work, at the cost of a few extra instructions. But that is boot code
>> and is going to be reworked in patch #11. Indeed, this is just a temporary
>> solution to plumb the new hardening method before we kill the
>> PSCI_GET_VERSION one.
>
> Yeah, I noticed that this is moot given the next patches in the series.
> Given that you are already resending the series, I would also change this
> to return true because I think it makes more sense, but it is unimportant,
> so either way:

I will keep false, because it makes little sense to return true here. It
would actually bring more confusion, as we return false just after for a
similar case.

> Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>

I am going to drop it, as I need some rework based on Andre's comments.

Cheers,
diff --git a/xen/arch/arm/arm64/bpi.S b/xen/arch/arm/arm64/bpi.S
index 4b7f1dc21f..981fb83a88 100644
--- a/xen/arch/arm/arm64/bpi.S
+++ b/xen/arch/arm/arm64/bpi.S
@@ -16,6 +16,8 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <asm/smccc.h>
+
 .macro ventry target
     .rept 31
     nop
@@ -81,6 +83,17 @@ ENTRY(__psci_hyp_bp_inval_start)
     add     sp, sp, #(8 * 18)
 ENTRY(__psci_hyp_bp_inval_end)
 
+ENTRY(__smccc_workaround_1_smc_start)
+    sub     sp, sp, #(8 * 4)
+    stp     x2, x3, [sp, #(8 * 0)]
+    stp     x0, x1, [sp, #(8 * 2)]
+    mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_1_FID
+    smc     #0
+    ldp     x2, x3, [sp, #(8 * 0)]
+    ldp     x0, x1, [sp, #(8 * 2)]
+    add     sp, sp, #(8 * 4)
+ENTRY(__smccc_workaround_1_smc_end)
+
 /*
  * Local variables:
  * mode: ASM
diff --git a/xen/arch/arm/cpuerrata.c b/xen/arch/arm/cpuerrata.c
index 8d5f8d372a..dec9074422 100644
--- a/xen/arch/arm/cpuerrata.c
+++ b/xen/arch/arm/cpuerrata.c
@@ -147,6 +147,34 @@ install_bp_hardening_vec(const struct arm_cpu_capabilities *entry,
     return ret;
 }
 
+extern char __smccc_workaround_1_smc_start[], __smccc_workaround_1_smc_end[];
+
+static bool
+check_smccc_arch_workaround_1(const struct arm_cpu_capabilities *entry)
+{
+    struct arm_smccc_res res;
+
+    /*
+     * Enable callbacks are called on every CPU based on the
+     * capabilities. So double-check whether the CPU matches the
+     * entry.
+     */
+    if ( !entry->matches(entry) )
+        return false;
+
+    if ( smccc_ver < SMCCC_VERSION(1, 1) )
+        return false;
+
+    arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FID,
+                      ARM_SMCCC_ARCH_WORKAROUND_1_FID, &res);
+    if ( res.a0 != ARM_SMCCC_SUCCESS )
+        return false;
+
+    return install_bp_hardening_vec(entry,__smccc_workaround_1_smc_start,
+                                    __smccc_workaround_1_smc_end,
+                                    "call ARM_SMCCC_ARCH_WORKAROUND_1");
+}
+
 extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
 
 static int enable_psci_bp_hardening(void *data)
@@ -154,12 +182,14 @@ static int enable_psci_bp_hardening(void *data)
     bool ret = true;
     static bool warned = false;
 
+    if ( check_smccc_arch_workaround_1(data) )
+        return 0;
     /*
      * The mitigation is using PSCI version function to invalidate the
      * branch predictor. This function is only available with PSCI 0.2
      * and later.
      */
-    if ( psci_ver >= PSCI_VERSION(0, 2) )
+    else if ( psci_ver >= PSCI_VERSION(0, 2) )
         ret = install_bp_hardening_vec(data, __psci_hyp_bp_inval_start,
                                        __psci_hyp_bp_inval_end,
                                        "call PSCI get version");
diff --git a/xen/include/asm-arm/smccc.h b/xen/include/asm-arm/smccc.h
index 154772b728..8342cc33fe 100644
--- a/xen/include/asm-arm/smccc.h
+++ b/xen/include/asm-arm/smccc.h
@@ -261,6 +261,7 @@ struct arm_smccc_res {
 /* SMCCC error codes */
 #define ARM_SMCCC_ERR_UNKNOWN_FUNCTION  (-1)
 #define ARM_SMCCC_NOT_SUPPORTED         (-1)
+#define ARM_SMCCC_SUCCESS               (0)
 
 /* SMCCC function identifier range which is reserved for existing APIs */
 #define ARM_SMCCC_RESERVED_RANGE_START  0x0
Add the detection and runtime code for ARM_SMCCC_ARCH_WORKAROUND_1.

Signed-off-by: Julien Grall <julien.grall@arm.com>

---
Changes in v3:
    - Add the missing call to smc #0.

Changes in v2:
    - Patch added
---
 xen/arch/arm/arm64/bpi.S    | 13 +++++++++++++
 xen/arch/arm/cpuerrata.c    | 32 +++++++++++++++++++++++++++++++-
 xen/include/asm-arm/smccc.h |  1 +
 3 files changed, 45 insertions(+), 1 deletion(-)