@@ -21,6 +21,7 @@
 #include <xen/wait.h>
 
 #include <asm/alternative.h>
+#include <asm/cpuerrata.h>
 #include <asm/cpufeature.h>
 #include <asm/current.h>
 #include <asm/event.h>
@@ -572,6 +573,13 @@ int vcpu_initialise(struct vcpu *v)
     if ( (rc = vcpu_vtimer_init(v)) != 0 )
         goto fail;
 
+    /*
+     * Workaround 2 (i.e. the SSBD mitigation) is enabled by default
+     * when supported.
+     */
+    if ( get_ssbd_state() == ARM_SSBD_RUNTIME )
+        v->arch.cpu_info->flags |= CPUINFO_WORKAROUND_2_FLAG;
+
     return rc;
 
 fail:
@@ -2011,10 +2011,28 @@ inject_abt:
     inject_iabt_exception(regs, gva, hsr.len);
 }
 
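+/*
+ * True when this CPU requires the SSBD mitigation but the guest vCPU
+ * has turned the workaround off: the mitigation state then needs to
+ * be flipped on hypervisor entry and exit.
+ */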
+static inline bool needs_ssbd_flip(struct vcpu *v)
+{
+    if ( !check_workaround_ssbd() )
+        return false;
+
+    return !(v->arch.cpu_info->flags & CPUINFO_WORKAROUND_2_FLAG) &&
+             cpu_require_ssbd_mitigation();
+}
+
 static void enter_hypervisor_head(struct cpu_user_regs *regs)
 {
     if ( guest_mode(regs) )
     {
+        /* If the guest has disabled the workaround, bring it back on. */
+        if ( needs_ssbd_flip(current) )
+            arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2_FID, 1, NULL);
+
         /*
          * If we pended a virtual abort, preserve it until it gets cleared.
          * See ARM ARM DDI 0487A.j D1.14.3 (Virtual Interrupts) for details,
@@ -2260,6 +2278,13 @@ void leave_hypervisor_tail(void)
              */
             SYNCHRONIZE_SERROR(SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT);
 
+            /*
+             * The hypervisor runs with the workaround always present.
+             * If the guest wants it disabled, so be it...
+             */
+            if ( needs_ssbd_flip(current) )
+                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2_FID, 0, NULL);
+
             return;
         }
         local_irq_enable();
@@ -18,6 +18,7 @@
 #include <xen/lib.h>
 #include <xen/types.h>
 #include <public/arch-arm/smccc.h>
+#include <asm/cpuerrata.h>
 #include <asm/cpufeature.h>
 #include <asm/monitor.h>
 #include <asm/regs.h>
@@ -104,6 +105,28 @@ static bool handle_arch(struct cpu_user_regs *regs)
             if ( cpus_have_cap(ARM_HARDEN_BRANCH_PREDICTOR) )
                 ret = 0;
             break;
+        case ARM_SMCCC_ARCH_WORKAROUND_2_FID:
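+            /*
+             * Advertise the workaround only when its state can be
+             * changed at runtime; report it as not required when the
+             * mitigation is permanently enabled or unnecessary.
+             */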
+            switch ( get_ssbd_state() )
+            {
+            case ARM_SSBD_UNKNOWN:
+            case ARM_SSBD_FORCE_DISABLE:
+                break;
+
+            case ARM_SSBD_RUNTIME:
+                ret = ARM_SMCCC_SUCCESS;
+                break;
+
+            case ARM_SSBD_FORCE_ENABLE:
+            case ARM_SSBD_MITIGATED:
+                ret = ARM_SMCCC_NOT_REQUIRED;
+                break;
+            }
+            break;
         }
 
         set_user_reg(regs, 0, ret);
@@ -114,6 +137,30 @@ static bool handle_arch(struct cpu_user_regs *regs)
     case ARM_SMCCC_ARCH_WORKAROUND_1_FID:
         /* No return value */
         return true;
+
+    case ARM_SMCCC_ARCH_WORKAROUND_2_FID:
+    {
+        bool enable = (uint32_t)get_user_reg(regs, 1);
+
+        /*
+         * ARM_SMCCC_ARCH_WORKAROUND_2_FID should only be called when
+         * the mitigation state can be changed at runtime.
+         */
+        if ( unlikely(get_ssbd_state() != ARM_SSBD_RUNTIME) )
+            return true;
+
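+        /*
+         * The flag is examined in enter_hypervisor_head() and
+         * leave_hypervisor_tail() to decide whether the mitigation
+         * state needs to be flipped between guest and hypervisor.
+         */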
+        if ( enable )
+            get_cpu_info()->flags |= CPUINFO_WORKAROUND_2_FLAG;
+        else
+            get_cpu_info()->flags &= ~CPUINFO_WORKAROUND_2_FLAG;
+
+        return true;
+    }
     }
 
     return false;
@@ -7,6 +7,10 @@
 #include <asm/percpu.h>
 #include <asm/processor.h>
 
+/* Tell whether the guest vCPU enabled Workaround 2 (i.e. Spectre variant 4) */
+#define CPUINFO_WORKAROUND_2_FLAG_SHIFT 0
+#define CPUINFO_WORKAROUND_2_FLAG (_AC(1, U) << CPUINFO_WORKAROUND_2_FLAG_SHIFT)
+
 #ifndef __ASSEMBLY__
 
 struct vcpu;
@@ -21,7 +25,8 @@ DECLARE_PER_CPU(struct vcpu *, curr_vcpu);
 struct cpu_info {
     struct cpu_user_regs guest_cpu_user_regs;
     unsigned long elr;
-    unsigned int pad;
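+    /* Currently only used to store CPUINFO_WORKAROUND_2_FLAG. */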
+    uint32_t flags;
 };
 
 static inline struct cpu_info *get_cpu_info(void)