[Xen-devel,for-4.13,v4,11/19] xen/arm: Ensure the SSBD workaround is re-enabled right after exiting a guest

Message ID 20191031150922.22938-12-julien.grall@arm.com
State New
Headers show
Series
  • xen/arm: XSA-201 and XSA-263 fixes
Related show

Commit Message

Julien Grall Oct. 31, 2019, 3:09 p.m.
At the moment, the SSBD workaround is re-enabled for Xen after interrupts
are unmasked. This means we may end up executing some part of the
hypervisor if an interrupt is received before the workaround is
re-enabled.

Each trap may require different interrupts to be unmasked.
As the rest of enter_hypervisor_from_guest() does not require interrupts
to be masked, the function is now split in two parts:
    1) enter_hypervisor_from_guest_preirq() called with interrupts
       masked.
    2) enter_hypervisor_from_guest() called with interrupts unmasked.

Note that while it might be possible to avoid splitting the function in
two parts, it requires a bit more work than I can currently invest to
avoid using an indirect branch.

Furthermore, the function name is rather generic as there might be more
work to do before interrupts are unmasked in the future.

Fixes: a7898e4c59 ("xen/arm: Add ARCH_WORKAROUND_2 support for guests")
Reported-by: Andrii Anisov <andrii_anisov@epam.com>
Signed-off-by: Julien Grall <julien.grall@arm.com>

---
    Changes in v4:
        - Remove spurious line

    Changes in v3:
        - Rework the arm32 part

    Changes in v2:
        - Add Arm32 code
        - Rename enter_hypervisor_from_guest_noirq() to
        enter_hypervisor_from_guest_preirq()
        - Update the commit message to explain the choice of splitting
        the code.
---
 xen/arch/arm/arm32/entry.S |  2 +-
 xen/arch/arm/arm64/entry.S |  1 +
 xen/arch/arm/traps.c       | 14 ++++++++++++--
 3 files changed, 14 insertions(+), 3 deletions(-)

Comments

Stefano Stabellini Oct. 31, 2019, 6:15 p.m. | #1
On Thu, 31 Oct 2019, Julien Grall wrote:
> At the moment, SSBD workaround is re-enabled for Xen after interrupts
> are unmasked. This means we may end up to execute some part of the
> hypervisor if an interrupt is received before the workaround is
> re-enabled.
> 
> Each trap may require to unmask different interrupts.
> As the rest of enter_hypervisor_from_guest() does not require to have
> interrupts masked, the function is now split in two parts:
>     1) enter_hypervisor_from_guest_preirq() called with interrupts
>        masked.
>     2) enter_hypervisor_from_guest() called with interrupts unmasked.
> 
> Note that while it might be possible to avoid spliting the function in
> two parts, it requires a bit more work than I can currently invest to
> avoid using indirect branch.
> 
> Furthermore, the function name is rather generic as there might be more
> work to dob before interrupts are unmasked in the future.
> 
> Fixes: a7898e4c59 ("xen/arm: Add ARCH_WORKAROUND_2 support for guests")
> Reported-by: Andrii Anisov <andrii_anisov@epam.com>
> Signed-off-by: Julien Grall <julien.grall@arm.com>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>


> ---
>     Changes in v4:
>         - Remove spurious line
> 
>     Changes in v3:
>         - Rework the arm32 part
> 
>     Changes in v2:
>         - Add Arm32 code
>         - Rename enter_hypervisor_from_guest_noirq() to
>         enter_hypervisor_from_guest_preirq()
>         - Update the commit message to explain the choice of splitting
>         the code.
> ---
>  xen/arch/arm/arm32/entry.S |  2 +-
>  xen/arch/arm/arm64/entry.S |  1 +
>  xen/arch/arm/traps.c       | 14 ++++++++++++--
>  3 files changed, 14 insertions(+), 3 deletions(-)
> 
> diff --git a/xen/arch/arm/arm32/entry.S b/xen/arch/arm/arm32/entry.S
> index cea4e0e302..0a9c248ee2 100644
> --- a/xen/arch/arm/arm32/entry.S
> +++ b/xen/arch/arm/arm32/entry.S
> @@ -118,7 +118,7 @@ abort_guest_exit_end:
>          bne return_from_trap
>  
>  skip_check:
> -        mov pc, lr
> +        b   enter_hypervisor_from_guest_preirq
>  ENDPROC(arch_enter_hypervisor_from_guest_preirq)
>  
>          /*
> diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
> index 97dc60210d..d4fb5fdc1c 100644
> --- a/xen/arch/arm/arm64/entry.S
> +++ b/xen/arch/arm/arm64/entry.S
> @@ -191,6 +191,7 @@
>          ALTERNATIVE("bl check_pending_vserror; cbnz x0, 1f",
>                      "nop; nop",
>                      SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
> +        bl      enter_hypervisor_from_guest_preirq
>          msr     daifclr, \iflags
>          bl      enter_hypervisor_from_guest
>          mov     x0, sp
> diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
> index adbedc2d15..cb4e3b627b 100644
> --- a/xen/arch/arm/traps.c
> +++ b/xen/arch/arm/traps.c
> @@ -1986,15 +1986,25 @@ static inline bool needs_ssbd_flip(struct vcpu *v)
>  
>  /*
>   * Actions that needs to be done after entering the hypervisor from the
> - * guest and before we handle any request.
> + * guest and before the interrupts are unmasked.
>   */
> -void enter_hypervisor_from_guest(void)
> +void enter_hypervisor_from_guest_preirq(void)
>  {
>      struct vcpu *v = current;
>  
>      /* If the guest has disabled the workaround, bring it back on. */
>      if ( needs_ssbd_flip(v) )
>          arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2_FID, 1, NULL);
> +}
> +
> +/*
> + * Actions that needs to be done after entering the hypervisor from the
> + * guest and before we handle any request. Depending on the exception trap,
> + * this may be called with interrupts unmasked.
> + */
> +void enter_hypervisor_from_guest(void)
> +{
> +    struct vcpu *v = current;
>  
>      /*
>       * If we pended a virtual abort, preserve it until it gets cleared.
> -- 
> 2.11.0
>

Patch

diff --git a/xen/arch/arm/arm32/entry.S b/xen/arch/arm/arm32/entry.S
index cea4e0e302..0a9c248ee2 100644
--- a/xen/arch/arm/arm32/entry.S
+++ b/xen/arch/arm/arm32/entry.S
@@ -118,7 +118,7 @@  abort_guest_exit_end:
         bne return_from_trap
 
 skip_check:
-        mov pc, lr
+        b   enter_hypervisor_from_guest_preirq
 ENDPROC(arch_enter_hypervisor_from_guest_preirq)
 
         /*
diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
index 97dc60210d..d4fb5fdc1c 100644
--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -191,6 +191,7 @@ 
         ALTERNATIVE("bl check_pending_vserror; cbnz x0, 1f",
                     "nop; nop",
                     SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
+        bl      enter_hypervisor_from_guest_preirq
         msr     daifclr, \iflags
         bl      enter_hypervisor_from_guest
         mov     x0, sp
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index adbedc2d15..cb4e3b627b 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -1986,15 +1986,25 @@  static inline bool needs_ssbd_flip(struct vcpu *v)
 
 /*
  * Actions that needs to be done after entering the hypervisor from the
- * guest and before we handle any request.
+ * guest and before the interrupts are unmasked.
  */
-void enter_hypervisor_from_guest(void)
+void enter_hypervisor_from_guest_preirq(void)
 {
     struct vcpu *v = current;
 
     /* If the guest has disabled the workaround, bring it back on. */
     if ( needs_ssbd_flip(v) )
         arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2_FID, 1, NULL);
+}
+
+/*
+ * Actions that needs to be done after entering the hypervisor from the
+ * guest and before we handle any request. Depending on the exception trap,
+ * this may be called with interrupts unmasked.
+ */
+void enter_hypervisor_from_guest(void)
+{
+    struct vcpu *v = current;
 
     /*
      * If we pended a virtual abort, preserve it until it gets cleared.