From patchwork Thu Sep 26 18:37:59 2019
Subject: [Xen-devel, RFC, for-4.13, 01/10] xen/arm64: entry: Introduce a macro to generate guest vector and use it
X-Patchwork-Submitter: Julien Grall
X-Patchwork-Id: 174514
Message-Id: <20190926183808.11630-2-julien.grall@arm.com>
To: xen-devel@lists.xenproject.org
Cc: Julien Grall, Stefano Stabellini, Volodymyr Babchuk, andrii.anisov@gmail.com
Date: Thu, 26 Sep 2019 19:37:59 +0100
From: Julien Grall
List-Id: Xen developer discussion

Most of the guest vectors use the same pattern. This makes it fairly
tedious to alter the pattern and risks introducing mistakes when
updating each path.

A new macro is introduced to generate the guest vectors, and the
vectors that used the open-coded version are now switched to it.

Signed-off-by: Julien Grall
Reviewed-by: Volodymyr Babchuk
Reviewed-by: Stefano Stabellini
---
 xen/arch/arm/arm64/entry.S | 84 ++++++++++++++++------------------------------
 1 file changed, 28 insertions(+), 56 deletions(-)

diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
index 2d9a2713a1..8665d2844a 100644
--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -157,6 +157,30 @@
     .endm

+    /*
+     * Generate a guest vector.
+     *
+     * iflags: Correspond to the list of interrupts to unmask
+     * save_x0_x1: See the description on top of the macro 'entry'
+     */
+    .macro  guest_vector compat, iflags, trap, save_x0_x1=1
+    entry   hyp=0, compat=\compat, save_x0_x1=\save_x0_x1
+    /*
+     * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
+     * is not set. If a vSError took place, the initial exception will be
+     * skipped. Exit ASAP
+     */
+    ALTERNATIVE("bl check_pending_vserror; cbnz x0, 1f",
+                "nop; nop",
+                SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
+    msr     daifclr, \iflags
+    mov     x0, sp
+    bl      do_trap_\trap
+1:
+    exit    hyp=0, compat=\compat
+    .endm
+
 /*
  * Bad Abort numbers
  *-----------------
@@ -290,36 +314,10 @@ guest_sync_slowpath:
      * x0/x1 may have been scratch by the fast path above, so avoid
      * to save them.
      */
-    entry   hyp=0, compat=0, save_x0_x1=0
-    /*
-     * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
-     * is not set. If a vSError took place, the initial exception will be
-     * skipped. Exit ASAP
-     */
-    ALTERNATIVE("bl check_pending_vserror; cbnz x0, 1f",
-                "nop; nop",
-                SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
-    msr     daifclr, #6
-    mov     x0, sp
-    bl      do_trap_guest_sync
-1:
-    exit    hyp=0, compat=0
+    guest_vector compat=0, iflags=6, trap=guest_sync, save_x0_x1=0

 guest_irq:
-    entry   hyp=0, compat=0
-    /*
-     * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
-     * is not set. If a vSError took place, the initial exception will be
-     * skipped. Exit ASAP
-     */
-    ALTERNATIVE("bl check_pending_vserror; cbnz x0, 1f",
-                "nop; nop",
-                SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
-    msr     daifclr, #4
-    mov     x0, sp
-    bl      do_trap_irq
-1:
-    exit    hyp=0, compat=0
+    guest_vector compat=0, iflags=4, trap=irq

 guest_fiq_invalid:
     entry   hyp=0, compat=0
@@ -333,36 +331,10 @@ guest_error:
     exit    hyp=0, compat=0

 guest_sync_compat:
-    entry   hyp=0, compat=1
-    /*
-     * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
-     * is not set. If a vSError took place, the initial exception will be
-     * skipped. Exit ASAP
-     */
-    ALTERNATIVE("bl check_pending_vserror; cbnz x0, 1f",
-                "nop; nop",
-                SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
-    msr     daifclr, #6
-    mov     x0, sp
-    bl      do_trap_guest_sync
-1:
-    exit    hyp=0, compat=1
+    guest_vector compat=1, iflags=6, trap=guest_sync

 guest_irq_compat:
-    entry   hyp=0, compat=1
-    /*
-     * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
-     * is not set. If a vSError took place, the initial exception will be
-     * skipped. Exit ASAP
-     */
-    ALTERNATIVE("bl check_pending_vserror; cbnz x0, 1f",
-                "nop; nop",
-                SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
-    msr     daifclr, #4
-    mov     x0, sp
-    bl      do_trap_irq
-1:
-    exit    hyp=0, compat=1
+    guest_vector compat=1, iflags=4, trap=irq

 guest_fiq_invalid_compat:
     entry   hyp=0, compat=1
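
For illustration (this block is not part of the patch), invoking the new
macro as `guest_vector compat=0, iflags=4, trap=irq` expands to exactly the
open-coded sequence removed above:

    entry   hyp=0, compat=0, save_x0_x1=1
    /* vSError check, patched to NOPs when SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT is set */
    ALTERNATIVE("bl check_pending_vserror; cbnz x0, 1f",
                "nop; nop",
                SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
    msr     daifclr, #4        /* iflags=4: unmask IRQ */
    mov     x0, sp             /* pass the saved register frame */
    bl      do_trap_irq        /* do_trap_\trap with trap=irq */
1:
    exit    hyp=0, compat=0
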
From patchwork Thu Sep 26 18:38:00 2019
Subject: [Xen-devel, RFC, for-4.13, 02/10] xen/arm64: head: Check if an SError is pending when receiving a vSError
X-Patchwork-Submitter: Julien Grall
X-Patchwork-Id: 174506
Message-Id: <20190926183808.11630-3-julien.grall@arm.com>
To: xen-devel@lists.xenproject.org
Cc: Julien Grall, Stefano Stabellini, Volodymyr Babchuk, andrii.anisov@gmail.com
Date: Thu, 26 Sep 2019 19:38:00 +0100
From: Julien Grall
List-Id: Xen developer discussion

At the moment, when we receive an SError exception from the guest, we
don't check if there are any others pending. To harden the code, we
should ensure any pending SErrors are accounted to the guest before
executing any code with SErrors unmasked.

The recently introduced macro 'guest_vector' can be used to generate the
two vectors, so they take advantage of any change required in the future.

Signed-off-by: Julien Grall
Reviewed-by: Volodymyr Babchuk
Reviewed-by: Stefano Stabellini
---
 xen/arch/arm/arm64/entry.S | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
index 8665d2844a..40d9f3ec8c 100644
--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -324,11 +324,7 @@ guest_fiq_invalid:
     invalid BAD_FIQ

 guest_error:
-    entry   hyp=0, compat=0
-    msr     daifclr, #6
-    mov     x0, sp
-    bl      do_trap_guest_serror
-    exit    hyp=0, compat=0
+    guest_vector compat=0, iflags=6, trap=guest_serror

 guest_sync_compat:
     guest_vector compat=1, iflags=6, trap=guest_sync
@@ -341,11 +337,7 @@ guest_fiq_invalid_compat:
     invalid BAD_FIQ

 guest_error_compat:
-    entry   hyp=0, compat=1
-    msr     daifclr, #6
-    mov     x0, sp
-    bl      do_trap_guest_serror
-    exit    hyp=0, compat=1
+    guest_vector compat=1, iflags=6, trap=guest_serror

 ENTRY(return_to_new_vcpu32)
     exit    hyp=0, compat=1
From patchwork Thu Sep 26 18:38:01 2019
Subject: [Xen-devel, RFC, for-4.13, 03/10] xen/arm: traps: Rework entry/exit from the guest path
X-Patchwork-Submitter: Julien Grall
X-Patchwork-Id: 174515
Message-Id: <20190926183808.11630-4-julien.grall@arm.com>
To: xen-devel@lists.xenproject.org
Cc: Julien Grall, Stefano Stabellini, Volodymyr Babchuk, andrii.anisov@gmail.com
Date: Thu, 26 Sep 2019 19:38:01 +0100
From: Julien Grall
List-Id: Xen developer discussion

At the moment, enter_hypervisor_head() and leave_hypervisor_tail() are
used to deal with the actions to be done before/after any guest request
is handled. While they are meant to work as a pair, the former is called
for most of the traps, including traps from the same exception level
(i.e. the hypervisor), whilst the latter is only called when returning
to the guest.

As enter_hypervisor_head() is not called from all the traps, this makes
it potentially difficult to extend it to deal with same-exception-level
traps. Furthermore, some assembly-only paths will need to call
enter_hypervisor_head(). So the function is now called directly from
assembly, for the guest vectors only. This means the check of whether we
were called from a guest trap can now be removed.

Take the opportunity to rename enter_hypervisor_head() and
leave_hypervisor_tail() to something more meaningful and document them.
This should help everyone understand the purpose of the two functions.

Signed-off-by: Julien Grall

---
I haven't done the 32-bit part yet. I wanted to gather feedback before
looking in detail at how to integrate this with Arm32.
---
 xen/arch/arm/arm64/entry.S |  4 ++-
 xen/arch/arm/traps.c       | 71 ++++++++++++++++++++++------------------------
 2 files changed, 37 insertions(+), 38 deletions(-)

diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
index 40d9f3ec8c..9eafae516b 100644
--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -147,7 +147,7 @@
     .if \hyp == 0         /* Guest mode */

-        bl      leave_hypervisor_tail /* Disables interrupts on return */
+        bl      leave_hypervisor_to_guest /* Disables interrupts on return */

         exit_guest \compat

@@ -175,6 +175,8 @@
                 SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
     msr     daifclr, \iflags
     mov     x0, sp
+    bl      enter_hypervisor_from_guest
+    mov     x0, sp
     bl      do_trap_\trap
 1:
     exit    hyp=0, compat=\compat
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index a3b961bd06..20ba34ec91 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -2006,47 +2006,46 @@ static inline bool needs_ssbd_flip(struct vcpu *v)
         cpu_require_ssbd_mitigation();
 }

-static void enter_hypervisor_head(struct cpu_user_regs *regs)
+/*
+ * Actions that needs to be done after exiting the guest and before any
+ * request from it is handled.
+ */
+void enter_hypervisor_from_guest(struct cpu_user_regs *regs)
 {
-    if ( guest_mode(regs) )
-    {
-        struct vcpu *v = current;
+    struct vcpu *v = current;

-        /* If the guest has disabled the workaround, bring it back on. */
-        if ( needs_ssbd_flip(v) )
-            arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2_FID, 1, NULL);
+    /* If the guest has disabled the workaround, bring it back on. */
+    if ( needs_ssbd_flip(v) )
+        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2_FID, 1, NULL);

-        /*
-         * If we pended a virtual abort, preserve it until it gets cleared.
-         * See ARM ARM DDI 0487A.j D1.14.3 (Virtual Interrupts) for details,
-         * but the crucial bit is "On taking a vSError interrupt, HCR_EL2.VSE
-         * (alias of HCR.VA) is cleared to 0."
-         */
-        if ( v->arch.hcr_el2 & HCR_VA )
-            v->arch.hcr_el2 = READ_SYSREG(HCR_EL2);
+    /*
+     * If we pended a virtual abort, preserve it until it gets cleared.
+     * See ARM ARM DDI 0487A.j D1.14.3 (Virtual Interrupts) for details,
+     * but the crucial bit is "On taking a vSError interrupt, HCR_EL2.VSE
+     * (alias of HCR.VA) is cleared to 0."
+     */
+    if ( v->arch.hcr_el2 & HCR_VA )
+        v->arch.hcr_el2 = READ_SYSREG(HCR_EL2);

 #ifdef CONFIG_NEW_VGIC
-        /*
-         * We need to update the state of our emulated devices using level
-         * triggered interrupts before syncing back the VGIC state.
-         *
-         * TODO: Investigate whether this is necessary to do on every
-         * trap and how it can be optimised.
-         */
-        vtimer_update_irqs(v);
-        vcpu_update_evtchn_irq(v);
+    /*
+     * We need to update the state of our emulated devices using level
+     * triggered interrupts before syncing back the VGIC state.
+     *
+     * TODO: Investigate whether this is necessary to do on every
+     * trap and how it can be optimised.
+     */
+    vtimer_update_irqs(v);
+    vcpu_update_evtchn_irq(v);
 #endif

-        vgic_sync_from_lrs(v);
-    }
+    vgic_sync_from_lrs(v);
 }

 void do_trap_guest_sync(struct cpu_user_regs *regs)
 {
     const union hsr hsr = { .bits = regs->hsr };

-    enter_hypervisor_head(regs);
-
     switch ( hsr.ec )
     {
     case HSR_EC_WFI_WFE:
@@ -2180,8 +2179,6 @@ void do_trap_hyp_sync(struct cpu_user_regs *regs)
 {
     const union hsr hsr = { .bits = regs->hsr };

-    enter_hypervisor_head(regs);
-
     switch ( hsr.ec )
     {
 #ifdef CONFIG_ARM_64
@@ -2218,27 +2215,21 @@ void do_trap_hyp_sync(struct cpu_user_regs *regs)

 void do_trap_hyp_serror(struct cpu_user_regs *regs)
 {
-    enter_hypervisor_head(regs);
-
     __do_trap_serror(regs, VABORT_GEN_BY_GUEST(regs));
 }

 void do_trap_guest_serror(struct cpu_user_regs *regs)
 {
-    enter_hypervisor_head(regs);
-
     __do_trap_serror(regs, true);
 }

 void do_trap_irq(struct cpu_user_regs *regs)
 {
-    enter_hypervisor_head(regs);
     gic_interrupt(regs, 0);
 }

 void do_trap_fiq(struct cpu_user_regs *regs)
 {
-    enter_hypervisor_head(regs);
     gic_interrupt(regs, 1);
 }

@@ -2281,7 +2272,13 @@ static void check_for_vcpu_work(void)
     local_irq_disable();
 }

-void leave_hypervisor_tail(void)
+/*
+ * Actions that needs to be done before entering the guest. This is the
+ * last thing executed before the guest context is fully restored.
+ *
+ * The function will return with interrupts disabled.
+ */
+void leave_hypervisor_to_guest(void)
 {
     local_irq_disable();
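
Reconstructed by applying the entry.S hunk above (this block is for
readability only and is not itself in the patch), the guest_vector macro now
pairs the two halves explicitly:

    .macro  guest_vector compat, iflags, trap, save_x0_x1=1
    entry   hyp=0, compat=\compat, save_x0_x1=\save_x0_x1
    ALTERNATIVE("bl check_pending_vserror; cbnz x0, 1f",
                "nop; nop",
                SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
    msr     daifclr, \iflags
    mov     x0, sp
    bl      enter_hypervisor_from_guest   /* new: entry-side half of the pair */
    mov     x0, sp
    bl      do_trap_\trap
1:
    exit    hyp=0, compat=\compat         /* exit side calls leave_hypervisor_to_guest */
    .endm
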
From patchwork Thu Sep 26 18:38:02 2019
Subject: [Xen-devel, RFC, for-4.13, 04/10] xen/arm: Ensure the SSBD workaround is re-enabled right after exiting a guest
X-Patchwork-Submitter: Julien Grall
X-Patchwork-Id: 174516
Message-Id: <20190926183808.11630-5-julien.grall@arm.com>
To: xen-devel@lists.xenproject.org
Cc: Andrii Anisov, Julien Grall, Stefano Stabellini, Volodymyr Babchuk, andrii.anisov@gmail.com
Date: Thu, 26 Sep 2019 19:38:02 +0100
From: Julien Grall
List-Id: Xen developer discussion

At the moment, the SSBD workaround is re-enabled for Xen after interrupts
are unmasked. This means we may end up executing part of the hypervisor
without the workaround if an interrupt is received before it is
re-enabled.

As the rest of enter_hypervisor_from_guest() does not require interrupts
to be masked, the function is now split in two parts:
    1) enter_hypervisor_from_guest_noirq(), called with interrupts
       masked.
    2) enter_hypervisor_from_guest(), called with interrupts unmasked.

Note that while enter_hypervisor_from_guest_noirq() does not use the
on-stack context registers, they are still passed as a parameter to match
the rest of the C functions called from the entry path.

Fixes: a7898e4c59 ("xen/arm: Add ARCH_WORKAROUND_2 support for guests")
Reported-by: Andrii Anisov
Signed-off-by: Julien Grall
Reviewed-by: Volodymyr Babchuk

---
Note the Arm32 code has not been changed yet.

I am also open to turning both enter_hypervisor_from_guest_noirq() and
enter_hypervisor_from_guest() into functions not taking any parameters.
---
 xen/arch/arm/arm64/entry.S |  2 ++
 xen/arch/arm/traps.c       | 16 +++++++++++++---
 2 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
index 9eafae516b..458d12f188 100644
--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -173,6 +173,8 @@
     ALTERNATIVE("bl check_pending_vserror; cbnz x0, 1f",
                 "nop; nop",
                 SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
+    mov     x0, sp
+    bl      enter_hypervisor_from_guest_noirq
     msr     daifclr, \iflags
     mov     x0, sp
     bl      enter_hypervisor_from_guest
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 20ba34ec91..5848dd8399 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -2007,16 +2007,26 @@ static inline bool needs_ssbd_flip(struct vcpu *v)
 }

 /*
- * Actions that needs to be done after exiting the guest and before any
- * request from it is handled.
+ * Actions that needs to be done after exiting the guest and before the
+ * interrupts are unmasked.
  */
-void enter_hypervisor_from_guest(struct cpu_user_regs *regs)
+void enter_hypervisor_from_guest_noirq(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;

     /* If the guest has disabled the workaround, bring it back on. */
     if ( needs_ssbd_flip(v) )
         arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2_FID, 1, NULL);
+}
+
+/*
+ * Actions that needs to be done after exiting the guest and before any
+ * request from it is handled. Depending on the exception trap, this may
+ * be called with interrupts unmasked.
+ */
+void enter_hypervisor_from_guest(struct cpu_user_regs *regs)
+{
+    struct vcpu *v = current;

     /*
      * If we pended a virtual abort, preserve it until it gets cleared.
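
Reconstructed from the entry.S hunk above (not a separate hunk in the
patch), the entry sequence now re-enables the SSBD workaround while all
interrupts are still masked, and only then unmasks:

    /* interrupts still masked here */
    mov     x0, sp
    bl      enter_hypervisor_from_guest_noirq   /* re-enables the SSBD workaround */
    msr     daifclr, \iflags                    /* only now unmask */
    mov     x0, sp
    bl      enter_hypervisor_from_guest         /* remaining sync; IRQs may fire */
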
From patchwork Thu Sep 26 18:38:03 2019
Subject: [Xen-devel, RFC, for-4.13, 05/10] xen/arm: alternative: Remove unused parameter for alternative_if_not_cap
X-Patchwork-Submitter: Julien Grall
X-Patchwork-Id: 174513
Message-Id: <20190926183808.11630-6-julien.grall@arm.com>
To: xen-devel@lists.xenproject.org
Cc: Julien Grall, Stefano Stabellini, Volodymyr Babchuk, andrii.anisov@gmail.com
Date: Thu, 26 Sep 2019 19:38:03 +0100
From: Julien Grall
List-Id: Xen developer discussion

The macro alternative_if_not_cap takes two parameters. The second
parameter is never used, and it is hard to see how it could be used
correctly, as it only guards the alternative section magic.

Signed-off-by: Julien Grall
Reviewed-by: Volodymyr Babchuk
Acked-by: Stefano Stabellini
---
 xen/include/asm-arm/alternative.h | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/xen/include/asm-arm/alternative.h b/xen/include/asm-arm/alternative.h
index dedb6dd001..2830a6da2d 100644
--- a/xen/include/asm-arm/alternative.h
+++ b/xen/include/asm-arm/alternative.h
@@ -116,13 +116,11 @@ int apply_alternatives(const struct alt_instr *start, const struct alt_instr *en
  * The code that follows this macro will be assembled and linked as
  * normal. There are no restrictions on this code.
  */
-.macro alternative_if_not cap, enable = 1
-    .if \enable
+.macro alternative_if_not cap
     .pushsection .altinstructions, "a"
     altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
     .popsection
 661:
-    .endif
 .endm

 /*
From patchwork Thu Sep 26 18:38:04 2019
Subject: [Xen-devel, RFC, for-4.13, 06/10] xen/arm: Move ARCH_PATCH_INSN_SIZE out of the header livepatch.h
X-Patchwork-Submitter: Julien Grall
X-Patchwork-Id: 174510
Message-Id: <20190926183808.11630-7-julien.grall@arm.com>
To: xen-devel@lists.xenproject.org
Cc: Stefano Stabellini, Konrad Rzeszutek Wilk, andrii.anisov@gmail.com, Ross Lagerwall, Julien Grall, Volodymyr Babchuk
Date: Thu, 26 Sep 2019 19:38:04 +0100
From: Julien Grall
List-Id: Xen developer discussion

At the moment, ARCH_PATCH_INSN_SIZE is defined in the header livepatch.h.
However, it is also used in the alternative code. Rather than including
livepatch.h just to use the define, move it to the header insn.h, which
seems more suitable.

Signed-off-by: Julien Grall
Reviewed-by: Volodymyr Babchuk
Reviewed-by: Ross Lagerwall
Acked-by: Stefano Stabellini
---
 xen/arch/arm/alternative.c      | 2 --
 xen/include/asm-arm/insn.h      | 3 +++
 xen/include/asm-arm/livepatch.h | 4 +---
 3 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/xen/arch/arm/alternative.c b/xen/arch/arm/alternative.c
index 52ed7edf69..237c4e5642 100644
--- a/xen/arch/arm/alternative.c
+++ b/xen/arch/arm/alternative.c
@@ -30,8 +30,6 @@
 #include
 #include
 #include
-/* XXX: Move ARCH_PATCH_INSN_SIZE out of livepatch.h */
-#include
 #include

 /* Override macros from asm/page.h to make them work with mfn_t */
diff --git a/xen/include/asm-arm/insn.h b/xen/include/asm-arm/insn.h
index 3489179826..19277212e1 100644
--- a/xen/include/asm-arm/insn.h
+++ b/xen/include/asm-arm/insn.h
@@ -11,6 +11,9 @@
 # error "unknown ARM variant"
 #endif

+/* On ARM32,64 instructions are always 4 bytes long. */
+#define ARCH_PATCH_INSN_SIZE 4
+
 #endif /* !__ARCH_ARM_INSN */
 /*
  * Local variables:
diff --git a/xen/include/asm-arm/livepatch.h b/xen/include/asm-arm/livepatch.h
index 6bca79deb9..026af5e7dc 100644
--- a/xen/include/asm-arm/livepatch.h
+++ b/xen/include/asm-arm/livepatch.h
@@ -7,9 +7,7 @@
 #define __XEN_ARM_LIVEPATCH_H__

 #include /* For SZ_* macros. */
-
-/* On ARM32,64 instructions are always 4 bytes long. */
-#define ARCH_PATCH_INSN_SIZE 4
+#include

 /*
  * The va of the hypervisor .text region. We need this as the
From patchwork Thu Sep 26 18:38:05 2019
Subject: [Xen-devel, RFC, for-4.13, 07/10] xen/arm: Allow insn.h to be included from assembly
X-Patchwork-Submitter: Julien Grall
X-Patchwork-Id: 174511
Message-Id: <20190926183808.11630-8-julien.grall@arm.com>
To: xen-devel@lists.xenproject.org
Cc: Julien Grall, Stefano Stabellini, Volodymyr Babchuk, andrii.anisov@gmail.com
Date: Thu, 26 Sep 2019 19:38:05 +0100
From: Julien Grall
List-Id: Xen developer discussion

A follow-up patch will need to include insn.h from assembly code. So we
need to protect any C-specific definitions to avoid compilation errors
when the header is used in assembly code.

Signed-off-by: Julien Grall
Reviewed-by: Volodymyr Babchuk
---
 xen/include/asm-arm/insn.h | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/xen/include/asm-arm/insn.h b/xen/include/asm-arm/insn.h
index 19277212e1..00391f83f9 100644
--- a/xen/include/asm-arm/insn.h
+++ b/xen/include/asm-arm/insn.h
@@ -1,8 +1,14 @@
 #ifndef __ARCH_ARM_INSN
 #define __ARCH_ARM_INSN

+#ifndef __ASSEMBLY__
+
 #include

+/*
+ * At the moment, arch-specific headers contain only definition for C
+ * code.
+ */
 #if defined(CONFIG_ARM_64)
 # include
 #elif defined(CONFIG_ARM_32)
@@ -11,6 +17,8 @@
 # error "unknown ARM variant"
 #endif

+#endif /* __ASSEMBLY__ */
+
 /* On ARM32,64 instructions are always 4 bytes long. */
 #define ARCH_PATCH_INSN_SIZE 4
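
As an illustration of the pattern (a hypothetical consumer, not part of the
series): with the guard in place, an assembly file can include insn.h and
only the assembly-safe part is seen by the assembler. Xen's .S files are run
through the C preprocessor, so the #include and the macro below work as-is:

    /* foo.S -- hypothetical user of the now assembly-safe header */
    #include <asm/insn.h>   /* C-only parts hidden behind #ifndef __ASSEMBLY__ */

    pad_one_insn:
        .skip   ARCH_PATCH_INSN_SIZE   /* reserve room for one 4-byte instruction */
        ret
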
From patchwork Thu Sep 26 18:38:06 2019
Subject: [Xen-devel, RFC, for-4.13, 08/10] xen/arm: alternative: add auto-nop infrastructure
X-Patchwork-Submitter: Julien Grall
X-Patchwork-Id: 174508
Message-Id: <20190926183808.11630-9-julien.grall@arm.com>
To: xen-devel@lists.xenproject.org
Cc: Mark Rutland, Stefano Stabellini, Will Deacon, andrii.anisov@gmail.com, Julien Grall, Volodymyr Babchuk
Date: Thu, 26 Sep 2019 19:38:06 +0100
From: Julien Grall
List-Id: Xen developer discussion

From: Mark Rutland

In some cases, one side of an alternative sequence is simply a number of
NOPs used to balance the other side. Keeping track of this manually is
tedious, and the presence of large chains of NOPs makes the code more
painful to read than necessary.

To ameliorate matters, this patch adds a new alternative_else_nop_endif,
which automatically balances an alternative sequence with a trivial NOP
sled.

In many cases, we would like a NOP-sled in the default case, and
instructions patched in in the presence of a feature. To enable the NOPs
to be generated automatically for this case, this patch also adds a new
alternative_if, and updates alternative_else and alternative_endif to
work with either alternative_if or alternative_if_not.

The alternative infrastructure was originally ported from Linux, so this
is pretty much a straight backport from commit 792d47379f4d "arm64:
alternative: add auto-nop infrastructure". The only difference is the
nops macro, added as it does not yet exist in Xen.

Signed-off-by: Mark Rutland
[will: use new nops macro to generate nop sequences]
Signed-off-by: Will Deacon
[julien: Add nops and port to Xen]
Signed-off-by: Julien Grall
Reviewed-by: Volodymyr Babchuk
Acked-by: Stefano Stabellini
---
 xen/include/asm-arm/alternative.h | 70 +++++++++++++++++++++++++++++----------
 xen/include/asm-arm/macros.h      |  7 ++++
 2 files changed, 60 insertions(+), 17 deletions(-)

diff --git a/xen/include/asm-arm/alternative.h b/xen/include/asm-arm/alternative.h
index 2830a6da2d..e8271ac04e 100644
--- a/xen/include/asm-arm/alternative.h
+++ b/xen/include/asm-arm/alternative.h
@@ -2,6 +2,7 @@
 #define __ASM_ALTERNATIVE_H

 #include
+#include

 #define ARM_CB_PATCH ARM_NCAPS

@@ -111,34 +112,55 @@ int apply_alternatives(const struct alt_instr *start, const struct alt_instr *en
 .endm

 /*
- * Begin an alternative code sequence.
+ * Alternative sequences
+ *
+ * The code for the case where the capability is not present will be
+ * assembled and linked as normal. There are no restrictions on this
+ * code.
+ *
+ * The code for the case where the capability is present will be
+ * assembled into a special section to be used for dynamic patching.
+ * Code for that case must:
+ *
+ * 1. Be exactly the same length (in bytes) as the default code
+ *    sequence.
  *
- * The code that follows this macro will be assembled and linked as
- * normal. There are no restrictions on this code.
+ * 2. Not contain a branch target that is used outside of the
+ *    alternative sequence it is defined in (branches into an
+ *    alternative sequence are not fixed up).
+ */
+
+/*
+ * Begin an alternative code sequence.
  */
 .macro alternative_if_not cap
+    .set .Lasm_alt_mode, 0
     .pushsection .altinstructions, "a"
     altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
     .popsection
 661:
 .endm

+.macro alternative_if cap
+    .set .Lasm_alt_mode, 1
+    .pushsection .altinstructions, "a"
+    altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
+    .popsection
+    .pushsection .altinstr_replacement, "ax"
+    .align 2 /* So GAS knows label 661 is suitably aligned */
+661:
+.endm
+
 /*
- * Provide the alternative code sequence.
- *
- * The code that follows this macro is assembled into a special
- * section to be used for dynamic patching. Code that follows this
- * macro must:
- *
- * 1. Be exactly the same length (in bytes) as the default code
- *    sequence.
- *
- * 2. Not contain a branch target that is used outside of the
- *    alternative sequence it is defined in (branches into an
- *    alternative sequence are not fixed up).
+ * Provide the other half of the alternative code sequence.
  */
 .macro alternative_else
-662: .pushsection .altinstr_replacement, "ax"
+662:
+    .if .Lasm_alt_mode==0
+    .pushsection .altinstr_replacement, "ax"
+    .else
+    .popsection
+    .endif
 663:
 .endm

@@ -154,12 +176,26 @@ int apply_alternatives(const struct alt_instr *start, const struct alt_instr *en
  * Complete an alternative code sequence.
  */
 .macro alternative_endif
-664: .popsection
+664:
+    .if .Lasm_alt_mode==0
+    .popsection
+    .endif
     .org . - (664b-663b) + (662b-661b)
     .org . - (662b-661b) + (664b-663b)
 .endm

 /*
+ * Provides a trivial alternative or default sequence consisting solely
+ * of NOPs. The number of NOPs is chosen automatically to match the
+ * previous case.
+ */
+.macro alternative_else_nop_endif
+alternative_else
+    nops (662b-661b) / ARCH_PATCH_INSN_SIZE
+alternative_endif
+.endm
+
+/*
  * Callback-based alternative epilogue
  */
 .macro alternative_cb_end
diff --git a/xen/include/asm-arm/macros.h b/xen/include/asm-arm/macros.h
index 1d4bb41d15..91ea3505e4 100644
--- a/xen/include/asm-arm/macros.h
+++ b/xen/include/asm-arm/macros.h
@@ -13,4 +13,11 @@
 # error "unknown ARM variant"
 #endif

+    /* NOP sequence */
+    .macro nops, num
+    .rept   \num
+    nop
+    .endr
+    .endm
+
 #endif /* __ASM_ARM_MACROS_H */
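
A usage sketch of the new macros (illustrative only; HAS_FOO and foo_handler
are placeholders, standing in for a real capability and routine):

    alternative_if HAS_FOO
    bl      foo_handler             /* patched in when HAS_FOO is set */
    alternative_else_nop_endif      /* default: one NOP, length matched automatically */

Note that the two .org directives in alternative_endif enforce at assembly
time that both sides have the same length, so a size mismatch fails the
build instead of silently corrupting the patch site.
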
From patchwork Thu Sep 26 18:38:07 2019
Subject: [Xen-devel, RFC, for-4.13, 09/10] xen/arm: asm: Replace use of ALTERNATIVE with alternative_if
X-Patchwork-Submitter: Julien Grall
X-Patchwork-Id: 174512
Message-Id: <20190926183808.11630-10-julien.grall@arm.com>
To: xen-devel@lists.xenproject.org
Cc: Julien Grall, Stefano Stabellini, Volodymyr Babchuk, andrii.anisov@gmail.com
Date: Thu, 26 Sep 2019 19:38:07 +0100
From: Julien Grall
List-Id: Xen developer discussion

Using alternative_if makes the code a bit more streamlined.

Take the opportunity to use the new auto-nop infrastructure to avoid
counting the number of NOPs in the else part for
arch/arm/arm64/entry.S.

Signed-off-by: Julien Grall
Reviewed-by: Volodymyr Babchuk
---
This is pretty much a matter of taste, but at least for arm64 this
allows us to use the auto-nop infrastructure. The arm32 change is mostly
to keep it in line with arm64.
---
 xen/arch/arm/arm32/entry.S | 9 ++++++---
 xen/arch/arm/arm64/entry.S | 8 +++++---
 2 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/xen/arch/arm/arm32/entry.S b/xen/arch/arm/arm32/entry.S
index 0b4cd19abd..1428cd3583 100644
--- a/xen/arch/arm/arm32/entry.S
+++ b/xen/arch/arm/arm32/entry.S
@@ -65,9 +65,12 @@ save_guest_regs:
      * If the SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT has been set in the cpu
      * feature, the checking of pending SErrors will be skipped.
      */
-    ALTERNATIVE("nop",
-                "b skip_check",
-                SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
+    alternative_if SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
+    b       skip_check
+    alternative_else
+    nop
+    alternative_endif
+
     /*
      * Start to check pending virtual abort in the gap of Guest -> HYP
      * world switch.
diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
index 458d12f188..91cf6ee6f4 100644
--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -170,9 +170,11 @@
      * is not set. If a vSError took place, the initial exception will be
      * skipped. Exit ASAP
      */
-    ALTERNATIVE("bl check_pending_vserror; cbnz x0, 1f",
-                "nop; nop",
-                SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
+    alternative_if_not SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
+    bl      check_pending_vserror
+    cbnz    x0, 1f
+    alternative_else_nop_endif
+
     mov     x0, sp
     bl      enter_hypervisor_from_guest_noirq
     msr     daifclr, \iflags
From patchwork Thu Sep 26 18:38:08 2019
Subject: [Xen-devel, RFC, for-4.13, 10/10] xen/arm64: entry: Ensure the guest state is synced when receiving a vSError
X-Patchwork-Submitter: Julien Grall
X-Patchwork-Id: 174509
Message-Id: <20190926183808.11630-11-julien.grall@arm.com>
To: xen-devel@lists.xenproject.org
Cc: Julien Grall, Stefano Stabellini, Volodymyr Babchuk, andrii.anisov@gmail.com
Date: Thu, 26 Sep 2019 19:38:08 +0100
From: Julien Grall
List-Id: Xen developer discussion

At the moment, when an SError is received while checking for a pending
one, we will skip the handling of the initial exception. This includes
the calls to enter_hypervisor_from_guest{,_noirq} that are used to
synchronize part of the guest state with the internal representation.
However, we still call leave_hypervisor_to_guest(), which is used for
preempting the guest and synchronizing back part of the guest state.

enter_hypervisor_from_guest{,_noirq} works in pair with
leave_hypervisor_to_guest(), so skipping the former may result in the
loss of some part of the guest state. An example is the new vGIC, which
saves the state of the LRs on exit from the guest and rewrites all of
them on entry to the guest.

For now, calling leave_hypervisor_to_guest() is not necessary when
injecting a vSError to the guest. But as the path is spread across
multiple files, it is hard to enforce that for the future (someday we may
want to crash the domain). Therefore it is best to call
enter_hypervisor_from_guest{,_noirq} in the vSError path as well.

Note that the return value of check_pending_vserror is now set in x19
instead of x0. This is because we want to keep the value across calls to
C functions, and x0, unlike x19, will not be saved by the callee.

Signed-off-by: Julien Grall

---
I am not aware of any issues other than with the new vGIC. But I haven't
looked hard enough, so I think it would be worth trying to fix this for
Xen 4.13.
---
 xen/arch/arm/arm64/entry.S | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
index 91cf6ee6f4..f5350247e1 100644
--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -168,11 +168,13 @@
     /*
      * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
      * is not set. If a vSError took place, the initial exception will be
-     * skipped. Exit ASAP
+     * skipped.
+     *
+     * However, we still need to call enter_hypervisor_from_guest{,_noirq}
+     * as the return path to the guest may rely on state saved by them.
      */
     alternative_if_not SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
     bl      check_pending_vserror
-    cbnz    x0, 1f
     alternative_else_nop_endif

     mov     x0, sp
@@ -180,6 +182,11 @@
     msr     daifclr, \iflags
     mov     x0, sp
     bl      enter_hypervisor_from_guest
+
+    alternative_if_not SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
+    cbnz    x19, 1f
+    alternative_else_nop_endif
+
     mov     x0, sp
     bl      do_trap_\trap
 1:
@@ -383,9 +390,9 @@ return_from_trap:
 /*
  * This function is used to check pending virtual SError in the gap of
  * EL1 -> EL2 world switch.
- * The x0 register will be used to indicate the results of detection.
- * x0 -- Non-zero indicates a pending virtual SError took place.
- * x0 -- Zero indicates no pending virtual SError took place.
+ * The register x19 will be used to indicate the results of detection.
+ * x19 -- Non-zero indicates a pending virtual SError took place.
+ * x19 -- Zero indicates no pending virtual SError took place.
  */
 check_pending_vserror:
     /*
@@ -432,9 +439,9 @@ abort_guest_exit_end:

     /*
      * Not equal, the pending SError exception took place, set
-     * x0 to non-zero.
+     * x19 to non-zero.
      */
-    cset    x0, ne
+    cset    x19, ne

     ret
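
For readers wondering why x19 survives where x0 does not: under the AAPCS64
calling convention, x0-x18 (arguments, results, and temporaries) may be
clobbered by any called function, whereas x19-x28 are callee-saved. A
minimal illustration (some_c_function is a placeholder, not from the patch):

    cset    x19, ne            /* flag that must survive the calls below */
    mov     x0, sp
    bl      some_c_function    /* may freely clobber x0-x18 ... */
    cbnz    x19, 1f            /* ... but must preserve x19, still valid here */

This is why the vSError result moves to x19 once C functions are called
between the check and its use.
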