From patchwork Thu Mar  1 12:53:38 2018
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: [01/45] mm: Introduce lm_alias
X-Patchwork-Submitter: Alex Shi
X-Patchwork-Id: 130181
Message-Id: <1519908862-11425-2-git-send-email-alex.shi@linaro.org>
To: Marc Zyngier, Will Deacon, Ard Biesheuvel, Catalin Marinas, stable@vger.kernel.org, linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org
Cc: Laura Abbott, Alex Shi
Date: Thu,  1 Mar 2018 20:53:38 +0800
From: Alex Shi
List-Id: <linux-arm-kernel.lists.infradead.org>

From: Laura Abbott

commit 568c5fe5a54 upstream.

Certain architectures may have the kernel image mapped separately to
alias the linear map. Introduce a macro lm_alias to translate a kernel
image symbol into its linear alias. This is used in part with work to
add CONFIG_DEBUG_VIRTUAL support for arm64.

Reviewed-by: Mark Rutland
Tested-by: Mark Rutland
Signed-off-by: Laura Abbott
Signed-off-by: Will Deacon
Signed-off-by: Alex Shi
---
 include/linux/mm.h | 4 ++++
 1 file changed, 4 insertions(+)

--
2.7.4

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2217e2f..edd2480 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -76,6 +76,10 @@ extern int mmap_rnd_compat_bits __read_mostly;
 #define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
 #endif

+#ifndef lm_alias
+#define lm_alias(x)	__va(__pa_symbol(x))
+#endif
+
 /*
  * To prevent common memory management code establishing
  * a zero page mapping on a read fault.
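
A note on what lm_alias() buys its callers: when the kernel image is mapped
separately from the linear map, a symbol's link-time address cannot be fed
to linear-map-only helpers such as virt_to_page(). A minimal sketch of a
caller (the symbol and function names here are hypothetical, not from the
patch):

/*
 * Hypothetical caller, not part of the patch: get at the struct page
 * backing a kernel-image symbol.  virt_to_page() is only valid for
 * linear-map addresses, so the image symbol is translated first via
 * lm_alias() = __va(__pa_symbol(x)).
 */
#include <linux/mm.h>

extern char __example_marker[];		/* hypothetical image symbol */

static struct page *example_marker_page(void)
{
	/* lm_alias(): the same object, seen through the linear map */
	return virt_to_page(lm_alias(__example_marker));
}
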
From patchwork Thu Mar  1 12:53:42 2018
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: [05/45] arm64: move TASK_* definitions to <asm/processor.h>
X-Patchwork-Submitter: Alex Shi
X-Patchwork-Id: 130185
Message-Id: <1519908862-11425-6-git-send-email-alex.shi@linaro.org>
To: Marc Zyngier, Will Deacon, Ard Biesheuvel, Catalin Marinas, stable@vger.kernel.org, linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org
Cc: Yury Norov, Laura Abbott, James Morse, Alex Shi
Date: Thu,  1 Mar 2018 20:53:42 +0800
From: Alex Shi
List-Id: <linux-arm-kernel.lists.infradead.org>

From: Yury Norov

commit eef94a3d09aab upstream.

The ILP32 series [1] introduces a dependency on <asm/is_compat.h> for
the TASK_SIZE macro, which in turn requires <asm/thread_info.h>, and
<asm/thread_info.h> includes <asm/memory.h>, giving a circular
dependency, because TASK_SIZE is currently located in <asm/memory.h>.

In other architectures, TASK_SIZE is defined in <asm/processor.h>, and
moving TASK_SIZE there fixes the problem.

Discussion: https://patchwork.kernel.org/patch/9929107/

[1] https://github.com/norov/linux/tree/ilp32-next

CC: Will Deacon
CC: Laura Abbott
Cc: Ard Biesheuvel
Cc: Catalin Marinas
Cc: James Morse
Suggested-by: Mark Rutland
Signed-off-by: Yury Norov
Signed-off-by: Will Deacon
Signed-off-by: Alex Shi
---
 arch/arm64/include/asm/memory.h    | 15 ---------------
 arch/arm64/include/asm/processor.h | 21 +++++++++++++++++++++
 arch/arm64/kernel/entry.S          |  2 +-
 3 files changed, 22 insertions(+), 16 deletions(-)

--
2.7.4

diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 53211a0..269b979 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -60,8 +60,6 @@
 * KIMAGE_VADDR - the virtual address of the start of the kernel image
 * VA_BITS - the maximum number of bits for virtual addresses.
 * VA_START - the first kernel virtual address.
- * TASK_SIZE - the maximum size of a user space task.
- * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
 */
 #define VA_BITS			(CONFIG_ARM64_VA_BITS)
 #define VA_START		(UL(0xffffffffffffffff) << VA_BITS)
@@ -74,19 +72,6 @@
 #define PCI_IO_END		(VMEMMAP_START - SZ_2M)
 #define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
 #define FIXADDR_TOP		(PCI_IO_START - SZ_2M)
-#define TASK_SIZE_64		(UL(1) << VA_BITS)
-
-#ifdef CONFIG_COMPAT
-#define TASK_SIZE_32		UL(0x100000000)
-#define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
-				TASK_SIZE_32 : TASK_SIZE_64)
-#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
-				TASK_SIZE_32 : TASK_SIZE_64)
-#else
-#define TASK_SIZE		TASK_SIZE_64
-#endif /* CONFIG_COMPAT */
-
-#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 4))

 #define KERNEL_START		_text
 #define KERNEL_END		_end

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 60e3482..4258f4d 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -19,6 +19,10 @@
 #ifndef __ASM_PROCESSOR_H
 #define __ASM_PROCESSOR_H

+#define TASK_SIZE_64		(UL(1) << VA_BITS)
+
+#ifndef __ASSEMBLY__
+
 /*
  * Default implementation of macro that returns current
  * instruction pointer ("program counter").
@@ -37,6 +41,22 @@
 #include <asm/ptrace.h>
 #include <asm/types.h>

+/*
+ * TASK_SIZE - the maximum size of a user space task.
+ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
+ */
+#ifdef CONFIG_COMPAT
+#define TASK_SIZE_32		UL(0x100000000)
+#define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
+				TASK_SIZE_32 : TASK_SIZE_64)
+#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
+				TASK_SIZE_32 : TASK_SIZE_64)
+#else
+#define TASK_SIZE		TASK_SIZE_64
+#endif /* CONFIG_COMPAT */
+
+#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 4))
+
 #define STACK_TOP_MAX		TASK_SIZE_64
 #ifdef CONFIG_COMPAT
 #define AARCH32_VECTORS_BASE	0xffff0000
@@ -192,4 +212,5 @@
 int cpu_enable_pan(void *__unused);
 int cpu_enable_uao(void *__unused);
 int cpu_enable_cache_maint_trap(void *__unused);
+#endif /* __ASSEMBLY__ */
 #endif /* __ASM_PROCESSOR_H */

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index b4c7db4..478f0fe 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -28,7 +28,7 @@
 #include <asm/errno.h>
 #include <asm/esr.h>
 #include <asm/irq.h>
-#include <asm/memory.h>
+#include <asm/processor.h>
 #include <asm/thread_info.h>
 #include <asm/asm-uaccess.h>
 #include <asm/unistd.h>
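
The split above is worth spelling out: TASK_SIZE_64 lands outside the
#ifndef __ASSEMBLY__ guard so entry.S can still use it, while the
thread-flag-dependent TASK_SIZE stays C-only. A rough sketch of a
consumer after this move (hypothetical function, not from the patch):

/*
 * Hypothetical consumer, not part of the patch: with TASK_SIZE now in
 * <asm/processor.h>, a user-range check no longer needs <asm/memory.h>,
 * which is what breaks the include cycle described above.
 */
#include <asm/processor.h>

static inline int example_range_ok(unsigned long addr, unsigned long size)
{
	/*
	 * TASK_SIZE expands per-task: 4GiB for compat (32-bit) tasks,
	 * 1 << VA_BITS for native 64-bit tasks.
	 */
	return size <= TASK_SIZE && addr <= TASK_SIZE - size;
}
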
From patchwork Thu Mar  1 12:53:48 2018
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: [11/45] arm64: syscallno is secretly an int, make it official
X-Patchwork-Submitter: Alex Shi
X-Patchwork-Id: 130191
Message-Id: <1519908862-11425-12-git-send-email-alex.shi@linaro.org>
To: Marc Zyngier, Will Deacon, Ard Biesheuvel, Catalin Marinas, stable@vger.kernel.org, linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org
Cc: Dave Martin, Alex Shi
Date: Thu,  1 Mar 2018 20:53:48 +0800
From: Alex Shi
List-Id: <linux-arm-kernel.lists.infradead.org>

From: Dave Martin

commit 35d0e6fb4d upstream.

The upper 32 bits of the syscallno field in thread_struct are handled
inconsistently, being sometimes zero extended and sometimes
sign-extended. In fact, only the lower 32 bits seem to have any real
significance for the behaviour of the code: it's been OK to handle the
upper bits inconsistently because they don't matter.

Currently, the only place I can find where those bits are significant
is in calling trace_sys_enter(), which may be unintentional: for
example, if a compat tracer attempts to cancel a syscall by passing -1
to (COMPAT_)PTRACE_SET_SYSCALL at the syscall-enter-stop, it will be
traced as syscall 4294967295 rather than -1 as might be expected (and
as occurs for a native tracer doing the same thing). Elsewhere, reads
of syscallno cast it to an int or truncate it.

There's also a conspicuous amount of code and casting to bodge around
the fact that although semantically an int, syscallno is stored as a
u64.

Let's not pretend any more.

In order to preserve the stp x instruction that stores the syscall
number in entry.S, this patch special-cases the layout of struct
pt_regs for big endian so that the newly 32-bit syscallno field maps
onto the low bits of the stored value. This is not beautiful, but
benchmarking of the getpid syscall on Juno indicates a minor slowdown
if the stp is split into an stp x and stp w.

Signed-off-by: Dave Martin
Acked-by: Will Deacon
Signed-off-by: Catalin Marinas
Signed-off-by: Alex Shi
---
 arch/arm64/include/asm/processor.h |  2 +-
 arch/arm64/include/asm/ptrace.h    |  9 ++++++++-
 arch/arm64/kernel/entry.S          | 34 +++++++++++++++++-----------------
 arch/arm64/kernel/ptrace.c         |  2 +-
 arch/arm64/kernel/signal.c         |  6 +++---
 arch/arm64/kernel/signal32.c       |  2 +-
 arch/arm64/kernel/traps.c          |  2 +-
 7 files changed, 32 insertions(+), 25 deletions(-)

--
2.7.4
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 5917147..f5cdda6 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -130,7 +130,7 @@ struct thread_struct {
 static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
 {
 	memset(regs, 0, sizeof(*regs));
-	regs->syscallno = ~0UL;
+	regs->syscallno = ~0;
 	regs->pc = pc;
 }

diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index ada08b5..7721d7a 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -116,7 +116,14 @@ struct pt_regs {
 		};
 	};
 	u64 orig_x0;
-	u64 syscallno;
+#ifdef __AARCH64EB__
+	u32 unused2;
+	s32 syscallno;
+#else
+	s32 syscallno;
+	u32 unused2;
+#endif
+
 	u64 orig_addr_limit;
 	u64 unused;	// maintain 16 byte alignment
 };

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 6915697..48c41ff 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -116,8 +116,8 @@
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
-	mvn	x21, xzr
-	str	x21, [sp, #S_SYSCALLNO]
+	mvn	w21, wzr
+	str	w21, [sp, #S_SYSCALLNO]
	.endif

 /*
@@ -232,8 +232,9 @@ alternative_else_nop_endif
	 *
	 * x7 is reserved for the system call number in 32-bit mode.
	 */
-sc_nr	.req	x25		// number of system calls
-scno	.req	x26		// syscall number
+wsc_nr	.req	w25		// number of system calls
+wscno	.req	w26		// syscall number
+xscno	.req	x26		// syscall number (zero-extended)
 stbl	.req	x27		// syscall table pointer
 tsk	.req	x28		// current thread_info

@@ -519,8 +520,8 @@ el0_svc_compat:
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
-	uxtw	scno, w7			// syscall number in w7 (r7)
-	mov	sc_nr, #__NR_compat_syscalls
+	mov	wscno, w7			// syscall number in w7 (r7)
+	mov	wsc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
@@ -741,19 +742,19 @@ ENDPROC(ret_from_fork)
	.align	6
 el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
-	uxtw	scno, w8			// syscall number in w8
-	mov	sc_nr, #__NR_syscalls
+	mov	wscno, w8			// syscall number in w8
+	mov	wsc_nr, #__NR_syscalls
 el0_svc_naked:					// compat entry point
-	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
+	stp	x0, xscno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
-	cmp	scno, sc_nr			// check upper syscall limit
+	cmp	wscno, wsc_nr			// check upper syscall limit
	b.hs	ni_sys
-	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
+	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
 ni_sys:
@@ -767,24 +768,23 @@ ENDPROC(el0_svc)
	 * switches, and waiting for our parent to respond.
	 */
 __sys_trace:
-	mov	w0, #-1				// set default errno for
-	cmp	scno, x0			// user-issued syscall(-1)
+	cmp	wscno, #-1			// user-issued syscall(-1)?
	b.ne	1f
-	mov	x0, #-ENOSYS
+	mov	x0, #-ENOSYS			// set default errno if so
	str	x0, [sp, #S_X0]
 1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #-1				// skip the syscall?
	b.eq	__sys_trace_return_skipped
-	uxtw	scno, w0			// syscall number (possibly new)
+	mov	wscno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
-	cmp	scno, sc_nr			// check upper syscall limit
+	cmp	wscno, wsc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
-	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
+	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

 __sys_trace_return:

diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 8eedeef..193c621 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1346,7 +1346,7 @@ static void tracehook_report_syscall(struct pt_regs *regs,
	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
-		regs->syscallno = ~0UL;
+		regs->syscallno = ~0;

	regs->regs[regno] = saved_reg;
 }

diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 404dd67..fd7eba8 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -113,7 +113,7 @@ static int restore_sigframe(struct pt_regs *regs,
	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
-	regs->syscallno = ~0UL;
+	regs->syscallno = ~0;

	err |= !valid_user_regs(&regs->user_regs, current);

@@ -332,7 +332,7 @@ static void do_signal(struct pt_regs *regs)
 {
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
-	int syscall = (int)regs->syscallno;
+	int syscall = regs->syscallno;
	struct ksignal ksig;

	/*
@@ -346,7 +346,7 @@ static void do_signal(struct pt_regs *regs)
	/*
	 * Avoid additional syscall restarting via ret_to_user.
	 */
-	regs->syscallno = ~0UL;
+	regs->syscallno = ~0;

	/*
	 * Prepare for system call restart. We do this here so that a

diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index b7063de..1effea2 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -354,7 +354,7 @@ static int compat_restore_sigframe(struct pt_regs *regs,
	/*
	 * Avoid compat_sys_sigreturn() restarting.
	 */
-	regs->syscallno = ~0UL;
+	regs->syscallno = ~0;

	err |= !valid_user_regs(&regs->user_regs, current);

diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index c743d1f..59f80b0 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -541,7 +541,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
	if (show_unhandled_signals_ratelimited()) {
		pr_info("%s[%d]: syscall %d\n", current->comm,
-			task_pid_nr(current), (int)regs->syscallno);
+			task_pid_nr(current), regs->syscallno);
		dump_instr("", regs);
		if (user_mode(regs))
			__show_regs(regs);
	}
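
The endian-dependent field layout in the ptrace.h hunk can be puzzling at
first sight. This stand-alone sketch (ordinary userspace C, merely
mirroring the pt_regs trick, not kernel code) shows why the two 32-bit
halves swap places on big endian: the 64-bit store performed by
"stp x0, xscno" must leave its low 32 bits where a 32-bit read of
syscallno will find them.

/*
 * Illustration only: a 64-bit store followed by a 32-bit signed read,
 * with the field placed on whichever half holds the low 32 bits of the
 * stored value.  Prints -1 on both byte orders.
 */
#include <stdint.h>
#include <stdio.h>

union regs_tail {
	uint64_t stored;		/* what the stp x writes */
	struct {
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		uint32_t unused2;
		int32_t syscallno;	/* low half sits at the higher address */
#else
		int32_t syscallno;	/* low half sits at the lower address */
		uint32_t unused2;
#endif
	};
};

int main(void)
{
	union regs_tail t;

	t.stored = (uint64_t)(uint32_t)-1;	/* syscall(-1), zero-extended */
	printf("syscallno = %d\n", (int)t.syscallno);
	return 0;
}
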
From patchwork Thu Mar  1 12:53:52 2018
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: [15/45] arm64: uaccess: Mask __user pointers for __arch_{clear, copy_*}_user
X-Patchwork-Submitter: Alex Shi
X-Patchwork-Id: 130225
Message-Id: <1519908862-11425-16-git-send-email-alex.shi@linaro.org>
To: Marc Zyngier, Will Deacon, Ard Biesheuvel, Catalin Marinas, stable@vger.kernel.org, linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org
Cc: Alex Shi
Date: Thu,  1 Mar 2018 20:53:52 +0800
From: Alex Shi
List-Id: <linux-arm-kernel.lists.infradead.org>

From: Will Deacon

Rewritten from commit f71c2ffcb20d upstream.

LTS 4.9 has neither raw_copy_from/to_user nor __copy_user_flushcache,
and it is not a good idea to pick them up. The original commit log
follows; it applies equally to this reworked patch.

Like we've done for get_user and put_user, ensure that user pointers
are masked before invoking the underlying __arch_{clear,copy_*}_user
operations.

Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas
Signed-off-by: Alex Shi
---
 arch/arm64/include/asm/uaccess.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

--
2.7.4

diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index ffa4e39..fbf4ce4 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -357,14 +357,14 @@ static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
	kasan_check_write(to, n);
	check_object_size(to, n, false);
-	return __arch_copy_from_user(to, from, n);
+	return __arch_copy_from_user(to, __uaccess_mask_ptr(from), n);
 }

 static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
	kasan_check_read(from, n);
	check_object_size(from, n, true);
-	return __arch_copy_to_user(to, from, n);
+	return __arch_copy_to_user(__uaccess_mask_ptr(to), from, n);
 }

 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
@@ -374,7 +374,7 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
	check_object_size(to, n, false);

	if (access_ok(VERIFY_READ, from, n)) {
-		res = __arch_copy_from_user(to, from, n);
+		res = __arch_copy_from_user(to, __uaccess_mask_ptr(from), n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
@@ -387,7 +387,7 @@ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
	check_object_size(from, n, true);

	if (access_ok(VERIFY_WRITE, to, n)) {
-		n = __arch_copy_to_user(to, from, n);
+		n = __arch_copy_to_user(__uaccess_mask_ptr(to), from, n);
	}
	return n;
 }
@@ -395,7 +395,7 @@ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
 static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
-		n = __copy_in_user(to, from, n);
+		n = __copy_in_user(__uaccess_mask_ptr(to), __uaccess_mask_ptr(from), n);
	return n;
 }
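
For readers without the earlier patches of the series to hand: the masking
primitive invoked here clamps a user pointer into the user range without a
conditional branch, so a mispredicted access_ok() cannot lead to a
speculative dereference of a kernel address. A simplified, stand-alone C
model follows (the real __uaccess_mask_ptr is an inline-asm bics/csel
sequence against the address limit followed by a CSDB barrier; the names
and the 2^39 user-range size below are illustrative assumptions):

/*
 * Branch-free pointer clamping, modelled in plain C.  Assumes 64-bit
 * uintptr_t.  Any bit set above the user range forces the mask to zero,
 * nulling the pointer; otherwise the mask is all-ones and the pointer
 * passes through unchanged.
 */
#include <stdint.h>

#define EX_TASK_SIZE	(1UL << 39)	/* stand-in for arm64 TASK_SIZE_64 */

static inline uintptr_t example_mask_user_ptr(uintptr_t ptr)
{
	uintptr_t high = ptr & ~(EX_TASK_SIZE - 1);	/* bits above user range */
	/* (high | -high) has its top bit set iff high != 0 */
	uintptr_t mask = ((high | -high) >> 63) - 1;	/* 0 or ~0, no branch */

	return ptr & mask;
}
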
From patchwork Thu Mar  1 12:53:59 2018
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: [22/45] arm64: Move post_ttbr_update_workaround to C code
X-Patchwork-Submitter: Alex Shi
X-Patchwork-Id: 130201
Message-Id: <1519908862-11425-23-git-send-email-alex.shi@linaro.org>
To: Marc Zyngier, Will Deacon, Ard Biesheuvel, Catalin Marinas, stable@vger.kernel.org, linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org
Cc: Alex Shi
Date: Thu,  1 Mar 2018 20:53:59 +0800
From: Alex Shi
List-Id: <linux-arm-kernel.lists.infradead.org>

From: Marc Zyngier

commit 95e3de3590e3 upstream.

We will soon need to invoke a CPU-specific function pointer after
changing page tables, so move post_ttbr_update_workaround out into C
code to make this possible.

Signed-off-by: Marc Zyngier
Signed-off-by: Will Deacon
Signed-off-by: Alex Shi
---
 arch/arm64/mm/context.c | 9 +++++++++
 arch/arm64/mm/proc.S    | 3 +--
 2 files changed, 10 insertions(+), 2 deletions(-)

--
2.7.4

diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index efcf1f7..32eeabe91 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -224,6 +224,15 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
	cpu_switch_mm(mm->pgd, mm);
 }

+/* Errata workaround post TTBRx_EL1 update. */
+asmlinkage void post_ttbr_update_workaround(void)
+{
+	asm(ALTERNATIVE("nop; nop; nop",
+			"ic iallu; dsb nsh; isb",
+			ARM64_WORKAROUND_CAVIUM_27456,
+			CONFIG_CAVIUM_ERRATUM_27456));
+}
+
 static int asids_init(void)
 {
	asid_bits = get_cpu_asid_bits();

diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index c2adb0c..cca061a 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -136,8 +136,7 @@ ENTRY(cpu_do_switch_mm)
	bfi	x0, x1, #48, #16		// set the ASID
	msr	ttbr0_el1, x0			// set TTBR0
	isb
-	post_ttbr0_update_workaround
-	ret
+	b	post_ttbr_update_workaround	// Back to C code...
 ENDPROC(cpu_do_switch_mm)

	.pushsection ".idmap.text", "ax"
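
Moving the sequence into C works because the asm(ALTERNATIVE(...)) idiom
carries the errata patching with it: at boot, on CPUs affected by the
Cavium erratum, the three nops are live-patched into the ic iallu; dsb
nsh; isb sequence. For reference, the same idiom in its three-argument
form, with a deliberately made-up capability bit (ARM64_HAS_EXAMPLE_FEATURE
is not a real cpucap; this is a sketch of the pattern only):

/*
 * Sketch of the boot-time alternatives pattern: the nop is live-patched
 * to "dsb ish" on CPUs that report the (hypothetical) capability.
 */
#include <asm/alternative.h>

static inline void example_sync(void)
{
	asm(ALTERNATIVE("nop",
			"dsb ish",
			ARM64_HAS_EXAMPLE_FEATURE));	/* hypothetical cap */
}
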
From patchwork Thu Mar  1 12:54:03 2018
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: [26/45] arm64: entry: Apply BP hardening for high-priority synchronous exceptions
X-Patchwork-Submitter: Alex Shi
X-Patchwork-Id: 130204
Message-Id: <1519908862-11425-27-git-send-email-alex.shi@linaro.org>
To: Marc Zyngier, Will Deacon, Ard Biesheuvel, Catalin Marinas, stable@vger.kernel.org, linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org
Cc: Alex Shi
Date: Thu,  1 Mar 2018 20:54:03 +0800
From: Alex Shi
List-Id: <linux-arm-kernel.lists.infradead.org>

From: Will Deacon

commit 5dfc6ed27710 upstream.

Software-step and PC alignment fault exceptions have higher priority
than instruction abort exceptions, so apply the BP hardening hooks
there too if the user PC appears to reside in kernel space.

Reported-by: Dan Hettena
Reviewed-by: Marc Zyngier
Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas
Signed-off-by: Alex Shi
---
 arch/arm64/kernel/entry.S | 6 ++++--
 arch/arm64/mm/fault.c     | 9 +++++++++
 2 files changed, 13 insertions(+), 2 deletions(-)

--
2.7.4

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index bdb0139..d50c2fe 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -584,8 +584,10 @@ el0_sp_pc:
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
-	// enable interrupts before calling the main handler
-	enable_dbg_and_irq
+	msr	daifclr, #(8 | 4 | 1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	trace_hardirqs_off
+#endif
	ct_user_exit
	mov	x0, x26
	mov	x1, x25

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c95b194..6120a14 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -617,6 +617,12 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
	struct siginfo info;
	struct task_struct *tsk = current;

+	if (user_mode(regs)) {
+		if (instruction_pointer(regs) > TASK_SIZE)
+			arm64_apply_bp_hardening();
+		local_irq_enable();
+	}
+
	if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
		pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
				    tsk->comm, task_pid_nr(tsk),
@@ -676,6 +682,9 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
	if (interrupts_enabled(regs))
		trace_hardirqs_off();

+	if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
+		arm64_apply_bp_hardening();
+
	if (!inf->fn(addr, esr, regs)) {
		rv = 1;
	} else {
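
The guard added to do_sp_pc_abort() and do_debug_exception() follows the
pattern used throughout this series: an exception taken from user mode
whose reported PC lies above TASK_SIZE cannot be a legitimate user fetch,
so it is treated as an attempt to steer branch prediction and the
predictor state is invalidated first. Schematically (a sketch mirroring
the two hunks above, not additional kernel code):

/*
 * User-mode exception + kernel-range PC => likely branch-predictor
 * abuse; invalidate predictor state before doing anything else.
 * arm64_apply_bp_hardening() is the per-CPU hardening callback
 * introduced earlier in the series.
 */
static void example_bp_guard(struct pt_regs *regs)
{
	if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
		arm64_apply_bp_hardening();
}
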
From patchwork Thu Mar  1 12:54:10 2018
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: [33/45] arm/arm64: KVM: Add smccc accessors to PSCI code
X-Patchwork-Submitter: Alex Shi
X-Patchwork-Id: 130211
Message-Id: <1519908862-11425-34-git-send-email-alex.shi@linaro.org>
To: Marc Zyngier, Will Deacon, Ard Biesheuvel, Catalin Marinas, stable@vger.kernel.org, linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org
Cc: Alex Shi
Date: Thu,  1 Mar 2018 20:54:10 +0800
From: Alex Shi
List-Id: <linux-arm-kernel.lists.infradead.org>

From: Marc Zyngier

commit 84684fecd7ea upstream.

Instead of open coding the accesses to the various registers, let's
add explicit SMCCC accessors.

Reviewed-by: Christoffer Dall
Tested-by: Ard Biesheuvel
Signed-off-by: Marc Zyngier
Signed-off-by: Catalin Marinas
Signed-off-by: Will Deacon
Signed-off-by: Alex Shi
---
 arch/arm/kvm/psci.c | 52 ++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 42 insertions(+), 10 deletions(-)

--
2.7.4

diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 4adfa28..bc334d6 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -33,6 +33,38 @@

 #define AFFINITY_MASK(level)	~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)

+static u32 smccc_get_function(struct kvm_vcpu *vcpu)
+{
+	return vcpu_get_reg(vcpu, 0);
+}
+
+static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
+{
+	return vcpu_get_reg(vcpu, 1);
+}
+
+static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
+{
+	return vcpu_get_reg(vcpu, 2);
+}
+
+static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
+{
+	return vcpu_get_reg(vcpu, 3);
+}
+
+static void smccc_set_retval(struct kvm_vcpu *vcpu,
+			     unsigned long a0,
+			     unsigned long a1,
+			     unsigned long a2,
+			     unsigned long a3)
+{
+	vcpu_set_reg(vcpu, 0, a0);
+	vcpu_set_reg(vcpu, 1, a1);
+	vcpu_set_reg(vcpu, 2, a2);
+	vcpu_set_reg(vcpu, 3, a3);
+}
+
 static unsigned long psci_affinity_mask(unsigned long affinity_level)
 {
	if (affinity_level <= 3)
@@ -75,7 +107,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
	unsigned long context_id;
	phys_addr_t target_pc;

-	cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
+	cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
	if (vcpu_mode_is_32bit(source_vcpu))
		cpu_id &= ~((u32) 0);

@@ -94,8 +126,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
		return PSCI_RET_INVALID_PARAMS;
	}

-	target_pc = vcpu_get_reg(source_vcpu, 2);
-	context_id = vcpu_get_reg(source_vcpu, 3);
+	target_pc = smccc_get_arg2(source_vcpu);
+	context_id = smccc_get_arg3(source_vcpu);

	kvm_reset_vcpu(vcpu);

@@ -114,7 +146,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
	 * the general puspose registers are undefined upon CPU_ON.
	 */
-	vcpu_set_reg(vcpu, 0, context_id);
+	smccc_set_retval(vcpu, context_id, 0, 0, 0);
	vcpu->arch.power_off = false;
	smp_mb();		/* Make sure the above is visible */

@@ -134,8 +166,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

-	target_affinity = vcpu_get_reg(vcpu, 1);
-	lowest_affinity_level = vcpu_get_reg(vcpu, 2);
+	target_affinity = smccc_get_arg1(vcpu);
+	lowest_affinity_level = smccc_get_arg2(vcpu);

	/* Determine target affinity mask */
	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
@@ -209,7 +241,7 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
	struct kvm *kvm = vcpu->kvm;
-	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
+	u32 psci_fn = smccc_get_function(vcpu);
	unsigned long val;
	int ret = 1;

@@ -276,14 +308,14 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
		break;
	}

-	vcpu_set_reg(vcpu, 0, val);
+	smccc_set_retval(vcpu, val, 0, 0, 0);
	return ret;
 }

 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
	struct kvm *kvm = vcpu->kvm;
-	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
+	u32 psci_fn = smccc_get_function(vcpu);
	unsigned long val;

	switch (psci_fn) {
@@ -301,7 +333,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
		break;
	}

-	vcpu_set_reg(vcpu, 0, val);
+	smccc_set_retval(vcpu, val, 0, 0, 0);
	return 1;
 }
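
With the accessors in place, a handler reads its SMCCC function ID and
arguments uniformly and, per the calling convention, returns results in
the first four GP registers with the unused ones cleared. A sketch of a
hypothetical handler built on them (example_handle_call is not from the
patch; the PSCI constants are the kernel's own):

/*
 * Hypothetical handler using the new accessors.
 * PSCI_0_2_FN_PSCI_VERSION takes no arguments and returns one value,
 * so the remaining result registers are zeroed by smccc_set_retval().
 */
#include <linux/kvm_host.h>
#include <uapi/linux/psci.h>

static int example_handle_call(struct kvm_vcpu *vcpu)
{
	u32 fn = smccc_get_function(vcpu);
	unsigned long val;

	switch (fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		val = KVM_ARM_PSCI_0_2;	/* version encoding used by KVM */
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return 1;	/* resume the guest */
}
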