diff mbox series

[10/18] arm64: convert native/compat syscall entry to C

Message ID 20180514094640.27569-11-mark.rutland@arm.com
State Superseded
Headers show
Series arm64: invoke syscalls with pt_regs | expand

Commit Message

Mark Rutland May 14, 2018, 9:46 a.m. UTC
Now that the syscall invocation logic is in C, we can migrate the rest
of the syscall entry logic over, so that the entry assembly needn't look
at the register values at all.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
---
 arch/arm64/kernel/entry.S   | 42 ++++--------------------------------------
 arch/arm64/kernel/syscall.c | 40 ++++++++++++++++++++++++++++++++++++++--
 2 files changed, 42 insertions(+), 40 deletions(-)

-- 
2.11.0

Comments

Dave Martin May 14, 2018, 11:07 a.m. UTC | #1
On Mon, May 14, 2018 at 10:46:32AM +0100, Mark Rutland wrote:
> Now that the syscall invocation logic is in C, we can migrate the rest

> of the syscall entry logic over, so that the entry assembly needn't look

> at the register values at all.

> 

> Signed-off-by: Mark Rutland <mark.rutland@arm.com>

> Cc: Catalin Marinas <catalin.marinas@arm.com>

> Cc: Will Deacon <will.deacon@arm.com>

> ---

>  arch/arm64/kernel/entry.S   | 42 ++++--------------------------------------

>  arch/arm64/kernel/syscall.c | 40 ++++++++++++++++++++++++++++++++++++++--

>  2 files changed, 42 insertions(+), 40 deletions(-)

> 

> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S

> index 5c60369b52fc..13afefbf608f 100644

> --- a/arch/arm64/kernel/entry.S

> +++ b/arch/arm64/kernel/entry.S

> @@ -690,14 +690,9 @@ el0_sync_compat:

>  	b.ge	el0_dbg

>  	b	el0_inv

>  el0_svc_compat:

> -	/*

> -	 * AArch32 syscall handling

> -	 */

> -	ldr	x16, [tsk, #TSK_TI_FLAGS]	// load thread flags

> -	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer

> -	mov	wscno, w7			// syscall number in w7 (r7)

> -	mov     wsc_nr, #__NR_compat_syscalls

> -	b	el0_svc_naked

> +	mov	x0, sp

> +	bl	el0_svc_compat_handler

> +	b	ret_to_user

>  

>  	.align	6

>  el0_irq_compat:

> @@ -895,37 +890,8 @@ ENDPROC(ret_to_user)

>   */

>  	.align	6

>  el0_svc:

> -	ldr	x16, [tsk, #TSK_TI_FLAGS]	// load thread flags

> -	adrp	stbl, sys_call_table		// load syscall table pointer

> -	mov	wscno, w8			// syscall number in w8

> -	mov	wsc_nr, #__NR_syscalls

> -

> -#ifdef CONFIG_ARM64_SVE

> -alternative_if_not ARM64_SVE

> -	b	el0_svc_naked

> -alternative_else_nop_endif

> -	tbz	x16, #TIF_SVE, el0_svc_naked	// Skip unless TIF_SVE set:

> -	bic	x16, x16, #_TIF_SVE		// discard SVE state

> -	str	x16, [tsk, #TSK_TI_FLAGS]

> -

> -	/*

> -	 * task_fpsimd_load() won't be called to update CPACR_EL1 in

> -	 * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only

> -	 * happens if a context switch or kernel_neon_begin() or context

> -	 * modification (sigreturn, ptrace) intervenes.

> -	 * So, ensure that CPACR_EL1 is already correct for the fast-path case:

> -	 */

> -	mrs	x9, cpacr_el1

> -	bic	x9, x9, #CPACR_EL1_ZEN_EL0EN	// disable SVE for el0

> -	msr	cpacr_el1, x9			// synchronised by eret to el0

> -#endif

> -

> -el0_svc_naked:					// compat entry point

>  	mov	x0, sp

> -	mov	w1, wscno

> -	mov	w2, wsc_nr

> -	mov	x3, stbl

> -	bl	el0_svc_common

> +	bl	el0_svc_handler

>  	b	ret_to_user

>  ENDPROC(el0_svc)

>  

> diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c

> index 5df857e32b48..4706f841e758 100644

> --- a/arch/arm64/kernel/syscall.c

> +++ b/arch/arm64/kernel/syscall.c

> @@ -6,7 +6,9 @@

>  #include <linux/ptrace.h>

>  

>  #include <asm/daifflags.h>

> +#include <asm/fpsimd.h>

>  #include <asm/thread_info.h>

> +#include <asm/unistd.h>

>  

>  long do_ni_syscall(struct pt_regs *regs);

>  

> @@ -41,8 +43,8 @@ static inline bool has_syscall_work(unsigned long flags)

>  int syscall_trace_enter(struct pt_regs *regs);

>  void syscall_trace_exit(struct pt_regs *regs);

>  

> -asmlinkage void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,

> -			       syscall_fn_t syscall_table[])

> +static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,

> +			   syscall_fn_t syscall_table[])

>  {

>  	unsigned long flags = current_thread_info()->flags;

>  

> @@ -79,3 +81,37 @@ asmlinkage void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,

>  trace_exit:

>  	syscall_trace_exit(regs);

>  }

> +

> +static inline void sve_user_reset(void)


Static function with no caller...

> +{

> +	if (!system_supports_sve())

> +		return;

> +

> +	/*

> +	 * task_fpsimd_load() won't be called to update CPACR_EL1 in

> +	 * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only

> +	 * happens if a context switch or kernel_neon_begin() or context

> +	 * modification (sigreturn, ptrace) intervenes.

> +	 * So, ensure that CPACR_EL1 is already correct for the fast-path case.

> +	 */

> +	if (test_and_clear_thread_flag(TIF_SVE))

> +		sve_user_disable();


sve_user_disable() is already inline, and incorporates the if()
internally via sysreg_clear_set().

So, should this just be

	clear_thread_flag(TIF_SVE);
	sve_user_disable();

> +}

> +

> +extern syscall_fn_t sys_call_table[];

> +

> +asmlinkage void el0_svc_handler(struct pt_regs *regs)

> +{


if (system_supports_sve()) ?

> +	sve_user_disable();


Or should this be replaced by a call to sve_user_reset()?

I suspect the latter, since we do want to be clearing TIF_SVE here too.


[...]

Cheers
---Dave
Mark Rutland May 14, 2018, 11:58 a.m. UTC | #2
On Mon, May 14, 2018 at 12:07:30PM +0100, Dave Martin wrote:
> On Mon, May 14, 2018 at 10:46:32AM +0100, Mark Rutland wrote:


> > diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c

> > index 5df857e32b48..4706f841e758 100644

> > --- a/arch/arm64/kernel/syscall.c

> > +++ b/arch/arm64/kernel/syscall.c

> > @@ -6,7 +6,9 @@

> >  #include <linux/ptrace.h>

> >  

> >  #include <asm/daifflags.h>

> > +#include <asm/fpsimd.h>

> >  #include <asm/thread_info.h>

> > +#include <asm/unistd.h>

> >  

> >  long do_ni_syscall(struct pt_regs *regs);

> >  

> > @@ -41,8 +43,8 @@ static inline bool has_syscall_work(unsigned long flags)

> >  int syscall_trace_enter(struct pt_regs *regs);

> >  void syscall_trace_exit(struct pt_regs *regs);

> >  

> > -asmlinkage void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,

> > -			       syscall_fn_t syscall_table[])

> > +static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,

> > +			   syscall_fn_t syscall_table[])

> >  {

> >  	unsigned long flags = current_thread_info()->flags;

> >  

> > @@ -79,3 +81,37 @@ asmlinkage void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,

> >  trace_exit:

> >  	syscall_trace_exit(regs);

> >  }

> > +

> > +static inline void sve_user_reset(void)

> 

> Static function with no caller...


Ugh, this was intended to be called below in el0_svc_handler().

> > +{

> > +	if (!system_supports_sve())

> > +		return;

> > +

> > +	/*

> > +	 * task_fpsimd_load() won't be called to update CPACR_EL1 in

> > +	 * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only

> > +	 * happens if a context switch or kernel_neon_begin() or context

> > +	 * modification (sigreturn, ptrace) intervenes.

> > +	 * So, ensure that CPACR_EL1 is already correct for the fast-path case.

> > +	 */

> > +	if (test_and_clear_thread_flag(TIF_SVE))

> > +		sve_user_disable();

> 

> sve_user_disable() is already inline, and incorporates the if()

> internally via sysreg_clear_set().

> 

> So, should this just be

> 

> 	clear_thread_flag(TIF_SVE);

> 	sve_user_disable();


Sure. That does mean we'll unconditionally read cpacr_el1, but I assume
you're happy with that. I'll note the difference in the commit message.

> > +}

> > +

> > +extern syscall_fn_t sys_call_table[];

> > +

> > +asmlinkage void el0_svc_handler(struct pt_regs *regs)

> > +{

> 

> if (system_supports_sve()) ?

> 

> > +	sve_user_disable();

> 

> Or should this be replaced by a call to sve_user_reset()?

> 

> I suspect the latter, since we do want to be clearing TIF_SVE here too.


Yes, this was meant to be sve_user_reset().

Thanks,
Mark.
Dave Martin May 14, 2018, 2:43 p.m. UTC | #3
On Mon, May 14, 2018 at 12:58:05PM +0100, Mark Rutland wrote:
> On Mon, May 14, 2018 at 12:07:30PM +0100, Dave Martin wrote:

> > On Mon, May 14, 2018 at 10:46:32AM +0100, Mark Rutland wrote:

> 

> > > diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c

> > > index 5df857e32b48..4706f841e758 100644

> > > --- a/arch/arm64/kernel/syscall.c

> > > +++ b/arch/arm64/kernel/syscall.c

> > > @@ -6,7 +6,9 @@

> > >  #include <linux/ptrace.h>

> > >  

> > >  #include <asm/daifflags.h>

> > > +#include <asm/fpsimd.h>

> > >  #include <asm/thread_info.h>

> > > +#include <asm/unistd.h>

> > >  

> > >  long do_ni_syscall(struct pt_regs *regs);

> > >  

> > > @@ -41,8 +43,8 @@ static inline bool has_syscall_work(unsigned long flags)

> > >  int syscall_trace_enter(struct pt_regs *regs);

> > >  void syscall_trace_exit(struct pt_regs *regs);

> > >  

> > > -asmlinkage void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,

> > > -			       syscall_fn_t syscall_table[])

> > > +static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,

> > > +			   syscall_fn_t syscall_table[])

> > >  {

> > >  	unsigned long flags = current_thread_info()->flags;

> > >  

> > > @@ -79,3 +81,37 @@ asmlinkage void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,

> > >  trace_exit:

> > >  	syscall_trace_exit(regs);

> > >  }

> > > +

> > > +static inline void sve_user_reset(void)

> > 

> > Static function with no caller...

> 

> Ugh, this was intended to be called below in el0_svc_handler().

> 

> > > +{

> > > +	if (!system_supports_sve())

> > > +		return;

> > > +

> > > +	/*

> > > +	 * task_fpsimd_load() won't be called to update CPACR_EL1 in

> > > +	 * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only

> > > +	 * happens if a context switch or kernel_neon_begin() or context

> > > +	 * modification (sigreturn, ptrace) intervenes.

> > > +	 * So, ensure that CPACR_EL1 is already correct for the fast-path case.

> > > +	 */

> > > +	if (test_and_clear_thread_flag(TIF_SVE))

> > > +		sve_user_disable();

> > 

> > sve_user_disable() is already inline, and incorporates the if()

> > internally via sysreg_clear_set().

> > 

> > So, should this just be

> > 

> > 	clear_thread_flag(TIF_SVE);

> > 	sve_user_disable();

> 

> Sure. That does mean we'll unconditionally read cpacr_el1, but I assume

> you're happy with that. I'll note the difference in the commit message.


This is what the code does today, conditioned on system_supports_sve().

I'm assuming that reading CPACR_EL1 is cheap ... or have you come across
counterexamples to that?

> > > +}

> > > +

> > > +extern syscall_fn_t sys_call_table[];

> > > +

> > > +asmlinkage void el0_svc_handler(struct pt_regs *regs)

> > > +{

> > 

> > if (system_supports_sve()) ?

> > 

> > > +	sve_user_disable();

> > 

> > Or should this be replaced by a call to sve_user_reset()?

> > 

> > I suspect the latter, since we do want to be clearing TIF_SVE here too.

> 

> Yes, this was meant to be sve_user_reset().


OK.  Just to be clear, I think there should be a system_supports_sve()
check here (in case that wasn't obvious from my previous reply).

Cheers
---Dave
Mark Rutland May 14, 2018, 3:01 p.m. UTC | #4
On Mon, May 14, 2018 at 03:43:36PM +0100, Dave Martin wrote:
> On Mon, May 14, 2018 at 12:58:05PM +0100, Mark Rutland wrote:

> > On Mon, May 14, 2018 at 12:07:30PM +0100, Dave Martin wrote:

> > > On Mon, May 14, 2018 at 10:46:32AM +0100, Mark Rutland wrote:

> > > > +{

> > > > +	if (!system_supports_sve())

> > > > +		return;

> > > > +

> > > > +	/*

> > > > +	 * task_fpsimd_load() won't be called to update CPACR_EL1 in

> > > > +	 * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only

> > > > +	 * happens if a context switch or kernel_neon_begin() or context

> > > > +	 * modification (sigreturn, ptrace) intervenes.

> > > > +	 * So, ensure that CPACR_EL1 is already correct for the fast-path case.

> > > > +	 */

> > > > +	if (test_and_clear_thread_flag(TIF_SVE))

> > > > +		sve_user_disable();

> > > 

> > > sve_user_disable() is already inline, and incorporates the if()

> > > internally via sysreg_clear_set().

> > > 

> > > So, should this just be

> > > 

> > > 	clear_thread_flag(TIF_SVE);

> > > 	sve_user_disable();

> > 

> > Sure. That does mean we'll unconditionally read cpacr_el1, but I assume

> > you're happy with that. I'll note the difference in the commit message.

> 

> This is what the code does today, conditioned on system_supports_sve().

> 

> I'm assuming that reading CPACR_EL1 is cheap ... or have you come across

> counterexamples to that?


I have no data either way. :)

> > > > +}

> > > > +

> > > > +extern syscall_fn_t sys_call_table[];

> > > > +

> > > > +asmlinkage void el0_svc_handler(struct pt_regs *regs)

> > > > +{

> > > 

> > > if (system_supports_sve()) ?

> > > 

> > > > +	sve_user_disable();

> > > 

> > > Or should this be replaced by a call to sve_user_reset()?

> > > 

> > > I suspect the latter, since we do want to be clearing TIF_SVE here too.

> > 

> > Yes, this was meant to be sve_user_reset().

> 

> OK.  Just to be clear, I think there should be a system_supports_sve()

> check here (in case that wasn't obvious from my previous reply).


I understood that; the check is inside sve_user_reset(), which I had
meant to call here.

With your above comments, I now have the following:

static inline void sve_user_reset(void)
{
	if (!system_supports_sve())
		return;

	/*
	 * task_fpsimd_load() won't be called to update CPACR_EL1 in
	 * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
	 * happens if a context switch or kernel_neon_begin() or context
	 * modification (sigreturn, ptrace) intervenes.
	 * So, ensure that CPACR_EL1 is already correct for the fast-path case.
	 */
	clear_thread_flag(TIF_SVE);
	sve_user_disable();
}

asmlinkage void el0_svc_handler(struct pt_regs *regs)
{
	sve_user_reset();
	el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
}

... which I think alleviates that concern?

Thanks,
Mark.
diff mbox series

Patch

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 5c60369b52fc..13afefbf608f 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -690,14 +690,9 @@  el0_sync_compat:
 	b.ge	el0_dbg
 	b	el0_inv
 el0_svc_compat:
-	/*
-	 * AArch32 syscall handling
-	 */
-	ldr	x16, [tsk, #TSK_TI_FLAGS]	// load thread flags
-	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
-	mov	wscno, w7			// syscall number in w7 (r7)
-	mov     wsc_nr, #__NR_compat_syscalls
-	b	el0_svc_naked
+	mov	x0, sp
+	bl	el0_svc_compat_handler
+	b	ret_to_user
 
 	.align	6
 el0_irq_compat:
@@ -895,37 +890,8 @@  ENDPROC(ret_to_user)
  */
 	.align	6
 el0_svc:
-	ldr	x16, [tsk, #TSK_TI_FLAGS]	// load thread flags
-	adrp	stbl, sys_call_table		// load syscall table pointer
-	mov	wscno, w8			// syscall number in w8
-	mov	wsc_nr, #__NR_syscalls
-
-#ifdef CONFIG_ARM64_SVE
-alternative_if_not ARM64_SVE
-	b	el0_svc_naked
-alternative_else_nop_endif
-	tbz	x16, #TIF_SVE, el0_svc_naked	// Skip unless TIF_SVE set:
-	bic	x16, x16, #_TIF_SVE		// discard SVE state
-	str	x16, [tsk, #TSK_TI_FLAGS]
-
-	/*
-	 * task_fpsimd_load() won't be called to update CPACR_EL1 in
-	 * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
-	 * happens if a context switch or kernel_neon_begin() or context
-	 * modification (sigreturn, ptrace) intervenes.
-	 * So, ensure that CPACR_EL1 is already correct for the fast-path case:
-	 */
-	mrs	x9, cpacr_el1
-	bic	x9, x9, #CPACR_EL1_ZEN_EL0EN	// disable SVE for el0
-	msr	cpacr_el1, x9			// synchronised by eret to el0
-#endif
-
-el0_svc_naked:					// compat entry point
 	mov	x0, sp
-	mov	w1, wscno
-	mov	w2, wsc_nr
-	mov	x3, stbl
-	bl	el0_svc_common
+	bl	el0_svc_handler
 	b	ret_to_user
 ENDPROC(el0_svc)
 
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 5df857e32b48..4706f841e758 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -6,7 +6,9 @@ 
 #include <linux/ptrace.h>
 
 #include <asm/daifflags.h>
+#include <asm/fpsimd.h>
 #include <asm/thread_info.h>
+#include <asm/unistd.h>
 
 long do_ni_syscall(struct pt_regs *regs);
 
@@ -41,8 +43,8 @@  static inline bool has_syscall_work(unsigned long flags)
 int syscall_trace_enter(struct pt_regs *regs);
 void syscall_trace_exit(struct pt_regs *regs);
 
-asmlinkage void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
-			       syscall_fn_t syscall_table[])
+static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
+			   syscall_fn_t syscall_table[])
 {
 	unsigned long flags = current_thread_info()->flags;
 
@@ -79,3 +81,37 @@  asmlinkage void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 trace_exit:
 	syscall_trace_exit(regs);
 }
+
+static inline void sve_user_reset(void)
+{
+	if (!system_supports_sve())
+		return;
+
+	/*
+	 * task_fpsimd_load() won't be called to update CPACR_EL1 in
+	 * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
+	 * happens if a context switch or kernel_neon_begin() or context
+	 * modification (sigreturn, ptrace) intervenes.
+	 * So, ensure that CPACR_EL1 is already correct for the fast-path case.
+	 */
+	if (test_and_clear_thread_flag(TIF_SVE))
+		sve_user_disable();
+}
+
+extern syscall_fn_t sys_call_table[];
+
+asmlinkage void el0_svc_handler(struct pt_regs *regs)
+{
+	sve_user_disable();
+	el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
+}
+
+#ifdef CONFIG_COMPAT
+extern syscall_fn_t compat_sys_call_table[];
+
+asmlinkage void el0_svc_compat_handler(struct pt_regs *regs)
+{
+	el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls,
+		       compat_sys_call_table);
+}
+#endif