
[v2,2/2] arm64: enable context tracking

Message ID 1399419149-26685-3-git-send-email-larry.bassel@linaro.org
State New

Commit Message

Larry Bassel May 6, 2014, 11:32 p.m. UTC
Make calls to ct_user_enter when the kernel is exited
and ct_user_exit when the kernel is entered (in el0_da,
el0_ia, el0_svc, el0_irq).

These macros expand to function calls which will only work
properly if el0_sync and related code has been rearranged
(in a previous patch of this series).
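
For readers who have not looked at the context tracking code, here is a
minimal user-space C sketch (a toy model, not the kernel's implementation
in kernel/context_tracking.c) of what the two callees conceptually do:
they record whether the CPU is currently executing user or kernel code,
so that NO_HZ_FULL and RCU can treat time spent in userspace as an
extended quiescent state.

#include <stdio.h>

/* Toy per-CPU state; the real kernel keeps this in per-CPU data with
 * interrupts disabled around the update. */
enum ctx_state { IN_KERNEL, IN_USER };
static enum ctx_state cpu_ctx = IN_KERNEL;

/* Branched to from the ct_user_enter macro on the return-to-EL0 path. */
static void context_tracking_user_enter(void)
{
	if (cpu_ctx != IN_USER) {
		cpu_ctx = IN_USER;
		/* real kernel: stop kernel time accounting and tell RCU
		 * the CPU is entering an extended quiescent state */
	}
}

/* Branched to from the ct_user_exit macro on each el0_* entry path. */
static void context_tracking_user_exit(void)
{
	if (cpu_ctx != IN_KERNEL) {
		cpu_ctx = IN_KERNEL;
		/* real kernel: resume kernel time accounting and tell RCU
		 * the CPU has left the quiescent state */
	}
}

int main(void)
{
	context_tracking_user_enter();	/* kernel_exit with \el == 0 */
	context_tracking_user_exit();	/* e.g. el0_svc after enable_dbg */
	printf("context: %s\n", cpu_ctx == IN_KERNEL ? "kernel" : "user");
	return 0;
}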

In order to avoid saving registers, the slow syscall path
is forced (as x86 does).
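
To make the forced slow path concrete, the C sketch below models the
thread-flag test in el0_svc_naked: if any "work" flag is set, the syscall
is diverted to the __sys_trace path. The TIF_NOHZ value matches the
thread_info.h hunk below; the TIF_SYSCALL_TRACE value is assumed here for
illustration, and the real check is the pair of tbnz instructions in the
last hunk of the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit numbers modelled on arch/arm64/include/asm/thread_info.h
 * (TIF_NOHZ is added by this patch; TIF_SYSCALL_TRACE is assumed). */
#define TIF_SYSCALL_TRACE	8
#define TIF_NOHZ		24

/* Any of these flags sends the syscall down the traced (slow) path. */
static bool take_slow_path(uint64_t ti_flags)
{
	const uint64_t work = (1ULL << TIF_SYSCALL_TRACE) |
			      (1ULL << TIF_NOHZ);

	return (ti_flags & work) != 0;
}

int main(void)
{
	printf("no work flags -> %s path\n",
	       take_slow_path(0) ? "slow" : "fast");
	printf("TIF_NOHZ set  -> %s path\n",
	       take_slow_path(1ULL << TIF_NOHZ) ? "slow" : "fast");
	return 0;
}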

The calls to ct_user_exit are made after hardware debugging has been
enabled (enable_dbg).

The call to ct_user_enter is made at the beginning of the
kernel_exit macro.

This patch is based on earlier work by Kevin Hilman.

Signed-off-by: Kevin Hilman <khilman@linaro.org>
Signed-off-by: Larry Bassel <larry.bassel@linaro.org>
---
 arch/arm64/Kconfig                   |  1 +
 arch/arm64/include/asm/thread_info.h |  1 +
 arch/arm64/kernel/entry.S            | 22 ++++++++++++++++++++++
 3 files changed, 24 insertions(+)

Comments

Will Deacon May 7, 2014, 10:17 a.m. UTC | #1
On Wed, May 07, 2014 at 12:32:29AM +0100, Larry Bassel wrote:
> Make calls to ct_user_enter when the kernel is exited
> and ct_user_exit when the kernel is entered (in el0_da,
> el0_ia, el0_svc, el0_irq).

Why only these entry points? I can reschedule after any exception from EL0,
so I'd expect all exceptions from userspace to need annotating, no?

> These macros expand to function calls which will only work
> properly if el0_sync and related code has been rearranged
> (in a previous patch of this series).
> 
> In order to avoid saving registers, the slow syscall path
> is forced (as x86 does).

... and if you decide to handle undef exceptions, I think you'll need
the register saving too, in case the kernel needs to perform emulation.

Will
Larry Bassel May 7, 2014, 9:35 p.m. UTC | #2
On 07 May 14 11:17, Will Deacon wrote:
> On Wed, May 07, 2014 at 12:32:29AM +0100, Larry Bassel wrote:
> > Make calls to ct_user_enter when the kernel is exited
> > and ct_user_exit when the kernel is entered (in el0_da,
> > el0_ia, el0_svc, el0_irq).
> 
> Why only these entry points? I can reschedule after any exception from EL0,
> so I'd expect all exceptions from userspace to need annotating, no?
> 
> > These macros expand to function calls which will only work
> > properly if el0_sync and related code has been rearranged
> > (in a previous patch of this series).
> > 
> > In order to avoid saving registers, the slow syscall path
> > is forced (as x86 does).
> 
> ... and if you decide to handle undef exceptions, I think you'll need
> the register saving too, in case the kernel needs to perform emulation.

These are excellent points; I will rework the patch and submit v3.

Thanks for the feedback.

> 
> Will

Larry
Will Deacon May 8, 2014, 10:25 a.m. UTC | #3
On Thu, May 08, 2014 at 12:49:04AM +0100, Kevin Hilman wrote:
> Hi Will,

Hello Kevin,

> Will Deacon <will.deacon@arm.com> writes:
> > On Wed, May 07, 2014 at 12:32:29AM +0100, Larry Bassel wrote:
> >> Make calls to ct_user_enter when the kernel is exited
> >> and ct_user_exit when the kernel is entered (in el0_da,
> >> el0_ia, el0_svc, el0_irq).
> >
> > Why only these entry points? I can reschedule after any exception from EL0,
> > so I'd expect all exceptions from userspace to need annotating, no?
> 
> My initial approach to this, as you might recall (though it was over a
> year ago now), was to just instrument kernel_entry rather than sprinkle
> the instrumentation in el0_*.  However, your concern at the time was that
> since it ran before debugging was enabled, it would complicate debugging
> these paths.
> 
> Any chance you have another suggestion on how we might do this in
> kernel_entry rather than sprinkling the calls all over el0_*?  Or is
> sprinkling the only good way to handle this?

Unfortunately, different exceptions do subtly different things before
invoking the main handler. For example:

 - Stashing the FAR (fault address)
 - Enabling IRQs
 - Enabling debug
 - All the stuff on the syscall path

so putting the logic in kernel_entry isn't really doable.

Will

Patch

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index e6e4d37..152d92b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -55,6 +55,7 @@  config ARM64
 	select RTC_LIB
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
+	select HAVE_CONTEXT_TRACKING
 	help
 	  ARM 64-bit (AArch64) Linux support.
 
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 720e70b..301ea6a 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -108,6 +108,7 @@  static inline struct thread_info *current_thread_info(void)
 #define TIF_SINGLESTEP		21
 #define TIF_32BIT		22	/* 32bit process */
 #define TIF_SWITCH_MM		23	/* deferred switch_mm */
+#define TIF_NOHZ                24
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index d920d7f..5fe447c 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -30,6 +30,22 @@ 
 #include <asm/unistd32.h>
 
 /*
+ * Context tracking subsystem.  Used to instrument transitions
+ * between user and kernel mode.
+ */
+	.macro ct_user_exit
+#ifdef CONFIG_CONTEXT_TRACKING
+	bl	context_tracking_user_exit
+#endif
+	.endm
+
+	.macro ct_user_enter
+#ifdef CONFIG_CONTEXT_TRACKING
+	bl	context_tracking_user_enter
+#endif
+	.endm
+
+/*
  * Bad Abort numbers
  *-----------------
  */
@@ -88,6 +104,7 @@ 
 	.macro	kernel_exit, el, ret = 0
 	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
 	.if	\el == 0
+	ct_user_enter
 	ldr	x23, [sp, #S_SP]		// load return stack pointer
 	.endif
 	.if	\ret
@@ -427,6 +444,7 @@  el0_da:
 	enable_dbg
 	// enable interrupts before calling the main handler
 	enable_irq
+	ct_user_exit
 	mov	x0, x26
 	bic	x0, x0, #(0xff << 56)
 	mov	x1, x25
@@ -443,6 +461,7 @@  el0_ia:
 	enable_dbg
 	// enable interrupts before calling the main handler
 	enable_irq
+	ct_user_exit
 	mov	x0, x26
 	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
 	mov	x2, sp
@@ -511,6 +530,7 @@  el0_irq_naked:
 	bl	trace_hardirqs_off
 #endif
 
+	ct_user_exit
 	irq_handler
 	get_thread_info tsk
 
@@ -633,10 +653,12 @@  el0_svc_naked:					// compat entry point
 	isb
 	enable_dbg
 	enable_irq
+	ct_user_exit
 
 	get_thread_info tsk
 	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall tracing
 	tbnz	x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
+	tbnz	x16, #TIF_NOHZ, __sys_trace
 	adr	lr, ret_fast_syscall		// return address
 	cmp     scno, sc_nr                     // check upper syscall limit
 	b.hs	ni_sys