diff mbox

[v3] ARM: add get_user() support for 8 byte types

Message ID 1402587755-29245-1-git-send-email-daniel.thompson@linaro.org
State New
Headers show

Commit Message

Daniel Thompson June 12, 2014, 3:42 p.m. UTC
A new atomic modeset/pageflip ioctl being developed in DRM requires
get_user() to work for 64bit types (in addition to just put_user()).

v1: original
v2: pass correct size to check_uaccess, and better handling of narrowing
    double word read with __get_user_xb() (Russell King's suggestion)
v3: fix a couple of checkpatch issues

Signed-off-by: Rob Clark <robdclark@gmail.com>
Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
Cc: Russell King - ARM Linux <linux@arm.linux.org.uk>
---

Notes:
    I'd like to wake this patch up again. It was rejected back in 2012 on
    the grounds that other architectures (notably x86-32) didn't implement
    this so adding for ARM risked portability problems in drivers. However
    shortly after the discussion (in fact I believe that as a *result* of
    that discussion) support for 64-bit get_user() was added for x86-32.
    
    A quick review of different architectures' uaccess.h files shows that ARM
    is in the minority (even after excluding 64-bit architectures) in not
    implementing this feature.
    
    The reasons to wake it up are the same as before. Recent contributions,
    including to DRM[1] and binder[2] would prefer to use the 64-bit values
    in their interfaces without gotchas like having to use copy_from_user().
    
    [1] http://thread.gmane.org/gmane.comp.video.dri.devel/102135/focus=102149
    [2] http://thread.gmane.org/gmane.linux.kernel/1653448/focus=1653449

 arch/arm/include/asm/uaccess.h | 18 +++++++++++++++++-
 arch/arm/lib/getuser.S         | 17 ++++++++++++++++-
 2 files changed, 33 insertions(+), 2 deletions(-)

Comments

Arnd Bergmann June 12, 2014, 5:04 p.m. UTC | #1
On Thursday 12 June 2014 16:42:35 Daniel Thompson wrote:
>  #define __get_user_check(x,p)                                                  \
>         ({                                                              \
>                 unsigned long __limit = current_thread_info()->addr_limit - 1; \
>                 register const typeof(*(p)) __user *__p asm("r0") = (p);\
> -               register unsigned long __r2 asm("r2");                  \
> +               register typeof(x) __r2 asm("r2");                      \
>                 register unsigned long __l asm("r1") = __limit;         \
>                 register int __e asm("r0");                             \
>                 switch (sizeof(*(__p))) {                               \
> @@ -142,6 +152,12 @@ extern int __get_user_4(void *);
>                 case 4:                                                 \
>                         __get_user_x(__r2, __p, __e, __l, 4);           \
>                         break;                                          \
> +               case 8:                                                 \
> +                       if (sizeof((x)) < 8)                            \
> +                               __get_user_xb(__r2, __p, __e, __l, 4);  \
> +                       else                                            \
> +                               __get_user_x(__r2, __p, __e, __l, 8);   \
> +                       break;                                          \
>                 default: __e = __get_user_bad(); break;                 \
>                 }                                                       \
>                 x = (typeof(*(p))) __r2;                                \
> 

I don't think there is a way to do this without copying the
__builtin_choose_expr() hack from x86.

	Arnd
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/
Daniel Thompson June 17, 2014, 10:17 a.m. UTC | #2
On 12/06/14 16:58, Russell King - ARM Linux wrote:
> On Thu, Jun 12, 2014 at 04:42:35PM +0100, Daniel Thompson wrote:
>> A new atomic modeset/pageflip ioctl being developed in DRM requires
>> get_user() to work for 64bit types (in addition to just put_user()).
>>
>> v1: original
>> v2: pass correct size to check_uaccess, and better handling of narrowing
>>     double word read with __get_user_xb() (Russell King's suggestion)
>> v3: fix a couple of checkpatch issues
> 
> This is still unsafe.
> 
>>  #define __get_user_check(x,p)							\
>>  	({								\
>>  		unsigned long __limit = current_thread_info()->addr_limit - 1; \
>>  		register const typeof(*(p)) __user *__p asm("r0") = (p);\
>> -		register unsigned long __r2 asm("r2");			\
>> +		register typeof(x) __r2 asm("r2");			\
> 
> So, __r2 becomes the type of 'x'.  If 'x' is a 64-bit type, and *p is
> an 8-bit, 16-bit, or 32-bit type, this fails horribly by leaving the
> upper word of __r2 undefined.

It is true that after the switch statement the contents of r3 are
undefined. However...


>  		register unsigned long __l asm("r1") = __limit; \
>  		register int __e asm("r0");			\
>  		switch (sizeof(*(__p))) {			\
> @@ -142,6 +152,12 @@ extern int __get_user_4(void *);
>  		case 4:						\
>  			__get_user_x(__r2, __p, __e, __l, 4);	\
>  			break;					\
> +		case 8:						\
> +			if (sizeof((x)) < 8)			\
> +				__get_user_xb(__r2, __p, __e, __l, 4);\
> +			else					\
> +				__get_user_x(__r2, __p, __e, __l, 8);\
> +			break;					\
>  		default: __e = __get_user_bad(); break;		\
>  		}						\
>  		x = (typeof(*(p))) __r2;			\

... at this point there is a narrowing cast followed by an implicit
widening. This results in the compiler either ignoring r3 altogether or,
if spilling to the stack, generating code to set r3 to zero before doing
the store.

I think this approach also makes 8-bit and 16-bit get_user() faster in
some cases where the types of *p and x are similar 8- or 16-bit types.
This is because the compiler will never generate redundant narrowings.

Note that the speed improvement looks extremely marginal; the size of
the .text section (for a multi_v7_defconfig kernel) only gets 96 bytes
smaller (a.k.a. 0.0015%).


Daniel.
Russell King - ARM Linux June 17, 2014, 11:09 a.m. UTC | #3
On Tue, Jun 17, 2014 at 11:17:23AM +0100, Daniel Thompson wrote:
> ... at this point there is a narrowing cast followed by an implicit
> widening. This results in compiler either ignoring r3 altogether or, if
> spilling to the stack, generating code to set r3 to zero before doing
> the store.

In actual fact, there's very little difference between the two
implementations in terms of generated code.

The difference between them is what happens on the 64-bit big endian
narrowing case, where we use __get_user_4 with your version.  This
adds one additional instruction.

The little endian case results in identical code except for register
usage - for example, with my test for a 32-bit being widened to 64-bit:

        str     lr, [sp, #-4]!
-       mov     r3, r0
+       mov     ip, r0
        mov     r0, r1
 #APP
 @ 280 "t-getuser.c" 1
        bl      __get_user_4
 @ 0 "" 2
-       str     r2, [r3, #0]
-       mov     r2, #0
-       str     r2, [r3, #4]
+       mov     r3, #0
+       str     r2, [ip, #0]
+       str     r3, [ip, #4]
        ldr     pc, [sp], #4

and 64-bit narrowed to 32-bit:

        str     lr, [sp, #-4]!
-       mov     ip, r0
+       mov     r3, r0
        mov     r0, r1
 #APP
 @ 275 "t-getuser.c" 1
-       bl      __get_user_8
+       bl      __get_user_4
 @ 0 "" 2
-       str     r2, [ip, #0]
+       str     r2, [r3, #0]
        ldr     pc, [sp], #4

In terms of type checking, both seem to get it correct (which is something
I'm concerned about by any implementation since this is just as important
as the generated code).
Daniel Thompson June 17, 2014, 1:28 p.m. UTC | #4
On 17/06/14 12:09, Russell King - ARM Linux wrote:
> On Tue, Jun 17, 2014 at 11:17:23AM +0100, Daniel Thompson wrote:
>> ... at this point there is a narrowing cast followed by an implicit
>> widening. This results in compiler either ignoring r3 altogether or, if
>> spilling to the stack, generating code to set r3 to zero before doing
>> the store.
> 
> In actual fact, there's very little difference between the two
> implementations in terms of generated code.
> 
> The difference between them is what happens on the 64-bit big endian
> narrowing case, where we use __get_user_4 with your version.  This
> adds one additional instruction.

Good point.


> and 64-bit narrowed to 32-bit:
> 
>         str     lr, [sp, #-4]!
> -       mov     ip, r0
> +       mov     r3, r0
>         mov     r0, r1
>  #APP
>  @ 275 "t-getuser.c" 1
> -       bl      __get_user_8
> +       bl      __get_user_4
>  @ 0 "" 2
> -       str     r2, [ip, #0]
> +       str     r2, [r3, #0]
>         ldr     pc, [sp], #4

The latter case avoids allocating r3 for the __get_user_x and should
reduce register pressure and, potentially, save a few instructions
elsewhere (one of my rather large test functions does demonstrate this
effect).

I don't know if we care about that. If we do I'm certainly happy to put
a patch together that exploits this (whilst avoiding the add in the big
endian case).


Daniel.
Russell King - ARM Linux June 17, 2014, 1:36 p.m. UTC | #5
On Tue, Jun 17, 2014 at 02:28:44PM +0100, Daniel Thompson wrote:
> On 17/06/14 12:09, Russell King - ARM Linux wrote:
> > On Tue, Jun 17, 2014 at 11:17:23AM +0100, Daniel Thompson wrote:
> >> ... at this point there is a narrowing cast followed by an implicit
> >> widening. This results in compiler either ignoring r3 altogether or, if
> >> spilling to the stack, generating code to set r3 to zero before doing
> >> the store.
> > 
> > In actual fact, there's very little difference between the two
> > implementations in terms of generated code.
> > 
> > The difference between them is what happens on the 64-bit big endian
> > narrowing case, where we use __get_user_4 with your version.  This
> > adds one additional instruction.
> 
> Good point.
> 
> 
> > and 64-bit narrowed to 32-bit:
> > 
> >         str     lr, [sp, #-4]!
> > -       mov     ip, r0
> > +       mov     r3, r0
> >         mov     r0, r1
> >  #APP
> >  @ 275 "t-getuser.c" 1
> > -       bl      __get_user_8
> > +       bl      __get_user_4
> >  @ 0 "" 2
> > -       str     r2, [ip, #0]
> > +       str     r2, [r3, #0]
> >         ldr     pc, [sp], #4
> 
> The later case avoids allocating r3 for the __get_user_x and should
> reduce register pressure and, potentially, saves a few instructions
> elsewhere (one of my rather large test functions does demonstrate this
> effect).
> 
> I don't know if we care about that. If we do I'm certainly happy to put
> a patch together than exploits this (whilst avoiding the add in the big
> endian case).

No need - the + case is your version, the - case is my version.  So your
version wins on this point. :)
Daniel Thompson June 17, 2014, 1:54 p.m. UTC | #6
On 17/06/14 14:36, Russell King - ARM Linux wrote:
> On Tue, Jun 17, 2014 at 02:28:44PM +0100, Daniel Thompson wrote:
>> On 17/06/14 12:09, Russell King - ARM Linux wrote:
>>> On Tue, Jun 17, 2014 at 11:17:23AM +0100, Daniel Thompson wrote:
>>>> ... at this point there is a narrowing cast followed by an implicit
>>>> widening. This results in compiler either ignoring r3 altogether or, if
>>>> spilling to the stack, generating code to set r3 to zero before doing
>>>> the store.
>>>
>>> In actual fact, there's very little difference between the two
>>> implementations in terms of generated code.
>>>
>>> The difference between them is what happens on the 64-bit big endian
>>> narrowing case, where we use __get_user_4 with your version.  This
>>> adds one additional instruction.
>>
>> Good point.
>>
>>
>>> and 64-bit narrowed to 32-bit:
>>>
>>>         str     lr, [sp, #-4]!
>>> -       mov     ip, r0
>>> +       mov     r3, r0
>>>         mov     r0, r1
>>>  #APP
>>>  @ 275 "t-getuser.c" 1
>>> -       bl      __get_user_8
>>> +       bl      __get_user_4
>>>  @ 0 "" 2
>>> -       str     r2, [ip, #0]
>>> +       str     r2, [r3, #0]
>>>         ldr     pc, [sp], #4
>>
>> The later case avoids allocating r3 for the __get_user_x and should
>> reduce register pressure and, potentially, saves a few instructions
>> elsewhere (one of my rather large test functions does demonstrate this
>> effect).
>>
>> I don't know if we care about that. If we do I'm certainly happy to put
>> a patch together than exploits this (whilst avoiding the add in the big
>> endian case).
> 
> No need - the + case is your version, the - case is my version.  So your
> version wins on this point. :)

:) Thanks, although credit really goes to Rob Clark...

I think currently:

1. Rob's patch is better for register pressure in the narrowing case
   (above).

2. Your patch is probably better for big endian due to the add in Rob's
   version. I say probably because, without proof, I suspect the cost
   of the add would in most cases outweigh the register pressure
   benefit.

3. Your patch has better implementation of __get_user_8 (it uses ldrd).

Hence I suspect we need to combine elements from both patches.
diff mbox

Patch

diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 75d9579..5f7db3fb 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -107,6 +107,7 @@  static inline void set_fs(mm_segment_t fs)
 extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
+extern int __get_user_8(void *);
 
 #define __GUP_CLOBBER_1	"lr", "cc"
 #ifdef CONFIG_CPU_USE_DOMAINS
@@ -115,6 +116,7 @@  extern int __get_user_4(void *);
 #define __GUP_CLOBBER_2 "lr", "cc"
 #endif
 #define __GUP_CLOBBER_4	"lr", "cc"
+#define __GUP_CLOBBER_8	"lr", "cc"
 
 #define __get_user_x(__r2,__p,__e,__l,__s)				\
 	   __asm__ __volatile__ (					\
@@ -125,11 +127,19 @@  extern int __get_user_4(void *);
 		: "0" (__p), "r" (__l)					\
 		: __GUP_CLOBBER_##__s)
 
+/* narrowing a double-word get into a single 32bit word register: */
+#ifdef BIG_ENDIAN
+#define __get_user_xb(__r2, __p, __e, __l, __s)				\
+	__get_user_x(__r2, (uintptr_t)__p + 4, __e, __l, __s)
+#else
+#define __get_user_xb __get_user_x
+#endif
+
 #define __get_user_check(x,p)							\
 	({								\
 		unsigned long __limit = current_thread_info()->addr_limit - 1; \
 		register const typeof(*(p)) __user *__p asm("r0") = (p);\
-		register unsigned long __r2 asm("r2");			\
+		register typeof(x) __r2 asm("r2");			\
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
@@ -142,6 +152,12 @@  extern int __get_user_4(void *);
 		case 4:							\
 			__get_user_x(__r2, __p, __e, __l, 4);		\
 			break;						\
+		case 8:							\
+			if (sizeof((x)) < 8)				\
+				__get_user_xb(__r2, __p, __e, __l, 4);	\
+			else						\
+				__get_user_x(__r2, __p, __e, __l, 8);	\
+			break;						\
 		default: __e = __get_user_bad(); break;			\
 		}							\
 		x = (typeof(*(p))) __r2;				\
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 9b06bb4..ed98707 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -18,7 +18,7 @@ 
  * Inputs:	r0 contains the address
  *		r1 contains the address limit, which must be preserved
  * Outputs:	r0 is the error code
- *		r2 contains the zero-extended value
+ *		r2, r3 contains the zero-extended value
  *		lr corrupted
  *
  * No other registers must be altered.  (see <asm/uaccess.h>
@@ -66,6 +66,19 @@  ENTRY(__get_user_4)
 	mov	pc, lr
 ENDPROC(__get_user_4)
 
+ENTRY(__get_user_8)
+	check_uaccess r0, 8, r1, r2, __get_user_bad
+#ifdef CONFIG_THUMB2_KERNEL
+5: TUSER(ldr)	r2, [r0]
+6: TUSER(ldr)	r3, [r0, #4]
+#else
+5: TUSER(ldr)	r2, [r0], #4
+6: TUSER(ldr)	r3, [r0]
+#endif
+	mov	r0, #0
+	mov	pc, lr
+ENDPROC(__get_user_8)
+
 __get_user_bad:
 	mov	r2, #0
 	mov	r0, #-EFAULT
@@ -77,4 +90,6 @@  ENDPROC(__get_user_bad)
 	.long	2b, __get_user_bad
 	.long	3b, __get_user_bad
 	.long	4b, __get_user_bad
+	.long	5b, __get_user_bad
+	.long	6b, __get_user_bad
 .popsection