[v4.4,V2,11/43] arm64: uaccess: Mask __user pointers for __arch_{clear, copy_*}_user

Message ID 7d56c56af2f883958d5e74fa3178a1f774b9fd94.1562908075.git.viresh.kumar@linaro.org
State Superseded
Series V4.4 backport of arm64 Spectre patches

Commit Message

Viresh Kumar July 12, 2019, 5:27 a.m. UTC
From: Will Deacon <will.deacon@arm.com>

commit f71c2ffcb20dd8626880747557014bb9a61eb90e upstream.

Like we've done for get_user and put_user, ensure that user pointers
are masked before invoking the underlying __arch_{clear,copy_*}_user
operations.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
[ v4.4: fixup for v4.4 style uaccess primitives ]
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>

---
 arch/arm64/include/asm/uaccess.h | 20 ++++++++++++--------
 arch/arm64/kernel/arm64ksyms.c   |  4 ++--
 arch/arm64/lib/clear_user.S      |  6 +++---
 arch/arm64/lib/copy_in_user.S    |  4 ++--
 4 files changed, 19 insertions(+), 15 deletions(-)

-- 
2.21.0.rc0.269.g1a574e7a288b
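
For readers who want a feel for what the masking buys, here is a minimal,
self-contained userspace sketch of the idea: the wrapper runs the __user
pointer through a masking helper that forces any out-of-range pointer to
NULL before the arch-level copy routine ever sees it. Every name below
(FAKE_ADDR_LIMIT, mask_user_ptr, fake_arch_copy_from_user,
fake_copy_from_user) is a made-up stand-in, not kernel code; the real
__uaccess_mask_ptr(), added earlier in this series, additionally relies on
a conditional-select sequence and a speculation barrier so the masking
itself cannot be bypassed speculatively.

/*
 * Illustrative userspace sketch only -- NOT the kernel implementation.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Made-up stand-in for the task's addr_limit. */
#define FAKE_ADDR_LIMIT	0x0000ffffffffffffUL

/*
 * Force an out-of-range pointer to NULL. The in-kernel helper achieves
 * the same effect with a conditional-select sequence plus a speculation
 * barrier, so a mispredicted access can never use the unmasked pointer.
 */
static void *mask_user_ptr(const void *ptr)
{
	uintptr_t p = (uintptr_t)ptr;
	uintptr_t keep = (uintptr_t)0 - (uintptr_t)(p <= FAKE_ADDR_LIMIT);

	return (void *)(p & keep);
}

/*
 * Stand-in for __arch_copy_from_user(): it only ever sees a pointer the
 * caller has already masked, and reports uncopied bytes on failure.
 */
static size_t fake_arch_copy_from_user(void *to, const void *from, size_t n)
{
	if (!from)
		return n;
	memcpy(to, from, n);
	return 0;
}

/* Mirrors the shape of the patched __copy_from_user() wrapper below. */
static size_t fake_copy_from_user(void *to, const void *from, size_t n)
{
	return fake_arch_copy_from_user(to, mask_user_ptr(from), n);
}

int main(void)
{
	char src[] = "hello", dst[8] = { 0 };
	size_t left = fake_copy_from_user(dst, src, sizeof(src));

	printf("bytes not copied: %zu, dst: \"%s\"\n", left, dst);
	return 0;
}

A pointer above FAKE_ADDR_LIMIT would come back as NULL and the copy would
report all n bytes as uncopied, which is the same failure mode that
copy_from_user() callers already handle.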

Comments

Mark Rutland July 31, 2019, 12:37 p.m. UTC | #1
On Fri, Jul 12, 2019 at 10:57:59AM +0530, Viresh Kumar wrote:
> From: Will Deacon <will.deacon@arm.com>
> 
> commit f71c2ffcb20dd8626880747557014bb9a61eb90e upstream.
> 
> Like we've done for get_user and put_user, ensure that user pointers
> are masked before invoking the underlying __arch_{clear,copy_*}_user
> operations.
> 
> Signed-off-by: Will Deacon <will.deacon@arm.com>
> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
> [ v4.4: fixup for v4.4 style uaccess primitives ]
> Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>

[...]

>  static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
>  {
>  	kasan_check_write(to, n);
> -	return  __arch_copy_from_user(to, from, n);
> +	return __arch_copy_from_user(to, __uaccess_mask_ptr(from), n);
> +
>  }
>  
>  static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
>  {
>  	kasan_check_read(from, n);
> -	return  __arch_copy_to_user(to, from, n);
> +	return __arch_copy_to_user(__uaccess_mask_ptr(to), from, n);
> +
>  }

Can we please drop the trailing whitespace from each of these? That
wasn't in the upstream commit or v4.9.y.

Otherwise, this looks fine.

Thanks,
Mark.
Viresh Kumar Aug. 1, 2019, 3:38 a.m. UTC | #2
On 31-07-19, 13:37, Mark Rutland wrote:
> On Fri, Jul 12, 2019 at 10:57:59AM +0530, Viresh Kumar wrote:
> > From: Will Deacon <will.deacon@arm.com>
> > 
> > commit f71c2ffcb20dd8626880747557014bb9a61eb90e upstream.
> > 
> > Like we've done for get_user and put_user, ensure that user pointers
> > are masked before invoking the underlying __arch_{clear,copy_*}_user
> > operations.
> > 
> > Signed-off-by: Will Deacon <will.deacon@arm.com>
> > Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
> > [ v4.4: fixup for v4.4 style uaccess primitives ]
> > Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
> 
> [...]
> 
> >  static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
> >  {
> >  	kasan_check_write(to, n);
> > -	return  __arch_copy_from_user(to, from, n);
> > +	return __arch_copy_from_user(to, __uaccess_mask_ptr(from), n);
> > +
> >  }
> >  
> >  static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
> >  {
> >  	kasan_check_read(from, n);
> > -	return  __arch_copy_to_user(to, from, n);
> > +	return __arch_copy_to_user(__uaccess_mask_ptr(to), from, n);
> > +
> >  }
> 
> Can we please drop the trailing whitespace from each of these? That
> wasn't in the upstream commit or v4.9.y.

That was a mistake on my end, it seems. Fixed now. Thanks.

-- 
viresh

Patch

diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 693a0d784534..a25b8726ffa9 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -303,19 +303,20 @@  do {									\
 
 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
 
 static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	kasan_check_write(to, n);
-	return  __arch_copy_from_user(to, from, n);
+	return __arch_copy_from_user(to, __uaccess_mask_ptr(from), n);
+
 }
 
 static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	kasan_check_read(from, n);
-	return  __arch_copy_to_user(to, from, n);
+	return __arch_copy_to_user(__uaccess_mask_ptr(to), from, n);
+
 }
 
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
@@ -338,22 +339,25 @@  static inline unsigned long __must_check copy_to_user(void __user *to, const voi
 	return n;
 }
 
-static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
+static inline unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
 	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
-		n = __copy_in_user(to, from, n);
+		n = __arch_copy_in_user(__uaccess_mask_ptr(to), __uaccess_mask_ptr(from), n);
 	return n;
 }
+#define copy_in_user __copy_in_user
 
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
-static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
+extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
+static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
 {
 	if (access_ok(VERIFY_WRITE, to, n))
-		n = __clear_user(__uaccess_mask_ptr(to), n);
+		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
 	return n;
 }
+#define clear_user	__clear_user
 
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index c654df05b7d7..abe4e0984dbb 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -35,8 +35,8 @@  EXPORT_SYMBOL(clear_page);
 	/* user mem (segment) */
 EXPORT_SYMBOL(__arch_copy_from_user);
 EXPORT_SYMBOL(__arch_copy_to_user);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__copy_in_user);
+EXPORT_SYMBOL(__arch_clear_user);
+EXPORT_SYMBOL(__arch_copy_in_user);
 
 	/* physical memory */
 EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index a9723c71c52b..fc6bb0f83511 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -24,7 +24,7 @@ 
 
 	.text
 
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: int __arch_clear_user(void *addr, size_t sz)
  * Purpose  : clear some user memory
  * Params   : addr - user memory address to clear
  *          : sz   - number of bytes to clear
@@ -32,7 +32,7 @@ 
  *
  * Alignment fixed up by hardware.
  */
-ENTRY(__clear_user)
+ENTRY(__arch_clear_user)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
 	    CONFIG_ARM64_PAN)
 	mov	x2, x1			// save the size for fixup return
@@ -57,7 +57,7 @@  USER(9f, strb	wzr, [x0]	)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
 	    CONFIG_ARM64_PAN)
 	ret
-ENDPROC(__clear_user)
+ENDPROC(__arch_clear_user)
 
 	.section .fixup,"ax"
 	.align	2
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 81c8fc93c100..0219aa85b3cc 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -67,7 +67,7 @@ 
 	.endm
 
 end	.req	x5
-ENTRY(__copy_in_user)
+ENTRY(__arch_copy_in_user)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
@@ -76,7 +76,7 @@  ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0
 	ret
-ENDPROC(__copy_in_user)
+ENDPROC(__arch_copy_in_user)
 
 	.section .fixup,"ax"
 	.align	2