
[RFC,2/2] arm64: compat: cacheflush syscall: process only pages that are in the memory

Message ID 20180126111441.29353-2-m.szyprowski@samsung.com
State New
Series [RFC,1/2] arm: cacheflush syscall: process only pages that are in the memory

Commit Message

Marek Szyprowski Jan. 26, 2018, 11:14 a.m. UTC
In 32-bit ARM mode, glibc calls the cacheflush syscall on the whole textrels
section of a relocated binary. Relocation, however, usually touches only some
pages of that section, so not all of them have been read into memory by the
time the syscall is made. flush_cache_user_range() nevertheless touches every
page in the provided range unconditionally, adding the overhead of reading in
pages that are still clean. Optimize this by calling flush_cache_user_range()
only on the pages that are already present in memory.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>

---
 arch/arm64/kernel/sys_compat.c | 28 +++++++++++++++++++++-------
 1 file changed, 21 insertions(+), 7 deletions(-)

-- 
2.15.0
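
For context, a rough illustration of the 32-bit caller side this patch is
optimizing for: a compat process (e.g. the dynamic linker after applying text
relocations) flushing a whole text range through the ARM-private cacheflush
syscall. This is only a sketch; the helper name is made up, while the syscall
number and the zero flags argument follow the 32-bit ARM ABI.

/* Illustrative 32-bit (compat) caller; flush_text_range() is hypothetical. */
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __ARM_NR_cacheflush
#define __ARM_NR_cacheflush	0x0f0002	/* __ARM_NR_BASE + 2 on 32-bit ARM */
#endif

static int flush_text_range(unsigned long start, unsigned long end)
{
	/* The third argument (flags) must be zero. */
	return syscall(__ARM_NR_cacheflush, start, end, 0);
}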

Comments

Catalin Marinas Jan. 26, 2018, 5:41 p.m. UTC | #1
On Fri, Jan 26, 2018 at 12:14:41PM +0100, Marek Szyprowski wrote:
> @@ -32,23 +33,36 @@
>  static long
>  __do_compat_cache_op(unsigned long start, unsigned long end)
[...]
> +		if (follow_page(vma, start, 0)) {
> +			ret = __flush_cache_user_range(start, start + chunk);
> +			if (ret)
> +				goto done;
> +		}

This looks pretty expensive for pages already in memory. Could we do
some tricks with the AT instruction in __flush_cache_user_range() so
that we skip the flushing if the page isn't there? We know that when a
page is mapped, the cache will get cleaned/invalidated via set_pte_at()
+ sync_icache_dcache(), so the only problem of a race is flushing the
caches twice for a page. If a page gets unmapped after AT, we may bring
it back through the cache ops.

-- 
Catalin
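
A minimal sketch of the AT-based probe suggested above, assuming it were added
next to __flush_cache_user_range(); the helper name is made up and this is not
part of the posted patch. AT S1E0R requests a stage-1 EL0 read translation of
the address and reports the result in PAR_EL1; bit 0 (the F bit) is set when
the translation faults, i.e. when the page is not currently mapped.

/*
 * Hypothetical helper, sketch only: probe whether a user address
 * currently has a valid stage-1 translation before flushing it.
 */
static inline bool user_addr_mapped(unsigned long uaddr)
{
	unsigned long par;

	asm volatile(
	"	at	s1e0r, %1\n"	/* stage-1 EL0 read translation */
	"	isb\n"			/* make the PAR_EL1 update observable */
	"	mrs	%0, par_el1\n"
	: "=r" (par) : "r" (uaddr) : "memory");

	return !(par & 0x1);		/* F bit clear => page is mapped */
}

If the page is unmapped between this check and the D/I-cache maintenance, the
cache ops may simply bring it back, which is the benign race described above.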
Catalin Marinas Jan. 26, 2018, 6:02 p.m. UTC | #2
On Fri, Jan 26, 2018 at 05:41:45PM +0000, Catalin Marinas wrote:
> On Fri, Jan 26, 2018 at 12:14:41PM +0100, Marek Szyprowski wrote:
> > @@ -32,23 +33,36 @@
> >  static long
> >  __do_compat_cache_op(unsigned long start, unsigned long end)
> [...]
> > +		if (follow_page(vma, start, 0)) {
> > +			ret = __flush_cache_user_range(start, start + chunk);
> > +			if (ret)
> > +				goto done;
> > +		}
> 
> This looks pretty expensive for pages already in memory. Could we do
> some tricks with the AT instruction in __flush_cache_user_range() so
> that we skip the flushing if the page isn't there? We know that when a
> page is mapped, the cache will get cleaned/invalidated via set_pte_at()
> + sync_icache_dcache(), so the only problem of a race is flushing the
> caches twice for a page. If a page gets unmapped after AT, we may bring
> it back through the cache ops.

Alternatively, we could try to use pagefault_disable()/enable() around
this but we need to make sure we cover all the cases where a page may
not be flushed even when it is actually present (i.e. old pte). For
example, ptep_set_access_flags() skips __sync_icache_dcache().

-- 
Catalin
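
A rough sketch of the pagefault_disable() alternative, again hypothetical and
untested: with page faults disabled, a cache maintenance instruction that hits
a non-present page takes the exception-fixup path instead of faulting the page
in, so __flush_cache_user_range() returns -EFAULT and the loop can treat that
chunk as having nothing to flush (subject to the old-pte caveat above).

/*
 * Hypothetical variant of the loop in __do_compat_cache_op(), following
 * the pagefault_disable() idea; sketch only, not the posted patch.
 * Assumes the existing sys_compat.c context (linux/uaccess.h provides
 * pagefault_disable()/pagefault_enable()).
 */
static long
__do_compat_cache_op(unsigned long start, unsigned long end)
{
	long ret;

	do {
		unsigned long chunk = min(PAGE_SIZE, end - start);

		if (fatal_signal_pending(current))
			return 0;

		pagefault_disable();
		ret = __flush_cache_user_range(start, start + chunk);
		pagefault_enable();

		/*
		 * -EFAULT here means the cache op faulted on a page that is
		 * not resident; skip it.  A page faulted in later is cleaned
		 * and invalidated via set_pte_at() + __sync_icache_dcache(),
		 * modulo the old-pte case mentioned above.
		 */
		if (ret && ret != -EFAULT)
			return ret;

		cond_resched();
		start += chunk;
	} while (start < end);

	return 0;
}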

Patch

diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 8b8bbd3eaa52..4a047db3fdd4 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -25,6 +25,7 @@ 
 #include <linux/slab.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <linux/mm.h>
 
 #include <asm/cacheflush.h>
 #include <asm/unistd.h>
@@ -32,23 +33,36 @@ 
 static long
 __do_compat_cache_op(unsigned long start, unsigned long end)
 {
-	long ret;
+	struct vm_area_struct *vma = NULL;
+	long ret = 0;
 
+	down_read(&current->mm->mmap_sem);
 	do {
 		unsigned long chunk = min(PAGE_SIZE, end - start);
 
+		if (!vma || vma->vm_end <= start) {
+			vma = find_vma(current->mm, start);
+			if (!vma) {
+				ret = -EFAULT;
+				goto done;
+			}
+		}
+
 		if (fatal_signal_pending(current))
-			return 0;
+			goto done;
 
-		ret = __flush_cache_user_range(start, start + chunk);
-		if (ret)
-			return ret;
+		if (follow_page(vma, start, 0)) {
+			ret = __flush_cache_user_range(start, start + chunk);
+			if (ret)
+				goto done;
+		}
 
 		cond_resched();
 		start += chunk;
 	} while (start < end);
-
-	return 0;
+done:
+	up_read(&current->mm->mmap_sem);
+	return ret;
 }
 
 static inline long