
[06/13] vrange: Add GFP_NO_VRANGE allocation flag

Message ID 1370913139-9320-7-git-send-email-john.stultz@linaro.org
State Superseded

Commit Message

John Stultz June 11, 2013, 1:12 a.m. UTC
From: Minchan Kim <minchan@kernel.org>

When cloning the vroot tree during a fork, we have to
allocate memory while holding the vroot lock. This is problematic,
as the memory allocation can trigger reclaim, which might require
grabbing a vroot lock in order to find purgeable pages.

Thus this patch introduces __GFP_NO_VRANGE, which allows
us to avoid having an allocation made for vrange trigger any
volatile range purging.

Signed-off-by: Minchan Kim <minchan@kernel.org>
[jstultz: Split out from a different patch, created new commit message]
Signed-off-by: John Stultz <john.stultz@linaro.org>
---
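For context (not part of this patch): the reclaim side of the series is
expected to test this flag before trying to purge volatile ranges, along
the lines of the sketch below. discard_vrange_pages() and the use of
scan_control here are illustrative stand-ins for the real hooks added
elsewhere in the series, not code from this patch.

	static unsigned long shrink_volatile_pages(struct scan_control *sc,
						   unsigned long nr_to_scan)
	{
		/*
		 * The allocation that kicked off reclaim may already hold
		 * a vroot lock (e.g. vrange_fork()), so skip purging
		 * entirely rather than risk taking the lock recursively.
		 */
		if (sc->gfp_mask & __GFP_NO_VRANGE)
			return 0;

		return discard_vrange_pages(nr_to_scan);	/* hypothetical helper */
	}

Because the patch also adds __GFP_NO_VRANGE to GFP_RECLAIM_MASK, the flag
survives the gfp masking applied on the reclaim path, which is what lets a
check like the one above observe it.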
 include/linux/gfp.h | 7 +++++--
 mm/vrange.c         | 2 +-
 2 files changed, 6 insertions(+), 3 deletions(-)

Patch

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0f615eb..fa52199 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -35,6 +35,7 @@  struct vm_area_struct;
 #define ___GFP_NO_KSWAPD	0x400000u
 #define ___GFP_OTHER_NODE	0x800000u
 #define ___GFP_WRITE		0x1000000u
+#define ___GFP_NO_VRANGE	0x2000000u
 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
 
 /*
@@ -70,6 +71,7 @@  struct vm_area_struct;
 #define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)	/* Should access emergency pools? */
 #define __GFP_IO	((__force gfp_t)___GFP_IO)	/* Can start physical IO? */
 #define __GFP_FS	((__force gfp_t)___GFP_FS)	/* Can call down to low-level FS? */
+#define __GFP_NO_VRANGE ((__force gfp_t)___GFP_NO_VRANGE) /* Can't reclaim volatile pages */
 #define __GFP_COLD	((__force gfp_t)___GFP_COLD)	/* Cache-cold page required */
 #define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)	/* Suppress page allocation failure warning */
 #define __GFP_REPEAT	((__force gfp_t)___GFP_REPEAT)	/* See above */
@@ -99,7 +101,7 @@  struct vm_area_struct;
  */
 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
 
-#define __GFP_BITS_SHIFT 25	/* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 26	/* Room for N __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* This equals 0, but use constants in case they ever change */
@@ -134,7 +136,8 @@  struct vm_area_struct;
 /* Control page allocator reclaim behavior */
 #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
 			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
-			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
+			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
+			__GFP_NO_VRANGE)
 
 /* Control slab gfp mask during early boot */
 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
diff --git a/mm/vrange.c b/mm/vrange.c
index 0ab741e..914c109 100644
--- a/mm/vrange.c
+++ b/mm/vrange.c
@@ -204,7 +204,7 @@  int vrange_fork(struct mm_struct *new_mm, struct mm_struct *old_mm)
 		range = vrange_entry(next);
 		next = rb_next(next);
 
-		new_range = __vrange_alloc(GFP_KERNEL);
+		new_range = __vrange_alloc(GFP_KERNEL|__GFP_NO_VRANGE);
 		if (!new_range)
 			goto fail;
 		__vrange_set(new_range, range->node.start,