
[12/12] vrange: Add checks in fault handler so we return SIGBUS on purged file pages

Message ID 1367605636-18284-13-git-send-email-john.stultz@linaro.org
State Superseded

Commit Message

John Stultz May 3, 2013, 6:27 p.m. UTC
The earlier fvrange() work didn't raise SIGBUS faults when purged file
pages were accessed. This patch remedies that.

XXX: Likely need to consolidate anonymous and file page methods
here. Using truncate_inode_page_ranges is easier for now than
going over all the ptes and marking them w/ mkvrange().

Signed-off-by: John Stultz <john.stultz@linaro.org>
---
 include/linux/vrange.h |  5 ++++-
 mm/filemap.c           |  1 +
 mm/memory.c            | 13 ++++++++++++-
 mm/vrange.c            | 31 ++++++++++++++++++++++++++++---
 4 files changed, 45 insertions(+), 5 deletions(-)

Patch

diff --git a/include/linux/vrange.h b/include/linux/vrange.h
index 761dca0..599b044 100644
--- a/include/linux/vrange.h
+++ b/include/linux/vrange.h
@@ -57,7 +57,10 @@  int discard_vpage(struct page *page);
 bool vrange_address(struct mm_struct *mm, unsigned long start,
 			unsigned long end);
 
-extern bool is_purged_vrange(struct mm_struct *mm, unsigned long address);
+extern bool is_purged_anon_vrange(struct mm_struct *mm, unsigned long address);
+extern bool is_purged_file_vrange(struct address_space *mapping,
+					unsigned long offset);
+
 
 unsigned int discard_vrange_pages(struct zone *zone, int nr_to_discard);
 void lru_move_vrange_to_head(struct mm_struct *mm, unsigned long address);
diff --git a/mm/filemap.c b/mm/filemap.c
index e1979fd..4ba0ccc 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -33,6 +33,7 @@ 
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
 #include <linux/memcontrol.h>
 #include <linux/cleancache.h>
+#include <linux/vrange.h>
 #include "internal.h"
 
 /*
diff --git a/mm/memory.c b/mm/memory.c
index b22fa63..e38a1a1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3481,9 +3481,20 @@  static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
 		unsigned int flags, pte_t orig_pte)
 {
+	struct address_space *mapping = NULL;
+
 	pgoff_t pgoff = (((address & PAGE_MASK)
 			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
+
+	if (vma->vm_file)
+		mapping = vma->vm_file->f_mapping;
+
+	if (mapping)
+		if (is_purged_file_vrange(mapping, (pgoff<<PAGE_CACHE_SHIFT)))
+			return VM_FAULT_SIGBUS;
+
+
 	pte_unmap(page_table);
 	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
@@ -3718,7 +3729,7 @@  anon:
 		}
 
 		if (unlikely(pte_vrange(entry))) {
-			if (!is_purged_vrange(mm, address)) {
+			if (!is_purged_anon_vrange(mm, address)) {
 				lru_move_vrange_to_head(mm, address);
 				/* zap pte */
 				ptl = pte_lockptr(mm, pmd);
diff --git a/mm/vrange.c b/mm/vrange.c
index 5590677..344b4d7 100644
--- a/mm/vrange.c
+++ b/mm/vrange.c
@@ -599,7 +599,7 @@  out:
 	return ret;
 }
 
-static int try_to_discard_vpage(struct page *page)
+static int try_to_discard_anon_vpage(struct page *page)
 {
 	struct anon_vma *anon_vma;
 	struct anon_vma_chain *avc;
@@ -673,7 +673,7 @@  int discard_vpage(struct page *page)
 	VM_BUG_ON(!PageLocked(page));
 	VM_BUG_ON(PageLRU(page));
 
-	if (try_to_discard_vpage(page)) {
+	if (try_to_discard_anon_vpage(page)) {
 		if (PageSwapCache(page))
 			try_to_free_swap(page);
 
@@ -687,7 +687,7 @@  int discard_vpage(struct page *page)
 	return 0;
 }
 
-bool is_purged_vrange(struct mm_struct *mm, unsigned long address)
+bool is_purged_anon_vrange(struct mm_struct *mm, unsigned long address)
 {
 	struct vrange_root *vroot = &mm->vroot;
 	struct interval_tree_node *node;
@@ -706,6 +706,31 @@  bool is_purged_vrange(struct mm_struct *mm, unsigned long address)
 	return ret;
 }
 
+
+bool is_purged_file_vrange(struct address_space *mapping, unsigned long offset)
+{
+	struct vrange_root *vroot = &mapping->vroot;
+	struct interval_tree_node *node;
+	struct vrange *range;
+	bool ret = false;
+
+
+	if (!vroot)
+		return false;
+
+	vrange_lock(vroot);
+	node = interval_tree_iter_first(&vroot->v_rb, offset,
+						offset + PAGE_SIZE - 1);
+	if (node) {
+		range = container_of(node, struct vrange, node);
+		if (range->purged)
+			ret = true;
+	}
+	vrange_unlock(vroot);
+	return ret;
+}
+
+
 static void vrange_pte_entry(pte_t pteval, unsigned long address,
 			unsigned ptent_size, struct mm_walk *walk)
 {