[13/13] vrange: Add checks in fault handler so we return SIGBUS on purged file pages

Message ID 1370916692-9576-14-git-send-email-john.stultz@linaro.org
State Superseded

Commit Message

John Stultz June 11, 2013, 2:11 a.m. UTC
The earlier fvrange() work didn't arrange for a SIGBUS fault when purged
file pages were accessed. This patch remedies that.
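
From userspace, the effect is that touching a purged page in a volatile
file range now raises SIGBUS, so the application can tell the contents
were discarded. A minimal sketch of what a process would observe (the
fvrange() call itself is elided, since its number and signature are
defined earlier in this series; the range is assumed to have been marked
volatile and then purged under memory pressure):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <signal.h>
	#include <sys/mman.h>
	#include <unistd.h>

	static void sigbus_handler(int sig, siginfo_t *info, void *ctx)
	{
		/* info->si_addr points into the purged range */
		write(2, "SIGBUS: page was purged\n", 24);
		_exit(1);
	}

	int main(void)
	{
		struct sigaction sa = { .sa_sigaction = sigbus_handler,
					.sa_flags = SA_SIGINFO };
		int fd = open("datafile", O_RDWR);
		char *p;

		if (fd < 0)
			return 1;
		sigaction(SIGBUS, &sa, NULL);
		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (p == MAP_FAILED)
			return 1;

		/* ... mark [0, 4096) volatile via fvrange(), kernel purges it ... */

		return p[0];	/* with this patch, faults with SIGBUS */
	}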

XXX: We likely need to consolidate the anonymous and file page methods
here. Using truncate_inode_page_ranges is easier for now than
walking over all the ptes and marking them w/ mkvrange().
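
A possible direction for that consolidation: is_purged_anon_vrange() and
the new is_purged_file_vrange() below differ only in which vrange_root
they start from, so the interval-tree lookup could be factored into a
common helper along these lines (a hypothetical sketch against this
series' structures, not part of the patch):

	/* Hypothetical helper: callers pass their own vrange_root, keeping
	 * the anon/file distinction only at the call sites. */
	static bool __is_purged_vrange(struct vrange_root *vroot,
					unsigned long start, unsigned long end)
	{
		struct interval_tree_node *node;
		struct vrange *range;
		bool ret = false;

		vrange_lock(vroot);
		node = interval_tree_iter_first(&vroot->v_rb, start, end);
		if (node) {
			range = container_of(node, struct vrange, node);
			if (range->purged)
				ret = true;
		}
		vrange_unlock(vroot);
		return ret;
	}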

Signed-off-by: John Stultz <john.stultz@linaro.org>
---
 include/linux/vrange.h |  5 ++++-
 mm/filemap.c           |  1 +
 mm/memory.c            | 13 ++++++++++++-
 mm/vrange.c            | 31 ++++++++++++++++++++++++++++---
 4 files changed, 45 insertions(+), 5 deletions(-)

Patch

diff --git a/include/linux/vrange.h b/include/linux/vrange.h
index bd36d67..ac60f76 100644
--- a/include/linux/vrange.h
+++ b/include/linux/vrange.h
@@ -58,7 +58,10 @@  int discard_vpage(struct page *page);
 bool vrange_address(struct mm_struct *mm, unsigned long start,
 			unsigned long end);
 
-extern bool is_purged_vrange(struct mm_struct *mm, unsigned long address);
+extern bool is_purged_anon_vrange(struct mm_struct *mm, unsigned long address);
+extern bool is_purged_file_vrange(struct address_space *mapping,
+					unsigned long offset);
+
 
 unsigned int discard_vrange_pages(struct zone *zone, int nr_to_discard);
 void lru_move_vrange_to_head(struct mm_struct *mm, unsigned long address);
diff --git a/mm/filemap.c b/mm/filemap.c
index 7905fe7..06c7997 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -33,6 +33,7 @@ 
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
 #include <linux/memcontrol.h>
 #include <linux/cleancache.h>
+#include <linux/vrange.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
diff --git a/mm/memory.c b/mm/memory.c
index 341c794..1783327 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3486,9 +3486,20 @@  static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
 		unsigned int flags, pte_t orig_pte)
 {
+	struct address_space *mapping = NULL;
+
 	pgoff_t pgoff = (((address & PAGE_MASK)
 			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
+
+	if (vma->vm_file)
+		mapping = vma->vm_file->f_mapping;
+
+	if (mapping)
+		if (is_purged_file_vrange(mapping, (pgoff<<PAGE_CACHE_SHIFT)))
+			return VM_FAULT_SIGBUS;
+
+
 	pte_unmap(page_table);
 	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
@@ -3723,7 +3734,7 @@  anon:
 		}
 
 		if (unlikely(pte_vrange(entry))) {
-			if (!is_purged_vrange(mm, address)) {
+			if (!is_purged_anon_vrange(mm, address)) {
 				lru_move_vrange_to_head(mm, address);
 				/* zap pte */
 				ptl = pte_lockptr(mm, pmd);
diff --git a/mm/vrange.c b/mm/vrange.c
index 84e9b91..c66535f 100644
--- a/mm/vrange.c
+++ b/mm/vrange.c
@@ -573,7 +573,7 @@  out:
 	return ret;
 }
 
-static int try_to_discard_vpage(struct page *page)
+static int try_to_discard_anon_vpage(struct page *page)
 {
 	struct anon_vma *anon_vma;
 	struct anon_vma_chain *avc;
@@ -647,7 +647,7 @@  int discard_vpage(struct page *page)
 	VM_BUG_ON(!PageLocked(page));
 	VM_BUG_ON(PageLRU(page));
 
-	if (try_to_discard_vpage(page)) {
+	if (try_to_discard_anon_vpage(page)) {
 		if (PageSwapCache(page))
 			try_to_free_swap(page);
 
@@ -661,7 +661,7 @@  int discard_vpage(struct page *page)
 	return 0;
 }
 
-bool is_purged_vrange(struct mm_struct *mm, unsigned long address)
+bool is_purged_anon_vrange(struct mm_struct *mm, unsigned long address)
 {
 	struct vrange_root *vroot = &mm->vroot;
 	struct interval_tree_node *node;
@@ -680,6 +680,31 @@  bool is_purged_vrange(struct mm_struct *mm, unsigned long address)
 	return ret;
 }
 
+
+bool is_purged_file_vrange(struct address_space *mapping, unsigned long offset)
+{
+	struct vrange_root *vroot = &mapping->vroot;
+	struct interval_tree_node *node;
+	struct vrange *range;
+	bool ret = false;
+
+
+	if (!vroot)
+		return false;
+
+	vrange_lock(vroot);
+	node = interval_tree_iter_first(&vroot->v_rb, offset,
+						offset + PAGE_SIZE - 1);
+	if (node) {
+		range = container_of(node, struct vrange, node);
+		if (range->purged)
+			ret = true;
+	}
+	vrange_unlock(vroot);
+	return ret;
+}
+
+
 static void vrange_pte_entry(pte_t pteval, unsigned long address,
 			unsigned ptent_size, struct mm_walk *walk)
 {