
[12/13] vrange: Enable purging of file backed volatile ranges

Message ID: 1370913139-9320-13-git-send-email-john.stultz@linaro.org
State: Superseded

Commit Message

John Stultz, June 11, 2013, 1:12 a.m. UTC
Rework the victim range selection to also support
file-backed volatile ranges.

Signed-off-by: John Stultz <john.stultz@linaro.org>
---
 include/linux/vrange.h |  10 +++++
 mm/vrange.c            | 112 ++++++++++++++++++++++++++++++++++++-------------
 2 files changed, 92 insertions(+), 30 deletions(-)
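
For review purposes, the sketch below condenses how the purge path looks with this patch applied: victim selection pins the range and its owner (the mm for anonymous ranges, the backing inode for file-backed ones), discard dispatches on the range type, and the matching references are dropped afterwards. The function name purge_volatile_pages() and its simplified loop are illustrative only and are not part of the patch; the actual driver is discard_vrange_pages(), which walks the LRU round-robin as shown in the diff.

/*
 * Illustrative sketch only -- not part of this patch. It condenses the
 * get/discard/put cycle using the helpers introduced or reworked below;
 * the real caller is discard_vrange_pages().
 */
static unsigned int purge_volatile_pages(struct zone *zone, int nr_to_discard)
{
	unsigned int nr_discarded = 0;
	struct vrange *vrange;

	while (nr_discarded < nr_to_discard) {
		/*
		 * Pops a range off the volatile-range LRU and pins it:
		 * the vrange refcount plus either the owning mm
		 * (VRANGE_MM) or the backing inode (VRANGE_FILE), so the
		 * owner cannot disappear while we purge outside lru_lock.
		 */
		vrange = get_victim_vrange();
		if (!vrange)
			break;

		/*
		 * Dispatches on the owner type: anonymous ranges take the
		 * existing mm/vma based path, file-backed ranges call
		 * truncate_inode_pages_range() over the range and mark it
		 * purged.
		 */
		nr_discarded += discard_vrange(zone, vrange,
					       nr_to_discard - nr_discarded);

		/* Drop the references taken above (mmdrop() or iput()). */
		put_victim_range(vrange);
	}

	return nr_discarded;
}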

Patch

diff --git a/include/linux/vrange.h b/include/linux/vrange.h
index b6e8b99..bd36d67 100644
--- a/include/linux/vrange.h
+++ b/include/linux/vrange.h
@@ -3,6 +3,7 @@ 
 
 #include <linux/vrange_types.h>
 #include <linux/mm.h>
+#include <linux/fs.h>
 
 #define vrange_entry(ptr) \
 	container_of(ptr, struct vrange, node.rb)
@@ -38,6 +39,15 @@  static inline struct mm_struct *vrange_get_owner_mm(struct vrange *vrange)
 	return container_of(vrange->owner, struct mm_struct, vroot);
 }
 
+static inline
+struct address_space *vrange_get_owner_mapping(struct vrange *vrange)
+{
+	if (vrange_type(vrange) != VRANGE_FILE)
+		return NULL;
+	return container_of(vrange->owner, struct address_space, vroot);
+}
+
+
 void vrange_init(void);
 extern int vrange_clear(struct vrange_root *vroot,
 				unsigned long start, unsigned long end);
diff --git a/mm/vrange.c b/mm/vrange.c
index e9ea728..84e9b91 100644
--- a/mm/vrange.c
+++ b/mm/vrange.c
@@ -757,8 +757,9 @@  unsigned int discard_vma_pages(struct zone *zone, struct mm_struct *mm,
 	return ret;
 }
 
-unsigned int discard_vrange(struct zone *zone, struct vrange *vrange,
-				int nr_to_discard)
+static unsigned int discard_anon_vrange(struct zone *zone,
+					struct vrange *vrange,
+					int nr_to_discard)
 {
 	struct mm_struct *mm;
 	unsigned long start = vrange->node.start;
@@ -799,46 +800,91 @@  out:
 	return nr_discarded;
 }
 
+static unsigned int discard_file_vrange(struct zone *zone,
+					struct vrange *vrange,
+					int nr_to_discard)
+{
+	struct address_space *mapping;
+	unsigned long start = vrange->node.start;
+	unsigned long end = vrange->node.last;
+	unsigned long count = ((end-start) >> PAGE_CACHE_SHIFT);
+
+	mapping = vrange_get_owner_mapping(vrange);
+
+	truncate_inode_pages_range(mapping, start, end);
+	vrange->purged = true;
+
+	return count;
+}
+
+unsigned int discard_vrange(struct zone *zone, struct vrange *vrange,
+				int nr_to_discard)
+{
+	if (vrange_type(vrange) == VRANGE_MM)
+		return discard_anon_vrange(zone, vrange, nr_to_discard);
+	return discard_file_vrange(zone, vrange, nr_to_discard);
+}
+
+
+/* Take a vrange refcount and depending on the type
+ * the vrange->owner's mm refcount or inode refcount
+ */
+static int hold_victim_vrange(struct vrange *vrange)
+{
+	if (vrange_type(vrange) == VRANGE_MM) {
+		struct mm_struct *mm = vrange_get_owner_mm(vrange);
+
+
+		if (atomic_read(&mm->mm_users) == 0)
+			return -1;
+
+
+		if (!atomic_inc_not_zero(&vrange->refcount))
+			return -1;
+		/*
+		 * we need to access mmap_sem further routine so
+		 * need to get a refcount of mm.
+		 * NOTE: We guarantee mm_count isn't zero in here because
+		 * if we found vrange from LRU list, it means we are
+		 * before exit_vrange or remove_vrange.
+		 */
+		atomic_inc(&mm->mm_count);
+	} else {
+		struct address_space *mapping;
+		mapping = vrange_get_owner_mapping(vrange);
+
+		if (!atomic_inc_not_zero(&vrange->refcount))
+			return -1;
+		__iget(mapping->host);
+	}
+
+	return 0;
+}
+
+
+
 /*
- * Get next victim vrange from LRU and hold a vrange refcount
- * and vrange->mm's refcount.
+ * Get next victim vrange from LRU and hold needed refcounts.
  */
 struct vrange *get_victim_vrange(void)
 {
-	struct mm_struct *mm;
 	struct vrange *vrange = NULL;
 	struct list_head *cur, *tmp;
 
 	spin_lock(&lru_lock);
 	list_for_each_prev_safe(cur, tmp, &lru_vrange) {
 		vrange = list_entry(cur, struct vrange, lru);
-		mm = vrange_get_owner_mm(vrange);
-		/* the process is exiting so pass it */
-		if (atomic_read(&mm->mm_users) == 0) {
-			list_del_init(&vrange->lru);
-			vrange = NULL;
-			continue;
-		}
 
-		/* vrange is freeing so continue to loop */
-		if (!atomic_inc_not_zero(&vrange->refcount)) {
+		if (hold_victim_vrange(vrange)) {
 			list_del_init(&vrange->lru);
 			vrange = NULL;
 			continue;
 		}
 
-		/*
-		 * we need to access mmap_sem further routine so
-		 * need to get a refcount of mm.
-		 * NOTE: We guarantee mm_count isn't zero in here because
-		 * if we found vrange from LRU list, it means we are
-		 * before exit_vrange or remove_vrange.
-		 */
-		atomic_inc(&mm->mm_count);
-
 		/* Isolate vrange */
 		list_del_init(&vrange->lru);
 		break;
+
 	}
 
 	spin_unlock(&lru_lock);
@@ -847,9 +893,18 @@  struct vrange *get_victim_vrange(void)
 
 void put_victim_range(struct vrange *vrange)
 {
-	struct mm_struct *mm = vrange_get_owner_mm(vrange);
 	put_vrange(vrange);
-	mmdrop(mm);
+
+	if (vrange_type(vrange) == VRANGE_MM) {
+		struct mm_struct *mm = vrange_get_owner_mm(vrange);
+
+		mmdrop(mm);
+	} else {
+		struct address_space *mapping;
+
+		mapping = vrange_get_owner_mapping(vrange);
+		iput(mapping->host);
+	}
 }
 
 unsigned int discard_vrange_pages(struct zone *zone, int nr_to_discard)
@@ -858,11 +913,8 @@  unsigned int discard_vrange_pages(struct zone *zone, int nr_to_discard)
 	unsigned int nr_discarded = 0;
 
 	start_vrange = vrange = get_victim_vrange();
-	if (start_vrange) {
-		struct mm_struct *mm = vrange_get_owner_mm(start_vrange);
-		atomic_inc(&start_vrange->refcount);
-		atomic_inc(&mm->mm_count);
-	}
+	if (start_vrange)
+		hold_victim_vrange(start_vrange);
 
 	while (vrange) {
 		nr_discarded += discard_vrange(zone, vrange, nr_to_discard);