[2/3] vrange: Add purged page detection on setting memory non-volatile

Message ID 1394749270-24750-2-git-send-email-john.stultz@linaro.org
State New

Commit Message

John Stultz March 13, 2014, 10:21 p.m. UTC
Users of volatile ranges will need to know if memory was discarded.
This patch adds the purged-state tracking required so that, when
userland marks memory as non-volatile, it can be told that some memory
in that range was purged and needs to be regenerated.

This is a simplified implementation that reuses some of the logic from
Minchan's earlier efforts, so credit to Minchan for his work.

Signed-off-by: John Stultz <john.stultz@linaro.org>
---
 include/linux/swap.h   | 15 ++++++++++++--
 include/linux/vrange.h | 13 ++++++++++++
 mm/vrange.c            | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 82 insertions(+), 2 deletions(-)
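
For context, here is a minimal userland sketch of how the purged flag
reported by this patch would be consumed. It assumes the
vrange(start, len, mode, &purged) syscall introduced elsewhere in this
series (matching do_vrange() below), with __NR_vrange standing in for
whatever syscall number the series assigns; both the wrapper shape and
the number are assumptions, not part of this patch:

/*
 * Hypothetical usage sketch -- not part of this patch.  Assumes the
 * vrange(start, len, mode, &purged) syscall added elsewhere in the
 * series; __NR_vrange is a stand-in for the (assumed) syscall number.
 * The mode values mirror include/linux/vrange.h from this patch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#define VRANGE_NONVOLATILE 0	/* from include/linux/vrange.h */
#define VRANGE_VOLATILE 1

int main(void)
{
	size_t len = 16 * 4096;
	int purged = 0;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		exit(1);
	memset(buf, 0xaa, len);		/* populate the "cache" */

	/* Tell the kernel it may reclaim this range under pressure. */
	syscall(__NR_vrange, (unsigned long)buf, len,
		VRANGE_VOLATILE, &purged);

	/* ... later, before reusing the cached data ... */

	/* Make it non-volatile again; this patch sets 'purged'. */
	syscall(__NR_vrange, (unsigned long)buf, len,
		VRANGE_NONVOLATILE, &purged);
	if (purged)
		printf("range was purged; regenerate the contents\n");

	return 0;
}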

Patch

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 46ba0c6..18c12f9 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -70,8 +70,19 @@  static inline int current_is_kswapd(void)
 #define SWP_HWPOISON_NUM 0
 #endif
 
-#define MAX_SWAPFILES \
-	((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
+
+/*
+ * Purged volatile range pages
+ */
+#define SWP_VRANGE_PURGED_NUM 1
+#define SWP_VRANGE_PURGED (MAX_SWAPFILES + SWP_HWPOISON_NUM + SWP_MIGRATION_NUM)
+
+
+#define MAX_SWAPFILES ((1 << MAX_SWAPFILES_SHIFT)	\
+				- SWP_MIGRATION_NUM	\
+				- SWP_HWPOISON_NUM	\
+				- SWP_VRANGE_PURGED_NUM	\
+			)
 
 /*
  * Magic header for a swap area. The first part of the union is
diff --git a/include/linux/vrange.h b/include/linux/vrange.h
index 652396b..c4a1616 100644
--- a/include/linux/vrange.h
+++ b/include/linux/vrange.h
@@ -1,7 +1,20 @@ 
 #ifndef _LINUX_VRANGE_H
 #define _LINUX_VRANGE_H
 
+#include <linux/swap.h>
+#include <linux/swapops.h>
+
 #define VRANGE_NONVOLATILE 0
 #define VRANGE_VOLATILE 1
 
+static inline swp_entry_t swp_entry_mk_vrange_purged(void)
+{
+	return swp_entry(SWP_VRANGE_PURGED, 0);
+}
+
+static inline int entry_is_vrange_purged(swp_entry_t entry)
+{
+	return swp_type(entry) == SWP_VRANGE_PURGED;
+}
+
 #endif /* _LINUX_VRANGE_H */
diff --git a/mm/vrange.c b/mm/vrange.c
index d9116b1..0214076 100644
--- a/mm/vrange.c
+++ b/mm/vrange.c
@@ -8,6 +8,59 @@ 
 #include <linux/mm_inline.h>
 #include "internal.h"
 
+struct vrange_walker {
+	struct vm_area_struct *vma;
+	int pages_purged;
+};
+
+static int vrange_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+				struct mm_walk *walk)
+{
+	struct vrange_walker *vw = walk->private;
+	struct vm_area_struct *uninitialized_var(vma);
+	pte_t *pte;
+	spinlock_t *ptl;
+
+	vma = vw->vma;
+	split_huge_page_pmd(vma, addr, pmd);
+	if (pmd_trans_unstable(pmd))
+		return 0;
+
+	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	for (; addr != end; pte++, addr += PAGE_SIZE) {
+		if (!pte_present(*pte)) {
+			swp_entry_t vrange_entry = pte_to_swp_entry(*pte);
+
+			if (unlikely(entry_is_vrange_purged(vrange_entry)))
+				vw->pages_purged = 1;
+		}
+	}
+	pte_unmap_unlock(pte - 1, ptl);
+	cond_resched();
+
+	return 0;
+}
+
+static unsigned long vrange_check_purged(struct mm_struct *mm,
+					 struct vm_area_struct *vma,
+					 unsigned long start,
+					 unsigned long end)
+{
+	struct vrange_walker vw;
+	struct mm_walk vrange_walk = {
+		.pmd_entry = vrange_pte_range,
+		.mm = vma->vm_mm,
+		.private = &vw,
+	};
+	vw.pages_purged = 0;
+	vw.vma = vma;
+
+	walk_page_range(start, end, &vrange_walk);
+
+	return vw.pages_purged;
+
+}
+
 static ssize_t do_vrange(struct mm_struct *mm, unsigned long start,
 				unsigned long end, int mode, int *purged)
 {
@@ -57,6 +110,9 @@  static ssize_t do_vrange(struct mm_struct *mm, unsigned long start,
 			break;
 			case VRANGE_NONVOLATILE:
 				new_flags &= ~VM_VOLATILE;
+				lpurged |= vrange_check_purged(mm, vma,
+								vma->vm_start,
+								vma->vm_end);
 		}
 
 		pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
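
The marking side these helpers exist for lives in the rest of the
series; as a rough sketch (an assumption about the purge path, not
code from it), each discarded page's pte would be stamped with a
non-present entry of type SWP_VRANGE_PURGED, which is exactly what
vrange_pte_range() above looks for. The helper name below is
hypothetical:

/*
 * Sketch only -- how a purge path could record a discarded page so
 * that vrange_pte_range() later reports it.  mark_pte_purged() is a
 * made-up name; swp_entry_to_pte()/set_pte_at() are the standard
 * swapops/pgtable interfaces.  Locking and TLB flushing are elided.
 */
#include <linux/mm.h>
#include <linux/vrange.h>

static void mark_pte_purged(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep)
{
	/* Non-present pte whose swap type is SWP_VRANGE_PURGED. */
	set_pte_at(mm, addr, ptep,
			swp_entry_to_pte(swp_entry_mk_vrange_purged()));
}

On the lookup side, pte_to_swp_entry() plus the new
entry_is_vrange_purged() recovers the flag, which is why
vrange_pte_range() only inspects non-present ptes.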