@@ -19,6 +19,11 @@
struct page;
#include <linux/range.h>
+
+#ifdef CONFIG_UNACCEPTED_MEMORY
+#include <asm/unaccepted_memory.h>
+#endif
+
extern struct range pfn_mapped[];
extern int nr_pfn_mapped;
@@ -6,9 +6,12 @@
#include <linux/types.h>
struct boot_params;
+struct page;
void mark_unaccepted(struct boot_params *params, u64 start, u64 num);
void accept_memory(phys_addr_t start, phys_addr_t end);
+void maybe_mark_page_unaccepted(struct page *page, unsigned int order);
+void accept_page(struct page *page, unsigned int order);
#endif
@@ -60,3 +60,5 @@ obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_amd.o
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_identity.o
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o
+
+obj-$(CONFIG_UNACCEPTED_MEMORY) += unaccepted_memory.o
new file mode 100644
@@ -0,0 +1,90 @@
+#include <linux/memblock.h>
+#include <linux/mm.h>
+#include <linux/pfn.h>
+#include <linux/spinlock.h>
+
+#include <asm/io.h>
+#include <asm/setup.h>
+#include <asm/unaccepted_memory.h>
+
+static DEFINE_SPINLOCK(unaccepted_memory_lock);
+
+#define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT)
+
+/*
+ * Accept the physical range [start, end) by walking the PMD-granular
+ * bitmap at boot_params.unaccepted_memory: each set bit marks one
+ * PMD_SIZE chunk that has not yet been accepted from the host/firmware.
+ *
+ * Caller must hold unaccepted_memory_lock and have verified that
+ * boot_params.unaccepted_memory is non-zero.
+ */
+static void __accept_memory(phys_addr_t start, phys_addr_t end)
+{
+ unsigned long *unaccepted_memory;
+ unsigned int rs, re;
+
+ unaccepted_memory = __va(boot_params.unaccepted_memory);
+ rs = start / PMD_SIZE;
+
+ for_each_set_bitrange_from(rs, re, unaccepted_memory,
+ DIV_ROUND_UP(end, PMD_SIZE)) {
+ /*
+ * Platform-specific memory-acceptance call goes here.
+ * panic() is a placeholder until a backend (e.g. TDX) is
+ * wired up; bitmap_clear() below marks the range accepted.
+ */
+ panic("Cannot accept memory");
+ bitmap_clear(unaccepted_memory, rs, re - rs);
+ }
+}
+
+/*
+ * Accept the physical range [start, end) if the platform has an
+ * unaccepted-memory bitmap. No-op when boot_params.unaccepted_memory
+ * is zero (no unaccepted memory on this boot).
+ *
+ * Takes unaccepted_memory_lock with IRQs disabled, so it is safe to
+ * call from any context that may allocate memory.
+ */
+void accept_memory(phys_addr_t start, phys_addr_t end)
+{
+ unsigned long flags;
+ if (!boot_params.unaccepted_memory)
+ return;
+
+ spin_lock_irqsave(&unaccepted_memory_lock, flags);
+ __accept_memory(start, end);
+ spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
+}
+
+/*
+ * Check the unaccepted-memory bitmap for the 2^order pages starting at
+ * @page and set PageBuddyUnaccepted on the head page if any covered
+ * PMD-sized chunk is still unaccepted.
+ *
+ * Pages smaller than a PMD can never be partially unaccepted: the
+ * BUG_ON asserts their containing PMD chunk was already accepted.
+ */
+void __init maybe_mark_page_unaccepted(struct page *page, unsigned int order)
+{
+ unsigned long *unaccepted_memory;
+ phys_addr_t addr = page_to_phys(page);
+ unsigned long flags;
+ bool unaccepted = false;
+ unsigned int i;
+
+ /* Nothing to do if the platform reports no unaccepted memory */
+ if (!boot_params.unaccepted_memory)
+ return;
+
+ unaccepted_memory = __va(boot_params.unaccepted_memory);
+ spin_lock_irqsave(&unaccepted_memory_lock, flags);
+ if (order < PMD_ORDER) {
+ BUG_ON(test_bit(addr / PMD_SIZE, unaccepted_memory));
+ goto out;
+ }
+
+ /* Scan every PMD-sized chunk the page spans */
+ for (i = 0; i < (1 << (order - PMD_ORDER)); i++) {
+ if (test_bit(addr / PMD_SIZE + i, unaccepted_memory)) {
+ unaccepted = true;
+ break;
+ }
+ }
+
+ /* At least part of the page is unaccepted */
+ if (unaccepted)
+ __SetPageBuddyUnaccepted(page);
+out:
+ spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
+}
+
+/*
+ * Accept the memory backing @page (2^order pages) and clear
+ * PageBuddyUnaccepted on every constituent page.
+ *
+ * Acceptance is PMD-granular, so the range is widened: the start
+ * address is rounded down to a PMD boundary and the order is raised to
+ * at least PMD_ORDER, which may accept memory beyond the page itself.
+ * @page is rebased to the rounded-down address before the flag sweep.
+ */
+void accept_page(struct page *page, unsigned int order)
+{
+ phys_addr_t addr = round_down(page_to_phys(page), PMD_SIZE);
+ int i;
+
+ /* Callers should not get here without an unaccepted-memory bitmap */
+ WARN_ON_ONCE(!boot_params.unaccepted_memory);
+
+ page = pfn_to_page(addr >> PAGE_SHIFT);
+ if (order < PMD_ORDER)
+ order = PMD_ORDER;
+
+ accept_memory(addr, addr + (PAGE_SIZE << order));
+
+ /* Clear the flag on every page covered by the widened range */
+ for (i = 0; i < (1 << order); i++) {
+ if (PageBuddyUnaccepted(page + i))
+ __ClearPageBuddyUnaccepted(page + i);
+ }
+}
Core-mm requires a few helpers to support unaccepted memory: - accept_memory() checks the range of addresses against the bitmap and accepts memory if needed; - maybe_mark_page_unaccepted() checks the bitmap and marks a page with PageBuddyUnaccepted() if memory acceptance is required on the first allocation of the page. - accept_page() accepts memory for the page and clears PageBuddyUnaccepted(). Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> --- arch/x86/include/asm/page.h | 5 ++ arch/x86/include/asm/unaccepted_memory.h | 3 + arch/x86/mm/Makefile | 2 + arch/x86/mm/unaccepted_memory.c | 90 ++++++++++++++++++++++++ 4 files changed, 100 insertions(+) create mode 100644 arch/x86/mm/unaccepted_memory.c