
[Xen-devel,v5,11/16] xen/mm: Switch some of page_alloc.c to typesafe MFN

Message ID 20180314182009.14274-12-julien.grall@arm.com
State Superseded
Series xen: Convert page_to_mfn and mfn_to_page to use typesafe MFN

Commit Message

Julien Grall March 14, 2018, 6:20 p.m. UTC
From: Julien Grall <julien.grall@arm.com>

No functional change intended.

Signed-off-by: Julien Grall <julien.grall@arm.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>

---
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: George Dunlap <George.Dunlap@eu.citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Tim Deegan <tim@xen.org>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Julien Grall <julien.grall@arm.com>

Wei, I have kept the reviewed-by because the changes were minor. Let
me know if you want me to drop it.

    Changes in v5:
        - Add Wei's reviewed-by
        - Fix coding style (space before and after '+')
        - Rework the commit title as page_alloc.c was not fully
        converted to typesafe MFN.

    Changes in v4:
        - Patch added
---
 xen/common/page_alloc.c    | 64 ++++++++++++++++++++++++++--------------------
 xen/include/asm-arm/numa.h |  8 +++---
 2 files changed, 41 insertions(+), 31 deletions(-)
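
For readers new to the series: the typesafe pattern wraps the raw frame
number in a one-member struct, so accidentally mixing MFNs with plain
integers becomes a compile-time error while costing nothing at runtime.
A minimal stand-alone sketch, modelled on xen/include/xen/typesafe.h and
xen/include/xen/mm.h (the main() harness and the simplified helper and
macro bodies here are illustrative assumptions, not Xen code verbatim):

    /* Sketch of the typesafe MFN wrapper; simplified for illustration. */
    #include <stdio.h>

    #define TYPE_SAFE(_type, _name)                                     \
        typedef struct { _type _name; } _name##_t;                      \
        static inline _name##_t _##_name(_type n)                       \
            { return (_name##_t){ n }; }                                \
        static inline _type _name##_x(_name##_t n) { return n._name; }

    TYPE_SAFE(unsigned long, mfn);

    /* Simplified stand-ins for helpers the patch uses. */
    #define INVALID_MFN_INITIALIZER { ~0UL }

    static inline mfn_t mfn_add(mfn_t mfn, unsigned long i)
    {
        return _mfn(mfn_x(mfn) + i);
    }

    static inline mfn_t mfn_min(mfn_t x, mfn_t y)
    {
        return mfn_x(x) < mfn_x(y) ? x : y;
    }

    int main(void)
    {
        mfn_t first_valid = INVALID_MFN_INITIALIZER;
        mfn_t a = _mfn(0x1000);

        /* "a + 1" or "a < first_valid" would not compile: mfn_t is a
         * struct, so arithmetic must go through the typed helpers. */
        first_valid = mfn_min(a, first_valid);
        printf("first_valid_mfn = %#lx\n", mfn_x(mfn_add(first_valid, 1)));
        return 0;
    }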

Comments

George Dunlap March 15, 2018, 5:02 p.m. UTC | #1
On 03/14/2018 06:20 PM, julien.grall@arm.com wrote:
> From: Julien Grall <julien.grall@arm.com>
> 
> No functional change intended.
> 
> Signed-off-by: Julien Grall <julien.grall@arm.com>
> Reviewed-by: Wei Liu <wei.liu2@citrix.com>

This patch by itself doesn't look like it actually makes things simpler
in general, but I'll trust that it will in the future:

Reviewed-by: George Dunlap <george.dunlap@citrix.com>
Julien Grall March 15, 2018, 6:07 p.m. UTC | #2
Hi George,

Thank you for the review.

On 15/03/18 17:02, George Dunlap wrote:
> On 03/14/2018 06:20 PM, julien.grall@arm.com wrote:
>> From: Julien Grall <julien.grall@arm.com>
>>
>> No functional change intended.
>>
>> Signed-off-by: Julien Grall <julien.grall@arm.com>
>> Reviewed-by: Wei Liu <wei.liu2@citrix.com>
> 
> This patch by itself doesn't look like it actually makes things simpler
> in general, but I'll trust that it will in the future:

I think it would be possible to do further clean-up. What I wanted to
avoid is having mfn_x(page_to_mfn(pg)) all around the code after patch
#16, when the override is dropped.
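
To make that concrete, a rough stand-alone sketch of the override at
work (frame_table and __page_to_mfn() are stubbed here, so this shows
the shape of the code, not Xen verbatim):

    #include <stdio.h>

    typedef struct { unsigned long mfn; } mfn_t;
    static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ m }; }
    static inline unsigned long mfn_x(mfn_t m) { return m.mfn; }

    struct page_info { unsigned long count_info; };
    static struct page_info frame_table[32];

    /* Legacy integer-based helper (asm/page.h in real Xen). */
    #define __page_to_mfn(pg) ((unsigned long)((pg) - frame_table))

    /* The transitional override this patch adds to page_alloc.c: */
    #define page_to_mfn(pg) _mfn(__page_to_mfn(pg))

    int main(void)
    {
        struct page_info *pg = &frame_table[5];

        /* Converted call sites take the typesafe value directly... */
        mfn_t mfn = page_to_mfn(pg);

        /*
         * ...so when the final patch switches page_to_mfn() to return
         * mfn_t everywhere and drops this override, these call sites
         * are already correct and need no mfn_x() unwrapping.
         */
        printf("pg maps to mfn %#lx\n", mfn_x(mfn));
        return 0;
    }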

> 
> Reviewed-by: George Dunlap <george.dunlap@citrix.com>
> 

Cheers,

Patch

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 4de8988bea..6e50fb2621 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -151,6 +151,12 @@ 
 #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
 #endif
 
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef page_to_mfn
+#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
+
 /*
  * Comma-separated list of hexadecimal page numbers containing bad bytes.
  * e.g. 'badpage=0x3f45,0x8a321'.
@@ -197,7 +203,7 @@  PAGE_LIST_HEAD(page_broken_list);
 * first_valid_mfn is exported because it is used in ARM specific NUMA
  * helpers. See comment in asm-arm/numa.h.
  */
-unsigned long first_valid_mfn = ~0UL;
+mfn_t first_valid_mfn = INVALID_MFN_INITIALIZER;
 
 static struct bootmem_region {
     unsigned long s, e; /* MFNs @s through @e-1 inclusive are free */
@@ -283,7 +289,7 @@  void __init init_boot_pages(paddr_t ps, paddr_t pe)
     if ( pe <= ps )
         return;
 
-    first_valid_mfn = min_t(unsigned long, ps >> PAGE_SHIFT, first_valid_mfn);
+    first_valid_mfn = mfn_min(maddr_to_mfn(ps), first_valid_mfn);
 
     bootmem_region_add(ps >> PAGE_SHIFT, pe >> PAGE_SHIFT);
 
@@ -397,7 +403,7 @@  mfn_t __init alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align)
 
 #define bits_to_zone(b) (((b) < (PAGE_SHIFT + 1)) ? 1 : ((b) - PAGE_SHIFT))
 #define page_to_zone(pg) (is_xen_heap_page(pg) ? MEMZONE_XEN :  \
-                          (flsl(page_to_mfn(pg)) ? : 1))
+                          (flsl(mfn_x(page_to_mfn(pg))) ? : 1))
 
 typedef struct page_list_head heap_by_zone_and_order_t[NR_ZONES][MAX_ORDER+1];
 static heap_by_zone_and_order_t *_heap[MAX_NUMNODES];
@@ -729,7 +735,7 @@  static void page_list_add_scrub(struct page_info *pg, unsigned int node,
 static void poison_one_page(struct page_info *pg)
 {
 #ifdef CONFIG_SCRUB_DEBUG
-    mfn_t mfn = _mfn(page_to_mfn(pg));
+    mfn_t mfn = page_to_mfn(pg);
     uint64_t *ptr;
 
     if ( !scrub_debug )
@@ -744,7 +750,7 @@  static void poison_one_page(struct page_info *pg)
 static void check_one_page(struct page_info *pg)
 {
 #ifdef CONFIG_SCRUB_DEBUG
-    mfn_t mfn = _mfn(page_to_mfn(pg));
+    mfn_t mfn = page_to_mfn(pg);
     const uint64_t *ptr;
     unsigned int i;
 
@@ -992,7 +998,8 @@  static struct page_info *alloc_heap_pages(
         /* Ensure cache and RAM are consistent for platforms where the
          * guest can control its own visibility of/through the cache.
          */
-        flush_page_to_ram(page_to_mfn(&pg[i]), !(memflags & MEMF_no_icache_flush));
+        flush_page_to_ram(mfn_x(page_to_mfn(&pg[i])),
+                          !(memflags & MEMF_no_icache_flush));
     }
 
     spin_unlock(&heap_lock);
@@ -1344,7 +1351,8 @@  bool scrub_free_pages(void)
 static void free_heap_pages(
     struct page_info *pg, unsigned int order, bool need_scrub)
 {
-    unsigned long mask, mfn = page_to_mfn(pg);
+    unsigned long mask;
+    mfn_t mfn = page_to_mfn(pg);
     unsigned int i, node = phys_to_nid(page_to_maddr(pg)), tainted = 0;
     unsigned int zone = page_to_zone(pg);
 
@@ -1381,7 +1389,7 @@  static void free_heap_pages(
 
         /* This page is not a guest frame any more. */
         page_set_owner(&pg[i], NULL); /* set_gpfn_from_mfn snoops pg owner */
-        set_gpfn_from_mfn(mfn + i, INVALID_M2P_ENTRY);
+        set_gpfn_from_mfn(mfn_x(mfn) + i, INVALID_M2P_ENTRY);
 
         if ( need_scrub )
         {
@@ -1409,12 +1417,12 @@  static void free_heap_pages(
     {
         mask = 1UL << order;
 
-        if ( (page_to_mfn(pg) & mask) )
+        if ( (mfn_x(page_to_mfn(pg)) & mask) )
         {
             struct page_info *predecessor = pg - mask;
 
             /* Merge with predecessor block? */
-            if ( !mfn_valid(_mfn(page_to_mfn(predecessor))) ||
+            if ( !mfn_valid(page_to_mfn(predecessor)) ||
                  !page_state_is(predecessor, free) ||
                  (PFN_ORDER(predecessor) != order) ||
                  (phys_to_nid(page_to_maddr(predecessor)) != node) )
@@ -1437,7 +1445,7 @@  static void free_heap_pages(
             struct page_info *successor = pg + mask;
 
             /* Merge with successor block? */
-            if ( !mfn_valid(_mfn(page_to_mfn(successor))) ||
+            if ( !mfn_valid(page_to_mfn(successor)) ||
                  !page_state_is(successor, free) ||
                  (PFN_ORDER(successor) != order) ||
                  (phys_to_nid(page_to_maddr(successor)) != node) )
@@ -1470,7 +1478,7 @@  static unsigned long mark_page_offline(struct page_info *pg, int broken)
 {
     unsigned long nx, x, y = pg->count_info;
 
-    ASSERT(page_is_ram_type(page_to_mfn(pg), RAM_TYPE_CONVENTIONAL));
+    ASSERT(page_is_ram_type(mfn_x(page_to_mfn(pg)), RAM_TYPE_CONVENTIONAL));
     ASSERT(spin_is_locked(&heap_lock));
 
     do {
@@ -1533,7 +1541,7 @@  int offline_page(unsigned long mfn, int broken, uint32_t *status)
     }
 
     *status = 0;
-    pg = mfn_to_page(mfn);
+    pg = mfn_to_page(_mfn(mfn));
 
     if ( is_xen_fixed_mfn(mfn) )
     {
@@ -1640,7 +1648,7 @@  unsigned int online_page(unsigned long mfn, uint32_t *status)
         return -EINVAL;
     }
 
-    pg = mfn_to_page(mfn);
+    pg = mfn_to_page(_mfn(mfn));
 
     spin_lock(&heap_lock);
 
@@ -1694,7 +1702,7 @@  int query_page_offline(unsigned long mfn, uint32_t *status)
     *status = 0;
     spin_lock(&heap_lock);
 
-    pg = mfn_to_page(mfn);
+    pg = mfn_to_page(_mfn(mfn));
 
     if ( page_state_is(pg, offlining) )
         *status |= PG_OFFLINE_STATUS_OFFLINE_PENDING;
@@ -1726,7 +1734,7 @@  static void init_heap_pages(
      * Update first_valid_mfn to ensure those regions are covered.
      */
     spin_lock(&heap_lock);
-    first_valid_mfn = min_t(unsigned long, page_to_mfn(pg), first_valid_mfn);
+    first_valid_mfn = mfn_min(page_to_mfn(pg), first_valid_mfn);
     spin_unlock(&heap_lock);
 
     for ( i = 0; i < nr_pages; i++ )
@@ -1735,14 +1743,14 @@  static void init_heap_pages(
 
         if ( unlikely(!avail[nid]) )
         {
-            unsigned long s = page_to_mfn(pg + i);
-            unsigned long e = page_to_mfn(pg + nr_pages - 1) + 1;
+            unsigned long s = mfn_x(page_to_mfn(pg + i));
+            unsigned long e = mfn_x(mfn_add(page_to_mfn(pg + nr_pages - 1), 1));
             bool_t use_tail = (nid == phys_to_nid(pfn_to_paddr(e - 1))) &&
                               !(s & ((1UL << MAX_ORDER) - 1)) &&
                               (find_first_set_bit(e) <= find_first_set_bit(s));
             unsigned long n;
 
-            n = init_node_heap(nid, page_to_mfn(pg+i), nr_pages - i,
+            n = init_node_heap(nid, mfn_x(page_to_mfn(pg + i)), nr_pages - i,
                                &use_tail);
             BUG_ON(i + n > nr_pages);
             if ( n && !use_tail )
@@ -1796,7 +1804,7 @@  void __init end_boot_allocator(void)
         if ( (r->s < r->e) &&
              (phys_to_nid(pfn_to_paddr(r->s)) == cpu_to_node(0)) )
         {
-            init_heap_pages(mfn_to_page(r->s), r->e - r->s);
+            init_heap_pages(mfn_to_page(_mfn(r->s)), r->e - r->s);
             r->e = r->s;
             break;
         }
@@ -1805,7 +1813,7 @@  void __init end_boot_allocator(void)
     {
         struct bootmem_region *r = &bootmem_region_list[i];
         if ( r->s < r->e )
-            init_heap_pages(mfn_to_page(r->s), r->e - r->s);
+            init_heap_pages(mfn_to_page(_mfn(r->s)), r->e - r->s);
     }
     nr_bootmem_regions = 0;
     init_heap_pages(virt_to_page(bootmem_region_list), 1);
@@ -1862,7 +1870,7 @@  static void __init smp_scrub_heap_pages(void *data)
 
     for ( mfn = start; mfn < end; mfn++ )
     {
-        pg = mfn_to_page(mfn);
+        pg = mfn_to_page(_mfn(mfn));
 
         /* Check the mfn is valid and page is free. */
         if ( !mfn_valid(_mfn(mfn)) || !page_state_is(pg, free) )
@@ -1915,7 +1923,7 @@  static void __init scrub_heap_pages(void)
         if ( !node_spanned_pages(i) )
             continue;
         /* Calculate Node memory start and end address. */
-        start = max(node_start_pfn(i), first_valid_mfn);
+        start = max(node_start_pfn(i), mfn_x(first_valid_mfn));
         end = min(node_start_pfn(i) + node_spanned_pages(i), max_page);
         /* Just in case NODE has 1 page and starts below first_valid_mfn. */
         end = max(end, start);
@@ -2159,17 +2167,17 @@  void free_xenheap_pages(void *v, unsigned int order)
 
 void init_domheap_pages(paddr_t ps, paddr_t pe)
 {
-    unsigned long smfn, emfn;
+    mfn_t smfn, emfn;
 
     ASSERT(!in_irq());
 
-    smfn = round_pgup(ps) >> PAGE_SHIFT;
-    emfn = round_pgdown(pe) >> PAGE_SHIFT;
+    smfn = maddr_to_mfn(round_pgup(ps));
+    emfn = maddr_to_mfn(round_pgdown(pe));
 
-    if ( emfn <= smfn )
+    if ( mfn_x(emfn) <= mfn_x(smfn) )
         return;
 
-    init_heap_pages(mfn_to_page(smfn), emfn - smfn);
+    init_heap_pages(mfn_to_page(smfn), mfn_x(emfn) - mfn_x(smfn));
 }
 
 
diff --git a/xen/include/asm-arm/numa.h b/xen/include/asm-arm/numa.h
index 7e0b69413d..490d1f31aa 100644
--- a/xen/include/asm-arm/numa.h
+++ b/xen/include/asm-arm/numa.h
@@ -1,6 +1,8 @@ 
 #ifndef __ARCH_ARM_NUMA_H
 #define __ARCH_ARM_NUMA_H
 
+#include <xen/mm.h>
+
 typedef u8 nodeid_t;
 
 /* Fake one node for now. See also node_online_map. */
@@ -16,11 +18,11 @@  static inline __attribute__((pure)) nodeid_t phys_to_nid(paddr_t addr)
  * TODO: make first_valid_mfn static when NUMA is supported on Arm, this
  * is required because the dummy helpers are using it.
  */
-extern unsigned long first_valid_mfn;
+extern mfn_t first_valid_mfn;
 
 /* XXX: implement NUMA support */
-#define node_spanned_pages(nid) (max_page - first_valid_mfn)
-#define node_start_pfn(nid) (first_valid_mfn)
+#define node_spanned_pages(nid) (max_page - mfn_x(first_valid_mfn))
+#define node_start_pfn(nid) (mfn_x(first_valid_mfn))
 #define __node_distance(a, b) (20)
 
 static inline unsigned int arch_get_dma_bitsize(void)