
[Xen-devel,v4,15/16] xen/x86: Switch mfn_to_page in x86_64/mm.c to use typesafe MFN

Message ID 20180221140259.29360-16-julien.grall@arm.com
State Superseded
Series xen: Convert page_to_mfn and mfn_to_page to use typesafe MFN

Commit Message

Julien Grall Feb. 21, 2018, 2:02 p.m. UTC
No functional change intended.

Signed-off-by: Julien Grall <julien.grall@arm.com>

---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>

    Changes in v4:
        - Patch added
---
 xen/arch/x86/x86_64/mm.c | 46 ++++++++++++++++++++++++++--------------------
 1 file changed, 26 insertions(+), 20 deletions(-)
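
For context: the "typesafe MFN" used throughout this series wraps the raw
frame number in a single-member struct, so that accidentally mixing an MFN
with a plain unsigned long (or a GFN) becomes a compile-time error. A
minimal sketch of the idea follows; Xen's real definitions live in
xen/include/xen/typesafe.h and xen/include/xen/mm.h and differ in shape
(the wrapping can be compiled out), so treat this as an illustration only:

    /* Illustrative sketch, not Xen's exact definitions. */
    typedef struct { unsigned long mfn; } mfn_t;

    /* Wrap a raw frame number. */
    static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ m }; }

    /* Unwrap back to a raw unsigned long. */
    static inline unsigned long mfn_x(mfn_t m) { return m.mfn; }

    /* Arithmetic has to go through a helper rather than plain '+'. */
    static inline mfn_t mfn_add(mfn_t m, unsigned long i)
    {
        return _mfn(mfn_x(m) + i);
    }

This is why the hunks below replace "m2p_start_mfn + i" with
mfn_add(m2p_start_mfn, i) and insert mfn_x() where a raw integer is still
expected.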

Comments

Jan Beulich March 2, 2018, 3:57 p.m. UTC | #1
>>> On 21.02.18 at 15:02, <julien.grall@arm.com> wrote:
> @@ -160,7 +162,8 @@ static int m2p_mapped(unsigned long spfn)
>  
>  static int share_hotadd_m2p_table(struct mem_hotadd_info *info)
>  {
> -    unsigned long i, n, v, m2p_start_mfn = 0;
> +    unsigned long i, n, v;
> +    mfn_t m2p_start_mfn = _mfn(0);

INVALID_MFN again please.

With that
Acked-by: Jan Beulich <jbeulich@suse.com>

Jan
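
For reference, the requested follow-up amounts to the one-line change
below (a sketch; INVALID_MFN is Xen's sentinel for "no MFN", defined in
xen/include/xen/mm.h as _mfn(~0UL)):

    -    mfn_t m2p_start_mfn = _mfn(0);
    +    mfn_t m2p_start_mfn = INVALID_MFN;

Using the sentinel rather than frame 0 makes an erroneous use of the
variable before it is assigned stand out, since MFN 0 is a valid frame.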

Patch

diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index cfbc8ecf16..6b679882d6 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -43,6 +43,8 @@  asm(".file \"" __FILE__ "\"");
 /* Override macros from asm/page.h to make them work with mfn_t */
 #undef page_to_mfn
 #define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
 
 unsigned int __read_mostly m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
 
@@ -160,7 +162,8 @@  static int m2p_mapped(unsigned long spfn)
 
 static int share_hotadd_m2p_table(struct mem_hotadd_info *info)
 {
-    unsigned long i, n, v, m2p_start_mfn = 0;
+    unsigned long i, n, v;
+    mfn_t m2p_start_mfn = _mfn(0);
     l3_pgentry_t l3e;
     l2_pgentry_t l2e;
 
@@ -180,15 +183,16 @@  static int share_hotadd_m2p_table(struct mem_hotadd_info *info)
             l2e = l3e_to_l2e(l3e)[l2_table_offset(v)];
             if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
                 continue;
-            m2p_start_mfn = l2e_get_pfn(l2e);
+            m2p_start_mfn = l2e_get_mfn(l2e);
         }
         else
             continue;
 
         for ( i = 0; i < n; i++ )
         {
-            struct page_info *page = mfn_to_page(m2p_start_mfn + i);
-            if (hotadd_mem_valid(m2p_start_mfn + i, info))
+            struct page_info *page = mfn_to_page(mfn_add(m2p_start_mfn, i));
+
+            if ( hotadd_mem_valid(mfn_x(mfn_add(m2p_start_mfn, i)), info) )
                 share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
         }
     }
@@ -204,12 +208,13 @@  static int share_hotadd_m2p_table(struct mem_hotadd_info *info)
         l2e = l3e_to_l2e(l3e)[l2_table_offset(v)];
         if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
             continue;
-        m2p_start_mfn = l2e_get_pfn(l2e);
+        m2p_start_mfn = l2e_get_mfn(l2e);
 
         for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         {
-            struct page_info *page = mfn_to_page(m2p_start_mfn + i);
-            if (hotadd_mem_valid(m2p_start_mfn + i, info))
+            struct page_info *page = mfn_to_page(mfn_add(m2p_start_mfn, i));
+
+            if ( hotadd_mem_valid(mfn_x(mfn_add(m2p_start_mfn, i)), info) )
                 share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
         }
     }
@@ -720,10 +725,10 @@  static void cleanup_frame_table(struct mem_hotadd_info *info)
     unsigned long sva, eva;
     l3_pgentry_t l3e;
     l2_pgentry_t l2e;
-    unsigned long spfn, epfn;
+    mfn_t spfn, epfn;
 
-    spfn = info->spfn;
-    epfn = info->epfn;
+    spfn = _mfn(info->spfn);
+    epfn = _mfn(info->epfn);
 
     sva = (unsigned long)mfn_to_page(spfn);
     eva = (unsigned long)mfn_to_page(epfn);
@@ -795,16 +800,17 @@  static int setup_frametable_chunk(void *start, void *end,
 
 static int extend_frame_table(struct mem_hotadd_info *info)
 {
-    unsigned long cidx, nidx, eidx, spfn, epfn;
+    unsigned long cidx, nidx, eidx;
+    mfn_t spfn, epfn;
 
-    spfn = info->spfn;
-    epfn = info->epfn;
+    spfn = _mfn(info->spfn);
+    epfn = _mfn(info->epfn);
 
-    eidx = (pfn_to_pdx(epfn) + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT;
-    nidx = cidx = pfn_to_pdx(spfn)/PDX_GROUP_COUNT;
+    eidx = (mfn_to_pdx(epfn) + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT;
+    nidx = cidx = mfn_to_pdx(spfn)/PDX_GROUP_COUNT;
 
-    ASSERT( pfn_to_pdx(epfn) <= (DIRECTMAP_SIZE >> PAGE_SHIFT) &&
-            pfn_to_pdx(epfn) <= FRAMETABLE_NR );
+    ASSERT( mfn_to_pdx(epfn) <= (DIRECTMAP_SIZE >> PAGE_SHIFT) &&
+            mfn_to_pdx(epfn) <= FRAMETABLE_NR );
 
     if ( test_bit(cidx, pdx_group_valid) )
         cidx = find_next_zero_bit(pdx_group_valid, eidx, cidx);
@@ -866,7 +872,7 @@  void __init subarch_init_memory(void)
 
         for ( i = 0; i < n; i++ )
         {
-            struct page_info *page = mfn_to_page(m2p_start_mfn + i);
+            struct page_info *page = mfn_to_page(_mfn(m2p_start_mfn + i));
             share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
         }
     }
@@ -886,7 +892,7 @@  void __init subarch_init_memory(void)
 
         for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         {
-            struct page_info *page = mfn_to_page(m2p_start_mfn + i);
+            struct page_info *page = mfn_to_page(_mfn(m2p_start_mfn + i));
             share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
         }
     }
@@ -1274,7 +1280,7 @@  static int transfer_pages_to_heap(struct mem_hotadd_info *info)
      */
     for (i = info->spfn; i < info->cur; i++)
     {
-        pg = mfn_to_page(i);
+        pg = mfn_to_page(_mfn(i));
         pg->count_info = PGC_state_inuse;
     }
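
A note on the override hunk at the top of the patch: at this point in the
series, asm/page.h still exposes mfn_to_page()/page_to_mfn() with a raw
unsigned long interface, so each already-converted file re-defines the two
macros locally on top of the double-underscore accessors (as the in-file
comment says, to "make them work with mfn_t"). A sketch of the pattern,
assuming __mfn_to_page()/__page_to_mfn() keep their raw-integer signatures:

    /* Local, typesafe wrappers over the raw asm/page.h accessors. */
    #undef mfn_to_page
    #define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
    #undef page_to_mfn
    #define page_to_mfn(pg)  _mfn(__page_to_mfn(pg))

The overrides exist so they can simply be dropped once the common
definitions themselves are switched over to mfn_t.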