
[Xen-devel,v2,6/9] xen/kexec, kimage: Convert kexec and kimage to use typesafe mfn_t

Message ID 20171005174222.29161-7-julien.grall@linaro.org
State Accepted
Commit e7c9678259b134e9b7d999950603208fd490c8d0
Series xen: Convert __page_to_mfn and __mfn_to_page to use typesafe MFN

Commit Message

Julien Grall Oct. 5, 2017, 5:42 p.m. UTC
At the same time, correctly align the parameters of the one prototype that changed.

Signed-off-by: Julien Grall <julien.grall@linaro.org>
---
 xen/common/kexec.c       | 16 ++++++++--------
 xen/common/kimage.c      | 30 ++++++++++++++++++------------
 xen/include/xen/kimage.h |  4 ++--
 3 files changed, 28 insertions(+), 22 deletions(-)
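
For readers unfamiliar with the conversion this series performs, here is a minimal, self-contained sketch of the typesafe-MFN pattern. It is simplified from Xen's TYPE_SAFE() machinery; the names mirror Xen's, but the helper bodies below are illustrative rather than the exact tree contents.

/*
 * Minimal sketch of the typesafe MFN idea, simplified from Xen's
 * TYPE_SAFE() macro.
 */
#include <stdint.h>

#define PAGE_SHIFT 12

typedef uint64_t paddr_t;

/* Wrapping the frame number in a struct turns accidental mixing of
 * MFNs with plain integers or addresses into a compile-time error. */
typedef struct { unsigned long mfn; } mfn_t;

static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ m }; }
static inline unsigned long mfn_x(mfn_t m) { return m.mfn; }

/* The conversions this patch uses in place of open-coded shifts such
 * as "(paddr_t)mfn << PAGE_SHIFT". */
static inline paddr_t mfn_to_maddr(mfn_t m)
{
    return (paddr_t)mfn_x(m) << PAGE_SHIFT;
}

static inline mfn_t maddr_to_mfn(paddr_t ma)
{
    return _mfn(ma >> PAGE_SHIFT);
}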

Comments

Andrew Cooper Oct. 5, 2017, 5:51 p.m. UTC | #1
On 05/10/17 18:42, Julien Grall wrote:
> At the same time, correctly align the parameters of the one prototype that changed.
>
> Signed-off-by: Julien Grall <julien.grall@linaro.org>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
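
The payoff of the conversion is easiest to see in a short usage sketch (hypothetical values, reusing the helpers from the sketch above): passing a raw integer where an mfn_t is expected no longer compiles.

void demo(void)
{
    unsigned long raw = 0x1234;

    /* Before: nothing stops a frame number being used as an address. */
    paddr_t oops = raw;                /* compiles, silently wrong */

    /* After: conversions must be spelled out. */
    mfn_t mfn = _mfn(raw);
    paddr_t maddr = mfn_to_maddr(mfn); /* explicit and correct */

    /* paddr_t bad = mfn; */           /* error: incompatible types */

    (void)oops; (void)maddr;
}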

Patch

diff --git a/xen/common/kexec.c b/xen/common/kexec.c
index fcc68bd4d8..c14cbb2b9c 100644
--- a/xen/common/kexec.c
+++ b/xen/common/kexec.c
@@ -905,11 +905,11 @@  static uint16_t kexec_load_v1_arch(void)
 #endif
 }
 
-static int kexec_segments_add_segment(
-    unsigned int *nr_segments, xen_kexec_segment_t *segments,
-    unsigned long mfn)
+static int kexec_segments_add_segment(unsigned int *nr_segments,
+                                      xen_kexec_segment_t *segments,
+                                      mfn_t mfn)
 {
-    paddr_t maddr = (paddr_t)mfn << PAGE_SHIFT;
+    paddr_t maddr = mfn_to_maddr(mfn);
     unsigned int n = *nr_segments;
 
     /* Need a new segment? */
@@ -930,7 +930,7 @@  static int kexec_segments_add_segment(
     return 0;
 }
 
-static int kexec_segments_from_ind_page(unsigned long mfn,
+static int kexec_segments_from_ind_page(mfn_t mfn,
                                         unsigned int *nr_segments,
                                         xen_kexec_segment_t *segments,
                                         bool_t compat)
@@ -939,7 +939,7 @@  static int kexec_segments_from_ind_page(unsigned long mfn,
     kimage_entry_t *entry;
     int ret = 0;
 
-    page = map_domain_page(_mfn(mfn));
+    page = map_domain_page(mfn);
 
     /*
      * Walk the indirection page list, adding destination pages to the
@@ -961,7 +961,7 @@  static int kexec_segments_from_ind_page(unsigned long mfn,
             break;
         case IND_INDIRECTION:
             unmap_domain_page(page);
-            entry = page = map_domain_page(_mfn(mfn));
+            entry = page = map_domain_page(mfn);
             continue;
         case IND_DONE:
             goto done;
@@ -990,7 +990,7 @@  static int kexec_do_load_v1(xen_kexec_load_v1_t *load, int compat)
     xen_kexec_segment_t *segments;
     uint16_t arch;
     unsigned int nr_segments = 0;
-    unsigned long ind_mfn = load->image.indirection_page >> PAGE_SHIFT;
+    mfn_t ind_mfn = maddr_to_mfn(load->image.indirection_page);
     int ret;
 
     arch = kexec_load_v1_arch();
diff --git a/xen/common/kimage.c b/xen/common/kimage.c
index 07587896a4..afd8292cc1 100644
--- a/xen/common/kimage.c
+++ b/xen/common/kimage.c
@@ -23,6 +23,12 @@ 
 
 #include <asm/page.h>
 
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
+#undef page_to_mfn
+#define page_to_mfn(pg)  _mfn(__page_to_mfn(pg))
+
 /*
  * When kexec transitions to the new kernel there is a one-to-one
  * mapping between physical and virtual addresses.  On processors
@@ -76,7 +82,7 @@  static struct page_info *kimage_alloc_zeroed_page(unsigned memflags)
     if ( !page )
         return NULL;
 
-    clear_domain_page(_mfn(page_to_mfn(page)));
+    clear_domain_page(page_to_mfn(page));
 
     return page;
 }
@@ -405,7 +411,7 @@  static struct page_info *kimage_alloc_crash_control_page(struct kexec_image *ima
     if ( page )
     {
         image->next_crash_page = hole_end;
-        clear_domain_page(_mfn(page_to_mfn(page)));
+        clear_domain_page(page_to_mfn(page));
     }
 
     return page;
@@ -641,7 +647,7 @@  static struct page_info *kimage_alloc_page(struct kexec_image *image,
             *old = (addr & ~PAGE_MASK) | IND_SOURCE;
             unmap_domain_page(old);
 
-            page = mfn_to_page(mfn_x(old_mfn));
+            page = mfn_to_page(old_mfn);
             break;
         }
         else
@@ -840,11 +846,11 @@  kimage_entry_t *kimage_entry_next(kimage_entry_t *entry, bool_t compat)
     return entry + 1;
 }
 
-unsigned long kimage_entry_mfn(kimage_entry_t *entry, bool_t compat)
+mfn_t kimage_entry_mfn(kimage_entry_t *entry, bool_t compat)
 {
     if ( compat )
-        return *(uint32_t *)entry >> PAGE_SHIFT;
-    return *entry >> PAGE_SHIFT;
+        return maddr_to_mfn(*(uint32_t *)entry);
+    return maddr_to_mfn(*entry);
 }
 
 unsigned long kimage_entry_ind(kimage_entry_t *entry, bool_t compat)
@@ -854,7 +860,7 @@  unsigned long kimage_entry_ind(kimage_entry_t *entry, bool_t compat)
     return *entry & 0xf;
 }
 
-int kimage_build_ind(struct kexec_image *image, unsigned long ind_mfn,
+int kimage_build_ind(struct kexec_image *image, mfn_t ind_mfn,
                      bool_t compat)
 {
     void *page;
@@ -862,7 +868,7 @@  int kimage_build_ind(struct kexec_image *image, unsigned long ind_mfn,
     int ret = 0;
     paddr_t dest = KIMAGE_NO_DEST;
 
-    page = map_domain_page(_mfn(ind_mfn));
+    page = map_domain_page(ind_mfn);
     if ( !page )
         return -ENOMEM;
 
@@ -873,7 +879,7 @@  int kimage_build_ind(struct kexec_image *image, unsigned long ind_mfn,
     for ( entry = page; ;  )
     {
         unsigned long ind;
-        unsigned long mfn;
+        mfn_t mfn;
 
         ind = kimage_entry_ind(entry, compat);
         mfn = kimage_entry_mfn(entry, compat);
@@ -881,14 +887,14 @@  int kimage_build_ind(struct kexec_image *image, unsigned long ind_mfn,
         switch ( ind )
         {
         case IND_DESTINATION:
-            dest = (paddr_t)mfn << PAGE_SHIFT;
+            dest = mfn_to_maddr(mfn);
             ret = kimage_set_destination(image, dest);
             if ( ret < 0 )
                 goto done;
             break;
         case IND_INDIRECTION:
             unmap_domain_page(page);
-            page = map_domain_page(_mfn(mfn));
+            page = map_domain_page(mfn);
             entry = page;
             continue;
         case IND_DONE:
@@ -913,7 +919,7 @@  int kimage_build_ind(struct kexec_image *image, unsigned long ind_mfn,
                 goto done;
             }
 
-            copy_domain_page(_mfn(page_to_mfn(xen_page)), _mfn(mfn));
+            copy_domain_page(page_to_mfn(xen_page), mfn);
             put_page(guest_page);
 
             ret = kimage_add_page(image, page_to_maddr(xen_page));
diff --git a/xen/include/xen/kimage.h b/xen/include/xen/kimage.h
index d10ebf7844..cbfb9e9054 100644
--- a/xen/include/xen/kimage.h
+++ b/xen/include/xen/kimage.h
@@ -48,9 +48,9 @@  struct page_info *kimage_alloc_control_page(struct kexec_image *image,
                                             unsigned memflags);
 
 kimage_entry_t *kimage_entry_next(kimage_entry_t *entry, bool_t compat);
-unsigned long kimage_entry_mfn(kimage_entry_t *entry, bool_t compat);
+mfn_t kimage_entry_mfn(kimage_entry_t *entry, bool_t compat);
 unsigned long kimage_entry_ind(kimage_entry_t *entry, bool_t compat);
-int kimage_build_ind(struct kexec_image *image, unsigned long ind_mfn,
+int kimage_build_ind(struct kexec_image *image, mfn_t ind_mfn,
                      bool_t compat);
 
 #endif /* __ASSEMBLY__ */
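
A closing note on kimage_entry_mfn()/kimage_entry_ind(): each kimage_entry_t packs a page-aligned machine address together with an IND_* type in its low bits, which is why the new maddr_to_mfn() call can simply shift the flag bits away. Below is a hypothetical worked example of that decoding, reusing the mfn_t helpers sketched earlier; the IND_* values match the standard kexec encoding.

#include <stdio.h>

typedef unsigned long kimage_entry_t;

#define IND_DESTINATION  0x1
#define IND_INDIRECTION  0x2
#define IND_DONE         0x4
#define IND_SOURCE       0x8

int main(void)
{
    /* Hypothetical entry: frame 0x12345 marked as a source page. */
    kimage_entry_t entry = (0x12345UL << PAGE_SHIFT) | IND_SOURCE;

    /* kimage_entry_ind(): the type lives in the low nibble. */
    unsigned long ind = entry & 0xf;

    /* kimage_entry_mfn() after this patch: maddr_to_mfn() shifts by
     * PAGE_SHIFT, discarding the flag bits along the way. */
    mfn_t mfn = maddr_to_mfn(entry);

    printf("ind=%#lx mfn=%#lx\n", ind, mfn_x(mfn));
    return 0;
}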