[Xen-devel,v3,06/15] xen/x86: p2m-pod: Clean-up use of typesafe MFN

Message ID 20171002125941.11274-7-julien.grall@arm.com
State Accepted
Commit 014934dd94d1ecbb10799869e870d1a7d88c6de0
Series xen/x86: Clean-up the PoD code

Commit Message

Julien Grall Oct. 2, 2017, 12:59 p.m. UTC
Some unboxing/boxing of MFNs can be avoided by using mfn_add(...) instead.

Signed-off-by: Julien Grall <julien.grall@arm.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>

---

Cc: George Dunlap <george.dunlap@eu.citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>

    Changes in v3:
        - Add George's and Wei's reviewed-by

    Changes in v2:
        - Add Andrew's acked-by
---
 xen/arch/x86/mm/p2m-pod.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
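
For readers unfamiliar with Xen's typesafe MFN pattern: mfn_t boxes a raw
machine frame number so it cannot be silently mixed with other integers, and
mfn_add()/mfn_eq() keep arithmetic and comparisons inside the wrapper. Below
is a minimal standalone sketch of these helpers, written as a simplified
assumption modelled on the pattern in xen/include/xen/mm.h; it is not the
actual Xen definitions.

    /*
     * Minimal sketch of the typesafe MFN helpers, simplified from the
     * pattern in xen/include/xen/mm.h -- not the real Xen definitions.
     */
    #include <stdbool.h>

    typedef struct { unsigned long mfn; } mfn_t;

    #define _mfn(n)  ((mfn_t) { (n) })  /* box a raw frame number */
    #define mfn_x(m) ((m).mfn)          /* unbox back to a raw value */

    /* Arithmetic without an open-coded _mfn(mfn_x(...) + i) round-trip. */
    static inline mfn_t mfn_add(mfn_t mfn, unsigned long i)
    {
        return _mfn(mfn_x(mfn) + i);
    }

    /* Comparison without unboxing both sides at the call site. */
    static inline bool mfn_eq(mfn_t x, mfn_t y)
    {
        return mfn_x(x) == mfn_x(y);
    }

With helpers like these, open-coded expressions such as _mfn(mfn_x(mfn0) + i)
in the patch below collapse to mfn_add(mfn0, i).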
Patch

diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index bcc87aee03..34f5239b6d 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -101,7 +101,7 @@  p2m_pod_cache_add(struct p2m_domain *p2m,
      * promise to provide zero pages. So we scrub pages before using.
      */
     for ( i = 0; i < (1UL << order); i++ )
-        clear_domain_page(_mfn(mfn_x(page_to_mfn(page)) + i));
+        clear_domain_page(mfn_add(page_to_mfn(page), i));
 
     /* First, take all pages off the domain list */
     lock_page_alloc(p2m);
@@ -743,7 +743,7 @@  p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
             mfn0 = mfn;
             type0 = type;
         }
-        else if ( type != type0 || mfn_x(mfn) != (mfn_x(mfn0) + i) )
+        else if ( type != type0 || !mfn_eq(mfn, mfn_add(mfn0, i)) )
             goto out;
 
         n = 1UL << min(cur_order, SUPERPAGE_ORDER + 0U);
@@ -758,7 +758,7 @@  p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
     for ( i = 0; i < SUPERPAGE_PAGES; i++ )
     {
         /* Quick zero-check */
-        map = map_domain_page(_mfn(mfn_x(mfn0) + i));
+        map = map_domain_page(mfn_add(mfn0, i));
 
         for ( j = 0; j < 16; j++ )
             if ( *(map + j) != 0 )
@@ -783,7 +783,7 @@  p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
      */
     for ( i = 0; i < SUPERPAGE_PAGES; i++ )
     {
-        mfn = _mfn(mfn_x(mfn0) + i);
+        mfn = mfn_add(mfn0, i);
         if ( (mfn_to_page(mfn)->count_info & PGC_count_mask) > 1 )
         {
             reset = 1;
@@ -794,7 +794,7 @@  p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
     /* Finally, do a full zero-check */
     for ( i = 0; i < SUPERPAGE_PAGES; i++ )
     {
-        map = map_domain_page(_mfn(mfn_x(mfn0) + i));
+        map = map_domain_page(mfn_add(mfn0, i));
 
         for ( j = 0; j < (PAGE_SIZE / sizeof(*map)); j++ )
             if ( *(map+j) != 0 )
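
As a quick sanity check that mfn_add() and mfn_eq() behave like the raw
arithmetic they replace, the sketch above can be exercised with a hypothetical
test harness (illustrative only, not part of the patch):

    #include <assert.h>

    int main(void)
    {
        mfn_t base = _mfn(0x1000UL);

        /* mfn_add() must match raw unsigned long arithmetic... */
        assert(mfn_x(mfn_add(base, 5)) == 0x1005UL);

        /* ...and mfn_eq() must compare the boxed values. */
        assert(mfn_eq(mfn_add(base, 5), _mfn(0x1005UL)));

        return 0;
    }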