[Xen-devel,v3,for-next,4/4] xen: Convert __page_to_mfn and __mfn_to_page to use typesafe MFN

Message ID 20171101140316.31333-5-julien.grall@linaro.org
State New
Series xen: Convert __page_to_mfn and __mfn_to_page to use typesafe MFN

Commit Message

Julien Grall Nov. 1, 2017, 2:03 p.m. UTC
Most users of page_to_mfn and mfn_to_page either override the macros to
make them work with mfn_t, or use mfn_x/_mfn because the rest of the
function uses mfn_t.

So make __page_to_mfn return, and __mfn_to_page take, mfn_t by default.

Only reasonable clean-ups are done in this patch because it is already
quite big, so some of the files now override page_to_mfn and
mfn_to_page to avoid using mfn_t.

Lastly, domain_page_to_mfn is also converted to use mfn_t given that
most of the callers are now switched to _mfn(domain_page_to_mfn(...)).
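
For readers less familiar with the typesafe machinery, here is a minimal
sketch of what the mfn_t wrapper provides (a rough paraphrase of the
TYPE_SAFE() helper as instantiated for mfn in xen/include/xen/mm.h,
debug-build flavour; not the verbatim code):

    /* Simplified illustration of TYPE_SAFE(unsigned long, mfn). */
    typedef struct { unsigned long mfn; } mfn_t;  /* distinct type  */
    #define _mfn(n)  ((mfn_t) { n })              /* wrap a raw MFN */
    #define mfn_x(m) ((m).mfn)                    /* unwrap to raw  */

With __page_to_mfn() returning mfn_t, an implicit mix-up such as
"unsigned long m = page_to_mfn(pg);" no longer compiles; callers either
keep the mfn_t type or unwrap explicitly with mfn_x(page_to_mfn(pg)).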

Signed-off-by: Julien Grall <julien.grall@linaro.org>

---

Andrew suggested dropping IS_VALID_PAGE in xen/tmem_xen.h. His comment
was:

"/sigh  This is tautological.  The definition of a "valid mfn" in this
case is one for which we have a frametable entry, and by having a struct
page_info in our hands, this is by definition true (unless you have a
wild pointer, at which point your bug is elsewhere).

IS_VALID_PAGE() is only ever used in assertions and never usefully, so
instead I would remove it entirely rather than trying to fix it up."

I can remove the function in a separate patch at the beginning of the
series if Konrad (TMEM maintainer) is happy with that.
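
For context, the macro under discussion has roughly this shape (an
approximation, not the verbatim xen/include/xen/tmem_xen.h definition):

    /*
     * Any struct page_info pointer legitimately derived from the
     * frametable satisfies this by construction, hence "tautological".
     */
    #define IS_VALID_PAGE(_pi)  mfn_valid(page_to_mfn(_pi))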

Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Julien Grall <julien.grall@arm.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: George Dunlap <George.Dunlap@eu.citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Tim Deegan <tim@xen.org>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Razvan Cojocaru <rcojocaru@bitdefender.com>
Cc: Tamas K Lengyel <tamas@tklengyel.com>
Cc: Paul Durrant <paul.durrant@citrix.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Cc: Jun Nakajima <jun.nakajima@intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Cc: George Dunlap <george.dunlap@eu.citrix.com>
Cc: Gang Wei <gang.wei@intel.com>
Cc: Shane Wang <shane.wang@intel.com>

    Changes in v3:
        - Rebase on the latest staging and fix some conflicts. Tags
        haven't been retained.
        - Switch the printf format to PRI_mfn

    Changes in v2:
        - Some parts have been moved to a separate patch
        - Remove one spurious comment
        - Convert domain_page_to_mfn to use mfn_t
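
    A note on the PRI_mfn item above: with page_to_mfn() returning
    mfn_t, the logging pattern used throughout the series is
    (illustrative only, mirroring the hunks below):

        mfn_t mfn = page_to_mfn(page);

        /* mfn_t cannot be handed to printk() directly; unwrap with
         * mfn_x() and print with the PRI_mfn format macro. */
        printk("mfn %#"PRI_mfn"\n", mfn_x(mfn));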
---
 xen/arch/arm/domain_build.c             |  2 --
 xen/arch/arm/kernel.c                   |  2 +-
 xen/arch/arm/mem_access.c               |  2 +-
 xen/arch/arm/mm.c                       |  8 ++++----
 xen/arch/arm/p2m.c                      | 10 ++--------
 xen/arch/x86/cpu/vpmu.c                 |  4 ++--
 xen/arch/x86/domain.c                   | 21 +++++++++++----------
 xen/arch/x86/domain_page.c              |  6 +++---
 xen/arch/x86/domctl.c                   |  2 +-
 xen/arch/x86/hvm/dm.c                   |  2 +-
 xen/arch/x86/hvm/dom0_build.c           |  6 +++---
 xen/arch/x86/hvm/emulate.c              |  6 +++---
 xen/arch/x86/hvm/hvm.c                  | 16 ++++++++--------
 xen/arch/x86/hvm/ioreq.c                |  6 +++---
 xen/arch/x86/hvm/stdvga.c               |  2 +-
 xen/arch/x86/hvm/svm/svm.c              |  4 ++--
 xen/arch/x86/hvm/viridian.c             |  6 +++---
 xen/arch/x86/hvm/vmx/vmcs.c             |  2 +-
 xen/arch/x86/hvm/vmx/vmx.c              | 10 +++++-----
 xen/arch/x86/hvm/vmx/vvmx.c             |  6 +++---
 xen/arch/x86/mm.c                       |  6 ------
 xen/arch/x86/mm/guest_walk.c            |  6 +++---
 xen/arch/x86/mm/hap/guest_walk.c        |  2 +-
 xen/arch/x86/mm/hap/hap.c               |  6 ------
 xen/arch/x86/mm/hap/nested_ept.c        |  2 +-
 xen/arch/x86/mm/mem_sharing.c           |  5 -----
 xen/arch/x86/mm/p2m-ept.c               |  4 ++++
 xen/arch/x86/mm/p2m-pod.c               |  6 ------
 xen/arch/x86/mm/p2m.c                   |  6 ------
 xen/arch/x86/mm/paging.c                |  6 ------
 xen/arch/x86/mm/shadow/private.h        | 16 ++--------------
 xen/arch/x86/numa.c                     |  2 +-
 xen/arch/x86/physdev.c                  |  2 +-
 xen/arch/x86/pv/callback.c              |  6 ------
 xen/arch/x86/pv/descriptor-tables.c     | 10 ----------
 xen/arch/x86/pv/dom0_build.c            |  6 ++++++
 xen/arch/x86/pv/domain.c                |  6 ------
 xen/arch/x86/pv/emul-gate-op.c          |  6 ------
 xen/arch/x86/pv/emul-priv-op.c          | 10 ----------
 xen/arch/x86/pv/grant_table.c           |  6 ------
 xen/arch/x86/pv/ro-page-fault.c         |  6 ------
 xen/arch/x86/smpboot.c                  |  6 ------
 xen/arch/x86/tboot.c                    |  4 ++--
 xen/arch/x86/traps.c                    |  4 ++--
 xen/arch/x86/x86_64/mm.c                |  6 ++++++
 xen/common/domain.c                     |  4 ++--
 xen/common/grant_table.c                |  6 ++++++
 xen/common/kimage.c                     |  6 ------
 xen/common/memory.c                     |  6 ++++++
 xen/common/page_alloc.c                 |  6 ++++++
 xen/common/tmem.c                       |  2 +-
 xen/common/tmem_xen.c                   |  4 ----
 xen/common/trace.c                      |  6 ++++++
 xen/common/vmap.c                       |  9 +++++----
 xen/common/xenoprof.c                   |  2 --
 xen/drivers/passthrough/amd/iommu_map.c |  6 ++++++
 xen/drivers/passthrough/iommu.c         |  2 +-
 xen/drivers/passthrough/x86/iommu.c     |  2 +-
 xen/include/asm-arm/mm.h                | 16 +++++++++-------
 xen/include/asm-arm/p2m.h               |  4 ++--
 xen/include/asm-x86/mm.h                | 12 ++++++------
 xen/include/asm-x86/p2m.h               |  2 +-
 xen/include/asm-x86/page.h              | 32 ++++++++++++++++----------------
 xen/include/xen/domain_page.h           |  8 ++++----
 xen/include/xen/tmem_xen.h              |  2 +-
 65 files changed, 166 insertions(+), 239 deletions(-)

Comments

Paul Durrant Nov. 1, 2017, 3:35 p.m. UTC | #1
> -----Original Message-----
> From: Julien Grall [mailto:julien.grall@linaro.org]
> Sent: 01 November 2017 14:03
> To: xen-devel@lists.xen.org
> Cc: Julien Grall <julien.grall@linaro.org>; Stefano Stabellini
> <sstabellini@kernel.org>; Julien Grall <julien.grall@arm.com>; Andrew
> Cooper <Andrew.Cooper3@citrix.com>; George Dunlap
> <George.Dunlap@citrix.com>; Ian Jackson <Ian.Jackson@citrix.com>; Jan
> Beulich <jbeulich@suse.com>; Konrad Rzeszutek Wilk
> <konrad.wilk@oracle.com>; Tim (Xen.org) <tim@xen.org>; Wei Liu
> <wei.liu2@citrix.com>; Razvan Cojocaru <rcojocaru@bitdefender.com>;
> Tamas K Lengyel <tamas@tklengyel.com>; Paul Durrant
> <Paul.Durrant@citrix.com>; Boris Ostrovsky <boris.ostrovsky@oracle.com>;
> Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>; Jun Nakajima
> <jun.nakajima@intel.com>; Kevin Tian <kevin.tian@intel.com>; George
> Dunlap <George.Dunlap@citrix.com>; Gang Wei <gang.wei@intel.com>;
> Shane Wang <shane.wang@intel.com>
> Subject: [PATCH v3 for-next 4/4] xen: Convert __page_to_mfn and
> __mfn_to_page to use typesafe MFN
> 
> Most users of page_to_mfn and mfn_to_page either override the macros to
> make them work with mfn_t, or use mfn_x/_mfn because the rest of the
> function uses mfn_t.
> 
> So make __page_to_mfn return, and __mfn_to_page take, mfn_t by default.
> 
> Only reasonable clean-ups are done in this patch because it is already
> quite big, so some of the files now override page_to_mfn and
> mfn_to_page to avoid using mfn_t.
> 
> Lastly, domain_page_to_mfn is also converted to use mfn_t given that
> most of the callers are now switched to _mfn(domain_page_to_mfn(...)).
> 
> Signed-off-by: Julien Grall <julien.grall@linaro.org>
> 

emulate bits...

Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

> ---
> 
> Andrew suggested dropping IS_VALID_PAGE in xen/tmem_xen.h. His comment
> was:
> 
> "/sigh  This is tautological.  The definition of a "valid mfn" in this
> case is one for which we have a frametable entry, and by having a struct
> page_info in our hands, this is by definition true (unless you have a
> wild pointer, at which point your bug is elsewhere).
> 
> IS_VALID_PAGE() is only ever used in assertions and never usefully, so
> instead I would remove it entirely rather than trying to fix it up."
> 
> I can remove the function in a separate patch at the beginning of the
> series if Konrad (TMEM maintainer) is happy with that.
> 
> Cc: Stefano Stabellini <sstabellini@kernel.org>
> Cc: Julien Grall <julien.grall@arm.com>
> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
> Cc: George Dunlap <George.Dunlap@eu.citrix.com>
> Cc: Ian Jackson <ian.jackson@eu.citrix.com>
> Cc: Jan Beulich <jbeulich@suse.com>
> Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> Cc: Tim Deegan <tim@xen.org>
> Cc: Wei Liu <wei.liu2@citrix.com>
> Cc: Razvan Cojocaru <rcojocaru@bitdefender.com>
> Cc: Tamas K Lengyel <tamas@tklengyel.com>
> Cc: Paul Durrant <paul.durrant@citrix.com>
> Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
> Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
> Cc: Jun Nakajima <jun.nakajima@intel.com>
> Cc: Kevin Tian <kevin.tian@intel.com>
> Cc: George Dunlap <george.dunlap@eu.citrix.com>
> Cc: Gang Wei <gang.wei@intel.com>
> Cc: Shane Wang <shane.wang@intel.com>
> 
>     Changes in v3:
>         - Rebase on the latest staging and fix some conflicts. Tags
>         haven't been retained.
>         - Switch the printf format to PRI_mfn
> 
>     Changes in v2:
>         - Some parts have been moved to a separate patch
>         - Remove one spurious comment
>         - Convert domain_page_to_mfn to use mfn_t
> ---
>  xen/arch/arm/domain_build.c             |  2 --
>  xen/arch/arm/kernel.c                   |  2 +-
>  xen/arch/arm/mem_access.c               |  2 +-
>  xen/arch/arm/mm.c                       |  8 ++++----
>  xen/arch/arm/p2m.c                      | 10 ++--------
>  xen/arch/x86/cpu/vpmu.c                 |  4 ++--
>  xen/arch/x86/domain.c                   | 21 +++++++++++----------
>  xen/arch/x86/domain_page.c              |  6 +++---
>  xen/arch/x86/domctl.c                   |  2 +-
>  xen/arch/x86/hvm/dm.c                   |  2 +-
>  xen/arch/x86/hvm/dom0_build.c           |  6 +++---
>  xen/arch/x86/hvm/emulate.c              |  6 +++---
>  xen/arch/x86/hvm/hvm.c                  | 16 ++++++++--------
>  xen/arch/x86/hvm/ioreq.c                |  6 +++---
>  xen/arch/x86/hvm/stdvga.c               |  2 +-
>  xen/arch/x86/hvm/svm/svm.c              |  4 ++--
>  xen/arch/x86/hvm/viridian.c             |  6 +++---
>  xen/arch/x86/hvm/vmx/vmcs.c             |  2 +-
>  xen/arch/x86/hvm/vmx/vmx.c              | 10 +++++-----
>  xen/arch/x86/hvm/vmx/vvmx.c             |  6 +++---
>  xen/arch/x86/mm.c                       |  6 ------
>  xen/arch/x86/mm/guest_walk.c            |  6 +++---
>  xen/arch/x86/mm/hap/guest_walk.c        |  2 +-
>  xen/arch/x86/mm/hap/hap.c               |  6 ------
>  xen/arch/x86/mm/hap/nested_ept.c        |  2 +-
>  xen/arch/x86/mm/mem_sharing.c           |  5 -----
>  xen/arch/x86/mm/p2m-ept.c               |  4 ++++
>  xen/arch/x86/mm/p2m-pod.c               |  6 ------
>  xen/arch/x86/mm/p2m.c                   |  6 ------
>  xen/arch/x86/mm/paging.c                |  6 ------
>  xen/arch/x86/mm/shadow/private.h        | 16 ++--------------
>  xen/arch/x86/numa.c                     |  2 +-
>  xen/arch/x86/physdev.c                  |  2 +-
>  xen/arch/x86/pv/callback.c              |  6 ------
>  xen/arch/x86/pv/descriptor-tables.c     | 10 ----------
>  xen/arch/x86/pv/dom0_build.c            |  6 ++++++
>  xen/arch/x86/pv/domain.c                |  6 ------
>  xen/arch/x86/pv/emul-gate-op.c          |  6 ------
>  xen/arch/x86/pv/emul-priv-op.c          | 10 ----------
>  xen/arch/x86/pv/grant_table.c           |  6 ------
>  xen/arch/x86/pv/ro-page-fault.c         |  6 ------
>  xen/arch/x86/smpboot.c                  |  6 ------
>  xen/arch/x86/tboot.c                    |  4 ++--
>  xen/arch/x86/traps.c                    |  4 ++--
>  xen/arch/x86/x86_64/mm.c                |  6 ++++++
>  xen/common/domain.c                     |  4 ++--
>  xen/common/grant_table.c                |  6 ++++++
>  xen/common/kimage.c                     |  6 ------
>  xen/common/memory.c                     |  6 ++++++
>  xen/common/page_alloc.c                 |  6 ++++++
>  xen/common/tmem.c                       |  2 +-
>  xen/common/tmem_xen.c                   |  4 ----
>  xen/common/trace.c                      |  6 ++++++
>  xen/common/vmap.c                       |  9 +++++----
>  xen/common/xenoprof.c                   |  2 --
>  xen/drivers/passthrough/amd/iommu_map.c |  6 ++++++
>  xen/drivers/passthrough/iommu.c         |  2 +-
>  xen/drivers/passthrough/x86/iommu.c     |  2 +-
>  xen/include/asm-arm/mm.h                | 16 +++++++++-------
>  xen/include/asm-arm/p2m.h               |  4 ++--
>  xen/include/asm-x86/mm.h                | 12 ++++++------
>  xen/include/asm-x86/p2m.h               |  2 +-
>  xen/include/asm-x86/page.h              | 32 ++++++++++++++++----------------
>  xen/include/xen/domain_page.h           |  8 ++++----
>  xen/include/xen/tmem_xen.h              |  2 +-
>  65 files changed, 166 insertions(+), 239 deletions(-)
> 
> diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
> index 5532068ab1..4b554b49c1 100644
> --- a/xen/arch/arm/domain_build.c
> +++ b/xen/arch/arm/domain_build.c
> @@ -50,8 +50,6 @@ struct map_range_data
>  /* Override macros from asm/page.h to make them work with mfn_t */
>  #undef virt_to_mfn
>  #define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
> -#undef page_to_mfn
> -#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
> 
>  //#define DEBUG_11_ALLOCATION
>  #ifdef DEBUG_11_ALLOCATION
> diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c
> index c2755a9ab9..018d1aed06 100644
> --- a/xen/arch/arm/kernel.c
> +++ b/xen/arch/arm/kernel.c
> @@ -295,7 +295,7 @@ static __init int kernel_decompress(struct bootmodule *mod)
>          iounmap(input);
>          return -ENOMEM;
>      }
> -    mfn = _mfn(page_to_mfn(pages));
> +    mfn = page_to_mfn(pages);
>      output = __vmap(&mfn, 1 << kernel_order_out, 1, 1, PAGE_HYPERVISOR, VMAP_DEFAULT);
> 
>      rc = perform_gunzip(output, input, size);
> diff --git a/xen/arch/arm/mem_access.c b/xen/arch/arm/mem_access.c
> index 0f2cbb81d3..112e291cba 100644
> --- a/xen/arch/arm/mem_access.c
> +++ b/xen/arch/arm/mem_access.c
> @@ -210,7 +210,7 @@ p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag,
>      if ( t != p2m_ram_rw )
>          goto err;
> 
> -    page = mfn_to_page(mfn_x(mfn));
> +    page = mfn_to_page(mfn);
> 
>      if ( unlikely(!get_page(page, v->domain)) )
>          page = NULL;
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 3c328e2df5..b8818e03ab 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -477,7 +477,7 @@ void unmap_domain_page(const void *va)
>      local_irq_restore(flags);
>  }
> 
> -unsigned long domain_page_map_to_mfn(const void *ptr)
> +mfn_t domain_page_map_to_mfn(const void *ptr)
>  {
>      unsigned long va = (unsigned long)ptr;
>      lpae_t *map = this_cpu(xen_dommap);
> @@ -485,12 +485,12 @@ unsigned long domain_page_map_to_mfn(const void *ptr)
>      unsigned long offset = (va>>THIRD_SHIFT) & LPAE_ENTRY_MASK;
> 
>      if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END )
> -        return __virt_to_mfn(va);
> +        return virt_to_mfn(va);
> 
>      ASSERT(slot >= 0 && slot < DOMHEAP_ENTRIES);
>      ASSERT(map[slot].pt.avail != 0);
> 
> -    return map[slot].pt.base + offset;
> +    return _mfn(map[slot].pt.base + offset);
>  }
>  #endif
> 
> @@ -1288,7 +1288,7 @@ int xenmem_add_to_physmap_one(
>              return -EINVAL;
>          }
> 
> -        mfn = _mfn(page_to_mfn(page));
> +        mfn = page_to_mfn(page);
>          t = p2m_map_foreign;
> 
>          rcu_unlock_domain(od);
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index 68b488997d..d1ef535a43 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -38,12 +38,6 @@ static unsigned int __read_mostly max_vmid = MAX_VMID_8_BIT;
> 
>  #define P2M_ROOT_PAGES    (1<<P2M_ROOT_ORDER)
> 
> -/* Override macros from asm/mm.h to make them work with mfn_t */
> -#undef mfn_to_page
> -#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
> -#undef page_to_mfn
> -#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
> -
>  unsigned int __read_mostly p2m_ipa_bits;
> 
>  /* Helpers to lookup the properties of each level */
> @@ -97,8 +91,8 @@ void dump_p2m_lookup(struct domain *d, paddr_t addr)
> 
>      printk("dom%d IPA 0x%"PRIpaddr"\n", d->domain_id, addr);
> 
> -    printk("P2M @ %p mfn:0x%lx\n",
> -           p2m->root, __page_to_mfn(p2m->root));
> +    printk("P2M @ %p mfn:%#"PRI_mfn"\n",
> +           p2m->root, mfn_x(page_to_mfn(p2m->root)));
> 
>      dump_pt_walk(page_to_maddr(p2m->root), addr,
>                   P2M_ROOT_LEVEL, P2M_ROOT_PAGES);
> diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
> index 7baf4614be..b978e05613 100644
> --- a/xen/arch/x86/cpu/vpmu.c
> +++ b/xen/arch/x86/cpu/vpmu.c
> @@ -653,7 +653,7 @@ static void pvpmu_finish(struct domain *d, xen_pmu_params_t *params)
>  {
>      struct vcpu *v;
>      struct vpmu_struct *vpmu;
> -    uint64_t mfn;
> +    mfn_t mfn;
>      void *xenpmu_data;
> 
>      if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )
> @@ -675,7 +675,7 @@ static void pvpmu_finish(struct domain *d, xen_pmu_params_t *params)
>      if ( xenpmu_data )
>      {
>          mfn = domain_page_map_to_mfn(xenpmu_data);
> -        ASSERT(mfn_valid(_mfn(mfn)));
> +        ASSERT(mfn_valid(mfn));
>          unmap_domain_page_global(xenpmu_data);
>          put_page_and_type(mfn_to_page(mfn));
>      }
> diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
> index 735f45c133..cb596c4a31 100644
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -186,7 +186,7 @@ void dump_pageframe_info(struct domain *d)
>                  }
>              }
>              printk("    DomPage %p: caf=%08lx, taf=%" PRtype_info "\n",
> -                   _p(page_to_mfn(page)),
> +                   _p(mfn_x(page_to_mfn(page))),
>                     page->count_info, page->u.inuse.type_info);
>          }
>          spin_unlock(&d->page_alloc_lock);
> @@ -199,7 +199,7 @@ void dump_pageframe_info(struct domain *d)
>      page_list_for_each ( page, &d->xenpage_list )
>      {
>          printk("    XenPage %p: caf=%08lx, taf=%" PRtype_info "\n",
> -               _p(page_to_mfn(page)),
> +               _p(mfn_x(page_to_mfn(page))),
>                 page->count_info, page->u.inuse.type_info);
>      }
>      spin_unlock(&d->page_alloc_lock);
> @@ -621,7 +621,8 @@ int arch_domain_soft_reset(struct domain *d)
>      struct page_info *page = virt_to_page(d->shared_info), *new_page;
>      int ret = 0;
>      struct domain *owner;
> -    unsigned long mfn, gfn;
> +    mfn_t mfn;
> +    unsigned long gfn;
>      p2m_type_t p2mt;
>      unsigned int i;
> 
> @@ -655,7 +656,7 @@ int arch_domain_soft_reset(struct domain *d)
>      ASSERT( owner == d );
> 
>      mfn = page_to_mfn(page);
> -    gfn = mfn_to_gmfn(d, mfn);
> +    gfn = mfn_to_gmfn(d, mfn_x(mfn));
> 
>      /*
>       * gfn == INVALID_GFN indicates that the shared_info page was never mapped
> @@ -664,7 +665,7 @@ int arch_domain_soft_reset(struct domain *d)
>      if ( gfn == gfn_x(INVALID_GFN) )
>          goto exit_put_page;
> 
> -    if ( mfn_x(get_gfn_query(d, gfn, &p2mt)) != mfn )
> +    if ( !mfn_eq(get_gfn_query(d, gfn, &p2mt), mfn) )
>      {
>          printk(XENLOG_G_ERR "Failed to get Dom%d's shared_info GFN (%lx)\n",
>                 d->domain_id, gfn);
> @@ -681,7 +682,7 @@ int arch_domain_soft_reset(struct domain *d)
>          goto exit_put_gfn;
>      }
> 
> -    ret = guest_physmap_remove_page(d, _gfn(gfn), _mfn(mfn), PAGE_ORDER_4K);
> +    ret = guest_physmap_remove_page(d, _gfn(gfn), mfn, PAGE_ORDER_4K);
>      if ( ret )
>      {
>          printk(XENLOG_G_ERR "Failed to remove Dom%d's shared_info frame %lx\n",
> @@ -690,7 +691,7 @@ int arch_domain_soft_reset(struct domain *d)
>          goto exit_put_gfn;
>      }
> 
> -    ret = guest_physmap_add_page(d, _gfn(gfn), _mfn(page_to_mfn(new_page)),
> +    ret = guest_physmap_add_page(d, _gfn(gfn), page_to_mfn(new_page),
>                                   PAGE_ORDER_4K);
>      if ( ret )
>      {
> @@ -988,7 +989,7 @@ int arch_set_info_guest(
>                  {
>                      if ( (page->u.inuse.type_info & PGT_type_mask) ==
>                           PGT_l4_page_table )
> -                        done = !fill_ro_mpt(_mfn(page_to_mfn(page)));
> +                        done = !fill_ro_mpt(page_to_mfn(page));
> 
>                      page_unlock(page);
>                  }
> @@ -1115,7 +1116,7 @@ int arch_set_info_guest(
>          l4_pgentry_t *l4tab;
> 
>          l4tab = map_domain_page(pagetable_get_mfn(v->arch.guest_table));
> -        *l4tab = l4e_from_pfn(page_to_mfn(cr3_page),
> +        *l4tab = l4e_from_mfn(page_to_mfn(cr3_page),
>              _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
>          unmap_domain_page(l4tab);
>      }
> @@ -1945,7 +1946,7 @@ int domain_relinquish_resources(struct domain *d)
>          if ( d->arch.pirq_eoi_map != NULL )
>          {
>              unmap_domain_page_global(d->arch.pirq_eoi_map);
> -            put_page_and_type(mfn_to_page(d->arch.pirq_eoi_map_mfn));
> +            put_page_and_type(mfn_to_page(_mfn(d->arch.pirq_eoi_map_mfn)));
>              d->arch.pirq_eoi_map = NULL;
>              d->arch.auto_unmask = 0;
>          }
> diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
> index 3432a854dd..88046b39c9 100644
> --- a/xen/arch/x86/domain_page.c
> +++ b/xen/arch/x86/domain_page.c
> @@ -331,13 +331,13 @@ void unmap_domain_page_global(const void *ptr)
>  }
> 
>  /* Translate a map-domain-page'd address to the underlying MFN */
> -unsigned long domain_page_map_to_mfn(const void *ptr)
> +mfn_t domain_page_map_to_mfn(const void *ptr)
>  {
>      unsigned long va = (unsigned long)ptr;
>      const l1_pgentry_t *pl1e;
> 
>      if ( va >= DIRECTMAP_VIRT_START )
> -        return virt_to_mfn(ptr);
> +        return _mfn(virt_to_mfn(ptr));
> 
>      if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END )
>      {
> @@ -350,5 +350,5 @@ unsigned long domain_page_map_to_mfn(const void *ptr)
>          pl1e = &__linear_l1_table[l1_linear_offset(va)];
>      }
> 
> -    return l1e_get_pfn(*pl1e);
> +    return l1e_get_mfn(*pl1e);
>  }
> diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
> index 80b4df9ec9..9f6c92411a 100644
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -429,7 +429,7 @@ long arch_do_domctl(
>          {
>              if ( i >= max_pfns )
>                  break;
> -            mfn = page_to_mfn(page);
> +            mfn = mfn_x(page_to_mfn(page));
>              if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
>                                        i, &mfn, 1) )
>              {
> diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
> index a787f43737..4760816ee6 100644
> --- a/xen/arch/x86/hvm/dm.c
> +++ b/xen/arch/x86/hvm/dm.c
> @@ -219,7 +219,7 @@ static int modified_memory(struct domain *d,
>              page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
>              if ( page )
>              {
> -                mfn_t gmfn = _mfn(page_to_mfn(page));
> +                mfn_t gmfn = page_to_mfn(page);
> 
>                  paging_mark_dirty(d, gmfn);
>                  /*
> diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
> index a67071c739..b50506ec63 100644
> --- a/xen/arch/x86/hvm/dom0_build.c
> +++ b/xen/arch/x86/hvm/dom0_build.c
> @@ -120,7 +120,7 @@ static int __init pvh_populate_memory_range(struct domain *d,
>              continue;
>          }
> 
> -        rc = guest_physmap_add_page(d, _gfn(start), _mfn(page_to_mfn(page)),
> +        rc = guest_physmap_add_page(d, _gfn(start), page_to_mfn(page),
>                                      order);
>          if ( rc != 0 )
>          {
> @@ -270,7 +270,7 @@ static int __init pvh_setup_vmx_realmode_helpers(struct domain *d)
>      }
>      write_32bit_pse_identmap(ident_pt);
>      unmap_domain_page(ident_pt);
> -    put_page(mfn_to_page(mfn_x(mfn)));
> +    put_page(mfn_to_page(mfn));
>      d->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] = gaddr;
>      if ( pvh_add_mem_range(d, gaddr, gaddr + PAGE_SIZE, E820_RESERVED) )
>              printk("Unable to set identity page tables as reserved in the memory map\n");
> @@ -288,7 +288,7 @@ static void __init pvh_steal_low_ram(struct domain *d, unsigned long start,
> 
>      for ( mfn = start; mfn < start + nr_pages; mfn++ )
>      {
> -        struct page_info *pg = mfn_to_page(mfn);
> +        struct page_info *pg = mfn_to_page(_mfn(mfn));
>          int rc;
> 
>          rc = unshare_xen_page_with_guest(pg, dom_io);
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index e924ce07c4..312aa91416 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -590,7 +590,7 @@ static void *hvmemul_map_linear_addr(
>              goto unhandleable;
>          }
> 
> -        *mfn++ = _mfn(page_to_mfn(page));
> +        *mfn++ = page_to_mfn(page);
> 
>          if ( p2m_is_discard_write(p2mt) )
>          {
> @@ -622,7 +622,7 @@ static void *hvmemul_map_linear_addr(
>   out:
>      /* Drop all held references. */
>      while ( mfn-- > hvmemul_ctxt->mfn )
> -        put_page(mfn_to_page(mfn_x(*mfn)));
> +        put_page(mfn_to_page(*mfn));
> 
>      return err;
>  }
> @@ -648,7 +648,7 @@ static void hvmemul_unmap_linear_addr(
>      {
>          ASSERT(mfn_valid(*mfn));
>          paging_mark_dirty(currd, *mfn);
> -        put_page(mfn_to_page(mfn_x(*mfn)));
> +        put_page(mfn_to_page(*mfn));
> 
>          *mfn++ = _mfn(0); /* Clean slot for map()'s error checking. */
>      }
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 205b4cb685..4d795ee700 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2211,7 +2211,7 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
>              v->arch.guest_table = pagetable_from_page(page);
> 
>              HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
> -                        v->arch.hvm_vcpu.guest_cr[3], page_to_mfn(page));
> +                        v->arch.hvm_vcpu.guest_cr[3], mfn_x(page_to_mfn(page)));
>          }
>      }
>      else if ( !(value & X86_CR0_PG) && (old_value & X86_CR0_PG) )
> @@ -2546,7 +2546,7 @@ static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent,
>          if ( unlikely(p2m_is_discard_write(p2mt)) )
>              *writable = 0;
>          else if ( !permanent )
> -            paging_mark_dirty(d, _mfn(page_to_mfn(page)));
> +            paging_mark_dirty(d, page_to_mfn(page));
>      }
> 
>      if ( !permanent )
> @@ -2588,7 +2588,7 @@ void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent)
> 
>  void hvm_unmap_guest_frame(void *p, bool_t permanent)
>  {
> -    unsigned long mfn;
> +    mfn_t mfn;
>      struct page_info *page;
> 
>      if ( !p )
> @@ -2609,7 +2609,7 @@ void hvm_unmap_guest_frame(void *p, bool_t permanent)
>          list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
>              if ( track->page == page )
>              {
> -                paging_mark_dirty(d, _mfn(mfn));
> +                paging_mark_dirty(d, mfn);
>                  list_del(&track->list);
>                  xfree(track);
>                  break;
> @@ -2626,7 +2626,7 @@ void hvm_mapped_guest_frames_mark_dirty(struct domain *d)
> 
>      spin_lock(&d->arch.hvm_domain.write_map.lock);
>      list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
> -        paging_mark_dirty(d, _mfn(page_to_mfn(track->page)));
> +        paging_mark_dirty(d, page_to_mfn(track->page));
>      spin_unlock(&d->arch.hvm_domain.write_map.lock);
>  }
> 
> @@ -3200,8 +3200,8 @@ static enum hvm_translation_result __hvm_copy(
> 
>                  if ( xchg(&lastpage, gfn_x(gfn)) != gfn_x(gfn) )
>                      dprintk(XENLOG_G_DEBUG,
> -                            "%pv attempted write to read-only gfn %#lx (mfn=%#lx)\n",
> -                            v, gfn_x(gfn), page_to_mfn(page));
> +                            "%pv attempted write to read-only gfn %#lx (mfn=%#"PRI_mfn")\n",
> +                            v, gfn_x(gfn), mfn_x(page_to_mfn(page)));
>              }
>              else
>              {
> @@ -3209,7 +3209,7 @@ static enum hvm_translation_result __hvm_copy(
>                      memcpy(p, buf, count);
>                  else
>                      memset(p, 0, count);
> -                paging_mark_dirty(v->domain, _mfn(page_to_mfn(page)));
> +                paging_mark_dirty(v->domain, page_to_mfn(page));
>              }
>          }
>          else
> diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
> index d5afe20cc8..0f823d201a 100644
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -268,7 +268,7 @@ static void hvm_remove_ioreq_gfn(
>      struct domain *d, struct hvm_ioreq_page *iorp)
>  {
>      if ( guest_physmap_remove_page(d, _gfn(iorp->gfn),
> -                                   _mfn(page_to_mfn(iorp->page)), 0) )
> +                                   page_to_mfn(iorp->page), 0) )
>          domain_crash(d);
>      clear_page(iorp->va);
>  }
> @@ -281,9 +281,9 @@ static int hvm_add_ioreq_gfn(
>      clear_page(iorp->va);
> 
>      rc = guest_physmap_add_page(d, _gfn(iorp->gfn),
> -                                _mfn(page_to_mfn(iorp->page)), 0);
> +                                page_to_mfn(iorp->page), 0);
>      if ( rc == 0 )
> -        paging_mark_dirty(d, _mfn(page_to_mfn(iorp->page)));
> +        paging_mark_dirty(d, page_to_mfn(iorp->page));
> 
>      return rc;
>  }
> diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
> index 088fbdf8ce..925bab2438 100644
> --- a/xen/arch/x86/hvm/stdvga.c
> +++ b/xen/arch/x86/hvm/stdvga.c
> @@ -590,7 +590,7 @@ void stdvga_init(struct domain *d)
>          if ( pg == NULL )
>              break;
>          s->vram_page[i] = pg;
> -        clear_domain_page(_mfn(page_to_mfn(pg)));
> +        clear_domain_page(page_to_mfn(pg));
>      }
> 
>      if ( i == ARRAY_SIZE(s->vram_page) )
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index b9cf423fd9..f50f931598 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -1521,7 +1521,7 @@ static int svm_cpu_up_prepare(unsigned int cpu)
>          if ( !pg )
>              goto err;
> 
> -        clear_domain_page(_mfn(page_to_mfn(pg)));
> +        clear_domain_page(page_to_mfn(pg));
>          *this_hsa = page_to_maddr(pg);
>      }
> 
> @@ -1531,7 +1531,7 @@ static int svm_cpu_up_prepare(unsigned int cpu)
>          if ( !pg )
>              goto err;
> 
> -        clear_domain_page(_mfn(page_to_mfn(pg)));
> +        clear_domain_page(page_to_mfn(pg));
>          *this_vmcb = page_to_maddr(pg);
>      }
> 
> diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
> index f0fa59d7d5..070551e1ab 100644
> --- a/xen/arch/x86/hvm/viridian.c
> +++ b/xen/arch/x86/hvm/viridian.c
> @@ -354,7 +354,7 @@ static void enable_hypercall_page(struct domain *d)
>          if ( page )
>              put_page(page);
>          gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
> -                 gmfn, page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
> +                 gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
>          return;
>      }
> 
> @@ -414,7 +414,7 @@ static void initialize_vp_assist(struct vcpu *v)
> 
>   fail:
>      gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n", gmfn,
> -             page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
> +             mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
>  }
> 
>  static void teardown_vp_assist(struct vcpu *v)
> @@ -494,7 +494,7 @@ static void update_reference_tsc(struct domain *d, bool_t initialize)
>          if ( page )
>              put_page(page);
>          gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
> -                 gmfn, page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
> +                 gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
>          return;
>      }
> 
> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
> index b5100b5021..8eaa58e3c0 100644
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -1437,7 +1437,7 @@ int vmx_vcpu_enable_pml(struct vcpu *v)
> 
>      vmx_vmcs_enter(v);
> 
> -    __vmwrite(PML_ADDRESS, page_to_mfn(v->arch.hvm_vmx.pml_pg) << PAGE_SHIFT);
> +    __vmwrite(PML_ADDRESS, page_to_maddr(v->arch.hvm_vmx.pml_pg));
>      __vmwrite(GUEST_PML_INDEX, NR_PML_ENTRIES - 1);
> 
>      v->arch.hvm_vmx.secondary_exec_control |=
> SECONDARY_EXEC_ENABLE_PML;
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index b18cceab55..c657ba89f5 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -2978,7 +2978,7 @@ gp_fault:
>  static int vmx_alloc_vlapic_mapping(struct domain *d)
>  {
>      struct page_info *pg;
> -    unsigned long mfn;
> +    mfn_t mfn;
> 
>      if ( !cpu_has_vmx_virtualize_apic_accesses )
>          return 0;
> @@ -2987,10 +2987,10 @@ static int vmx_alloc_vlapic_mapping(struct domain *d)
>      if ( !pg )
>          return -ENOMEM;
>      mfn = page_to_mfn(pg);
> -    clear_domain_page(_mfn(mfn));
> +    clear_domain_page(mfn);
>      share_xen_page_with_guest(pg, d, XENSHARE_writable);
> -    d->arch.hvm_domain.vmx.apic_access_mfn = mfn;
> -    set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), _mfn(mfn),
> +    d->arch.hvm_domain.vmx.apic_access_mfn = mfn_x(mfn);
> +    set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), mfn,
>                         PAGE_ORDER_4K, p2m_get_hostp2m(d)->default_access);
> 
>      return 0;
> @@ -3001,7 +3001,7 @@ static void vmx_free_vlapic_mapping(struct domain *d)
>      unsigned long mfn = d->arch.hvm_domain.vmx.apic_access_mfn;
> 
>      if ( mfn != 0 )
> -        free_shared_domheap_page(mfn_to_page(mfn));
> +        free_shared_domheap_page(mfn_to_page(_mfn(mfn)));
>  }
> 
>  static void vmx_install_vlapic_mapping(struct vcpu *v)
> diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
> index dde02c076b..4836d69c0e 100644
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -84,7 +84,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
>          }
>          v->arch.hvm_vmx.vmread_bitmap = vmread_bitmap;
> 
> -        clear_domain_page(_mfn(page_to_mfn(vmread_bitmap)));
> +        clear_domain_page(page_to_mfn(vmread_bitmap));
> 
>          vmwrite_bitmap = alloc_domheap_page(NULL, 0);
>          if ( !vmwrite_bitmap )
> @@ -1729,7 +1729,7 @@ int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
>                  nvcpu->nv_vvmcx = vvmcx;
>                  nvcpu->nv_vvmcxaddr = gpa;
>                  v->arch.hvm_vmx.vmcs_shadow_maddr =
> -                    pfn_to_paddr(domain_page_map_to_mfn(vvmcx));
> +                    mfn_to_maddr(domain_page_map_to_mfn(vvmcx));
>              }
>              else
>              {
> @@ -1815,7 +1815,7 @@ int nvmx_handle_vmclear(struct cpu_user_regs *regs)
>          {
>              if ( writable )
>                  clear_vvmcs_launched(&nvmx->launched_list,
> -                                     domain_page_map_to_mfn(vvmcs));
> +                                     mfn_x(domain_page_map_to_mfn(vvmcs)));
>              else
>                  rc = VMFAIL_VALID;
>              hvm_unmap_guest_frame(vvmcs, 0);
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index a20fdcaea4..f6ca4f884b 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -128,12 +128,6 @@
> 
>  #include "pv/mm.h"
> 
> -/* Override macros from asm/page.h to make them work with mfn_t */
> -#undef mfn_to_page
> -#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
> -#undef page_to_mfn
> -#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
> -
>  /* Mapping of the fixmap space needed early. */
>  l1_pgentry_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
>      l1_fixmap[L1_PAGETABLE_ENTRIES];
> diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
> index 6055fec1ad..f67aeda3d0 100644
> --- a/xen/arch/x86/mm/guest_walk.c
> +++ b/xen/arch/x86/mm/guest_walk.c
> @@ -469,20 +469,20 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
>      if ( l3p )
>      {
>          unmap_domain_page(l3p);
> -        put_page(mfn_to_page(mfn_x(gw->l3mfn)));
> +        put_page(mfn_to_page(gw->l3mfn));
>      }
>  #endif
>  #if GUEST_PAGING_LEVELS >= 3
>      if ( l2p )
>      {
>          unmap_domain_page(l2p);
> -        put_page(mfn_to_page(mfn_x(gw->l2mfn)));
> +        put_page(mfn_to_page(gw->l2mfn));
>      }
>  #endif
>      if ( l1p )
>      {
>          unmap_domain_page(l1p);
> -        put_page(mfn_to_page(mfn_x(gw->l1mfn)));
> +        put_page(mfn_to_page(gw->l1mfn));
>      }
> 
>      return walk_ok;
> diff --git a/xen/arch/x86/mm/hap/guest_walk.c b/xen/arch/x86/mm/hap/guest_walk.c
> index c550017ba4..cb3f9cebe7 100644
> --- a/xen/arch/x86/mm/hap/guest_walk.c
> +++ b/xen/arch/x86/mm/hap/guest_walk.c
> @@ -83,7 +83,7 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
>          *pfec &= ~PFEC_page_present;
>          goto out_tweak_pfec;
>      }
> -    top_mfn = _mfn(page_to_mfn(top_page));
> +    top_mfn = page_to_mfn(top_page);
> 
>      /* Map the top-level table and call the tree-walker */
>      ASSERT(mfn_valid(top_mfn));
> diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
> index 41deb90787..97f2516ecd 100644
> --- a/xen/arch/x86/mm/hap/hap.c
> +++ b/xen/arch/x86/mm/hap/hap.c
> @@ -42,12 +42,6 @@
> 
>  #include "private.h"
> 
> -/* Override macros from asm/page.h to make them work with mfn_t */
> -#undef mfn_to_page
> -#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
> -#undef page_to_mfn
> -#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
> -
>  /************************************************/
>  /*          HAP VRAM TRACKING SUPPORT           */
>  /************************************************/
> diff --git a/xen/arch/x86/mm/hap/nested_ept.c b/xen/arch/x86/mm/hap/nested_ept.c
> index 14b1bb01e9..1738df69f6 100644
> --- a/xen/arch/x86/mm/hap/nested_ept.c
> +++ b/xen/arch/x86/mm/hap/nested_ept.c
> @@ -173,7 +173,7 @@ nept_walk_tables(struct vcpu *v, unsigned long l2ga, ept_walk_t *gw)
>              goto map_err;
>          gw->lxe[lvl] = lxp[ept_lvl_table_offset(l2ga, lvl)];
>          unmap_domain_page(lxp);
> -        put_page(mfn_to_page(mfn_x(lxmfn)));
> +        put_page(mfn_to_page(lxmfn));
> 
>          if ( nept_non_present_check(gw->lxe[lvl]) )
>              goto non_present;
> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
> index 6f4be95515..6ecf0b27d5 100644
> --- a/xen/arch/x86/mm/mem_sharing.c
> +++ b/xen/arch/x86/mm/mem_sharing.c
> @@ -152,11 +152,6 @@ static inline shr_handle_t get_next_handle(void)
>  #define mem_sharing_enabled(d) \
>      (is_hvm_domain(d) && (d)->arch.hvm_domain.mem_sharing_enabled)
> 
> -#undef mfn_to_page
> -#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
> -#undef page_to_mfn
> -#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
> -
>  static atomic_t nr_saved_mfns   = ATOMIC_INIT(0);
>  static atomic_t nr_shared_mfns  = ATOMIC_INIT(0);
> 
> diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
> index b4996ce658..8885916c0a 100644
> --- a/xen/arch/x86/mm/p2m-ept.c
> +++ b/xen/arch/x86/mm/p2m-ept.c
> @@ -33,6 +33,10 @@
> 
>  #include "mm-locks.h"
> 
> +/* Override macros from asm/page.h to avoid using typesafe mfn_t. */
> +#undef mfn_to_page
> +#define mfn_to_page(mfn) __mfn_to_page(_mfn(mfn))
> +
>  #define atomic_read_ept_entry(__pepte)                              \
>      ( (ept_entry_t) { .epte = read_atomic(&(__pepte)->epte) } )
> 
> diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
> index 0a811ccf28..7a88074c31 100644
> --- a/xen/arch/x86/mm/p2m-pod.c
> +++ b/xen/arch/x86/mm/p2m-pod.c
> @@ -29,12 +29,6 @@
> 
>  #include "mm-locks.h"
> 
> -/* Override macros from asm/page.h to make them work with mfn_t */
> -#undef mfn_to_page
> -#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
> -#undef page_to_mfn
> -#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
> -
>  #define superpage_aligned(_x)  (((_x)&(SUPERPAGE_PAGES-1))==0)
> 
>  /* Enforce lock ordering when grabbing the "external" page_alloc lock */
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index c72a3cdebb..b71f51e0b2 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -47,12 +47,6 @@ bool_t __initdata opt_hap_1gb = 1, __initdata opt_hap_2mb = 1;
>  boolean_param("hap_1gb", opt_hap_1gb);
>  boolean_param("hap_2mb", opt_hap_2mb);
> 
> -/* Override macros from asm/page.h to make them work with mfn_t */
> -#undef mfn_to_page
> -#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
> -#undef page_to_mfn
> -#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
> -
>  DEFINE_PERCPU_RWLOCK_GLOBAL(p2m_percpu_rwlock);
> 
>  /* Init the datastructures for later use by the p2m code */
> diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
> index 1e2c9ba4cc..cb97642cbc 100644
> --- a/xen/arch/x86/mm/paging.c
> +++ b/xen/arch/x86/mm/paging.c
> @@ -47,12 +47,6 @@
>  /* Per-CPU variable for enforcing the lock ordering */
>  DEFINE_PER_CPU(int, mm_lock_level);
> 
> -/* Override macros from asm/page.h to make them work with mfn_t */
> -#undef mfn_to_page
> -#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
> -#undef page_to_mfn
> -#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
> -
>  /************************************************/
>  /*              LOG DIRTY SUPPORT               */
>  /************************************************/
> diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
> index 6a03370402..b9cc680f4e 100644
> --- a/xen/arch/x86/mm/shadow/private.h
> +++ b/xen/arch/x86/mm/shadow/private.h
> @@ -315,7 +315,7 @@ static inline int page_is_out_of_sync(struct page_info *p)
> 
>  static inline int mfn_is_out_of_sync(mfn_t gmfn)
>  {
> -    return page_is_out_of_sync(mfn_to_page(mfn_x(gmfn)));
> +    return page_is_out_of_sync(mfn_to_page(gmfn));
>  }
> 
>  static inline int page_oos_may_write(struct page_info *p)
> @@ -326,7 +326,7 @@ static inline int page_oos_may_write(struct page_info *p)
> 
>  static inline int mfn_oos_may_write(mfn_t gmfn)
>  {
> -    return page_oos_may_write(mfn_to_page(mfn_x(gmfn)));
> +    return page_oos_may_write(mfn_to_page(gmfn));
>  }
>  #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) */
> 
> @@ -465,18 +465,6 @@ void sh_reset_l3_up_pointers(struct vcpu *v);
>   * MFN/page-info handling
>   */
> 
> -/* Override macros from asm/page.h to make them work with mfn_t */
> -#undef mfn_to_page
> -#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
> -#undef page_to_mfn
> -#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
> -
> -/* Override pagetable_t <-> struct page_info conversions to work with mfn_t */
> -#undef pagetable_get_page
> -#define pagetable_get_page(x)   mfn_to_page(pagetable_get_mfn(x))
> -#undef pagetable_from_page
> -#define pagetable_from_page(pg) pagetable_from_mfn(page_to_mfn(pg))
> -
>  #define backpointer(sp) _mfn(pdx_to_pfn((unsigned long)(sp)->v.sh.back))
>  static inline unsigned long __backpointer(const struct page_info *sp)
>  {
> diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
> index 4fc967f893..a87987da6f 100644
> --- a/xen/arch/x86/numa.c
> +++ b/xen/arch/x86/numa.c
> @@ -430,7 +430,7 @@ static void dump_numa(unsigned char key)
>          spin_lock(&d->page_alloc_lock);
>          page_list_for_each(page, &d->page_list)
>          {
> -            i = phys_to_nid((paddr_t)page_to_mfn(page) << PAGE_SHIFT);
> +            i = phys_to_nid(page_to_maddr(page));
>              page_num_node[i]++;
>          }
>          spin_unlock(&d->page_alloc_lock);
> diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
> index a5fedca671..5422f3e372 100644
> --- a/xen/arch/x86/physdev.c
> +++ b/xen/arch/x86/physdev.c
> @@ -242,7 +242,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>          }
> 
>          if ( cmpxchg(&currd->arch.pirq_eoi_map_mfn,
> -                     0, page_to_mfn(page)) != 0 )
> +                     0, mfn_x(page_to_mfn(page))) != 0 )
>          {
>              put_page_and_type(page);
>              ret = -EBUSY;
> diff --git a/xen/arch/x86/pv/callback.c b/xen/arch/x86/pv/callback.c
> index 97d8438600..5957cb5085 100644
> --- a/xen/arch/x86/pv/callback.c
> +++ b/xen/arch/x86/pv/callback.c
> @@ -31,12 +31,6 @@
> 
>  #include <public/callback.h>
> 
> -/* Override macros from asm/page.h to make them work with mfn_t */
> -#undef mfn_to_page
> -#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
> -#undef page_to_mfn
> -#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
> -
>  static int register_guest_nmi_callback(unsigned long address)
>  {
>      struct vcpu *curr = current;
> diff --git a/xen/arch/x86/pv/descriptor-tables.c b/xen/arch/x86/pv/descriptor-tables.c
> index 81973af124..f2b20f9910 100644
> --- a/xen/arch/x86/pv/descriptor-tables.c
> +++ b/xen/arch/x86/pv/descriptor-tables.c
> @@ -25,16 +25,6 @@
>  #include <asm/p2m.h>
>  #include <asm/pv/mm.h>
> 
> -/* Override macros from asm/page.h to make them work with mfn_t */
> -#undef mfn_to_page
> -#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
> -#undef page_to_mfn
> -#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
> -
> -/*******************
> - * Descriptor Tables
> - */
> -
>  void pv_destroy_gdt(struct vcpu *v)
>  {
>      l1_pgentry_t *pl1e;
> diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
> index 44601d08d3..1ac843e603 100644
> --- a/xen/arch/x86/pv/dom0_build.c
> +++ b/xen/arch/x86/pv/dom0_build.c
> @@ -20,6 +20,12 @@
>  #include <asm/page.h>
>  #include <asm/setup.h>
> 
> +/* Override macros from asm/page.h to avoid using typesafe mfn_t. */
> +#undef page_to_mfn
> +#define page_to_mfn(pg) mfn_x(__page_to_mfn(pg))
> +#undef mfn_to_page
> +#define mfn_to_page(mfn) __mfn_to_page(_mfn(mfn))
> +
>  /* Allow ring-3 access in long mode as guest cannot use ring 1 ... */
>  #define BASE_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
>  #define L1_PROT (BASE_PROT|_PAGE_GUEST_KERNEL)
> diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
> index 2234128bb3..93f8fa4323 100644
> --- a/xen/arch/x86/pv/domain.c
> +++ b/xen/arch/x86/pv/domain.c
> @@ -11,12 +11,6 @@
> 
>  #include <asm/pv/domain.h>
> 
> -/* Override macros from asm/page.h to make them work with mfn_t */
> -#undef mfn_to_page
> -#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
> -#undef page_to_mfn
> -#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
> -
>  static void noreturn continue_nonidle_domain(struct vcpu *v)
>  {
>      check_wakeup_from_wait();
> diff --git a/xen/arch/x86/pv/emul-gate-op.c b/xen/arch/x86/pv/emul-gate-op.c
> index 0f89c91dff..5cdb54c937 100644
> --- a/xen/arch/x86/pv/emul-gate-op.c
> +++ b/xen/arch/x86/pv/emul-gate-op.c
> @@ -41,12 +41,6 @@
> 
>  #include "emulate.h"
> 
> -/* Override macros from asm/page.h to make them work with mfn_t */
> -#undef mfn_to_page
> -#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
> -#undef page_to_mfn
> -#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
> -
>  static int read_gate_descriptor(unsigned int gate_sel,
>                                  const struct vcpu *v,
>                                  unsigned int *sel,
> diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
> index 2f9264548a..6ebaf2f1e3 100644
> --- a/xen/arch/x86/pv/emul-priv-op.c
> +++ b/xen/arch/x86/pv/emul-priv-op.c
> @@ -43,16 +43,6 @@
>  #include "emulate.h"
>  #include "mm.h"
> 
> -/* Override macros from asm/page.h to make them work with mfn_t */
> -#undef mfn_to_page
> -#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
> -#undef page_to_mfn
> -#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
> -
> -/***********************
> - * I/O emulation support
> - */
> -
>  struct priv_op_ctxt {
>      struct x86_emulate_ctxt ctxt;
>      struct {
> diff --git a/xen/arch/x86/pv/grant_table.c b/xen/arch/x86/pv/grant_table.c
> index aaca228c6b..97323367c5 100644
> --- a/xen/arch/x86/pv/grant_table.c
> +++ b/xen/arch/x86/pv/grant_table.c
> @@ -27,12 +27,6 @@
> 
>  #include "mm.h"
> 
> -/* Override macros from asm/page.h to make them work with mfn_t */
> -#undef mfn_to_page
> -#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
> -#undef page_to_mfn
> -#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
> -
>  static unsigned int grant_to_pte_flags(unsigned int grant_flags,
>                                         unsigned int cache_flags)
>  {
> diff --git a/xen/arch/x86/pv/ro-page-fault.c b/xen/arch/x86/pv/ro-page-fault.c
> index 6b2976d3df..a7b7eb5113 100644
> --- a/xen/arch/x86/pv/ro-page-fault.c
> +++ b/xen/arch/x86/pv/ro-page-fault.c
> @@ -33,12 +33,6 @@
>  #include "emulate.h"
>  #include "mm.h"
> 
> -/* Override macros from asm/page.h to make them work with mfn_t */
> -#undef mfn_to_page
> -#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
> -#undef page_to_mfn
> -#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
> -
>  /*********************
>   * Writable Pagetables
>   */
> diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
> index 1609b627ae..0791345a47 100644
> --- a/xen/arch/x86/smpboot.c
> +++ b/xen/arch/x86/smpboot.c
> @@ -46,12 +46,6 @@
>  #include <mach_wakecpu.h>
>  #include <smpboot_hooks.h>
> 
> -/* Override macros from asm/page.h to make them work with mfn_t */
> -#undef mfn_to_page
> -#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
> -#undef page_to_mfn
> -#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
> -
>  #define setup_trampoline() (bootsym_phys(trampoline_realmode_entry))
> 
>  unsigned long __read_mostly trampoline_phys;
> diff --git a/xen/arch/x86/tboot.c b/xen/arch/x86/tboot.c
> index 59d7c477f4..e9522f06ec 100644
> --- a/xen/arch/x86/tboot.c
> +++ b/xen/arch/x86/tboot.c
> @@ -184,7 +184,7 @@ static void update_pagetable_mac(vmac_ctx_t *ctx)
> 
>      for ( mfn = 0; mfn < max_page; mfn++ )
>      {
> -        struct page_info *page = mfn_to_page(mfn);
> +        struct page_info *page = mfn_to_page(_mfn(mfn));
> 
>          if ( !mfn_valid(_mfn(mfn)) )
>              continue;
> @@ -276,7 +276,7 @@ static void tboot_gen_xenheap_integrity(const uint8_t key[TB_KEY_SIZE],
>      vmac_set_key((uint8_t *)key, &ctx);
>      for ( mfn = 0; mfn < max_page; mfn++ )
>      {
> -        struct page_info *page = __mfn_to_page(mfn);
> +        struct page_info *page = mfn_to_page(_mfn(mfn));
> 
>          if ( !mfn_valid(_mfn(mfn)) )
>              continue;
> diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
> index 86506f3747..e2562eee4e 100644
> --- a/xen/arch/x86/traps.c
> +++ b/xen/arch/x86/traps.c
> @@ -810,8 +810,8 @@ int wrmsr_hypervisor_regs(uint32_t idx, uint64_t val)
>              }
> 
>              gdprintk(XENLOG_WARNING,
> -                     "Bad GMFN %lx (MFN %lx) to MSR %08x\n",
> -                     gmfn, page ? page_to_mfn(page) : -1UL, base);
> +                     "Bad GMFN %lx (MFN %#"PRI_mfn") to MSR %08x\n",
> +                     gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN), base);
>              return 0;
>          }
> 
> diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
> index 34cd8457cf..ccad3d448b 100644
> --- a/xen/arch/x86/x86_64/mm.c
> +++ b/xen/arch/x86/x86_64/mm.c
> @@ -40,6 +40,12 @@ asm(".file \"" __FILE__ "\"");
>  #include <asm/mem_sharing.h>
>  #include <public/memory.h>
> 
> +/* Override macros from asm/page.h to avoid using typesafe mfn_t. */
> +#undef page_to_mfn
> +#define page_to_mfn(pg) mfn_x(__page_to_mfn(pg))
> +#undef mfn_to_page
> +#define mfn_to_page(mfn) __mfn_to_page(_mfn(mfn))
> +
>  unsigned int __read_mostly m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
> 
>  l2_pgentry_t *compat_idle_pg_table_l2;
> diff --git a/xen/common/domain.c b/xen/common/domain.c
> index 5aebcf265f..e8302e8e1b 100644
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -1192,7 +1192,7 @@ int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset)
>      }
> 
>      v->vcpu_info = new_info;
> -    v->vcpu_info_mfn = _mfn(page_to_mfn(page));
> +    v->vcpu_info_mfn = page_to_mfn(page);
> 
>      /* Set new vcpu_info pointer /before/ setting pending flags. */
>      smp_wmb();
> @@ -1225,7 +1225,7 @@ void unmap_vcpu_info(struct vcpu *v)
> 
>      vcpu_info_reset(v); /* NB: Clobbers v->vcpu_info_mfn */
> 
> -    put_page_and_type(mfn_to_page(mfn_x(mfn)));
> +    put_page_and_type(mfn_to_page(mfn));
>  }
> 
>  int default_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
> diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
> index c5950f2b3f..73a9d0293b 100644
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -40,6 +40,12 @@
>  #include <xsm/xsm.h>
>  #include <asm/flushtlb.h>
> 
> +/* Override macros from asm/page.h to avoid using typesafe mfn_t. */
> +#undef page_to_mfn
> +#define page_to_mfn(pg) mfn_x(__page_to_mfn(pg))
> +#undef mfn_to_page
> +#define mfn_to_page(mfn) __mfn_to_page(_mfn(mfn))
> +
>  /* Per-domain grant information. */
>  struct grant_table {
>      /*
> diff --git a/xen/common/kimage.c b/xen/common/kimage.c
> index afd8292cc1..210241dfb7 100644
> --- a/xen/common/kimage.c
> +++ b/xen/common/kimage.c
> @@ -23,12 +23,6 @@
> 
>  #include <asm/page.h>
> 
> -/* Override macros from asm/page.h to make them work with mfn_t */
> -#undef mfn_to_page
> -#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
> -#undef page_to_mfn
> -#define page_to_mfn(pg)  _mfn(__page_to_mfn(pg))
> -
>  /*
>   * When kexec transitions to the new kernel there is a one-to-one
>   * mapping between physical and virtual addresses.  On processors
> diff --git a/xen/common/memory.c b/xen/common/memory.c
> index ad987e0f29..e467f271c7 100644
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -29,6 +29,12 @@
>  #include <public/memory.h>
>  #include <xsm/xsm.h>
> 
> +/* Override macros from asm/page.h to avoid using typesafe mfn_t. */
> +#undef page_to_mfn
> +#define page_to_mfn(pg) mfn_x(__page_to_mfn(pg))
> +#undef mfn_to_page
> +#define mfn_to_page(mfn) __mfn_to_page(_mfn(mfn))
> +
>  struct memop_args {
>      /* INPUT */
>      struct domain *domain;     /* Domain to be affected. */
> diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
> index 5616a82263..34c2089cd2 100644
> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -150,6 +150,12 @@
>  #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
>  #endif
> 
> +/* Override macros from asm/page.h to avoid using typesafe mfn_t. */
> +#undef page_to_mfn
> +#define page_to_mfn(pg) mfn_x(__page_to_mfn(pg))
> +#undef mfn_to_page
> +#define mfn_to_page(mfn) __mfn_to_page(_mfn(mfn))
> +
>  /*
>   * Comma-separated list of hexadecimal page numbers containing bad bytes.
>   * e.g. 'badpage=0x3f45,0x8a321'.
> diff --git a/xen/common/tmem.c b/xen/common/tmem.c
> index 324f42a6f9..c077f87e77 100644
> --- a/xen/common/tmem.c
> +++ b/xen/common/tmem.c
> @@ -243,7 +243,7 @@ static void tmem_persistent_pool_page_put(void *page_va)
>      struct page_info *pi;
> 
>      ASSERT(IS_PAGE_ALIGNED(page_va));
> -    pi = mfn_to_page(virt_to_mfn(page_va));
> +    pi = mfn_to_page(_mfn(virt_to_mfn(page_va)));
>      ASSERT(IS_VALID_PAGE(pi));
>      __tmem_free_page_thispool(pi);
>  }
> diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
> index bd52e44faf..bf7b14f79a 100644
> --- a/xen/common/tmem_xen.c
> +++ b/xen/common/tmem_xen.c
> @@ -14,10 +14,6 @@
>  #include <xen/cpu.h>
>  #include <xen/init.h>
> 
> -/* Override macros from asm/page.h to make them work with mfn_t */
> -#undef page_to_mfn
> -#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
> -
>  bool __read_mostly opt_tmem;
>  boolean_param("tmem", opt_tmem);
> 
> diff --git a/xen/common/trace.c b/xen/common/trace.c
> index 2e18702317..cf8f8b0997 100644
> --- a/xen/common/trace.c
> +++ b/xen/common/trace.c
> @@ -42,6 +42,12 @@ CHECK_t_buf;
>  #define compat_t_rec t_rec
>  #endif
> 
> +/* Override macros from asm/page.h to avoid using typesafe mfn_t. */
> +#undef page_to_mfn
> +#define page_to_mfn(pg) mfn_x(__page_to_mfn(pg))
> +#undef mfn_to_page
> +#define mfn_to_page(mfn) __mfn_to_page(_mfn(mfn))
> +
>  /* opt_tbuf_size: trace buffer size (in pages) for each cpu */
>  static unsigned int opt_tbuf_size;
>  static unsigned int opt_tevt_mask;
> diff --git a/xen/common/vmap.c b/xen/common/vmap.c
> index 0b23f8fb97..10f32b29e0 100644
> --- a/xen/common/vmap.c
> +++ b/xen/common/vmap.c
> @@ -36,7 +36,7 @@ void __init vm_init_type(enum vmap_region type, void *start, void *end)
>      {
>          struct page_info *pg = alloc_domheap_page(NULL, 0);
> 
> -        map_pages_to_xen(va, page_to_mfn(pg), 1, PAGE_HYPERVISOR);
> +    map_pages_to_xen(va, mfn_x(page_to_mfn(pg)), 1, PAGE_HYPERVISOR);
>          clear_page((void *)va);
>      }
>      bitmap_fill(vm_bitmap(type), vm_low[type]);
> @@ -107,7 +107,8 @@ static void *vm_alloc(unsigned int nr, unsigned int align,
>          {
>              unsigned long va = (unsigned long)vm_bitmap(t) + vm_top[t] / 8;
> 
> -            if ( !map_pages_to_xen(va, page_to_mfn(pg), 1, PAGE_HYPERVISOR) )
> +            if ( !map_pages_to_xen(va, mfn_x(page_to_mfn(pg)),
> +                                   1, PAGE_HYPERVISOR) )
>              {
>                  clear_page((void *)va);
>                  vm_top[t] += PAGE_SIZE * 8;
> @@ -258,7 +259,7 @@ static void *vmalloc_type(size_t size, enum vmap_region type)
>          pg = alloc_domheap_page(NULL, 0);
>          if ( pg == NULL )
>              goto error;
> -        mfn[i] = _mfn(page_to_mfn(pg));
> +        mfn[i] = page_to_mfn(pg);
>      }
> 
>      va = __vmap(mfn, 1, pages, 1, PAGE_HYPERVISOR, type);
> @@ -270,7 +271,7 @@ static void *vmalloc_type(size_t size, enum vmap_region type)
> 
>   error:
>      while ( i-- )
> -        free_domheap_page(mfn_to_page(mfn_x(mfn[i])));
> +        free_domheap_page(mfn_to_page(mfn[i]));
>      xfree(mfn);
>      return NULL;
>  }
> diff --git a/xen/common/xenoprof.c b/xen/common/xenoprof.c
> index 5acdde5691..fecdfb3697 100644
> --- a/xen/common/xenoprof.c
> +++ b/xen/common/xenoprof.c
> @@ -22,8 +22,6 @@
>  /* Override macros from asm/page.h to make them work with mfn_t */
>  #undef virt_to_mfn
>  #define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
> -#undef mfn_to_page
> -#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
> 
>  /* Limit amount of pages used for shared buffer (per domain) */
>  #define MAX_OPROF_SHARED_PAGES 32
> diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
> index fd2327d3e5..bd62c2ce90 100644
> --- a/xen/drivers/passthrough/amd/iommu_map.c
> +++ b/xen/drivers/passthrough/amd/iommu_map.c
> @@ -25,6 +25,12 @@
>  #include "../ats.h"
>  #include <xen/pci.h>
> 
> +/* Override macros from asm/page.h to avoid using typesafe mfn_t. */
> +#undef page_to_mfn
> +#define page_to_mfn(pg) mfn_x(__page_to_mfn(pg))
> +#undef mfn_to_page
> +#define mfn_to_page(mfn) __mfn_to_page(_mfn(mfn))
> +
>  /* Given pfn and page table level, return pde index */
>  static unsigned int pfn_to_pde_idx(unsigned long pfn, unsigned int level)
>  {
> diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
> index 1aecf7cf34..2c44fabf99 100644
> --- a/xen/drivers/passthrough/iommu.c
> +++ b/xen/drivers/passthrough/iommu.c
> @@ -184,7 +184,7 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
> 
>          page_list_for_each ( page, &d->page_list )
>          {
> -            unsigned long mfn = page_to_mfn(page);
> +            unsigned long mfn = mfn_x(page_to_mfn(page));
>              unsigned long gfn = mfn_to_gmfn(d, mfn);
>              unsigned int mapping = IOMMUF_readable;
>              int ret;
> diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
> index 0253823173..68182afd91 100644
> --- a/xen/drivers/passthrough/x86/iommu.c
> +++ b/xen/drivers/passthrough/x86/iommu.c
> @@ -58,7 +58,7 @@ int arch_iommu_populate_page_table(struct domain *d)
>          if ( is_hvm_domain(d) ||
>              (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
>          {
> -            unsigned long mfn = page_to_mfn(page);
> +            unsigned long mfn = mfn_x(page_to_mfn(page));
>              unsigned long gfn = mfn_to_gmfn(d, mfn);
> 
>              if ( gfn != gfn_x(INVALID_GFN) )
> diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
> index 737a429409..3eb4b68761 100644
> --- a/xen/include/asm-arm/mm.h
> +++ b/xen/include/asm-arm/mm.h
> @@ -138,7 +138,7 @@ extern vaddr_t xenheap_virt_start;
>  #endif
> 
>  #ifdef CONFIG_ARM_32
> -#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
> +#define is_xen_heap_page(page) is_xen_heap_mfn(mfn_x(__page_to_mfn(page)))
>  #define is_xen_heap_mfn(mfn) ({                                 \
>      unsigned long mfn_ = (mfn);                                 \
>      (mfn_ >= mfn_x(xenheap_mfn_start) &&                        \
> @@ -220,12 +220,14 @@ static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
>  })
> 
>  /* Convert between machine frame numbers and page-info structures. */
> -#define __mfn_to_page(mfn)  (frame_table + (pfn_to_pdx(mfn) - frametable_base_pdx))
> -#define __page_to_mfn(pg)   pdx_to_pfn((unsigned long)((pg) - frame_table) + frametable_base_pdx)
> +#define __mfn_to_page(mfn)                                          \
> +    (frame_table + (pfn_to_pdx(mfn_x(mfn)) - frametable_base_pdx))
> +#define __page_to_mfn(pg)                                           \
> +    _mfn(pdx_to_pfn((unsigned long)((pg) - frame_table) + frametable_base_pdx))
> 
>  /* Convert between machine addresses and page-info structures. */
> -#define maddr_to_page(ma) __mfn_to_page((ma) >> PAGE_SHIFT)
> -#define page_to_maddr(pg) ((paddr_t)__page_to_mfn(pg) << PAGE_SHIFT)
> +#define maddr_to_page(ma) __mfn_to_page(maddr_to_mfn(ma))
> +#define page_to_maddr(pg) (mfn_to_maddr(__page_to_mfn(pg)))
> 
>  /* Convert between frame number and address formats.  */
>  #define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
> @@ -235,7 +237,7 @@ static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
>  #define gaddr_to_gfn(ga)    _gfn(paddr_to_pfn(ga))
>  #define mfn_to_maddr(mfn)   pfn_to_paddr(mfn_x(mfn))
>  #define maddr_to_mfn(ma)    _mfn(paddr_to_pfn(ma))
> -#define vmap_to_mfn(va)     paddr_to_pfn(virt_to_maddr((vaddr_t)va))
> +#define vmap_to_mfn(va)     maddr_to_mfn(virt_to_maddr((vaddr_t)va))
>  #define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
> 
>  /* Page-align address and convert to frame number format */
> @@ -309,7 +311,7 @@ static inline struct page_info *virt_to_page(const void *v)
> 
>  static inline void *page_to_virt(const struct page_info *pg)
>  {
> -    return mfn_to_virt(page_to_mfn(pg));
> +    return mfn_to_virt(mfn_x(__page_to_mfn(pg)));
>  }
> 
>  struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va,
> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
> index faadcfe8fe..87c9994974 100644
> --- a/xen/include/asm-arm/p2m.h
> +++ b/xen/include/asm-arm/p2m.h
> @@ -276,7 +276,7 @@ static inline struct page_info *get_page_from_gfn(
>  {
>      struct page_info *page;
>      p2m_type_t p2mt;
> -    unsigned long mfn = mfn_x(p2m_lookup(d, _gfn(gfn), &p2mt));
> +    mfn_t mfn = p2m_lookup(d, _gfn(gfn), &p2mt);
> 
>      if (t)
>          *t = p2mt;
> @@ -284,7 +284,7 @@ static inline struct page_info *get_page_from_gfn(
>      if ( !p2m_is_any_ram(p2mt) )
>          return NULL;
> 
> -    if ( !mfn_valid(_mfn(mfn)) )
> +    if ( !mfn_valid(mfn) )
>          return NULL;
>      page = mfn_to_page(mfn);
> 
> diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
> index 83626085e0..c8dd273517 100644
> --- a/xen/include/asm-x86/mm.h
> +++ b/xen/include/asm-x86/mm.h
> @@ -270,7 +270,7 @@ struct page_info
> 
>  #define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
>  #define is_xen_heap_mfn(mfn) \
> -    (__mfn_valid(mfn) && is_xen_heap_page(__mfn_to_page(mfn)))
> +    (__mfn_valid(mfn) && is_xen_heap_page(__mfn_to_page(_mfn(mfn))))
>  #define is_xen_fixed_mfn(mfn)                     \
>      ((((mfn) << PAGE_SHIFT) >= __pa(&_stext)) &&  \
>       (((mfn) << PAGE_SHIFT) <= __pa(&__2M_rwdata_end)))
> @@ -383,7 +383,7 @@ void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner);
> 
>  static inline bool get_page_from_mfn(mfn_t mfn, struct domain *d)
>  {
> -    struct page_info *page = __mfn_to_page(mfn_x(mfn));
> +    struct page_info *page = __mfn_to_page(mfn);
> 
>      if ( unlikely(!mfn_valid(mfn)) || unlikely(!get_page(page, d)) )
>      {
> @@ -477,10 +477,10 @@ extern paddr_t mem_hotplug;
>  #define SHARED_M2P(_e)           ((_e) == SHARED_M2P_ENTRY)
> 
>  #define compat_machine_to_phys_mapping ((unsigned int *)RDWR_COMPAT_MPT_VIRT_START)
> -#define _set_gpfn_from_mfn(mfn, pfn) ({                        \
> -    struct domain *d = page_get_owner(__mfn_to_page(mfn));     \
> -    unsigned long entry = (d && (d == dom_cow)) ?              \
> -        SHARED_M2P_ENTRY : (pfn);                              \
> +#define _set_gpfn_from_mfn(mfn, pfn) ({                         \
> +    struct domain *d = page_get_owner(__mfn_to_page(_mfn(mfn)));    \
> +    unsigned long entry = (d && (d == dom_cow)) ?               \
> +        SHARED_M2P_ENTRY : (pfn);                               \
>      ((void)((mfn) >= (RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) / 4 || \
>              (compat_machine_to_phys_mapping[(mfn)] = (unsigned int)(entry))), \
>       machine_to_phys_mapping[(mfn)] = (entry));                \
> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
> index 17b1d0c8d3..a2a216061f 100644
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -487,7 +487,7 @@ static inline struct page_info *get_page_from_gfn(
>      /* Non-translated guests see 1-1 RAM / MMIO mappings everywhere */
>      if ( t )
>          *t = likely(d != dom_io) ? p2m_ram_rw : p2m_mmio_direct;
> -    page = __mfn_to_page(gfn);
> +    page = __mfn_to_page(_mfn(gfn));
>      return mfn_valid(_mfn(gfn)) && get_page(page, d) ? page : NULL;
>  }
> 
> diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
> index 45ca742678..8737ef16ff 100644
> --- a/xen/include/asm-x86/page.h
> +++ b/xen/include/asm-x86/page.h
> @@ -88,10 +88,10 @@
>      ((paddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK))))
> 
>  /* Get pointer to info structure of page mapped by pte (struct page_info *). */
> -#define l1e_get_page(x)           (__mfn_to_page(l1e_get_pfn(x)))
> -#define l2e_get_page(x)           (__mfn_to_page(l2e_get_pfn(x)))
> -#define l3e_get_page(x)           (__mfn_to_page(l3e_get_pfn(x)))
> -#define l4e_get_page(x)           (__mfn_to_page(l4e_get_pfn(x)))
> +#define l1e_get_page(x)           (__mfn_to_page(l1e_get_mfn(x)))
> +#define l2e_get_page(x)           (__mfn_to_page(l2e_get_mfn(x)))
> +#define l3e_get_page(x)           (__mfn_to_page(l3e_get_mfn(x)))
> +#define l4e_get_page(x)           (__mfn_to_page(l4e_get_mfn(x)))
> 
>  /* Get pte access flags (unsigned int). */
>  #define l1e_get_flags(x)           (get_pte_flags((x).l1))
> @@ -157,10 +157,10 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
>  #define l4e_from_intpte(intpte)    ((l4_pgentry_t) { (intpte_t)(intpte) })
> 
>  /* Construct a pte from a page pointer and access flags. */
> -#define l1e_from_page(page, flags) l1e_from_pfn(__page_to_mfn(page), (flags))
> -#define l2e_from_page(page, flags) l2e_from_pfn(__page_to_mfn(page), (flags))
> -#define l3e_from_page(page, flags) l3e_from_pfn(__page_to_mfn(page), (flags))
> -#define l4e_from_page(page, flags) l4e_from_pfn(__page_to_mfn(page), (flags))
> +#define l1e_from_page(page, flags) l1e_from_mfn(__page_to_mfn(page), (flags))
> +#define l2e_from_page(page, flags) l2e_from_mfn(__page_to_mfn(page), (flags))
> +#define l3e_from_page(page, flags) l3e_from_mfn(__page_to_mfn(page), (flags))
> +#define l4e_from_page(page, flags) l4e_from_mfn(__page_to_mfn(page), (flags))
> 
>  /* Add extra flags to an existing pte. */
>  #define l1e_add_flags(x, flags)    ((x).l1 |= put_pte_flags(flags))
> @@ -215,13 +215,13 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
>  /* Page-table type. */
>  typedef struct { u64 pfn; } pagetable_t;
>  #define pagetable_get_paddr(x)  ((paddr_t)(x).pfn << PAGE_SHIFT)
> -#define pagetable_get_page(x)   __mfn_to_page((x).pfn)
> +#define pagetable_get_page(x)   __mfn_to_page(pagetable_get_mfn(x))
>  #define pagetable_get_pfn(x)    ((x).pfn)
>  #define pagetable_get_mfn(x)    _mfn(((x).pfn))
>  #define pagetable_is_null(x)    ((x).pfn == 0)
>  #define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) })
>  #define pagetable_from_mfn(mfn) ((pagetable_t) { mfn_x(mfn) })
> -#define pagetable_from_page(pg) pagetable_from_pfn(__page_to_mfn(pg))
> +#define pagetable_from_page(pg) pagetable_from_mfn(__page_to_mfn(pg))
>  #define pagetable_from_paddr(p) pagetable_from_pfn((p)>>PAGE_SHIFT)
>  #define pagetable_null()        pagetable_from_pfn(0)
> 
> @@ -240,12 +240,12 @@ void copy_page_sse2(void *, const void *);
>  #define __mfn_to_virt(mfn)  (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))
> 
>  /* Convert between machine frame numbers and page-info structures. */
> -#define __mfn_to_page(mfn)  (frame_table + pfn_to_pdx(mfn))
> -#define __page_to_mfn(pg)   pdx_to_pfn((unsigned long)((pg) - frame_table))
> +#define __mfn_to_page(mfn)  (frame_table + pfn_to_pdx(mfn_x(mfn)))
> +#define __page_to_mfn(pg)   _mfn(pdx_to_pfn((unsigned long)((pg) - frame_table)))
> 
>  /* Convert between machine addresses and page-info structures. */
> -#define __maddr_to_page(ma) __mfn_to_page((ma) >> PAGE_SHIFT)
> -#define __page_to_maddr(pg) ((paddr_t)__page_to_mfn(pg) << PAGE_SHIFT)
> +#define __maddr_to_page(ma) __mfn_to_page(maddr_to_mfn(ma))
> +#define __page_to_maddr(pg) (mfn_to_maddr(__page_to_mfn(pg)))
> 
>  /* Convert between frame number and address formats.  */
>  #define __pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
> @@ -273,8 +273,8 @@ void copy_page_sse2(void *, const void *);
>  #define pfn_to_paddr(pfn)   __pfn_to_paddr(pfn)
>  #define paddr_to_pfn(pa)    __paddr_to_pfn(pa)
>  #define paddr_to_pdx(pa)    pfn_to_pdx(paddr_to_pfn(pa))
> -#define vmap_to_mfn(va)     l1e_get_pfn(*virt_to_xen_l1e((unsigned long)(va)))
> -#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
> +#define vmap_to_mfn(va)     _mfn(l1e_get_pfn(*virt_to_xen_l1e((unsigned long)(va))))
> +#define vmap_to_page(va)    __mfn_to_page(vmap_to_mfn(va))
> 
>  #endif /* !defined(__ASSEMBLY__) */
> 
> diff --git a/xen/include/xen/domain_page.h b/xen/include/xen/domain_page.h
> index 890bae5b9c..22ab65ba16 100644
> --- a/xen/include/xen/domain_page.h
> +++ b/xen/include/xen/domain_page.h
> @@ -34,7 +34,7 @@ void unmap_domain_page(const void *va);
>  /*
>   * Given a VA from map_domain_page(), return its underlying MFN.
>   */
> -unsigned long domain_page_map_to_mfn(const void *va);
> +mfn_t domain_page_map_to_mfn(const void *va);
> 
>  /*
>   * Similar to the above calls, except the mapping is accessible in all
> @@ -44,11 +44,11 @@ unsigned long domain_page_map_to_mfn(const void *va);
>  void *map_domain_page_global(mfn_t mfn);
>  void unmap_domain_page_global(const void *va);
> 
> -#define __map_domain_page(pg)        map_domain_page(_mfn(__page_to_mfn(pg)))
> +#define __map_domain_page(pg)        map_domain_page(__page_to_mfn(pg))
> 
>  static inline void *__map_domain_page_global(const struct page_info *pg)
>  {
> -    return map_domain_page_global(_mfn(__page_to_mfn(pg)));
> +    return map_domain_page_global(page_to_mfn(pg));
>  }
> 
>  #else /* !CONFIG_DOMAIN_PAGE */
> @@ -56,7 +56,7 @@ static inline void *__map_domain_page_global(const struct page_info *pg)
>  #define map_domain_page(mfn)                __mfn_to_virt(mfn_x(mfn))
>  #define __map_domain_page(pg)               page_to_virt(pg)
>  #define unmap_domain_page(va)               ((void)(va))
> -#define domain_page_map_to_mfn(va)          virt_to_mfn((unsigned long)(va))
> +#define domain_page_map_to_mfn(va)          _mfn(virt_to_mfn((unsigned long)(va)))
> 
>  static inline void *map_domain_page_global(mfn_t mfn)
>  {
> diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
> index 542c0b3f20..8516a0b131 100644
> --- a/xen/include/xen/tmem_xen.h
> +++ b/xen/include/xen/tmem_xen.h
> @@ -25,7 +25,7 @@
>  typedef uint32_t pagesize_t;  /* like size_t, must handle largest PAGE_SIZE */
> 
>  #define IS_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
> -#define IS_VALID_PAGE(_pi)    mfn_valid(_mfn(page_to_mfn(_pi)))
> +#define IS_VALID_PAGE(_pi)    mfn_valid(page_to_mfn(_pi))
> 
>  extern struct page_list_head tmem_page_list;
>  extern spinlock_t tmem_page_list_lock;
> --
> 2.11.0
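
A note on the typesafe machinery this series leans on: mfn_t is a thin
struct wrapper around a raw machine frame number, so mixing MFNs with GFNs
or plain integers becomes a compile-time error. Below is a minimal sketch
of the idiom; Xen's real definitions live in xen/include/xen/typesafe.h and
xen/include/xen/mm.h, and details (such as the wrapper collapsing to a
plain integer in some build configurations) may differ.

/* Sketch of the typesafe-integer idiom; not the verbatim Xen definition. */
#define TYPE_SAFE(_type, _name)                                              \
    typedef struct { _type _name; } _name##_t;                               \
    static inline _name##_t _##_name(_type n) { return (_name##_t) { n }; } \
    static inline _type _name##_x(_name##_t n) { return n._name; }

TYPE_SAFE(unsigned long, mfn);   /* provides mfn_t, _mfn() and mfn_x() */

/*
 * With __page_to_mfn() returning mfn_t, code that still wants the raw
 * value has to unwrap explicitly, which is exactly what the mechanical
 * mfn_x(page_to_mfn(...)) and mfn_to_page(_mfn(...)) changes above do.
 */
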
Razvan Cojocaru Nov. 1, 2017, 3:43 p.m. UTC | #2
On 11/01/2017 04:03 PM, Julien Grall wrote:
> Most of the users of page_to_mfn and mfn_to_page are either overriding
> the macros to make them work with mfn_t or using mfn_x/_mfn because the
> rest of the function uses mfn_t.
> 
> So make __page_to_mfn and __mfn_to_page return mfn_t by default.
> 
> Only reasonable clean-ups are done in this patch because it is
> already quite big. So some of the files now override page_to_mfn and
> mfn_to_page to avoid using mfn_t.
> 
> Lastly, domain_page_to_mfn is also converted to use mfn_t given that
> most of the callers are now switched to _mfn(domain_page_to_mfn(...)).
> 
> Signed-off-by: Julien Grall <julien.grall@linaro.org>

Acked-by: Razvan Cojocaru <rcojocaru@bitdefender.com>


Thanks,
Razvan
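
One v3 detail worth noting is the switch to the PRI_mfn printk format: an
mfn_t cannot be handed to printk() directly, so call sites unwrap with
mfn_x() and print through the dedicated specifier. A short sketch, taking
the dump_p2m_lookup() hunk above as the model and assuming PRI_mfn expands
to "05lx" as in xen/include/xen/mm.h (worth double-checking against the
tree):

/* Assumption: PRI_mfn expands to "05lx", giving zero-padded hex output. */
mfn_t mfn = page_to_mfn(p2m->root);

printk("P2M @ %p mfn:%#"PRI_mfn"\n", p2m->root, mfn_x(mfn));
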
Boris Ostrovsky Nov. 1, 2017, 6:38 p.m. UTC | #3
On 11/01/2017 10:03 AM, Julien Grall wrote:
> Most of the users of page_to_mfn and mfn_to_page are either overriding
> the macros to make them work with mfn_t or using mfn_x/_mfn because the
> rest of the function uses mfn_t.
>
> So make __page_to_mfn and __mfn_to_page return mfn_t by default.
>
> Only reasonable clean-ups are done in this patch because it is
> already quite big. So some of the files now override page_to_mfn and
> mfn_to_page to avoid using mfn_t.
>
> Lastly, domain_page_to_mfn is also converted to use mfn_t given that
> most of the callers are now switched to _mfn(domain_page_to_mfn(...)).
>
> Signed-off-by: Julien Grall <julien.grall@linaro.org>
>

SVM:

Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Tian, Kevin Nov. 2, 2017, 2:52 a.m. UTC | #4
> From: Julien Grall [mailto:julien.grall@linaro.org]
> Sent: Wednesday, November 1, 2017 10:03 PM
> 
> Most of the users of page_to_mfn and mfn_to_page are either overriding
> the macros to make them work with mfn_t or using mfn_x/_mfn because the
> rest of the function uses mfn_t.
> 
> So make __page_to_mfn and __mfn_to_page return mfn_t by default.
> 
> Only reasonable clean-ups are done in this patch because it is
> already quite big. So some of the files now override page_to_mfn and
> mfn_to_page to avoid using mfn_t.
> 
> Lastly, domain_page_to_mfn is also converted to use mfn_t given that
> most of the callers are now switched to _mfn(domain_page_to_mfn(...)).
> 
> Signed-off-by: Julien Grall <julien.grall@linaro.org>
> 

Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Tim Deegan Nov. 2, 2017, 8:35 a.m. UTC | #5
At 14:03 +0000 on 01 Nov (1509544996), Julien Grall wrote:
> Most of the users of page_to_mfn and mfn_to_page are either overriding
> the macros to make them work with mfn_t or using mfn_x/_mfn because the
> rest of the function uses mfn_t.
> 
> So make __page_to_mfn and __mfn_to_page return mfn_t by default.
> 
> Only reasonable clean-ups are done in this patch because it is
> already quite big. So some of the files now override page_to_mfn and
> mfn_to_page to avoid using mfn_t.
> 
> Lastly, domain_page_to_mfn is also converted to use mfn_t given that
> most of the callers are now switched to _mfn(domain_page_to_mfn(...)).
> 
> Signed-off-by: Julien Grall <julien.grall@linaro.org>

Acked-by: Tim Deegan <tim@xen.org>
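
The per-file overrides visible throughout the diff are the transitional
half of the conversion: a fully switched file consumes the typesafe macros
directly, while a not-yet-converted file redefines them at the top to keep
its existing unsigned long flow. A hedged sketch of the two styles side by
side, reusing the override block exactly as it appears in the patch; the
surrounding function names are illustrative only, not part of the series:

/* In a converted file: the default macros now traffic in mfn_t. */
static void converted_user(struct page_info *pg)
{
    mfn_t mfn = page_to_mfn(pg);          /* mfn_t in, mfn_t out */

    clear_domain_page(mfn);               /* callees already take mfn_t */
}

/* In an unconverted file: strip the wrapper once, at the file boundary. */
#undef page_to_mfn
#define page_to_mfn(pg) mfn_x(__page_to_mfn(pg))
#undef mfn_to_page
#define mfn_to_page(mfn) __mfn_to_page(_mfn(mfn))

static void legacy_user(struct page_info *pg)
{
    unsigned long mfn = page_to_mfn(pg);  /* raw value, as before */

    put_page_and_type(mfn_to_page(mfn));  /* existing code unchanged */
}

Pushing the unwrap to the file boundary keeps this patch mechanical; each
remaining override can then be dropped in a follow-up when its file is
converted for real.
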
Stefano Stabellini Dec. 11, 2017, 11:36 p.m. UTC | #6
On Wed, 1 Nov 2017, Julien Grall wrote:
> Most of the users of page_to_mfn and mfn_to_page are either overriding
> the macros to make them work with mfn_t or using mfn_x/_mfn because the
> rest of the function uses mfn_t.
> 
> So make __page_to_mfn and __mfn_to_page return mfn_t by default.
> 
> Only reasonable clean-ups are done in this patch because it is
> already quite big. So some of the files now override page_to_mfn and
> mfn_to_page to avoid using mfn_t.
> 
> Lastly, domain_page_to_mfn is also converted to use mfn_t given that
> most of the callers are now switched to _mfn(domain_page_to_mfn(...)).
> 
> Signed-off-by: Julien Grall <julien.grall@linaro.org>

Acked-by: Stefano Stabellini <sstabellini@kernel.org>


> ---
> 
> Andrew suggested to drop IS_VALID_PAGE in xen/tmem_xen.h. His comment
> was:
> 
> "/sigh  This is tautological.  The definition of a "valid mfn" in this
> case is one for which we have frametable entry, and by having a struct
> page_info in our hands, this is by definition true (unless you have a
> wild pointer, at which point your bug is elsewhere).
> 
> IS_VALID_PAGE() is only ever used in assertions and never usefully, so
> instead I would remove it entirely rather than trying to fix it up."
> 
> I can remove the function in a separate patch at the beginning of the
> series if Konrad (TMEM maintainer) is happy with that.
> 
> Cc: Stefano Stabellini <sstabellini@kernel.org>
> Cc: Julien Grall <julien.grall@arm.com>
> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
> Cc: George Dunlap <George.Dunlap@eu.citrix.com>
> Cc: Ian Jackson <ian.jackson@eu.citrix.com>
> Cc: Jan Beulich <jbeulich@suse.com>
> Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> Cc: Tim Deegan <tim@xen.org>
> Cc: Wei Liu <wei.liu2@citrix.com>
> Cc: Razvan Cojocaru <rcojocaru@bitdefender.com>
> Cc: Tamas K Lengyel <tamas@tklengyel.com>
> Cc: Paul Durrant <paul.durrant@citrix.com>
> Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
> Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
> Cc: Jun Nakajima <jun.nakajima@intel.com>
> Cc: Kevin Tian <kevin.tian@intel.com>
> Cc: George Dunlap <george.dunlap@eu.citrix.com>
> Cc: Gang Wei <gang.wei@intel.com>
> Cc: Shane Wang <shane.wang@intel.com>
> 
>     Changes in v3:
>         - Rebase on the latest staging and fix some conflicts. Tags
>         haven't been retained.
>         - Switch the printf format to PRI_mfn
> 
>     Changes in v2:
>         - Some parts have been moved into separate patches
>         - Remove one spurious comment
>         - Convert domain_page_to_mfn to use mfn_t
> ---
>  xen/arch/arm/domain_build.c             |  2 --
>  xen/arch/arm/kernel.c                   |  2 +-
>  xen/arch/arm/mem_access.c               |  2 +-
>  xen/arch/arm/mm.c                       |  8 ++++----
>  xen/arch/arm/p2m.c                      | 10 ++--------
>  xen/arch/x86/cpu/vpmu.c                 |  4 ++--
>  xen/arch/x86/domain.c                   | 21 +++++++++++----------
>  xen/arch/x86/domain_page.c              |  6 +++---
>  xen/arch/x86/domctl.c                   |  2 +-
>  xen/arch/x86/hvm/dm.c                   |  2 +-
>  xen/arch/x86/hvm/dom0_build.c           |  6 +++---
>  xen/arch/x86/hvm/emulate.c              |  6 +++---
>  xen/arch/x86/hvm/hvm.c                  | 16 ++++++++--------
>  xen/arch/x86/hvm/ioreq.c                |  6 +++---
>  xen/arch/x86/hvm/stdvga.c               |  2 +-
>  xen/arch/x86/hvm/svm/svm.c              |  4 ++--
>  xen/arch/x86/hvm/viridian.c             |  6 +++---
>  xen/arch/x86/hvm/vmx/vmcs.c             |  2 +-
>  xen/arch/x86/hvm/vmx/vmx.c              | 10 +++++-----
>  xen/arch/x86/hvm/vmx/vvmx.c             |  6 +++---
>  xen/arch/x86/mm.c                       |  6 ------
>  xen/arch/x86/mm/guest_walk.c            |  6 +++---
>  xen/arch/x86/mm/hap/guest_walk.c        |  2 +-
>  xen/arch/x86/mm/hap/hap.c               |  6 ------
>  xen/arch/x86/mm/hap/nested_ept.c        |  2 +-
>  xen/arch/x86/mm/mem_sharing.c           |  5 -----
>  xen/arch/x86/mm/p2m-ept.c               |  4 ++++
>  xen/arch/x86/mm/p2m-pod.c               |  6 ------
>  xen/arch/x86/mm/p2m.c                   |  6 ------
>  xen/arch/x86/mm/paging.c                |  6 ------
>  xen/arch/x86/mm/shadow/private.h        | 16 ++--------------
>  xen/arch/x86/numa.c                     |  2 +-
>  xen/arch/x86/physdev.c                  |  2 +-
>  xen/arch/x86/pv/callback.c              |  6 ------
>  xen/arch/x86/pv/descriptor-tables.c     | 10 ----------
>  xen/arch/x86/pv/dom0_build.c            |  6 ++++++
>  xen/arch/x86/pv/domain.c                |  6 ------
>  xen/arch/x86/pv/emul-gate-op.c          |  6 ------
>  xen/arch/x86/pv/emul-priv-op.c          | 10 ----------
>  xen/arch/x86/pv/grant_table.c           |  6 ------
>  xen/arch/x86/pv/ro-page-fault.c         |  6 ------
>  xen/arch/x86/smpboot.c                  |  6 ------
>  xen/arch/x86/tboot.c                    |  4 ++--
>  xen/arch/x86/traps.c                    |  4 ++--
>  xen/arch/x86/x86_64/mm.c                |  6 ++++++
>  xen/common/domain.c                     |  4 ++--
>  xen/common/grant_table.c                |  6 ++++++
>  xen/common/kimage.c                     |  6 ------
>  xen/common/memory.c                     |  6 ++++++
>  xen/common/page_alloc.c                 |  6 ++++++
>  xen/common/tmem.c                       |  2 +-
>  xen/common/tmem_xen.c                   |  4 ----
>  xen/common/trace.c                      |  6 ++++++
>  xen/common/vmap.c                       |  9 +++++----
>  xen/common/xenoprof.c                   |  2 --
>  xen/drivers/passthrough/amd/iommu_map.c |  6 ++++++
>  xen/drivers/passthrough/iommu.c         |  2 +-
>  xen/drivers/passthrough/x86/iommu.c     |  2 +-
>  xen/include/asm-arm/mm.h                | 16 +++++++++-------
>  xen/include/asm-arm/p2m.h               |  4 ++--
>  xen/include/asm-x86/mm.h                | 12 ++++++------
>  xen/include/asm-x86/p2m.h               |  2 +-
>  xen/include/asm-x86/page.h              | 32 ++++++++++++++++----------------
>  xen/include/xen/domain_page.h           |  8 ++++----
>  xen/include/xen/tmem_xen.h              |  2 +-
>  65 files changed, 166 insertions(+), 239 deletions(-)
> 
> diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
> index 5532068ab1..4b554b49c1 100644
> --- a/xen/arch/arm/domain_build.c
> +++ b/xen/arch/arm/domain_build.c
> @@ -50,8 +50,6 @@ struct map_range_data
>  /* Override macros from asm/page.h to make them work with mfn_t */
>  #undef virt_to_mfn
>  #define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
> -#undef page_to_mfn
> -#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
>  
>  //#define DEBUG_11_ALLOCATION
>  #ifdef DEBUG_11_ALLOCATION
> diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c
> index c2755a9ab9..018d1aed06 100644
> --- a/xen/arch/arm/kernel.c
> +++ b/xen/arch/arm/kernel.c
> @@ -295,7 +295,7 @@ static __init int kernel_decompress(struct bootmodule *mod)
>          iounmap(input);
>          return -ENOMEM;
>      }
> -    mfn = _mfn(page_to_mfn(pages));
> +    mfn = page_to_mfn(pages);
>      output = __vmap(&mfn, 1 << kernel_order_out, 1, 1, PAGE_HYPERVISOR, VMAP_DEFAULT);
>  
>      rc = perform_gunzip(output, input, size);
> diff --git a/xen/arch/arm/mem_access.c b/xen/arch/arm/mem_access.c
> index 0f2cbb81d3..112e291cba 100644
> --- a/xen/arch/arm/mem_access.c
> +++ b/xen/arch/arm/mem_access.c
> @@ -210,7 +210,7 @@ p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag,
>      if ( t != p2m_ram_rw )
>          goto err;
>  
> -    page = mfn_to_page(mfn_x(mfn));
> +    page = mfn_to_page(mfn);
>  
>      if ( unlikely(!get_page(page, v->domain)) )
>          page = NULL;
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 3c328e2df5..b8818e03ab 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -477,7 +477,7 @@ void unmap_domain_page(const void *va)
>      local_irq_restore(flags);
>  }
>  
> -unsigned long domain_page_map_to_mfn(const void *ptr)
> +mfn_t domain_page_map_to_mfn(const void *ptr)
>  {
>      unsigned long va = (unsigned long)ptr;
>      lpae_t *map = this_cpu(xen_dommap);
> @@ -485,12 +485,12 @@ unsigned long domain_page_map_to_mfn(const void *ptr)
>      unsigned long offset = (va>>THIRD_SHIFT) & LPAE_ENTRY_MASK;
>  
>      if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END )
> -        return __virt_to_mfn(va);
> +        return virt_to_mfn(va);
>  
>      ASSERT(slot >= 0 && slot < DOMHEAP_ENTRIES);
>      ASSERT(map[slot].pt.avail != 0);
>  
> -    return map[slot].pt.base + offset;
> +    return _mfn(map[slot].pt.base + offset);
>  }
>  #endif
>  
> @@ -1288,7 +1288,7 @@ int xenmem_add_to_physmap_one(
>              return -EINVAL;
>          }
>  
> -        mfn = _mfn(page_to_mfn(page));
> +        mfn = page_to_mfn(page);
>          t = p2m_map_foreign;
>  
>          rcu_unlock_domain(od);
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index 68b488997d..d1ef535a43 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -38,12 +38,6 @@ static unsigned int __read_mostly max_vmid = MAX_VMID_8_BIT;
>  
>  #define P2M_ROOT_PAGES    (1<<P2M_ROOT_ORDER)
>  
> -/* Override macros from asm/mm.h to make them work with mfn_t */
> -#undef mfn_to_page
> -#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
> -#undef page_to_mfn
> -#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
> -
>  unsigned int __read_mostly p2m_ipa_bits;
>  
>  /* Helpers to lookup the properties of each level */
> @@ -97,8 +91,8 @@ void dump_p2m_lookup(struct domain *d, paddr_t addr)
>  
>      printk("dom%d IPA 0x%"PRIpaddr"\n", d->domain_id, addr);
>  
> -    printk("P2M @ %p mfn:0x%lx\n",
> -           p2m->root, __page_to_mfn(p2m->root));
> +    printk("P2M @ %p mfn:%#"PRI_mfn"\n",
> +           p2m->root, mfn_x(page_to_mfn(p2m->root)));
>  
>      dump_pt_walk(page_to_maddr(p2m->root), addr,
>                   P2M_ROOT_LEVEL, P2M_ROOT_PAGES);
> diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
> index 7baf4614be..b978e05613 100644
> --- a/xen/arch/x86/cpu/vpmu.c
> +++ b/xen/arch/x86/cpu/vpmu.c
> @@ -653,7 +653,7 @@ static void pvpmu_finish(struct domain *d, xen_pmu_params_t *params)
>  {
>      struct vcpu *v;
>      struct vpmu_struct *vpmu;
> -    uint64_t mfn;
> +    mfn_t mfn;
>      void *xenpmu_data;
>  
>      if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )
> @@ -675,7 +675,7 @@ static void pvpmu_finish(struct domain *d, xen_pmu_params_t *params)
>      if ( xenpmu_data )
>      {
>          mfn = domain_page_map_to_mfn(xenpmu_data);
> -        ASSERT(mfn_valid(_mfn(mfn)));
> +        ASSERT(mfn_valid(mfn));
>          unmap_domain_page_global(xenpmu_data);
>          put_page_and_type(mfn_to_page(mfn));
>      }
> diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
> index 735f45c133..cb596c4a31 100644
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -186,7 +186,7 @@ void dump_pageframe_info(struct domain *d)
>                  }
>              }
>              printk("    DomPage %p: caf=%08lx, taf=%" PRtype_info "\n",
> -                   _p(page_to_mfn(page)),
> +                   _p(mfn_x(page_to_mfn(page))),
>                     page->count_info, page->u.inuse.type_info);
>          }
>          spin_unlock(&d->page_alloc_lock);
> @@ -199,7 +199,7 @@ void dump_pageframe_info(struct domain *d)
>      page_list_for_each ( page, &d->xenpage_list )
>      {
>          printk("    XenPage %p: caf=%08lx, taf=%" PRtype_info "\n",
> -               _p(page_to_mfn(page)),
> +               _p(mfn_x(page_to_mfn(page))),
>                 page->count_info, page->u.inuse.type_info);
>      }
>      spin_unlock(&d->page_alloc_lock);
> @@ -621,7 +621,8 @@ int arch_domain_soft_reset(struct domain *d)
>      struct page_info *page = virt_to_page(d->shared_info), *new_page;
>      int ret = 0;
>      struct domain *owner;
> -    unsigned long mfn, gfn;
> +    mfn_t mfn;
> +    unsigned long gfn;
>      p2m_type_t p2mt;
>      unsigned int i;
>  
> @@ -655,7 +656,7 @@ int arch_domain_soft_reset(struct domain *d)
>      ASSERT( owner == d );
>  
>      mfn = page_to_mfn(page);
> -    gfn = mfn_to_gmfn(d, mfn);
> +    gfn = mfn_to_gmfn(d, mfn_x(mfn));
>  
>      /*
>       * gfn == INVALID_GFN indicates that the shared_info page was never mapped
> @@ -664,7 +665,7 @@ int arch_domain_soft_reset(struct domain *d)
>      if ( gfn == gfn_x(INVALID_GFN) )
>          goto exit_put_page;
>  
> -    if ( mfn_x(get_gfn_query(d, gfn, &p2mt)) != mfn )
> +    if ( !mfn_eq(get_gfn_query(d, gfn, &p2mt), mfn) )
>      {
>          printk(XENLOG_G_ERR "Failed to get Dom%d's shared_info GFN (%lx)\n",
>                 d->domain_id, gfn);
> @@ -681,7 +682,7 @@ int arch_domain_soft_reset(struct domain *d)
>          goto exit_put_gfn;
>      }
>  
> -    ret = guest_physmap_remove_page(d, _gfn(gfn), _mfn(mfn), PAGE_ORDER_4K);
> +    ret = guest_physmap_remove_page(d, _gfn(gfn), mfn, PAGE_ORDER_4K);
>      if ( ret )
>      {
>          printk(XENLOG_G_ERR "Failed to remove Dom%d's shared_info frame %lx\n",
> @@ -690,7 +691,7 @@ int arch_domain_soft_reset(struct domain *d)
>          goto exit_put_gfn;
>      }
>  
> -    ret = guest_physmap_add_page(d, _gfn(gfn), _mfn(page_to_mfn(new_page)),
> +    ret = guest_physmap_add_page(d, _gfn(gfn), page_to_mfn(new_page),
>                                   PAGE_ORDER_4K);
>      if ( ret )
>      {
> @@ -988,7 +989,7 @@ int arch_set_info_guest(
>                  {
>                      if ( (page->u.inuse.type_info & PGT_type_mask) ==
>                           PGT_l4_page_table )
> -                        done = !fill_ro_mpt(_mfn(page_to_mfn(page)));
> +                        done = !fill_ro_mpt(page_to_mfn(page));
>  
>                      page_unlock(page);
>                  }
> @@ -1115,7 +1116,7 @@ int arch_set_info_guest(
>          l4_pgentry_t *l4tab;
>  
>          l4tab = map_domain_page(pagetable_get_mfn(v->arch.guest_table));
> -        *l4tab = l4e_from_pfn(page_to_mfn(cr3_page),
> +        *l4tab = l4e_from_mfn(page_to_mfn(cr3_page),
>              _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
>          unmap_domain_page(l4tab);
>      }
> @@ -1945,7 +1946,7 @@ int domain_relinquish_resources(struct domain *d)
>          if ( d->arch.pirq_eoi_map != NULL )
>          {
>              unmap_domain_page_global(d->arch.pirq_eoi_map);
> -            put_page_and_type(mfn_to_page(d->arch.pirq_eoi_map_mfn));
> +            put_page_and_type(mfn_to_page(_mfn(d->arch.pirq_eoi_map_mfn)));
>              d->arch.pirq_eoi_map = NULL;
>              d->arch.auto_unmask = 0;
>          }
> diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
> index 3432a854dd..88046b39c9 100644
> --- a/xen/arch/x86/domain_page.c
> +++ b/xen/arch/x86/domain_page.c
> @@ -331,13 +331,13 @@ void unmap_domain_page_global(const void *ptr)
>  }
>  
>  /* Translate a map-domain-page'd address to the underlying MFN */
> -unsigned long domain_page_map_to_mfn(const void *ptr)
> +mfn_t domain_page_map_to_mfn(const void *ptr)
>  {
>      unsigned long va = (unsigned long)ptr;
>      const l1_pgentry_t *pl1e;
>  
>      if ( va >= DIRECTMAP_VIRT_START )
> -        return virt_to_mfn(ptr);
> +        return _mfn(virt_to_mfn(ptr));
>  
>      if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END )
>      {
> @@ -350,5 +350,5 @@ unsigned long domain_page_map_to_mfn(const void *ptr)
>          pl1e = &__linear_l1_table[l1_linear_offset(va)];
>      }
>  
> -    return l1e_get_pfn(*pl1e);
> +    return l1e_get_mfn(*pl1e);
>  }
> diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
> index 80b4df9ec9..9f6c92411a 100644
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -429,7 +429,7 @@ long arch_do_domctl(
>          {
>              if ( i >= max_pfns )
>                  break;
> -            mfn = page_to_mfn(page);
> +            mfn = mfn_x(page_to_mfn(page));
>              if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
>                                        i, &mfn, 1) )
>              {
> diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
> index a787f43737..4760816ee6 100644
> --- a/xen/arch/x86/hvm/dm.c
> +++ b/xen/arch/x86/hvm/dm.c
> @@ -219,7 +219,7 @@ static int modified_memory(struct domain *d,
>              page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
>              if ( page )
>              {
> -                mfn_t gmfn = _mfn(page_to_mfn(page));
> +                mfn_t gmfn = page_to_mfn(page);
>  
>                  paging_mark_dirty(d, gmfn);
>                  /*
> diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
> index a67071c739..b50506ec63 100644
> --- a/xen/arch/x86/hvm/dom0_build.c
> +++ b/xen/arch/x86/hvm/dom0_build.c
> @@ -120,7 +120,7 @@ static int __init pvh_populate_memory_range(struct domain *d,
>              continue;
>          }
>  
> -        rc = guest_physmap_add_page(d, _gfn(start), _mfn(page_to_mfn(page)),
> +        rc = guest_physmap_add_page(d, _gfn(start), page_to_mfn(page),
>                                      order);
>          if ( rc != 0 )
>          {
> @@ -270,7 +270,7 @@ static int __init pvh_setup_vmx_realmode_helpers(struct domain *d)
>      }
>      write_32bit_pse_identmap(ident_pt);
>      unmap_domain_page(ident_pt);
> -    put_page(mfn_to_page(mfn_x(mfn)));
> +    put_page(mfn_to_page(mfn));
>      d->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] = gaddr;
>      if ( pvh_add_mem_range(d, gaddr, gaddr + PAGE_SIZE, E820_RESERVED) )
>              printk("Unable to set identity page tables as reserved in the memory map\n");
> @@ -288,7 +288,7 @@ static void __init pvh_steal_low_ram(struct domain *d, unsigned long start,
>  
>      for ( mfn = start; mfn < start + nr_pages; mfn++ )
>      {
> -        struct page_info *pg = mfn_to_page(mfn);
> +        struct page_info *pg = mfn_to_page(_mfn(mfn));
>          int rc;
>  
>          rc = unshare_xen_page_with_guest(pg, dom_io);
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index e924ce07c4..312aa91416 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -590,7 +590,7 @@ static void *hvmemul_map_linear_addr(
>              goto unhandleable;
>          }
>  
> -        *mfn++ = _mfn(page_to_mfn(page));
> +        *mfn++ = page_to_mfn(page);
>  
>          if ( p2m_is_discard_write(p2mt) )
>          {
> @@ -622,7 +622,7 @@ static void *hvmemul_map_linear_addr(
>   out:
>      /* Drop all held references. */
>      while ( mfn-- > hvmemul_ctxt->mfn )
> -        put_page(mfn_to_page(mfn_x(*mfn)));
> +        put_page(mfn_to_page(*mfn));
>  
>      return err;
>  }
> @@ -648,7 +648,7 @@ static void hvmemul_unmap_linear_addr(
>      {
>          ASSERT(mfn_valid(*mfn));
>          paging_mark_dirty(currd, *mfn);
> -        put_page(mfn_to_page(mfn_x(*mfn)));
> +        put_page(mfn_to_page(*mfn));
>  
>          *mfn++ = _mfn(0); /* Clean slot for map()'s error checking. */
>      }
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 205b4cb685..4d795ee700 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2211,7 +2211,7 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
>              v->arch.guest_table = pagetable_from_page(page);
>  
>              HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
> -                        v->arch.hvm_vcpu.guest_cr[3], page_to_mfn(page));
> +                        v->arch.hvm_vcpu.guest_cr[3], mfn_x(page_to_mfn(page)));
>          }
>      }
>      else if ( !(value & X86_CR0_PG) && (old_value & X86_CR0_PG) )
> @@ -2546,7 +2546,7 @@ static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent,
>          if ( unlikely(p2m_is_discard_write(p2mt)) )
>              *writable = 0;
>          else if ( !permanent )
> -            paging_mark_dirty(d, _mfn(page_to_mfn(page)));
> +            paging_mark_dirty(d, page_to_mfn(page));
>      }
>  
>      if ( !permanent )
> @@ -2588,7 +2588,7 @@ void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent)
>  
>  void hvm_unmap_guest_frame(void *p, bool_t permanent)
>  {
> -    unsigned long mfn;
> +    mfn_t mfn;
>      struct page_info *page;
>  
>      if ( !p )
> @@ -2609,7 +2609,7 @@ void hvm_unmap_guest_frame(void *p, bool_t permanent)
>          list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
>              if ( track->page == page )
>              {
> -                paging_mark_dirty(d, _mfn(mfn));
> +                paging_mark_dirty(d, mfn);
>                  list_del(&track->list);
>                  xfree(track);
>                  break;
> @@ -2626,7 +2626,7 @@ void hvm_mapped_guest_frames_mark_dirty(struct domain *d)
>  
>      spin_lock(&d->arch.hvm_domain.write_map.lock);
>      list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
> -        paging_mark_dirty(d, _mfn(page_to_mfn(track->page)));
> +        paging_mark_dirty(d, page_to_mfn(track->page));
>      spin_unlock(&d->arch.hvm_domain.write_map.lock);
>  }
>  
> @@ -3200,8 +3200,8 @@ static enum hvm_translation_result __hvm_copy(
>  
>                  if ( xchg(&lastpage, gfn_x(gfn)) != gfn_x(gfn) )
>                      dprintk(XENLOG_G_DEBUG,
> -                            "%pv attempted write to read-only gfn %#lx (mfn=%#lx)\n",
> -                            v, gfn_x(gfn), page_to_mfn(page));
> +                            "%pv attempted write to read-only gfn %#lx (mfn=%#"PRI_mfn")\n",
> +                            v, gfn_x(gfn), mfn_x(page_to_mfn(page)));
>              }
>              else
>              {
> @@ -3209,7 +3209,7 @@ static enum hvm_translation_result __hvm_copy(
>                      memcpy(p, buf, count);
>                  else
>                      memset(p, 0, count);
> -                paging_mark_dirty(v->domain, _mfn(page_to_mfn(page)));
> +                paging_mark_dirty(v->domain, page_to_mfn(page));
>              }
>          }
>          else
> diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
> index d5afe20cc8..0f823d201a 100644
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -268,7 +268,7 @@ static void hvm_remove_ioreq_gfn(
>      struct domain *d, struct hvm_ioreq_page *iorp)
>  {
>      if ( guest_physmap_remove_page(d, _gfn(iorp->gfn),
> -                                   _mfn(page_to_mfn(iorp->page)), 0) )
> +                                   page_to_mfn(iorp->page), 0) )
>          domain_crash(d);
>      clear_page(iorp->va);
>  }
> @@ -281,9 +281,9 @@ static int hvm_add_ioreq_gfn(
>      clear_page(iorp->va);
>  
>      rc = guest_physmap_add_page(d, _gfn(iorp->gfn),
> -                                _mfn(page_to_mfn(iorp->page)), 0);
> +                                page_to_mfn(iorp->page), 0);
>      if ( rc == 0 )
> -        paging_mark_dirty(d, _mfn(page_to_mfn(iorp->page)));
> +        paging_mark_dirty(d, page_to_mfn(iorp->page));
>  
>      return rc;
>  }
> diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
> index 088fbdf8ce..925bab2438 100644
> --- a/xen/arch/x86/hvm/stdvga.c
> +++ b/xen/arch/x86/hvm/stdvga.c
> @@ -590,7 +590,7 @@ void stdvga_init(struct domain *d)
>          if ( pg == NULL )
>              break;
>          s->vram_page[i] = pg;
> -        clear_domain_page(_mfn(page_to_mfn(pg)));
> +        clear_domain_page(page_to_mfn(pg));
>      }
>  
>      if ( i == ARRAY_SIZE(s->vram_page) )
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index b9cf423fd9..f50f931598 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -1521,7 +1521,7 @@ static int svm_cpu_up_prepare(unsigned int cpu)
>          if ( !pg )
>              goto err;
>  
> -        clear_domain_page(_mfn(page_to_mfn(pg)));
> +        clear_domain_page(page_to_mfn(pg));
>          *this_hsa = page_to_maddr(pg);
>      }
>  
> @@ -1531,7 +1531,7 @@ static int svm_cpu_up_prepare(unsigned int cpu)
>          if ( !pg )
>              goto err;
>  
> -        clear_domain_page(_mfn(page_to_mfn(pg)));
> +        clear_domain_page(page_to_mfn(pg));
>          *this_vmcb = page_to_maddr(pg);
>      }
>  
> diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
> index f0fa59d7d5..070551e1ab 100644
> --- a/xen/arch/x86/hvm/viridian.c
> +++ b/xen/arch/x86/hvm/viridian.c
> @@ -354,7 +354,7 @@ static void enable_hypercall_page(struct domain *d)
>          if ( page )
>              put_page(page);
>          gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
> -                 gmfn, page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
> +                 gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
>          return;
>      }
>  
> @@ -414,7 +414,7 @@ static void initialize_vp_assist(struct vcpu *v)
>  
>   fail:
>      gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n", gmfn,
> -             page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
> +             mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
>  }
>  
>  static void teardown_vp_assist(struct vcpu *v)
> @@ -494,7 +494,7 @@ static void update_reference_tsc(struct domain *d, bool_t initialize)
>          if ( page )
>              put_page(page);
>          gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
> -                 gmfn, page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
> +                 gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
>          return;
>      }
>  
> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
> index b5100b5021..8eaa58e3c0 100644
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -1437,7 +1437,7 @@ int vmx_vcpu_enable_pml(struct vcpu *v)
>  
>      vmx_vmcs_enter(v);
>  
> -    __vmwrite(PML_ADDRESS, page_to_mfn(v->arch.hvm_vmx.pml_pg) << PAGE_SHIFT);
> +    __vmwrite(PML_ADDRESS, page_to_maddr(v->arch.hvm_vmx.pml_pg));
>      __vmwrite(GUEST_PML_INDEX, NR_PML_ENTRIES - 1);
>  
>      v->arch.hvm_vmx.secondary_exec_control |= SECONDARY_EXEC_ENABLE_PML;
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index b18cceab55..c657ba89f5 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -2978,7 +2978,7 @@ gp_fault:
>  static int vmx_alloc_vlapic_mapping(struct domain *d)
>  {
>      struct page_info *pg;
> -    unsigned long mfn;
> +    mfn_t mfn;
>  
>      if ( !cpu_has_vmx_virtualize_apic_accesses )
>          return 0;
> @@ -2987,10 +2987,10 @@ static int vmx_alloc_vlapic_mapping(struct domain *d)
>      if ( !pg )
>          return -ENOMEM;
>      mfn = page_to_mfn(pg);
> -    clear_domain_page(_mfn(mfn));
> +    clear_domain_page(mfn);
>      share_xen_page_with_guest(pg, d, XENSHARE_writable);
> -    d->arch.hvm_domain.vmx.apic_access_mfn = mfn;
> -    set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), _mfn(mfn),
> +    d->arch.hvm_domain.vmx.apic_access_mfn = mfn_x(mfn);
> +    set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), mfn,
>                         PAGE_ORDER_4K, p2m_get_hostp2m(d)->default_access);
>  
>      return 0;
> @@ -3001,7 +3001,7 @@ static void vmx_free_vlapic_mapping(struct domain *d)
>      unsigned long mfn = d->arch.hvm_domain.vmx.apic_access_mfn;
>  
>      if ( mfn != 0 )
> -        free_shared_domheap_page(mfn_to_page(mfn));
> +        free_shared_domheap_page(mfn_to_page(_mfn(mfn)));
>  }
>  
>  static void vmx_install_vlapic_mapping(struct vcpu *v)
> diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
> index dde02c076b..4836d69c0e 100644
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -84,7 +84,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
>          }
>          v->arch.hvm_vmx.vmread_bitmap = vmread_bitmap;
>  
> -        clear_domain_page(_mfn(page_to_mfn(vmread_bitmap)));
> +        clear_domain_page(page_to_mfn(vmread_bitmap));
>  
>          vmwrite_bitmap = alloc_domheap_page(NULL, 0);
>          if ( !vmwrite_bitmap )
> @@ -1729,7 +1729,7 @@ int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
>                  nvcpu->nv_vvmcx = vvmcx;
>                  nvcpu->nv_vvmcxaddr = gpa;
>                  v->arch.hvm_vmx.vmcs_shadow_maddr =
> -                    pfn_to_paddr(domain_page_map_to_mfn(vvmcx));
> +                    mfn_to_maddr(domain_page_map_to_mfn(vvmcx));
>              }
>              else
>              {
> @@ -1815,7 +1815,7 @@ int nvmx_handle_vmclear(struct cpu_user_regs *regs)
>          {
>              if ( writable )
>                  clear_vvmcs_launched(&nvmx->launched_list,
> -                                     domain_page_map_to_mfn(vvmcs));
> +                                     mfn_x(domain_page_map_to_mfn(vvmcs)));
>              else
>                  rc = VMFAIL_VALID;
>              hvm_unmap_guest_frame(vvmcs, 0);
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index a20fdcaea4..f6ca4f884b 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -128,12 +128,6 @@
>  
>  #include "pv/mm.h"
>  
> -/* Override macros from asm/page.h to make them work with mfn_t */
> -#undef mfn_to_page
> -#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
> -#undef page_to_mfn
> -#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
> -
>  /* Mapping of the fixmap space needed early. */
>  l1_pgentry_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
>      l1_fixmap[L1_PAGETABLE_ENTRIES];
> diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
> index 6055fec1ad..f67aeda3d0 100644
> --- a/xen/arch/x86/mm/guest_walk.c
> +++ b/xen/arch/x86/mm/guest_walk.c
> @@ -469,20 +469,20 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
>      if ( l3p )
>      {
>          unmap_domain_page(l3p);
> -        put_page(mfn_to_page(mfn_x(gw->l3mfn)));
> +        put_page(mfn_to_page(gw->l3mfn));
>      }
>  #endif
>  #if GUEST_PAGING_LEVELS >= 3
>      if ( l2p )
>      {
>          unmap_domain_page(l2p);
> -        put_page(mfn_to_page(mfn_x(gw->l2mfn)));
> +        put_page(mfn_to_page(gw->l2mfn));
>      }
>  #endif
>      if ( l1p )
>      {
>          unmap_domain_page(l1p);
> -        put_page(mfn_to_page(mfn_x(gw->l1mfn)));
> +        put_page(mfn_to_page(gw->l1mfn));
>      }
>  
>      return walk_ok;
> diff --git a/xen/arch/x86/mm/hap/guest_walk.c b/xen/arch/x86/mm/hap/guest_walk.c
> index c550017ba4..cb3f9cebe7 100644
> --- a/xen/arch/x86/mm/hap/guest_walk.c
> +++ b/xen/arch/x86/mm/hap/guest_walk.c
> @@ -83,7 +83,7 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
>          *pfec &= ~PFEC_page_present;
>          goto out_tweak_pfec;
>      }
> -    top_mfn = _mfn(page_to_mfn(top_page));
> +    top_mfn = page_to_mfn(top_page);
>  
>      /* Map the top-level table and call the tree-walker */
>      ASSERT(mfn_valid(top_mfn));
> diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
> index 41deb90787..97f2516ecd 100644
> --- a/xen/arch/x86/mm/hap/hap.c
> +++ b/xen/arch/x86/mm/hap/hap.c
> @@ -42,12 +42,6 @@
>  
>  #include "private.h"
>  
> -/* Override macros from asm/page.h to make them work with mfn_t */
> -#undef mfn_to_page
> -#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
> -#undef page_to_mfn
> -#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
> -
>  /************************************************/
>  /*          HAP VRAM TRACKING SUPPORT           */
>  /************************************************/
> diff --git a/xen/arch/x86/mm/hap/nested_ept.c b/xen/arch/x86/mm/hap/nested_ept.c
> index 14b1bb01e9..1738df69f6 100644
> --- a/xen/arch/x86/mm/hap/nested_ept.c
> +++ b/xen/arch/x86/mm/hap/nested_ept.c
> @@ -173,7 +173,7 @@ nept_walk_tables(struct vcpu *v, unsigned long l2ga, ept_walk_t *gw)
>              goto map_err;
>          gw->lxe[lvl] = lxp[ept_lvl_table_offset(l2ga, lvl)];
>          unmap_domain_page(lxp);
> -        put_page(mfn_to_page(mfn_x(lxmfn)));
> +        put_page(mfn_to_page(lxmfn));
>  
>          if ( nept_non_present_check(gw->lxe[lvl]) )
>              goto non_present;
> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
> index 6f4be95515..6ecf0b27d5 100644
> --- a/xen/arch/x86/mm/mem_sharing.c
> +++ b/xen/arch/x86/mm/mem_sharing.c
> @@ -152,11 +152,6 @@ static inline shr_handle_t get_next_handle(void)
>  #define mem_sharing_enabled(d) \
>      (is_hvm_domain(d) && (d)->arch.hvm_domain.mem_sharing_enabled)
>  
> -#undef mfn_to_page
> -#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
> -#undef page_to_mfn
> -#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
> -
>  static atomic_t nr_saved_mfns   = ATOMIC_INIT(0); 

Patch

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 5532068ab1..4b554b49c1 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -50,8 +50,6 @@  struct map_range_data
 /* Override macros from asm/page.h to make them work with mfn_t */
 #undef virt_to_mfn
 #define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
 
 //#define DEBUG_11_ALLOCATION
 #ifdef DEBUG_11_ALLOCATION
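
For reference, the typesafe pattern this conversion leans on: mfn_t is an
opaque single-member struct, so mixing an MFN up with a GFN or a raw
integer becomes a compile-time error. A minimal sketch of the idea (the
real definition in xen/include/xen/mm.h is generated by a macro and may
degrade to a plain integer in release builds):

    /* Sketch only: the boxed frame-number type and its two accessors. */
    typedef struct { unsigned long mfn; } mfn_t;

    static inline mfn_t _mfn(unsigned long m)    /* box a raw value */
    {
        return (mfn_t){ m };
    }

    static inline unsigned long mfn_x(mfn_t m)   /* unbox to a long */
    {
        return m.mfn;
    }
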
diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c
index c2755a9ab9..018d1aed06 100644
--- a/xen/arch/arm/kernel.c
+++ b/xen/arch/arm/kernel.c
@@ -295,7 +295,7 @@  static __init int kernel_decompress(struct bootmodule *mod)
         iounmap(input);
         return -ENOMEM;
     }
-    mfn = _mfn(page_to_mfn(pages));
+    mfn = page_to_mfn(pages);
     output = __vmap(&mfn, 1 << kernel_order_out, 1, 1, PAGE_HYPERVISOR, VMAP_DEFAULT);
 
     rc = perform_gunzip(output, input, size);
diff --git a/xen/arch/arm/mem_access.c b/xen/arch/arm/mem_access.c
index 0f2cbb81d3..112e291cba 100644
--- a/xen/arch/arm/mem_access.c
+++ b/xen/arch/arm/mem_access.c
@@ -210,7 +210,7 @@  p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag,
     if ( t != p2m_ram_rw )
         goto err;
 
-    page = mfn_to_page(mfn_x(mfn));
+    page = mfn_to_page(mfn);
 
     if ( unlikely(!get_page(page, v->domain)) )
         page = NULL;
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 3c328e2df5..b8818e03ab 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -477,7 +477,7 @@  void unmap_domain_page(const void *va)
     local_irq_restore(flags);
 }
 
-unsigned long domain_page_map_to_mfn(const void *ptr)
+mfn_t domain_page_map_to_mfn(const void *ptr)
 {
     unsigned long va = (unsigned long)ptr;
     lpae_t *map = this_cpu(xen_dommap);
@@ -485,12 +485,12 @@  unsigned long domain_page_map_to_mfn(const void *ptr)
     unsigned long offset = (va>>THIRD_SHIFT) & LPAE_ENTRY_MASK;
 
     if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END )
-        return __virt_to_mfn(va);
+        return virt_to_mfn(va);
 
     ASSERT(slot >= 0 && slot < DOMHEAP_ENTRIES);
     ASSERT(map[slot].pt.avail != 0);
 
-    return map[slot].pt.base + offset;
+    return _mfn(map[slot].pt.base + offset);
 }
 #endif
 
@@ -1288,7 +1288,7 @@  int xenmem_add_to_physmap_one(
             return -EINVAL;
         }
 
-        mfn = _mfn(page_to_mfn(page));
+        mfn = page_to_mfn(page);
         t = p2m_map_foreign;
 
         rcu_unlock_domain(od);
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 68b488997d..d1ef535a43 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -38,12 +38,6 @@  static unsigned int __read_mostly max_vmid = MAX_VMID_8_BIT;
 
 #define P2M_ROOT_PAGES    (1<<P2M_ROOT_ORDER)
 
-/* Override macros from asm/mm.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
 unsigned int __read_mostly p2m_ipa_bits;
 
 /* Helpers to lookup the properties of each level */
@@ -97,8 +91,8 @@  void dump_p2m_lookup(struct domain *d, paddr_t addr)
 
     printk("dom%d IPA 0x%"PRIpaddr"\n", d->domain_id, addr);
 
-    printk("P2M @ %p mfn:0x%lx\n",
-           p2m->root, __page_to_mfn(p2m->root));
+    printk("P2M @ %p mfn:%#"PRI_mfn"\n",
+           p2m->root, mfn_x(page_to_mfn(p2m->root)));
 
     dump_pt_walk(page_to_maddr(p2m->root), addr,
                  P2M_ROOT_LEVEL, P2M_ROOT_PAGES);
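
Printing a typesafe MFN therefore takes two steps, as in the
dump_p2m_lookup() hunk above: unbox with mfn_x() and format with the
PRI_mfn macro rather than a bare %lx. A usage sketch:

    mfn_t mfn = page_to_mfn(p2m->root);

    /* PRI_mfn supplies the conventional width/length specifier for an
     * MFN; the struct itself must still be unboxed for varargs. */
    printk("P2M root mfn %#"PRI_mfn"\n", mfn_x(mfn));
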
diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index 7baf4614be..b978e05613 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -653,7 +653,7 @@  static void pvpmu_finish(struct domain *d, xen_pmu_params_t *params)
 {
     struct vcpu *v;
     struct vpmu_struct *vpmu;
-    uint64_t mfn;
+    mfn_t mfn;
     void *xenpmu_data;
 
     if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )
@@ -675,7 +675,7 @@  static void pvpmu_finish(struct domain *d, xen_pmu_params_t *params)
     if ( xenpmu_data )
     {
         mfn = domain_page_map_to_mfn(xenpmu_data);
-        ASSERT(mfn_valid(_mfn(mfn)));
+        ASSERT(mfn_valid(mfn));
         unmap_domain_page_global(xenpmu_data);
         put_page_and_type(mfn_to_page(mfn));
     }
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 735f45c133..cb596c4a31 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -186,7 +186,7 @@  void dump_pageframe_info(struct domain *d)
                 }
             }
             printk("    DomPage %p: caf=%08lx, taf=%" PRtype_info "\n",
-                   _p(page_to_mfn(page)),
+                   _p(mfn_x(page_to_mfn(page))),
                    page->count_info, page->u.inuse.type_info);
         }
         spin_unlock(&d->page_alloc_lock);
@@ -199,7 +199,7 @@  void dump_pageframe_info(struct domain *d)
     page_list_for_each ( page, &d->xenpage_list )
     {
         printk("    XenPage %p: caf=%08lx, taf=%" PRtype_info "\n",
-               _p(page_to_mfn(page)),
+               _p(mfn_x(page_to_mfn(page))),
                page->count_info, page->u.inuse.type_info);
     }
     spin_unlock(&d->page_alloc_lock);
@@ -621,7 +621,8 @@  int arch_domain_soft_reset(struct domain *d)
     struct page_info *page = virt_to_page(d->shared_info), *new_page;
     int ret = 0;
     struct domain *owner;
-    unsigned long mfn, gfn;
+    mfn_t mfn;
+    unsigned long gfn;
     p2m_type_t p2mt;
     unsigned int i;
 
@@ -655,7 +656,7 @@  int arch_domain_soft_reset(struct domain *d)
     ASSERT( owner == d );
 
     mfn = page_to_mfn(page);
-    gfn = mfn_to_gmfn(d, mfn);
+    gfn = mfn_to_gmfn(d, mfn_x(mfn));
 
     /*
      * gfn == INVALID_GFN indicates that the shared_info page was never mapped
@@ -664,7 +665,7 @@  int arch_domain_soft_reset(struct domain *d)
     if ( gfn == gfn_x(INVALID_GFN) )
         goto exit_put_page;
 
-    if ( mfn_x(get_gfn_query(d, gfn, &p2mt)) != mfn )
+    if ( !mfn_eq(get_gfn_query(d, gfn, &p2mt), mfn) )
     {
         printk(XENLOG_G_ERR "Failed to get Dom%d's shared_info GFN (%lx)\n",
                d->domain_id, gfn);
@@ -681,7 +682,7 @@  int arch_domain_soft_reset(struct domain *d)
         goto exit_put_gfn;
     }
 
-    ret = guest_physmap_remove_page(d, _gfn(gfn), _mfn(mfn), PAGE_ORDER_4K);
+    ret = guest_physmap_remove_page(d, _gfn(gfn), mfn, PAGE_ORDER_4K);
     if ( ret )
     {
         printk(XENLOG_G_ERR "Failed to remove Dom%d's shared_info frame %lx\n",
@@ -690,7 +691,7 @@  int arch_domain_soft_reset(struct domain *d)
         goto exit_put_gfn;
     }
 
-    ret = guest_physmap_add_page(d, _gfn(gfn), _mfn(page_to_mfn(new_page)),
+    ret = guest_physmap_add_page(d, _gfn(gfn), page_to_mfn(new_page),
                                  PAGE_ORDER_4K);
     if ( ret )
     {
@@ -988,7 +989,7 @@  int arch_set_info_guest(
                 {
                     if ( (page->u.inuse.type_info & PGT_type_mask) ==
                          PGT_l4_page_table )
-                        done = !fill_ro_mpt(_mfn(page_to_mfn(page)));
+                        done = !fill_ro_mpt(page_to_mfn(page));
 
                     page_unlock(page);
                 }
@@ -1115,7 +1116,7 @@  int arch_set_info_guest(
         l4_pgentry_t *l4tab;
 
         l4tab = map_domain_page(pagetable_get_mfn(v->arch.guest_table));
-        *l4tab = l4e_from_pfn(page_to_mfn(cr3_page),
+        *l4tab = l4e_from_mfn(page_to_mfn(cr3_page),
             _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
         unmap_domain_page(l4tab);
     }
@@ -1945,7 +1946,7 @@  int domain_relinquish_resources(struct domain *d)
         if ( d->arch.pirq_eoi_map != NULL )
         {
             unmap_domain_page_global(d->arch.pirq_eoi_map);
-            put_page_and_type(mfn_to_page(d->arch.pirq_eoi_map_mfn));
+            put_page_and_type(mfn_to_page(_mfn(d->arch.pirq_eoi_map_mfn)));
             d->arch.pirq_eoi_map = NULL;
             d->arch.auto_unmask = 0;
         }
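
Comparisons follow the same rule. Rather than unboxing both sides at the
call site, the arch_domain_soft_reset() hunk above switches to mfn_eq(),
which in sketch form is nothing more than:

    /* Presumed shape of the helper; matches its use in the hunks above. */
    static inline bool mfn_eq(mfn_t x, mfn_t y)
    {
        return mfn_x(x) == mfn_x(y);
    }
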
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index 3432a854dd..88046b39c9 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -331,13 +331,13 @@  void unmap_domain_page_global(const void *ptr)
 }
 
 /* Translate a map-domain-page'd address to the underlying MFN */
-unsigned long domain_page_map_to_mfn(const void *ptr)
+mfn_t domain_page_map_to_mfn(const void *ptr)
 {
     unsigned long va = (unsigned long)ptr;
     const l1_pgentry_t *pl1e;
 
     if ( va >= DIRECTMAP_VIRT_START )
-        return virt_to_mfn(ptr);
+        return _mfn(virt_to_mfn(ptr));
 
     if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END )
     {
@@ -350,5 +350,5 @@  unsigned long domain_page_map_to_mfn(const void *ptr)
         pl1e = &__linear_l1_table[l1_linear_offset(va)];
     }
 
-    return l1e_get_pfn(*pl1e);
+    return l1e_get_mfn(*pl1e);
 }
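
domain_page_map_to_mfn() can return l1e_get_mfn(*pl1e) directly because
the typesafe page-table accessors are thin wrappers over their pfn
counterparts. The assumed relationship, sketched with the l1 pair:

    /* Illustration of how the mfn accessors presumably relate to the
     * pfn ones (names as in xen/include/asm-x86/page.h). */
    #define l1e_get_mfn(x)           _mfn(l1e_get_pfn(x))
    #define l1e_from_mfn(mfn, flags) l1e_from_pfn(mfn_x(mfn), (flags))
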
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 80b4df9ec9..9f6c92411a 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -429,7 +429,7 @@  long arch_do_domctl(
         {
             if ( i >= max_pfns )
                 break;
-            mfn = page_to_mfn(page);
+            mfn = mfn_x(page_to_mfn(page));
             if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
                                       i, &mfn, 1) )
             {
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index a787f43737..4760816ee6 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -219,7 +219,7 @@  static int modified_memory(struct domain *d,
             page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
             if ( page )
             {
-                mfn_t gmfn = _mfn(page_to_mfn(page));
+                mfn_t gmfn = page_to_mfn(page);
 
                 paging_mark_dirty(d, gmfn);
                 /*
diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index a67071c739..b50506ec63 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -120,7 +120,7 @@  static int __init pvh_populate_memory_range(struct domain *d,
             continue;
         }
 
-        rc = guest_physmap_add_page(d, _gfn(start), _mfn(page_to_mfn(page)),
+        rc = guest_physmap_add_page(d, _gfn(start), page_to_mfn(page),
                                     order);
         if ( rc != 0 )
         {
@@ -270,7 +270,7 @@  static int __init pvh_setup_vmx_realmode_helpers(struct domain *d)
     }
     write_32bit_pse_identmap(ident_pt);
     unmap_domain_page(ident_pt);
-    put_page(mfn_to_page(mfn_x(mfn)));
+    put_page(mfn_to_page(mfn));
     d->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] = gaddr;
     if ( pvh_add_mem_range(d, gaddr, gaddr + PAGE_SIZE, E820_RESERVED) )
             printk("Unable to set identity page tables as reserved in the memory map\n");
@@ -288,7 +288,7 @@  static void __init pvh_steal_low_ram(struct domain *d, unsigned long start,
 
     for ( mfn = start; mfn < start + nr_pages; mfn++ )
     {
-        struct page_info *pg = mfn_to_page(mfn);
+        struct page_info *pg = mfn_to_page(_mfn(mfn));
         int rc;
 
         rc = unshare_xen_page_with_guest(pg, dom_io);
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index e924ce07c4..312aa91416 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -590,7 +590,7 @@  static void *hvmemul_map_linear_addr(
             goto unhandleable;
         }
 
-        *mfn++ = _mfn(page_to_mfn(page));
+        *mfn++ = page_to_mfn(page);
 
         if ( p2m_is_discard_write(p2mt) )
         {
@@ -622,7 +622,7 @@  static void *hvmemul_map_linear_addr(
  out:
     /* Drop all held references. */
     while ( mfn-- > hvmemul_ctxt->mfn )
-        put_page(mfn_to_page(mfn_x(*mfn)));
+        put_page(mfn_to_page(*mfn));
 
     return err;
 }
@@ -648,7 +648,7 @@  static void hvmemul_unmap_linear_addr(
     {
         ASSERT(mfn_valid(*mfn));
         paging_mark_dirty(currd, *mfn);
-        put_page(mfn_to_page(mfn_x(*mfn)));
+        put_page(mfn_to_page(*mfn));
 
         *mfn++ = _mfn(0); /* Clean slot for map()'s error checking. */
     }
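
Since mfn_t is a one-word struct, it stores into arrays and walks with
pointer arithmetic exactly like a raw frame number, which is what the
hvmemul map/unmap pair above relies on. A condensed sketch (page1 and
page2 stand in for pages looked up during the walk):

    mfn_t mfns[2], *p = mfns;

    *p++ = page_to_mfn(page1);      /* record each referenced frame */
    *p++ = page_to_mfn(page2);

    while ( p-- > mfns )            /* on exit, drop them in reverse */
        put_page(mfn_to_page(*p));
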
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 205b4cb685..4d795ee700 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2211,7 +2211,7 @@  int hvm_set_cr0(unsigned long value, bool_t may_defer)
             v->arch.guest_table = pagetable_from_page(page);
 
             HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
-                        v->arch.hvm_vcpu.guest_cr[3], page_to_mfn(page));
+                        v->arch.hvm_vcpu.guest_cr[3], mfn_x(page_to_mfn(page)));
         }
     }
     else if ( !(value & X86_CR0_PG) && (old_value & X86_CR0_PG) )
@@ -2546,7 +2546,7 @@  static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent,
         if ( unlikely(p2m_is_discard_write(p2mt)) )
             *writable = 0;
         else if ( !permanent )
-            paging_mark_dirty(d, _mfn(page_to_mfn(page)));
+            paging_mark_dirty(d, page_to_mfn(page));
     }
 
     if ( !permanent )
@@ -2588,7 +2588,7 @@  void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent)
 
 void hvm_unmap_guest_frame(void *p, bool_t permanent)
 {
-    unsigned long mfn;
+    mfn_t mfn;
     struct page_info *page;
 
     if ( !p )
@@ -2609,7 +2609,7 @@  void hvm_unmap_guest_frame(void *p, bool_t permanent)
         list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
             if ( track->page == page )
             {
-                paging_mark_dirty(d, _mfn(mfn));
+                paging_mark_dirty(d, mfn);
                 list_del(&track->list);
                 xfree(track);
                 break;
@@ -2626,7 +2626,7 @@  void hvm_mapped_guest_frames_mark_dirty(struct domain *d)
 
     spin_lock(&d->arch.hvm_domain.write_map.lock);
     list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
-        paging_mark_dirty(d, _mfn(page_to_mfn(track->page)));
+        paging_mark_dirty(d, page_to_mfn(track->page));
     spin_unlock(&d->arch.hvm_domain.write_map.lock);
 }
 
@@ -3200,8 +3200,8 @@  static enum hvm_translation_result __hvm_copy(
 
                 if ( xchg(&lastpage, gfn_x(gfn)) != gfn_x(gfn) )
                     dprintk(XENLOG_G_DEBUG,
-                            "%pv attempted write to read-only gfn %#lx (mfn=%#lx)\n",
-                            v, gfn_x(gfn), page_to_mfn(page));
+                            "%pv attempted write to read-only gfn %#lx (mfn=%#"PRI_mfn")\n",
+                            v, gfn_x(gfn), mfn_x(page_to_mfn(page)));
             }
             else
             {
@@ -3209,7 +3209,7 @@  static enum hvm_translation_result __hvm_copy(
                     memcpy(p, buf, count);
                 else
                     memset(p, 0, count);
-                paging_mark_dirty(v->domain, _mfn(page_to_mfn(page)));
+                paging_mark_dirty(v->domain, page_to_mfn(page));
             }
         }
         else
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index d5afe20cc8..0f823d201a 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -268,7 +268,7 @@  static void hvm_remove_ioreq_gfn(
     struct domain *d, struct hvm_ioreq_page *iorp)
 {
     if ( guest_physmap_remove_page(d, _gfn(iorp->gfn),
-                                   _mfn(page_to_mfn(iorp->page)), 0) )
+                                   page_to_mfn(iorp->page), 0) )
         domain_crash(d);
     clear_page(iorp->va);
 }
@@ -281,9 +281,9 @@  static int hvm_add_ioreq_gfn(
     clear_page(iorp->va);
 
     rc = guest_physmap_add_page(d, _gfn(iorp->gfn),
-                                _mfn(page_to_mfn(iorp->page)), 0);
+                                page_to_mfn(iorp->page), 0);
     if ( rc == 0 )
-        paging_mark_dirty(d, _mfn(page_to_mfn(iorp->page)));
+        paging_mark_dirty(d, page_to_mfn(iorp->page));
 
     return rc;
 }
diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
index 088fbdf8ce..925bab2438 100644
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -590,7 +590,7 @@  void stdvga_init(struct domain *d)
         if ( pg == NULL )
             break;
         s->vram_page[i] = pg;
-        clear_domain_page(_mfn(page_to_mfn(pg)));
+        clear_domain_page(page_to_mfn(pg));
     }
 
     if ( i == ARRAY_SIZE(s->vram_page) )
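
Helpers that already took mfn_t, such as clear_domain_page(), now compose
directly with page_to_mfn(), and the _mfn() boxing at each call site
disappears. The resulting allocation pattern, in sketch form:

    struct page_info *pg = alloc_domheap_page(NULL, 0);

    if ( pg )
        clear_domain_page(page_to_mfn(pg));  /* was _mfn(page_to_mfn(pg)) */
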
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index b9cf423fd9..f50f931598 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1521,7 +1521,7 @@  static int svm_cpu_up_prepare(unsigned int cpu)
         if ( !pg )
             goto err;
 
-        clear_domain_page(_mfn(page_to_mfn(pg)));
+        clear_domain_page(page_to_mfn(pg));
         *this_hsa = page_to_maddr(pg);
     }
 
@@ -1531,7 +1531,7 @@  static int svm_cpu_up_prepare(unsigned int cpu)
         if ( !pg )
             goto err;
 
-        clear_domain_page(_mfn(page_to_mfn(pg)));
+        clear_domain_page(page_to_mfn(pg));
         *this_vmcb = page_to_maddr(pg);
     }
 
diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index f0fa59d7d5..070551e1ab 100644
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -354,7 +354,7 @@  static void enable_hypercall_page(struct domain *d)
         if ( page )
             put_page(page);
         gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
-                 gmfn, page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
+                 gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
         return;
     }
 
@@ -414,7 +414,7 @@  static void initialize_vp_assist(struct vcpu *v)
 
  fail:
     gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n", gmfn,
-             page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
+             mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
 }
 
 static void teardown_vp_assist(struct vcpu *v)
@@ -494,7 +494,7 @@  static void update_reference_tsc(struct domain *d, bool_t initialize)
         if ( page )
             put_page(page);
         gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
-                 gmfn, page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
+                 gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
         return;
     }
 
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index b5100b5021..8eaa58e3c0 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1437,7 +1437,7 @@  int vmx_vcpu_enable_pml(struct vcpu *v)
 
     vmx_vmcs_enter(v);
 
-    __vmwrite(PML_ADDRESS, page_to_mfn(v->arch.hvm_vmx.pml_pg) << PAGE_SHIFT);
+    __vmwrite(PML_ADDRESS, page_to_maddr(v->arch.hvm_vmx.pml_pg));
     __vmwrite(GUEST_PML_INDEX, NR_PML_ENTRIES - 1);
 
     v->arch.hvm_vmx.secondary_exec_control |= SECONDARY_EXEC_ENABLE_PML;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index b18cceab55..c657ba89f5 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2978,7 +2978,7 @@  gp_fault:
 static int vmx_alloc_vlapic_mapping(struct domain *d)
 {
     struct page_info *pg;
-    unsigned long mfn;
+    mfn_t mfn;
 
     if ( !cpu_has_vmx_virtualize_apic_accesses )
         return 0;
@@ -2987,10 +2987,10 @@  static int vmx_alloc_vlapic_mapping(struct domain *d)
     if ( !pg )
         return -ENOMEM;
     mfn = page_to_mfn(pg);
-    clear_domain_page(_mfn(mfn));
+    clear_domain_page(mfn);
     share_xen_page_with_guest(pg, d, XENSHARE_writable);
-    d->arch.hvm_domain.vmx.apic_access_mfn = mfn;
-    set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), _mfn(mfn),
+    d->arch.hvm_domain.vmx.apic_access_mfn = mfn_x(mfn);
+    set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), mfn,
                        PAGE_ORDER_4K, p2m_get_hostp2m(d)->default_access);
 
     return 0;
@@ -3001,7 +3001,7 @@  static void vmx_free_vlapic_mapping(struct domain *d)
     unsigned long mfn = d->arch.hvm_domain.vmx.apic_access_mfn;
 
     if ( mfn != 0 )
-        free_shared_domheap_page(mfn_to_page(mfn));
+        free_shared_domheap_page(mfn_to_page(_mfn(mfn)));
 }
 
 static void vmx_install_vlapic_mapping(struct vcpu *v)
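
Not every field is converted by this patch: apic_access_mfn stays an
unsigned long, so the value is unboxed on store and re-boxed on use,
keeping the churn local to vmx.c. Condensed from the two hunks above:

    unsigned long raw = d->arch.hvm_domain.vmx.apic_access_mfn;

    /* The raw field crosses back into typesafe territory via _mfn(). */
    if ( raw != 0 )
        free_shared_domheap_page(mfn_to_page(_mfn(raw)));
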
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index dde02c076b..4836d69c0e 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -84,7 +84,7 @@  int nvmx_vcpu_initialise(struct vcpu *v)
         }
         v->arch.hvm_vmx.vmread_bitmap = vmread_bitmap;
 
-        clear_domain_page(_mfn(page_to_mfn(vmread_bitmap)));
+        clear_domain_page(page_to_mfn(vmread_bitmap));
 
         vmwrite_bitmap = alloc_domheap_page(NULL, 0);
         if ( !vmwrite_bitmap )
@@ -1729,7 +1729,7 @@  int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
                 nvcpu->nv_vvmcx = vvmcx;
                 nvcpu->nv_vvmcxaddr = gpa;
                 v->arch.hvm_vmx.vmcs_shadow_maddr =
-                    pfn_to_paddr(domain_page_map_to_mfn(vvmcx));
+                    mfn_to_maddr(domain_page_map_to_mfn(vvmcx));
             }
             else
             {
@@ -1815,7 +1815,7 @@  int nvmx_handle_vmclear(struct cpu_user_regs *regs)
         {
             if ( writable )
                 clear_vvmcs_launched(&nvmx->launched_list,
-                                     domain_page_map_to_mfn(vvmcs));
+                                     mfn_x(domain_page_map_to_mfn(vvmcs)));
             else
                 rc = VMFAIL_VALID;
             hvm_unmap_guest_frame(vvmcs, 0);
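
With domain_page_map_to_mfn() returning mfn_t, address derivation chains
through the typesafe mfn_to_maddr(), and only call sites that genuinely
need the raw number, like clear_vvmcs_launched() above, unbox it.
Condensed from the two vvmx.c hunks:

    /* Deriving an address versus handing over the raw frame number. */
    v->arch.hvm_vmx.vmcs_shadow_maddr =
        mfn_to_maddr(domain_page_map_to_mfn(vvmcx));
    clear_vvmcs_launched(&nvmx->launched_list,
                         mfn_x(domain_page_map_to_mfn(vvmcs)));
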
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index a20fdcaea4..f6ca4f884b 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -128,12 +128,6 @@ 
 
 #include "pv/mm.h"
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
 /* Mapping of the fixmap space needed early. */
 l1_pgentry_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
     l1_fixmap[L1_PAGETABLE_ENTRIES];
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index 6055fec1ad..f67aeda3d0 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -469,20 +469,20 @@  guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
     if ( l3p )
     {
         unmap_domain_page(l3p);
-        put_page(mfn_to_page(mfn_x(gw->l3mfn)));
+        put_page(mfn_to_page(gw->l3mfn));
     }
 #endif
 #if GUEST_PAGING_LEVELS >= 3
     if ( l2p )
     {
         unmap_domain_page(l2p);
-        put_page(mfn_to_page(mfn_x(gw->l2mfn)));
+        put_page(mfn_to_page(gw->l2mfn));
     }
 #endif
     if ( l1p )
     {
         unmap_domain_page(l1p);
-        put_page(mfn_to_page(mfn_x(gw->l1mfn)));
+        put_page(mfn_to_page(gw->l1mfn));
     }
 
     return walk_ok;
diff --git a/xen/arch/x86/mm/hap/guest_walk.c b/xen/arch/x86/mm/hap/guest_walk.c
index c550017ba4..cb3f9cebe7 100644
--- a/xen/arch/x86/mm/hap/guest_walk.c
+++ b/xen/arch/x86/mm/hap/guest_walk.c
@@ -83,7 +83,7 @@  unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
         *pfec &= ~PFEC_page_present;
         goto out_tweak_pfec;
     }
-    top_mfn = _mfn(page_to_mfn(top_page));
+    top_mfn = page_to_mfn(top_page);
 
     /* Map the top-level table and call the tree-walker */
     ASSERT(mfn_valid(top_mfn));
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 41deb90787..97f2516ecd 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -42,12 +42,6 @@ 
 
 #include "private.h"
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
-#undef page_to_mfn
-#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
-
 /************************************************/
 /*          HAP VRAM TRACKING SUPPORT           */
 /************************************************/
diff --git a/xen/arch/x86/mm/hap/nested_ept.c b/xen/arch/x86/mm/hap/nested_ept.c
index 14b1bb01e9..1738df69f6 100644
--- a/xen/arch/x86/mm/hap/nested_ept.c
+++ b/xen/arch/x86/mm/hap/nested_ept.c
@@ -173,7 +173,7 @@  nept_walk_tables(struct vcpu *v, unsigned long l2ga, ept_walk_t *gw)
             goto map_err;
         gw->lxe[lvl] = lxp[ept_lvl_table_offset(l2ga, lvl)];
         unmap_domain_page(lxp);
-        put_page(mfn_to_page(mfn_x(lxmfn)));
+        put_page(mfn_to_page(lxmfn));
 
         if ( nept_non_present_check(gw->lxe[lvl]) )
             goto non_present;
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 6f4be95515..6ecf0b27d5 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -152,11 +152,6 @@  static inline shr_handle_t get_next_handle(void)
 #define mem_sharing_enabled(d) \
     (is_hvm_domain(d) && (d)->arch.hvm_domain.mem_sharing_enabled)
 
-#undef mfn_to_page
-#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
-#undef page_to_mfn
-#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
-
 static atomic_t nr_saved_mfns   = ATOMIC_INIT(0); 
 static atomic_t nr_shared_mfns  = ATOMIC_INIT(0);
 
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index b4996ce658..8885916c0a 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -33,6 +33,10 @@ 
 
 #include "mm-locks.h"
 
+/* Override macros from asm/page.h to avoid using typesafe mfn_t. */
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(_mfn(mfn))
+
 #define atomic_read_ept_entry(__pepte)                              \
     ( (ept_entry_t) { .epte = read_atomic(&(__pepte)->epte) } )
 
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 0a811ccf28..7a88074c31 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -29,12 +29,6 @@ 
 
 #include "mm-locks.h"
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
-#undef page_to_mfn
-#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
-
 #define superpage_aligned(_x)  (((_x)&(SUPERPAGE_PAGES-1))==0)
 
 /* Enforce lock ordering when grabbing the "external" page_alloc lock */
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index c72a3cdebb..b71f51e0b2 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -47,12 +47,6 @@  bool_t __initdata opt_hap_1gb = 1, __initdata opt_hap_2mb = 1;
 boolean_param("hap_1gb", opt_hap_1gb);
 boolean_param("hap_2mb", opt_hap_2mb);
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
-#undef page_to_mfn
-#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
-
 DEFINE_PERCPU_RWLOCK_GLOBAL(p2m_percpu_rwlock);
 
 /* Init the datastructures for later use by the p2m code */
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 1e2c9ba4cc..cb97642cbc 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -47,12 +47,6 @@ 
 /* Per-CPU variable for enforcing the lock ordering */
 DEFINE_PER_CPU(int, mm_lock_level);
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
-#undef page_to_mfn
-#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
-
 /************************************************/
 /*              LOG DIRTY SUPPORT               */
 /************************************************/
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index 6a03370402..b9cc680f4e 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -315,7 +315,7 @@  static inline int page_is_out_of_sync(struct page_info *p)
 
 static inline int mfn_is_out_of_sync(mfn_t gmfn)
 {
-    return page_is_out_of_sync(mfn_to_page(mfn_x(gmfn)));
+    return page_is_out_of_sync(mfn_to_page(gmfn));
 }
 
 static inline int page_oos_may_write(struct page_info *p)
@@ -326,7 +326,7 @@  static inline int page_oos_may_write(struct page_info *p)
 
 static inline int mfn_oos_may_write(mfn_t gmfn)
 {
-    return page_oos_may_write(mfn_to_page(mfn_x(gmfn)));
+    return page_oos_may_write(mfn_to_page(gmfn));
 }
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) */
 
@@ -465,18 +465,6 @@  void sh_reset_l3_up_pointers(struct vcpu *v);
  * MFN/page-info handling
  */
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
-#undef page_to_mfn
-#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
-
-/* Override pagetable_t <-> struct page_info conversions to work with mfn_t */
-#undef pagetable_get_page
-#define pagetable_get_page(x)   mfn_to_page(pagetable_get_mfn(x))
-#undef pagetable_from_page
-#define pagetable_from_page(pg) pagetable_from_mfn(page_to_mfn(pg))
-
 #define backpointer(sp) _mfn(pdx_to_pfn((unsigned long)(sp)->v.sh.back))
 static inline unsigned long __backpointer(const struct page_info *sp)
 {
diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
index 4fc967f893..a87987da6f 100644
--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -430,7 +430,7 @@  static void dump_numa(unsigned char key)
         spin_lock(&d->page_alloc_lock);
         page_list_for_each(page, &d->page_list)
         {
-            i = phys_to_nid((paddr_t)page_to_mfn(page) << PAGE_SHIFT);
+            i = phys_to_nid(page_to_maddr(page));
             page_num_node[i]++;
         }
         spin_unlock(&d->page_alloc_lock);
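
The NUMA dump previously open-coded the machine address as a shifted raw MFN. page_to_maddr() computes the same value while staying inside the typed helpers; equivalent forms, sketched (raw_mfn standing for the old page_to_mfn(page) result):

    /* old form: */  phys_to_nid((paddr_t)raw_mfn << PAGE_SHIFT);
    /* new form: */  phys_to_nid(page_to_maddr(page));
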
diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
index a5fedca671..5422f3e372 100644
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -242,7 +242,7 @@  ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         }
 
         if ( cmpxchg(&currd->arch.pirq_eoi_map_mfn,
-                     0, page_to_mfn(page)) != 0 )
+                     0, mfn_x(page_to_mfn(page))) != 0 )
         {
             put_page_and_type(page);
             ret = -EBUSY;
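
Interfaces that still store a raw frame number, such as the unsigned long pirq_eoi_map_mfn field fed to cmpxchg() here, mark the raw/typed boundary explicitly with mfn_x(). The idiom, sketched with the locals from the hunk above:

    mfn_t mfn = page_to_mfn(page);

    /* cmpxchg() operates on the raw integer representation. */
    if ( cmpxchg(&currd->arch.pirq_eoi_map_mfn, 0, mfn_x(mfn)) != 0 )
    {
        put_page_and_type(page);  /* someone else installed a map first */
        ret = -EBUSY;
    }
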
diff --git a/xen/arch/x86/pv/callback.c b/xen/arch/x86/pv/callback.c
index 97d8438600..5957cb5085 100644
--- a/xen/arch/x86/pv/callback.c
+++ b/xen/arch/x86/pv/callback.c
@@ -31,12 +31,6 @@ 
 
 #include <public/callback.h>
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
 static int register_guest_nmi_callback(unsigned long address)
 {
     struct vcpu *curr = current;
diff --git a/xen/arch/x86/pv/descriptor-tables.c b/xen/arch/x86/pv/descriptor-tables.c
index 81973af124..f2b20f9910 100644
--- a/xen/arch/x86/pv/descriptor-tables.c
+++ b/xen/arch/x86/pv/descriptor-tables.c
@@ -25,16 +25,6 @@ 
 #include <asm/p2m.h>
 #include <asm/pv/mm.h>
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
-/*******************
- * Descriptor Tables
- */
-
 void pv_destroy_gdt(struct vcpu *v)
 {
     l1_pgentry_t *pl1e;
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index 44601d08d3..1ac843e603 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -20,6 +20,12 @@ 
 #include <asm/page.h>
 #include <asm/setup.h>
 
+/* Override macros from asm/page.h to avoid using typesafe mfn_t. */
+#undef page_to_mfn
+#define page_to_mfn(pg) mfn_x(__page_to_mfn(pg))
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(_mfn(mfn))
+
 /* Allow ring-3 access in long mode as guest cannot use ring 1 ... */
 #define BASE_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
 #define L1_PROT (BASE_PROT|_PAGE_GUEST_KERNEL)
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index 2234128bb3..93f8fa4323 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -11,12 +11,6 @@ 
 
 #include <asm/pv/domain.h>
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
 static void noreturn continue_nonidle_domain(struct vcpu *v)
 {
     check_wakeup_from_wait();
diff --git a/xen/arch/x86/pv/emul-gate-op.c b/xen/arch/x86/pv/emul-gate-op.c
index 0f89c91dff..5cdb54c937 100644
--- a/xen/arch/x86/pv/emul-gate-op.c
+++ b/xen/arch/x86/pv/emul-gate-op.c
@@ -41,12 +41,6 @@ 
 
 #include "emulate.h"
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
 static int read_gate_descriptor(unsigned int gate_sel,
                                 const struct vcpu *v,
                                 unsigned int *sel,
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 2f9264548a..6ebaf2f1e3 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -43,16 +43,6 @@ 
 #include "emulate.h"
 #include "mm.h"
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
-/***********************
- * I/O emulation support
- */
-
 struct priv_op_ctxt {
     struct x86_emulate_ctxt ctxt;
     struct {
diff --git a/xen/arch/x86/pv/grant_table.c b/xen/arch/x86/pv/grant_table.c
index aaca228c6b..97323367c5 100644
--- a/xen/arch/x86/pv/grant_table.c
+++ b/xen/arch/x86/pv/grant_table.c
@@ -27,12 +27,6 @@ 
 
 #include "mm.h"
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
 static unsigned int grant_to_pte_flags(unsigned int grant_flags,
                                        unsigned int cache_flags)
 {
diff --git a/xen/arch/x86/pv/ro-page-fault.c b/xen/arch/x86/pv/ro-page-fault.c
index 6b2976d3df..a7b7eb5113 100644
--- a/xen/arch/x86/pv/ro-page-fault.c
+++ b/xen/arch/x86/pv/ro-page-fault.c
@@ -33,12 +33,6 @@ 
 #include "emulate.h"
 #include "mm.h"
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
 /*********************
  * Writable Pagetables
  */
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 1609b627ae..0791345a47 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -46,12 +46,6 @@ 
 #include <mach_wakecpu.h>
 #include <smpboot_hooks.h>
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
 #define setup_trampoline()    (bootsym_phys(trampoline_realmode_entry))
 
 unsigned long __read_mostly trampoline_phys;
diff --git a/xen/arch/x86/tboot.c b/xen/arch/x86/tboot.c
index 59d7c477f4..e9522f06ec 100644
--- a/xen/arch/x86/tboot.c
+++ b/xen/arch/x86/tboot.c
@@ -184,7 +184,7 @@  static void update_pagetable_mac(vmac_ctx_t *ctx)
 
     for ( mfn = 0; mfn < max_page; mfn++ )
     {
-        struct page_info *page = mfn_to_page(mfn);
+        struct page_info *page = mfn_to_page(_mfn(mfn));
 
         if ( !mfn_valid(_mfn(mfn)) )
             continue;
@@ -276,7 +276,7 @@  static void tboot_gen_xenheap_integrity(const uint8_t key[TB_KEY_SIZE],
     vmac_set_key((uint8_t *)key, &ctx);
     for ( mfn = 0; mfn < max_page; mfn++ )
     {
-        struct page_info *page = __mfn_to_page(mfn);
+        struct page_info *page = mfn_to_page(_mfn(mfn));
 
         if ( !mfn_valid(_mfn(mfn)) )
             continue;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 86506f3747..e2562eee4e 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -810,8 +810,8 @@  int wrmsr_hypervisor_regs(uint32_t idx, uint64_t val)
             }
 
             gdprintk(XENLOG_WARNING,
-                     "Bad GMFN %lx (MFN %lx) to MSR %08x\n",
-                     gmfn, page ? page_to_mfn(page) : -1UL, base);
+                     "Bad GMFN %lx (MFN %#"PRI_mfn") to MSR %08x\n",
+                     gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN), base);
             return 0;
         }
 
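
mfn_t has no printf conversion of its own; the idiom is to unwrap with mfn_x() and format via the PRI_mfn macro, with INVALID_MFN standing in when no page is at hand. Minimal sketch of the logging pattern used above:

    mfn_t mfn = page ? page_to_mfn(page) : INVALID_MFN;

    gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %#"PRI_mfn")\n",
             gmfn, mfn_x(mfn));

Note the conditional expression only type-checks because both arms are now mfn_t; the old raw variant needed an explicit -1UL for the missing-page case.
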
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 34cd8457cf..ccad3d448b 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -40,6 +40,12 @@  asm(".file \"" __FILE__ "\"");
 #include <asm/mem_sharing.h>
 #include <public/memory.h>
 
+/* Override macros from asm/page.h to avoid using typesafe mfn_t. */
+#undef page_to_mfn
+#define page_to_mfn(pg) mfn_x(__page_to_mfn(pg))
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(_mfn(mfn))
+
 unsigned int __read_mostly m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
 
 l2_pgentry_t *compat_idle_pg_table_l2;
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 5aebcf265f..e8302e8e1b 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -1192,7 +1192,7 @@  int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset)
     }
 
     v->vcpu_info = new_info;
-    v->vcpu_info_mfn = _mfn(page_to_mfn(page));
+    v->vcpu_info_mfn = page_to_mfn(page);
 
     /* Set new vcpu_info pointer /before/ setting pending flags. */
     smp_wmb();
@@ -1225,7 +1225,7 @@  void unmap_vcpu_info(struct vcpu *v)
 
     vcpu_info_reset(v); /* NB: Clobbers v->vcpu_info_mfn */
 
-    put_page_and_type(mfn_to_page(mfn_x(mfn)));
+    put_page_and_type(mfn_to_page(mfn));
 }
 
 int default_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index c5950f2b3f..73a9d0293b 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -40,6 +40,12 @@ 
 #include <xsm/xsm.h>
 #include <asm/flushtlb.h>
 
+/* Override macros from asm/page.h to avoid using typesafe mfn_t. */
+#undef page_to_mfn
+#define page_to_mfn(pg) mfn_x(__page_to_mfn(pg))
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(_mfn(mfn))
+
 /* Per-domain grant information. */
 struct grant_table {
     /*
diff --git a/xen/common/kimage.c b/xen/common/kimage.c
index afd8292cc1..210241dfb7 100644
--- a/xen/common/kimage.c
+++ b/xen/common/kimage.c
@@ -23,12 +23,6 @@ 
 
 #include <asm/page.h>
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg)  _mfn(__page_to_mfn(pg))
-
 /*
  * When kexec transitions to the new kernel there is a one-to-one
  * mapping between physical and virtual addresses.  On processors
diff --git a/xen/common/memory.c b/xen/common/memory.c
index ad987e0f29..e467f271c7 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -29,6 +29,12 @@ 
 #include <public/memory.h>
 #include <xsm/xsm.h>
 
+/* Override macros from asm/page.h to avoid using typesafe mfn_t. */
+#undef page_to_mfn
+#define page_to_mfn(pg) mfn_x(__page_to_mfn(pg))
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(_mfn(mfn))
+
 struct memop_args {
     /* INPUT */
     struct domain *domain;     /* Domain to be affected. */
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 5616a82263..34c2089cd2 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -150,6 +150,12 @@ 
 #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
 #endif
 
+/* Override macros from asm/page.h to avoid using typesafe mfn_t. */
+#undef page_to_mfn
+#define page_to_mfn(pg) mfn_x(__page_to_mfn(pg))
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(_mfn(mfn))
+
 /*
  * Comma-separated list of hexadecimal page numbers containing bad bytes.
  * e.g. 'badpage=0x3f45,0x8a321'.
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 324f42a6f9..c077f87e77 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -243,7 +243,7 @@  static void tmem_persistent_pool_page_put(void *page_va)
     struct page_info *pi;
 
     ASSERT(IS_PAGE_ALIGNED(page_va));
-    pi = mfn_to_page(virt_to_mfn(page_va));
+    pi = mfn_to_page(_mfn(virt_to_mfn(page_va)));
     ASSERT(IS_VALID_PAGE(pi));
     __tmem_free_page_thispool(pi);
 }
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index bd52e44faf..bf7b14f79a 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -14,10 +14,6 @@ 
 #include <xen/cpu.h>
 #include <xen/init.h>
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
 bool __read_mostly opt_tmem;
 boolean_param("tmem", opt_tmem);
 
diff --git a/xen/common/trace.c b/xen/common/trace.c
index 2e18702317..cf8f8b0997 100644
--- a/xen/common/trace.c
+++ b/xen/common/trace.c
@@ -42,6 +42,12 @@  CHECK_t_buf;
 #define compat_t_rec t_rec
 #endif
 
+/* Override macros from asm/page.h to avoid using typesafe mfn_t. */
+#undef page_to_mfn
+#define page_to_mfn(pg) mfn_x(__page_to_mfn(pg))
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(_mfn(mfn))
+
 /* opt_tbuf_size: trace buffer size (in pages) for each cpu */
 static unsigned int opt_tbuf_size;
 static unsigned int opt_tevt_mask;
diff --git a/xen/common/vmap.c b/xen/common/vmap.c
index 0b23f8fb97..10f32b29e0 100644
--- a/xen/common/vmap.c
+++ b/xen/common/vmap.c
@@ -36,7 +36,7 @@  void __init vm_init_type(enum vmap_region type, void *start, void *end)
     {
         struct page_info *pg = alloc_domheap_page(NULL, 0);
 
-        map_pages_to_xen(va, page_to_mfn(pg), 1, PAGE_HYPERVISOR);
+        map_pages_to_xen(va, mfn_x(page_to_mfn(pg)), 1, PAGE_HYPERVISOR);
         clear_page((void *)va);
     }
     bitmap_fill(vm_bitmap(type), vm_low[type]);
@@ -107,7 +107,8 @@  static void *vm_alloc(unsigned int nr, unsigned int align,
         {
             unsigned long va = (unsigned long)vm_bitmap(t) + vm_top[t] / 8;
 
-            if ( !map_pages_to_xen(va, page_to_mfn(pg), 1, PAGE_HYPERVISOR) )
+            if ( !map_pages_to_xen(va, mfn_x(page_to_mfn(pg)),
+                                   1, PAGE_HYPERVISOR) )
             {
                 clear_page((void *)va);
                 vm_top[t] += PAGE_SIZE * 8;
@@ -258,7 +259,7 @@  static void *vmalloc_type(size_t size, enum vmap_region type)
         pg = alloc_domheap_page(NULL, 0);
         if ( pg == NULL )
             goto error;
-        mfn[i] = _mfn(page_to_mfn(pg));
+        mfn[i] = page_to_mfn(pg);
     }
 
     va = __vmap(mfn, 1, pages, 1, PAGE_HYPERVISOR, type);
@@ -270,7 +271,7 @@  static void *vmalloc_type(size_t size, enum vmap_region type)
 
  error:
     while ( i-- )
-        free_domheap_page(mfn_to_page(mfn_x(mfn[i])));
+        free_domheap_page(mfn_to_page(mfn[i]));
     xfree(mfn);
     return NULL;
 }
diff --git a/xen/common/xenoprof.c b/xen/common/xenoprof.c
index 5acdde5691..fecdfb3697 100644
--- a/xen/common/xenoprof.c
+++ b/xen/common/xenoprof.c
@@ -22,8 +22,6 @@ 
 /* Override macros from asm/page.h to make them work with mfn_t */
 #undef virt_to_mfn
 #define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
 
 /* Limit amount of pages used for shared buffer (per domain) */
 #define MAX_OPROF_SHARED_PAGES 32
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index fd2327d3e5..bd62c2ce90 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -25,6 +25,12 @@ 
 #include "../ats.h"
 #include <xen/pci.h>
 
+/* Override macros from asm/page.h to avoid using typesafe mfn_t. */
+#undef page_to_mfn
+#define page_to_mfn(pg) mfn_x(__page_to_mfn(pg))
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(_mfn(mfn))
+
 /* Given pfn and page table level, return pde index */
 static unsigned int pfn_to_pde_idx(unsigned long pfn, unsigned int level)
 {
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 1aecf7cf34..2c44fabf99 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -184,7 +184,7 @@  void __hwdom_init iommu_hwdom_init(struct domain *d)
 
         page_list_for_each ( page, &d->page_list )
         {
-            unsigned long mfn = page_to_mfn(page);
+            unsigned long mfn = mfn_x(page_to_mfn(page));
             unsigned long gfn = mfn_to_gmfn(d, mfn);
             unsigned int mapping = IOMMUF_readable;
             int ret;
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index 0253823173..68182afd91 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -58,7 +58,7 @@  int arch_iommu_populate_page_table(struct domain *d)
         if ( is_hvm_domain(d) ||
             (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
         {
-            unsigned long mfn = page_to_mfn(page);
+            unsigned long mfn = mfn_x(page_to_mfn(page));
             unsigned long gfn = mfn_to_gmfn(d, mfn);
 
             if ( gfn != gfn_x(INVALID_GFN) )
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 737a429409..3eb4b68761 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -138,7 +138,7 @@  extern vaddr_t xenheap_virt_start;
 #endif
 
 #ifdef CONFIG_ARM_32
-#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
+#define is_xen_heap_page(page) is_xen_heap_mfn(mfn_x(__page_to_mfn(page)))
 #define is_xen_heap_mfn(mfn) ({                                 \
     unsigned long mfn_ = (mfn);                                 \
     (mfn_ >= mfn_x(xenheap_mfn_start) &&                        \
@@ -220,12 +220,14 @@  static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
 })
 
 /* Convert between machine frame numbers and page-info structures. */
-#define __mfn_to_page(mfn)  (frame_table + (pfn_to_pdx(mfn) - frametable_base_pdx))
-#define __page_to_mfn(pg)   pdx_to_pfn((unsigned long)((pg) - frame_table) + frametable_base_pdx)
+#define __mfn_to_page(mfn)                                          \
+    (frame_table + (pfn_to_pdx(mfn_x(mfn)) - frametable_base_pdx))
+#define __page_to_mfn(pg)                                           \
+    _mfn(pdx_to_pfn((unsigned long)((pg) - frame_table) + frametable_base_pdx))
 
 /* Convert between machine addresses and page-info structures. */
-#define maddr_to_page(ma) __mfn_to_page((ma) >> PAGE_SHIFT)
-#define page_to_maddr(pg) ((paddr_t)__page_to_mfn(pg) << PAGE_SHIFT)
+#define maddr_to_page(ma) __mfn_to_page(maddr_to_mfn(ma))
+#define page_to_maddr(pg) (mfn_to_maddr(__page_to_mfn(pg)))
 
 /* Convert between frame number and address formats.  */
 #define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
@@ -235,7 +237,7 @@  static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
 #define gaddr_to_gfn(ga)    _gfn(paddr_to_pfn(ga))
 #define mfn_to_maddr(mfn)   pfn_to_paddr(mfn_x(mfn))
 #define maddr_to_mfn(ma)    _mfn(paddr_to_pfn(ma))
-#define vmap_to_mfn(va)     paddr_to_pfn(virt_to_maddr((vaddr_t)va))
+#define vmap_to_mfn(va)     maddr_to_mfn(virt_to_maddr((vaddr_t)va))
 #define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
 
 /* Page-align address and convert to frame number format */
@@ -309,7 +311,7 @@  static inline struct page_info *virt_to_page(const void *v)
 
 static inline void *page_to_virt(const struct page_info *pg)
 {
-    return mfn_to_virt(page_to_mfn(pg));
+    return mfn_to_virt(mfn_x(__page_to_mfn(pg)));
 }
 
 struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va,
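
On Arm the conversion macros now apply the typesafe wrapper at the edges of the pdx compression, so the page/mfn mapping round-trips entirely in typed form. The invariant, written as illustrative assertions (not part of the patch):

    mfn_t mfn = _mfn(0x80000);    /* hypothetical valid MFN */

    ASSERT(mfn_eq(__page_to_mfn(__mfn_to_page(mfn)), mfn));
    ASSERT(page_to_maddr(__mfn_to_page(mfn)) == mfn_to_maddr(mfn));
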
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index faadcfe8fe..87c9994974 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -276,7 +276,7 @@  static inline struct page_info *get_page_from_gfn(
 {
     struct page_info *page;
     p2m_type_t p2mt;
-    unsigned long mfn = mfn_x(p2m_lookup(d, _gfn(gfn), &p2mt));
+    mfn_t mfn = p2m_lookup(d, _gfn(gfn), &p2mt);
 
     if (t)
         *t = p2mt;
@@ -284,7 +284,7 @@  static inline struct page_info *get_page_from_gfn(
     if ( !p2m_is_any_ram(p2mt) )
         return NULL;
 
-    if ( !mfn_valid(_mfn(mfn)) )
+    if ( !mfn_valid(mfn) )
         return NULL;
     page = mfn_to_page(mfn);
 
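
The typed value now flows unmodified from p2m_lookup() through mfn_valid() into mfn_to_page(), where the old code unwrapped immediately and re-wrapped for each check. The converted flow, reduced to a sketch:

    p2m_type_t p2mt;
    mfn_t mfn = p2m_lookup(d, _gfn(gfn), &p2mt);

    if ( !p2m_is_any_ram(p2mt) || !mfn_valid(mfn) )
        return NULL;

    return mfn_to_page(mfn);
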
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 83626085e0..c8dd273517 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -270,7 +270,7 @@  struct page_info
 
 #define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
 #define is_xen_heap_mfn(mfn) \
-    (__mfn_valid(mfn) && is_xen_heap_page(__mfn_to_page(mfn)))
+    (__mfn_valid(mfn) && is_xen_heap_page(__mfn_to_page(_mfn(mfn))))
 #define is_xen_fixed_mfn(mfn)                     \
     ((((mfn) << PAGE_SHIFT) >= __pa(&_stext)) &&  \
      (((mfn) << PAGE_SHIFT) <= __pa(&__2M_rwdata_end)))
@@ -383,7 +383,7 @@  void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner);
 
 static inline bool get_page_from_mfn(mfn_t mfn, struct domain *d)
 {
-    struct page_info *page = __mfn_to_page(mfn_x(mfn));
+    struct page_info *page = __mfn_to_page(mfn);
 
     if ( unlikely(!mfn_valid(mfn)) || unlikely(!get_page(page, d)) )
     {
@@ -477,10 +477,10 @@  extern paddr_t mem_hotplug;
 #define SHARED_M2P(_e)           ((_e) == SHARED_M2P_ENTRY)
 
 #define compat_machine_to_phys_mapping ((unsigned int *)RDWR_COMPAT_MPT_VIRT_START)
-#define _set_gpfn_from_mfn(mfn, pfn) ({                        \
-    struct domain *d = page_get_owner(__mfn_to_page(mfn));     \
-    unsigned long entry = (d && (d == dom_cow)) ?              \
-        SHARED_M2P_ENTRY : (pfn);                              \
+#define _set_gpfn_from_mfn(mfn, pfn) ({                           \
+    struct domain *d = page_get_owner(__mfn_to_page(_mfn(mfn)));  \
+    unsigned long entry = (d && (d == dom_cow)) ?                 \
+        SHARED_M2P_ENTRY : (pfn);                                 \
     ((void)((mfn) >= (RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) / 4 || \
             (compat_machine_to_phys_mapping[(mfn)] = (unsigned int)(entry))), \
      machine_to_phys_mapping[(mfn)] = (entry));                \
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 17b1d0c8d3..a2a216061f 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -487,7 +487,7 @@  static inline struct page_info *get_page_from_gfn(
     /* Non-translated guests see 1-1 RAM / MMIO mappings everywhere */
     if ( t )
         *t = likely(d != dom_io) ? p2m_ram_rw : p2m_mmio_direct;
-    page = __mfn_to_page(gfn);
+    page = __mfn_to_page(_mfn(gfn));
     return mfn_valid(_mfn(gfn)) && get_page(page, d) ? page : NULL;
 }
 
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index 45ca742678..8737ef16ff 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -88,10 +88,10 @@ 
     ((paddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK))))
 
 /* Get pointer to info structure of page mapped by pte (struct page_info *). */
-#define l1e_get_page(x)           (__mfn_to_page(l1e_get_pfn(x)))
-#define l2e_get_page(x)           (__mfn_to_page(l2e_get_pfn(x)))
-#define l3e_get_page(x)           (__mfn_to_page(l3e_get_pfn(x)))
-#define l4e_get_page(x)           (__mfn_to_page(l4e_get_pfn(x)))
+#define l1e_get_page(x)           (__mfn_to_page(l1e_get_mfn(x)))
+#define l2e_get_page(x)           (__mfn_to_page(l2e_get_mfn(x)))
+#define l3e_get_page(x)           (__mfn_to_page(l3e_get_mfn(x)))
+#define l4e_get_page(x)           (__mfn_to_page(l4e_get_mfn(x)))
 
 /* Get pte access flags (unsigned int). */
 #define l1e_get_flags(x)           (get_pte_flags((x).l1))
@@ -157,10 +157,10 @@  static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
 #define l4e_from_intpte(intpte)    ((l4_pgentry_t) { (intpte_t)(intpte) })
 
 /* Construct a pte from a page pointer and access flags. */
-#define l1e_from_page(page, flags) l1e_from_pfn(__page_to_mfn(page), (flags))
-#define l2e_from_page(page, flags) l2e_from_pfn(__page_to_mfn(page), (flags))
-#define l3e_from_page(page, flags) l3e_from_pfn(__page_to_mfn(page), (flags))
-#define l4e_from_page(page, flags) l4e_from_pfn(__page_to_mfn(page), (flags))
+#define l1e_from_page(page, flags) l1e_from_mfn(__page_to_mfn(page), (flags))
+#define l2e_from_page(page, flags) l2e_from_mfn(__page_to_mfn(page), (flags))
+#define l3e_from_page(page, flags) l3e_from_mfn(__page_to_mfn(page), (flags))
+#define l4e_from_page(page, flags) l4e_from_mfn(__page_to_mfn(page), (flags))
 
 /* Add extra flags to an existing pte. */
 #define l1e_add_flags(x, flags)    ((x).l1 |= put_pte_flags(flags))
@@ -215,13 +215,13 @@  static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
 /* Page-table type. */
 typedef struct { u64 pfn; } pagetable_t;
 #define pagetable_get_paddr(x)  ((paddr_t)(x).pfn << PAGE_SHIFT)
-#define pagetable_get_page(x)   __mfn_to_page((x).pfn)
+#define pagetable_get_page(x)   __mfn_to_page(pagetable_get_mfn(x))
 #define pagetable_get_pfn(x)    ((x).pfn)
 #define pagetable_get_mfn(x)    _mfn(((x).pfn))
 #define pagetable_is_null(x)    ((x).pfn == 0)
 #define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) })
 #define pagetable_from_mfn(mfn) ((pagetable_t) { mfn_x(mfn) })
-#define pagetable_from_page(pg) pagetable_from_pfn(__page_to_mfn(pg))
+#define pagetable_from_page(pg) pagetable_from_mfn(__page_to_mfn(pg))
 #define pagetable_from_paddr(p) pagetable_from_pfn((p)>>PAGE_SHIFT)
 #define pagetable_null()        pagetable_from_pfn(0)
 
@@ -240,12 +240,12 @@  void copy_page_sse2(void *, const void *);
 #define __mfn_to_virt(mfn)  (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))
 
 /* Convert between machine frame numbers and page-info structures. */
-#define __mfn_to_page(mfn)  (frame_table + pfn_to_pdx(mfn))
-#define __page_to_mfn(pg)   pdx_to_pfn((unsigned long)((pg) - frame_table))
+#define __mfn_to_page(mfn)  (frame_table + pfn_to_pdx(mfn_x(mfn)))
+#define __page_to_mfn(pg)   _mfn(pdx_to_pfn((unsigned long)((pg) - frame_table)))
 
 /* Convert between machine addresses and page-info structures. */
-#define __maddr_to_page(ma) __mfn_to_page((ma) >> PAGE_SHIFT)
-#define __page_to_maddr(pg) ((paddr_t)__page_to_mfn(pg) << PAGE_SHIFT)
+#define __maddr_to_page(ma) __mfn_to_page(maddr_to_mfn(ma))
+#define __page_to_maddr(pg) (mfn_to_maddr(__page_to_mfn(pg)))
 
 /* Convert between frame number and address formats.  */
 #define __pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
@@ -273,8 +273,8 @@  void copy_page_sse2(void *, const void *);
 #define pfn_to_paddr(pfn)   __pfn_to_paddr(pfn)
 #define paddr_to_pfn(pa)    __paddr_to_pfn(pa)
 #define paddr_to_pdx(pa)    pfn_to_pdx(paddr_to_pfn(pa))
-#define vmap_to_mfn(va)     l1e_get_pfn(*virt_to_xen_l1e((unsigned long)(va)))
-#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
+#define vmap_to_mfn(va)     _mfn(l1e_get_pfn(*virt_to_xen_l1e((unsigned long)(va))))
+#define vmap_to_page(va)    __mfn_to_page(vmap_to_mfn(va))
 
 #endif /* !defined(__ASSEMBLY__) */
 
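
The x86 helpers follow the same scheme: the page-table entry constructors now compose the _mfn-based primitives rather than bouncing through a raw pfn, e.g. l1e_from_page() becomes l1e_from_mfn(__page_to_mfn(pg), flags). An illustrative use, assuming __PAGE_HYPERVISOR from this header:

    struct page_info *pg = alloc_domheap_page(NULL, 0);

    if ( pg )
    {
        /* Typed end to end: no raw pfn appears in the construction. */
        l1_pgentry_t pte = l1e_from_page(pg, __PAGE_HYPERVISOR);
        pagetable_t tab = pagetable_from_page(pg);

        ASSERT(mfn_eq(l1e_get_mfn(pte), __page_to_mfn(pg)));
        ASSERT(mfn_eq(pagetable_get_mfn(tab), __page_to_mfn(pg)));
    }
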
diff --git a/xen/include/xen/domain_page.h b/xen/include/xen/domain_page.h
index 890bae5b9c..22ab65ba16 100644
--- a/xen/include/xen/domain_page.h
+++ b/xen/include/xen/domain_page.h
@@ -34,7 +34,7 @@  void unmap_domain_page(const void *va);
 /* 
  * Given a VA from map_domain_page(), return its underlying MFN.
  */
-unsigned long domain_page_map_to_mfn(const void *va);
+mfn_t domain_page_map_to_mfn(const void *va);
 
 /*
  * Similar to the above calls, except the mapping is accessible in all
@@ -44,11 +44,11 @@  unsigned long domain_page_map_to_mfn(const void *va);
 void *map_domain_page_global(mfn_t mfn);
 void unmap_domain_page_global(const void *va);
 
-#define __map_domain_page(pg)        map_domain_page(_mfn(__page_to_mfn(pg)))
+#define __map_domain_page(pg)        map_domain_page(__page_to_mfn(pg))
 
 static inline void *__map_domain_page_global(const struct page_info *pg)
 {
-    return map_domain_page_global(_mfn(__page_to_mfn(pg)));
+    return map_domain_page_global(page_to_mfn(pg));
 }
 
 #else /* !CONFIG_DOMAIN_PAGE */
@@ -56,7 +56,7 @@  static inline void *__map_domain_page_global(const struct page_info *pg)
 #define map_domain_page(mfn)                __mfn_to_virt(mfn_x(mfn))
 #define __map_domain_page(pg)               page_to_virt(pg)
 #define unmap_domain_page(va)               ((void)(va))
-#define domain_page_map_to_mfn(va)          virt_to_mfn((unsigned long)(va))
+#define domain_page_map_to_mfn(va)          _mfn(virt_to_mfn((unsigned long)(va)))
 
 static inline void *map_domain_page_global(mfn_t mfn)
 {
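
domain_page_map_to_mfn() now returns an mfn_t in both the CONFIG_DOMAIN_PAGE and fallback variants, so a map/unmap pair can be checked in typed form throughout. Illustrative round trip (not from the patch):

    void *va = map_domain_page(mfn);
    mfn_t back = domain_page_map_to_mfn(va);

    ASSERT(mfn_eq(back, mfn));
    unmap_domain_page(va);
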
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index 542c0b3f20..8516a0b131 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -25,7 +25,7 @@ 
 typedef uint32_t pagesize_t;  /* like size_t, must handle largest PAGE_SIZE */
 
 #define IS_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
-#define IS_VALID_PAGE(_pi)    mfn_valid(_mfn(page_to_mfn(_pi)))
+#define IS_VALID_PAGE(_pi)    mfn_valid(page_to_mfn(_pi))
 
 extern struct page_list_head tmem_page_list;
 extern spinlock_t tmem_page_list_lock;