[Xen-devel,RFC,02/16] xen/arm: Introduce helpers to get/set an MFN from/to an LPAE entry

Message ID 20181008183352.16291-3-julien.grall@arm.com
State Accepted
Commit f4febe5bfb1b70c219aef5f96fd3e6de7f59cd75
Series xen/arm: Implement Set/Way operations

Commit Message

Julien Grall Oct. 8, 2018, 6:33 p.m. UTC
The new helpers make it easier to read the code by abstracting the way to
set/get an MFN from/to an LPAE entry. The helpers use "walk", as the bits
are common across the different LPAE stages.
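
For context, a simplified sketch of the lpae_t union (paraphrased from
xen/include/asm-arm/lpae.h; the real definition spells out each bitfield):
all three stage-specific views overlay the same 64-bit descriptor, and the
address bits decoded by "walk.base" sit at the same position whichever
stage the entry belongs to, which is what makes a single pair of helpers
possible.

    /*
     * Simplified sketch, not the verbatim Xen definition: bitfield
     * layouts are omitted here.
     */
    typedef union {
        uint64_t bits;
        lpae_pt_t pt;     /* view for Xen's own page tables */
        lpae_p2m_t p2m;   /* view for stage-2 (guest) page tables */
        lpae_walk_t walk; /* generic view: bits common to all walks */
    } lpae_t;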

At the same time, use the new helpers to replace the various open-coded
places.
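
Concretely, the helpers added at the end of this patch wrap the typesafe
mfn_t conversions around the common walk.base field, so call sites go from
stage-specific open coding to a single stage-agnostic form:

    #define lpae_get_mfn(pte)    (_mfn((pte).walk.base))
    #define lpae_set_mfn(pte, mfn)  ((pte).walk.base = mfn_x(mfn))

    /* Before: open-coded, tied to the stage-2 view of the entry */
    mfn = _mfn(entry->p2m.base);

    /* After: the same entry read through the generic helper */
    mfn = lpae_get_mfn(*entry);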

Signed-off-by: Julien Grall <julien.grall@arm.com>

---
    This patch was originally sent separately.
---
 xen/arch/arm/mm.c          | 10 +++++-----
 xen/arch/arm/p2m.c         | 19 ++++++++++---------
 xen/include/asm-arm/lpae.h |  3 +++
 3 files changed, 18 insertions(+), 14 deletions(-)

Comments

Stefano Stabellini Oct. 30, 2018, 12:07 a.m. UTC | #1
On Mon, 8 Oct 2018, Julien Grall wrote:
> The new helpers make it easier to read the code by abstracting the way to
> set/get an MFN from/to an LPAE entry. The helpers use "walk", as the bits
> are common across the different LPAE stages.
> 
> At the same time, use the new helpers to replace the various open-coded
> places.
> 
> Signed-off-by: Julien Grall <julien.grall@arm.com>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>


> ---
>     This patch was originally sent separately.
> ---
>  xen/arch/arm/mm.c          | 10 +++++-----
>  xen/arch/arm/p2m.c         | 19 ++++++++++---------
>  xen/include/asm-arm/lpae.h |  3 +++
>  3 files changed, 18 insertions(+), 14 deletions(-)
> 
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 7a06a33e21..0bc31b1d9b 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -238,7 +238,7 @@ void dump_pt_walk(paddr_t ttbr, paddr_t addr,
>  
>          /* For next iteration */
>          unmap_domain_page(mapping);
> -        mapping = map_domain_page(_mfn(pte.walk.base));
> +        mapping = map_domain_page(lpae_get_mfn(pte));
>      }
>  
>      unmap_domain_page(mapping);
> @@ -323,7 +323,7 @@ static inline lpae_t mfn_to_xen_entry(mfn_t mfn, unsigned attr)
>  
>      ASSERT(!(mfn_to_maddr(mfn) & ~PADDR_MASK));
>  
> -    e.pt.base = mfn_x(mfn);
> +    lpae_set_mfn(e, mfn);
>  
>      return e;
>  }
> @@ -490,7 +490,7 @@ mfn_t domain_page_map_to_mfn(const void *ptr)
>      ASSERT(slot >= 0 && slot < DOMHEAP_ENTRIES);
>      ASSERT(map[slot].pt.avail != 0);
>  
> -    return _mfn(map[slot].pt.base + offset);
> +    return mfn_add(lpae_get_mfn(map[slot]), offset);
>  }
>  #endif
>  
> @@ -851,7 +851,7 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
>              /* mfn_to_virt is not valid on the 1st 1st mfn, since it
>               * is not within the xenheap. */
>              first = slot == xenheap_first_first_slot ?
> -                xenheap_first_first : __mfn_to_virt(p->pt.base);
> +                xenheap_first_first : mfn_to_virt(lpae_get_mfn(*p));
>          }
>          else if ( xenheap_first_first_slot == -1)
>          {
> @@ -1007,7 +1007,7 @@ static int create_xen_entries(enum xenmap_operation op,
>  
>          BUG_ON(!lpae_is_valid(*entry));
>  
> -        third = __mfn_to_virt(entry->pt.base);
> +        third = mfn_to_virt(lpae_get_mfn(*entry));
>          entry = &third[third_table_offset(addr)];
>  
>          switch ( op ) {
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index 30cfb01498..f8a2f6459e 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -265,7 +265,7 @@ static int p2m_next_level(struct p2m_domain *p2m, bool read_only,
>      if ( lpae_is_mapping(*entry, level) )
>          return GUEST_TABLE_SUPER_PAGE;
>  
> -    mfn = _mfn(entry->p2m.base);
> +    mfn = lpae_get_mfn(*entry);
>  
>      unmap_domain_page(*table);
>      *table = map_domain_page(mfn);
> @@ -349,7 +349,7 @@ mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
>          if ( a )
>              *a = p2m_mem_access_radix_get(p2m, gfn);
>  
> -        mfn = _mfn(entry.p2m.base);
> +        mfn = lpae_get_mfn(entry);
>          /*
>           * The entry may point to a superpage. Find the MFN associated
>           * to the GFN.
> @@ -519,7 +519,7 @@ static lpae_t mfn_to_p2m_entry(mfn_t mfn, p2m_type_t t, p2m_access_t a)
>  
>      ASSERT(!(mfn_to_maddr(mfn) & ~PADDR_MASK));
>  
> -    e.p2m.base = mfn_x(mfn);
> +    lpae_set_mfn(e, mfn);
>  
>      return e;
>  }
> @@ -621,7 +621,7 @@ static void p2m_put_l3_page(const lpae_t pte)
>       */
>      if ( p2m_is_foreign(pte.p2m.type) )
>      {
> -        mfn_t mfn = _mfn(pte.p2m.base);
> +        mfn_t mfn = lpae_get_mfn(pte);
>  
>          ASSERT(mfn_valid(mfn));
>          put_page(mfn_to_page(mfn));
> @@ -655,7 +655,7 @@ static void p2m_free_entry(struct p2m_domain *p2m,
>          return;
>      }
>  
> -    table = map_domain_page(_mfn(entry.p2m.base));
> +    table = map_domain_page(lpae_get_mfn(entry));
>      for ( i = 0; i < LPAE_ENTRIES; i++ )
>          p2m_free_entry(p2m, *(table + i), level + 1);
>  
> @@ -669,7 +669,7 @@ static void p2m_free_entry(struct p2m_domain *p2m,
>       */
>      p2m_tlb_flush_sync(p2m);
>  
> -    mfn = _mfn(entry.p2m.base);
> +    mfn = lpae_get_mfn(entry);
>      ASSERT(mfn_valid(mfn));
>  
>      pg = mfn_to_page(mfn);
> @@ -688,7 +688,7 @@ static bool p2m_split_superpage(struct p2m_domain *p2m, lpae_t *entry,
>      bool rv = true;
>  
>      /* Convenience aliases */
> -    mfn_t mfn = _mfn(entry->p2m.base);
> +    mfn_t mfn = lpae_get_mfn(*entry);
>      unsigned int next_level = level + 1;
>      unsigned int level_order = level_orders[next_level];
>  
> @@ -719,7 +719,7 @@ static bool p2m_split_superpage(struct p2m_domain *p2m, lpae_t *entry,
>           * the necessary fields. So the correct permission are kept.
>           */
>          pte = *entry;
> -        pte.p2m.base = mfn_x(mfn_add(mfn, i << level_order));
> +        lpae_set_mfn(pte, mfn_add(mfn, i << level_order));
>  
>          /*
>           * First and second level pages set p2m.table = 0, but third
> @@ -952,7 +952,8 @@ static int __p2m_set_entry(struct p2m_domain *p2m,
>       * Free the entry only if the original pte was valid and the base
>       * is different (to avoid freeing when permission is changed).
>       */
> -    if ( lpae_is_valid(orig_pte) && entry->p2m.base != orig_pte.p2m.base )
> +    if ( lpae_is_valid(orig_pte) &&
> +         !mfn_eq(lpae_get_mfn(*entry), lpae_get_mfn(orig_pte)) )
>          p2m_free_entry(p2m, orig_pte, level);
>  
>      if ( need_iommu_pt_sync(p2m->domain) &&
> diff --git a/xen/include/asm-arm/lpae.h b/xen/include/asm-arm/lpae.h
> index 15595cd35c..17fdc6074f 100644
> --- a/xen/include/asm-arm/lpae.h
> +++ b/xen/include/asm-arm/lpae.h
> @@ -153,6 +153,9 @@ static inline bool lpae_is_superpage(lpae_t pte, unsigned int level)
>      return (level < 3) && lpae_is_mapping(pte, level);
>  }
>  
> +#define lpae_get_mfn(pte)    (_mfn((pte).walk.base))
> +#define lpae_set_mfn(pte, mfn)  ((pte).walk.base = mfn_x(mfn))
> +
>  /*
>   * AArch64 supports pages with different sizes (4K, 16K, and 64K). To enable
>   * page table walks for various configurations, the following helpers enable

Patch

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 7a06a33e21..0bc31b1d9b 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -238,7 +238,7 @@  void dump_pt_walk(paddr_t ttbr, paddr_t addr,
 
         /* For next iteration */
         unmap_domain_page(mapping);
-        mapping = map_domain_page(_mfn(pte.walk.base));
+        mapping = map_domain_page(lpae_get_mfn(pte));
     }
 
     unmap_domain_page(mapping);
@@ -323,7 +323,7 @@  static inline lpae_t mfn_to_xen_entry(mfn_t mfn, unsigned attr)
 
     ASSERT(!(mfn_to_maddr(mfn) & ~PADDR_MASK));
 
-    e.pt.base = mfn_x(mfn);
+    lpae_set_mfn(e, mfn);
 
     return e;
 }
@@ -490,7 +490,7 @@  mfn_t domain_page_map_to_mfn(const void *ptr)
     ASSERT(slot >= 0 && slot < DOMHEAP_ENTRIES);
     ASSERT(map[slot].pt.avail != 0);
 
-    return _mfn(map[slot].pt.base + offset);
+    return mfn_add(lpae_get_mfn(map[slot]), offset);
 }
 #endif
 
@@ -851,7 +851,7 @@  void __init setup_xenheap_mappings(unsigned long base_mfn,
             /* mfn_to_virt is not valid on the 1st 1st mfn, since it
              * is not within the xenheap. */
             first = slot == xenheap_first_first_slot ?
-                xenheap_first_first : __mfn_to_virt(p->pt.base);
+                xenheap_first_first : mfn_to_virt(lpae_get_mfn(*p));
         }
         else if ( xenheap_first_first_slot == -1)
         {
@@ -1007,7 +1007,7 @@  static int create_xen_entries(enum xenmap_operation op,
 
         BUG_ON(!lpae_is_valid(*entry));
 
-        third = __mfn_to_virt(entry->pt.base);
+        third = mfn_to_virt(lpae_get_mfn(*entry));
         entry = &third[third_table_offset(addr)];
 
         switch ( op ) {
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 30cfb01498..f8a2f6459e 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -265,7 +265,7 @@  static int p2m_next_level(struct p2m_domain *p2m, bool read_only,
     if ( lpae_is_mapping(*entry, level) )
         return GUEST_TABLE_SUPER_PAGE;
 
-    mfn = _mfn(entry->p2m.base);
+    mfn = lpae_get_mfn(*entry);
 
     unmap_domain_page(*table);
     *table = map_domain_page(mfn);
@@ -349,7 +349,7 @@  mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
         if ( a )
             *a = p2m_mem_access_radix_get(p2m, gfn);
 
-        mfn = _mfn(entry.p2m.base);
+        mfn = lpae_get_mfn(entry);
         /*
          * The entry may point to a superpage. Find the MFN associated
          * to the GFN.
@@ -519,7 +519,7 @@  static lpae_t mfn_to_p2m_entry(mfn_t mfn, p2m_type_t t, p2m_access_t a)
 
     ASSERT(!(mfn_to_maddr(mfn) & ~PADDR_MASK));
 
-    e.p2m.base = mfn_x(mfn);
+    lpae_set_mfn(e, mfn);
 
     return e;
 }
@@ -621,7 +621,7 @@  static void p2m_put_l3_page(const lpae_t pte)
      */
     if ( p2m_is_foreign(pte.p2m.type) )
     {
-        mfn_t mfn = _mfn(pte.p2m.base);
+        mfn_t mfn = lpae_get_mfn(pte);
 
         ASSERT(mfn_valid(mfn));
         put_page(mfn_to_page(mfn));
@@ -655,7 +655,7 @@  static void p2m_free_entry(struct p2m_domain *p2m,
         return;
     }
 
-    table = map_domain_page(_mfn(entry.p2m.base));
+    table = map_domain_page(lpae_get_mfn(entry));
     for ( i = 0; i < LPAE_ENTRIES; i++ )
         p2m_free_entry(p2m, *(table + i), level + 1);
 
@@ -669,7 +669,7 @@  static void p2m_free_entry(struct p2m_domain *p2m,
      */
     p2m_tlb_flush_sync(p2m);
 
-    mfn = _mfn(entry.p2m.base);
+    mfn = lpae_get_mfn(entry);
     ASSERT(mfn_valid(mfn));
 
     pg = mfn_to_page(mfn);
@@ -688,7 +688,7 @@  static bool p2m_split_superpage(struct p2m_domain *p2m, lpae_t *entry,
     bool rv = true;
 
     /* Convenience aliases */
-    mfn_t mfn = _mfn(entry->p2m.base);
+    mfn_t mfn = lpae_get_mfn(*entry);
     unsigned int next_level = level + 1;
     unsigned int level_order = level_orders[next_level];
 
@@ -719,7 +719,7 @@  static bool p2m_split_superpage(struct p2m_domain *p2m, lpae_t *entry,
          * the necessary fields. So the correct permission are kept.
          */
         pte = *entry;
-        pte.p2m.base = mfn_x(mfn_add(mfn, i << level_order));
+        lpae_set_mfn(pte, mfn_add(mfn, i << level_order));
 
         /*
          * First and second level pages set p2m.table = 0, but third
@@ -952,7 +952,8 @@  static int __p2m_set_entry(struct p2m_domain *p2m,
      * Free the entry only if the original pte was valid and the base
      * is different (to avoid freeing when permission is changed).
      */
-    if ( lpae_is_valid(orig_pte) && entry->p2m.base != orig_pte.p2m.base )
+    if ( lpae_is_valid(orig_pte) &&
+         !mfn_eq(lpae_get_mfn(*entry), lpae_get_mfn(orig_pte)) )
         p2m_free_entry(p2m, orig_pte, level);
 
     if ( need_iommu_pt_sync(p2m->domain) &&
diff --git a/xen/include/asm-arm/lpae.h b/xen/include/asm-arm/lpae.h
index 15595cd35c..17fdc6074f 100644
--- a/xen/include/asm-arm/lpae.h
+++ b/xen/include/asm-arm/lpae.h
@@ -153,6 +153,9 @@  static inline bool lpae_is_superpage(lpae_t pte, unsigned int level)
     return (level < 3) && lpae_is_mapping(pte, level);
 }
 
+#define lpae_get_mfn(pte)    (_mfn((pte).walk.base))
+#define lpae_set_mfn(pte, mfn)  ((pte).walk.base = mfn_x(mfn))
+
 /*
  * AArch64 supports pages with different sizes (4K, 16K, and 64K). To enable
  * page table walks for various configurations, the following helpers enable
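
As a closing usage note (not part of the patch): the conversions rely on
Xen's typesafe MFN primitives. A sketch of their semantics, assuming the
definitions in xen/include/xen/mm.h (details may differ):

    /* mfn_t is a typesafe wrapper around an unsigned long frame number;
     * _mfn() and mfn_x() convert to and from the raw value. */
    static inline mfn_t mfn_add(mfn_t mfn, unsigned long i)
    {
        return _mfn(mfn_x(mfn) + i);
    }

    static inline bool mfn_eq(mfn_t x, mfn_t y)
    {
        return mfn_x(x) == mfn_x(y);
    }

This is why the __p2m_set_entry hunk above can compare entries with
!mfn_eq(lpae_get_mfn(*entry), lpae_get_mfn(orig_pte)) instead of comparing
raw base bits directly.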