[Xen-devel,16/24] xen/arm: mm: Use typesafe mfn for xenheap_mfn_*

Message ID 20170613161323.25196-17-julien.grall@arm.com
State Superseded
Headers show
Series
  • xen/arm: Extend the usage of typesafe MFN
Related show

Commit Message

Julien Grall June 13, 2017, 4:13 p.m.
Add more safety when using xenheap_mfn_*.

Signed-off-by: Julien Grall <julien.grall@arm.com>
---
 xen/arch/arm/mm.c        | 16 ++++++++--------
 xen/arch/arm/setup.c     | 18 +++++++++---------
 xen/include/asm-arm/mm.h | 11 ++++++-----
 3 files changed, 23 insertions(+), 22 deletions(-)

Comments

Stefano Stabellini June 15, 2017, 11:12 p.m. | #1
On Tue, 13 Jun 2017, Julien Grall wrote:
> Add more safety when using xenheap_mfn_*.
> 
> Signed-off-by: Julien Grall <julien.grall@arm.com>
> ---
>  xen/arch/arm/mm.c        | 16 ++++++++--------
>  xen/arch/arm/setup.c     | 18 +++++++++---------
>  xen/include/asm-arm/mm.h | 11 ++++++-----
>  3 files changed, 23 insertions(+), 22 deletions(-)
> 
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 0014c24ecc..fb01f01879 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -138,8 +138,8 @@ uint64_t init_ttbr;
>  static paddr_t phys_offset;
>  
>  /* Limits of the Xen heap */
> -unsigned long xenheap_mfn_start __read_mostly = ~0UL;
> -unsigned long xenheap_mfn_end __read_mostly;
> +mfn_t xenheap_mfn_start __read_mostly = INVALID_MFN;
> +mfn_t xenheap_mfn_end __read_mostly;

The patch is fine, but given that xenheap_mfn_end is unused on arm64, I
would like to make it arm32 only.


>  vaddr_t xenheap_virt_end __read_mostly;
>  #ifdef CONFIG_ARM_64
>  vaddr_t xenheap_virt_start __read_mostly;
> @@ -801,8 +801,8 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
>  
>      /* Record where the xenheap is, for translation routines. */
>      xenheap_virt_end = XENHEAP_VIRT_START + nr_mfns * PAGE_SIZE;
> -    xenheap_mfn_start = base_mfn;
> -    xenheap_mfn_end = base_mfn + nr_mfns;
> +    xenheap_mfn_start = _mfn(base_mfn);
> +    xenheap_mfn_end = _mfn(base_mfn + nr_mfns);
>  }
>  #else /* CONFIG_ARM_64 */
>  void __init setup_xenheap_mappings(unsigned long base_mfn,
> @@ -816,16 +816,16 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
>      mfn = base_mfn & ~((FIRST_SIZE>>PAGE_SHIFT)-1);
>  
>      /* First call sets the xenheap physical and virtual offset. */
> -    if ( xenheap_mfn_start == ~0UL )
> +    if ( mfn_eq(xenheap_mfn_start, INVALID_MFN) )
>      {
> -        xenheap_mfn_start = base_mfn;
> +        xenheap_mfn_start = _mfn(base_mfn);
>          xenheap_virt_start = DIRECTMAP_VIRT_START +
>              (base_mfn - mfn) * PAGE_SIZE;
>      }
>  
> -    if ( base_mfn < xenheap_mfn_start )
> +    if ( base_mfn < mfn_x(xenheap_mfn_start) )
>          panic("cannot add xenheap mapping at %lx below heap start %lx",
> -              base_mfn, xenheap_mfn_start);
> +              base_mfn, mfn_x(xenheap_mfn_start));
>  
>      end_mfn = base_mfn + nr_mfns;
>  
> diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
> index ab4d8e4218..3b34855668 100644
> --- a/xen/arch/arm/setup.c
> +++ b/xen/arch/arm/setup.c
> @@ -555,8 +555,8 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
>       * and enough mapped pages for copying the DTB.
>       */
>      dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT;
> -    boot_mfn_start = xenheap_mfn_end - dtb_pages - 1;
> -    boot_mfn_end = xenheap_mfn_end;
> +    boot_mfn_start = mfn_x(xenheap_mfn_end) - dtb_pages - 1;
> +    boot_mfn_end = mfn_x(xenheap_mfn_end);
>  
>      init_boot_pages(pfn_to_paddr(boot_mfn_start), pfn_to_paddr(boot_mfn_end));
>  
> @@ -591,11 +591,11 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
>                  e = bank_end;
>  
>              /* Avoid the xenheap */
> -            if ( s < pfn_to_paddr(xenheap_mfn_start+xenheap_pages)
> -                 && pfn_to_paddr(xenheap_mfn_start) < e )
> +            if ( s < mfn_to_maddr(mfn_add(xenheap_mfn_start, xenheap_pages))
> +                 && mfn_to_maddr(xenheap_mfn_start) < e )
>              {
> -                e = pfn_to_paddr(xenheap_mfn_start);
> -                n = pfn_to_paddr(xenheap_mfn_start+xenheap_pages);
> +                e = mfn_to_maddr(xenheap_mfn_start);
> +                n = mfn_to_maddr(mfn_add(xenheap_mfn_start, xenheap_pages));
>              }
>  
>              dt_unreserved_regions(s, e, init_boot_pages, 0);
> @@ -610,7 +610,7 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
>  
>      /* Add xenheap memory that was not already added to the boot
>         allocator. */
> -    init_xenheap_pages(pfn_to_paddr(xenheap_mfn_start),
> +    init_xenheap_pages(mfn_to_maddr(xenheap_mfn_start),
>                         pfn_to_paddr(boot_mfn_start));
>  }
>  #else /* CONFIG_ARM_64 */
> @@ -662,8 +662,8 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
>      total_pages += ram_size >> PAGE_SHIFT;
>  
>      xenheap_virt_end = XENHEAP_VIRT_START + ram_end - ram_start;
> -    xenheap_mfn_start = ram_start >> PAGE_SHIFT;
> -    xenheap_mfn_end = ram_end >> PAGE_SHIFT;
> +    xenheap_mfn_start = maddr_to_mfn(ram_start);
> +    xenheap_mfn_end = maddr_to_mfn(ram_end);
>  
>      /*
>       * Need enough mapped pages for copying the DTB.
> diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
> index a42da20f0a..3dab6dc9a1 100644
> --- a/xen/include/asm-arm/mm.h
> +++ b/xen/include/asm-arm/mm.h
> @@ -115,7 +115,7 @@ struct page_info
>  #define PGC_count_width   PG_shift(9)
>  #define PGC_count_mask    ((1UL<<PGC_count_width)-1)
>  
> -extern unsigned long xenheap_mfn_start, xenheap_mfn_end;
> +extern mfn_t xenheap_mfn_start, xenheap_mfn_end;
>  extern vaddr_t xenheap_virt_end;
>  #ifdef CONFIG_ARM_64
>  extern vaddr_t xenheap_virt_start;
> @@ -125,7 +125,8 @@ extern vaddr_t xenheap_virt_start;
>  #define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
>  #define is_xen_heap_mfn(mfn) ({                                 \
>      unsigned long _mfn = (mfn);                                 \
> -    (_mfn >= xenheap_mfn_start && _mfn < xenheap_mfn_end);      \
> +    (_mfn >= mfn_x(xenheap_mfn_start) &&                        \
> +     _mfn < mfn_x(xenheap_mfn_end));                            \

Do you think that mfn_less_than() and mfn_greater_than() would be helpful?


>  })
>  #else
>  #define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
> @@ -235,7 +236,7 @@ static inline paddr_t __virt_to_maddr(vaddr_t va)
>  static inline void *maddr_to_virt(paddr_t ma)
>  {
>      ASSERT(is_xen_heap_mfn(ma >> PAGE_SHIFT));
> -    ma -= pfn_to_paddr(xenheap_mfn_start);
> +    ma -= mfn_to_maddr(xenheap_mfn_start);
>      return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
>  }
>  #else
> @@ -243,7 +244,7 @@ static inline void *maddr_to_virt(paddr_t ma)
>  {
>      ASSERT(pfn_to_pdx(ma >> PAGE_SHIFT) < (DIRECTMAP_SIZE >> PAGE_SHIFT));
>      return (void *)(XENHEAP_VIRT_START -
> -                    pfn_to_paddr(xenheap_mfn_start) +
> +                    mfn_to_maddr(xenheap_mfn_start) +
>                      ((ma & ma_va_bottom_mask) |
>                       ((ma & ma_top_mask) >> pfn_pdx_hole_shift)));
>  }
> @@ -284,7 +285,7 @@ static inline struct page_info *virt_to_page(const void *v)
>      ASSERT(va < xenheap_virt_end);
>  
>      pdx = (va - XENHEAP_VIRT_START) >> PAGE_SHIFT;
> -    pdx += pfn_to_pdx(xenheap_mfn_start);
> +    pdx += pfn_to_pdx(mfn_x(xenheap_mfn_start));
>      return frame_table + pdx - frametable_base_pdx;
>  }
>  
> -- 
> 2.11.0
>
Julien Grall June 16, 2017, 6:58 a.m. | #2
Hi Stefano,

On 16/06/2017 00:12, Stefano Stabellini wrote:
> On Tue, 13 Jun 2017, Julien Grall wrote:
>> Add more safety when using xenheap_mfn_*.
>>
>> Signed-off-by: Julien Grall <julien.grall@arm.com>
>> ---
>>  xen/arch/arm/mm.c        | 16 ++++++++--------
>>  xen/arch/arm/setup.c     | 18 +++++++++---------
>>  xen/include/asm-arm/mm.h | 11 ++++++-----
>>  3 files changed, 23 insertions(+), 22 deletions(-)
>>
>> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
>> index 0014c24ecc..fb01f01879 100644
>> --- a/xen/arch/arm/mm.c
>> +++ b/xen/arch/arm/mm.c
>> @@ -138,8 +138,8 @@ uint64_t init_ttbr;
>>  static paddr_t phys_offset;
>>
>>  /* Limits of the Xen heap */
>> -unsigned long xenheap_mfn_start __read_mostly = ~0UL;
>> -unsigned long xenheap_mfn_end __read_mostly;
>> +mfn_t xenheap_mfn_start __read_mostly = INVALID_MFN;
>> +mfn_t xenheap_mfn_end __read_mostly;
>
> The patch is fine, but given that xenheap_mfn_end is unused on arm64, I
> would like to make it arm32 only.

See my answer on patch #3; I would like to limit the #ifdefery, and it is 
better to bound a region with start/end.

Hopefully, at some point we could make the xenheap code very similar on 
both ARM64 and ARM32.

Cheers,
Julien Grall June 16, 2017, 6:08 p.m. | #3
Hi Stefano,

On 06/16/2017 12:12 AM, Stefano Stabellini wrote:
> On Tue, 13 Jun 2017, Julien Grall wrote:
>>        * Need enough mapped pages for copying the DTB.
>> diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
>> index a42da20f0a..3dab6dc9a1 100644
>> --- a/xen/include/asm-arm/mm.h
>> +++ b/xen/include/asm-arm/mm.h
>> @@ -115,7 +115,7 @@ struct page_info
>>   #define PGC_count_width   PG_shift(9)
>>   #define PGC_count_mask    ((1UL<<PGC_count_width)-1)
>>   
>> -extern unsigned long xenheap_mfn_start, xenheap_mfn_end;
>> +extern mfn_t xenheap_mfn_start, xenheap_mfn_end;
>>   extern vaddr_t xenheap_virt_end;
>>   #ifdef CONFIG_ARM_64
>>   extern vaddr_t xenheap_virt_start;
>> @@ -125,7 +125,8 @@ extern vaddr_t xenheap_virt_start;
>>   #define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
>>   #define is_xen_heap_mfn(mfn) ({                                 \
>>       unsigned long _mfn = (mfn);                                 \
>> -    (_mfn >= xenheap_mfn_start && _mfn < xenheap_mfn_end);      \
>> +    (_mfn >= mfn_x(xenheap_mfn_start) &&                        \
>> +     _mfn < mfn_x(xenheap_mfn_end));                            \
> 
> Do you think that mfn_less_than() and mfn_greater_than() would be helpful?

I forgot to answer this one. I have looked at the places comparing (other 
than == and !=) typesafe MFNs and I haven't found many. So I didn't see 
the need to introduce helpers for that.

If we notice an increase in them, we can decide to introduce one. They will 
be easy to find as they would have explicit mfn_x in the code.

Cheers,

Patch hide | download patch | download mbox

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 0014c24ecc..fb01f01879 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -138,8 +138,8 @@  uint64_t init_ttbr;
 static paddr_t phys_offset;
 
 /* Limits of the Xen heap */
-unsigned long xenheap_mfn_start __read_mostly = ~0UL;
-unsigned long xenheap_mfn_end __read_mostly;
+mfn_t xenheap_mfn_start __read_mostly = INVALID_MFN;
+mfn_t xenheap_mfn_end __read_mostly;
 vaddr_t xenheap_virt_end __read_mostly;
 #ifdef CONFIG_ARM_64
 vaddr_t xenheap_virt_start __read_mostly;
@@ -801,8 +801,8 @@  void __init setup_xenheap_mappings(unsigned long base_mfn,
 
     /* Record where the xenheap is, for translation routines. */
     xenheap_virt_end = XENHEAP_VIRT_START + nr_mfns * PAGE_SIZE;
-    xenheap_mfn_start = base_mfn;
-    xenheap_mfn_end = base_mfn + nr_mfns;
+    xenheap_mfn_start = _mfn(base_mfn);
+    xenheap_mfn_end = _mfn(base_mfn + nr_mfns);
 }
 #else /* CONFIG_ARM_64 */
 void __init setup_xenheap_mappings(unsigned long base_mfn,
@@ -816,16 +816,16 @@  void __init setup_xenheap_mappings(unsigned long base_mfn,
     mfn = base_mfn & ~((FIRST_SIZE>>PAGE_SHIFT)-1);
 
     /* First call sets the xenheap physical and virtual offset. */
-    if ( xenheap_mfn_start == ~0UL )
+    if ( mfn_eq(xenheap_mfn_start, INVALID_MFN) )
     {
-        xenheap_mfn_start = base_mfn;
+        xenheap_mfn_start = _mfn(base_mfn);
         xenheap_virt_start = DIRECTMAP_VIRT_START +
             (base_mfn - mfn) * PAGE_SIZE;
     }
 
-    if ( base_mfn < xenheap_mfn_start )
+    if ( base_mfn < mfn_x(xenheap_mfn_start) )
         panic("cannot add xenheap mapping at %lx below heap start %lx",
-              base_mfn, xenheap_mfn_start);
+              base_mfn, mfn_x(xenheap_mfn_start));
 
     end_mfn = base_mfn + nr_mfns;
 
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index ab4d8e4218..3b34855668 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -555,8 +555,8 @@  static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
      * and enough mapped pages for copying the DTB.
      */
     dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT;
-    boot_mfn_start = xenheap_mfn_end - dtb_pages - 1;
-    boot_mfn_end = xenheap_mfn_end;
+    boot_mfn_start = mfn_x(xenheap_mfn_end) - dtb_pages - 1;
+    boot_mfn_end = mfn_x(xenheap_mfn_end);
 
     init_boot_pages(pfn_to_paddr(boot_mfn_start), pfn_to_paddr(boot_mfn_end));
 
@@ -591,11 +591,11 @@  static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
                 e = bank_end;
 
             /* Avoid the xenheap */
-            if ( s < pfn_to_paddr(xenheap_mfn_start+xenheap_pages)
-                 && pfn_to_paddr(xenheap_mfn_start) < e )
+            if ( s < mfn_to_maddr(mfn_add(xenheap_mfn_start, xenheap_pages))
+                 && mfn_to_maddr(xenheap_mfn_start) < e )
             {
-                e = pfn_to_paddr(xenheap_mfn_start);
-                n = pfn_to_paddr(xenheap_mfn_start+xenheap_pages);
+                e = mfn_to_maddr(xenheap_mfn_start);
+                n = mfn_to_maddr(mfn_add(xenheap_mfn_start, xenheap_pages));
             }
 
             dt_unreserved_regions(s, e, init_boot_pages, 0);
@@ -610,7 +610,7 @@  static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
 
     /* Add xenheap memory that was not already added to the boot
        allocator. */
-    init_xenheap_pages(pfn_to_paddr(xenheap_mfn_start),
+    init_xenheap_pages(mfn_to_maddr(xenheap_mfn_start),
                        pfn_to_paddr(boot_mfn_start));
 }
 #else /* CONFIG_ARM_64 */
@@ -662,8 +662,8 @@  static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
     total_pages += ram_size >> PAGE_SHIFT;
 
     xenheap_virt_end = XENHEAP_VIRT_START + ram_end - ram_start;
-    xenheap_mfn_start = ram_start >> PAGE_SHIFT;
-    xenheap_mfn_end = ram_end >> PAGE_SHIFT;
+    xenheap_mfn_start = maddr_to_mfn(ram_start);
+    xenheap_mfn_end = maddr_to_mfn(ram_end);
 
     /*
      * Need enough mapped pages for copying the DTB.
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index a42da20f0a..3dab6dc9a1 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -115,7 +115,7 @@  struct page_info
 #define PGC_count_width   PG_shift(9)
 #define PGC_count_mask    ((1UL<<PGC_count_width)-1)
 
-extern unsigned long xenheap_mfn_start, xenheap_mfn_end;
+extern mfn_t xenheap_mfn_start, xenheap_mfn_end;
 extern vaddr_t xenheap_virt_end;
 #ifdef CONFIG_ARM_64
 extern vaddr_t xenheap_virt_start;
@@ -125,7 +125,8 @@  extern vaddr_t xenheap_virt_start;
 #define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
 #define is_xen_heap_mfn(mfn) ({                                 \
     unsigned long _mfn = (mfn);                                 \
-    (_mfn >= xenheap_mfn_start && _mfn < xenheap_mfn_end);      \
+    (_mfn >= mfn_x(xenheap_mfn_start) &&                        \
+     _mfn < mfn_x(xenheap_mfn_end));                            \
 })
 #else
 #define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
@@ -235,7 +236,7 @@  static inline paddr_t __virt_to_maddr(vaddr_t va)
 static inline void *maddr_to_virt(paddr_t ma)
 {
     ASSERT(is_xen_heap_mfn(ma >> PAGE_SHIFT));
-    ma -= pfn_to_paddr(xenheap_mfn_start);
+    ma -= mfn_to_maddr(xenheap_mfn_start);
     return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
 }
 #else
@@ -243,7 +244,7 @@  static inline void *maddr_to_virt(paddr_t ma)
 {
     ASSERT(pfn_to_pdx(ma >> PAGE_SHIFT) < (DIRECTMAP_SIZE >> PAGE_SHIFT));
     return (void *)(XENHEAP_VIRT_START -
-                    pfn_to_paddr(xenheap_mfn_start) +
+                    mfn_to_maddr(xenheap_mfn_start) +
                     ((ma & ma_va_bottom_mask) |
                      ((ma & ma_top_mask) >> pfn_pdx_hole_shift)));
 }
@@ -284,7 +285,7 @@  static inline struct page_info *virt_to_page(const void *v)
     ASSERT(va < xenheap_virt_end);
 
     pdx = (va - XENHEAP_VIRT_START) >> PAGE_SHIFT;
-    pdx += pfn_to_pdx(xenheap_mfn_start);
+    pdx += pfn_to_pdx(mfn_x(xenheap_mfn_start));
     return frame_table + pdx - frametable_base_pdx;
 }