
[Xen-devel,v2,04/24] xen/arm: mm: Redefine mfn_to_virt to use typesafe

Message ID 20170912100330.2168-5-julien.grall@arm.com
State Accepted
Commit 0f76a091e47f10d1b13c050decc2672a5047d00c
Series xen/arm: Memory subsystem clean-up

Commit Message

Julien Grall Sept. 12, 2017, 10:03 a.m. UTC
This adds a bit more safety to the memory subsystem code.

Signed-off-by: Julien Grall <julien.grall@arm.com>
---
 xen/arch/arm/mm.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)
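
The change is small but its intent is easy to miss in the diff: asm/page.h's mfn_to_virt() takes a raw unsigned long, and the override makes the typesafe mfn_t the only accepted argument inside mm.c, with mfn_x() applied in exactly one place. The standalone C sketch below illustrates the pattern; the struct wrapper, the PAGE_SHIFT value, the heap base and the simplified __mfn_to_virt() are placeholders for illustration, not the real Xen definitions from xen/include/xen/mm.h and asm/page.h.

    /* Minimal sketch of the typesafe-MFN pattern; placeholder values only. */
    #include <stdio.h>

    #define PAGE_SHIFT          12
    #define XENHEAP_VIRT_START  0x40000000UL   /* made-up base, not Xen's layout */

    /* Wrapping the frame number in a struct stops MFNs being mixed up
     * with plain integers (GFNs, array indexes, byte offsets, ...). */
    typedef struct { unsigned long mfn; } mfn_t;

    static inline mfn_t _mfn(unsigned long m)   { return (mfn_t){ m }; }
    static inline unsigned long mfn_x(mfn_t m)  { return m.mfn; }

    /* Non-typesafe helper, standing in for asm/page.h's __mfn_to_virt(). */
    #define __mfn_to_virt(m) \
        ((void *)(XENHEAP_VIRT_START + ((unsigned long)(m) << PAGE_SHIFT)))

    /* Typesafe override, mirroring the patch: callers must pass an mfn_t,
     * and the raw value is extracted in exactly one place. */
    #undef  mfn_to_virt
    #define mfn_to_virt(mfn) __mfn_to_virt(mfn_x(mfn))

    int main(void)
    {
        mfn_t m = _mfn(5);

        printf("%p\n", mfn_to_virt(m));   /* ok: caller holds an mfn_t        */
        /* mfn_to_virt(5); */             /* would not compile: plain integer */
        return 0;
    }

With the override in place, a bare integer passed to mfn_to_virt() is rejected at compile time, which is the "bit more safety" the commit message refers to.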

Comments

Stefano Stabellini Sept. 15, 2017, 11:56 p.m. UTC | #1
On Tue, 12 Sep 2017, Julien Grall wrote:
> This add a bit more safety in the memory subsystem code.
> 
> Signed-off-by: Julien Grall <julien.grall@arm.com>
> ---
>  xen/arch/arm/mm.c | 16 +++++++++-------
>  1 file changed, 9 insertions(+), 7 deletions(-)
> 
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 965d0573a4..5716ef1123 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -47,6 +47,8 @@ struct domain *dom_xen, *dom_io, *dom_cow;
>  /* Override macros from asm/page.h to make them work with mfn_t */
>  #undef virt_to_mfn
>  #define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
> +#undef mfn_to_virt
> +#define mfn_to_virt(mfn) __mfn_to_virt(mfn_x(mfn))
>  
>  /* Static start-of-day pagetables that we use before the allocators
>   * are up. These are used by all CPUs during bringup before switching
> @@ -837,7 +839,7 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
>       * Virtual address aligned to previous 1GB to match physical
>       * address alignment done above.
>       */
> -    vaddr = (vaddr_t)mfn_to_virt(base_mfn) & FIRST_MASK;
> +    vaddr = (vaddr_t)__mfn_to_virt(base_mfn) & FIRST_MASK;

Don't you think it would be better to do mfn_to_virt(_mfn(base_mfn)) in
this patch? This is just bike-shedding, but I think it would be more
obviously consistent. Other than that, it looks good.


>      while ( mfn < end_mfn )
>      {
> @@ -849,7 +851,7 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
>              /* mfn_to_virt is not valid on the 1st 1st mfn, since it
>               * is not within the xenheap. */
>              first = slot == xenheap_first_first_slot ?
> -                xenheap_first_first : mfn_to_virt(p->pt.base);
> +                xenheap_first_first : __mfn_to_virt(p->pt.base);
>          }
>          else if ( xenheap_first_first_slot == -1)
>          {
> @@ -866,11 +868,11 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
>          {
>              mfn_t first_mfn = alloc_boot_pages(1, 1);
>  
> -            clear_page(mfn_to_virt(mfn_x(first_mfn)));
> +            clear_page(mfn_to_virt(first_mfn));
>              pte = mfn_to_xen_entry(first_mfn, WRITEALLOC);
>              pte.pt.table = 1;
>              write_pte(p, pte);
> -            first = mfn_to_virt(mfn_x(first_mfn));
> +            first = mfn_to_virt(first_mfn);
>          }
>  
>          pte = mfn_to_xen_entry(_mfn(mfn), WRITEALLOC);
> @@ -909,10 +911,10 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
>      /* Compute the number of second level pages. */
>      nr_second = ROUNDUP(frametable_size, FIRST_SIZE) >> FIRST_SHIFT;
>      second_base = alloc_boot_pages(nr_second, 1);
> -    second = mfn_to_virt(mfn_x(second_base));
> +    second = mfn_to_virt(second_base);
>      for ( i = 0; i < nr_second; i++ )
>      {
> -        clear_page(mfn_to_virt(mfn_x(mfn_add(second_base, i))));
> +        clear_page(mfn_to_virt(mfn_add(second_base, i)));
>          pte = mfn_to_xen_entry(mfn_add(second_base, i), WRITEALLOC);
>          pte.pt.table = 1;
>          write_pte(&xen_first[first_table_offset(FRAMETABLE_VIRT_START)+i], pte);
> @@ -1005,7 +1007,7 @@ static int create_xen_entries(enum xenmap_operation op,
>  
>          BUG_ON(!lpae_valid(*entry));
>  
> -        third = mfn_to_virt(entry->pt.base);
> +        third = __mfn_to_virt(entry->pt.base);
>          entry = &third[third_table_offset(addr)];
>  
>          switch ( op ) {
> -- 
> 2.11.0
>
Julien Grall Sept. 16, 2017, 9:27 a.m. UTC | #2
Hi Stefano,

On 09/16/2017 12:56 AM, Stefano Stabellini wrote:
> On Tue, 12 Sep 2017, Julien Grall wrote:
>> This add a bit more safety in the memory subsystem code.
>>
>> Signed-off-by: Julien Grall <julien.grall@arm.com>
>> ---
>>   xen/arch/arm/mm.c | 16 +++++++++-------
>>   1 file changed, 9 insertions(+), 7 deletions(-)
>>
>> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
>> index 965d0573a4..5716ef1123 100644
>> --- a/xen/arch/arm/mm.c
>> +++ b/xen/arch/arm/mm.c
>> @@ -47,6 +47,8 @@ struct domain *dom_xen, *dom_io, *dom_cow;
>>   /* Override macros from asm/page.h to make them work with mfn_t */
>>   #undef virt_to_mfn
>>   #define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
>> +#undef mfn_to_virt
>> +#define mfn_to_virt(mfn) __mfn_to_virt(mfn_x(mfn))
>>   
>>   /* Static start-of-day pagetables that we use before the allocators
>>    * are up. These are used by all CPUs during bringup before switching
>> @@ -837,7 +839,7 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
>>        * Virtual address aligned to previous 1GB to match physical
>>        * address alignment done above.
>>        */
>> -    vaddr = (vaddr_t)mfn_to_virt(base_mfn) & FIRST_MASK;
>> +    vaddr = (vaddr_t)__mfn_to_virt(base_mfn) & FIRST_MASK;
> 
> Don't you think it would be better to do mfn_to_virt(_mfn(base_mfn)) in
> this patch? This is just bike-shedding, but I think it would be more
> obviously consistent. Other than that, it looks good.

Well, the last time I used mfn_x/_mfn in a similar situation, you requested
that I use the __* version (see [1]).

I really don't mind which one to use, but we should stay consistent about
which macros to use for the non-typesafe version.

Cheers,

[1] http://patches.linaro.org/patch/105386/
Stefano Stabellini Sept. 19, 2017, 10:58 p.m. UTC | #3
On Sat, 16 Sep 2017, Julien Grall wrote:
> Hi Stefano,
> 
> On 09/16/2017 12:56 AM, Stefano Stabellini wrote:
> > On Tue, 12 Sep 2017, Julien Grall wrote:
> > > This add a bit more safety in the memory subsystem code.
> > > 
> > > Signed-off-by: Julien Grall <julien.grall@arm.com>
> > > ---
> > >   xen/arch/arm/mm.c | 16 +++++++++-------
> > >   1 file changed, 9 insertions(+), 7 deletions(-)
> > > 
> > > diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> > > index 965d0573a4..5716ef1123 100644
> > > --- a/xen/arch/arm/mm.c
> > > +++ b/xen/arch/arm/mm.c
> > > @@ -47,6 +47,8 @@ struct domain *dom_xen, *dom_io, *dom_cow;
> > >   /* Override macros from asm/page.h to make them work with mfn_t */
> > >   #undef virt_to_mfn
> > >   #define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
> > > +#undef mfn_to_virt
> > > +#define mfn_to_virt(mfn) __mfn_to_virt(mfn_x(mfn))
> > > 
> > >   /* Static start-of-day pagetables that we use before the allocators
> > >    * are up. These are used by all CPUs during bringup before switching
> > > @@ -837,7 +839,7 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
> > >        * Virtual address aligned to previous 1GB to match physical
> > >        * address alignment done above.
> > >        */
> > > -    vaddr = (vaddr_t)mfn_to_virt(base_mfn) & FIRST_MASK;
> > > +    vaddr = (vaddr_t)__mfn_to_virt(base_mfn) & FIRST_MASK;
> > 
> > Don't you think it would be better to do mfn_to_virt(_mfn(base_mfn)) in
> > this patch? This is just bike-shedding, but I think it would be more
> > obviously consistent. Other than that, it looks good.
> 
> Well, last time I used mfn_x/_mfn in similar condition, you requested to use
> the __* version (see [1]).
 
LOL
This is a good sign: it means I am getting more familiar with the
mfn_x/_mfn syntax :-D


> I really don't mind which one to use. But we should stay consistent with the
> macros to use for non-typesafe version.


Of course. Let's keep the patch as is.
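
For completeness, the two spellings discussed in the thread are interchangeable once the override is in place; the choice is only about whether the call site already holds an mfn_t or a raw unsigned long (base_mfn, pte->pt.base). A self-contained check, again using toy placeholder definitions rather than Xen's real ones:

    /* Check that the two spellings debated above agree; placeholder values only. */
    #include <assert.h>

    #define PAGE_SHIFT          12
    #define XENHEAP_VIRT_START  0x40000000UL   /* made-up base address */

    typedef struct { unsigned long mfn; } mfn_t;
    static inline mfn_t _mfn(unsigned long m)   { return (mfn_t){ m }; }
    static inline unsigned long mfn_x(mfn_t m)  { return m.mfn; }

    #define __mfn_to_virt(mfn)  (XENHEAP_VIRT_START + ((unsigned long)(mfn) << PAGE_SHIFT))
    #define mfn_to_virt(mfn)    __mfn_to_virt(mfn_x(mfn))

    int main(void)
    {
        unsigned long base_mfn = 5;   /* stands in for the raw MFN arguments in the patch */

        /* Same computation either way; the patch keeps __mfn_to_virt() where
         * only a plain unsigned long is in hand. */
        assert(__mfn_to_virt(base_mfn) == mfn_to_virt(_mfn(base_mfn)));
        return 0;
    }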

Patch

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 965d0573a4..5716ef1123 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -47,6 +47,8 @@  struct domain *dom_xen, *dom_io, *dom_cow;
 /* Override macros from asm/page.h to make them work with mfn_t */
 #undef virt_to_mfn
 #define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
+#undef mfn_to_virt
+#define mfn_to_virt(mfn) __mfn_to_virt(mfn_x(mfn))
 
 /* Static start-of-day pagetables that we use before the allocators
  * are up. These are used by all CPUs during bringup before switching
@@ -837,7 +839,7 @@  void __init setup_xenheap_mappings(unsigned long base_mfn,
      * Virtual address aligned to previous 1GB to match physical
      * address alignment done above.
      */
-    vaddr = (vaddr_t)mfn_to_virt(base_mfn) & FIRST_MASK;
+    vaddr = (vaddr_t)__mfn_to_virt(base_mfn) & FIRST_MASK;
 
     while ( mfn < end_mfn )
     {
@@ -849,7 +851,7 @@  void __init setup_xenheap_mappings(unsigned long base_mfn,
             /* mfn_to_virt is not valid on the 1st 1st mfn, since it
              * is not within the xenheap. */
             first = slot == xenheap_first_first_slot ?
-                xenheap_first_first : mfn_to_virt(p->pt.base);
+                xenheap_first_first : __mfn_to_virt(p->pt.base);
         }
         else if ( xenheap_first_first_slot == -1)
         {
@@ -866,11 +868,11 @@  void __init setup_xenheap_mappings(unsigned long base_mfn,
         {
             mfn_t first_mfn = alloc_boot_pages(1, 1);
 
-            clear_page(mfn_to_virt(mfn_x(first_mfn)));
+            clear_page(mfn_to_virt(first_mfn));
             pte = mfn_to_xen_entry(first_mfn, WRITEALLOC);
             pte.pt.table = 1;
             write_pte(p, pte);
-            first = mfn_to_virt(mfn_x(first_mfn));
+            first = mfn_to_virt(first_mfn);
         }
 
         pte = mfn_to_xen_entry(_mfn(mfn), WRITEALLOC);
@@ -909,10 +911,10 @@  void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
     /* Compute the number of second level pages. */
     nr_second = ROUNDUP(frametable_size, FIRST_SIZE) >> FIRST_SHIFT;
     second_base = alloc_boot_pages(nr_second, 1);
-    second = mfn_to_virt(mfn_x(second_base));
+    second = mfn_to_virt(second_base);
     for ( i = 0; i < nr_second; i++ )
     {
-        clear_page(mfn_to_virt(mfn_x(mfn_add(second_base, i))));
+        clear_page(mfn_to_virt(mfn_add(second_base, i)));
         pte = mfn_to_xen_entry(mfn_add(second_base, i), WRITEALLOC);
         pte.pt.table = 1;
         write_pte(&xen_first[first_table_offset(FRAMETABLE_VIRT_START)+i], pte);
@@ -1005,7 +1007,7 @@  static int create_xen_entries(enum xenmap_operation op,
 
         BUG_ON(!lpae_valid(*entry));
 
-        third = mfn_to_virt(entry->pt.base);
+        third = __mfn_to_virt(entry->pt.base);
         entry = &third[third_table_offset(addr)];
 
         switch ( op ) {