Message ID | 20170613161323.25196-21-julien.grall@arm.com |
---|---|
State | Superseded |
Series | xen/arm: Extend the usage of typesafe MFN |
On Tue, 13 Jun 2017, Julien Grall wrote:
> The file xen/arch/arm/mm.c uses the typesafe MFN in most places. This
> requires every caller of virt_to_mfn to be prefixed with _mfn(...).
>
> To avoid the extra _mfn(...), redefine virt_to_mfn within arch/arm/mm.c
> to handle typesafe MFN.
>
> This patch also introduces __virt_to_mfn, so virt_to_mfn can be
> redefined easily.
>
> Signed-off-by: Julien Grall <julien.grall@arm.com>
> ---
>  xen/arch/arm/mm.c        | 16 ++++++++++------
>  xen/include/asm-arm/mm.h |  3 ++-
>  2 files changed, 12 insertions(+), 7 deletions(-)
>
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 452c1e26c3..2ff1688f3f 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -44,6 +44,10 @@
>
>  struct domain *dom_xen, *dom_io, *dom_cow;
>
> +/* Override macros from asm/page.h to make them work with mfn_t */
> +#undef virt_to_mfn
> +#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
> +
>  /* Static start-of-day pagetables that we use before the allocators
>   * are up. These are used by all CPUs during bringup before switching
>   * to the CPUs own pagetables.
> @@ -479,7 +483,7 @@ unsigned long domain_page_map_to_mfn(const void *ptr)
>      unsigned long offset = (va>>THIRD_SHIFT) & LPAE_ENTRY_MASK;
>
>      if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END )
> -        return virt_to_mfn(va);
> +        return mfn_x(virt_to_mfn(va));

__virt_to_mfn?

>      ASSERT(slot >= 0 && slot < DOMHEAP_ENTRIES);
>      ASSERT(map[slot].pt.avail != 0);
>
> @@ -764,7 +768,7 @@ int init_secondary_pagetables(int cpu)
>       * domheap mapping pages. */
>      for ( i = 0; i < DOMHEAP_SECOND_PAGES; i++ )
>      {
> -        pte = mfn_to_xen_entry(_mfn(virt_to_mfn(domheap+i*LPAE_ENTRIES)),
> +        pte = mfn_to_xen_entry(virt_to_mfn(domheap+i*LPAE_ENTRIES),
>                                 WRITEALLOC);
>          pte.pt.table = 1;
>          write_pte(&first[first_table_offset(DOMHEAP_VIRT_START+i*FIRST_SIZE)], pte);
> @@ -961,7 +965,7 @@ static int create_xen_table(lpae_t *entry)
>      if ( p == NULL )
>          return -ENOMEM;
>      clear_page(p);
> -    pte = mfn_to_xen_entry(_mfn(virt_to_mfn(p)), WRITEALLOC);
> +    pte = mfn_to_xen_entry(virt_to_mfn(p), WRITEALLOC);
>      pte.pt.table = 1;
>      write_pte(entry, pte);
>      return 0;
> @@ -1216,7 +1220,7 @@ int xenmem_add_to_physmap_one(
>      unsigned long idx,
>      gfn_t gfn)
>  {
> -    unsigned long mfn = 0;
> +    mfn_t mfn = INVALID_MFN;
>      int rc;
>      p2m_type_t t;
>      struct page_info *page = NULL;
> @@ -1302,7 +1306,7 @@ int xenmem_add_to_physmap_one(
>          return -EINVAL;
>      }
>
> -    mfn = page_to_mfn(page);
> +    mfn = _mfn(page_to_mfn(page));
>      t = p2m_map_foreign;
>
>      rcu_unlock_domain(od);
> @@ -1321,7 +1325,7 @@ int xenmem_add_to_physmap_one(
>      }
>
>      /* Map at new location. */
> -    rc = guest_physmap_add_entry(d, gfn, _mfn(mfn), 0, t);
> +    rc = guest_physmap_add_entry(d, gfn, mfn, 0, t);
>
>      /* If we fail to add the mapping, we need to drop the reference we
>       * took earlier on foreign pages */
> diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
> index b2e7ea7761..6e2b3c7f2b 100644
> --- a/xen/include/asm-arm/mm.h
> +++ b/xen/include/asm-arm/mm.h
> @@ -264,7 +264,7 @@ static inline int gvirt_to_maddr(vaddr_t va, paddr_t *pa, unsigned int flags)
>  #define __va(x)            (maddr_to_virt(x))
>
>  /* Convert between Xen-heap virtual addresses and machine frame numbers. */
> -#define virt_to_mfn(va)   (virt_to_maddr(va) >> PAGE_SHIFT)
> +#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT)
>  #define mfn_to_virt(mfn)  (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))
>
>  /*
> @@ -274,6 +274,7 @@ static inline int gvirt_to_maddr(vaddr_t va, paddr_t *pa, unsigned int flags)
>   */
>  #define mfn_to_page(mfn)  __mfn_to_page(mfn)
>  #define page_to_mfn(pg)   __page_to_mfn(pg)
> +#define virt_to_mfn(va)   __virt_to_mfn(va)
>
>  /* Convert between Xen-heap virtual addresses and page-info structures. */
>  static inline struct page_info *virt_to_page(const void *v)
> --
> 2.11.0
Hi Stefano,

On 16/06/2017 00:44, Stefano Stabellini wrote:
> On Tue, 13 Jun 2017, Julien Grall wrote:
>> The file xen/arch/arm/mm.c uses the typesafe MFN in most places. This
>> requires every caller of virt_to_mfn to be prefixed with _mfn(...).
>>
>> To avoid the extra _mfn(...), redefine virt_to_mfn within arch/arm/mm.c
>> to handle typesafe MFN.
>>
>> This patch also introduces __virt_to_mfn, so virt_to_mfn can be
>> redefined easily.
>>
>> Signed-off-by: Julien Grall <julien.grall@arm.com>
>> ---
>>  xen/arch/arm/mm.c        | 16 ++++++++++------
>>  xen/include/asm-arm/mm.h |  3 ++-
>>  2 files changed, 12 insertions(+), 7 deletions(-)
>>
>> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
>> index 452c1e26c3..2ff1688f3f 100644
>> --- a/xen/arch/arm/mm.c
>> +++ b/xen/arch/arm/mm.c
>> @@ -44,6 +44,10 @@
>>
>>  struct domain *dom_xen, *dom_io, *dom_cow;
>>
>> +/* Override macros from asm/page.h to make them work with mfn_t */
>> +#undef virt_to_mfn
>> +#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
>> +
>>  /* Static start-of-day pagetables that we use before the allocators
>>   * are up. These are used by all CPUs during bringup before switching
>>   * to the CPUs own pagetables.
>> @@ -479,7 +483,7 @@ unsigned long domain_page_map_to_mfn(const void *ptr)
>>      unsigned long offset = (va>>THIRD_SHIFT) & LPAE_ENTRY_MASK;
>>
>>      if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END )
>> -        return virt_to_mfn(va);
>> +        return mfn_x(virt_to_mfn(va));
>
> __virt_to_mfn?

Ok.

Cheers,
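To make the one-word suggestion concrete: with the file-local override in place, virt_to_mfn(va) expands to _mfn(__virt_to_mfn(va)), so mfn_x(virt_to_mfn(va)) wraps a raw frame number into mfn_t only to unwrap it again, while domain_page_map_to_mfn() still returns a plain unsigned long. A minimal self-contained sketch of the equivalence is below (the full patch follows); the struct layout and the identity-cast address conversion are simplified stand-ins, not Xen's real definitions.

```c
#include <stdio.h>

/* Simplified stand-in for Xen's typesafe mfn_t wrapper. */
typedef struct { unsigned long m; } mfn_t;

static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ m }; }
static inline unsigned long mfn_x(mfn_t m) { return m.m; }

#define PAGE_SHIFT 12

/* The raw helper (Xen's real one goes through virt_to_maddr();
 * an identity cast is enough to show the point). */
#define __virt_to_mfn(va) ((unsigned long)(va) >> PAGE_SHIFT)

/* The file-local override from the top of xen/arch/arm/mm.c. */
#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))

int main(void)
{
    int obj;

    /* The hunk as posted: wrap into mfn_t, then immediately unwrap. */
    unsigned long as_posted = mfn_x(virt_to_mfn(&obj));

    /* Stefano's suggestion: call the raw helper directly. */
    unsigned long suggested = __virt_to_mfn(&obj);

    printf("equal: %d\n", as_posted == suggested); /* always 1 */
    return 0;
}
```

Either spelling yields the same value; the direct call simply avoids the cosmetic round trip through the typesafe wrapper in a function whose return type has not been converted yet.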
```diff
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 452c1e26c3..2ff1688f3f 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -44,6 +44,10 @@
 
 struct domain *dom_xen, *dom_io, *dom_cow;
 
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef virt_to_mfn
+#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
+
 /* Static start-of-day pagetables that we use before the allocators
  * are up. These are used by all CPUs during bringup before switching
  * to the CPUs own pagetables.
@@ -479,7 +483,7 @@ unsigned long domain_page_map_to_mfn(const void *ptr)
     unsigned long offset = (va>>THIRD_SHIFT) & LPAE_ENTRY_MASK;
 
     if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END )
-        return virt_to_mfn(va);
+        return mfn_x(virt_to_mfn(va));
 
     ASSERT(slot >= 0 && slot < DOMHEAP_ENTRIES);
     ASSERT(map[slot].pt.avail != 0);
@@ -764,7 +768,7 @@ int init_secondary_pagetables(int cpu)
      * domheap mapping pages. */
     for ( i = 0; i < DOMHEAP_SECOND_PAGES; i++ )
     {
-        pte = mfn_to_xen_entry(_mfn(virt_to_mfn(domheap+i*LPAE_ENTRIES)),
+        pte = mfn_to_xen_entry(virt_to_mfn(domheap+i*LPAE_ENTRIES),
                                WRITEALLOC);
         pte.pt.table = 1;
         write_pte(&first[first_table_offset(DOMHEAP_VIRT_START+i*FIRST_SIZE)], pte);
@@ -961,7 +965,7 @@ static int create_xen_table(lpae_t *entry)
     if ( p == NULL )
         return -ENOMEM;
     clear_page(p);
-    pte = mfn_to_xen_entry(_mfn(virt_to_mfn(p)), WRITEALLOC);
+    pte = mfn_to_xen_entry(virt_to_mfn(p), WRITEALLOC);
     pte.pt.table = 1;
     write_pte(entry, pte);
     return 0;
@@ -1216,7 +1220,7 @@ int xenmem_add_to_physmap_one(
     unsigned long idx,
     gfn_t gfn)
 {
-    unsigned long mfn = 0;
+    mfn_t mfn = INVALID_MFN;
     int rc;
     p2m_type_t t;
     struct page_info *page = NULL;
@@ -1302,7 +1306,7 @@ int xenmem_add_to_physmap_one(
         return -EINVAL;
     }
 
-    mfn = page_to_mfn(page);
+    mfn = _mfn(page_to_mfn(page));
     t = p2m_map_foreign;
 
     rcu_unlock_domain(od);
@@ -1321,7 +1325,7 @@ int xenmem_add_to_physmap_one(
     }
 
     /* Map at new location. */
-    rc = guest_physmap_add_entry(d, gfn, _mfn(mfn), 0, t);
+    rc = guest_physmap_add_entry(d, gfn, mfn, 0, t);
 
     /* If we fail to add the mapping, we need to drop the reference we
      * took earlier on foreign pages */
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index b2e7ea7761..6e2b3c7f2b 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -264,7 +264,7 @@ static inline int gvirt_to_maddr(vaddr_t va, paddr_t *pa, unsigned int flags)
 #define __va(x)            (maddr_to_virt(x))
 
 /* Convert between Xen-heap virtual addresses and machine frame numbers. */
-#define virt_to_mfn(va)   (virt_to_maddr(va) >> PAGE_SHIFT)
+#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT)
 #define mfn_to_virt(mfn)  (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))
 
 /*
@@ -274,6 +274,7 @@ static inline int gvirt_to_maddr(vaddr_t va, paddr_t *pa, unsigned int flags)
  */
 #define mfn_to_page(mfn)  __mfn_to_page(mfn)
 #define page_to_mfn(pg)   __page_to_mfn(pg)
+#define virt_to_mfn(va)   __virt_to_mfn(va)
 
 /* Convert between Xen-heap virtual addresses and page-info structures. */
 static inline struct page_info *virt_to_page(const void *v)
```
The file xen/arch/arm/mm.c uses the typesafe MFN in most places. This
requires every caller of virt_to_mfn to be prefixed with _mfn(...).

To avoid the extra _mfn(...), redefine virt_to_mfn within arch/arm/mm.c
to handle typesafe MFN.

This patch also introduces __virt_to_mfn, so virt_to_mfn can be
redefined easily.

Signed-off-by: Julien Grall <julien.grall@arm.com>
---
 xen/arch/arm/mm.c        | 16 ++++++++++------
 xen/include/asm-arm/mm.h |  3 ++-
 2 files changed, 12 insertions(+), 7 deletions(-)
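As background for the commit message above: the point of the typesafe MFN is that wrapping the frame number in a single-member struct turns accidental mixing of raw integers and MFNs into a compile error, and the __virt_to_mfn indirection lets each file opt into the typesafe return type. A minimal sketch of that pattern follows; the struct layout and consume_mfn() below are illustrative assumptions, not the hypervisor's actual definitions.

```c
#include <stdio.h>

/* Simplified stand-in for Xen's typesafe mfn_t: a raw unsigned long
 * no longer converts to it implicitly. */
typedef struct { unsigned long m; } mfn_t;

static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ m }; }
static inline unsigned long mfn_x(mfn_t m) { return m.m; }

#define PAGE_SHIFT 12

/* asm-arm/mm.h after this patch: the raw helper gets the __ prefix,
 * and the default virt_to_mfn still produces a raw value. An identity
 * cast stands in for virt_to_maddr() here. */
#define __virt_to_mfn(va) ((unsigned long)(va) >> PAGE_SHIFT)
#define virt_to_mfn(va)   __virt_to_mfn(va)

/* A consumer that takes the typesafe type, playing the role of
 * mfn_to_xen_entry() in the patch (illustrative stand-in). */
static void consume_mfn(mfn_t mfn)
{
    printf("mapping mfn %#lx\n", mfn_x(mfn));
}

/* arch/arm/mm.c after this patch: override the macro so every local
 * caller gets mfn_t without sprinkling _mfn(...) around. */
#undef virt_to_mfn
#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))

int main(void)
{
    int page;

    consume_mfn(virt_to_mfn(&page));    /* fine: already an mfn_t */

    /* consume_mfn(__virt_to_mfn(&page)); would not compile:
     * unsigned long is not mfn_t, which is exactly the safety net. */
    return 0;
}
```

The override is purely local: files that have not been converted keep the raw-integer virt_to_mfn, while converted files like arch/arm/mm.c drop the _mfn(...) noise at every call site.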