[Xen-devel,09/15] xen/arm: guest_walk: Use lpae_is_mapping to simplify the code

Message ID 20180716172712.20294-10-julien.grall@arm.com
State New
Series xen/arm: Bunch of clean-up/improvement

Commit Message

Julien Grall July 16, 2018, 5:27 p.m. UTC
!lpae_is_page(pte, level) && !lpae_is_superpage(pte, level) is
equivalent to !lpae_is_mapping(pte, level).

At the same time, drop lpae_is_page(pte, level), which is now unused.

Signed-off-by: Julien Grall <julien.grall@arm.com>
---
 xen/arch/arm/guest_walk.c  | 2 +-
 xen/include/asm-arm/lpae.h | 5 -----
 2 files changed, 1 insertion(+), 6 deletions(-)
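
The equivalence is easy to check exhaustively. Below is a minimal standalone sketch: it uses a simplified stand-in for lpae_t and an assumed definition of is_mapping() mirroring what lpae_is_mapping() is expected to do (inferred from the helpers in lpae.h), not the real Xen types, and asserts that (!is_page && !is_superpage) == !is_mapping for every valid/table/level combination.

/*
 * Standalone sketch, simplified stand-in types (not the real Xen lpae_t):
 * model only the two PTE bits that matter here -- valid (bit 0) and
 * table (bit 1) -- and check the equivalence used by the patch.
 * is_mapping() below is an assumed, simplified model of lpae_is_mapping().
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_pte {
    bool valid;  /* PTE bit 0 */
    bool table;  /* PTE bit 1 */
};

static bool is_valid(struct fake_pte pte)
{
    return pte.valid;
}

/* A mapping is a page at level 3 (table bit set) or a block at levels 0-2. */
static bool is_mapping(struct fake_pte pte, unsigned int level)
{
    if ( !is_valid(pte) )
        return false;

    return (level == 3) ? pte.table : !pte.table;
}

static bool is_superpage(struct fake_pte pte, unsigned int level)
{
    return (level < 3) && is_mapping(pte, level);
}

static bool is_page(struct fake_pte pte, unsigned int level)
{
    return (level == 3) && is_valid(pte) && pte.table;
}

int main(void)
{
    for ( unsigned int level = 0; level <= 3; level++ )
        for ( int v = 0; v <= 1; v++ )
            for ( int t = 0; t <= 1; t++ )
            {
                struct fake_pte pte = { .valid = v, .table = t };

                /* The simplification the patch relies on. */
                assert( (!is_page(pte, level) && !is_superpage(pte, level)) ==
                        !is_mapping(pte, level) );
            }

    printf("equivalence holds for all valid/table/level combinations\n");

    return 0;
}

In this model a level-3 entry with the table bit set is a page and a lower-level entry with the table bit clear is a block, so page-or-superpage covers exactly the cases is_mapping() accepts, which is why the single !lpae_is_mapping() test suffices.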

Comments

Stefano Stabellini Aug. 14, 2018, 9:14 p.m. UTC | #1
On Mon, 16 Jul 2018, Julien Grall wrote:
> !lpae_is_page(pte, level) && !lpae_is_superpage(pte, level) is
> equivalent to !lpae_is_mapping(pte, level).
> 
> At the same time, drop lpae_is_page(pte, level), which is now unused.
> 
> Signed-off-by: Julien Grall <julien.grall@arm.com>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>

> ---
>  xen/arch/arm/guest_walk.c  | 2 +-
>  xen/include/asm-arm/lpae.h | 5 -----
>  2 files changed, 1 insertion(+), 6 deletions(-)
> 
> diff --git a/xen/arch/arm/guest_walk.c b/xen/arch/arm/guest_walk.c
> index a7c7e05603..e3e21bdad3 100644
> --- a/xen/arch/arm/guest_walk.c
> +++ b/xen/arch/arm/guest_walk.c
> @@ -566,7 +566,7 @@ static int guest_walk_ld(const struct vcpu *v,
>       * PTE is invalid or holds a reserved entry (PTE<1:0> == x0)) or if the PTE
>       * maps a memory block at level 3 (PTE<1:0> == 01).
>       */
> -    if ( !lpae_is_page(pte, level) && !lpae_is_superpage(pte, level) )
> +    if ( !lpae_is_mapping(pte, level) )
>          return -EFAULT;
>  
>      /* Make sure that the lower bits of the PTE's base address are zero. */
> diff --git a/xen/include/asm-arm/lpae.h b/xen/include/asm-arm/lpae.h
> index 1d86020d07..15595cd35c 100644
> --- a/xen/include/asm-arm/lpae.h
> +++ b/xen/include/asm-arm/lpae.h
> @@ -153,11 +153,6 @@ static inline bool lpae_is_superpage(lpae_t pte, unsigned int level)
>      return (level < 3) && lpae_is_mapping(pte, level);
>  }
>  
> -static inline bool lpae_is_page(lpae_t pte, unsigned int level)
> -{
> -    return (level == 3) && lpae_is_valid(pte) && pte.walk.table;
> -}
> -
>  /*
>   * AArch64 supports pages with different sizes (4K, 16K, and 64K). To enable
>   * page table walks for various configurations, the following helpers enable
> -- 
> 2.11.0
>

Patch

diff --git a/xen/arch/arm/guest_walk.c b/xen/arch/arm/guest_walk.c
index a7c7e05603..e3e21bdad3 100644
--- a/xen/arch/arm/guest_walk.c
+++ b/xen/arch/arm/guest_walk.c
@@ -566,7 +566,7 @@  static int guest_walk_ld(const struct vcpu *v,
      * PTE is invalid or holds a reserved entry (PTE<1:0> == x0)) or if the PTE
      * maps a memory block at level 3 (PTE<1:0> == 01).
      */
-    if ( !lpae_is_page(pte, level) && !lpae_is_superpage(pte, level) )
+    if ( !lpae_is_mapping(pte, level) )
         return -EFAULT;
 
     /* Make sure that the lower bits of the PTE's base address are zero. */
diff --git a/xen/include/asm-arm/lpae.h b/xen/include/asm-arm/lpae.h
index 1d86020d07..15595cd35c 100644
--- a/xen/include/asm-arm/lpae.h
+++ b/xen/include/asm-arm/lpae.h
@@ -153,11 +153,6 @@  static inline bool lpae_is_superpage(lpae_t pte, unsigned int level)
     return (level < 3) && lpae_is_mapping(pte, level);
 }
 
-static inline bool lpae_is_page(lpae_t pte, unsigned int level)
-{
-    return (level == 3) && lpae_is_valid(pte) && pte.walk.table;
-}
-
 /*
  * AArch64 supports pages with different sizes (4K, 16K, and 64K). To enable
  * page table walks for various configurations, the following helpers enable