
[Xen-devel,11/15] xen/arm: Allow lpae_is_{table, mapping} helpers to work on invalid entry

Message ID 20180716172712.20294-12-julien.grall@arm.com
State New
Series xen/arm: Bunch of clean-up/improvement

Commit Message

Julien Grall July 16, 2018, 5:27 p.m. UTC
Currently, the lpae_is_{table, mapping} helpers will always return false on
entries with the valid bit unset. However, it would be useful to have them
operate on any entry, for instance to store information in advance while
still requesting a fault.

With that change, the p2m code now provides overlays for *_is_{table,
mapping} that check the valid bit of the entry.

Signed-off-by: Julien Grall <julien.grall@arm.com>
---
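
For illustration only (not part of the patch): a minimal standalone model of
the semantics this change introduces. The struct below just mimics the
relevant PTE bits; it is not Xen code.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of an LPAE entry: only the bits relevant here. */
typedef struct {
    unsigned int valid:1;   /* models pte.walk.valid */
    unsigned int table:1;   /* models pte.walk.table */
} toy_pte_t;

static bool toy_lpae_is_valid(toy_pte_t pte)
{
    return pte.valid;
}

/* After this patch: no valid-bit check, so it works on invalid entries. */
static bool toy_lpae_is_mapping(toy_pte_t pte, unsigned int level)
{
    if ( level == 3 )
        return pte.table;
    else
        return !pte.table;
}

/* The p2m overlay restores the valid-bit check for stage-2 code paths. */
static bool toy_p2m_is_mapping(toy_pte_t pte, unsigned int level)
{
    return toy_lpae_is_valid(pte) && toy_lpae_is_mapping(pte, level);
}

int main(void)
{
    /* Invalid level-2 block entry: information kept, fault still taken. */
    toy_pte_t pte = { .valid = 0, .table = 0 };

    printf("lpae_is_mapping: %d\n", toy_lpae_is_mapping(pte, 2)); /* 1 */
    printf("p2m_is_mapping:  %d\n", toy_p2m_is_mapping(pte, 2));  /* 0 */

    return 0;
}

A walker using the raw lpae helper still sees the shape of the entry, while
the p2m-level check keeps treating it as not present.
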
 xen/arch/arm/guest_walk.c  |  2 +-
 xen/arch/arm/mm.c          |  2 +-
 xen/arch/arm/p2m.c         | 22 ++++++++++++++++++----
 xen/include/asm-arm/lpae.h | 11 +++++++----
 4 files changed, 27 insertions(+), 10 deletions(-)

Comments

Stefano Stabellini Aug. 14, 2018, 9:33 p.m. UTC | #1
On Mon, 16 Jul 2018, Julien Grall wrote:
> Currently, lpae_is_{table, mapping} helpers will always return false on
> entry with the valid bit unset. However, it would be useful to have them
  ^entries

> operating on any entry. For instance to store information in advance but
> still request a fault.
> 
> With that change, the p2m is now providing an overlay for *_is_{table,
> mapping} that will check the valid bit of the entry.
> 
> Signed-off-by: Julien Grall <julien.grall@arm.com>
>
> ---
>  xen/arch/arm/guest_walk.c  |  2 +-
>  xen/arch/arm/mm.c          |  2 +-
>  xen/arch/arm/p2m.c         | 22 ++++++++++++++++++----
>  xen/include/asm-arm/lpae.h | 11 +++++++----
>  4 files changed, 27 insertions(+), 10 deletions(-)
> 
> diff --git a/xen/arch/arm/guest_walk.c b/xen/arch/arm/guest_walk.c
> index e3e21bdad3..4a1b4cf2c8 100644
> --- a/xen/arch/arm/guest_walk.c
> +++ b/xen/arch/arm/guest_walk.c
> @@ -566,7 +566,7 @@ static int guest_walk_ld(const struct vcpu *v,
>       * PTE is invalid or holds a reserved entry (PTE<1:0> == x0)) or if the PTE
>       * maps a memory block at level 3 (PTE<1:0> == 01).
>       */
> -    if ( !lpae_is_mapping(pte, level) )
> +    if ( !lpae_is_valid(pte) || !lpae_is_mapping(pte, level) )
>          return -EFAULT;
>  
>      /* Make sure that the lower bits of the PTE's base address are zero. */
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index e3dafe5fd7..52e57fef2f 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -996,7 +996,7 @@ static int create_xen_entries(enum xenmap_operation op,
>      for(; addr < addr_end; addr += PAGE_SIZE, mfn = mfn_add(mfn, 1))
>      {
>          entry = &xen_second[second_linear_offset(addr)];
> -        if ( !lpae_is_table(*entry, 2) )
> +        if ( !lpae_is_valid(*entry) || !lpae_is_table(*entry, 2) )
>          {
>              rc = create_xen_table(entry);
>              if ( rc < 0 ) {
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index ec3fdcb554..07925a1be4 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -219,6 +219,20 @@ static p2m_access_t p2m_mem_access_radix_get(struct p2m_domain *p2m, gfn_t gfn)
>          return radix_tree_ptr_to_int(ptr);
>  }
>  
> +/*
> + * lpae_is_* helpers don't check whether the valid bit is set in the
> + * PTE. Provide our own overlay to check the valid bit.
> + */
> +static inline bool p2m_is_mapping(lpae_t pte, unsigned int level)
> +{
> +    return lpae_is_valid(pte) && lpae_is_mapping(pte, level);
> +}
> +
> +static inline bool p2m_is_superpage(lpae_t pte, unsigned int level)
> +{
> +    return lpae_is_valid(pte) && lpae_is_superpage(pte, level);
> +}

I like the idea, but it would be clearer to me if they were called
lpae_is_valid_mapping and lpae_is_valid_superpage respectively. What do
you think?

Also, we might as well move them to lpae.h and use them from mm.c and
guest_walk.c as appropriate?
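
I.e., roughly (sketch only, assuming the rename):

static inline bool lpae_is_valid_mapping(lpae_t pte, unsigned int level)
{
    return lpae_is_valid(pte) && lpae_is_mapping(pte, level);
}

static inline bool lpae_is_valid_superpage(lpae_t pte, unsigned int level)
{
    return lpae_is_valid(pte) && lpae_is_superpage(pte, level);
}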


>  #define GUEST_TABLE_MAP_FAILED 0
>  #define GUEST_TABLE_SUPER_PAGE 1
>  #define GUEST_TABLE_NORMAL_PAGE 2
> @@ -262,7 +276,7 @@ static int p2m_next_level(struct p2m_domain *p2m, bool read_only,
>  
>      /* The function p2m_next_level is never called at the 3rd level */
>      ASSERT(level < 3);
> -    if ( lpae_is_mapping(*entry, level) )
> +    if ( p2m_is_mapping(*entry, level) )
>          return GUEST_TABLE_SUPER_PAGE;
>  
>      mfn = lpae_to_mfn(*entry);
> @@ -642,7 +656,7 @@ static void p2m_free_entry(struct p2m_domain *p2m,
>          return;
>  
>      /* Nothing to do but updating the stats if the entry is a super-page. */
> -    if ( lpae_is_superpage(entry, level) )
> +    if ( p2m_is_superpage(entry, level) )
>      {
>          p2m->stats.mappings[level]--;
>          return;
> @@ -697,7 +711,7 @@ static bool p2m_split_superpage(struct p2m_domain *p2m, lpae_t *entry,
>       * a superpage.
>       */
>      ASSERT(level < target);
> -    ASSERT(lpae_is_superpage(*entry, level));
> +    ASSERT(p2m_is_superpage(*entry, level));
>  
>      page = alloc_domheap_page(NULL, 0);
>      if ( !page )
> @@ -834,7 +848,7 @@ static int __p2m_set_entry(struct p2m_domain *p2m,
>          /* We need to split the original page. */
>          lpae_t split_pte = *entry;
>  
> -        ASSERT(lpae_is_superpage(*entry, level));
> +        ASSERT(p2m_is_superpage(*entry, level));
>  
>          if ( !p2m_split_superpage(p2m, &split_pte, level, target, offsets) )
>          {
> diff --git a/xen/include/asm-arm/lpae.h b/xen/include/asm-arm/lpae.h
> index 05c87a8f48..88f30fc917 100644
> --- a/xen/include/asm-arm/lpae.h
> +++ b/xen/include/asm-arm/lpae.h
> @@ -133,16 +133,19 @@ static inline bool lpae_is_valid(lpae_t pte)
>      return pte.walk.valid;
>  }
>  
> +/*
> + * lpae_is_* don't check the valid bit. This gives an opportunity for the
> + * callers to operate on the entry even if they are not valid. For
> + * instance to store information in advance.
> + */
>  static inline bool lpae_is_table(lpae_t pte, unsigned int level)
>  {
> -    return (level < 3) && lpae_is_valid(pte) && pte.walk.table;
> +    return (level < 3) && pte.walk.table;
>  }
>  
>  static inline bool lpae_is_mapping(lpae_t pte, unsigned int level)
>  {
> -    if ( !lpae_is_valid(pte) )
> -        return false;
> -    else if ( level == 3 )
> +    if ( level == 3 )
>          return pte.walk.table;
>      else
>          return !pte.walk.table;
> -- 
> 2.11.0
>
Julien Grall Aug. 14, 2018, 10:51 p.m. UTC | #2
Hi Stefano,

On 08/14/2018 10:33 PM, Stefano Stabellini wrote:
> On Mon, 16 Jul 2018, Julien Grall wrote:
>> Currently, lpae_is_{table, mapping} helpers will always return false on
>> entry with the valid bit unset. However, it would be useful to have them
>    ^entries
> 
>> operating on any entry. For instance to store information in advance but
>> still request a fault.
>>
>> With that change, the p2m is now providing an overlay for *_is_{table,
>> mapping} that will check the valid bit of the entry.
>>
>> Signed-off-by: Julien Grall <julien.grall@arm.com>
>>
>> ---
>>   xen/arch/arm/guest_walk.c  |  2 +-
>>   xen/arch/arm/mm.c          |  2 +-
>>   xen/arch/arm/p2m.c         | 22 ++++++++++++++++++----
>>   xen/include/asm-arm/lpae.h | 11 +++++++----
>>   4 files changed, 27 insertions(+), 10 deletions(-)
>>
>> diff --git a/xen/arch/arm/guest_walk.c b/xen/arch/arm/guest_walk.c
>> index e3e21bdad3..4a1b4cf2c8 100644
>> --- a/xen/arch/arm/guest_walk.c
>> +++ b/xen/arch/arm/guest_walk.c
>> @@ -566,7 +566,7 @@ static int guest_walk_ld(const struct vcpu *v,
>>        * PTE is invalid or holds a reserved entry (PTE<1:0> == x0)) or if the PTE
>>        * maps a memory block at level 3 (PTE<1:0> == 01).
>>        */
>> -    if ( !lpae_is_mapping(pte, level) )
>> +    if ( !lpae_is_valid(pte) || !lpae_is_mapping(pte, level) )
>>           return -EFAULT;
>>   
>>       /* Make sure that the lower bits of the PTE's base address are zero. */
>> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
>> index e3dafe5fd7..52e57fef2f 100644
>> --- a/xen/arch/arm/mm.c
>> +++ b/xen/arch/arm/mm.c
>> @@ -996,7 +996,7 @@ static int create_xen_entries(enum xenmap_operation op,
>>       for(; addr < addr_end; addr += PAGE_SIZE, mfn = mfn_add(mfn, 1))
>>       {
>>           entry = &xen_second[second_linear_offset(addr)];
>> -        if ( !lpae_is_table(*entry, 2) )
>> +        if ( !lpae_is_valid(*entry) || !lpae_is_table(*entry, 2) )
>>           {
>>               rc = create_xen_table(entry);
>>               if ( rc < 0 ) {
>> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
>> index ec3fdcb554..07925a1be4 100644
>> --- a/xen/arch/arm/p2m.c
>> +++ b/xen/arch/arm/p2m.c
>> @@ -219,6 +219,20 @@ static p2m_access_t p2m_mem_access_radix_get(struct p2m_domain *p2m, gfn_t gfn)
>>           return radix_tree_ptr_to_int(ptr);
>>   }
>>   
>> +/*
>> + * lpae_is_* helpers don't check whether the valid bit is set in the
>> + * PTE. Provide our own overlay to check the valid bit.
>> + */
>> +static inline bool p2m_is_mapping(lpae_t pte, unsigned int level)
>> +{
>> +    return lpae_is_valid(pte) && lpae_is_mapping(pte, level);
>> +}
>> +
>> +static inline bool p2m_is_superpage(lpae_t pte, unsigned int level)
>> +{
>> +    return lpae_is_valid(pte) && lpae_is_superpage(pte, level);
>> +}
> 
> I like the idea, but it would be clearer to me if they were called
> lpae_is_valid_mapping and lpae_is_valid_superpage respectively. What do
> you think?
> 
> Also, we might as well move them to lpae.h and use them from mm.c and
> guest_walk.c as appropriate?

lpae.h is not suitable because, as I said in the commit message, each 
page-table (stage-1, stage-2) may have a different view of what "valid" 
means.

At the moment, that view is the same, but it is not going to stay that 
way for long. Hence the p2m_ prefix: these helpers are specific to the 
p2m. This gives us some more liberty in the future while making the 
lpae code a bit more generic.
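
To illustrate the point, a rough sketch (p2m_pte_is_valid is a made-up 
name, not part of this series): the p2m can later redefine its own notion 
of validity without touching the generic helpers in lpae.h:

/* Sketch only: the p2m's view of "valid" may diverge from the raw bit. */
static inline bool p2m_pte_is_valid(lpae_t pte)
{
    /*
     * Today this is just the hardware valid bit; later it could, for
     * example, also consider software-defined bits stored in an
     * otherwise-invalid entry.
     */
    return lpae_is_valid(pte);
}

static inline bool p2m_is_mapping(lpae_t pte, unsigned int level)
{
    return p2m_pte_is_valid(pte) && lpae_is_mapping(pte, level);
}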

In guest_walk.c there is only one user, so the benefit of introducing a 
helper is quite limited there.

Cheers,
Stefano Stabellini Aug. 15, 2018, 7:13 p.m. UTC | #3
On Tue, 14 Aug 2018, Julien Grall wrote:
> Hi Stefano,
> 
> On 08/14/2018 10:33 PM, Stefano Stabellini wrote:
> > On Mon, 16 Jul 2018, Julien Grall wrote:
> > > Currently, lpae_is_{table, mapping} helpers will always return false on
> > > entry with the valid bit unset. However, it would be useful to have them
> >    ^entries
> > 
> > > operating on any entry. For instance to store information in advance but
> > > still request a fault.
> > > 
> > > With that change, the p2m is now providing an overlay for *_is_{table,
> > > mapping} that will check the valid bit of the entry.
> > > 
> > > Signed-off-by: Julien Grall <julien.grall@arm.com>
> > > 
> > > ---
> > >   xen/arch/arm/guest_walk.c  |  2 +-
> > >   xen/arch/arm/mm.c          |  2 +-
> > >   xen/arch/arm/p2m.c         | 22 ++++++++++++++++++----
> > >   xen/include/asm-arm/lpae.h | 11 +++++++----
> > >   4 files changed, 27 insertions(+), 10 deletions(-)
> > > 
> > > diff --git a/xen/arch/arm/guest_walk.c b/xen/arch/arm/guest_walk.c
> > > index e3e21bdad3..4a1b4cf2c8 100644
> > > --- a/xen/arch/arm/guest_walk.c
> > > +++ b/xen/arch/arm/guest_walk.c
> > > @@ -566,7 +566,7 @@ static int guest_walk_ld(const struct vcpu *v,
> > >        * PTE is invalid or holds a reserved entry (PTE<1:0> == x0)) or if
> > > the PTE
> > >        * maps a memory block at level 3 (PTE<1:0> == 01).
> > >        */
> > > -    if ( !lpae_is_mapping(pte, level) )
> > > +    if ( !lpae_is_valid(pte) || !lpae_is_mapping(pte, level) )
> > >           return -EFAULT;
> > >         /* Make sure that the lower bits of the PTE's base address are
> > > zero. */
> > > diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> > > index e3dafe5fd7..52e57fef2f 100644
> > > --- a/xen/arch/arm/mm.c
> > > +++ b/xen/arch/arm/mm.c
> > > @@ -996,7 +996,7 @@ static int create_xen_entries(enum xenmap_operation
> > > op,
> > >       for(; addr < addr_end; addr += PAGE_SIZE, mfn = mfn_add(mfn, 1))
> > >       {
> > >           entry = &xen_second[second_linear_offset(addr)];
> > > -        if ( !lpae_is_table(*entry, 2) )
> > > +        if ( !lpae_is_valid(*entry) || !lpae_is_table(*entry, 2) )
> > >           {
> > >               rc = create_xen_table(entry);
> > >               if ( rc < 0 ) {
> > > diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> > > index ec3fdcb554..07925a1be4 100644
> > > --- a/xen/arch/arm/p2m.c
> > > +++ b/xen/arch/arm/p2m.c
> > > @@ -219,6 +219,20 @@ static p2m_access_t p2m_mem_access_radix_get(struct
> > > p2m_domain *p2m, gfn_t gfn)
> > >           return radix_tree_ptr_to_int(ptr);
> > >   }
> > >   +/*
> > > + * lpae_is_* helpers don't check whether the valid bit is set in the
> > > + * PTE. Provide our own overlay to check the valid bit.
> > > + */
> > > +static inline bool p2m_is_mapping(lpae_t pte, unsigned int level)
> > > +{
> > > +    return lpae_is_valid(pte) && lpae_is_mapping(pte, level);
> > > +}
> > > +
> > > +static inline bool p2m_is_superpage(lpae_t pte, unsigned int level)
> > > +{
> > > +    return lpae_is_valid(pte) && lpae_is_superpage(pte, level);
> > > +}
> > 
> > I like the idea, but it would be clearer to me if they were called
> > lpae_is_valid_mapping and lpae_is_valid_superpage respectively. What do
> > you think?
> > 
> > Also, we might as well move them to lpae.h and use them from mm.c and
> > guest_walk.c as appropriate?
> 
> lpae.h is not suitable because as I said in the commit message each page-table
> (stage-1, stage-2) may have a different view of what "valid" means.
> 
> At the moment, that view is the same but it is not going to stay long like
> that. Hence the reason of prefixing with p2m_. They are specific to the p2m.
> This is giving us some more liberty in the future while making the lpae code a
> bit more generic.
> 
> In guest_walk.c there are only one user, so the introduction of an helper is
> quite limited there.

I see, so by "p2m_is_mapping" you meant specifically
"stage2_is_mapping", right? That makes sense now.
Julien Grall Aug. 16, 2018, 8:51 a.m. UTC | #4
Hi,

On 08/15/2018 08:13 PM, Stefano Stabellini wrote:
> On Tue, 14 Aug 2018, Julien Grall wrote:
>> Hi Stefano,
>>
>> On 08/14/2018 10:33 PM, Stefano Stabellini wrote:
>>> On Mon, 16 Jul 2018, Julien Grall wrote:
>>>> Currently, lpae_is_{table, mapping} helpers will always return false on
>>>> entry with the valid bit unset. However, it would be useful to have them
>>>     ^entries
>>>
>>>> operating on any entry. For instance to store information in advance but
>>>> still request a fault.
>>>>
>>>> With that change, the p2m is now providing an overlay for *_is_{table,
>>>> mapping} that will check the valid bit of the entry.
>>>>
>>>> Signed-off-by: Julien Grall <julien.grall@arm.com>
>>>>
>>>> ---
>>>>    xen/arch/arm/guest_walk.c  |  2 +-
>>>>    xen/arch/arm/mm.c          |  2 +-
>>>>    xen/arch/arm/p2m.c         | 22 ++++++++++++++++++----
>>>>    xen/include/asm-arm/lpae.h | 11 +++++++----
>>>>    4 files changed, 27 insertions(+), 10 deletions(-)
>>>>
>>>> diff --git a/xen/arch/arm/guest_walk.c b/xen/arch/arm/guest_walk.c
>>>> index e3e21bdad3..4a1b4cf2c8 100644
>>>> --- a/xen/arch/arm/guest_walk.c
>>>> +++ b/xen/arch/arm/guest_walk.c
>>>> @@ -566,7 +566,7 @@ static int guest_walk_ld(const struct vcpu *v,
>>>>         * PTE is invalid or holds a reserved entry (PTE<1:0> == x0)) or if
>>>> the PTE
>>>>         * maps a memory block at level 3 (PTE<1:0> == 01).
>>>>         */
>>>> -    if ( !lpae_is_mapping(pte, level) )
>>>> +    if ( !lpae_is_valid(pte) || !lpae_is_mapping(pte, level) )
>>>>            return -EFAULT;
>>>>          /* Make sure that the lower bits of the PTE's base address are
>>>> zero. */
>>>> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
>>>> index e3dafe5fd7..52e57fef2f 100644
>>>> --- a/xen/arch/arm/mm.c
>>>> +++ b/xen/arch/arm/mm.c
>>>> @@ -996,7 +996,7 @@ static int create_xen_entries(enum xenmap_operation
>>>> op,
>>>>        for(; addr < addr_end; addr += PAGE_SIZE, mfn = mfn_add(mfn, 1))
>>>>        {
>>>>            entry = &xen_second[second_linear_offset(addr)];
>>>> -        if ( !lpae_is_table(*entry, 2) )
>>>> +        if ( !lpae_is_valid(*entry) || !lpae_is_table(*entry, 2) )
>>>>            {
>>>>                rc = create_xen_table(entry);
>>>>                if ( rc < 0 ) {
>>>> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
>>>> index ec3fdcb554..07925a1be4 100644
>>>> --- a/xen/arch/arm/p2m.c
>>>> +++ b/xen/arch/arm/p2m.c
>>>> @@ -219,6 +219,20 @@ static p2m_access_t p2m_mem_access_radix_get(struct
>>>> p2m_domain *p2m, gfn_t gfn)
>>>>            return radix_tree_ptr_to_int(ptr);
>>>>    }
>>>>    +/*
>>>> + * lpae_is_* helpers don't check whether the valid bit is set in the
>>>> + * PTE. Provide our own overlay to check the valid bit.
>>>> + */
>>>> +static inline bool p2m_is_mapping(lpae_t pte, unsigned int level)
>>>> +{
>>>> +    return lpae_is_valid(pte) && lpae_is_mapping(pte, level);
>>>> +}
>>>> +
>>>> +static inline bool p2m_is_superpage(lpae_t pte, unsigned int level)
>>>> +{
>>>> +    return lpae_is_valid(pte) && lpae_is_superpage(pte, level);
>>>> +}
>>>
>>> I like the idea, but it would be clearer to me if they were called
>>> lpae_is_valid_mapping and lpae_is_valid_superpage respectively. What do
>>> you think?
>>> 
>>> Also, we might as well move them to lpae.h and use them from mm.c and
>>> guest_walk.c as appropriate?
>>
>> lpae.h is not suitable because as I said in the commit message each page-table
>> (stage-1, stage-2) may have a different view of what "valid" means.
>>
>> At the moment, that view is the same but it is not going to stay long like
>> that. Hence the reason of prefixing with p2m_. They are specific to the p2m.
>> This is giving us some more liberty in the future while making the lpae code a
>> bit more generic.
>>
>> In guest_walk.c there are only one user, so the introduction of an helper is
>> quite limited there.
> 
> I see, so by "p2m_is_mapping" you meant specifically
> "stage2_is_mapping", right? That makes sense now.

Yes. We use the term "p2m" everywhere in Xen to refer to stage-2 
page-tables (see lpae_p2m_t). So it makes sense to prefix the helpers 
with "p2m_" here.

Cheers,

Patch

diff --git a/xen/arch/arm/guest_walk.c b/xen/arch/arm/guest_walk.c
index e3e21bdad3..4a1b4cf2c8 100644
--- a/xen/arch/arm/guest_walk.c
+++ b/xen/arch/arm/guest_walk.c
@@ -566,7 +566,7 @@  static int guest_walk_ld(const struct vcpu *v,
      * PTE is invalid or holds a reserved entry (PTE<1:0> == x0)) or if the PTE
      * maps a memory block at level 3 (PTE<1:0> == 01).
      */
-    if ( !lpae_is_mapping(pte, level) )
+    if ( !lpae_is_valid(pte) || !lpae_is_mapping(pte, level) )
         return -EFAULT;
 
     /* Make sure that the lower bits of the PTE's base address are zero. */
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index e3dafe5fd7..52e57fef2f 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -996,7 +996,7 @@  static int create_xen_entries(enum xenmap_operation op,
     for(; addr < addr_end; addr += PAGE_SIZE, mfn = mfn_add(mfn, 1))
     {
         entry = &xen_second[second_linear_offset(addr)];
-        if ( !lpae_is_table(*entry, 2) )
+        if ( !lpae_is_valid(*entry) || !lpae_is_table(*entry, 2) )
         {
             rc = create_xen_table(entry);
             if ( rc < 0 ) {
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index ec3fdcb554..07925a1be4 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -219,6 +219,20 @@  static p2m_access_t p2m_mem_access_radix_get(struct p2m_domain *p2m, gfn_t gfn)
         return radix_tree_ptr_to_int(ptr);
 }
 
+/*
+ * lpae_is_* helpers don't check whether the valid bit is set in the
+ * PTE. Provide our own overlay to check the valid bit.
+ */
+static inline bool p2m_is_mapping(lpae_t pte, unsigned int level)
+{
+    return lpae_is_valid(pte) && lpae_is_mapping(pte, level);
+}
+
+static inline bool p2m_is_superpage(lpae_t pte, unsigned int level)
+{
+    return lpae_is_valid(pte) && lpae_is_superpage(pte, level);
+}
+
 #define GUEST_TABLE_MAP_FAILED 0
 #define GUEST_TABLE_SUPER_PAGE 1
 #define GUEST_TABLE_NORMAL_PAGE 2
@@ -262,7 +276,7 @@  static int p2m_next_level(struct p2m_domain *p2m, bool read_only,
 
     /* The function p2m_next_level is never called at the 3rd level */
     ASSERT(level < 3);
-    if ( lpae_is_mapping(*entry, level) )
+    if ( p2m_is_mapping(*entry, level) )
         return GUEST_TABLE_SUPER_PAGE;
 
     mfn = lpae_to_mfn(*entry);
@@ -642,7 +656,7 @@  static void p2m_free_entry(struct p2m_domain *p2m,
         return;
 
     /* Nothing to do but updating the stats if the entry is a super-page. */
-    if ( lpae_is_superpage(entry, level) )
+    if ( p2m_is_superpage(entry, level) )
     {
         p2m->stats.mappings[level]--;
         return;
@@ -697,7 +711,7 @@  static bool p2m_split_superpage(struct p2m_domain *p2m, lpae_t *entry,
      * a superpage.
      */
     ASSERT(level < target);
-    ASSERT(lpae_is_superpage(*entry, level));
+    ASSERT(p2m_is_superpage(*entry, level));
 
     page = alloc_domheap_page(NULL, 0);
     if ( !page )
@@ -834,7 +848,7 @@  static int __p2m_set_entry(struct p2m_domain *p2m,
         /* We need to split the original page. */
         lpae_t split_pte = *entry;
 
-        ASSERT(lpae_is_superpage(*entry, level));
+        ASSERT(p2m_is_superpage(*entry, level));
 
         if ( !p2m_split_superpage(p2m, &split_pte, level, target, offsets) )
         {
diff --git a/xen/include/asm-arm/lpae.h b/xen/include/asm-arm/lpae.h
index 05c87a8f48..88f30fc917 100644
--- a/xen/include/asm-arm/lpae.h
+++ b/xen/include/asm-arm/lpae.h
@@ -133,16 +133,19 @@  static inline bool lpae_is_valid(lpae_t pte)
     return pte.walk.valid;
 }
 
+/*
+ * lpae_is_* don't check the valid bit. This gives an opportunity for the
+ * callers to operate on the entry even if they are not valid. For
+ * instance to store information in advance.
+ */
 static inline bool lpae_is_table(lpae_t pte, unsigned int level)
 {
-    return (level < 3) && lpae_is_valid(pte) && pte.walk.table;
+    return (level < 3) && pte.walk.table;
 }
 
 static inline bool lpae_is_mapping(lpae_t pte, unsigned int level)
 {
-    if ( !lpae_is_valid(pte) )
-        return false;
-    else if ( level == 3 )
+    if ( level == 3 )
         return pte.walk.table;
     else
         return !pte.walk.table;