diff mbox series

[Xen-devel,RFC,09/16] xen/arm: p2m: Introduce a function to resolve translation fault

Message ID 20181008183352.16291-10-julien.grall@arm.com
State New
Headers show
Series xen/arm: Implement Set/Way operations | expand

Commit Message

Julien Grall Oct. 8, 2018, 6:33 p.m. UTC
Currently a Stage-2 translation fault could happen:
    1) MMIO emulation
    2) When the page-tables have been updated using Break-Before-Make
    3) Page not mapped

A follow-up patch will re-purpose the valid bit in an entry to generate a
translation fault. This would be used to do an action on each entry to
track pages used for a given period.

A new function is introduced to try to resolve a translation fault. This
will include 2) and the new way to generate fault explained above.

To avoid invalidating all the page-table entries in one go, it is
possible to invalidate the top-level table and then, on trap, invalidate
the table one level down. This will be repeated until a block/page entry
has been reached.

At the moment, no action is done when reaching a block/page entry
other than setting the valid bit to 1.

Signed-off-by: Julien Grall <julien.grall@arm.com>
---
 xen/arch/arm/p2m.c   | 127 +++++++++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/traps.c |   7 +--
 2 files changed, 131 insertions(+), 3 deletions(-)

Comments

Stefano Stabellini Nov. 2, 2018, 11:27 p.m. UTC | #1
On Mon, 8 Oct 2018, Julien Grall wrote:
> Currently a Stage-2 translation fault could happen:
>     1) MMIO emulation
>     2) When the page-tables is been updated using Break-Before-Make
                              ^ have

>     3) Page not mapped
> 
> A follow-up patch will re-purpose the valid bit in an entry to generate
> translation fault. This would be used to do an action on each entries to
                                                                ^entry

> track page used for a given period.
        ^pages


> 
> A new function is introduced to try to resolve a translation fault. This
> will include 2) and the new way to generate fault explained above.

I can see the code does what you describe, but I don't understand why we
are doing this. What is missing in the commit message is the explanation
of the relationship between the future goal of repurposing the valid bit
and the introduction of a function to handle Break-Before-Make stage2
faults. Does it fix an issue with Break-Before-Make that we currently
have? Or it becomes needed due to the repurposing of valid? If so, why?


> To avoid invalidating all the page-tables entries in one go. It is
> possible to invalidate the top-level table and then on trap invalidate
> the table one-level down. This will be repeated until a block/page entry
> has been reached.
> 
> At the moment, there are no action done when reaching a block/page entry
> but setting the valid bit to 1.
> 
> Signed-off-by: Julien Grall <julien.grall@arm.com>
> ---
>  xen/arch/arm/p2m.c   | 127 +++++++++++++++++++++++++++++++++++++++++++++++++++
>  xen/arch/arm/traps.c |   7 +--
>  2 files changed, 131 insertions(+), 3 deletions(-)
> 
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index ec956bc151..af445d3313 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -1043,6 +1043,133 @@ int p2m_set_entry(struct p2m_domain *p2m,
>      return rc;
>  }
>  
> +/* Invalidate all entries in the table. The p2m should be write locked. */
> +static void p2m_invalidate_table(struct p2m_domain *p2m, mfn_t mfn)
> +{
> +    lpae_t *table;
> +    unsigned int i;
> +
> +    ASSERT(p2m_is_write_locked(p2m));
> +
> +    table = map_domain_page(mfn);
> +
> +    for ( i = 0; i < LPAE_ENTRIES; i++ )
> +    {
> +        lpae_t pte = table[i];
> +
> +        pte.p2m.valid = 0;
> +
> +        p2m_write_pte(&table[i], pte, p2m->clean_pte);
> +    }
> +
> +    unmap_domain_page(table);
> +
> +    p2m->need_flush = true;
> +}
> +
> +bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn)
> +{
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +    unsigned int level = 0;
> +    bool resolved = false;
> +    lpae_t entry, *table;
> +    paddr_t addr = gfn_to_gaddr(gfn);
> +
> +    /* Convenience aliases */
> +    const unsigned int offsets[4] = {
> +        zeroeth_table_offset(addr),
> +        first_table_offset(addr),
> +        second_table_offset(addr),
> +        third_table_offset(addr)
> +    };
> +
> +    p2m_write_lock(p2m);
> +
> +    /* This gfn is higher than the highest the p2m map currently holds */
> +    if ( gfn_x(gfn) > gfn_x(p2m->max_mapped_gfn) )
> +        goto out;
> +
> +    table = p2m_get_root_pointer(p2m, gfn);
> +    /*
> +     * The table should always be non-NULL because the gfn is below
> +     * p2m->max_mapped_gfn and the root table pages are always present.
> +     */
> +    BUG_ON(table == NULL);
> +
> +    /*
> +     * Go down the page-tables until an entry has the valid bit unset or
> +     * a block/page entry has been hit.
> +     */
> +    for ( level = P2M_ROOT_LEVEL; level <= 3; level++ )
> +    {
> +        int rc;
> +
> +        entry = table[offsets[level]];
> +
> +        if ( level == 3 )
> +            break;
> +
> +        /* Stop as soon as we hit an entry with the valid bit unset. */
> +        if ( !lpae_is_valid(entry) )
> +            break;
> +
> +        rc = p2m_next_level(p2m, true, level, &table, offsets[level]);
> +        if ( rc == GUEST_TABLE_MAP_FAILED )
> +            goto out_unmap;
> +        else if ( rc != GUEST_TABLE_NORMAL_PAGE )

why not rc == GUEST_TABLE_SUPER_PAGE?

> +            break;
> +    }
> +
> +    /*
> +     * If the valid bit of the entry is set, it means someone was playing with
> +     * the Stage-2 page table. Nothing to do and mark the fault as resolved.
> +     */
> +    if ( lpae_is_valid(entry) )
> +    {
> +        resolved = true;
> +        goto out_unmap;
> +    }
> +
> +    /*
> +     * The valid bit is unset. If the entry is still not valid then the fault
> +     * cannot be resolved, exit and report it.
> +     */
> +    if ( !p2m_is_valid(entry) )
> +        goto out_unmap;
> +
> +    /*
> +     * Now we have an entry with valid bit unset, but still valid from
> +     * the P2M point of view.
> +     *
> +     * For entry pointing to a table, the table will be invalidated.
              ^ entries


> +     * For entry pointing to a block/page, no work to do for now.
              ^ entries


> +     */
> +    if ( lpae_is_table(entry, level) )
> +        p2m_invalidate_table(p2m, lpae_get_mfn(entry));

Maybe because I haven't read the rest of the patches, it is not clear to
me why in the case of an entry pointing to a table we need to invalidate
it, and otherwise set valid to 1.


> +    /*
> +     * Now that the work on the entry is done, set the valid bit to prevent
> +     * another fault on that entry.
> +     */
> +    resolved = true;
> +    entry.p2m.valid = 1;
> +
> +    p2m_write_pte(table + offsets[level], entry, p2m->clean_pte);
> +
> +    /*
> +     * No need to flush the TLBs as the modified entry had the valid bit
> +     * unset.
> +     */
> +
> +out_unmap:
> +    unmap_domain_page(table);
> +
> +out:
> +    p2m_write_unlock(p2m);
> +
> +    return resolved;
> +}
> +
>  static inline int p2m_insert_mapping(struct domain *d,
>                                       gfn_t start_gfn,
>                                       unsigned long nr,
> diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
> index b40798084d..169b57cb6b 100644
> --- a/xen/arch/arm/traps.c
> +++ b/xen/arch/arm/traps.c
> @@ -1882,6 +1882,8 @@ static bool try_map_mmio(gfn_t gfn)
>      return !map_regions_p2mt(d, gfn, 1, mfn, p2m_mmio_direct_c);
>  }
>  
> +bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn);

Should be in an header file?


>  static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs,
>                                         const union hsr hsr)
>  {
> @@ -1894,7 +1896,6 @@ static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs,
>      vaddr_t gva;
>      paddr_t gpa;
>      uint8_t fsc = xabt.fsc & ~FSC_LL_MASK;
> -    mfn_t mfn;
>      bool is_data = (hsr.ec == HSR_EC_DATA_ABORT_LOWER_EL);
>  
>      /*
> @@ -1977,8 +1978,8 @@ static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs,
>           * with the Stage-2 page table. Walk the Stage-2 PT to check
>           * if the entry exists. If it's the case, return to the guest
>           */
> -        mfn = gfn_to_mfn(current->domain, gaddr_to_gfn(gpa));
> -        if ( !mfn_eq(mfn, INVALID_MFN) )
> +        if ( p2m_resolve_translation_fault(current->domain,
> +                                           gaddr_to_gfn(gpa)) )
>              return;
>  
>          if ( is_data && try_map_mmio(gaddr_to_gfn(gpa)) )
> -- 
> 2.11.0
>
Julien Grall Nov. 5, 2018, 11:55 a.m. UTC | #2
Hi,

On 02/11/2018 23:27, Stefano Stabellini wrote:
> On Mon, 8 Oct 2018, Julien Grall wrote:
>> Currently a Stage-2 translation fault could happen:
>>      1) MMIO emulation
>>      2) When the page-tables is been updated using Break-Before-Make
>                                ^ have
> 
>>      3) Page not mapped
>>
>> A follow-up patch will re-purpose the valid bit in an entry to generate
>> translation fault. This would be used to do an action on each entries to
>                                                                  ^entry
> 
>> track page used for a given period.
>          ^pages
> 
> 
>>
>> A new function is introduced to try to resolve a translation fault. This
>> will include 2) and the new way to generate fault explained above.
> 
> I can see the code does what you describe, but I don't understand why we
> are doing this. What is missing in the commit message is the explanation
> of the relationship between the future goal of repurposing the valid bit
> and the introduction of a function to handle Break-Before-Make stage2
> faults. Does it fix an issue with Break-Before-Make that we currently
> have? Or it becomes needed due to the repurposing of valid? If so, why?

This does not fix any issue with BBM. The valid bit adds a 4th reason for 
translation faults. Both BBM and the valid bit will require walking the page-tables.

For the valid bit, we will need to walk the page-table in order to fix up the 
entry (i.e. set the valid bit). We also can't use p2m_lookup(...) as it only tells 
you the mapping exists; the valid bit may still not be set.

So we need to provide a new helper to walk the page-table and fixup an entry.

>> To avoid invalidating all the page-tables entries in one go. It is
>> possible to invalidate the top-level table and then on trap invalidate
>> the table one-level down. This will be repeated until a block/page entry
>> has been reached.
>>
>> At the moment, there are no action done when reaching a block/page entry
>> but setting the valid bit to 1.
>>
>> Signed-off-by: Julien Grall <julien.grall@arm.com>
>> ---
>>   xen/arch/arm/p2m.c   | 127 +++++++++++++++++++++++++++++++++++++++++++++++++++
>>   xen/arch/arm/traps.c |   7 +--
>>   2 files changed, 131 insertions(+), 3 deletions(-)
>>
>> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
>> index ec956bc151..af445d3313 100644
>> --- a/xen/arch/arm/p2m.c
>> +++ b/xen/arch/arm/p2m.c
>> @@ -1043,6 +1043,133 @@ int p2m_set_entry(struct p2m_domain *p2m,
>>       return rc;
>>   }
>>   
>> +/* Invalidate all entries in the table. The p2m should be write locked. */
>> +static void p2m_invalidate_table(struct p2m_domain *p2m, mfn_t mfn)
>> +{
>> +    lpae_t *table;
>> +    unsigned int i;
>> +
>> +    ASSERT(p2m_is_write_locked(p2m));
>> +
>> +    table = map_domain_page(mfn);
>> +
>> +    for ( i = 0; i < LPAE_ENTRIES; i++ )
>> +    {
>> +        lpae_t pte = table[i];
>> +
>> +        pte.p2m.valid = 0;
>> +
>> +        p2m_write_pte(&table[i], pte, p2m->clean_pte);
>> +    }
>> +
>> +    unmap_domain_page(table);
>> +
>> +    p2m->need_flush = true;
>> +}
>> +
>> +bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn)
>> +{
>> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
>> +    unsigned int level = 0;
>> +    bool resolved = false;
>> +    lpae_t entry, *table;
>> +    paddr_t addr = gfn_to_gaddr(gfn);
>> +
>> +    /* Convenience aliases */
>> +    const unsigned int offsets[4] = {
>> +        zeroeth_table_offset(addr),
>> +        first_table_offset(addr),
>> +        second_table_offset(addr),
>> +        third_table_offset(addr)
>> +    };
>> +
>> +    p2m_write_lock(p2m);
>> +
>> +    /* This gfn is higher than the highest the p2m map currently holds */
>> +    if ( gfn_x(gfn) > gfn_x(p2m->max_mapped_gfn) )
>> +        goto out;
>> +
>> +    table = p2m_get_root_pointer(p2m, gfn);
>> +    /*
>> +     * The table should always be non-NULL because the gfn is below
>> +     * p2m->max_mapped_gfn and the root table pages are always present.
>> +     */
>> +    BUG_ON(table == NULL);
>> +
>> +    /*
>> +     * Go down the page-tables until an entry has the valid bit unset or
>> +     * a block/page entry has been hit.
>> +     */
>> +    for ( level = P2M_ROOT_LEVEL; level <= 3; level++ )
>> +    {
>> +        int rc;
>> +
>> +        entry = table[offsets[level]];
>> +
>> +        if ( level == 3 )
>> +            break;
>> +
>> +        /* Stop as soon as we hit an entry with the valid bit unset. */
>> +        if ( !lpae_is_valid(entry) )
>> +            break;
>> +
>> +        rc = p2m_next_level(p2m, true, level, &table, offsets[level]);
>> +        if ( rc == GUEST_TABLE_MAP_FAILED )
>> +            goto out_unmap;
>> +        else if ( rc != GUEST_TABLE_NORMAL_PAGE )
> 
> why not rc == GUEST_TABLE_SUPER_PAGE?

The logic has been taken from p2m_get_entry(). It makes sense to use != here as 
you only want to continue the loop if you are on a table. So it is clearer why 
you continue.

> 
>> +            break;
>> +    }
>> +
>> +    /*
>> +     * If the valid bit of the entry is set, it means someone was playing with
>> +     * the Stage-2 page table. Nothing to do and mark the fault as resolved.
>> +     */
>> +    if ( lpae_is_valid(entry) )
>> +    {
>> +        resolved = true;
>> +        goto out_unmap;
>> +    }
>> +
>> +    /*
>> +     * The valid bit is unset. If the entry is still not valid then the fault
>> +     * cannot be resolved, exit and report it.
>> +     */
>> +    if ( !p2m_is_valid(entry) )
>> +        goto out_unmap;
>> +
>> +    /*
>> +     * Now we have an entry with valid bit unset, but still valid from
>> +     * the P2M point of view.
>> +     *
>> +     * For entry pointing to a table, the table will be invalidated.
>                ^ entries
> 
> 
>> +     * For entry pointing to a block/page, no work to do for now.
>                ^ entries

I am not entirely sure why it should be plural here. We are dealing with only 
one entry here.

> 
> 
>> +     */
>> +    if ( lpae_is_table(entry, level) )
>> +        p2m_invalidate_table(p2m, lpae_get_mfn(entry));
> 
> Maybe because I haven't read the rest of the patches, it is not clear to
> me why in the case of an entry pointing to a table we need to invalidate
> it, and otherwise set valid to 1.

This was written in the commit message:

"To avoid invalidating all the page-tables entries in one go. It is
possible to invalidate the top-level table and then on trap invalidate
the table one-level down. This will be repeated until a block/page entry
has been reached."

It is mostly to spread the cost of invalidating the page-tables. With this 
solution, you only need to clear the valid bit of the top-level entries to 
invalidate the full P2M.

On the first access, you will trap, set the entry of the first "invalid entry", 
and invalidate the next level if necessary.

The access will then be retried. If trapped, the process is repeated until all 
the entries are valid.

It is possible to optimize it, avoiding intermediate traps where possible. But I 
would not bother looking at that for now. Indeed, this will be used to lower 
the cost of set/way cache maintenance emulation. Any guest using that 
already knows that a big cost will be incurred.

> 
> 
>> +    /*
>> +     * Now that the work on the entry is done, set the valid bit to prevent
>> +     * another fault on that entry.
>> +     */
>> +    resolved = true;
>> +    entry.p2m.valid = 1;
>> +
>> +    p2m_write_pte(table + offsets[level], entry, p2m->clean_pte);
>> +
>> +    /*
>> +     * No need to flush the TLBs as the modified entry had the valid bit
>> +     * unset.
>> +     */
>> +
>> +out_unmap:
>> +    unmap_domain_page(table);
>> +
>> +out:
>> +    p2m_write_unlock(p2m);
>> +
>> +    return resolved;
>> +}
>> +
>>   static inline int p2m_insert_mapping(struct domain *d,
>>                                        gfn_t start_gfn,
>>                                        unsigned long nr,
>> diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
>> index b40798084d..169b57cb6b 100644
>> --- a/xen/arch/arm/traps.c
>> +++ b/xen/arch/arm/traps.c
>> @@ -1882,6 +1882,8 @@ static bool try_map_mmio(gfn_t gfn)
>>       return !map_regions_p2mt(d, gfn, 1, mfn, p2m_mmio_direct_c);
>>   }
>>   
>> +bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn);
> 
> Should be in an header file?

Yes. I will move it.

Cheers,
Stefano Stabellini Nov. 5, 2018, 5:56 p.m. UTC | #3
On Mon, 5 Nov 2018, Julien Grall wrote:
> On 02/11/2018 23:27, Stefano Stabellini wrote:
> > On Mon, 8 Oct 2018, Julien Grall wrote:
> > > Currently a Stage-2 translation fault could happen:
> > >      1) MMIO emulation
> > >      2) When the page-tables is been updated using Break-Before-Make
> >                                ^ have
> > 
> > >      3) Page not mapped
> > > 
> > > A follow-up patch will re-purpose the valid bit in an entry to generate
> > > translation fault. This would be used to do an action on each entries to
> >                                                                  ^entry
> > 
> > > track page used for a given period.
> >          ^pages
> > 
> > 
> > > 
> > > A new function is introduced to try to resolve a translation fault. This
> > > will include 2) and the new way to generate fault explained above.
> > 
> > I can see the code does what you describe, but I don't understand why we
> > are doing this. What is missing in the commit message is the explanation
> > of the relationship between the future goal of repurposing the valid bit
> > and the introduction of a function to handle Break-Before-Make stage2
> > faults. Does it fix an issue with Break-Before-Make that we currently
> > have? Or it becomes needed due to the repurposing of valid? If so, why?
> 
> This does not fix any issue with BBM. The valid bit adds a 4th reasons for
> translation fault. Both BBM and the valid bit will require to walk the
> page-tables.
> 
> For the valid bit, we will need to walk the page-table in order to fixup the
> entry (i.e set valid bit). We also can't use p2m_lookup(...) has it only tell
> you the mapping exists, the valid bit may still not be set.
> 
> So we need to provide a new helper to walk the page-table and fixup an entry.

OK. Please expand a bit the commit message.


> > > To avoid invalidating all the page-tables entries in one go. It is
> > > possible to invalidate the top-level table and then on trap invalidate
> > > the table one-level down. This will be repeated until a block/page entry
> > > has been reached.
> > > 
> > > At the moment, there are no action done when reaching a block/page entry
> > > but setting the valid bit to 1.
> > > 
> > > Signed-off-by: Julien Grall <julien.grall@arm.com>
> > > ---
> > >   xen/arch/arm/p2m.c   | 127
> > > +++++++++++++++++++++++++++++++++++++++++++++++++++
> > >   xen/arch/arm/traps.c |   7 +--
> > >   2 files changed, 131 insertions(+), 3 deletions(-)
> > > 
> > > diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> > > index ec956bc151..af445d3313 100644
> > > --- a/xen/arch/arm/p2m.c
> > > +++ b/xen/arch/arm/p2m.c
> > > @@ -1043,6 +1043,133 @@ int p2m_set_entry(struct p2m_domain *p2m,
> > >       return rc;
> > >   }
> > >   +/* Invalidate all entries in the table. The p2m should be write locked.
> > > */
> > > +static void p2m_invalidate_table(struct p2m_domain *p2m, mfn_t mfn)
> > > +{
> > > +    lpae_t *table;
> > > +    unsigned int i;
> > > +
> > > +    ASSERT(p2m_is_write_locked(p2m));
> > > +
> > > +    table = map_domain_page(mfn);
> > > +
> > > +    for ( i = 0; i < LPAE_ENTRIES; i++ )
> > > +    {
> > > +        lpae_t pte = table[i];
> > > +
> > > +        pte.p2m.valid = 0;
> > > +
> > > +        p2m_write_pte(&table[i], pte, p2m->clean_pte);
> > > +    }
> > > +
> > > +    unmap_domain_page(table);
> > > +
> > > +    p2m->need_flush = true;
> > > +}
> > > +
> > > +bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn)
> > > +{
> > > +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> > > +    unsigned int level = 0;
> > > +    bool resolved = false;
> > > +    lpae_t entry, *table;
> > > +    paddr_t addr = gfn_to_gaddr(gfn);
> > > +
> > > +    /* Convenience aliases */
> > > +    const unsigned int offsets[4] = {
> > > +        zeroeth_table_offset(addr),
> > > +        first_table_offset(addr),
> > > +        second_table_offset(addr),
> > > +        third_table_offset(addr)
> > > +    };
> > > +
> > > +    p2m_write_lock(p2m);
> > > +
> > > +    /* This gfn is higher than the highest the p2m map currently holds */
> > > +    if ( gfn_x(gfn) > gfn_x(p2m->max_mapped_gfn) )
> > > +        goto out;
> > > +
> > > +    table = p2m_get_root_pointer(p2m, gfn);
> > > +    /*
> > > +     * The table should always be non-NULL because the gfn is below
> > > +     * p2m->max_mapped_gfn and the root table pages are always present.
> > > +     */
> > > +    BUG_ON(table == NULL);
> > > +
> > > +    /*
> > > +     * Go down the page-tables until an entry has the valid bit unset or
> > > +     * a block/page entry has been hit.
> > > +     */
> > > +    for ( level = P2M_ROOT_LEVEL; level <= 3; level++ )
> > > +    {
> > > +        int rc;
> > > +
> > > +        entry = table[offsets[level]];
> > > +
> > > +        if ( level == 3 )
> > > +            break;
> > > +
> > > +        /* Stop as soon as we hit an entry with the valid bit unset. */
> > > +        if ( !lpae_is_valid(entry) )
> > > +            break;
> > > +
> > > +        rc = p2m_next_level(p2m, true, level, &table, offsets[level]);
> > > +        if ( rc == GUEST_TABLE_MAP_FAILED )
> > > +            goto out_unmap;
> > > +        else if ( rc != GUEST_TABLE_NORMAL_PAGE )
> > 
> > why not rc == GUEST_TABLE_SUPER_PAGE?
> 
> The logic has been taken from p2m_get_entry(). It makes sense to use != here
> as you only want to continue the loop if you are on a table. So it is clearer
> why you continue.

OK


> > 
> > > +            break;
> > > +    }
> > > +
> > > +    /*
> > > +     * If the valid bit of the entry is set, it means someone was playing
> > > with
> > > +     * the Stage-2 page table. Nothing to do and mark the fault as
> > > resolved.
> > > +     */
> > > +    if ( lpae_is_valid(entry) )
> > > +    {
> > > +        resolved = true;
> > > +        goto out_unmap;
> > > +    }
> > > +
> > > +    /*
> > > +     * The valid bit is unset. If the entry is still not valid then the
> > > fault
> > > +     * cannot be resolved, exit and report it.
> > > +     */
> > > +    if ( !p2m_is_valid(entry) )
> > > +        goto out_unmap;
> > > +
> > > +    /*
> > > +     * Now we have an entry with valid bit unset, but still valid from
> > > +     * the P2M point of view.
> > > +     *
> > > +     * For entry pointing to a table, the table will be invalidated.
> >                ^ entries
> > 
> > 
> > > +     * For entry pointing to a block/page, no work to do for now.
> >                ^ entries
> 
> I am not entirely sure why it should be plural here. We are dealing with only
> one entry it.

I was trying to make the grammar work as a generic sentence. To make it
singular we would have to remove "For":

  If an entry is pointing to a table, the table will be invalidated.
  If an entry is pointing to a block/page, no work to do for now.


> > 
> > > +     */
> > > +    if ( lpae_is_table(entry, level) )
> > > +        p2m_invalidate_table(p2m, lpae_get_mfn(entry));
> > 
> > Maybe because I haven't read the rest of the patches, it is not clear to
> > me why in the case of an entry pointing to a table we need to invalidate
> > it, and otherwise set valid to 1.
> 
> This was written in the commit message:
> 
> "To avoid invalidating all the page-tables entries in one go. It is
> possible to invalidate the top-level table and then on trap invalidate
> the table one-level down. This will be repeated until a block/page entry
> has been reached."
> 
> It is mostly to spread the cost of invalidating the page-tables. With this
> solution, you only need to clear the valid bit of the top-level entries to
> invalidate the full P2M.
> 
> On the first access, you will trap, set the entry of the first "invalid
> entry", and invalidate the next level if necessary.
> 
> The access will then be retried. If trapped, the process is repeated until all
> the entries are valid.
> 
> It is possible to optimize it, avoiding intermediate trap when necessary. But
> I would not bother looking at that for now. Indeed, this will be used for
> lowering down the cost of set/way cache maintenance emulation. Any guest using
> that already knows that a big cost will incur.

So instead of walking the page table in Xen, finding all the leaf
(level==3) entries that we need to set !valid, we just set !valid one of
the higher levels entries. On access, we'll trap in Xen, then set the
higher level entry back to valid but the direct children to !valid. And
we'll cycle again through this until the table entries are valid and the
leaf entry is the only invalid one: at that point we'll only set it to
valid and the whole translation for that address is valid again.

Very inefficient, but very simple to implement in Xen. A good way to
penalize guests that are using instructions they should not be using :-)

All right, please expand on the explanation in the commit message. It is
also worthy of a in-code comment on top of
p2m_resolve_translation_fault.

One more comment below.


> > 
> > > +    /*
> > > +     * Now that the work on the entry is done, set the valid bit to
> > > prevent
> > > +     * another fault on that entry.
> > > +     */
> > > +    resolved = true;
> > > +    entry.p2m.valid = 1;
> > > +
> > > +    p2m_write_pte(table + offsets[level], entry, p2m->clean_pte);
> > > +
> > > +    /*
> > > +     * No need to flush the TLBs as the modified entry had the valid bit
> > > +     * unset.
> > > +     */
> > > +
> > > +out_unmap:
> > > +    unmap_domain_page(table);
> > > +
> > > +out:
> > > +    p2m_write_unlock(p2m);
> > > +
> > > +    return resolved;
> > > +}
> > > +
> > >   static inline int p2m_insert_mapping(struct domain *d,
> > >                                        gfn_t start_gfn,
> > >                                        unsigned long nr,


We probably want to update the comment on top of the call to
p2m_resolve_translation_fault:


> @@ -1977,8 +1978,8 @@ static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs,
>           * with the Stage-2 page table. Walk the Stage-2 PT to check
>           * if the entry exists. If it's the case, return to the guest
>           */
> -        mfn = gfn_to_mfn(current->domain, gaddr_to_gfn(gpa));
> -        if ( !mfn_eq(mfn, INVALID_MFN) )
> +        if ( p2m_resolve_translation_fault(current->domain,
> +                                           gaddr_to_gfn(gpa)) )
Julien Grall Nov. 6, 2018, 2:20 p.m. UTC | #4
Hi Stefano,

On 05/11/2018 17:56, Stefano Stabellini wrote:
> On Mon, 5 Nov 2018, Julien Grall wrote:
>> On 02/11/2018 23:27, Stefano Stabellini wrote:
>>> On Mon, 8 Oct 2018, Julien Grall wrote:
>>>> Currently a Stage-2 translation fault could happen:
>>>>       1) MMIO emulation
>>>>       2) When the page-tables is been updated using Break-Before-Make
>>>                                 ^ have
>>>
>>>>       3) Page not mapped
>>>>
>>>> A follow-up patch will re-purpose the valid bit in an entry to generate
>>>> translation fault. This would be used to do an action on each entries to
>>>                                                                   ^entry
>>>
>>>> track page used for a given period.
>>>           ^pages
>>>
>>>
>>>>
>>>> A new function is introduced to try to resolve a translation fault. This
>>>> will include 2) and the new way to generate fault explained above.
>>>
>>> I can see the code does what you describe, but I don't understand why we
>>> are doing this. What is missing in the commit message is the explanation
>>> of the relationship between the future goal of repurposing the valid bit
>>> and the introduction of a function to handle Break-Before-Make stage2
>>> faults. Does it fix an issue with Break-Before-Make that we currently
>>> have? Or it becomes needed due to the repurposing of valid? If so, why?
>>
>> This does not fix any issue with BBM. The valid bit adds a 4th reasons for
>> translation fault. Both BBM and the valid bit will require to walk the
>> page-tables.
>>
>> For the valid bit, we will need to walk the page-table in order to fixup the
>> entry (i.e set valid bit). We also can't use p2m_lookup(...) has it only tell
>> you the mapping exists, the valid bit may still not be set.
>>
>> So we need to provide a new helper to walk the page-table and fixup an entry.
> 
> OK. Please expand a bit the commit message.

Sure.

>>>
>>>> +            break;
>>>> +    }
>>>> +
>>>> +    /*
>>>> +     * If the valid bit of the entry is set, it means someone was playing
>>>> with
>>>> +     * the Stage-2 page table. Nothing to do and mark the fault as
>>>> resolved.
>>>> +     */
>>>> +    if ( lpae_is_valid(entry) )
>>>> +    {
>>>> +        resolved = true;
>>>> +        goto out_unmap;
>>>> +    }
>>>> +
>>>> +    /*
>>>> +     * The valid bit is unset. If the entry is still not valid then the
>>>> fault
>>>> +     * cannot be resolved, exit and report it.
>>>> +     */
>>>> +    if ( !p2m_is_valid(entry) )
>>>> +        goto out_unmap;
>>>> +
>>>> +    /*
>>>> +     * Now we have an entry with valid bit unset, but still valid from
>>>> +     * the P2M point of view.
>>>> +     *
>>>> +     * For entry pointing to a table, the table will be invalidated.
>>>                 ^ entries
>>>
>>>
>>>> +     * For entry pointing to a block/page, no work to do for now.
>>>                 ^ entries
>>
>> I am not entirely sure why it should be plural here. We are dealing with only
>> one entry it.
> 
> I was trying to make the grammar work as a generic sentence. To make it
> singular we would have to remove "For":
> 
>    If an entry is pointing to a table, the table will be invalidated.
>    If an entry is pointing to a block/page, no work to do for now.

I will use the singular version.

> 
> 
>>>
>>>> +     */
>>>> +    if ( lpae_is_table(entry, level) )
>>>> +        p2m_invalidate_table(p2m, lpae_get_mfn(entry));
>>>
>>> Maybe because I haven't read the rest of the patches, it is not clear to
>>> me why in the case of an entry pointing to a table we need to invalidate
>>> it, and otherwise set valid to 1.
>>
>> This was written in the commit message:
>>
>> "To avoid invalidating all the page-tables entries in one go. It is
>> possible to invalidate the top-level table and then on trap invalidate
>> the table one-level down. This will be repeated until a block/page entry
>> has been reached."
>>
>> It is mostly to spread the cost of invalidating the page-tables. With this
>> solution, you only need to clear the valid bit of the top-level entries to
>> invalidate the full P2M.
>>
>> On the first access, you will trap, set the entry of the first "invalid
>> entry", and invalidate the next level if necessary.
>>
>> The access will then be retried. If trapped, the process is repeated until all
>> the entries are valid.
>>
>> It is possible to optimize it, avoiding intermediate traps when possible. But
>> I would not bother looking at that for now. Indeed, this will be used for
>> lowering down the cost of set/way cache maintenance emulation. Any guest using
>> that already knows that a big cost will incur.
> 
> So instead of walking the page table in Xen, finding all the leaf
> (level==3) entries that we need to set !valid, we just set !valid one of
> the higher levels entries. On access, we'll trap in Xen, then set the
> higher level entry back to valid but the direct children to !valid. And
> we'll cycle again through this until the table entries are valid and the
> leaf entry is the only invalid one: at that point we'll only set it to
> valid and the whole translation for that address is valid again.

That's correct.

> 
> Very inefficient, but very simple to implement in Xen. A good way to penalize guests that are using instructions they should not be using :-)

I am not convinced you will see a major slowdown, as you will quickly go through 
all the levels, ending up trapping only once after a while. So the impact is mostly at boot.

> 
> All right, please expand on the explanation in the commit message. It is
> also worthy of an in-code comment on top of
> p2m_resolve_translation_fault.

I will expand it.

> 
> One more comment below.
> 
> 
>>>
>>>> +    /*
>>>> +     * Now that the work on the entry is done, set the valid bit to
>>>> prevent
>>>> +     * another fault on that entry.
>>>> +     */
>>>> +    resolved = true;
>>>> +    entry.p2m.valid = 1;
>>>> +
>>>> +    p2m_write_pte(table + offsets[level], entry, p2m->clean_pte);
>>>> +
>>>> +    /*
>>>> +     * No need to flush the TLBs as the modified entry had the valid bit
>>>> +     * unset.
>>>> +     */
>>>> +
>>>> +out_unmap:
>>>> +    unmap_domain_page(table);
>>>> +
>>>> +out:
>>>> +    p2m_write_unlock(p2m);
>>>> +
>>>> +    return resolved;
>>>> +}
>>>> +
>>>>    static inline int p2m_insert_mapping(struct domain *d,
>>>>                                         gfn_t start_gfn,
>>>>                                         unsigned long nr,
> 
> 
> We probably want to update the comment on top of the call to
> p2m_resolve_translation_fault:

Whoops. I will fix it.

> 
> 
>> @@ -1977,8 +1978,8 @@ static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs,
>>            * with the Stage-2 page table. Walk the Stage-2 PT to check
>>            * if the entry exists. If it's the case, return to the guest
>>            */
>> -        mfn = gfn_to_mfn(current->domain, gaddr_to_gfn(gpa));
>> -        if ( !mfn_eq(mfn, INVALID_MFN) )
>> +        if ( p2m_resolve_translation_fault(current->domain,
>> +                                           gaddr_to_gfn(gpa)) )
> 

Cheers,
Julien Grall Nov. 12, 2018, 5:59 p.m. UTC | #5
Hi Stefano,

On 11/6/18 2:20 PM, Julien Grall wrote:
> On 05/11/2018 17:56, Stefano Stabellini wrote:
>> On Mon, 5 Nov 2018, Julien Grall wrote:
>>> On 02/11/2018 23:27, Stefano Stabellini wrote:
>>>> On Mon, 8 Oct 2018, Julien Grall wrote:
>>>>
>>>>> +    /*
>>>>> +     * Now that the work on the entry is done, set the valid bit to
>>>>> prevent
>>>>> +     * another fault on that entry.
>>>>> +     */
>>>>> +    resolved = true;
>>>>> +    entry.p2m.valid = 1;
>>>>> +
>>>>> +    p2m_write_pte(table + offsets[level], entry, p2m->clean_pte);
>>>>> +
>>>>> +    /*
>>>>> +     * No need to flush the TLBs as the modified entry had the 
>>>>> valid bit
>>>>> +     * unset.
>>>>> +     */
>>>>> +
>>>>> +out_unmap:
>>>>> +    unmap_domain_page(table);
>>>>> +
>>>>> +out:
>>>>> +    p2m_write_unlock(p2m);
>>>>> +
>>>>> +    return resolved;
>>>>> +}
>>>>> +
>>>>>    static inline int p2m_insert_mapping(struct domain *d,
>>>>>                                         gfn_t start_gfn,
>>>>>                                         unsigned long nr,
>>
>>
>> We probably want to update the comment on top of the call to
>> p2m_resolve_translation_fault:
> 
> Whoops. I will fix it.

Looking at this again. I think the comment on top of the call to 
p2m_resolve_translation_fault still makes sense. Feel free to suggest an 
update of the comment if you think it is not enough.

Cheers,
Stefano Stabellini Nov. 12, 2018, 11:36 p.m. UTC | #6
On Mon, 12 Nov 2018, Julien Grall wrote:
> Hi Stefano,

> 

> On 11/6/18 2:20 PM, Julien Grall wrote:

> > On 05/11/2018 17:56, Stefano Stabellini wrote:

> > > On Mon, 5 Nov 2018, Julien Grall wrote:

> > > > On 02/11/2018 23:27, Stefano Stabellini wrote:

> > > > > On Mon, 8 Oct 2018, Julien Grall wrote:

> > > > > 

> > > > > > +    /*

> > > > > > +     * Now that the work on the entry is done, set the valid bit to

> > > > > > prevent

> > > > > > +     * another fault on that entry.

> > > > > > +     */

> > > > > > +    resolved = true;

> > > > > > +    entry.p2m.valid = 1;

> > > > > > +

> > > > > > +    p2m_write_pte(table + offsets[level], entry, p2m->clean_pte);

> > > > > > +

> > > > > > +    /*

> > > > > > +     * No need to flush the TLBs as the modified entry had the

> > > > > > valid bit

> > > > > > +     * unset.

> > > > > > +     */

> > > > > > +

> > > > > > +out_unmap:

> > > > > > +    unmap_domain_page(table);

> > > > > > +

> > > > > > +out:

> > > > > > +    p2m_write_unlock(p2m);

> > > > > > +

> > > > > > +    return resolved;

> > > > > > +}

> > > > > > +

> > > > > >    static inline int p2m_insert_mapping(struct domain *d,

> > > > > >                                         gfn_t start_gfn,

> > > > > >                                         unsigned long nr,

> > > 

> > > 

> > > We probably want to update the comment on top of the call to

> > > p2m_resolve_translation_fault:

> > 

> > Whoops. I will fix it.

> 

> Looking at this again. I think the comment on top of the call to

> p2m_resolve_translation_fault still makes sense. Feel free to suggest an

> update of the comment if you think it is not enough.


        /*
         * The PT walk may have failed because someone was playing with
         * the Stage-2 page table or because the valid bit was left
         * unset to track memory accesses. In these cases, we want to
         * return to the guest.
         */
Julien Grall Dec. 4, 2018, 3:35 p.m. UTC | #7
Hi Stefano,

On 11/12/18 11:36 PM, Stefano Stabellini wrote:
> On Mon, 12 Nov 2018, Julien Grall wrote:
>> Hi Stefano,
>>
>> On 11/6/18 2:20 PM, Julien Grall wrote:
>>> On 05/11/2018 17:56, Stefano Stabellini wrote:
>>>> On Mon, 5 Nov 2018, Julien Grall wrote:
>>>>> On 02/11/2018 23:27, Stefano Stabellini wrote:
>>>>>> On Mon, 8 Oct 2018, Julien Grall wrote:
>>>>>>
>>>>>>> +    /*
>>>>>>> +     * Now that the work on the entry is done, set the valid bit to
>>>>>>> prevent
>>>>>>> +     * another fault on that entry.
>>>>>>> +     */
>>>>>>> +    resolved = true;
>>>>>>> +    entry.p2m.valid = 1;
>>>>>>> +
>>>>>>> +    p2m_write_pte(table + offsets[level], entry, p2m->clean_pte);
>>>>>>> +
>>>>>>> +    /*
>>>>>>> +     * No need to flush the TLBs as the modified entry had the
>>>>>>> valid bit
>>>>>>> +     * unset.
>>>>>>> +     */
>>>>>>> +
>>>>>>> +out_unmap:
>>>>>>> +    unmap_domain_page(table);
>>>>>>> +
>>>>>>> +out:
>>>>>>> +    p2m_write_unlock(p2m);
>>>>>>> +
>>>>>>> +    return resolved;
>>>>>>> +}
>>>>>>> +
>>>>>>>     static inline int p2m_insert_mapping(struct domain *d,
>>>>>>>                                          gfn_t start_gfn,
>>>>>>>                                          unsigned long nr,
>>>>
>>>>
>>>> We probably want to update the comment on top of the call to
>>>> p2m_resolve_translation_fault:
>>>
>>> Whoops. I will fix it.
>>
>> Looking at this again. I think the comment on top of the call to
>> p2m_resolve_translation_fault still makes sense. Feel free to suggest an
>> update of the comment if you think it is not enough.
> 
>          /*
>           * The PT walk may have failed because someone was playing with
>           * the Stage-2 page table or because the valid bit was left
>           * unset to track memory accesses. In these cases, we want to
>           * return to the guest.
>           */

Thank you for the suggestion. Thinking a bit more,  I would not be 
surprised we decide to expand p2m_resolve_translation_fault in the 
future. So I decided to go for a more generic comment to avoid stale 
comment:

        /*
         * First check if the translation fault can be resolved by the
         * P2M subsystem. If that's the case, there is nothing else to do.
         */

Cheers,
Stefano Stabellini Dec. 4, 2018, 7:13 p.m. UTC | #8
On Tue, 4 Dec 2018, Julien Grall wrote:
> Hi Stefano,

> 

> On 11/12/18 11:36 PM, Stefano Stabellini wrote:

> > On Mon, 12 Nov 2018, Julien Grall wrote:

> > > Hi Stefano,

> > > 

> > > On 11/6/18 2:20 PM, Julien Grall wrote:

> > > > On 05/11/2018 17:56, Stefano Stabellini wrote:

> > > > > On Mon, 5 Nov 2018, Julien Grall wrote:

> > > > > > On 02/11/2018 23:27, Stefano Stabellini wrote:

> > > > > > > On Mon, 8 Oct 2018, Julien Grall wrote:

> > > > > > > 

> > > > > > > > +    /*

> > > > > > > > +     * Now that the work on the entry is done, set the valid

> > > > > > > > bit to

> > > > > > > > prevent

> > > > > > > > +     * another fault on that entry.

> > > > > > > > +     */

> > > > > > > > +    resolved = true;

> > > > > > > > +    entry.p2m.valid = 1;

> > > > > > > > +

> > > > > > > > +    p2m_write_pte(table + offsets[level], entry,

> > > > > > > > p2m->clean_pte);

> > > > > > > > +

> > > > > > > > +    /*

> > > > > > > > +     * No need to flush the TLBs as the modified entry had the

> > > > > > > > valid bit

> > > > > > > > +     * unset.

> > > > > > > > +     */

> > > > > > > > +

> > > > > > > > +out_unmap:

> > > > > > > > +    unmap_domain_page(table);

> > > > > > > > +

> > > > > > > > +out:

> > > > > > > > +    p2m_write_unlock(p2m);

> > > > > > > > +

> > > > > > > > +    return resolved;

> > > > > > > > +}

> > > > > > > > +

> > > > > > > >     static inline int p2m_insert_mapping(struct domain *d,

> > > > > > > >                                          gfn_t start_gfn,

> > > > > > > >                                          unsigned long nr,

> > > > > 

> > > > > 

> > > > > We probably want to update the comment on top of the call to

> > > > > p2m_resolve_translation_fault:

> > > > 

> > > > Whoops. I will fix it.

> > > 

> > > Looking at this again. I think the comment on top of the call to

> > > p2m_resolve_translation_fault still makes sense. Feel free to suggest an

> > > update of the comment if you think it is not enough.

> > 

> >          /*

> >           * The PT walk may have failed because someone was playing with

> >           * the Stage-2 page table or because the valid bit was left

> >           * unset to track memory accesses. In these cases, we want to

> >           * return to the guest.

> >           */

> 

> Thank you for the suggestion. Thinking a bit more,  I would not be surprised

> we decide to expand p2m_resolve_translation_fault in the future. So I decided

> to go for a more generic comment to avoid stale comment:

> 

>        /*

>     	* First check if the translation fault can be resolved by the

> 	* P2M subsystem. If that's the case nothing else to do.


OK
diff mbox series

Patch

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index ec956bc151..af445d3313 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1043,6 +1043,133 @@  int p2m_set_entry(struct p2m_domain *p2m,
     return rc;
 }
 
+/* Invalidate all entries in the table. The p2m should be write locked. */
+static void p2m_invalidate_table(struct p2m_domain *p2m, mfn_t mfn)
+{
+    lpae_t *table;
+    unsigned int i;
+
+    ASSERT(p2m_is_write_locked(p2m));
+
+    table = map_domain_page(mfn);
+
+    for ( i = 0; i < LPAE_ENTRIES; i++ )
+    {
+        lpae_t pte = table[i];
+
+        pte.p2m.valid = 0;
+
+        p2m_write_pte(&table[i], pte, p2m->clean_pte);
+    }
+
+    unmap_domain_page(table);
+
+    p2m->need_flush = true;
+}
+
+bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    unsigned int level = 0;
+    bool resolved = false;
+    lpae_t entry, *table;
+    paddr_t addr = gfn_to_gaddr(gfn);
+
+    /* Convenience aliases */
+    const unsigned int offsets[4] = {
+        zeroeth_table_offset(addr),
+        first_table_offset(addr),
+        second_table_offset(addr),
+        third_table_offset(addr)
+    };
+
+    p2m_write_lock(p2m);
+
+    /* This gfn is higher than the highest the p2m map currently holds */
+    if ( gfn_x(gfn) > gfn_x(p2m->max_mapped_gfn) )
+        goto out;
+
+    table = p2m_get_root_pointer(p2m, gfn);
+    /*
+     * The table should always be non-NULL because the gfn is below
+     * p2m->max_mapped_gfn and the root table pages are always present.
+     */
+    BUG_ON(table == NULL);
+
+    /*
+     * Go down the page-tables until an entry has the valid bit unset or
+     * a block/page entry has been hit.
+     */
+    for ( level = P2M_ROOT_LEVEL; level <= 3; level++ )
+    {
+        int rc;
+
+        entry = table[offsets[level]];
+
+        if ( level == 3 )
+            break;
+
+        /* Stop as soon as we hit an entry with the valid bit unset. */
+        if ( !lpae_is_valid(entry) )
+            break;
+
+        rc = p2m_next_level(p2m, true, level, &table, offsets[level]);
+        if ( rc == GUEST_TABLE_MAP_FAILED )
+            goto out_unmap;
+        else if ( rc != GUEST_TABLE_NORMAL_PAGE )
+            break;
+    }
+
+    /*
+     * If the valid bit of the entry is set, it means someone was playing with
+     * the Stage-2 page table. Nothing to do and mark the fault as resolved.
+     */
+    if ( lpae_is_valid(entry) )
+    {
+        resolved = true;
+        goto out_unmap;
+    }
+
+    /*
+     * The valid bit is unset. If the entry is still not valid then the fault
+     * cannot be resolved, exit and report it.
+     */
+    if ( !p2m_is_valid(entry) )
+        goto out_unmap;
+
+    /*
+     * Now we have an entry with valid bit unset, but still valid from
+     * the P2M point of view.
+     *
+     * For entry pointing to a table, the table will be invalidated.
+     * For entry pointing to a block/page, no work to do for now.
+     */
+    if ( lpae_is_table(entry, level) )
+        p2m_invalidate_table(p2m, lpae_get_mfn(entry));
+
+    /*
+     * Now that the work on the entry is done, set the valid bit to prevent
+     * another fault on that entry.
+     */
+    resolved = true;
+    entry.p2m.valid = 1;
+
+    p2m_write_pte(table + offsets[level], entry, p2m->clean_pte);
+
+    /*
+     * No need to flush the TLBs as the modified entry had the valid bit
+     * unset.
+     */
+
+out_unmap:
+    unmap_domain_page(table);
+
+out:
+    p2m_write_unlock(p2m);
+
+    return resolved;
+}
+
 static inline int p2m_insert_mapping(struct domain *d,
                                      gfn_t start_gfn,
                                      unsigned long nr,
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index b40798084d..169b57cb6b 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -1882,6 +1882,8 @@  static bool try_map_mmio(gfn_t gfn)
     return !map_regions_p2mt(d, gfn, 1, mfn, p2m_mmio_direct_c);
 }
 
+bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn);
+
 static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs,
                                        const union hsr hsr)
 {
@@ -1894,7 +1896,6 @@  static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs,
     vaddr_t gva;
     paddr_t gpa;
     uint8_t fsc = xabt.fsc & ~FSC_LL_MASK;
-    mfn_t mfn;
     bool is_data = (hsr.ec == HSR_EC_DATA_ABORT_LOWER_EL);
 
     /*
@@ -1977,8 +1978,8 @@  static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs,
          * with the Stage-2 page table. Walk the Stage-2 PT to check
          * if the entry exists. If it's the case, return to the guest
          */
-        mfn = gfn_to_mfn(current->domain, gaddr_to_gfn(gpa));
-        if ( !mfn_eq(mfn, INVALID_MFN) )
+        if ( p2m_resolve_translation_fault(current->domain,
+                                           gaddr_to_gfn(gpa)) )
             return;
 
         if ( is_data && try_map_mmio(gaddr_to_gfn(gpa)) )