
[Xen-devel,for-4.12,v2,13/17] xen/arm: p2m: Rework p2m_cache_flush_range

Message ID 20181204202651.8836-14-julien.grall@arm.com
State New
Series xen/arm: Implement Set/Way operations

Commit Message

Julien Grall Dec. 4, 2018, 8:26 p.m. UTC
A follow-up patch will add support for preemption in p2m_cache_flush_range.
Because of the complexity of the two loops, preemption would need to be
added to both of them.

This can be avoided by merging the two loops while still keeping
the code fairly simple to read and extend.

Signed-off-by: Julien Grall <julien.grall@arm.com>

---
    Changes in v2:
        - Patch added
---
 xen/arch/arm/p2m.c | 52 +++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 37 insertions(+), 15 deletions(-)
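
For readability, the reworked function reads roughly as follows once the patch
below is applied. This is reconstructed purely from the hunks in this series;
code outside the diff context is elided and marked with comments:

int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    gfn_t next_block_gfn;
    mfn_t mfn = INVALID_MFN;
    p2m_type_t t;
    unsigned int order;

    /* ... code not shown in the diff context ... */

    start = gfn_max(start, p2m->lowest_mapped_gfn);
    end = gfn_min(end, p2m->max_mapped_gfn);

    next_block_gfn = start;

    while ( gfn_x(start) < gfn_x(end) )
    {
        /* Look up the p2m entry only once per block (which can be up to 1GB). */
        if ( gfn_eq(start, next_block_gfn) )
        {
            mfn = p2m_get_entry(p2m, start, &t, NULL, &order, NULL);
            next_block_gfn = gfn_next_boundary(start, order);

            /* Skip holes and non-RAM regions. */
            if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_any_ram(t) )
            {
                start = next_block_gfn;
                continue;
            }
        }

        /* Flush one page, then advance both the gfn and the mfn. */
        flush_page_to_ram(mfn_x(mfn), false);

        start = gfn_add(start, 1);
        mfn = mfn_add(mfn, 1);
    }

    invalidate_icache();

    /* ... code not shown in the diff context ... */
}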

Comments

Stefano Stabellini Dec. 6, 2018, 11:53 p.m. UTC | #1
On Tue, 4 Dec 2018, Julien Grall wrote:
> A follow-up patch will add support for preemption in p2m_cache_flush_range.
> Because of the complexity of the two loops, preemption would need to be
> added to both of them.
> 
> This can be avoided by merging the two loops while still keeping
> the code fairly simple to read and extend.
> 
> Signed-off-by: Julien Grall <julien.grall@arm.com>
> 
> ---
>     Changes in v2:
>         - Patch added
> ---
>  xen/arch/arm/p2m.c | 52 +++++++++++++++++++++++++++++++++++++---------------
>  1 file changed, 37 insertions(+), 15 deletions(-)
> 
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index c713226561..db22b53bfd 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -1527,7 +1527,8 @@ int relinquish_p2m_mapping(struct domain *d)
>  int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
>  {
>      struct p2m_domain *p2m = p2m_get_hostp2m(d);
> -    gfn_t next_gfn;
> +    gfn_t next_block_gfn;
> +    mfn_t mfn = INVALID_MFN;
>      p2m_type_t t;
>      unsigned int order;
>  
> @@ -1542,24 +1543,45 @@ int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
>      start = gfn_max(start, p2m->lowest_mapped_gfn);
>      end = gfn_min(end, p2m->max_mapped_gfn);
>  
> -    for ( ; gfn_x(start) < gfn_x(end); start = next_gfn )
> -    {
> -        mfn_t mfn = p2m_get_entry(p2m, start, &t, NULL, &order, NULL);
> +    next_block_gfn = start;
>  
> -        next_gfn = gfn_next_boundary(start, order);
> -
> -        /* Skip hole and non-RAM page */
> -        if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_any_ram(t) )
> -            continue;
> -
> -        /* XXX: Implement preemption */
> -        while ( gfn_x(start) < gfn_x(next_gfn) )
> +    while ( gfn_x(start) < gfn_x(end) )
> +    {
> +        /*
> +         * We want to flush page by page as:
> +         *  - it may not be possible to map the full block (can be up to 1GB)
> +         *    in Xen memory
> +         *  - we may want to do fine-grained preemption, as flushing multiple
> +         *    pages in one go may take a long time
> +         *
> +         * As p2m_get_entry is able to return the size of the mapping
> +         * in the p2m, it is pointless to execute it for each page.
> +         *
> +         * We can optimize it by tracking the gfn of the next
> +         * block. So we will only call p2m_get_entry once per block (which
> +         * can be up to 1GB).
> +         */
> +        if ( gfn_eq(start, next_block_gfn) )
>          {
> -            flush_page_to_ram(mfn_x(mfn), false);
> +            mfn = p2m_get_entry(p2m, start, &t, NULL, &order, NULL);
> +            next_block_gfn = gfn_next_boundary(start, order);
>  
> -            start = gfn_add(start, 1);
> -            mfn = mfn_add(mfn, 1);
> +            /*
> +             * The following regions can be skipped:
> +             *      - Hole
> +             *      - non-RAM
> +             */

I think this comment is superfluous as the code is already obvious. You
can remove it.

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>


> +            if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_any_ram(t) )
> +            {
> +                start = next_block_gfn;
> +                continue;
> +            }
>          }
> +
> +        flush_page_to_ram(mfn_x(mfn), false);
> +
> +        start = gfn_add(start, 1);
> +        mfn = mfn_add(mfn, 1);
>      }
>  
>      invalidate_icache();
> -- 
> 2.11.0
>
Julien Grall Dec. 7, 2018, 10:18 a.m. UTC | #2
Hi Stefano,

On 06/12/2018 23:53, Stefano Stabellini wrote:
> On Tue, 4 Dec 2018, Julien Grall wrote:
>> A follow-up patch will add support for preemption in p2m_cache_flush_range.
>> Because of the complexity of the two loops, preemption would need to be
>> added to both of them.
>>
>> This can be avoided by merging the two loops while still keeping
>> the code fairly simple to read and extend.
>>
>> Signed-off-by: Julien Grall <julien.grall@arm.com>
>>
>> ---
>>      Changes in v2:
>>          - Patch added
>> ---
>>   xen/arch/arm/p2m.c | 52 +++++++++++++++++++++++++++++++++++++---------------
>>   1 file changed, 37 insertions(+), 15 deletions(-)
>>
>> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
>> index c713226561..db22b53bfd 100644
>> --- a/xen/arch/arm/p2m.c
>> +++ b/xen/arch/arm/p2m.c
>> @@ -1527,7 +1527,8 @@ int relinquish_p2m_mapping(struct domain *d)
>>   int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
>>   {
>>       struct p2m_domain *p2m = p2m_get_hostp2m(d);
>> -    gfn_t next_gfn;
>> +    gfn_t next_block_gfn;
>> +    mfn_t mfn = INVALID_MFN;
>>       p2m_type_t t;
>>       unsigned int order;
>>   
>> @@ -1542,24 +1543,45 @@ int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
>>       start = gfn_max(start, p2m->lowest_mapped_gfn);
>>       end = gfn_min(end, p2m->max_mapped_gfn);
>>   
>> -    for ( ; gfn_x(start) < gfn_x(end); start = next_gfn )
>> -    {
>> -        mfn_t mfn = p2m_get_entry(p2m, start, &t, NULL, &order, NULL);
>> +    next_block_gfn = start;
>>   
>> -        next_gfn = gfn_next_boundary(start, order);
>> -
>> -        /* Skip hole and non-RAM page */
>> -        if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_any_ram(t) )
>> -            continue;
>> -
>> -        /* XXX: Implement preemption */
>> -        while ( gfn_x(start) < gfn_x(next_gfn) )
>> +    while ( gfn_x(start) < gfn_x(end) )
>> +    {
>> +        /*
>> +         * We want to flush page by page as:
>> +         *  - it may not be possible to map the full block (can be up to 1GB)
>> +         *    in Xen memory
>> +         *  - we may want to do fine-grained preemption, as flushing multiple
>> +         *    pages in one go may take a long time
>> +         *
>> +         * As p2m_get_entry is able to return the size of the mapping
>> +         * in the p2m, it is pointless to execute it for each page.
>> +         *
>> +         * We can optimize it by tracking the gfn of the next
>> +         * block. So we will only call p2m_get_entry once per block (which
>> +         * can be up to 1GB).
>> +         */
>> +        if ( gfn_eq(start, next_block_gfn) )
>>           {
>> -            flush_page_to_ram(mfn_x(mfn), false);
>> +            mfn = p2m_get_entry(p2m, start, &t, NULL, &order, NULL);
>> +            next_block_gfn = gfn_next_boundary(start, order);
>>   
>> -            start = gfn_add(start, 1);
>> -            mfn = mfn_add(mfn, 1);
>> +            /*
>> +             * The following regions can be skipped:
>> +             *      - Hole
>> +             *      - non-RAM
>> +             */
> 
> I think this comment is superfluous as the code is already obvious. You
> can remove it.

I was over-cautious :). I will drop it.

> 
> Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>

Thank you for the review!

Cheers,

Patch

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index c713226561..db22b53bfd 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1527,7 +1527,8 @@  int relinquish_p2m_mapping(struct domain *d)
 int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
-    gfn_t next_gfn;
+    gfn_t next_block_gfn;
+    mfn_t mfn = INVALID_MFN;
     p2m_type_t t;
     unsigned int order;
 
@@ -1542,24 +1543,45 @@  int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
     start = gfn_max(start, p2m->lowest_mapped_gfn);
     end = gfn_min(end, p2m->max_mapped_gfn);
 
-    for ( ; gfn_x(start) < gfn_x(end); start = next_gfn )
-    {
-        mfn_t mfn = p2m_get_entry(p2m, start, &t, NULL, &order, NULL);
+    next_block_gfn = start;
 
-        next_gfn = gfn_next_boundary(start, order);
-
-        /* Skip hole and non-RAM page */
-        if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_any_ram(t) )
-            continue;
-
-        /* XXX: Implement preemption */
-        while ( gfn_x(start) < gfn_x(next_gfn) )
+    while ( gfn_x(start) < gfn_x(end) )
+    {
+        /*
+         * We want to flush page by page as:
+         *  - it may not be possible to map the full block (can be up to 1GB)
+         *    in Xen memory
+         *  - we may want to do fine-grained preemption, as flushing multiple
+         *    pages in one go may take a long time
+         *
+         * As p2m_get_entry is able to return the size of the mapping
+         * in the p2m, it is pointless to execute it for each page.
+         *
+         * We can optimize it by tracking the gfn of the next
+         * block. So we will only call p2m_get_entry once per block (which
+         * can be up to 1GB).
+         */
+        if ( gfn_eq(start, next_block_gfn) )
         {
-            flush_page_to_ram(mfn_x(mfn), false);
+            mfn = p2m_get_entry(p2m, start, &t, NULL, &order, NULL);
+            next_block_gfn = gfn_next_boundary(start, order);
 
-            start = gfn_add(start, 1);
-            mfn = mfn_add(mfn, 1);
+            /*
+             * The following regions can be skipped:
+             *      - Hole
+             *      - non-RAM
+             */
+            if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_any_ram(t) )
+            {
+                start = next_block_gfn;
+                continue;
+            }
         }
+
+        flush_page_to_ram(mfn_x(mfn), false);
+
+        start = gfn_add(start, 1);
+        mfn = mfn_add(mfn, 1);
     }
 
     invalidate_icache();
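
As a purely illustrative sketch (not the actual follow-up patch), the point of
merging the loops is that a preemption check only needs to live in one place.
One hypothetical shape is a periodic check with Xen's hypercall_preempt_check();
the counter, the interval of 512 and the way the caller restarts from 'start'
are all assumptions left to the follow-up patch:

    unsigned long count = 0;

    while ( gfn_x(start) < gfn_x(end) )
    {
        /*
         * Hypothetical illustration only: check for pending work every so
         * often rather than on every page.
         */
        if ( !(count % 512) && hypercall_preempt_check() )
        {
            /* The restart interface is defined by the follow-up patch. */
            break;
        }
        count++;

        /* ... per-page flush exactly as in the hunk above ... */
    }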