
[Xen-devel,for-4.12,v3,3/5] xen/arm: p2m: Add support for preemption in p2m_cache_flush_range

Message ID 20181214115855.6713-4-julien.grall@arm.com
State New
Series xen/arm: Implement Set/Way operations

Commit Message

Julien Grall Dec. 14, 2018, 11:58 a.m. UTC
p2m_cache_flush_range does not yet support preemption. This may be an
issue, as cleaning the cache can take a long time. While the current
caller (XEN_DOMCTL_cacheflush) does not strictly require preemption, it
will be necessary for a new caller introduced in a follow-up patch.

The preemption scheme is quite simple: a counter is incremented by
    - 1 for each region skipped
    - 10 for each page requiring a flush

When the counter reaches 512 or above, we will check whether preemption
is needed. If not, the counter will be reset to 0. If yes, the function
will stop, update start (to allow resuming later on) and return
-ERESTART. This allows the caller to decide how the preemption will be
done.
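
As an illustration (not part of this patch), a caller wanting real
preemption could turn -ERESTART into a hypercall continuation. A
minimal sketch, assuming the usual hypercall_create_continuation()
helper; the re-encoding of the updated start GFN into the domctl is
hypothetical:

    rc = p2m_cache_flush_range(d, &s, e);
    if ( rc == -ERESTART )
    {
        /* 's' was updated by the callee to the first GFN not yet
         * flushed; encode it back so the re-issued hypercall resumes
         * from there (illustrative field handling). */
        domctl->u.cacheflush.start_pfn = gfn_x(s);
        domctl->u.cacheflush.nr_pfns = gfn_x(e) - gfn_x(s);
        rc = hypercall_create_continuation(__HYPERVISOR_domctl,
                                           "h", u_domctl);
    }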

For now, XEN_DOMCTL_cacheflush will continue to ignore preemption.

Signed-off-by: Julien Grall <julien.grall@arm.com>

---
    Changes in v2:
        - Patch added
---
 xen/arch/arm/domctl.c     |  8 +++++++-
 xen/arch/arm/p2m.c        | 35 ++++++++++++++++++++++++++++++++---
 xen/include/asm-arm/p2m.h |  4 +++-
 3 files changed, 42 insertions(+), 5 deletions(-)

Comments

Stefano Stabellini Dec. 14, 2018, 9:27 p.m. UTC | #1
On Fri, 14 Dec 2018, Julien Grall wrote:
> p2m_cache_flush_range does not yet support preemption. This may be an
> issue, as cleaning the cache can take a long time. While the current
> caller (XEN_DOMCTL_cacheflush) does not strictly require preemption, it
> will be necessary for a new caller introduced in a follow-up patch.
> 
> The preemption scheme is quite simple: a counter is incremented by
>     - 1 for each region skipped
>     - 10 for each page requiring a flush
> 
> When the counter reaches 512 or above, we will check whether preemption
> is needed. If not, the counter will be reset to 0. If yes, the function
> will stop, update start (to allow resuming later on) and return
> -ERESTART. This allows the caller to decide how the preemption will be
> done.
> 
> For now, XEN_DOMCTL_cacheflush will continue to ignore preemption.
> 
> Signed-off-by: Julien Grall <julien.grall@arm.com>
> 
> ---
>     Changes in v2:
>         - Patch added
> ---
>  xen/arch/arm/domctl.c     |  8 +++++++-
>  xen/arch/arm/p2m.c        | 35 ++++++++++++++++++++++++++++++++---
>  xen/include/asm-arm/p2m.h |  4 +++-
>  3 files changed, 42 insertions(+), 5 deletions(-)
> 
> diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
> index 20691528a6..9da88b8c64 100644
> --- a/xen/arch/arm/domctl.c
> +++ b/xen/arch/arm/domctl.c
> @@ -54,6 +54,7 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
>      {
>          gfn_t s = _gfn(domctl->u.cacheflush.start_pfn);
>          gfn_t e = gfn_add(s, domctl->u.cacheflush.nr_pfns);
> +        int rc;
>  
>          if ( domctl->u.cacheflush.nr_pfns > (1U<<MAX_ORDER) )
>              return -EINVAL;
> @@ -61,7 +62,12 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
>          if ( gfn_x(e) < gfn_x(s) )
>              return -EINVAL;
>  
> -        return p2m_cache_flush_range(d, s, e);
> +        /* XXX: Handle preemption */
> +        do
> +            rc = p2m_cache_flush_range(d, &s, e);
> +        while ( rc == -ERESTART );
> +
> +        return rc;
>      }
>      case XEN_DOMCTL_bind_pt_irq:
>      {
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index 17e2523fc1..5639e4b64c 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -1524,13 +1524,17 @@ int relinquish_p2m_mapping(struct domain *d)
>      return rc;
>  }
>  
> -int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
> +int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end)
>  {
>      struct p2m_domain *p2m = p2m_get_hostp2m(d);
>      gfn_t next_block_gfn;
> +    gfn_t start = *pstart;
>      mfn_t mfn = INVALID_MFN;
>      p2m_type_t t;
>      unsigned int order;
> +    int rc = 0;
> +    /* Counter for preemption */
> +    unsigned long count = 0;

No need for unsigned long; count could be unsigned int or even unsigned
short.

With this change:

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>


>      /*
>       * The operation cache flush will invalidate the RAM assigned to the
> @@ -1547,6 +1551,25 @@ int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
>  
>      while ( gfn_x(start) < gfn_x(end) )
>      {
> +        /*
> +         * Cleaning the cache for the P2M may take a long time. So we
> +         * need to be able to preempt. We will arbitrarily preempt every
> +         * time count reaches 512 or above.
> +         *
> +         * The count will be incremented by:
> +         *  - 1 on region skipped
> +         *  - 10 for each page requiring a flush
> +         */
> +        if ( count >= 512 )
> +        {
> +            if ( softirq_pending(smp_processor_id()) )
> +            {
> +                rc = -ERESTART;
> +                break;
> +            }
> +            count = 0;
> +        }
> +
>          /*
>           * We want to flush page by page as:
>           *  - it may not be possible to map the full block (can be up to 1GB)
> @@ -1568,22 +1591,28 @@ int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
>  
>              if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_any_ram(t) )
>              {
> +                count++;
>                  start = next_block_gfn;
>                  continue;
>              }
>          }
>  
> +        count += 10;
> +
>          flush_page_to_ram(mfn_x(mfn), false);
>  
>          start = gfn_add(start, 1);
>          mfn = mfn_add(mfn, 1);
>      }
>  
> -    invalidate_icache();
> +    if ( rc != -ERESTART )
> +        invalidate_icache();
>  
>      p2m_read_unlock(p2m);
>  
> -    return 0;
> +    *pstart = start;
> +
> +    return rc;
>  }
>  
>  mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn)
> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
> index 7c1d930b1d..a633e27cc9 100644
> --- a/xen/include/asm-arm/p2m.h
> +++ b/xen/include/asm-arm/p2m.h
> @@ -232,8 +232,10 @@ bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn);
>  /*
>   * Clean & invalidate caches corresponding to a region [start,end) of guest
>   * address space.
> + *
> + * start will get updated if the function is preempted.
>   */
> -int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end);
> +int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end);
>  
>  /*
>   * Map a region in the guest p2m with a specific p2m type.
> -- 
> 2.11.0
>
Julien Grall Dec. 18, 2018, 5:47 p.m. UTC | #2
Hi Stefano,

On 12/14/18 9:27 PM, Stefano Stabellini wrote:
> On Fri, 14 Dec 2018, Julien Grall wrote:
>> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
>> index 17e2523fc1..5639e4b64c 100644
>> --- a/xen/arch/arm/p2m.c
>> +++ b/xen/arch/arm/p2m.c
>> @@ -1524,13 +1524,17 @@ int relinquish_p2m_mapping(struct domain *d)
>>       return rc;
>>   }
>>   
>> -int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
>> +int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end)
>>   {
>>       struct p2m_domain *p2m = p2m_get_hostp2m(d);
>>       gfn_t next_block_gfn;
>> +    gfn_t start = *pstart;
>>       mfn_t mfn = INVALID_MFN;
>>       p2m_type_t t;
>>       unsigned int order;
>> +    int rc = 0;
>> +    /* Counter for preemption */
>> +    unsigned long count = 0;
> 
> No need for unsigned long; count could be unsigned int or even unsigned
> short.
> 
> With this change:
> 
> Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>

I will use unsigned short and commit it.

Thank you for the review!

Cheers,
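
A side note on the type discussion above: count is reset whenever it
reaches 512 and grows by at most 10 per iteration beforehand, so it is
bounded by 511 + 10 = 521, and even a 16-bit type is ample. A
standalone sketch (not part of the patch) demonstrating the bound:

    #include <assert.h>

    /* Model of the patch's preemption counter: reset once it reaches
     * 512, then incremented by 1 (skipped region) or 10 (flushed
     * page). The assertion shows the value never exceeds 521, well
     * within unsigned short range. */
    int main(void)
    {
        unsigned short count = 0;
        unsigned int i;

        for ( i = 0; i < 1000000; i++ )
        {
            if ( count >= 512 )
                count = 0;               /* preemption check point */
            count += (i % 2) ? 1 : 10;   /* skip vs. flush */
            assert(count <= 521);
        }

        return 0;
    }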

Patch

diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
index 20691528a6..9da88b8c64 100644
--- a/xen/arch/arm/domctl.c
+++ b/xen/arch/arm/domctl.c
@@ -54,6 +54,7 @@  long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
     {
         gfn_t s = _gfn(domctl->u.cacheflush.start_pfn);
         gfn_t e = gfn_add(s, domctl->u.cacheflush.nr_pfns);
+        int rc;
 
         if ( domctl->u.cacheflush.nr_pfns > (1U<<MAX_ORDER) )
             return -EINVAL;
@@ -61,7 +62,12 @@  long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
         if ( gfn_x(e) < gfn_x(s) )
             return -EINVAL;
 
-        return p2m_cache_flush_range(d, s, e);
+        /* XXX: Handle preemption */
+        do
+            rc = p2m_cache_flush_range(d, &s, e);
+        while ( rc == -ERESTART );
+
+        return rc;
     }
     case XEN_DOMCTL_bind_pt_irq:
     {
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 17e2523fc1..5639e4b64c 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1524,13 +1524,17 @@  int relinquish_p2m_mapping(struct domain *d)
     return rc;
 }
 
-int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
+int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end)
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     gfn_t next_block_gfn;
+    gfn_t start = *pstart;
     mfn_t mfn = INVALID_MFN;
     p2m_type_t t;
     unsigned int order;
+    int rc = 0;
+    /* Counter for preemption */
+    unsigned long count = 0;
 
     /*
      * The operation cache flush will invalidate the RAM assigned to the
@@ -1547,6 +1551,25 @@  int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
 
     while ( gfn_x(start) < gfn_x(end) )
     {
+        /*
+         * Cleaning the cache for the P2M may take a long time. So we
+         * need to be able to preempt. We will arbitrarily preempt every
+         * time count reaches 512 or above.
+         *
+         * The count will be incremented by:
+         *  - 1 on region skipped
+         *  - 10 for each page requiring a flush
+         */
+        if ( count >= 512 )
+        {
+            if ( softirq_pending(smp_processor_id()) )
+            {
+                rc = -ERESTART;
+                break;
+            }
+            count = 0;
+        }
+
         /*
          * We want to flush page by page as:
          *  - it may not be possible to map the full block (can be up to 1GB)
@@ -1568,22 +1591,28 @@  int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
 
             if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_any_ram(t) )
             {
+                count++;
                 start = next_block_gfn;
                 continue;
             }
         }
 
+        count += 10;
+
         flush_page_to_ram(mfn_x(mfn), false);
 
         start = gfn_add(start, 1);
         mfn = mfn_add(mfn, 1);
     }
 
-    invalidate_icache();
+    if ( rc != -ERESTART )
+        invalidate_icache();
 
     p2m_read_unlock(p2m);
 
-    return 0;
+    *pstart = start;
+
+    return rc;
 }
 
 mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn)
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 7c1d930b1d..a633e27cc9 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -232,8 +232,10 @@  bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn);
 /*
  * Clean & invalidate caches corresponding to a region [start,end) of guest
  * address space.
+ *
+ * start will get updated if the function is preempted.
  */
-int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end);
+int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end);
 
 /*
  * Map a region in the guest p2m with a specific p2m type.
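
For completeness, a toolstack-side sketch of exercising the flush path
this patch makes preemptible. It assumes libxc's xc_domain_cacheflush()
wrapper, which issues XEN_DOMCTL_cacheflush; error handling is
abbreviated:

    #include <xenctrl.h>

    /* With this patch, the hypervisor retries internally on -ERESTART,
     * so the caller only sees the final result. */
    int flush_guest_range(uint32_t domid, xen_pfn_t start_pfn,
                          xen_pfn_t nr_pfns)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        int rc;

        if ( !xch )
            return -1;

        rc = xc_domain_cacheflush(xch, domid, start_pfn, nr_pfns);

        xc_interface_close(xch);
        return rc;
    }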