diff mbox series

[Xen-devel] xen/arm: p2m: Read *_mapped_gfn with the p2m lock taken

Message ID 20170914153901.6750-1-julien.grall@arm.com
State New
Headers show
Series [Xen-devel] xen/arm: p2m: Read *_mapped_gfn with the p2m lock taken | expand

Commit Message

Julien Grall Sept. 14, 2017, 3:39 p.m. UTC
*_mapped_gfn are currently read before acquiring the lock. However, they
may be modified by the p2m code before the lock is acquired. This means
we will use the wrong values.

Fix it by moving the read inside the section protected by the p2m lock.

Signed-off-by: Julien Grall <julien.grall@arm.com>

---
    This patch should be backported to Xen 4.9 and Xen 4.8
---
 xen/arch/arm/p2m.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

Comments

Julien Grall Sept. 28, 2017, 12:45 a.m. UTC | #1
Hi,

Ping?

Cheers,

On 09/14/2017 04:39 PM, Julien Grall wrote:
> *_mapped_gfn are currently read before acquiring the lock. However, they
> may be modified by the p2m code before the lock is acquired. This means
> we will use the wrong values.
> 
> Fix it by moving the read inside the section protected by the p2m lock.
> 
> Signed-off-by: Julien Grall <julien.grall@arm.com>
> 
> ---
>      This patch should be backported to Xen 4.9 and Xen 4.8
> ---
>   xen/arch/arm/p2m.c | 14 +++++++-------
>   1 file changed, 7 insertions(+), 7 deletions(-)
> 
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index c484469e6c..d1260d3b4e 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -1292,13 +1292,13 @@ int relinquish_p2m_mapping(struct domain *d)
>       p2m_type_t t;
>       int rc = 0;
>       unsigned int order;
> -
> -    /* Convenience alias */
> -    gfn_t start = p2m->lowest_mapped_gfn;
> -    gfn_t end = p2m->max_mapped_gfn;
> +    gfn_t start, end;
>   
>       p2m_write_lock(p2m);
>   
> +    start = p2m->lowest_mapped_gfn;
> +    end = p2m->max_mapped_gfn;
> +
>       for ( ; gfn_x(start) < gfn_x(end);
>             start = gfn_next_boundary(start, order) )
>       {
> @@ -1353,9 +1353,6 @@ int p2m_cache_flush(struct domain *d, gfn_t start, unsigned long nr)
>       p2m_type_t t;
>       unsigned int order;
>   
> -    start = gfn_max(start, p2m->lowest_mapped_gfn);
> -    end = gfn_min(end, p2m->max_mapped_gfn);
> -
>       /*
>        * The operation cache flush will invalidate the RAM assigned to the
>        * guest in a given range. It will not modify the page table and
> @@ -1364,6 +1361,9 @@ int p2m_cache_flush(struct domain *d, gfn_t start, unsigned long nr)
>        */
>       p2m_read_lock(p2m);
>   
> +    start = gfn_max(start, p2m->lowest_mapped_gfn);
> +    end = gfn_min(end, p2m->max_mapped_gfn);
> +
>       for ( ; gfn_x(start) < gfn_x(end); start = next_gfn )
>       {
>           mfn_t mfn = p2m_get_entry(p2m, start, &t, NULL, &order);
>
Stefano Stabellini Oct. 7, 2017, 12:40 a.m. UTC | #2
On Thu, 14 Sep 2017, Julien Grall wrote:
> *_mapped_gfn are currently read before acquiring the lock. However, they
> may be modified by the p2m code before the lock is acquired. This means
> we will use the wrong values.
> 
> Fix it by moving the read inside the section protected by the p2m lock.
> 
> Signed-off-by: Julien Grall <julien.grall@arm.com>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>

and committed

> ---
>     This patch should be backported to Xen 4.9 and Xen 4.8
> ---
>  xen/arch/arm/p2m.c | 14 +++++++-------
>  1 file changed, 7 insertions(+), 7 deletions(-)
> 
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index c484469e6c..d1260d3b4e 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -1292,13 +1292,13 @@ int relinquish_p2m_mapping(struct domain *d)
>      p2m_type_t t;
>      int rc = 0;
>      unsigned int order;
> -
> -    /* Convenience alias */
> -    gfn_t start = p2m->lowest_mapped_gfn;
> -    gfn_t end = p2m->max_mapped_gfn;
> +    gfn_t start, end;
>  
>      p2m_write_lock(p2m);
>  
> +    start = p2m->lowest_mapped_gfn;
> +    end = p2m->max_mapped_gfn;
> +
>      for ( ; gfn_x(start) < gfn_x(end);
>            start = gfn_next_boundary(start, order) )
>      {
> @@ -1353,9 +1353,6 @@ int p2m_cache_flush(struct domain *d, gfn_t start, unsigned long nr)
>      p2m_type_t t;
>      unsigned int order;
>  
> -    start = gfn_max(start, p2m->lowest_mapped_gfn);
> -    end = gfn_min(end, p2m->max_mapped_gfn);
> -
>      /*
>       * The operation cache flush will invalidate the RAM assigned to the
>       * guest in a given range. It will not modify the page table and
> @@ -1364,6 +1361,9 @@ int p2m_cache_flush(struct domain *d, gfn_t start, unsigned long nr)
>       */
>      p2m_read_lock(p2m);
>  
> +    start = gfn_max(start, p2m->lowest_mapped_gfn);
> +    end = gfn_min(end, p2m->max_mapped_gfn);
> +
>      for ( ; gfn_x(start) < gfn_x(end); start = next_gfn )
>      {
>          mfn_t mfn = p2m_get_entry(p2m, start, &t, NULL, &order);
> -- 
> 2.11.0
>
diff mbox series

Patch

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index c484469e6c..d1260d3b4e 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1292,13 +1292,13 @@  int relinquish_p2m_mapping(struct domain *d)
     p2m_type_t t;
     int rc = 0;
     unsigned int order;
-
-    /* Convenience alias */
-    gfn_t start = p2m->lowest_mapped_gfn;
-    gfn_t end = p2m->max_mapped_gfn;
+    gfn_t start, end;
 
     p2m_write_lock(p2m);
 
+    start = p2m->lowest_mapped_gfn;
+    end = p2m->max_mapped_gfn;
+
     for ( ; gfn_x(start) < gfn_x(end);
           start = gfn_next_boundary(start, order) )
     {
@@ -1353,9 +1353,6 @@  int p2m_cache_flush(struct domain *d, gfn_t start, unsigned long nr)
     p2m_type_t t;
     unsigned int order;
 
-    start = gfn_max(start, p2m->lowest_mapped_gfn);
-    end = gfn_min(end, p2m->max_mapped_gfn);
-
     /*
      * The operation cache flush will invalidate the RAM assigned to the
      * guest in a given range. It will not modify the page table and
@@ -1364,6 +1361,9 @@  int p2m_cache_flush(struct domain *d, gfn_t start, unsigned long nr)
      */
     p2m_read_lock(p2m);
 
+    start = gfn_max(start, p2m->lowest_mapped_gfn);
+    end = gfn_min(end, p2m->max_mapped_gfn);
+
     for ( ; gfn_x(start) < gfn_x(end); start = next_gfn )
     {
         mfn_t mfn = p2m_get_entry(p2m, start, &t, NULL, &order);