
[Xen-devel,v2,05/16] xen/x86: p2m-pod: Avoid redundant assignments in p2m_pod_demand_populate

Message ID 20170921124035.2410-6-julien.grall@arm.com
State Superseded
Series xen/x86: Clean-up the PoD code

Commit Message

Julien Grall Sept. 21, 2017, 12:40 p.m. UTC
gfn_aligned is assigned 3 times with exactly the same formula. None of the
variables used is modified in between, so consolidate the computation into a
single assignment at the beginning of the function.

Signed-off-by: Julien Grall <julien.grall@arm.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>

---

Cc: George Dunlap <george.dunlap@eu.citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>

    Changes in v2:
        - Add Andrew's acked-by
---
 xen/arch/x86/mm/p2m-pod.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)
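
For context, (gfn >> order) << order simply rounds gfn down to the start of
its 2^order-aligned block, i.e. it clears the low 'order' bits. A minimal
standalone sketch of that computation (gfn_round_down and the sample values
are illustrative only, not taken from the Xen tree):

    #include <stdio.h>

    /*
     * Round gfn down to the nearest 2^order boundary; equivalent to
     * clearing the low 'order' bits: gfn & ~((1UL << order) - 1).
     */
    static unsigned long gfn_round_down(unsigned long gfn, unsigned int order)
    {
        return (gfn >> order) << order;
    }

    int main(void)
    {
        /* With a 2MB superpage (order 9), gfn 0x12345 rounds down to 0x12200. */
        printf("%#lx\n", gfn_round_down(0x12345UL, 9));
        return 0;
    }

Because gfn and order never change inside p2m_pod_demand_populate(), the
result is the same at every call site, which is why the patch can hoist it
into the variable's initialiser.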

Comments

Wei Liu Sept. 21, 2017, 2:53 p.m. UTC | #1
On Thu, Sep 21, 2017 at 01:40:24PM +0100, Julien Grall wrote:
> gfn_aligned is assigned 3 times with exactly the same formula. None of the
> variables used is modified in between, so consolidate the computation into a
> single assignment at the beginning of the function.
> 
> Signed-off-by: Julien Grall <julien.grall@arm.com>
> Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Wei Liu <wei.liu2@citrix.com>
George Dunlap Sept. 21, 2017, 4:35 p.m. UTC | #2
On 09/21/2017 01:40 PM, Julien Grall wrote:
> gfn_aligned is assigned 3 times with exactly the same formula. None of the
> variables used is modified in between, so consolidate the computation into a
> single assignment at the beginning of the function.
> 
> Signed-off-by: Julien Grall <julien.grall@arm.com>
> Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: George Dunlap <george.dunlap@citrix.com>

> 
> ---
> 
> Cc: George Dunlap <george.dunlap@eu.citrix.com>
> Cc: Jan Beulich <jbeulich@suse.com>
> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
> 
>     Changes in v2:
>         - Add Andrew's acked-by
> ---
>  xen/arch/x86/mm/p2m-pod.c | 6 +-----
>  1 file changed, 1 insertion(+), 5 deletions(-)
> 
> diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
> index f04d6e03e2..bcc87aee03 100644
> --- a/xen/arch/x86/mm/p2m-pod.c
> +++ b/xen/arch/x86/mm/p2m-pod.c
> @@ -1079,7 +1079,7 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
>  {
>      struct domain *d = p2m->domain;
>      struct page_info *p = NULL; /* Compiler warnings */
> -    unsigned long gfn_aligned;
> +    unsigned long gfn_aligned = (gfn >> order) << order;
>      mfn_t mfn;
>      unsigned long i;
>  
> @@ -1102,7 +1102,6 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
>      if ( order == PAGE_ORDER_1G )
>      {
>          pod_unlock(p2m);
> -        gfn_aligned = (gfn >> order) << order;
>          /*
>           * Note that we are supposed to call p2m_set_entry() 512 times to
>           * split 1GB into 512 2MB pages here. But We only do once here because
> @@ -1147,8 +1146,6 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
>  
>      BUG_ON((mfn_x(mfn) & ((1UL << order) - 1)) != 0);
>  
> -    gfn_aligned = (gfn >> order) << order;
> -
>      p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
>                    p2m->default_access);
>  
> @@ -1200,7 +1197,6 @@ remap_and_retry:
>       * NOTE: In a p2m fine-grained lock scenario this might
>       * need promoting the gfn lock from gfn->2M superpage.
>       */
> -    gfn_aligned = (gfn >> order) << order;
>      for ( i = 0; i < (1UL << order); i++ )
>          p2m_set_entry(p2m, gfn_aligned + i, INVALID_MFN, PAGE_ORDER_4K,
>                        p2m_populate_on_demand, p2m->default_access);
>

Patch

diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index f04d6e03e2..bcc87aee03 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -1079,7 +1079,7 @@  p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
 {
     struct domain *d = p2m->domain;
     struct page_info *p = NULL; /* Compiler warnings */
-    unsigned long gfn_aligned;
+    unsigned long gfn_aligned = (gfn >> order) << order;
     mfn_t mfn;
     unsigned long i;
 
@@ -1102,7 +1102,6 @@  p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
     if ( order == PAGE_ORDER_1G )
     {
         pod_unlock(p2m);
-        gfn_aligned = (gfn >> order) << order;
         /*
          * Note that we are supposed to call p2m_set_entry() 512 times to
          * split 1GB into 512 2MB pages here. But We only do once here because
@@ -1147,8 +1146,6 @@  p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
 
     BUG_ON((mfn_x(mfn) & ((1UL << order) - 1)) != 0);
 
-    gfn_aligned = (gfn >> order) << order;
-
     p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
                   p2m->default_access);
 
@@ -1200,7 +1197,6 @@  remap_and_retry:
      * NOTE: In a p2m fine-grained lock scenario this might
      * need promoting the gfn lock from gfn->2M superpage.
      */
-    gfn_aligned = (gfn >> order) << order;
     for ( i = 0; i < (1UL << order); i++ )
         p2m_set_entry(p2m, gfn_aligned + i, INVALID_MFN, PAGE_ORDER_4K,
                       p2m_populate_on_demand, p2m->default_access);