
[Xen-devel,for-next,11/16] xen/arm: p2m: Rename p2m_flush_tlb and p2m_flush_tlb_sync

Message ID 20171123183210.12045-12-julien.grall@linaro.org
State Superseded
Headers show
Series xen/arm: Stage-2 handling cleanup

Commit Message

Julien Grall Nov. 23, 2017, 6:32 p.m. UTC
Rename p2m_flush_tlb and p2m_flush_tlb_sync to p2m_tlb_flush and
p2m_force_tlb_flush_sync respectively.

At first glance, inverting 'flush' and 'tlb' might seem pointless, but it
will make it easier in the future to port code from the x86 P2M, or even
to share code with it.

For p2m_flush_tlb_sync, 'force' was added because the TLBs are flushed
unconditionally. A follow-up patch will add a helper that flushes the
TLBs only when necessary.
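
A minimal sketch of what such a conditional helper could look like (the
name p2m_tlb_flush_sync and its exact shape are assumptions for
illustration only, not part of this series):

    /* Hypothetical follow-up helper: only flush the TLBs when a flush
     * is actually pending on this p2m. */
    static void p2m_tlb_flush_sync(struct p2m_domain *p2m)
    {
        if ( p2m->need_flush )
            p2m_force_tlb_flush_sync(p2m);
    }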

Signed-off-by: Julien Grall <julien.grall@linaro.org>
---
 xen/arch/arm/p2m.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

Comments

Stefano Stabellini Dec. 7, 2017, 10:10 p.m. UTC | #1
On Thu, 23 Nov 2017, Julien Grall wrote:
> Rename p2m_flush_tlb and p2m_flush_tlb_sync to p2m_tlb_flush and
> p2m_force_tlb_flush_sync respectively.
> 
> At first glance, inverting 'flush' and 'tlb' might seem pointless, but it
> will make it easier in the future to port code from the x86 P2M, or even
> to share code with it.
> 
> For p2m_flush_tlb_sync, 'force' was added because the TLBs are flushed
> unconditionally. A follow-up patch will add a helper that flushes the
> TLBs only when necessary.
> 
> Signed-off-by: Julien Grall <julien.grall@linaro.org>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>


> ---
>  xen/arch/arm/p2m.c | 18 +++++++++---------
>  1 file changed, 9 insertions(+), 9 deletions(-)
> 
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index 417609ede2..d466a5bc43 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -52,7 +52,7 @@ static const paddr_t level_masks[] =
>  static const uint8_t level_orders[] =
>      { ZEROETH_ORDER, FIRST_ORDER, SECOND_ORDER, THIRD_ORDER };
>  
> -static void p2m_flush_tlb(struct p2m_domain *p2m);
> +static void p2m_tlb_flush(struct p2m_domain *p2m);
>  
>  /* Unlock the flush and do a P2M TLB flush if necessary */
>  void p2m_write_unlock(struct p2m_domain *p2m)
> @@ -65,7 +65,7 @@ void p2m_write_unlock(struct p2m_domain *p2m)
>           * to avoid someone else modify the P2M before the TLB
>           * invalidation has completed.
>           */
> -        p2m_flush_tlb(p2m);
> +        p2m_tlb_flush(p2m);
>      }
>  
>      write_unlock(&p2m->lock);
> @@ -138,7 +138,7 @@ void p2m_restore_state(struct vcpu *n)
>      *last_vcpu_ran = n->vcpu_id;
>  }
>  
> -static void p2m_flush_tlb(struct p2m_domain *p2m)
> +static void p2m_tlb_flush(struct p2m_domain *p2m)
>  {
>      unsigned long flags = 0;
>      uint64_t ovttbr;
> @@ -170,11 +170,11 @@ static void p2m_flush_tlb(struct p2m_domain *p2m)
>   *
>   * Must be called with the p2m lock held.
>   */
> -static void p2m_flush_tlb_sync(struct p2m_domain *p2m)
> +static void p2m_force_tlb_flush_sync(struct p2m_domain *p2m)
>  {
>      ASSERT(p2m_is_write_locked(p2m));
>  
> -    p2m_flush_tlb(p2m);
> +    p2m_tlb_flush(p2m);
>      p2m->need_flush = false;
>  }
>  
> @@ -675,7 +675,7 @@ static void p2m_free_entry(struct p2m_domain *p2m,
>       * flush?
>       */
>      if ( p2m->need_flush )
> -        p2m_flush_tlb_sync(p2m);
> +        p2m_force_tlb_flush_sync(p2m);
>  
>      mfn = _mfn(entry.p2m.base);
>      ASSERT(mfn_valid(mfn));
> @@ -864,7 +864,7 @@ static int __p2m_set_entry(struct p2m_domain *p2m,
>           * For more details see (D4.7.1 in ARM DDI 0487A.j).
>           */
>          p2m_remove_pte(entry, p2m->clean_pte);
> -        p2m_flush_tlb_sync(p2m);
> +        p2m_force_tlb_flush_sync(p2m);
>  
>          p2m_write_pte(entry, split_pte, p2m->clean_pte);
>  
> @@ -940,7 +940,7 @@ static int __p2m_set_entry(struct p2m_domain *p2m,
>          {
>              if ( likely(!p2m->mem_access_enabled) ||
>                   P2M_CLEAR_PERM(pte) != P2M_CLEAR_PERM(orig_pte) )
> -                p2m_flush_tlb_sync(p2m);
> +                p2m_force_tlb_flush_sync(p2m);
>              else
>                  p2m->need_flush = true;
>          }
> @@ -1144,7 +1144,7 @@ static int p2m_alloc_table(struct domain *d)
>       * Make sure that all TLBs corresponding to the new VMID are flushed
>       * before using it
>       */
> -    p2m_flush_tlb(p2m);
> +    p2m_tlb_flush(p2m);
>  
>      return 0;
>  }
> -- 
> 2.11.0
>

Patch

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 417609ede2..d466a5bc43 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -52,7 +52,7 @@  static const paddr_t level_masks[] =
 static const uint8_t level_orders[] =
     { ZEROETH_ORDER, FIRST_ORDER, SECOND_ORDER, THIRD_ORDER };
 
-static void p2m_flush_tlb(struct p2m_domain *p2m);
+static void p2m_tlb_flush(struct p2m_domain *p2m);
 
 /* Unlock the flush and do a P2M TLB flush if necessary */
 void p2m_write_unlock(struct p2m_domain *p2m)
@@ -65,7 +65,7 @@  void p2m_write_unlock(struct p2m_domain *p2m)
          * to avoid someone else modify the P2M before the TLB
          * invalidation has completed.
          */
-        p2m_flush_tlb(p2m);
+        p2m_tlb_flush(p2m);
     }
 
     write_unlock(&p2m->lock);
@@ -138,7 +138,7 @@  void p2m_restore_state(struct vcpu *n)
     *last_vcpu_ran = n->vcpu_id;
 }
 
-static void p2m_flush_tlb(struct p2m_domain *p2m)
+static void p2m_tlb_flush(struct p2m_domain *p2m)
 {
     unsigned long flags = 0;
     uint64_t ovttbr;
@@ -170,11 +170,11 @@  static void p2m_flush_tlb(struct p2m_domain *p2m)
  *
  * Must be called with the p2m lock held.
  */
-static void p2m_flush_tlb_sync(struct p2m_domain *p2m)
+static void p2m_force_tlb_flush_sync(struct p2m_domain *p2m)
 {
     ASSERT(p2m_is_write_locked(p2m));
 
-    p2m_flush_tlb(p2m);
+    p2m_tlb_flush(p2m);
     p2m->need_flush = false;
 }
 
@@ -675,7 +675,7 @@  static void p2m_free_entry(struct p2m_domain *p2m,
      * flush?
      */
     if ( p2m->need_flush )
-        p2m_flush_tlb_sync(p2m);
+        p2m_force_tlb_flush_sync(p2m);
 
     mfn = _mfn(entry.p2m.base);
     ASSERT(mfn_valid(mfn));
@@ -864,7 +864,7 @@  static int __p2m_set_entry(struct p2m_domain *p2m,
          * For more details see (D4.7.1 in ARM DDI 0487A.j).
          */
         p2m_remove_pte(entry, p2m->clean_pte);
-        p2m_flush_tlb_sync(p2m);
+        p2m_force_tlb_flush_sync(p2m);
 
         p2m_write_pte(entry, split_pte, p2m->clean_pte);
 
@@ -940,7 +940,7 @@  static int __p2m_set_entry(struct p2m_domain *p2m,
         {
             if ( likely(!p2m->mem_access_enabled) ||
                  P2M_CLEAR_PERM(pte) != P2M_CLEAR_PERM(orig_pte) )
-                p2m_flush_tlb_sync(p2m);
+                p2m_force_tlb_flush_sync(p2m);
             else
                 p2m->need_flush = true;
         }
@@ -1144,7 +1144,7 @@  static int p2m_alloc_table(struct domain *d)
      * Make sure that all TLBs corresponding to the new VMID are flushed
      * before using it
      */
-    p2m_flush_tlb(p2m);
+    p2m_tlb_flush(p2m);
 
     return 0;
 }