diff mbox

xen/arm: p2m: flush tlb when a domain is destroyed

Message ID 1382624683-27093-1-git-send-email-julien.grall@linaro.org
State Superseded, archived
Headers show

Commit Message

Julien Grall Oct. 24, 2013, 2:24 p.m. UTC
Once the VMID is marked unused, a new domain can reuse the VMID for its
own. If the TLB is not flushed, entries can contain wrong translations.
ARM assembly doesn't provide an instruction to flush the TLB by VMID,
except for the current VMID, but the domain is already unscheduled from
every physical CPU. So flush the TLB for all VMIDs on every CPU (inner shareable).

Signed-off-by: Julien Grall <julien.grall@linaro.org>
---
 xen/arch/arm/p2m.c                   |    3 +++
 xen/include/asm-arm/arm32/flushtlb.h |   11 +++++++++++
 2 files changed, 14 insertions(+)

Comments

Ian Campbell Oct. 24, 2013, 2:28 p.m. UTC | #1
On Thu, 2013-10-24 at 15:24 +0100, Julien Grall wrote:
> Once the VMID is marked unused, a new domain can reuse the VMID for its
> own. If the TLB is not flushed, entries can contain wrong translations.
> ARM assembly doesn't provide an instruction to flush the TLB by VMID,
> except for the current VMID, but the domain is already unscheduled from
> every physical CPU. So flush the TLB for all VMIDs on every CPU (inner shareable).

Did you consider temporarily switching to the VMID in order to flush it?
Or flushing on allocate rather than free or flushing when the p2m is
first loaded (via a hook in p2m_load_vttbr)?

> 
> Signed-off-by: Julien Grall <julien.grall@linaro.org>
> ---
>  xen/arch/arm/p2m.c                   |    3 +++
>  xen/include/asm-arm/arm32/flushtlb.h |   11 +++++++++++
>  2 files changed, 14 insertions(+)
> 
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index 2d09fef..d4497f9 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -357,6 +357,9 @@ static void p2m_free_vmid(struct domain *d)
>      spin_lock(&vmid_alloc_lock);
>      if ( p2m->vmid != INVALID_VMID )
>          clear_bit(p2m->vmid, vmid_mask);
> +
> +    flush_tlb_all();
> +
>      spin_unlock(&vmid_alloc_lock);
>  }
>  
> diff --git a/xen/include/asm-arm/arm32/flushtlb.h b/xen/include/asm-arm/arm32/flushtlb.h
> index a258f58..7b4a73e 100644
> --- a/xen/include/asm-arm/arm32/flushtlb.h
> +++ b/xen/include/asm-arm/arm32/flushtlb.h

This needs an arm64 version too.

> @@ -23,6 +23,17 @@ static inline void flush_tlb_all_local(void)
>      isb();
>  }
>  
> +/* Flush inner shareable TLBs, all VMIDs, non-hypervisor mode */
> +static inline void flush_tlb_all(void)
> +{
> +    dsb();
> +
> +    WRITE_CP32((uint32_t) 0, TLBIALLNSNHIS);

Is the cast really necessary? I'd have thought that at the very most 0U
would be all that was needed.

> +
> +    dsb();
> +    isb();
> +}
> +
>  #endif /* __ASM_ARM_ARM32_FLUSHTLB_H__ */
>  /*
>   * Local variables:
diff mbox

Patch

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 2d09fef..d4497f9 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -357,6 +357,9 @@  static void p2m_free_vmid(struct domain *d)
     spin_lock(&vmid_alloc_lock);
     if ( p2m->vmid != INVALID_VMID )
         clear_bit(p2m->vmid, vmid_mask);
+
+    flush_tlb_all();
+
     spin_unlock(&vmid_alloc_lock);
 }
 
diff --git a/xen/include/asm-arm/arm32/flushtlb.h b/xen/include/asm-arm/arm32/flushtlb.h
index a258f58..7b4a73e 100644
--- a/xen/include/asm-arm/arm32/flushtlb.h
+++ b/xen/include/asm-arm/arm32/flushtlb.h
@@ -23,6 +23,17 @@  static inline void flush_tlb_all_local(void)
     isb();
 }
 
+/* Flush inner shareable TLBs, all VMIDs, non-hypervisor mode */
+static inline void flush_tlb_all(void)
+{
+    dsb();
+
+    WRITE_CP32((uint32_t) 0, TLBIALLNSNHIS);
+
+    dsb();
+    isb();
+}
+
 #endif /* __ASM_ARM_ARM32_FLUSHTLB_H__ */
 /*
  * Local variables: