diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -217,7 +217,7 @@ void set_fixmap(unsigned map, unsigned long mfn, unsigned attributes)
pte.pt.ai = attributes;
pte.pt.xn = 1;
write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
- flush_xen_data_tlb_local_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
+ flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
}
/* Remove a mapping from a fixmap entry */
@@ -225,7 +225,7 @@ void clear_fixmap(unsigned map)
{
lpae_t pte = {0};
write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
- flush_xen_data_tlb_local_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
+ flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
}
#ifdef CONFIG_DOMAIN_PAGE
diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h
--- a/xen/include/asm-arm/arm32/page.h
+++ b/xen/include/asm-arm/arm32/page.h
@@ -82,6 +82,25 @@ static inline void flush_xen_data_tlb_local_range_va(unsigned long va,
isb();
}
+/*
+ * Flush a range of hypervisor VA mappings from the data TLB of all
+ * processors in the inner-shareable domain. This is not sufficient
+ * when changing code mappings or for self-modifying code.
+ */
+static inline void flush_xen_data_tlb_range_va(unsigned long va,
+                                               unsigned long size)
+{
+    unsigned long end = va + size;
+    dsb(); /* Ensure preceding page-table writes are visible */
+    while ( va < end ) {
+        asm volatile(STORE_CP32(0, TLBIMVAHIS)
+                     : : "r" (va) : "memory");
+        va += PAGE_SIZE;
+    }
+    dsb(); /* Ensure completion of the TLB flush */
+    isb();
+}
+
/* Ask the MMU to translate a VA for us */
static inline uint64_t __va_to_par(vaddr_t va)
{
diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h
@@ -74,6 +74,25 @@ static inline void flush_xen_data_tlb_local_range_va(unsigned long va,
isb();
}
+/*
+ * Flush a range of hypervisor VA mappings from the data TLB of all
+ * processors in the inner-shareable domain. This is not sufficient
+ * when changing code mappings or for self-modifying code.
+ */
+static inline void flush_xen_data_tlb_range_va(unsigned long va,
+                                               unsigned long size)
+{
+    unsigned long end = va + size;
+    dsb(); /* Ensure preceding page-table writes are visible */
+    while ( va < end ) {
+        asm volatile("tlbi vae2is, %0;" /* operand holds VA[55:12] */
+                     : : "r" (va >> PAGE_SHIFT) : "memory");
+        va += PAGE_SIZE;
+    }
+    dsb(); /* Ensure completion of the TLB flush */
+    isb();
+}
+
/* Ask the MMU to translate a VA for us */
static inline uint64_t __va_to_par(vaddr_t va)
{
These mappings are global and therefore need flushing on all
processors. Add flush_xen_data_tlb_range_va which accomplishes this.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
 xen/arch/arm/mm.c                |    4 ++--
 xen/include/asm-arm/arm32/page.h |   19 +++++++++++++++++++
 xen/include/asm-arm/arm64/page.h |   19 +++++++++++++++++++
 3 files changed, 40 insertions(+), 2 deletions(-)
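
To illustrate why the inner-shareable flush matters here, below is a
minimal sketch of the kind of caller set_fixmap()/clear_fixmap() serve.
Only set_fixmap(), clear_fixmap() and FIXMAP_ADDR() come from the code
above; the slot name FIXMAP_MISC, the attribute index DEV_SHARED and
the helper name peek_frame are assumptions made for the example:

    /*
     * Sketch only, not part of this patch: map an arbitrary machine
     * frame through a fixmap slot, read one word, then unmap it.
     * FIXMAP_MISC, DEV_SHARED and peek_frame are illustrative names.
     */
    static uint32_t peek_frame(unsigned long mfn)
    {
        uint32_t val;

        /*
         * Installs the PTE and performs an inner-shareable TLB flush,
         * so no CPU can still translate FIXMAP_ADDR(FIXMAP_MISC) to a
         * previously mapped frame.
         */
        set_fixmap(FIXMAP_MISC, mfn, DEV_SHARED);

        val = *(volatile uint32_t *)FIXMAP_ADDR(FIXMAP_MISC);

        /* Removes the PTE and again flushes every CPU's TLB. */
        clear_fixmap(FIXMAP_MISC);

        return val;
    }

With the previous *_local variant, a stale translation could survive in
another CPU's TLB and let a later access through the same fixmap slot
hit the old frame.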