--- a/xen/include/asm-arm/arm32/page.h
+++ b/xen/include/asm-arm/arm32/page.h
@@ -63,23 +63,10 @@ static inline void flush_xen_data_tlb_local(void)
: : "r" (r0) /* dummy */: "memory");
}
-/*
- * Flush a range of VA's hypervisor mappings from the data TLB of the
- * local processor. This is not sufficient when changing code mappings
- * or for self modifying code.
- */
-static inline void flush_xen_data_tlb_range_va_local(unsigned long va,
- unsigned long size)
+/* Flush TLB of local processor for address va. */
+static inline void __flush_xen_data_tlb_one_local(vaddr_t va)
{
- unsigned long end = va + size;
- dsb(sy); /* Ensure preceding are visible */
- while ( va < end ) {
- asm volatile(STORE_CP32(0, TLBIMVAH)
- : : "r" (va) : "memory");
- va += PAGE_SIZE;
- }
- dsb(sy); /* Ensure completion of the TLB flush */
- isb();
+ asm volatile(STORE_CP32(0, TLBIMVAH) : : "r" (va) : "memory");
}
/* Ask the MMU to translate a VA for us */
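On arm32 the new helper is deliberately bare: it issues a single TLBIMVAH (invalidate TLB entry by MVA, Hyp mode) and leaves the dsb/isb barriers to the caller, so a loop over many pages pays for the barriers only once. As a rough sketch of what the STORE_CP32(0, TLBIMVAH) line amounts to, assuming the usual ARMv7 CP15 encoding of TLBIMVAH (p15, op1=4, c8, c7, op2=1); the function name here is illustrative, not from the patch:

    /* Hedged sketch, not the patch's code: raw equivalent of the arm32
     * helper, assuming TLBIMVAH encodes as p15, 4, <Rt>, c8, c7, 1. */
    static inline void flush_one_va_raw(unsigned long va)
    {
        /* No dsb()/isb() here on purpose: the caller batches page
         * flushes and issues the barriers once around the loop. */
        asm volatile("mcr p15, 4, %0, c8, c7, 1" : : "r" (va) : "memory");
    }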
--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h
@@ -55,23 +55,10 @@ static inline void flush_xen_data_tlb_local(void)
: : : "memory");
}
-/*
- * Flush a range of VA's hypervisor mappings from the data TLB of the
- * local processor. This is not sufficient when changing code mappings
- * or for self modifying code.
- */
-static inline void flush_xen_data_tlb_range_va_local(unsigned long va,
- unsigned long size)
+/* Flush TLB of local processor for address va. */
+static inline void __flush_xen_data_tlb_one_local(vaddr_t va)
{
- unsigned long end = va + size;
- dsb(sy); /* Ensure preceding are visible */
- while ( va < end ) {
- asm volatile("tlbi vae2, %0;"
- : : "r" (va>>PAGE_SHIFT) : "memory");
- va += PAGE_SIZE;
- }
- dsb(sy); /* Ensure completion of the TLB flush */
- isb();
+ asm volatile("tlbi vae2, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
}
/* Ask the MMU to translate a VA for us */
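The arm64 variant has one subtlety worth calling out: TLBI by-VA operations take the virtual page number, not the byte address, in the low bits of the source register (VA[55:12] for 4KB granules), which is why the helper passes va>>PAGE_SHIFT rather than va. A small self-contained sketch of the operand computation, assuming 4KB pages (PAGE_SHIFT == 12); the helper name is an assumption for illustration:

    #include <stdint.h>

    #define PAGE_SHIFT 12 /* assumption: 4KB granule */

    /* Hedged sketch: compute the register operand for "tlbi vae2".
     * The instruction consumes the virtual page number, so the byte
     * address is shifted down by PAGE_SHIFT first. */
    static inline uint64_t tlbi_vae2_operand(uint64_t va)
    {
        return va >> PAGE_SHIFT; /* e.g. 0x800000201234 -> 0x800000201 */
    }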
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -306,6 +306,24 @@ static inline void clean_and_invalidate_xen_dcache_va_range
: : "r" (_p), "m" (*_p)); \
} while (0)
+/*
+ * Flush the hypervisor mappings for a range of VAs from the data TLB
+ * of the local processor. This is not sufficient when changing code
+ * mappings or for self-modifying code.
+ */
+static inline void flush_xen_data_tlb_range_va_local(unsigned long va,
+ unsigned long size)
+{
+ unsigned long end = va + size;
+ dsb(sy); /* Ensure preceding are visible */
+ while ( va < end ) {
+ __flush_xen_data_tlb_one_local(va);
+ va += PAGE_SIZE;
+ }
+ dsb(sy); /* Ensure completion of the TLB flush */
+ isb();
+}
+
/* Flush the dcache for an entire page. */
void flush_page_to_ram(unsigned long mfn);
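For context on how the now-common helper is used: after a hypervisor page-table entry changes, the stale translation must be flushed on the local CPU before the new mapping can be relied on. A hypothetical caller, assuming Xen's write_pte() PTE-update helper; the function and argument names are illustrative, not part of this patch:

    /* Hedged usage sketch: remap one hypervisor page and flush the
     * stale translation on the local processor. */
    static void remap_xen_page_local(lpae_t *entry, lpae_t new_pte, vaddr_t va)
    {
        write_pte(entry, new_pte); /* install new mapping (assumed helper) */
        flush_xen_data_tlb_range_va_local(va, PAGE_SIZE);
    }

As the comment on the new common helper notes, this covers data accesses only; changing code mappings additionally requires instruction-side maintenance.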
This is almost identical on both sub-architectures.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v4: New patch
---
 xen/include/asm-arm/arm32/page.h | 19 +++----------------
 xen/include/asm-arm/arm64/page.h | 19 +++----------------
 xen/include/asm-arm/page.h       | 18 ++++++++++++++++++
 3 files changed, 24 insertions(+), 32 deletions(-)