@@ -215,7 +215,7 @@ void set_fixmap(unsigned map, unsigned long mfn, unsigned attributes)
pte.pt.table = 1; /* 4k mappings always have this bit set */
pte.pt.xn = 1;
write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
- flush_xen_data_tlb_range_va_local(FIXMAP_ADDR(map), PAGE_SIZE);
+ flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
}
/* Remove a mapping from a fixmap entry */
@@ -223,7 +223,7 @@ void clear_fixmap(unsigned map)
{
lpae_t pte = {0};
write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
- flush_xen_data_tlb_range_va_local(FIXMAP_ADDR(map), PAGE_SIZE);
+ flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
}
#ifdef CONFIG_DOMAIN_PAGE
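For context only (not part of the patch), a minimal sketch of how a fixmap slot might be used; FIXMAP_MISC and the BUFFERABLE attribute are assumed names for illustration. Because every CPU resolves FIXMAP_ADDR(map) through the same global page tables, a purely local TLB flush would let other processors keep using a stale translation, which is why set_fixmap()/clear_fixmap() now flush the inner-shareable domain:

/* Illustrative sketch: map an arbitrary MFN into a spare fixmap slot,
 * read from it, then tear the mapping down again.  The broadcast flush
 * inside set_fixmap()/clear_fixmap() prevents any other CPU from using
 * a stale translation for FIXMAP_ADDR(FIXMAP_MISC). */
static void copy_from_mfn(void *dst, unsigned long mfn, size_t len)
{
    void *src = (void *)FIXMAP_ADDR(FIXMAP_MISC); /* hypothetical slot */

    ASSERT(len <= PAGE_SIZE);

    set_fixmap(FIXMAP_MISC, mfn, BUFFERABLE);     /* map + broadcast flush */
    memcpy(dst, src, len);
    clear_fixmap(FIXMAP_MISC);                    /* unmap + broadcast flush */
}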
@@ -403,7 +403,7 @@ void __init remove_early_mappings(void)
{
lpae_t pte = {0};
write_pte(xen_second + second_table_offset(BOOT_FDT_VIRT_START), pte);
- flush_xen_data_tlb_range_va_local(BOOT_FDT_VIRT_START, SECOND_SIZE);
+ flush_xen_data_tlb_range_va(BOOT_FDT_VIRT_START, SECOND_SIZE);
}
extern void relocate_xen(uint64_t ttbr, void *src, void *dst, size_t len);
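For illustration (not part of the patch), a simplified counterpart to remove_early_mappings(), showing why SECOND_SIZE is the flush granularity here: the boot FDT mapping torn down above occupies a single second-level (2MB) slot at BOOT_FDT_VIRT_START. fdt_to_second_pte() is a hypothetical stand-in for the real early-boot mapping code:

/* Illustrative sketch only.  Build a 2MB second-level entry covering the
 * device tree and install it at BOOT_FDT_VIRT_START; the removal above
 * invalidates the same SECOND_SIZE range on all CPUs. */
static void __init map_early_fdt(paddr_t fdt_paddr)
{
    lpae_t pte = fdt_to_second_pte(fdt_paddr);  /* hypothetical helper */

    write_pte(xen_second + second_table_offset(BOOT_FDT_VIRT_START), pte);
    flush_xen_data_tlb_range_va(BOOT_FDT_VIRT_START, SECOND_SIZE);
}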
@@ -69,6 +69,13 @@ static inline void __flush_xen_data_tlb_one_local(vaddr_t va)
asm volatile(STORE_CP32(0, TLBIMVAH) : : "r" (va) : "memory");
}
+/* Flush TLB of all processors in the inner-shareable domain for
+ * address va. */
+static inline void __flush_xen_data_tlb_one(vaddr_t va)
+{
+ asm volatile(STORE_CP32(0, TLBIMVAHIS) : : "r" (va) : "memory");
+}
+
/* Ask the MMU to translate a VA for us */
static inline uint64_t __va_to_par(vaddr_t va)
{
@@ -61,6 +61,13 @@ static inline void __flush_xen_data_tlb_one_local(vaddr_t va)
asm volatile("tlbi vae2, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
}
+/* Flush TLB of all processors in the inner-shareable domain for
+ * address va. */
+static inline void __flush_xen_data_tlb_one(vaddr_t va)
+{
+ asm volatile("tlbi vae2is, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
+}
+
/* Ask the MMU to translate a VA for us */
static inline uint64_t __va_to_par(vaddr_t va)
{
@@ -324,6 +324,24 @@ static inline void flush_xen_data_tlb_range_va_local(unsigned long va,
isb();
}
+/*
+ * Flush a range of VA's hypervisor mappings from the data TLB of all
+ * processors in the inner-shareable domain. This is not sufficient
+ * when changing code mappings or for self modifying code.
+ */
+static inline void flush_xen_data_tlb_range_va(unsigned long va,
+ unsigned long size)
+{
+ unsigned long end = va + size;
+ dsb(sy); /* Ensure preceding writes are visible */
+ while ( va < end ) {
+ __flush_xen_data_tlb_one(va);
+ va += PAGE_SIZE;
+ }
+ dsb(sy); /* Ensure completion of the TLB flush */
+ isb();
+}
+
/* Flush the dcache for an entire page. */
void flush_page_to_ram(unsigned long mfn);
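As a usage sketch (not part of the patch), the new helper lets a caller batch several global PTE updates and pay the DSB/ISB cost once for the whole range; clear_fixmap_range() and its arguments are hypothetical:

/* Illustrative sketch: tear down a run of consecutive fixmap slots and
 * invalidate the whole virtual range with one ranged, inner-shareable
 * flush instead of issuing a separate flush per slot. */
static void clear_fixmap_range(unsigned first, unsigned nr)
{
    lpae_t pte = {0};
    unsigned i;

    for ( i = 0; i < nr; i++ )
        write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(first + i)), pte);

    /* A single DSB/ISB pair covers the whole batch of invalidations. */
    flush_xen_data_tlb_range_va(FIXMAP_ADDR(first), nr * PAGE_SIZE);
}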
These mappings are global and therefore need flushing on all processors.
Add flush_xen_data_tlb_range_va, which accomplishes this. Likewise when
removing the early mappings.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v4: also make the change in remove_early_mappings;
    consolidate the flush_xen_data_tlb_range_va implementation
v3: use dsb(sy) not dsb()
---
 xen/arch/arm/mm.c                |    6 +++---
 xen/include/asm-arm/arm32/page.h |    7 +++++++
 xen/include/asm-arm/arm64/page.h |    7 +++++++
 xen/include/asm-arm/page.h       |   18 ++++++++++++++++++
 4 files changed, 35 insertions(+), 3 deletions(-)