@@ -1,59 +1,44 @@
#ifndef __ASM_ARM_ARM32_FLUSHTLB_H__
#define __ASM_ARM_ARM32_FLUSHTLB_H__
-/* Flush local TLBs, current VMID only */
-static inline void flush_guest_tlb_local(void)
-{
-    dsb(sy);
-
-    WRITE_CP32((uint32_t) 0, TLBIALL);
-
-    dsb(sy);
-    isb();
+/*
+ * Every invalidation operation uses the following pattern:
+ *
+ * DSB ISHST        // Ensure prior page-table updates have completed
+ * TLBI...          // Invalidate the TLB
+ * DSB ISH          // Ensure the TLB invalidation has completed
+ * ISB              // See explanation below
+ *
+ * For the Xen page-tables, the ISB will discard any instructions fetched
+ * from the old mappings.
+ *
+ * For the Stage-2 page-tables, the ISB ensures the completion of the DSB
+ * (and therefore the TLB invalidation) before continuing. So we know
+ * the TLBs cannot contain an entry for a mapping we may have removed.
+ */
+#define TLB_HELPER(name, tlbop) \
+static inline void name(void)   \
+{                               \
+    dsb(ishst);                 \
+    WRITE_CP32(0, tlbop);       \
+    dsb(ish);                   \
+    isb();                      \
}
-/* Flush inner shareable TLBs, current VMID only */
-static inline void flush_guest_tlb(void)
-{
-    dsb(sy);
-
-    WRITE_CP32((uint32_t) 0, TLBIALLIS);
+/* Flush local TLBs, current VMID only */
+TLB_HELPER(flush_guest_tlb_local, TLBIALL);
-    dsb(sy);
-    isb();
-}
+/* Flush inner shareable TLBs, current VMID only */
+TLB_HELPER(flush_guest_tlb, TLBIALLIS);
/* Flush local TLBs, all VMIDs, non-hypervisor mode */
-static inline void flush_all_guests_tlb_local(void)
-{
-    dsb(sy);
-
-    WRITE_CP32((uint32_t) 0, TLBIALLNSNH);
-
-    dsb(sy);
-    isb();
-}
+TLB_HELPER(flush_all_guests_tlb_local, TLBIALLNSNH);
/* Flush innershareable TLBs, all VMIDs, non-hypervisor mode */
-static inline void flush_all_guests_tlb(void)
-{
-    dsb(sy);
-
-    WRITE_CP32((uint32_t) 0, TLBIALLNSNHIS);
-
-    dsb(sy);
-    isb();
-}
+TLB_HELPER(flush_all_guests_tlb, TLBIALLNSNHIS);
/* Flush all hypervisor mappings from the TLB of the local processor. */
-static inline void flush_xen_tlb_local(void)
-{
- asm volatile("dsb;" /* Ensure preceding are visible */
- CMD_CP32(TLBIALLH)
- "dsb;" /* Ensure completion of the TLB flush */
- "isb;"
- : : : "memory");
-}
+TLB_HELPER(flush_xen_tlb_local, TLBIALLH);
/* Flush TLB of local processor for address va. */
static inline void __flush_xen_tlb_one_local(vaddr_t va)
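
For reference, here is what the new arm32 helper looks like once the preprocessor expands the macro; a sketch of TLB_HELPER(flush_guest_tlb_local, TLBIALL), assuming the dsb()/isb() barrier macros and the WRITE_CP32() coprocessor accessor behave as in the deleted open-coded helpers:

static inline void flush_guest_tlb_local(void)
{
    dsb(ishst);              /* Ensure prior page-table updates have completed */
    WRITE_CP32(0, TLBIALL);  /* Invalidate all TLB entries for the current VMID */
    dsb(ish);                /* Ensure the TLB invalidation has completed */
    isb();                   /* Complete the invalidation before continuing */
}

Note that besides deduplicating the helpers, the rework also relaxes the barriers from dsb(sy) to dsb(ishst)/dsb(ish): full-system ordering is unnecessary here, since the inner-shareable domain is sufficient for page-table updates and TLB maintenance.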
@@ -1,60 +1,46 @@
#ifndef __ASM_ARM_ARM64_FLUSHTLB_H__
#define __ASM_ARM_ARM64_FLUSHTLB_H__
-/* Flush local TLBs, current VMID only */
-static inline void flush_guest_tlb_local(void)
-{
-    asm volatile(
-        "dsb sy;"
-        "tlbi vmalls12e1;"
-        "dsb sy;"
-        "isb;"
-        : : : "memory");
+/*
+ * Every invalidation operation uses the following pattern:
+ *
+ * DSB ISHST        // Ensure prior page-table updates have completed
+ * TLBI...          // Invalidate the TLB
+ * DSB ISH          // Ensure the TLB invalidation has completed
+ * ISB              // See explanation below
+ *
+ * For the Xen page-tables, the ISB will discard any instructions fetched
+ * from the old mappings.
+ *
+ * For the Stage-2 page-tables, the ISB ensures the completion of the DSB
+ * (and therefore the TLB invalidation) before continuing. So we know
+ * the TLBs cannot contain an entry for a mapping we may have removed.
+ */
+#define TLB_HELPER(name, tlbop) \
+static inline void name(void)   \
+{                               \
+    asm volatile(               \
+        "dsb ishst;"            \
+        "tlbi " # tlbop ";"     \
+        "dsb ish;"              \
+        "isb;"                  \
+        : : : "memory");        \
}
+/* Flush local TLBs, current VMID only */
+TLB_HELPER(flush_guest_tlb_local, vmalls12e1);
+
/* Flush innershareable TLBs, current VMID only */
-static inline void flush_guest_tlb(void)
-{
-    asm volatile(
-        "dsb sy;"
-        "tlbi vmalls12e1is;"
-        "dsb sy;"
-        "isb;"
-        : : : "memory");
-}
+TLB_HELPER(flush_guest_tlb, vmalls12e1is);
/* Flush local TLBs, all VMIDs, non-hypervisor mode */
-static inline void flush_all_guests_tlb_local(void)
-{
-    asm volatile(
-        "dsb sy;"
-        "tlbi alle1;"
-        "dsb sy;"
-        "isb;"
-        : : : "memory");
-}
+TLB_HELPER(flush_all_guests_tlb_local, alle1);
/* Flush innershareable TLBs, all VMIDs, non-hypervisor mode */
-static inline void flush_all_guests_tlb(void)
-{
-    asm volatile(
-        "dsb sy;"
-        "tlbi alle1is;"
-        "dsb sy;"
-        "isb;"
-        : : : "memory");
-}
+TLB_HELPER(flush_all_guests_tlb, alle1is);
/* Flush all hypervisor mappings from the TLB of the local processor. */
-static inline void flush_xen_tlb_local(void)
-{
-    asm volatile (
-        "dsb sy;"     /* Ensure visibility of PTE writes */
-        "tlbi alle2;" /* Flush hypervisor TLB */
-        "dsb sy;"     /* Ensure completion of TLB flush */
-        "isb;"
-        : : : "memory");
-}
+TLB_HELPER(flush_xen_tlb_local, alle2);
/* Flush TLB of local processor for address va. */
static inline void __flush_xen_tlb_one_local(vaddr_t va)
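
Likewise on arm64, the macro stringizes its tlbop argument into the TLBI instruction ("tlbi " # tlbop ";" pastes into, e.g., "tlbi alle2;"); a sketch of the expansion of TLB_HELPER(flush_xen_tlb_local, alle2):

static inline void flush_xen_tlb_local(void)
{
    asm volatile(
        "dsb ishst;"   /* Ensure prior page-table updates have completed */
        "tlbi alle2;"  /* Invalidate all hypervisor (EL2) TLB entries */
        "dsb ish;"     /* Ensure the TLB invalidation has completed */
        "isb;"         /* Discard instructions fetched from the old mappings */
        : : : "memory");
}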