diff --git a/lib/arm/asm/mmu.h b/lib/arm/asm/mmu.h
--- a/lib/arm/asm/mmu.h
+++ b/lib/arm/asm/mmu.h
@@ -14,8 +14,11 @@
#define PTE_AF PTE_EXT_AF
#define PTE_WBWA L_PTE_MT_WRITEALLOC
+/* See B3.18.7 TLB maintenance operations */
+
static inline void local_flush_tlb_all(void)
{
+ /* TLBIALL */
asm volatile("mcr p15, 0, %0, c8, c7, 0" :: "r" (0));
dsb();
isb();
@@ -27,6 +30,14 @@ static inline void flush_tlb_all(void)
local_flush_tlb_all();
}
+static inline void flush_tlb_page(unsigned long vaddr)
+{
+ /* TLBIMVAA */
+ asm volatile("mcr p15, 0, %0, c8, c7, 3" :: "r" (vaddr));
+ dsb();
+ isb();
+}
+
#include <asm/mmu-api.h>
#endif /* __ASMARM_MMU_H_ */
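
For reference, TLBIMVAA (CP15 c8, c7, opc2 3) invalidates unified TLB
entries for the given MVA across all ASIDs, so callers don't need to
know which ASID a page was mapped under. A minimal caller sketch, not
part of this patch -- the helper name and the pteval_t descriptor type
are assumptions standing in for whatever page-table code the test uses:

static inline void write_pte_and_flush(pteval_t *ptep, pteval_t pte,
				       unsigned long vaddr)
{
	*ptep = pte;		/* update the descriptor in memory */
	flush_tlb_page(vaddr);	/* discard any stale translation */
}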

diff --git a/lib/arm64/asm/mmu.h b/lib/arm64/asm/mmu.h
--- a/lib/arm64/asm/mmu.h
+++ b/lib/arm64/asm/mmu.h
@@ -19,6 +19,16 @@ static inline void flush_tlb_all(void)
isb();
}
+static inline void flush_tlb_page(unsigned long vaddr)
+{
+ unsigned long page = vaddr >> 12;
+ dsb(ishst);
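+ /* TLBI VAAE1IS: invalidate by VA, all ASIDs, EL1, Inner Shareable */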
+ asm("tlbi vaae1is, %0" :: "r" (page));
+ dsb(ish);
+ isb();
+}
+
#include <asm/mmu-api.h>
#endif /* __ASMARM64_MMU_H_ */
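
The shift deserves a note: the TLBI VAAE1IS operand is the virtual
page number, not the raw address -- bits [43:0] of the source register
carry VA[55:12]. Spelled out as an illustrative helper (assuming a 4K
granule; not proposed for the tree):

static inline unsigned long tlbi_vaa_operand(unsigned long vaddr)
{
	/* VA[55:12] goes in bits [43:0]; upper bits should be zero */
	return (vaddr >> 12) & ((1UL << 44) - 1);
}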

This introduces a new flush_tlb_page function which invalidates the
cached translation for a single virtual address: TLBIMVAA on arm,
TLBI VAAE1IS on arm64. It's going to be useful for the future TLB
torture test.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
 lib/arm/asm/mmu.h   | 11 +++++++++++
 lib/arm64/asm/mmu.h | 10 ++++++++++
 2 files changed, 21 insertions(+)
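
Below the fold: a rough sketch of the kind of loop the torture test
might run on top of this. Everything here is an assumption for
illustration (pteval_t, the two prebuilt descriptors, the iteration
count), not code from this series:

static void tlb_remap_loop(unsigned long vaddr, pteval_t *ptep,
			   pteval_t pte_a, pteval_t pte_b)
{
	int i;

	/*
	 * Flip one PTE between two mappings, flushing the stale
	 * translation each iteration; the store through vaddr must
	 * always hit the freshly installed mapping.
	 */
	for (i = 0; i < 1000; i++) {
		*ptep = (i & 1) ? pte_a : pte_b;
		flush_tlb_page(vaddr);
		*(volatile int *)vaddr = i;
	}
}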