--- a/xen/include/asm-arm/arm32/flushtlb.h
+++ b/xen/include/asm-arm/arm32/flushtlb.h
@@ -4,44 +4,44 @@
/* Flush local TLBs, current VMID only */
static inline void flush_tlb_local(void)
{
- dsb(sy);
+ dsb(nsh);
WRITE_CP32((uint32_t) 0, TLBIALL);
- dsb(sy);
+ dsb(nsh);
isb();
}
/* Flush inner shareable TLBs, current VMID only */
static inline void flush_tlb(void)
{
- dsb(sy);
+ dsb(ish);
WRITE_CP32((uint32_t) 0, TLBIALLIS);
- dsb(sy);
+ dsb(ish);
isb();
}
/* Flush local TLBs, all VMIDs, non-hypervisor mode */
static inline void flush_tlb_all_local(void)
{
- dsb(sy);
+ dsb(nsh);
WRITE_CP32((uint32_t) 0, TLBIALLNSNH);
- dsb(sy);
+ dsb(nsh);
isb();
}
/* Flush innershareable TLBs, all VMIDs, non-hypervisor mode */
static inline void flush_tlb_all(void)
{
- dsb(sy);
+ dsb(ish);
WRITE_CP32((uint32_t) 0, TLBIALLNSNHIS);
- dsb(sy);
+ dsb(ish);
isb();
}
--- a/xen/include/asm-arm/arm32/page.h
+++ b/xen/include/asm-arm/arm32/page.h
@@ -12,10 +12,10 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
{
asm volatile (
/* Ensure any writes have completed with the old mappings. */
- "dsb;"
+ "dsb ish;"
/* Safely write the entry (STRD is atomic on CPUs that support LPAE) */
"strd %0, %H0, [%1];"
- "dsb;"
+ "dsb ish;"
: : "r" (pte.bits), "r" (p) : "memory");
}
@@ -42,7 +42,7 @@ static inline void flush_xen_text_tlb_local(void)
CMD_CP32(TLBIALLH) /* Flush hypervisor TLB */
CMD_CP32(ICIALLU) /* Flush I-cache */
CMD_CP32(BPIALL) /* Flush branch predictor */
- "dsb;" /* Ensure completion of TLB+BP flush */
+ "dsb nsh;" /* Ensure completion of TLB+BP flush */
"isb;"
: : : "memory");
}
@@ -54,9 +54,9 @@ static inline void flush_xen_text_tlb_local(void)
*/
static inline void flush_xen_data_tlb_local(void)
{
- asm volatile("dsb;" /* Ensure preceding are visible */
+ asm volatile("dsb nsh;" /* Ensure preceding are visible */
CMD_CP32(TLBIALLH)
- "dsb;" /* Ensure completion of the TLB flush */
+ "dsb nsh;" /* Ensure completion of the TLB flush */
"isb;"
: : : "memory");
}
--- a/xen/include/asm-arm/arm64/flushtlb.h
+++ b/xen/include/asm-arm/arm64/flushtlb.h
@@ -5,9 +5,9 @@
static inline void flush_tlb_local(void)
{
asm volatile(
- "dsb sy;"
+ "dsb nsh;"
"tlbi vmalle1;"
- "dsb sy;"
+ "dsb nsh;"
"isb;"
: : : "memory");
}
@@ -16,9 +16,9 @@ static inline void flush_tlb_local(void)
static inline void flush_tlb(void)
{
asm volatile(
- "dsb sy;"
+ "dsb ish;"
"tlbi vmalle1is;"
- "dsb sy;"
+ "dsb ish;"
"isb;"
: : : "memory");
}
@@ -27,9 +27,9 @@ static inline void flush_tlb(void)
static inline void flush_tlb_all_local(void)
{
asm volatile(
- "dsb sy;"
+ "dsb nsh;"
"tlbi alle1;"
- "dsb sy;"
+ "dsb nsh;"
"isb;"
: : : "memory");
}
@@ -38,9 +38,9 @@ static inline void flush_tlb_all_local(void)
static inline void flush_tlb_all(void)
{
asm volatile(
- "dsb sy;"
+ "dsb ish;"
"tlbi alle1is;"
- "dsb sy;"
+ "dsb ish;"
"isb;"
: : : "memory");
}
--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h
@@ -8,9 +8,9 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
{
asm volatile (
/* Ensure any writes have completed with the old mappings. */
- "dsb sy;"
+ "dsb ish;"
"str %0, [%1];" /* Write the entry */
- "dsb sy;"
+ "dsb ish;"
: : "r" (pte.bits), "r" (p) : "memory");
}
@@ -35,7 +35,7 @@ static inline void flush_xen_text_tlb_local(void)
"isb;" /* Ensure synchronization with previous changes to text */
"tlbi alle2;" /* Flush hypervisor TLB */
"ic iallu;" /* Flush I-cache */
- "dsb sy;" /* Ensure completion of TLB flush */
+ "dsb nsh;" /* Ensure completion of TLB flush */
"isb;"
: : : "memory");
}
@@ -48,9 +48,9 @@ static inline void flush_xen_text_tlb_local(void)
static inline void flush_xen_data_tlb_local(void)
{
asm volatile (
- "dsb sy;" /* Ensure visibility of PTE writes */
+ "dsb nsh;" /* Ensure visibility of PTE writes */
"tlbi alle2;" /* Flush hypervisor TLB */
- "dsb sy;" /* Ensure completion of TLB flush */
+ "dsb nsh;" /* Ensure completion of TLB flush */
"isb;"
: : : "memory");
}
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -315,12 +315,12 @@ static inline void flush_xen_data_tlb_range_va_local(unsigned long va,
unsigned long size)
{
unsigned long end = va + size;
- dsb(sy); /* Ensure preceding are visible */
+ dsb(nsh); /* Ensure preceding are visible */
while ( va < end ) {
__flush_xen_data_tlb_one_local(va);
va += PAGE_SIZE;
}
- dsb(sy); /* Ensure completion of the TLB flush */
+ dsb(nsh); /* Ensure completion of the TLB flush */
isb();
}
@@ -333,12 +333,12 @@ static inline void flush_xen_data_tlb_range_va(unsigned long va,
unsigned long size)
{
unsigned long end = va + size;
- dsb(sy); /* Ensure preceding are visible */
+ dsb(ish); /* Ensure preceding are visible */
while ( va < end ) {
__flush_xen_data_tlb_one(va);
va += PAGE_SIZE;
}
- dsb(sy); /* Ensure completion of the TLB flush */
+ dsb(ish); /* Ensure completion of the TLB flush */
isb();
}
Flushing the local TLB only requires a local barrier. Flushing the TLB of
all processors in the inner-shareable domain only requires an
inner-shareable barrier.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v4: new patch
---
 xen/include/asm-arm/arm32/flushtlb.h | 16 ++++++++--------
 xen/include/asm-arm/arm32/page.h     | 10 +++++-----
 xen/include/asm-arm/arm64/flushtlb.h | 16 ++++++++--------
 xen/include/asm-arm/arm64/page.h     | 10 +++++-----
 xen/include/asm-arm/page.h           |  8 ++++----
 5 files changed, 30 insertions(+), 30 deletions(-)
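
For reference, the dsb(scope) calls in the C-level flush helpers above rely
on a macro that pastes the requested barrier domain into the DSB
instruction. A minimal sketch of such a macro is given below; it is an
illustration of the idea only, assuming a GCC-style stringizing macro, and
is not necessarily Xen's exact definition:

    /* Illustrative sketch only; Xen's real helpers may differ.
     * The scope argument selects the shareability domain the barrier
     * applies to: "nsh" (non-shareable, i.e. the local CPU only),
     * "ish" (inner-shareable, i.e. all CPUs in the inner-shareable
     * domain) or "sy" (full system, including outer-shareable and
     * device accesses). */
    #define dsb(scope) asm volatile("dsb " #scope : : : "memory")
    #define isb()      asm volatile("isb" : : : "memory")

With such a definition, dsb(nsh) in flush_tlb_local() expands to "dsb nsh",
which only orders accesses made by the local CPU, while dsb(ish) in
flush_tlb() expands to "dsb ish", ordering accesses across the
inner-shareable domain that the TLBIALLIS/vmalle1is broadcast flushes
target. dsb(sy), by contrast, also waits for outer-shareable and device
accesses, which is stronger than either kind of flush requires.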