@@ -27,7 +27,7 @@ static unsigned long raw_copy_to_guest_helper(void *to, const void *from,
p += offset;
memcpy(p, from, size);
if ( flush_dcache )
- clean_xen_dcache_va_range(p, size);
+ clean_dcache_va_range(p, size);
unmap_domain_page(p - offset);
put_page(page);
@@ -57,7 +57,7 @@ void copy_from_paddr(void *dst, paddr_t paddr, unsigned long len)
set_fixmap(FIXMAP_MISC, p, BUFFERABLE);
memcpy(dst, src + s, l);
- clean_xen_dcache_va_range(dst, l);
+ clean_dcache_va_range(dst, l);
paddr += l;
dst += l;
@@ -381,7 +381,7 @@ void flush_page_to_ram(unsigned long mfn)
{
void *v = map_domain_page(mfn);
- clean_and_invalidate_xen_dcache_va_range(v, PAGE_SIZE);
+ clean_and_invalidate_dcache_va_range(v, PAGE_SIZE);
unmap_domain_page(v);
}
@@ -504,17 +504,17 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
/* Clear the copy of the boot pagetables. Each secondary CPU
* rebuilds these itself (see head.S) */
memset(boot_pgtable, 0x0, PAGE_SIZE);
- clean_and_invalidate_xen_dcache(boot_pgtable);
+ clean_and_invalidate_dcache(boot_pgtable);
#ifdef CONFIG_ARM_64
memset(boot_first, 0x0, PAGE_SIZE);
- clean_and_invalidate_xen_dcache(boot_first);
+ clean_and_invalidate_dcache(boot_first);
memset(boot_first_id, 0x0, PAGE_SIZE);
- clean_and_invalidate_xen_dcache(boot_first_id);
+ clean_and_invalidate_dcache(boot_first_id);
#endif
memset(boot_second, 0x0, PAGE_SIZE);
- clean_and_invalidate_xen_dcache(boot_second);
+ clean_and_invalidate_dcache(boot_second);
memset(boot_third, 0x0, PAGE_SIZE);
- clean_and_invalidate_xen_dcache(boot_third);
+ clean_and_invalidate_dcache(boot_third);
/* Break up the Xen mapping into 4k pages and protect them separately. */
for ( i = 0; i < LPAE_ENTRIES; i++ )
@@ -552,7 +552,7 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
/* Make sure it is clear */
memset(this_cpu(xen_dommap), 0, DOMHEAP_SECOND_PAGES*PAGE_SIZE);
- clean_xen_dcache_va_range(this_cpu(xen_dommap),
+ clean_dcache_va_range(this_cpu(xen_dommap),
DOMHEAP_SECOND_PAGES*PAGE_SIZE);
#endif
}
@@ -563,7 +563,7 @@ int init_secondary_pagetables(int cpu)
/* Set init_ttbr for this CPU coming up. All CPUs share a single set of
 * pagetables, but rewrite it each time for consistency with 32 bit. */
init_ttbr = (uintptr_t) xen_pgtable + phys_offset;
- clean_xen_dcache(init_ttbr);
+ clean_dcache(init_ttbr);
return 0;
}
#else
@@ -598,15 +598,15 @@ int init_secondary_pagetables(int cpu)
write_pte(&first[first_table_offset(DOMHEAP_VIRT_START+i*FIRST_SIZE)], pte);
}
- clean_xen_dcache_va_range(first, PAGE_SIZE);
- clean_xen_dcache_va_range(domheap, DOMHEAP_SECOND_PAGES*PAGE_SIZE);
+ clean_dcache_va_range(first, PAGE_SIZE);
+ clean_dcache_va_range(domheap, DOMHEAP_SECOND_PAGES*PAGE_SIZE);
per_cpu(xen_pgtable, cpu) = first;
per_cpu(xen_dommap, cpu) = domheap;
/* Set init_ttbr for this CPU coming up */
init_ttbr = __pa(first);
- clean_xen_dcache(init_ttbr);
+ clean_dcache(init_ttbr);
return 0;
}
@@ -1273,7 +1273,7 @@ void clear_and_clean_page(struct page_info *page)
void *p = __map_domain_page(page);
clear_page(p);
- clean_xen_dcache_va_range(p, PAGE_SIZE);
+ clean_dcache_va_range(p, PAGE_SIZE);
unmap_domain_page(p);
}
@@ -302,7 +302,7 @@ static inline void p2m_write_pte(lpae_t *p, lpae_t pte, bool_t flush_cache)
{
write_pte(p, pte);
if ( flush_cache )
- clean_xen_dcache(*p);
+ clean_dcache(*p);
}
/*
@@ -362,7 +362,7 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
clear_page(p);
if ( flush_cache )
- clean_xen_dcache_va_range(p, PAGE_SIZE);
+ clean_dcache_va_range(p, PAGE_SIZE);
unmap_domain_page(p);
@@ -374,7 +374,7 @@ int __cpu_up(unsigned int cpu)
/* Open the gate for this CPU */
smp_up_cpu = cpu_logical_map(cpu);
- clean_xen_dcache(smp_up_cpu);
+ clean_dcache(smp_up_cpu);
rc = arch_cpu_up(cpu);
@@ -20,11 +20,11 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
}
/* Inline ASM to flush dcache on register R (may be an inline asm operand) */
-#define __clean_xen_dcache_one(R) STORE_CP32(R, DCCMVAC)
+#define __clean_dcache_one(R) STORE_CP32(R, DCCMVAC)
/* Inline ASM to clean and invalidate dcache on register R (may be an
* inline asm operand) */
-#define __clean_and_invalidate_xen_dcache_one(R) STORE_CP32(R, DCCIMVAC)
+#define __clean_and_invalidate_dcache_one(R) STORE_CP32(R, DCCIMVAC)
/*
* Flush all hypervisor mappings from the TLB and branch predictor of
@@ -15,11 +15,11 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
}
/* Inline ASM to flush dcache on register R (may be an inline asm operand) */
-#define __clean_xen_dcache_one(R) "dc cvac, %" #R ";"
+#define __clean_dcache_one(R) "dc cvac, %" #R ";"
/* Inline ASM to clean and invalidate dcache on register R (may be an
* inline asm operand) */
-#define __clean_and_invalidate_xen_dcache_one(R) "dc civac, %" #R ";"
+#define __clean_and_invalidate_dcache_one(R) "dc civac, %" #R ";"
/*
* Flush all hypervisor mappings from the TLB of the local processor.
@@ -268,48 +268,48 @@ extern size_t cacheline_bytes;
/* Functions for flushing medium-sized areas.
* if 'range' is large enough we might want to use model-specific
* full-cache flushes. */
-static inline void clean_xen_dcache_va_range(const void *p, unsigned long size)
+static inline void clean_dcache_va_range(const void *p, unsigned long size)
{
const void *end;
dsb(sy); /* So the CPU issues all writes to the range */
for ( end = p + size; p < end; p += cacheline_bytes )
- asm volatile (__clean_xen_dcache_one(0) : : "r" (p));
+ asm volatile (__clean_dcache_one(0) : : "r" (p));
dsb(sy); /* So we know the flushes happen before continuing */
}
-static inline void clean_and_invalidate_xen_dcache_va_range
+static inline void clean_and_invalidate_dcache_va_range
(const void *p, unsigned long size)
{
const void *end;
dsb(sy); /* So the CPU issues all writes to the range */
for ( end = p + size; p < end; p += cacheline_bytes )
- asm volatile (__clean_and_invalidate_xen_dcache_one(0) : : "r" (p));
+ asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
dsb(sy); /* So we know the flushes happen before continuing */
}
/* Macros for flushing a single small item. The predicate is always
* compile-time constant so this will compile down to 3 instructions in
* the common case. */
-#define clean_xen_dcache(x) do { \
+#define clean_dcache(x) do { \
typeof(x) *_p = &(x); \
if ( sizeof(x) > MIN_CACHELINE_BYTES || sizeof(x) > alignof(x) ) \
- clean_xen_dcache_va_range(_p, sizeof(x)); \
+ clean_dcache_va_range(_p, sizeof(x)); \
else \
asm volatile ( \
"dsb sy;" /* Finish all earlier writes */ \
- __clean_xen_dcache_one(0) \
+ __clean_dcache_one(0) \
"dsb sy;" /* Finish flush before continuing */ \
: : "r" (_p), "m" (*_p)); \
} while (0)
-#define clean_and_invalidate_xen_dcache(x) do { \
+#define clean_and_invalidate_dcache(x) do { \
typeof(x) *_p = &(x); \
if ( sizeof(x) > MIN_CACHELINE_BYTES || sizeof(x) > alignof(x) ) \
- clean_and_invalidate_xen_dcache_va_range(_p, sizeof(x)); \
+ clean_and_invalidate_dcache_va_range(_p, sizeof(x)); \
else \
asm volatile ( \
"dsb sy;" /* Finish all earlier writes */ \
- __clean_and_invalidate_xen_dcache_one(0) \
+ __clean_and_invalidate_dcache_one(0) \
"dsb sy;" /* Finish flush before continuing */ \
: : "r" (_p), "m" (*_p)); \
} while (0)
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
 xen/arch/arm/guestcopy.c         |  2 +-
 xen/arch/arm/kernel.c            |  2 +-
 xen/arch/arm/mm.c                | 24 ++++++++++++------------
 xen/arch/arm/p2m.c               |  4 ++--
 xen/arch/arm/smpboot.c           |  2 +-
 xen/include/asm-arm/arm32/page.h |  4 ++--
 xen/include/asm-arm/arm64/page.h |  4 ++--
 xen/include/asm-arm/page.h       | 20 ++++++++++----------
 8 files changed, 31 insertions(+), 31 deletions(-)
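For reference, here is a minimal host-side sketch (not Xen code) of how the renamed helpers divide the work: clean_dcache() handles a single small, naturally aligned object and, per the comment in the page.h hunk, reduces to a dsb/dc cvac/dsb sequence, while clean_dcache_va_range() walks a larger area one cache line at a time. The barriers and the DC CVAC instruction are replaced by a counter stub so the dispatch logic can be compiled and run anywhere; MIN_CACHELINE_BYTES, the 64-byte line size, and the stub names are assumptions for the sketch only (it relies on GNU C typeof/__alignof__, as the Xen macros do).

    #include <stdio.h>

    #define MIN_CACHELINE_BYTES 32          /* assumed value for the sketch */
    static unsigned long cacheline_bytes = 64;
    static unsigned long lines_flushed;

    /* Stand-in for one "dc cvac, Xn" (DCCMVAC on arm32). */
    static void clean_dcache_one(const void *p)
    {
        (void)p;
        lines_flushed++;
    }

    static void clean_dcache_va_range(const void *p, unsigned long size)
    {
        const void *end;
        /* dsb(sy) here in the real code: publish earlier writes first */
        for ( end = (const char *)p + size; p < end;
              p = (const char *)p + cacheline_bytes )
            clean_dcache_one(p);
        /* dsb(sy) again: flushes complete before continuing */
    }

    #define clean_dcache(x) do {                                            \
        typeof(x) *_p = &(x);                                               \
        if ( sizeof(x) > MIN_CACHELINE_BYTES || sizeof(x) > __alignof__(x) )\
            clean_dcache_va_range(_p, sizeof(x));                           \
        else                                                                \
            clean_dcache_one(_p);   /* one dc cvac between two dsb sy */    \
    } while (0)

    int main(void)
    {
        unsigned long ttbr = 0;             /* small, naturally aligned */
        char buf[4096];                     /* page-sized buffer */

        clean_dcache(ttbr);
        printf("small item: %lu line(s) flushed\n", lines_flushed);

        lines_flushed = 0;
        clean_dcache_va_range(buf, sizeof(buf));
        printf("4096-byte range: %lu line(s) flushed\n", lines_flushed);
        return 0;
    }

Built with gcc, the first call reports one flushed line and the range call reports 64, which mirrors why callers in the patch use clean_dcache(init_ttbr) for a single word but clean_dcache_va_range(p, PAGE_SIZE) for whole pages.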