@@ -351,6 +351,10 @@ int xc_dom_gnttab_seed(xc_interface *xch, domid_t domid,
return -1;
}
+    /* The guest shouldn't really touch its grant table until it has
+     * enabled its caches. But let's be nice. */
+    xc_domain_cacheflush(xch, domid, gnttab_gmfn, 1);
+
return 0;
}
@@ -603,6 +603,8 @@ void xc_dom_unmap_one(struct xc_dom_image *dom, xen_pfn_t pfn)
prev->next = phys->next;
else
dom->phys_pages = phys->next;
+
+    xc_domain_cacheflush(dom->xch, dom->guest_domid, phys->first, phys->count);
}
void xc_dom_unmap_all(struct xc_dom_image *dom)
@@ -48,6 +48,16 @@ int xc_domain_create(xc_interface *xch,
return 0;
}
+int xc_domain_cacheflush(xc_interface *xch, uint32_t domid,
+                         xen_pfn_t start_pfn, xen_pfn_t nr_pfns)
+{
+    DECLARE_DOMCTL;
+    domctl.cmd = XEN_DOMCTL_cacheflush;
+    domctl.domain = (domid_t)domid;
+    domctl.u.cacheflush.start_pfn = start_pfn;
+    domctl.u.cacheflush.nr_pfns = nr_pfns;
+    return do_domctl(xch, &domctl);
+}
int xc_domain_pause(xc_interface *xch,
uint32_t domid)
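
Toolstack code can invoke the new wrapper directly. A minimal sketch of a caller (the domid and the 16-page range are purely illustrative, not part of the patch):

    #include <stdio.h>
    #include <xenctrl.h>

    int flush_guest_range(uint32_t domid)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        int rc;

        if ( xch == NULL )
            return -1;

        /* Illustrative range: the first 16 pages of the guest. */
        rc = xc_domain_cacheflush(xch, domid, 0, 16);
        if ( rc != 0 )
            perror("xc_domain_cacheflush");

        xc_interface_close(xch);
        return rc;
    }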
@@ -628,6 +628,7 @@ int xc_copy_to_domain_page(xc_interface *xch,
return -1;
memcpy(vaddr, src_page, PAGE_SIZE);
munmap(vaddr, PAGE_SIZE);
+    xc_domain_cacheflush(xch, domid, dst_pfn, 1);
return 0;
}
@@ -641,6 +642,7 @@ int xc_clear_domain_page(xc_interface *xch,
return -1;
memset(vaddr, 0, PAGE_SIZE);
munmap(vaddr, PAGE_SIZE);
+    xc_domain_cacheflush(xch, domid, dst_pfn, 1);
return 0;
}
@@ -304,6 +304,9 @@ void bitmap_byte_to_64(uint64_t *lp, const uint8_t *bp, int nbits);
/* Optionally flush file to disk and discard page cache */
void discard_file_cache(xc_interface *xch, int fd, int flush);
+int xc_domain_cacheflush(xc_interface *xch, uint32_t domid,
+                         xen_pfn_t start_pfn, xen_pfn_t nr_pfns);
+
#define MAX_MMU_UPDATES 1024
struct xc_mmu {
mmu_update_t updates[MAX_MMU_UPDATES];
@@ -17,6 +17,20 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
{
switch ( domctl->cmd )
{
+    case XEN_DOMCTL_cacheflush:
+    {
+        unsigned long s = domctl->u.cacheflush.start_pfn;
+        unsigned long e = s + domctl->u.cacheflush.nr_pfns;
+
+        if ( domctl->u.cacheflush.nr_pfns > (1U<<MAX_ORDER) )
+            return -EINVAL;
+
+        if ( e < s )
+            return -EINVAL;
+
+        return p2m_cache_flush(d, s, e);
+    }
+
default:
return subarch_do_domctl(domctl, d, u_domctl);
}
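
Because the hypervisor rejects requests larger than 1U<<MAX_ORDER pfns with -EINVAL, a caller with a bigger region must split it up. A hedged sketch of such a loop, building on the libxc wrapper above (the chunk size is an assumption chosen to stay under the hypervisor's limit):

    /* Flush [start, start + total) in chunks the hypervisor will accept. */
    static int cacheflush_range(xc_interface *xch, uint32_t domid,
                                xen_pfn_t start, xen_pfn_t total)
    {
        const xen_pfn_t chunk = 1UL << 18; /* assumed <= 1U << MAX_ORDER */

        while ( total > 0 )
        {
            xen_pfn_t batch = total < chunk ? total : chunk;

            if ( xc_domain_cacheflush(xch, domid, start, batch) != 0 )
                return -1;

            start += batch;
            total -= batch;
        }
        return 0;
    }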
@@ -342,6 +342,18 @@ unsigned long domain_page_map_to_mfn(const void *ptr)
}
#endif
+void flush_page_to_ram(unsigned long mfn)
+{
+    void *p, *v = map_domain_page(mfn);
+
+    dsb();           /* So the CPU issues all writes to the range */
+    for ( p = v; p < v + PAGE_SIZE; p += cacheline_bytes )
+        asm volatile (__clean_and_invalidate_xen_dcache_one(0) : : "r" (p));
+    dsb();           /* So we know the flushes happen before continuing */
+
+    unmap_domain_page(v);
+}
+
void __init arch_init_memory(void)
{
/*
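
A note on the loop in flush_page_to_ram(): cacheline_bytes is the ARM port's global dcache line size, determined at boot (on ARM it is read from the cache type register), so the function walks the mapped page one cache line at a time, cleaning and invalidating each line, with the surrounding dsb() barriers ordering the CPU's writes before the maintenance and the maintenance before anything that follows.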
@@ -8,6 +8,7 @@
#include <asm/gic.h>
#include <asm/event.h>
#include <asm/hardirq.h>
+#include <asm/page.h>
/* First level P2M is 2 consecutive pages */
#define P2M_FIRST_ORDER 1
@@ -228,6 +229,7 @@ enum p2m_operation {
ALLOCATE,
REMOVE,
RELINQUISH,
+    CACHEFLUSH,
};
static int apply_p2m_changes(struct domain *d,
@@ -381,6 +383,15 @@ static int apply_p2m_changes(struct domain *d,
count++;
}
break;
+
+            case CACHEFLUSH:
+                {
+                    if ( !pte.p2m.valid || !p2m_is_ram(pte.p2m.type) )
+                        break;
+
+                    flush_page_to_ram(pte.p2m.base);
+                }
+                break;
}
/* Preempt every 2MiB (mapped) or 32 MiB (unmapped) - arbitrary */
@@ -624,6 +635,20 @@ int relinquish_p2m_mapping(struct domain *d)
MATTR_MEM, p2m_invalid);
}
+int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn)
+{
+    struct p2m_domain *p2m = &d->arch.p2m;
+
+    start_mfn = MAX(start_mfn, p2m->lowest_mapped_gfn);
+    end_mfn = MIN(end_mfn, p2m->max_mapped_gfn);
+
+    return apply_p2m_changes(d, CACHEFLUSH,
+                             pfn_to_paddr(start_mfn),
+                             pfn_to_paddr(end_mfn),
+                             pfn_to_paddr(INVALID_MFN),
+                             MATTR_MEM, p2m_invalid);
+}
+
unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
{
paddr_t p = p2m_lookup(d, pfn_to_paddr(gpfn), NULL);
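
Naming note on p2m_cache_flush(): despite the start_mfn/end_mfn parameter names, the values passed in are guest frame numbers — they are clamped against the p2m's lowest_mapped_gfn/max_mapped_gfn and handed to apply_p2m_changes() as guest physical addresses.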
@@ -710,6 +710,11 @@ static struct page_info *alloc_heap_pages(
/* Initialise fields which have other uses for free pages. */
pg[i].u.inuse.type_info = 0;
page_set_owner(&pg[i], NULL);
+
+        /* Ensure cache and RAM are consistent on platforms where the
+         * guest can control its own visibility of/through the cache
+         * (e.g. an ARM guest running with its caches disabled). */
+        flush_page_to_ram(page_to_mfn(&pg[i]));
}
spin_unlock(&heap_lock);
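
Note that this call in the common page allocator costs nothing on x86: the x86 header change below adds an empty inline flush_page_to_ram() stub, so only architectures that actually need the maintenance (here, ARM) pay for it.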
@@ -22,6 +22,10 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
/* Inline ASM to flush dcache on register R (may be an inline asm operand) */
#define __flush_xen_dcache_one(R) STORE_CP32(R, DCCMVAC)
+/* Inline ASM to clean and invalidate dcache on register R (may be an
+ * inline asm operand) */
+#define __clean_and_invalidate_xen_dcache_one(R) STORE_CP32(R, DCCIMVAC)
+
/*
* Flush all hypervisor mappings from the TLB and branch predictor.
* This is needed after changing Xen code mappings.
@@ -17,6 +17,10 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
/* Inline ASM to flush dcache on register R (may be an inline asm operand) */
#define __flush_xen_dcache_one(R) "dc cvac, %" #R ";"
+/* Inline ASM to clean and invalidate dcache on register R (may be an
+ * inline asm operand) */
+#define __clean_and_invalidate_xen_dcache_one(R) "dc civac, %" #R ";"
+
/*
* Flush all hypervisor mappings from the TLB
* This is needed after changing Xen code mappings.
@@ -78,6 +78,9 @@ void p2m_load_VTTBR(struct domain *d);
/* Look up the MFN corresponding to a domain's PFN. */
paddr_t p2m_lookup(struct domain *d, paddr_t gpfn, p2m_type_t *t);
+/* Clean & invalidate caches corresponding to a region of guest address space */
+int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn);
+
/* Setup p2m RAM mapping for domain d from start-end. */
int p2m_populate_ram(struct domain *d, paddr_t start, paddr_t end);
/* Map MMIO regions in the p2m: start_gaddr and end_gaddr is the range
@@ -253,6 +253,9 @@ static inline void flush_xen_dcache_va_range(void *p, unsigned long size)
: : "r" (_p), "m" (*_p)); \
} while (0)
+/* Flush the dcache for an entire page, cleaning and invalidating it to RAM. */
+void flush_page_to_ram(unsigned long mfn);
+
/* Print a walk of an arbitrary page table */
void dump_pt_walk(lpae_t *table, paddr_t addr);
@@ -346,6 +346,9 @@ static inline uint32_t cacheattr_to_pte_flags(uint32_t cacheattr)
return ((cacheattr & 4) << 5) | ((cacheattr & 3) << 3);
}
+/* No cache maintenance is required on the x86 architecture. */
+static inline void flush_page_to_ram(unsigned long mfn) {}
+
/* return true if permission increased */
static inline bool_t
perms_strictly_increased(uint32_t old_flags, uint32_t new_flags)
@@ -885,6 +885,17 @@ struct xen_domctl_set_max_evtchn {
typedef struct xen_domctl_set_max_evtchn xen_domctl_set_max_evtchn_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_max_evtchn_t);
+/*
+ * ARM: Clean and invalidate caches associated with given region of
+ * guest memory.
+ */
+struct xen_domctl_cacheflush {
+    /* IN: page range to flush. */
+    xen_pfn_t start_pfn, nr_pfns;
+};
+typedef struct xen_domctl_cacheflush xen_domctl_cacheflush_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_cacheflush_t);
+
struct xen_domctl {
uint32_t cmd;
#define XEN_DOMCTL_createdomain 1
@@ -954,6 +965,7 @@ struct xen_domctl {
#define XEN_DOMCTL_setnodeaffinity 68
#define XEN_DOMCTL_getnodeaffinity 69
#define XEN_DOMCTL_set_max_evtchn 70
+#define XEN_DOMCTL_cacheflush 71
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
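
The libxc wrapper is thin; the same operation can also be issued through libxc's generic xc_domctl() entry point, which fills in the domctl interface version before making the hypercall. A sketch (the field values are illustrative):

    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_cacheflush,
        .domain = (domid_t)domid,
        .u.cacheflush = {
            .start_pfn = 0,   /* illustrative range */
            .nr_pfns   = 1,
        },
    };

    if ( xc_domctl(xch, &domctl) != 0 )
        perror("XEN_DOMCTL_cacheflush");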
@@ -1012,6 +1024,7 @@ struct xen_domctl {
struct xen_domctl_set_max_evtchn set_max_evtchn;
struct xen_domctl_gdbsx_memio gdbsx_guest_memio;
struct xen_domctl_set_broken_page_p2m set_broken_page_p2m;
+    struct xen_domctl_cacheflush cacheflush;
struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
struct xen_domctl_gdbsx_domstatus gdbsx_domstatus;
uint8_t pad[128];
@@ -737,6 +737,9 @@ static int flask_domctl(struct domain *d, int cmd)
case XEN_DOMCTL_set_max_evtchn:
return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SET_MAX_EVTCHN);
+    case XEN_DOMCTL_cacheflush:
+        return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__CACHEFLUSH);
+
default:
printk("flask_domctl: Unknown op %d\n", cmd);
return -EPERM;
@@ -1617,3 +1620,13 @@ static __init int flask_init(void)
}
xsm_initcall(flask_init);
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
@@ -196,6 +196,8 @@ class domain2
setclaim
# XEN_DOMCTL_set_max_evtchn
set_max_evtchn
+# XEN_DOMCTL_cacheflush
+    cacheflush
}
# Similar to class domain, but primarily contains domctls related to HVM domains