@@ -502,6 +502,11 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
/* Default the virtual ID to match the physical */
d->arch.vpidr = boot_cpu_data.midr.bits;
+ d->arch.dirty.second_lvl_start = 0;
+ d->arch.dirty.second_lvl_end = 0;
+ d->arch.dirty.second_lvl[0] = NULL;
+ d->arch.dirty.second_lvl[1] = NULL;
+
clear_page(d->shared_info);
share_xen_page_with_guest(
virt_to_page(d->shared_info), d, XENSHARE_writable);
@@ -1252,6 +1252,122 @@ void get_gma_start_end(struct domain *d, paddr_t *start, paddr_t *end)
*end = GUEST_RAM_BASE + ((paddr_t) d->max_pages << PAGE_SHIFT);
}
+/* Flush the VLPT area */
+void flush_vlpt(struct domain *d)
+{
+ int flush_size;
+ flush_size = (d->arch.dirty.second_lvl_end -
+ d->arch.dirty.second_lvl_start) << SECOND_SHIFT;
+
+ /* Flush the TLB entries covering the VLPT virtual range */
+ flush_xen_data_tlb_range_va(d->arch.dirty.second_lvl_start << SECOND_SHIFT,
+ flush_size);
+}
+
+/* Restore the Xen second-level entries for domain d's VLPT mapping */
+void restore_vlpt(struct domain *d)
+{
+ int i;
+
+ dsb(sy);
+
+ for ( i = d->arch.dirty.second_lvl_start;
+ i < d->arch.dirty.second_lvl_end;
+ ++i )
+ {
+ int k = i % LPAE_ENTRIES;
+ int l = i / LPAE_ENTRIES;
+
+ if ( xen_second[i].bits != d->arch.dirty.second_lvl[l][k].bits )
+ {
+ write_pte(&xen_second[i], d->arch.dirty.second_lvl[l][k]);
+ flush_xen_data_tlb_range_va(i << SECOND_SHIFT, 1 << SECOND_SHIFT);
+ }
+ }
+
+ dsb(sy);
+ isb();
+}
+
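The per-domain copy kept in d->arch.dirty.second_lvl exists so the VLPT slots in xen_second can be re-established when switching to a domain whose dirty-page tracing is active. A minimal sketch of how restore_vlpt might be hooked into the context-switch path follows; the ctxt_switch_to name follows Xen's existing ARM context-switch helper, while the d->arch.dirty.mode flag used as the gate is a hypothetical "tracing enabled" marker, not something introduced by this patch.

    /* Sketch: reinstall the domain's VLPT entries on context switch.
     * d->arch.dirty.mode is a hypothetical "tracing enabled" flag. */
    static void ctxt_switch_to(struct vcpu *n)
    {
        struct domain *d = n->domain;

        /* ... existing per-vcpu state restore ... */

        if ( !is_idle_domain(d) && d->arch.dirty.mode )
            restore_vlpt(d);    /* rewrite xen_second[] from the saved copy */
    }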
+/* Set up the Xen second-level entries for domain d's VLPT mapping */
+int prepare_vlpt(struct domain *d)
+{
+ int xen_second_linear_base;
+ int gp2m_start_index, gp2m_end_index;
+ struct p2m_domain *p2m = &d->arch.p2m;
+ struct page_info *second_lvl_page;
+ paddr_t gma_start = 0;
+ paddr_t gma_end = 0;
+ lpae_t *first[2];
+ int i;
+ uint64_t required, avail = VIRT_LIN_P2M_END - VIRT_LIN_P2M_START;
+
+ get_gma_start_end(d, &gma_start, &gma_end);
+ required = (gma_end - gma_start) >> LPAE_SHIFT;
+
+ if ( required > avail )
+ {
+ dprintk(XENLOG_ERR, "Available VLPT is small for domU guest"
+ "(avail: %llx, required: %llx)\n", (unsigned long long)avail,
+ (unsigned long long)required);
+ return -ENOMEM;
+ }
+
+ xen_second_linear_base = second_linear_offset(VIRT_LIN_P2M_START);
+
+ gp2m_start_index = gma_start >> FIRST_SHIFT;
+ gp2m_end_index = (gma_end >> FIRST_SHIFT) + 1;
+
+ if ( xen_second_linear_base + gp2m_end_index >= LPAE_ENTRIES * 2 )
+ {
+ dprintk(XENLOG_ERR, "xen second page is small for VLPT for domU");
+ return -ENOMEM;
+ }
+
+ second_lvl_page = alloc_domheap_pages(NULL, 1, 0);
+ if ( second_lvl_page == NULL )
+ return -ENOMEM;
+
+ /* First level p2m is 2 consecutive pages */
+ d->arch.dirty.second_lvl[0] = map_domain_page_global(
+ page_to_mfn(second_lvl_page) );
+ d->arch.dirty.second_lvl[1] = map_domain_page_global(
+ page_to_mfn(second_lvl_page+1) );
+
+ first[0] = __map_domain_page(p2m->first_level);
+ first[1] = __map_domain_page(p2m->first_level+1);
+
+ for ( i = gp2m_start_index; i < gp2m_end_index; ++i )
+ {
+ int k = i % LPAE_ENTRIES;
+ int l = i / LPAE_ENTRIES;
+ int k2 = (xen_second_linear_base + i) % LPAE_ENTRIES;
+ int l2 = (xen_second_linear_base + i) / LPAE_ENTRIES;
+
+ write_pte(&xen_second[xen_second_linear_base+i], first[l][k]);
+
+ /* We copy the mapping into the domain's structure as a reference
+ * in case of a context switch (used in restore_vlpt) */
+ d->arch.dirty.second_lvl[l2][k2] = first[l][k];
+ }
+ unmap_domain_page(first[0]);
+ unmap_domain_page(first[1]);
+
+ /* Store the start and end indexes for flush_vlpt and restore_vlpt */
+ d->arch.dirty.second_lvl_start = xen_second_linear_base + gp2m_start_index;
+ d->arch.dirty.second_lvl_end = xen_second_linear_base + gp2m_end_index;
+
+ flush_vlpt(d);
+
+ return 0;
+}
+
+void cleanup_vlpt(struct domain *d)
+{
+ /* First level p2m is 2 consecutive pages */
+ unmap_domain_page_global(d->arch.dirty.second_lvl[0]);
+ unmap_domain_page_global(d->arch.dirty.second_lvl[1]);
+}
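A hedged sketch of how prepare_vlpt and cleanup_vlpt could be driven from a log-dirty control path follows; the log_dirty_on/log_dirty_off wrapper names and the surrounding steps are illustrative assumptions, not part of this patch.

    /* Sketch: VLPT lifecycle around dirty-page tracing.
     * log_dirty_on()/log_dirty_off() are hypothetical wrappers. */
    static int log_dirty_on(struct domain *d)
    {
        int rc = prepare_vlpt(d);   /* slot the guest p2m into the VLPT window */

        if ( rc )
            return rc;

        /* ... write-protect the p2m, allocate a dirty bitmap, ... */
        return 0;
    }

    static void log_dirty_off(struct domain *d)
    {
        /* ... restore p2m permissions, free the dirty bitmap, ... */
        cleanup_vlpt(d);            /* drop the global mappings of the copy */
    }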
/*
* Local variables:
* mode: C
@@ -3,22 +3,25 @@
#ifndef __ASSEMBLY__
-/* Write a pagetable entry.
- *
- * If the table entry is changing a text mapping, it is responsibility
- * of the caller to issue an ISB after write_pte.
- */
-static inline void write_pte(lpae_t *p, lpae_t pte)
+/* Write a pagetable entry. All necessary barriers are the responsibility
+ * of the caller. */
+static inline void __write_pte(lpae_t *p, lpae_t pte)
{
asm volatile (
- /* Ensure any writes have completed with the old mappings. */
- "dsb;"
- /* Safely write the entry (STRD is atomic on CPUs that support LPAE) */
+ /* Safely write the entry (STRD is atomic on CPUs that support LPAE) */
"strd %0, %H0, [%1];"
- "dsb;"
: : "r" (pte.bits), "r" (p) : "memory");
}
+/* Write a pagetable entry surrounded by dsb barriers. If the entry is
+ * changing a text mapping, it is still the caller's responsibility to
+ * issue an ISB afterwards. */
+static inline void write_pte(lpae_t *p, lpae_t pte)
+{
+ dsb();
+ __write_pte(p, pte);
+ dsb();
+}
+
/* Inline ASM to flush dcache on register R (may be an inline asm operand) */
#define __clean_xen_dcache_one(R) STORE_CP32(R, DCCMVAC)
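Splitting the barrier-free __write_pte out of write_pte lets a caller that updates a run of entries pay for a single dsb pair around the whole batch instead of one pair per entry. A minimal sketch of that pattern, assuming the caller performs any required TLB maintenance and ISB itself (the helper name is illustrative, not part of the patch):

    /* Sketch: update a contiguous range of PTEs with one pair of barriers
     * rather than a dsb before and after every single entry. */
    static inline void write_pte_range(lpae_t *table, lpae_t pte, int nr)
    {
        int i;

        dsb();                  /* drain accesses made under the old entries */
        for ( i = 0; i < nr; i++ )
            __write_pte(&table[i], pte);
        dsb();                  /* make the new entries visible before use */
    }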
@@ -87,6 +87,7 @@
* 0 - 8M <COMMON>
*
* 32M - 128M Frametable: 24 bytes per page for 16GB of RAM
+ * 128M - 256M Virtual-linear mapping to P2M table
* 256M - 1G VMAP: ioremap and early_ioremap use this virtual address
* space
*
@@ -124,7 +125,9 @@
#define CONFIG_SEPARATE_XENHEAP 1
#define FRAMETABLE_VIRT_START _AT(vaddr_t,0x02000000)
+#define VIRT_LIN_P2M_START _AT(vaddr_t,0x08000000)
#define VMAP_VIRT_START _AT(vaddr_t,0x10000000)
+#define VIRT_LIN_P2M_END VMAP_VIRT_START
#define XENHEAP_VIRT_START _AT(vaddr_t,0x40000000)
#define XENHEAP_VIRT_END _AT(vaddr_t,0x7fffffff)
#define DOMHEAP_VIRT_START _AT(vaddr_t,0x80000000)
@@ -157,6 +160,12 @@
#define HYPERVISOR_VIRT_END DIRECTMAP_VIRT_END
+/* Temporary definitions of VIRT_LIN_P2M_START and VIRT_LIN_P2M_END.
+ * TODO: their placement in this memory layout still needs evaluation.
+ */
+#define VIRT_LIN_P2M_START _AT(vaddr_t, 0x08000000)
+#define VIRT_LIN_P2M_END VMAP_VIRT_START
+
#endif
/* Fixmap slots */
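The new 128M window sits between the frametable and the VMAP area, and each 8-byte leaf PTE placed in it describes one 4K guest page, so the window can linearly expose the third-level p2m entries for up to 64G of guest physical address space. The worked arithmetic below is an editor's illustration, not part of the patch.

    /* Sketch: guest physical address space coverable by the VLPT window.
     * Each 8-byte leaf PTE (lpae_t) maps one 4K guest page:
     *   (VIRT_LIN_P2M_END - VIRT_LIN_P2M_START) / 8 * 4K
     *   = 0x08000000 / 8 * 0x1000
     *   = 64G
     * which matches the bound enforced by the required/avail check in
     * prepare_vlpt(). */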
@@ -161,6 +161,13 @@ struct arch_domain
spinlock_t lock;
} vuart;
+ /* dirty-page tracing */
+ struct {
+ volatile int second_lvl_start; /* for context switch */
+ volatile int second_lvl_end;
+ lpae_t *second_lvl[2]; /* copy of guest p2m's first-level entries */
+ } dirty;
+
unsigned int evtchn_irq;
} __cacheline_aligned;
@@ -4,6 +4,7 @@
#include <xen/config.h>
#include <xen/kernel.h>
#include <asm/page.h>
+#include <asm/config.h>
#include <public/xen.h>
/* Align Xen to a 2 MiB boundary. */
@@ -342,6 +343,21 @@ static inline void put_page_and_type(struct page_info *page)
}
void get_gma_start_end(struct domain *d, paddr_t *start, paddr_t *end);
+int prepare_vlpt(struct domain *d);
+void cleanup_vlpt(struct domain *d);
+void restore_vlpt(struct domain *d);
+
+/* Calculate Xen's virtual address of the leaf PTE that maps a given
+ * guest physical address (GPA) */
+static inline lpae_t * get_vlpt_3lvl_pte(paddr_t addr)
+{
+ lpae_t *table = (lpae_t *)VIRT_LIN_P2M_START;
+
+ /* Since we slotted the guest's first-level p2m page table into Xen's
+ * second-level page table, a single shift is enough to calculate the
+ * index of the guest p2m table entry */
+ return &table[addr >> PAGE_SHIFT];
+}
#endif /* __ARCH_ARM_MM__ */
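With the VLPT in place, the leaf PTE of a faulting guest physical address is reachable with plain pointer arithmetic instead of a software p2m walk. Below is a hedged sketch of how a permission-fault handler might use get_vlpt_3lvl_pte to re-enable writes and record the page as dirty; the handler name, the permission-bit handling and the dirty-bitmap step are assumptions for illustration, not part of this patch.

    /* Sketch: mark the page containing gpa as dirty via the VLPT. */
    static int handle_dirty_page_fault(struct domain *d, paddr_t gpa)
    {
        lpae_t *vlpt_pte = get_vlpt_3lvl_pte(gpa);
        lpae_t pte = *vlpt_pte;

        /* Re-grant write access on the leaf entry; which bits to flip
         * depends on how the p2m was write-protected in the first place. */
        pte.p2m.write = 1;
        write_pte(vlpt_pte, pte);

        /* ... flush the stale TLB entry for gpa (flush API elided) ... */
        /* ... set the corresponding bit in d's dirty bitmap (not shown) ... */

        return 0;
    }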
/*