@@ -717,6 +717,14 @@ int domain_relinquish_resources(struct domain *d)
if ( ret )
return ret;
+ d->arch.relmem = RELMEM_mapping;
+ /* Fallthrough */
+
+ case RELMEM_mapping:
+ ret = relinquish_p2m_mapping(d);
+ if ( ret )
+ return ret;
+
d->arch.relmem = RELMEM_done;
/* Fallthrough */
@@ -6,6 +6,8 @@
#include <xen/bitops.h>
#include <asm/flushtlb.h>
#include <asm/gic.h>
+#include <asm/event.h>
+#include <asm/hardirq.h>
/* First level P2M is 2 consecutive pages */
#define P2M_FIRST_ORDER 1
@@ -213,7 +215,8 @@ static int p2m_create_table(struct domain *d,
enum p2m_operation {
INSERT,
ALLOCATE,
- REMOVE
+ REMOVE,
+ RELINQUISH,
};
static int create_p2m_entries(struct domain *d,
@@ -231,6 +234,7 @@ static int create_p2m_entries(struct domain *d,
unsigned long cur_first_page = ~0,
cur_first_offset = ~0,
cur_second_offset = ~0;
+ unsigned long count = 0;
spin_lock(&p2m->lock);
@@ -315,6 +319,7 @@ static int create_p2m_entries(struct domain *d,
maddr += PAGE_SIZE;
}
break;
+ case RELINQUISH:
case REMOVE:
{
lpae_t pte = third[third_table_offset(addr)];
@@ -334,6 +339,28 @@ static int create_p2m_entries(struct domain *d,
if ( flush )
flush_tlb_all_local();
+
+
+ count++;
+
+ if ( op == RELINQUISH && count == 512 && hypercall_preempt_check() )
+ {
+ p2m->next_gfn_to_relinquish = maddr >> PAGE_SHIFT;
+ rc = -EAGAIN;
+ goto out;
+ }
+ }
+
+ /* When this function removes a mapping, the p2m type should always
+ * be p2m_invalid. */
+ if ( (t == p2m_ram_rw) || (t == p2m_ram_ro) || (t == p2m_map_foreign))
+ {
+ unsigned long sgfn = paddr_to_pfn(start_gpaddr);
+ unsigned long egfn = paddr_to_pfn(end_gpaddr);
+
+ p2m->max_mapped_gfn = MAX(p2m->max_mapped_gfn, egfn);
+ /* Use next_gfn_to_relinquish to store the lowest gfn mapped */
+ p2m->next_gfn_to_relinquish = MIN(p2m->next_gfn_to_relinquish, sgfn);
}
rc = 0;
@@ -529,12 +556,26 @@ int p2m_init(struct domain *d)
p2m->first_level = NULL;
+ p2m->max_mapped_gfn = 0;
+ p2m->next_gfn_to_relinquish = ULONG_MAX;
+
err:
spin_unlock(&p2m->lock);
return rc;
}
+int relinquish_p2m_mapping(struct domain *d)
+{
+ struct p2m_domain *p2m = &d->arch.p2m;
+
+ return create_p2m_entries(d, RELINQUISH,
+ pfn_to_paddr(p2m->next_gfn_to_relinquish),
+ pfn_to_paddr(p2m->max_mapped_gfn),
+ pfn_to_paddr(INVALID_MFN),
+ MATTR_MEM, p2m_invalid);
+}
+
unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
{
paddr_t p = p2m_lookup(d, pfn_to_paddr(gpfn), NULL);
@@ -75,6 +75,7 @@ struct arch_domain
RELMEM_not_started,
RELMEM_xen,
RELMEM_page,
+ RELMEM_mapping,
RELMEM_done,
} relmem;
@@ -18,6 +18,15 @@ struct p2m_domain {
/* Current VMID in use */
uint8_t vmid;
+
+ /* Highest guest frame that has ever been mapped in the p2m.
+ * Only RAM and foreign mappings are taken into account.
+ */
+ unsigned long max_mapped_gfn;
+
+ /* When releasing mapped GFNs in a preemptible manner, record where
+ * to resume the search. */
+ unsigned long next_gfn_to_relinquish;
};
/* List of possible type for each page in the p2m entry.
@@ -48,6 +57,12 @@ int p2m_init(struct domain *d);
/* Return all the p2m resources to Xen. */
void p2m_teardown(struct domain *d);
+/* Drop the reference taken on each mapped page in the p2m.
+ *
+ * TODO: For the moment only foreign mappings are handled.
+ */
+int relinquish_p2m_mapping(struct domain *d);
+
/* Allocate a new p2m table for a domain.
*
* Returns 0 for success or -errno.
This function will be called when the domain relinquishes its memory. It drops the reference count on every page mapped to a valid MFN. Currently, Xen doesn't take a reference on every new mapping, only on foreign mappings, so the function is restricted to foreign mappings. Signed-off-by: Julien Grall <julien.grall@linaro.org> --- Changes in v3: - Rework title - Reuse create_p2m_entries to remove reference - Don't forget to set relmem! - Fix compilation (missing include) Changes in v2: - Introduce the patch --- xen/arch/arm/domain.c | 8 ++++++++ xen/arch/arm/p2m.c | 43 +++++++++++++++++++++++++++++++++++++++++- xen/include/asm-arm/domain.h | 1 + xen/include/asm-arm/p2m.h | 15 +++++++++++++++ 4 files changed, 66 insertions(+), 1 deletion(-)