@@ -965,7 +965,7 @@ static mfn_t ept_get_entry(struct p2m_domain *p2m,
index = gfn_remainder >> ( i * EPT_TABLE_ORDER);
ept_entry = table + index;
- if ( !p2m_pod_demand_populate(p2m, gfn_, i * EPT_TABLE_ORDER, q) )
+ if ( p2m_pod_demand_populate(p2m, gfn_, i * EPT_TABLE_ORDER) )
goto retry;
else
goto out;
@@ -987,8 +987,7 @@ static mfn_t ept_get_entry(struct p2m_domain *p2m,
ASSERT(i == 0);
- if ( p2m_pod_demand_populate(p2m, gfn_,
- PAGE_ORDER_4K, q) )
+ if ( !p2m_pod_demand_populate(p2m, gfn_, PAGE_ORDER_4K) )
goto out;
}
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -1075,10 +1075,9 @@ static void pod_eager_record(struct p2m_domain *p2m, gfn_t gfn,
mrp->idx %= ARRAY_SIZE(mrp->list);
}
-int
+bool
p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn,
- unsigned int order,
- p2m_query_t q)
+ unsigned int order)
{
struct domain *d = p2m->domain;
struct page_info *p = NULL; /* Compiler warnings */
@@ -1116,7 +1115,7 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn,
*/
p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
p2m_populate_on_demand, p2m->default_access);
- return 0;
+ return true;
}
/* Only reclaim if we're in actual need of more cache. */
@@ -1178,7 +1177,7 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn,
}
pod_unlock(p2m);
- return 0;
+ return true;
out_of_memory:
pod_unlock(p2m);
@@ -1186,10 +1185,10 @@ out_of_memory:
__func__, d->domain_id, d->tot_pages, p2m->pod.entry_count,
current->domain->domain_id);
domain_crash(d);
- return -1;
+ return false;
out_fail:
pod_unlock(p2m);
- return -1;
+ return false;
remap_and_retry:
BUG_ON(order != PAGE_ORDER_2M);
pod_unlock(p2m);
@@ -1215,7 +1214,7 @@ remap_and_retry:
__trace_var(TRC_MEM_POD_SUPERPAGE_SPLINTER, 0, sizeof(t), &t);
}
- return 0;
+ return true;
}
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -802,7 +802,7 @@ pod_retry_l3:
{
if ( q & P2M_ALLOC )
{
- if ( !p2m_pod_demand_populate(p2m, gfn_, PAGE_ORDER_1G, q) )
+ if ( p2m_pod_demand_populate(p2m, gfn_, PAGE_ORDER_1G) )
goto pod_retry_l3;
gdprintk(XENLOG_ERR, "%s: Allocate 1GB failed!\n", __func__);
}
@@ -844,7 +844,7 @@ pod_retry_l2:
if ( p2m_flags_to_type(flags) == p2m_populate_on_demand )
{
if ( q & P2M_ALLOC ) {
- if ( !p2m_pod_demand_populate(p2m, gfn_, PAGE_ORDER_2M, q) )
+ if ( p2m_pod_demand_populate(p2m, gfn_, PAGE_ORDER_2M) )
goto pod_retry_l2;
} else
*t = p2m_populate_on_demand;
@@ -883,7 +883,7 @@ pod_retry_l1:
if ( l1t == p2m_populate_on_demand )
{
if ( q & P2M_ALLOC ) {
- if ( !p2m_pod_demand_populate(p2m, gfn_, PAGE_ORDER_4K, q) )
+ if ( p2m_pod_demand_populate(p2m, gfn_, PAGE_ORDER_4K) )
goto pod_retry_l1;
} else
*t = p2m_populate_on_demand;
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -719,10 +719,8 @@ extern void audit_p2m(struct domain *d,
#endif
/* Called by p2m code when demand-populating a PoD page */
-int
-p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn,
- unsigned int order,
- p2m_query_t q);
+bool
+p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, unsigned int order);
/*
* Functions specific to the p2m-pt implementation
- Switch the return type to bool
- Remove the parameter p2m_query_t q as it is not used

Signed-off-by: Julien Grall <julien.grall@arm.com>

---
Cc: George Dunlap <george.dunlap@eu.citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/mm/p2m-ept.c |  5 ++---
 xen/arch/x86/mm/p2m-pod.c | 15 +++++++--------
 xen/arch/x86/mm/p2m-pt.c  |  6 +++---
 xen/include/asm-x86/p2m.h |  6 ++----
 4 files changed, 14 insertions(+), 18 deletions(-)
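On the caller side, the conversion flips every success test: the old convention returned 0 on success and -1 on failure, so success was checked with '!', while the new one returns true on success and false on failure, so the '!' moves to the failure checks instead. Below is a minimal, self-contained sketch of that flip; old_demand_populate() and new_demand_populate() are hypothetical stand-ins, not Xen functions, and only mirror the pattern visible in the hunks above.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the old convention: 0 on success, -1 on failure. */
    static int old_demand_populate(bool ok)
    {
        return ok ? 0 : -1;
    }

    /* Hypothetical stand-in for the new convention: true on success, false on failure. */
    static bool new_demand_populate(bool ok)
    {
        return ok;
    }

    int main(void)
    {
        /* Old-style caller: '!' means "the call succeeded". */
        if ( !old_demand_populate(true) )
            puts("old convention: success path taken");

        /* New-style caller: the bare return value means "the call succeeded". */
        if ( new_demand_populate(true) )
            puts("new convention: success path taken");

        return 0;
    }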