@@ -49,27 +49,27 @@ static bool_t p2m_mapping(lpae_t pte)
static inline void p2m_write_lock(struct p2m_domain *p2m)
{
- spin_lock(&p2m->lock);
+ write_lock(&p2m->lock);
}
static inline void p2m_write_unlock(struct p2m_domain *p2m)
{
- spin_unlock(&p2m->lock);
+ write_unlock(&p2m->lock);
}
static inline void p2m_read_lock(struct p2m_domain *p2m)
{
- spin_lock(&p2m->lock);
+ read_lock(&p2m->lock);
}
static inline void p2m_read_unlock(struct p2m_domain *p2m)
{
- spin_unlock(&p2m->lock);
+ read_unlock(&p2m->lock);
}
static inline int p2m_is_locked(struct p2m_domain *p2m)
{
- return spin_is_locked(&p2m->lock);
+ return rw_is_locked(&p2m->lock);
}
void p2m_dump_info(struct domain *d)
@@ -1389,7 +1389,7 @@ int p2m_init(struct domain *d)
struct p2m_domain *p2m = &d->arch.p2m;
int rc = 0;
- spin_lock_init(&p2m->lock);
+ rwlock_init(&p2m->lock);
INIT_PAGE_LIST_HEAD(&p2m->pages);
p2m->vmid = INVALID_VMID;
@@ -3,6 +3,7 @@
#include <xen/mm.h>
#include <xen/radix-tree.h>
+#include <xen/rwlock.h>
#include <public/vm_event.h> /* for vm_event_response_t */
#include <public/memory.h>
#include <xen/p2m-common.h>
@@ -20,7 +21,7 @@ extern void memory_type_changed(struct domain *);
/* Per-p2m-table state */
struct p2m_domain {
/* Lock that protects updates to the p2m */
- spinlock_t lock;
+ rwlock_t lock;
/* Pages used to construct the p2m */
struct page_list_head pages;
P2M reads do not need to be serialized. The current spinlock therefore adds contention when PV drivers use multi-queue, because parallel grant map/unmap/copy operations will happen on the DomU's p2m.

Signed-off-by: Julien Grall <julien.grall@arm.com>

---
I have not benchmarked this to verify the performance gain; however, an rwlock is always an improvement over a spinlock when most of the accesses only read data. It might also be possible to convert the rwlock to a per-cpu rwlock, which showed some improvement on x86.

---
 xen/arch/arm/p2m.c        | 12 ++++++------
 xen/include/asm-arm/p2m.h |  3 ++-
 2 files changed, 8 insertions(+), 7 deletions(-)
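For illustration only, not part of the patch: below is a minimal, self-contained userspace sketch of the locking pattern this change introduces, using POSIX rwlocks in place of Xen's rwlock_t. The struct layout, the hypothetical "entries" field, and the p2m_lookup()/p2m_set_entry() helpers are simplified stand-ins for the real Xen code. It compiles with cc -pthread.

/* Sketch of the read-mostly locking pattern; not Xen code. */
#include <pthread.h>
#include <stdio.h>

struct p2m_domain {
    pthread_rwlock_t lock;   /* protects updates to the p2m */
    unsigned long entries;   /* hypothetical stand-in for the p2m tables */
};

/* Read side: many vCPUs/backends may translate concurrently. */
static unsigned long p2m_lookup(struct p2m_domain *p2m)
{
    unsigned long e;

    pthread_rwlock_rdlock(&p2m->lock);
    e = p2m->entries;        /* e.g. a grant map/copy walking the p2m */
    pthread_rwlock_unlock(&p2m->lock);

    return e;
}

/* Write side: updates still serialize against readers and each other. */
static void p2m_set_entry(struct p2m_domain *p2m, unsigned long v)
{
    pthread_rwlock_wrlock(&p2m->lock);
    p2m->entries = v;
    pthread_rwlock_unlock(&p2m->lock);
}

int main(void)
{
    struct p2m_domain p2m = { .entries = 0 };

    pthread_rwlock_init(&p2m.lock, NULL);
    p2m_set_entry(&p2m, 42);
    printf("lookup: %lu\n", p2m_lookup(&p2m));
    pthread_rwlock_destroy(&p2m.lock);

    return 0;
}

The point of the pattern: any number of readers (the grant map/unmap/copy paths translating guest frames) may hold the lock at the same time, while a writer (a p2m update) excludes both readers and other writers. With the previous spinlock, every lookup serialized against every other lookup, which is where the multi-queue contention described above came from.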