| Message ID | 1467131671-24612-1-git-send-email-julien.grall@arm.com |
|------------|---------------------------------------------------------|
| State      | New                                                     |
Hi Stefano,

On 11/07/16 18:49, Stefano Stabellini wrote:
> On Tue, 28 Jun 2016, Julien Grall wrote:
>> Currently, accessing the I/O handlers does not require taking a lock
>> because new handlers are always added at the end of the array. In a
>> follow-up patch, this array will be sorted to optimize the lookup.
>>
>> Given that most of the time the I/O handlers will not be modified,
>> using a spinlock will add contention when multiple vCPUs are accessing
>> the emulated MMIOs. So use a read-write lock to protect the handlers.
>>
>> Finally, take the opportunity to re-indent domain_io_init correctly.
>>
>> Signed-off-by: Julien Grall <julien.grall@arm.com>
>
> I would appreciate if you could avoid mixing indentation changes with
> other changes in the future.

The indentation change was very small and I did not feel it was
necessary to have a separate patch for it. I tend to limit the number of
patches unless combining them would hide important changes. Anyway, I
will try to split coding style changes and functional changes in the
future.

> Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
>
> I'll commit.

Thank you!

Regards,

>>  xen/arch/arm/io.c          | 47 +++++++++++++++++++++++++++-------------------
>>  xen/include/asm-arm/mmio.h |  3 ++-
>>  2 files changed, 30 insertions(+), 20 deletions(-)
>>
>> diff --git a/xen/arch/arm/io.c b/xen/arch/arm/io.c
>> index 0156755..5a96836 100644
>> --- a/xen/arch/arm/io.c
>> +++ b/xen/arch/arm/io.c
>> @@ -70,23 +70,39 @@ static int handle_write(const struct mmio_handler *handler, struct vcpu *v,
>>                                 handler->priv);
>>  }
>>
>> -int handle_mmio(mmio_info_t *info)
>> +static const struct mmio_handler *find_mmio_handler(struct domain *d,
>> +                                                    paddr_t gpa)
>>  {
>> -    struct vcpu *v = current;
>> -    int i;
>> -    const struct mmio_handler *handler = NULL;
>> -    const struct vmmio *vmmio = &v->domain->arch.vmmio;
>> +    const struct mmio_handler *handler;
>> +    unsigned int i;
>> +    struct vmmio *vmmio = &d->arch.vmmio;
>> +
>> +    read_lock(&vmmio->lock);
>>
>>      for ( i = 0; i < vmmio->num_entries; i++ )
>>      {
>>          handler = &vmmio->handlers[i];
>>
>> -        if ( (info->gpa >= handler->addr) &&
>> -             (info->gpa < (handler->addr + handler->size)) )
>> +        if ( (gpa >= handler->addr) &&
>> +             (gpa < (handler->addr + handler->size)) )
>>              break;
>>      }
>>
>>      if ( i == vmmio->num_entries )
>> +        handler = NULL;
>> +
>> +    read_unlock(&vmmio->lock);
>> +
>> +    return handler;
>> +}
>> +
>> +int handle_mmio(mmio_info_t *info)
>> +{
>> +    struct vcpu *v = current;
>> +    const struct mmio_handler *handler = NULL;
>> +
>> +    handler = find_mmio_handler(v->domain, info->gpa);
>> +    if ( !handler )
>>          return 0;
>>
>>      if ( info->dabt.write )
>> @@ -104,7 +120,7 @@ void register_mmio_handler(struct domain *d,
>>
>>      BUG_ON(vmmio->num_entries >= MAX_IO_HANDLER);
>>
>> -    spin_lock(&vmmio->lock);
>> +    write_lock(&vmmio->lock);
>>
>>      handler = &vmmio->handlers[vmmio->num_entries];
>>
>> @@ -113,24 +129,17 @@ void register_mmio_handler(struct domain *d,
>>      handler->size = size;
>>      handler->priv = priv;
>>
>> -    /*
>> -     * handle_mmio is not using the lock to avoid contention.
>> -     * Make sure the other processors see the new handler before
>> -     * updating the number of entries
>> -     */
>> -    dsb(ish);
>> -
>>      vmmio->num_entries++;
>>
>> -    spin_unlock(&vmmio->lock);
>> +    write_unlock(&vmmio->lock);
>>  }
>>
>>  int domain_io_init(struct domain *d)
>>  {
>> -   spin_lock_init(&d->arch.vmmio.lock);
>> -   d->arch.vmmio.num_entries = 0;
>> +    rwlock_init(&d->arch.vmmio.lock);
>> +    d->arch.vmmio.num_entries = 0;
>>
>> -   return 0;
>> +    return 0;
>>  }
>>
>>  /*
>> diff --git a/xen/include/asm-arm/mmio.h b/xen/include/asm-arm/mmio.h
>> index da1cc2e..32f10f2 100644
>> --- a/xen/include/asm-arm/mmio.h
>> +++ b/xen/include/asm-arm/mmio.h
>> @@ -20,6 +20,7 @@
>>  #define __ASM_ARM_MMIO_H__
>>
>>  #include <xen/lib.h>
>> +#include <xen/rwlock.h>
>>  #include <asm/processor.h>
>>  #include <asm/regs.h>
>>
>> @@ -51,7 +52,7 @@ struct mmio_handler {
>>
>>  struct vmmio {
>>      int num_entries;
>> -    spinlock_t lock;
>> +    rwlock_t lock;
>>      struct mmio_handler handlers[MAX_IO_HANDLER];
>>  };
>>
>> --
>> 1.9.1
>>
>
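The locking scheme in the patch quoted above, where registration takes the write side of the lock and the hot MMIO lookup path only takes the read side, is a generic read-mostly pattern rather than anything Xen-specific. Below is a minimal, self-contained sketch of the same idea using POSIX pthread_rwlock_t instead of Xen's rwlock API; the names (struct handler, register_handler, find_handler) are made up for illustration and are not taken from the Xen tree.

```c
/* Minimal sketch of a read-mostly handler registry protected by a
 * read-write lock, in the spirit of the patch above.  Uses POSIX
 * pthread_rwlock_t rather than Xen's rwlock API; all names here are
 * illustrative only. */
#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLERS 16

struct handler {
    uint64_t addr;
    uint64_t size;
    void (*fn)(uint64_t addr);
};

static struct handler handlers[MAX_HANDLERS];
static unsigned int num_handlers;
static pthread_rwlock_t handlers_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Rare path: take the write lock so readers never see a half-written entry. */
static void register_handler(uint64_t addr, uint64_t size, void (*fn)(uint64_t))
{
    pthread_rwlock_wrlock(&handlers_lock);
    assert(num_handlers < MAX_HANDLERS);
    handlers[num_handlers].addr = addr;
    handlers[num_handlers].size = size;
    handlers[num_handlers].fn   = fn;
    num_handlers++;
    pthread_rwlock_unlock(&handlers_lock);
}

/* Hot path: many readers (vCPUs, in the Xen case) may scan concurrently
 * under the read lock. */
static const struct handler *find_handler(uint64_t addr)
{
    const struct handler *h = NULL;
    unsigned int i;

    pthread_rwlock_rdlock(&handlers_lock);
    for ( i = 0; i < num_handlers; i++ )
    {
        if ( addr >= handlers[i].addr &&
             addr < handlers[i].addr + handlers[i].size )
        {
            h = &handlers[i];
            break;
        }
    }
    pthread_rwlock_unlock(&handlers_lock);

    return h;
}

static void dummy(uint64_t addr)
{
    printf("handled access at %#lx\n", (unsigned long)addr);
}

int main(void)
{
    register_handler(0x1000, 0x100, dummy);

    const struct handler *h = find_handler(0x1040);
    if ( h )
        h->fn(0x1040);

    return 0;
}
```

As in the patch, returning a pointer after dropping the read lock is only safe because entries are never removed or modified once registered.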
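The patch also drops a dsb(ish) barrier. It was needed because the old handle_mmio() scanned the array without taking any lock, so the writer had to make the fully initialised entry visible before the increment of num_entries; once every reader holds the read lock, the lock's acquire/release ordering already provides that guarantee. The sketch below is a generic illustration of the lock-free publication pattern the barrier implemented, written with C11 atomics; it is not the Xen code, and it assumes a single writer (the original code serialised writers with the spinlock).

```c
/* Generic illustration (not the Xen code) of publish-then-count:
 * readers scan without a lock, so the writer must make the new entry
 * visible before the count that covers it.  A release store on the
 * count pairs with an acquire load in the reader, playing the role
 * the removed dsb(ish) played in the original code. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

#define MAX_ENTRIES 16

struct entry {
    uint64_t addr;
    uint64_t size;
};

static struct entry entries[MAX_ENTRIES];
static _Atomic unsigned int num_entries;

/* Single writer: fill in the entry, then publish it by bumping the
 * count with release semantics. */
void publish_entry(uint64_t addr, uint64_t size)
{
    unsigned int n = atomic_load_explicit(&num_entries, memory_order_relaxed);

    if ( n >= MAX_ENTRIES )
        return;

    entries[n].addr = addr;
    entries[n].size = size;
    atomic_store_explicit(&num_entries, n + 1, memory_order_release);
}

/* Lock-free reader: if the acquire load observes a count of n, it is
 * also guaranteed to observe the fully initialised entries[0..n-1]. */
const struct entry *lookup_entry(uint64_t addr)
{
    unsigned int n = atomic_load_explicit(&num_entries, memory_order_acquire);

    for ( unsigned int i = 0; i < n; i++ )
        if ( addr >= entries[i].addr && addr < entries[i].addr + entries[i].size )
            return &entries[i];

    return NULL;
}
```

With the read-write lock in place, the release in write_unlock() and the acquire in read_lock() give the same ordering, which is why the explicit barrier could simply be removed.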
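The commit message also mentions a follow-up patch that sorts the handler array to optimize the lookup. That patch is not shown here; purely as an illustration, an array kept sorted by base address (with non-overlapping ranges) lets the linear scan under the read lock be replaced by a binary search, along these lines. This is hypothetical code, not the actual follow-up.

```c
/* Hypothetical sketch of the "sorted array" lookup hinted at in the
 * commit message; not the actual follow-up patch.  Assumes the ranges
 * do not overlap and the array is kept sorted by ->addr at
 * registration time. */
#include <stddef.h>
#include <stdint.h>

struct mmio_range {
    uint64_t addr;
    uint64_t size;
};

const struct mmio_range *find_range(const struct mmio_range *ranges,
                                    unsigned int nr, uint64_t gpa)
{
    unsigned int lo = 0, hi = nr;

    /* Classic binary search: O(log n) instead of the linear scan. */
    while ( lo < hi )
    {
        unsigned int mid = lo + (hi - lo) / 2;

        if ( gpa < ranges[mid].addr )
            hi = mid;
        else if ( gpa >= ranges[mid].addr + ranges[mid].size )
            lo = mid + 1;
        else
            return &ranges[mid];
    }

    return NULL;
}
```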