Message ID | 1395330365-9901-5-git-send-email-ian.campbell@citrix.com |
---|---|
State | Superseded |
Headers | show |
Hi Ian, On 03/20/2014 03:45 PM, Ian Campbell wrote: > diff --git a/xen/include/asm-arm/arm32/atomic.h b/xen/include/asm-arm/arm32/atomic.h > index 3f024d4..d309f66 100644 > --- a/xen/include/asm-arm/arm32/atomic.h > +++ b/xen/include/asm-arm/arm32/atomic.h > @@ -21,6 +21,7 @@ static inline void atomic_add(int i, atomic_t *v) > unsigned long tmp; > int result; > > + prefetchw(&v->counter); Xen on ARM doesn't provide a prefetch* helper. Shall we implement it? Regards,
On Thu, 2014-03-20 at 17:27 +0000, Julien Grall wrote: > Hi Ian, > > On 03/20/2014 03:45 PM, Ian Campbell wrote: > > diff --git a/xen/include/asm-arm/arm32/atomic.h b/xen/include/asm-arm/arm32/atomic.h > > index 3f024d4..d309f66 100644 > > --- a/xen/include/asm-arm/arm32/atomic.h > > +++ b/xen/include/asm-arm/arm32/atomic.h > > @@ -21,6 +21,7 @@ static inline void atomic_add(int i, atomic_t *v) > > unsigned long tmp; > > int result; > > > > + prefetchw(&v->counter); > > Xen on ARM doesn't provide a prefetch* helper. Shall we implement it? It comes from generic code after the first patch in this series. Ian.
diff --git a/xen/include/asm-arm/arm32/atomic.h b/xen/include/asm-arm/arm32/atomic.h
index 3f024d4..d309f66 100644
--- a/xen/include/asm-arm/arm32/atomic.h
+++ b/xen/include/asm-arm/arm32/atomic.h
@@ -21,6 +21,7 @@ static inline void atomic_add(int i, atomic_t *v)
     unsigned long tmp;
     int result;
 
+    prefetchw(&v->counter);
     __asm__ __volatile__("@ atomic_add\n"
 "1:     ldrex   %0, [%3]\n"
 "       add     %0, %0, %4\n"
@@ -59,6 +60,7 @@ static inline void atomic_sub(int i, atomic_t *v)
     unsigned long tmp;
     int result;
 
+    prefetchw(&v->counter);
     __asm__ __volatile__("@ atomic_sub\n"
 "1:     ldrex   %0, [%3]\n"
 "       sub     %0, %0, %4\n"
@@ -94,7 +96,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
-    unsigned long oldval, res;
+    int oldval;
+    unsigned long res;
 
     smp_mb();
 
@@ -118,6 +121,7 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
     unsigned long tmp, tmp2;
 
+    prefetchw(addr);
     __asm__ __volatile__("@ atomic_clear_mask\n"
 "1:     ldrex   %0, [%3]\n"
 "       bic     %0, %0, %4\n"
diff --git a/xen/include/asm-arm/atomic.h b/xen/include/asm-arm/atomic.h
index 69c8f3f..2c92de9 100644
--- a/xen/include/asm-arm/atomic.h
+++ b/xen/include/asm-arm/atomic.h
@@ -2,6 +2,7 @@
 #define __ARCH_ARM_ATOMIC__
 
 #include <xen/config.h>
+#include <xen/prefetch.h>
 #include <asm/system.h>
 
 #define build_atomic_read(name, size, width, type, reg)\