
[3/3] alpha: atomics: Add smp_read_barrier_depends() to release/relaxed atomics

Message ID: 1507818377-7546-4-git-send-email-will.deacon@arm.com
State: Superseded
Series: Remove lockless_dereference

Commit Message

Will Deacon Oct. 12, 2017, 2:26 p.m. UTC
As part of the fight against smp_read_barrier_depends(), we require
dependency ordering to be preserved when a dependency is headed by a load
performed using an atomic operation.

This patch adds smp_read_barrier_depends() to the _release and _relaxed
atomics on alpha, which otherwise lack anything to enforce dependency
ordering.

Signed-off-by: Will Deacon <will.deacon@arm.com>

---
 arch/alpha/include/asm/atomic.h | 13 +++++++++++++
 1 file changed, 13 insertions(+)

-- 
2.1.4
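
To see what is at stake, consider a pointer that is published through an
atomic and then dereferenced by the reader. The sketch below is hypothetical
(the names published, producer() and consumer(), and the use of
atomic_long_fetch_add_relaxed() as the consuming load, are illustrative and
not from the patch), but it shows the shape of the problem: the value
returned by a relaxed value-returning atomic heads an address dependency,
and Alpha is the one architecture that does not order such dependencies in
hardware.

    #include <linux/atomic.h>

    struct foo {
    	int data;
    };

    static atomic_long_t published;

    /* Writer: initialise *p, then make it reachable. */
    static void producer(struct foo *p)
    {
    	p->data = 42;
    	smp_wmb();	/* order initialisation before publication */
    	atomic_long_set(&published, (long)p);
    }

    /*
     * Reader: the value returned by the relaxed atomic heads an address
     * dependency into q->data. Without the smp_read_barrier_depends()
     * this patch adds to the _relaxed variants, Alpha could satisfy
     * q->data from a stale cache line and observe pre-initialisation
     * garbage.
     */
    static int consumer(void)
    {
    	struct foo *q;

    	q = (struct foo *)atomic_long_fetch_add_relaxed(0, &published);
    	return q->data;
    }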

Comments

Paul E. McKenney Oct. 12, 2017, 3:17 p.m. UTC | #1
On Thu, Oct 12, 2017 at 03:26:17PM +0100, Will Deacon wrote:
> As part of the fight against smp_read_barrier_depends(), we require
> dependency ordering to be preserved when a dependency is headed by a load
> performed using an atomic operation.
> 
> This patch adds smp_read_barrier_depends() to the _release and _relaxed
> atomics on alpha, which otherwise lack anything to enforce dependency
> ordering.
> 
> Signed-off-by: Will Deacon <will.deacon@arm.com>

Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

> ---
>  arch/alpha/include/asm/atomic.h | 13 +++++++++++++
>  1 file changed, 13 insertions(+)
> 
> diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
> index 498933a7df97..16961a3f45ba 100644
> --- a/arch/alpha/include/asm/atomic.h
> +++ b/arch/alpha/include/asm/atomic.h
> @@ -13,6 +13,15 @@
>   * than regular operations.
>   */
> 
> +/*
> + * To ensure dependency ordering is preserved for the _relaxed and
> + * _release atomics, an smp_read_barrier_depends() is unconditionally
> + * inserted into the _relaxed variants, which are used to build the
> + * barriered versions. To avoid redundant back-to-back fences, we can
> + * define the _acquire and _fence versions explicitly.
> + */
> +#define __atomic_op_acquire(op, args...)	op##_relaxed(args)
> +#define __atomic_op_fence			__atomic_op_release
> 
>  #define ATOMIC_INIT(i)		{ (i) }
>  #define ATOMIC64_INIT(i)	{ (i) }
> @@ -60,6 +69,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
>  	".previous"							\
>  	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
>  	:"Ir" (i), "m" (v->counter) : "memory");			\
> +	smp_read_barrier_depends();					\
>  	return result;							\
>  }
> 
> @@ -77,6 +87,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
>  	".previous"							\
>  	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
>  	:"Ir" (i), "m" (v->counter) : "memory");			\
> +	smp_read_barrier_depends();					\
>  	return result;							\
>  }
> 
> @@ -111,6 +122,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v)	\
>  	".previous"							\
>  	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
>  	:"Ir" (i), "m" (v->counter) : "memory");			\
> +	smp_read_barrier_depends();					\
>  	return result;							\
>  }
> 
> @@ -128,6 +140,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)	\
>  	".previous"							\
>  	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
>  	:"Ir" (i), "m" (v->counter) : "memory");			\
> +	smp_read_barrier_depends();					\
>  	return result;							\
>  }
> 
> -- 
> 2.1.4
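
Note that, unlike on every other architecture, smp_read_barrier_depends()
is not free on Alpha: at the time of this patch it resolved, via
read_barrier_depends() in arch/alpha/include/asm/barrier.h, to a full
memory-barrier instruction, roughly:

    /* arch/alpha/include/asm/barrier.h (paraphrased) */
    #define read_barrier_depends()	__asm__ __volatile__("mb": : :"memory")

So with this change every _relaxed atomic ends in an mb, which is what lets
the patch define the _acquire and _fence flavours more cheaply, as discussed
after the diff below.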

Patch

diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 498933a7df97..16961a3f45ba 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -13,6 +13,15 @@
  * than regular operations.
  */
 
+/*
+ * To ensure dependency ordering is preserved for the _relaxed and
+ * _release atomics, an smp_read_barrier_depends() is unconditionally
+ * inserted into the _relaxed variants, which are used to build the
+ * barriered versions. To avoid redundant back-to-back fences, we can
+ * define the _acquire and _fence versions explicitly.
+ */
+#define __atomic_op_acquire(op, args...)	op##_relaxed(args)
+#define __atomic_op_fence			__atomic_op_release
 
 #define ATOMIC_INIT(i)		{ (i) }
 #define ATOMIC64_INIT(i)	{ (i) }
@@ -60,6 +69,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
 	".previous"							\
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
 	:"Ir" (i), "m" (v->counter) : "memory");			\
+	smp_read_barrier_depends();					\
 	return result;							\
 }
 
@@ -77,6 +87,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
 	".previous"							\
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
 	:"Ir" (i), "m" (v->counter) : "memory");			\
+	smp_read_barrier_depends();					\
 	return result;							\
 }
 
@@ -111,6 +122,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v)	\
 	".previous"							\
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
 	:"Ir" (i), "m" (v->counter) : "memory");			\
+	smp_read_barrier_depends();					\
 	return result;							\
 }
 
@@ -128,6 +140,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)	\
 	".previous"							\
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
 	:"Ir" (i), "m" (v->counter) : "memory");			\
+	smp_read_barrier_depends();					\
 	return result;							\
 }
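
For reference, the two #defines at the top of the hunk override the generic
wrappers in include/linux/atomic.h, which build the ordered flavours out of
the _relaxed ones. Paraphrased from roughly this era of the tree, so treat
the exact bodies as approximate:

    #define __atomic_op_acquire(op, args...)				\
    ({									\
    	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
    	smp_mb__after_atomic();						\
    	__ret;								\
    })

    #define __atomic_op_release(op, args...)				\
    ({									\
    	smp_mb__before_atomic();					\
    	op##_relaxed(args);						\
    })

    #define __atomic_op_fence(op, args...)				\
    ({									\
    	typeof(op##_relaxed(args)) __ret;				\
    	smp_mb__before_atomic();					\
    	__ret = op##_relaxed(args);					\
    	smp_mb__after_atomic();						\
    	__ret;								\
    })

On Alpha, smp_mb__before_atomic() and smp_mb__after_atomic() are both full
mb instructions, and with this patch every _relaxed variant already ends in
an mb via smp_read_barrier_depends(). The generic _acquire wrapper would
therefore emit two back-to-back mbs and the generic _fence wrapper three,
which is why the patch defines _acquire as plain _relaxed (one mb) and
_fence as _release (two mbs).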