diff mbox

[3/7] linux-generic: odp_ring.c: use __atomic cmpset, add missing barriers, remove dangerous odp_mem_barrier

Message ID 1416484428-23849-4-git-send-email-ola.liljedahl@linaro.org
State New
Headers show

Commit Message

Ola Liljedahl Nov. 20, 2014, 11:53 a.m. UTC
Signed-off-by: Ola Liljedahl <ola.liljedahl@linaro.org>
---
 platform/linux-generic/include/odp_spin_internal.h |  9 -------
 platform/linux-generic/odp_ring.c                  | 28 +++++++++++++++-------
 2 files changed, 20 insertions(+), 17 deletions(-)
diff mbox

Patch

diff --git a/platform/linux-generic/include/odp_spin_internal.h b/platform/linux-generic/include/odp_spin_internal.h
index b7e2071..29c524f 100644
--- a/platform/linux-generic/include/odp_spin_internal.h
+++ b/platform/linux-generic/include/odp_spin_internal.h
@@ -15,15 +15,6 @@  extern "C" {
 
 
 /**
- * GCC memory barrier for ODP internal use
- */
-static inline void odp_mem_barrier(void)
-{
-	__asm__ __volatile__ ("" : : : "memory");
-}
-
-
-/**
  * Spin loop for ODP internal use
  */
 static inline void odp_spin(void)
diff --git a/platform/linux-generic/odp_ring.c b/platform/linux-generic/odp_ring.c
index 632aa66..1d3130a 100644
--- a/platform/linux-generic/odp_ring.c
+++ b/platform/linux-generic/odp_ring.c
@@ -259,13 +259,16 @@  int __odph_ring_mp_do_enqueue(odph_ring_t *r, void * const *obj_table,
 		}
 
 		prod_next = prod_head + n;
-		success = odp_atomic_cmpset_u32(&r->prod.head, prod_head,
-					      prod_next);
+		success = __atomic_compare_exchange_n(&r->prod.head,
+				&prod_head,
+				prod_next,
+				false/*strong*/,
+				__ATOMIC_ACQUIRE,
+				__ATOMIC_RELAXED);
 	} while (odp_unlikely(success == 0));
 
 	/* write entries in ring */
 	ENQUEUE_PTRS();
-	odp_mem_barrier();
 
 	/* if we exceed the watermark */
 	if (odp_unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
@@ -282,6 +285,8 @@  int __odph_ring_mp_do_enqueue(odph_ring_t *r, void * const *obj_table,
 	while (odp_unlikely(r->prod.tail != prod_head))
 		odp_spin();
 
+	/* Release our entries and the memory they refer to */
+	__atomic_thread_fence(__ATOMIC_RELEASE);
 	r->prod.tail = prod_next;
 	return ret;
 }
@@ -324,7 +329,6 @@  int __odph_ring_sp_do_enqueue(odph_ring_t *r, void * const *obj_table,
 
 	/* write entries in ring */
 	ENQUEUE_PTRS();
-	odp_mem_barrier();
 
 	/* if we exceed the watermark */
 	if (odp_unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
@@ -334,6 +338,8 @@  int __odph_ring_sp_do_enqueue(odph_ring_t *r, void * const *obj_table,
 		ret = (behavior == ODPH_RING_QUEUE_FIXED) ? 0 : n;
 	}
 
+	/* Release our entries and the memory they refer to */
+	__atomic_thread_fence(__ATOMIC_RELEASE);
 	r->prod.tail = prod_next;
 	return ret;
 }
@@ -378,13 +384,16 @@  int __odph_ring_mc_do_dequeue(odph_ring_t *r, void **obj_table,
 		}
 
 		cons_next = cons_head + n;
-		success = odp_atomic_cmpset_u32(&r->cons.head, cons_head,
-					      cons_next);
+		success = __atomic_compare_exchange_n(&r->cons.head,
+				&cons_head,
+				cons_next,
+				false/*strong*/,
+				__ATOMIC_ACQUIRE,
+				__ATOMIC_RELAXED);
 	} while (odp_unlikely(success == 0));
 
 	/* copy in table */
 	DEQUEUE_PTRS();
-	odp_mem_barrier();
 
 	/*
 	 * If there are other dequeues in progress that preceded us,
@@ -393,6 +402,8 @@  int __odph_ring_mc_do_dequeue(odph_ring_t *r, void **obj_table,
 	while (odp_unlikely(r->cons.tail != cons_head))
 		odp_spin();
 
+	/* Release our entries and the memory they refer to */
+	__atomic_thread_fence(__ATOMIC_RELEASE);
 	r->cons.tail = cons_next;
 
 	return behavior == ODPH_RING_QUEUE_FIXED ? 0 : n;
@@ -431,9 +442,10 @@  int __odph_ring_sc_do_dequeue(odph_ring_t *r, void **obj_table,
 	cons_next = cons_head + n;
 	r->cons.head = cons_next;
 
+	/* Acquire the pointers and the memory they refer to */
+	__atomic_thread_fence(__ATOMIC_ACQUIRE);
 	/* copy in table */
 	DEQUEUE_PTRS();
-	odp_mem_barrier();
 
 	r->cons.tail = cons_next;
 	return behavior == ODPH_RING_QUEUE_FIXED ? 0 : n;