
[API-NEXT,PATCHv3,1/8] linux-generic: schedule: move ordered lock routines to odp_schedule.c

Message ID 1447129211-9095-2-git-send-email-bill.fischofer@linaro.org
State Superseded

Commit Message

Bill Fischofer Nov. 10, 2015, 4:20 a.m. UTC
Move the odp_schedule_order_lock() and odp_schedule_order_unlock()
routines from odp_queue.c to odp_schedule.c.

Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org>
---
 platform/linux-generic/odp_queue.c    | 31 ------------------------
 platform/linux-generic/odp_schedule.c | 45 ++++++++++++++++++++++++++++++-----
 2 files changed, 39 insertions(+), 37 deletions(-)
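For reference, these two routines implement the scheduler's ordered-lock
API: a thread holding an event from an ordered queue can enter a critical
section that executes in the queue's ingress order. A minimal usage sketch
follows; process_event() and update_shared_state() are illustrative
placeholders, and the source queue is assumed to have been created with
param.sched.lock_count >= 1:

void worker(void)
{
	odp_event_t ev;

	/* Events from an ordered queue may be processed concurrently */
	ev = odp_schedule(NULL, ODP_SCHED_WAIT);
	process_event(ev);

	/* Enter the critical section guarded by ordered lock 0; the call
	 * blocks (spins) until it is this event's turn in queue order */
	odp_schedule_order_lock(0);
	update_shared_state(ev);
	odp_schedule_order_unlock(0);
}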

Patch

diff --git a/platform/linux-generic/odp_queue.c b/platform/linux-generic/odp_queue.c
index 1c15e17..2071870 100644
--- a/platform/linux-generic/odp_queue.c
+++ b/platform/linux-generic/odp_queue.c
@@ -22,7 +22,6 @@ 
 #include <odp_debug_internal.h>
 #include <odp/hints.h>
 #include <odp/sync.h>
-#include <odp_spin_internal.h>
 
 #ifdef USE_TICKETLOCK
 #include <odp/ticketlock.h>
@@ -1015,33 +1014,3 @@  int release_order(queue_entry_t *origin_qe, uint64_t order,
 	UNLOCK(&origin_qe->s.lock);
 	return 0;
 }
-
-void odp_schedule_order_lock(unsigned lock_index)
-{
-	queue_entry_t *origin_qe;
-	uint64_t *sync;
-
-	get_sched_sync(&origin_qe, &sync, lock_index);
-	if (!origin_qe || lock_index >= origin_qe->s.param.sched.lock_count)
-		return;
-
-	/* Wait until we are in order. Note that sync_out will be incremented
-	 * both by unlocks as well as order resolution, so we're OK if only
-	 * some events in the ordered flow need to lock.
-	 */
-	while (*sync > odp_atomic_load_u64(&origin_qe->s.sync_out[lock_index]))
-		odp_spin();
-}
-
-void odp_schedule_order_unlock(unsigned lock_index)
-{
-	queue_entry_t *origin_qe;
-	uint64_t *sync;
-
-	get_sched_sync(&origin_qe, &sync, lock_index);
-	if (!origin_qe || lock_index >= origin_qe->s.param.sched.lock_count)
-		return;
-
-	/* Release the ordered lock */
-	odp_atomic_fetch_inc_u64(&origin_qe->s.sync_out[lock_index]);
-}
diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-generic/odp_schedule.c
index 6df8073..195240e 100644
--- a/platform/linux-generic/odp_schedule.c
+++ b/platform/linux-generic/odp_schedule.c
@@ -22,6 +22,7 @@ 
 
 #include <odp_queue_internal.h>
 #include <odp_packet_io_internal.h>
+#include <odp_spin_internal.h>
 
 odp_thrmask_t sched_mask_all;
 
@@ -793,6 +794,44 @@  void odp_schedule_prefetch(int num ODP_UNUSED)
 {
 }
 
+void odp_schedule_order_lock(unsigned lock_index)
+{
+	queue_entry_t *origin_qe;
+	uint64_t sync, sync_out;
+
+	origin_qe = sched_local.origin_qe;
+	if (!origin_qe || lock_index >= origin_qe->s.param.sched.lock_count)
+		return;
+
+	sync = sched_local.sync[lock_index];
+	sync_out = odp_atomic_load_u64(&origin_qe->s.sync_out[lock_index]);
+	ODP_ASSERT(sync >= sync_out);
+
+	/* Wait until we are in order. Note that sync_out will be incremented
+	 * both by unlocks as well as order resolution, so we're OK if only
+	 * some events in the ordered flow need to lock.
+	 */
+	while (sync != sync_out) {
+		odp_spin();
+		sync_out =
+			odp_atomic_load_u64(&origin_qe->s.sync_out[lock_index]);
+	}
+}
+
+void odp_schedule_order_unlock(unsigned lock_index)
+{
+	queue_entry_t *origin_qe;
+
+	origin_qe = sched_local.origin_qe;
+	if (!origin_qe || lock_index >= origin_qe->s.param.sched.lock_count)
+		return;
+	ODP_ASSERT(sched_local.sync[lock_index] ==
+		   odp_atomic_load_u64(&origin_qe->s.sync_out[lock_index]));
+
+	/* Release the ordered lock */
+	odp_atomic_fetch_inc_u64(&origin_qe->s.sync_out[lock_index]);
+}
+
 void sched_enq_called(void)
 {
 	sched_local.enq_called = 1;
@@ -804,12 +843,6 @@  void get_sched_order(queue_entry_t **origin_qe, uint64_t *order)
 	*order     = sched_local.order;
 }
 
-void get_sched_sync(queue_entry_t **origin_qe, uint64_t **sync, uint32_t ndx)
-{
-	*origin_qe = sched_local.origin_qe;
-	*sync      = &sched_local.sync[ndx];
-}
-
 void sched_order_resolved(odp_buffer_hdr_t *buf_hdr)
 {
 	if (buf_hdr)
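
For reviewers: the lock/unlock pair above is in effect a per-lock ticket
scheme keyed on queue order, with sync acting as the ticket assigned at
dequeue time and sync_out as the "now serving" counter. A self-contained
sketch of the same idea in plain C11 atomics (hypothetical names; not the
ODP code, which additionally advances the counter on order resolution):

#include <stdatomic.h>
#include <stdint.h>

/* "Now serving" counter, one per ordered lock (sync_out in the patch) */
static atomic_uint_fast64_t sync_out;

/* my_ticket is the sync value the event was assigned when dequeued */
static void order_lock(uint64_t my_ticket)
{
	/* Wait until every earlier event has either unlocked or had its
	 * order resolved; both paths increment sync_out */
	while (atomic_load(&sync_out) != my_ticket)
		; /* spin; a real implementation would relax the CPU here */
}

static void order_unlock(void)
{
	atomic_fetch_add(&sync_out, 1);
}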