diff mbox series

[API-NEXT,v3,2/2] linux-generic: api schedule unlock lock

Message ID 1504702822-13861-3-git-send-email-odpbot@yandex.ru
State New
Headers show
Series [API-NEXT,v3,1/2] api: schedule: add schedule order unlock lock api | expand

Commit Message

Github ODP bot Sept. 6, 2017, 1 p.m. UTC
From: Balasubramanian Manoharan <bala.manoharan@linaro.org>


Signed-off-by: Balasubramanian Manoharan <bala.manoharan@linaro.org>

---
/** Email created from pull request 160 (bala-manoharan:api_sched_order_lock)
 ** https://github.com/Linaro/odp/pull/160
 ** Patch: https://github.com/Linaro/odp/pull/160.patch
 ** Base sha: 4eae04e80a634c17ac276bb06bce468cbe28cde0
 ** Merge commit sha: c9c66447de67e07c36638143516df6a14743a749
 **/
 platform/linux-generic/include/odp_schedule_if.h        |  1 +
 .../include/odp_schedule_scalable_ordered.h             |  1 +
 platform/linux-generic/odp_schedule.c                   | 13 ++++++++++++-
 platform/linux-generic/odp_schedule_if.c                |  5 +++++
 platform/linux-generic/odp_schedule_iquery.c            | 13 ++++++++++++-
 platform/linux-generic/odp_schedule_scalable.c          | 17 +++++++++++++++++
 platform/linux-generic/odp_schedule_sp.c                |  8 +++++++-
 7 files changed, 55 insertions(+), 3 deletions(-)
diff mbox series

Patch

diff --git a/platform/linux-generic/include/odp_schedule_if.h b/platform/linux-generic/include/odp_schedule_if.h
index 657993b1..b0db67ab 100644
--- a/platform/linux-generic/include/odp_schedule_if.h
+++ b/platform/linux-generic/include/odp_schedule_if.h
@@ -95,6 +95,7 @@  typedef struct {
 				   odp_schedule_group_info_t *);
 	void (*schedule_order_lock)(unsigned);
 	void (*schedule_order_unlock)(unsigned);
+	void (*schedule_order_unlock_lock)(unsigned);
 
 } schedule_api_t;
 
diff --git a/platform/linux-generic/include/odp_schedule_scalable_ordered.h b/platform/linux-generic/include/odp_schedule_scalable_ordered.h
index 1c365a2b..493a4a78 100644
--- a/platform/linux-generic/include/odp_schedule_scalable_ordered.h
+++ b/platform/linux-generic/include/odp_schedule_scalable_ordered.h
@@ -79,6 +79,7 @@  typedef struct reorder_window {
 	uint32_t tail;
 	uint32_t turn;
 	uint32_t olock[CONFIG_QUEUE_MAX_ORD_LOCKS];
+	uint32_t lock_index;
 	uint16_t lock_count;
 	/* Reorder contexts in this window */
 	reorder_context_t *ring[RWIN_SIZE];
diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-generic/odp_schedule.c
index 5b940762..8de0af35 100644
--- a/platform/linux-generic/odp_schedule.c
+++ b/platform/linux-generic/odp_schedule.c
@@ -163,6 +163,7 @@  typedef struct {
 		int stash_num; /**< Number of stashed enqueue operations */
 		uint8_t in_order; /**< Order status */
 		lock_called_t lock_called; /**< States of ordered locks */
+		uint32_t lock_index;
 		/** Storage for stashed enqueue operations */
 		ordered_stash_t stash[MAX_ORDERED_STASH];
 	} ordered;
@@ -1121,6 +1122,7 @@  static void schedule_order_lock(unsigned lock_index)
 
 		if (lock_seq == sched_local.ordered.ctx) {
 			sched_local.ordered.lock_called.u8[lock_index] = 1;
+			sched_local.ordered.lock_index = lock_index;
 			return;
 		}
 		odp_cpu_pause();
@@ -1141,9 +1143,17 @@  static void schedule_order_unlock(unsigned lock_index)
 
 	ODP_ASSERT(sched_local.ordered.ctx == odp_atomic_load_u64(ord_lock));
 
+	sched_local.ordered.lock_index = sched->queue[queue_index].
+					order_lock_count + 1;
 	odp_atomic_store_rel_u64(ord_lock, sched_local.ordered.ctx + 1);
 }
 
+static void schedule_order_unlock_lock(unsigned lock_index)
+{
+	schedule_order_unlock(sched_local.ordered.lock_index);
+	schedule_order_lock(lock_index);
+}
+
 static void schedule_pause(void)
 {
 	sched_local.pause = 1;
@@ -1429,5 +1439,6 @@  const schedule_api_t schedule_default_api = {
 	.schedule_group_thrmask   = schedule_group_thrmask,
 	.schedule_group_info      = schedule_group_info,
 	.schedule_order_lock      = schedule_order_lock,
-	.schedule_order_unlock    = schedule_order_unlock
+	.schedule_order_unlock    = schedule_order_unlock,
+	.schedule_order_unlock_lock = schedule_order_unlock_lock
 };
diff --git a/platform/linux-generic/odp_schedule_if.c b/platform/linux-generic/odp_schedule_if.c
index e56e3722..858c1949 100644
--- a/platform/linux-generic/odp_schedule_if.c
+++ b/platform/linux-generic/odp_schedule_if.c
@@ -129,3 +129,8 @@  void odp_schedule_order_unlock(unsigned lock_index)
 {
 	return sched_api->schedule_order_unlock(lock_index);
 }
+
+void odp_schedule_order_unlock_lock(uint32_t lock_index)
+{
+	sched_api->schedule_order_unlock_lock(lock_index);
+}
diff --git a/platform/linux-generic/odp_schedule_iquery.c b/platform/linux-generic/odp_schedule_iquery.c
index b81e5dab..d810ae58 100644
--- a/platform/linux-generic/odp_schedule_iquery.c
+++ b/platform/linux-generic/odp_schedule_iquery.c
@@ -223,6 +223,7 @@  struct sched_thread_local {
 		int stash_num; /**< Number of stashed enqueue operations */
 		uint8_t in_order; /**< Order status */
 		lock_called_t lock_called; /**< States of ordered locks */
+		uint32_t lock_index;
 		/** Storage for stashed enqueue operations */
 		ordered_stash_t stash[MAX_ORDERED_STASH];
 	} ordered;
@@ -1273,6 +1274,7 @@  static void schedule_order_lock(unsigned lock_index)
 
 		if (lock_seq == thread_local.ordered.ctx) {
 			thread_local.ordered.lock_called.u8[lock_index] = 1;
+			thread_local.ordered.lock_index = lock_index;
 			return;
 		}
 		odp_cpu_pause();
@@ -1293,9 +1295,17 @@  static void schedule_order_unlock(unsigned lock_index)
 
 	ODP_ASSERT(thread_local.ordered.ctx == odp_atomic_load_u64(ord_lock));
 
+	thread_local.ordered.lock_index = sched->queues[queue_index].
+						lock_count + 1;
 	odp_atomic_store_rel_u64(ord_lock, thread_local.ordered.ctx + 1);
 }
 
+static void schedule_order_unlock_lock(unsigned lock_index)
+{
+	schedule_order_unlock(thread_local.ordered.lock_index);
+	schedule_order_lock(lock_index);
+}
+
 static unsigned schedule_max_ordered_locks(void)
 {
 	return CONFIG_QUEUE_MAX_ORD_LOCKS;
@@ -1368,7 +1378,8 @@  const schedule_api_t schedule_iquery_api = {
 	.schedule_group_thrmask   = schedule_group_thrmask,
 	.schedule_group_info      = schedule_group_info,
 	.schedule_order_lock      = schedule_order_lock,
-	.schedule_order_unlock    = schedule_order_unlock
+	.schedule_order_unlock    = schedule_order_unlock,
+	.schedule_order_unlock_lock = schedule_order_unlock_lock
 };
 
 static void thread_set_interest(sched_thread_local_t *thread,
diff --git a/platform/linux-generic/odp_schedule_scalable.c b/platform/linux-generic/odp_schedule_scalable.c
index 765326e8..f8b17578 100644
--- a/platform/linux-generic/odp_schedule_scalable.c
+++ b/platform/linux-generic/odp_schedule_scalable.c
@@ -1007,6 +1007,8 @@  static void schedule_order_lock(unsigned lock_index)
 		       monitor32(&rctx->rwin->olock[lock_index],
 				 __ATOMIC_ACQUIRE) != rctx->sn)
 			doze();
+		rctx->rwin->lock_index = lock_index;
+
 	}
 }
 
@@ -1025,9 +1027,23 @@  static void schedule_order_unlock(unsigned lock_index)
 	atomic_store_release(&rctx->rwin->olock[lock_index],
 			     rctx->sn + 1,
 			     /*readonly=*/false);
+	rctx->rwin->lock_index = rctx->rwin->lock_count + 1;
 	rctx->olock_flags |= 1U << lock_index;
 }
 
+static void schedule_order_unlock_lock(unsigned lock_index)
+{
+	struct reorder_context *rctx;
+
+	rctx = sched_ts->rctx;
+	if (odp_unlikely(rctx == NULL || rctx->rwin == NULL)) {
+		ODP_ERR("Invalid call to odp_schedule_order_unlock_lock\n");
+		return;
+	}
+	schedule_order_unlock(rctx->rwin->lock_index);
+	schedule_order_lock(lock_index);
+}
+
 static void schedule_release_atomic(void)
 {
 	sched_scalable_thread_state_t *ts;
@@ -1978,4 +1994,5 @@  const schedule_api_t schedule_scalable_api = {
 	.schedule_group_info		= schedule_group_info,
 	.schedule_order_lock		= schedule_order_lock,
 	.schedule_order_unlock		= schedule_order_unlock,
+	.schedule_order_unlock_lock	= schedule_order_unlock_lock,
 };
diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c
index 05241275..d4dfbcaf 100644
--- a/platform/linux-generic/odp_schedule_sp.c
+++ b/platform/linux-generic/odp_schedule_sp.c
@@ -819,6 +819,11 @@  static void schedule_order_unlock(unsigned lock_index)
 	(void)lock_index;
 }
 
+static void schedule_order_unlock_lock(unsigned lock_index)
+{
+	(void)lock_index;
+}
+
 static void order_lock(void)
 {
 }
@@ -868,5 +873,6 @@  const schedule_api_t schedule_sp_api = {
 	.schedule_group_thrmask   = schedule_group_thrmask,
 	.schedule_group_info      = schedule_group_info,
 	.schedule_order_lock      = schedule_order_lock,
-	.schedule_order_unlock    = schedule_order_unlock
+	.schedule_order_unlock    = schedule_order_unlock,
+	.schedule_order_unlock_lock = schedule_order_unlock_lock
 };