[API-NEXT,PATCHv2,08/11] linux-generic: schedule: implement scheduler groups

Message ID 1440461669-18578-9-git-send-email-bill.fischofer@linaro.org
State New

Commit Message

Bill Fischofer Aug. 25, 2015, 12:14 a.m. UTC
Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org>
---
 .../include/odp/plat/schedule_types.h              |   4 +
 platform/linux-generic/odp_schedule.c              | 159 +++++++++++++++++++++
 platform/linux-generic/odp_thread.c                |  25 +++-
 3 files changed, 182 insertions(+), 6 deletions(-)

Patch

diff --git a/platform/linux-generic/include/odp/plat/schedule_types.h b/platform/linux-generic/include/odp/plat/schedule_types.h
index bc25c8c..3665fec 100644
--- a/platform/linux-generic/include/odp/plat/schedule_types.h
+++ b/platform/linux-generic/include/odp/plat/schedule_types.h
@@ -43,8 +43,12 @@  typedef int odp_schedule_sync_t;
 
 typedef int odp_schedule_group_t;
 
+/* These must be kept in sync with thread_globals_t in odp_thread.c */
+#define ODP_SCHED_GROUP_INVALID -1
 #define ODP_SCHED_GROUP_ALL     0
 #define ODP_SCHED_GROUP_WORKER  1
+#define ODP_SCHED_GROUP_CONTROL 2
+#define ODP_SCHED_GROUP_NAMED   3
 
 #define ODP_SCHED_GROUP_NAME_LEN 32
 
diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-generic/odp_schedule.c
index 5d32c81..65b7b3c 100644
--- a/platform/linux-generic/odp_schedule.c
+++ b/platform/linux-generic/odp_schedule.c
@@ -23,6 +23,8 @@ 
 #include <odp_queue_internal.h>
 #include <odp_packet_io_internal.h>
 
+odp_thrmask_t sched_mask_all;
+
 /* Number of schedule commands.
  * One per scheduled queue and packet interface */
 #define NUM_SCHED_CMD (ODP_CONFIG_QUEUES + ODP_CONFIG_PKTIO_ENTRIES)
@@ -48,6 +50,11 @@  typedef struct {
 	odp_pool_t     pool;
 	odp_shm_t      shm;
 	uint32_t       pri_count[ODP_CONFIG_SCHED_PRIOS][QUEUES_PER_PRIO];
+	odp_spinlock_t grp_lock;
+	struct {
+		char           name[ODP_SCHED_GROUP_NAME_LEN];
+		odp_thrmask_t *mask;
+	} sched_grp[ODP_CONFIG_SCHED_GRPS];
 } sched_t;
 
 /* Schedule command */
@@ -87,6 +94,9 @@  static sched_t *sched;
 /* Thread local scheduler context */
 static __thread sched_local_t sched_local;
 
+/* Internal routine to get scheduler thread mask addrs */
+odp_thrmask_t *thread_sched_grp_mask(int index);
+
 static void sched_local_init(void)
 {
 	int i;
@@ -123,6 +133,7 @@  int odp_schedule_init_global(void)
 
 	memset(sched, 0, sizeof(sched_t));
 
+	odp_pool_param_init(&params);
 	params.buf.size  = sizeof(sched_cmd_t);
 	params.buf.align = 0;
 	params.buf.num   = NUM_SCHED_CMD;
@@ -163,6 +174,15 @@  int odp_schedule_init_global(void)
 		}
 	}
 
+	odp_spinlock_init(&sched->grp_lock);
+
+	for (i = 0; i < ODP_CONFIG_SCHED_GRPS; i++) {
+		memset(&sched->sched_grp[i].name, 0, ODP_SCHED_GROUP_NAME_LEN);
+		sched->sched_grp[i].mask = thread_sched_grp_mask(i);
+	}
+
+	odp_thrmask_setall(&sched_mask_all);
+
 	ODP_DBG("done\n");
 
 	return 0;
@@ -466,6 +486,17 @@  static int schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
 			}
 
 			qe  = sched_cmd->qe;
+			if (qe->s.param.sched.group > ODP_SCHED_GROUP_ALL &&
+			    !odp_thrmask_isset(sched->sched_grp
+					       [qe->s.param.sched.group].mask,
+					       thr)) {
+				/* This thread is not eligible for work from
+				 * this queue, so continue scheduling it.
+				 */
+				if (odp_queue_enq(pri_q, ev))
+					ODP_ABORT("schedule failed\n");
+				continue;
+			}
 			num = queue_deq_multi(qe, sched_local.buf_hdr, max_deq);
 
 			if (num < 0) {
@@ -587,3 +618,131 @@  int odp_schedule_num_prio(void)
 {
 	return ODP_CONFIG_SCHED_PRIOS;
 }
+
+odp_schedule_group_t odp_schedule_group_create(const char *name,
+					       const odp_thrmask_t *mask)
+{
+	odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
+	int i;
+
+	odp_spinlock_lock(&sched->grp_lock);
+
+	for (i = ODP_SCHED_GROUP_NAMED; i < ODP_CONFIG_SCHED_GRPS; i++) {
+		if (sched->sched_grp[i].name[0] == 0) {
+			strncpy(sched->sched_grp[i].name, name,
+				ODP_SCHED_GROUP_NAME_LEN - 1);
+			odp_thrmask_copy(sched->sched_grp[i].mask, mask);
+			group = (odp_schedule_group_t)i;
+			break;
+		}
+	}
+
+	odp_spinlock_unlock(&sched->grp_lock);
+	return group;
+}
+
+int odp_schedule_group_destroy(odp_schedule_group_t group)
+{
+	int ret;
+
+	odp_spinlock_lock(&sched->grp_lock);
+
+	if (group < ODP_CONFIG_SCHED_GRPS &&
+	    group >= ODP_SCHED_GROUP_NAMED &&
+	    sched->sched_grp[group].name[0] != 0) {
+		odp_thrmask_zero(sched->sched_grp[group].mask);
+		memset(&sched->sched_grp[group].name, 0,
+		       ODP_SCHED_GROUP_NAME_LEN);
+		ret = 0;
+	} else {
+		ret = -1;
+	}
+
+	odp_spinlock_unlock(&sched->grp_lock);
+	return ret;
+}
+
+odp_schedule_group_t odp_schedule_group_lookup(const char *name)
+{
+	odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
+	int i;
+
+	odp_spinlock_lock(&sched->grp_lock);
+
+	for (i = ODP_SCHED_GROUP_NAMED; i < ODP_CONFIG_SCHED_GRPS; i++) {
+		if (strcmp(name, sched->sched_grp[i].name) == 0) {
+			group = (odp_schedule_group_t)i;
+			break;
+		}
+	}
+
+	odp_spinlock_unlock(&sched->grp_lock);
+	return group;
+}
+
+int odp_schedule_group_join(odp_schedule_group_t group,
+			    const odp_thrmask_t *mask)
+{
+	int ret;
+
+	odp_spinlock_lock(&sched->grp_lock);
+
+	if (group < ODP_CONFIG_SCHED_GRPS &&
+	    group >= ODP_SCHED_GROUP_NAMED &&
+	    sched->sched_grp[group].name[0] != 0) {
+		odp_thrmask_or(sched->sched_grp[group].mask,
+			       sched->sched_grp[group].mask,
+			       mask);
+		ret = 0;
+	} else {
+		ret = -1;
+	}
+
+	odp_spinlock_unlock(&sched->grp_lock);
+	return ret;
+}
+
+int odp_schedule_group_leave(odp_schedule_group_t group,
+			     const odp_thrmask_t *mask)
+{
+	int ret;
+
+	odp_spinlock_lock(&sched->grp_lock);
+
+	if (group < ODP_CONFIG_SCHED_GRPS &&
+	    group >= ODP_SCHED_GROUP_NAMED &&
+	    sched->sched_grp[group].name[0] != 0) {
+		odp_thrmask_t leavemask;
+
+		odp_thrmask_xor(&leavemask, mask, &sched_mask_all);
+		odp_thrmask_and(sched->sched_grp[group].mask,
+				sched->sched_grp[group].mask,
+				&leavemask);
+		ret = 0;
+	} else {
+		ret = -1;
+	}
+
+	odp_spinlock_unlock(&sched->grp_lock);
+	return ret;
+}
+
+int odp_schedule_group_thrmask(odp_schedule_group_t group,
+			       odp_thrmask_t *thrmask)
+{
+	int ret;
+
+	odp_spinlock_lock(&sched->grp_lock);
+
+	if (group < ODP_CONFIG_SCHED_GRPS &&
+	    group >= ODP_SCHED_GROUP_NAMED &&
+	    sched->sched_grp[group].name[0] != 0) {
+		*thrmask = *sched->sched_grp[group].mask;
+		ret = 0;
+	} else {
+		ret = -1;
+	}
+
+	odp_spinlock_unlock(&sched->grp_lock);
+	return ret;
+}
diff --git a/platform/linux-generic/odp_thread.c b/platform/linux-generic/odp_thread.c
index 9905c78..770c64e 100644
--- a/platform/linux-generic/odp_thread.c
+++ b/platform/linux-generic/odp_thread.c
@@ -32,9 +32,15 @@  typedef struct {
 
 typedef struct {
 	thread_state_t thr[ODP_CONFIG_MAX_THREADS];
-	odp_thrmask_t  all;
-	odp_thrmask_t  worker;
-	odp_thrmask_t  control;
+	union {
+		/* struct order must be kept in sync with schedule_types.h */
+		struct {
+			odp_thrmask_t  all;
+			odp_thrmask_t  worker;
+			odp_thrmask_t  control;
+		};
+		odp_thrmask_t sched_grp_mask[ODP_CONFIG_SCHED_GRPS];
+	};
 	uint32_t       num;
 	uint32_t       num_worker;
 	uint32_t       num_control;
@@ -53,6 +59,7 @@  static __thread thread_state_t *this_thread;
 int odp_thread_init_global(void)
 {
 	odp_shm_t shm;
+	int i;
 
 	shm = odp_shm_reserve("odp_thread_globals",
 			      sizeof(thread_globals_t),
@@ -65,13 +72,19 @@  int odp_thread_init_global(void)
 
 	memset(thread_globals, 0, sizeof(thread_globals_t));
 	odp_spinlock_init(&thread_globals->lock);
-	odp_thrmask_zero(&thread_globals->all);
-	odp_thrmask_zero(&thread_globals->worker);
-	odp_thrmask_zero(&thread_globals->control);
+
+	for (i = 0; i < ODP_CONFIG_SCHED_GRPS; i++)
+		odp_thrmask_zero(&thread_globals->sched_grp_mask[i]);
 
 	return 0;
 }
 
+odp_thrmask_t *thread_sched_grp_mask(int index);
+odp_thrmask_t *thread_sched_grp_mask(int index)
+{
+	return &thread_globals->sched_grp_mask[index];
+}
+
 int odp_thread_term_global(void)
 {
 	int ret;
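
Not part of the patch: a minimal usage sketch of the scheduler group API added above. It assumes ODP is already initialized and that the existing thrmask helpers odp_thrmask_zero()/odp_thrmask_set() are available; the function name sched_group_example, the group name "grp_a" and the worker thread id are illustrative only.

#include <odp.h>

/* Illustrative only: create a named group containing one thread, query
 * and adjust its membership, then destroy it. A queue whose sched.group
 * parameter names this group is dispatched only to threads set in the
 * group's mask (see the odp_thrmask_isset() check added to schedule()
 * above). */
static void sched_group_example(int worker_thr)
{
	odp_thrmask_t mask, members;
	odp_schedule_group_t grp;

	odp_thrmask_zero(&mask);
	odp_thrmask_set(&mask, worker_thr);

	/* Named groups are allocated from slot ODP_SCHED_GROUP_NAMED up */
	grp = odp_schedule_group_create("grp_a", &mask);
	if (grp == ODP_SCHED_GROUP_INVALID)
		return;

	/* The handle can also be recovered by name */
	grp = odp_schedule_group_lookup("grp_a");

	/* Membership can be changed and read back at runtime */
	odp_schedule_group_join(grp, &mask);
	odp_schedule_group_thrmask(grp, &members);
	odp_schedule_group_leave(grp, &mask);

	odp_schedule_group_destroy(grp);
}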