
[API-NEXT,PATCHv3,06/10] linux-generic: schedule: implement scheduler groups

Message ID 1440632493-31734-7-git-send-email-bill.fischofer@linaro.org
State New

Commit Message

Bill Fischofer Aug. 26, 2015, 11:41 p.m. UTC
Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org>
---
 .../include/odp/plat/schedule_types.h              |   4 +
 platform/linux-generic/odp_schedule.c              | 160 +++++++++++++++++++++
 platform/linux-generic/odp_thread.c                |  25 +++-
 3 files changed, 183 insertions(+), 6 deletions(-)

Comments

Bill Fischofer Aug. 28, 2015, 5 p.m. UTC | #1
On Thu, Aug 27, 2015 at 6:13 AM, Savolainen, Petri (Nokia - FI/Espoo) <
petri.savolainen@nokia.com> wrote:

>
>
> > -----Original Message-----
> > From: lng-odp [mailto:lng-odp-bounces@lists.linaro.org] On Behalf Of
> > ext Bill Fischofer
> > Sent: Thursday, August 27, 2015 2:41 AM
> > To: lng-odp@lists.linaro.org
> > Subject: [lng-odp] [API-NEXT PATCHv3 06/10] linux-generic: schedule:
> > implement scheduler groups
> >
> > Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org>
> > ---
> >  .../include/odp/plat/schedule_types.h              |   4 +
> >  platform/linux-generic/odp_schedule.c              | 160
> > +++++++++++++++++++++
> >  platform/linux-generic/odp_thread.c                |  25 +++-
> >  3 files changed, 183 insertions(+), 6 deletions(-)
> >
> > diff --git a/platform/linux-generic/include/odp/plat/schedule_types.h
> > b/platform/linux-generic/include/odp/plat/schedule_types.h
> > index 87f9c11..9ab90f4 100644
> > --- a/platform/linux-generic/include/odp/plat/schedule_types.h
> > +++ b/platform/linux-generic/include/odp/plat/schedule_types.h
> > @@ -43,8 +43,12 @@ typedef int odp_schedule_sync_t;
> >
> >  typedef int odp_schedule_group_t;
> >
> > +/* These must be kept in sync with thread_globals_t in odp_thread.c */
> > +#define ODP_SCHED_GROUP_INVALID -1
> >  #define ODP_SCHED_GROUP_ALL     0
> >  #define ODP_SCHED_GROUP_WORKER  1
> > +#define ODP_SCHED_GROUP_CONTROL 2
> > +#define ODP_SCHED_GROUP_NAMED   3
>
> This is actually an API change (plat/schedule_types.h implements API).
> Additional constants should be documented in odp/api/schedule_types.h.
> _INVALID and _CONTROL can be added to the API. _NAMED seems to be an
> implementation detail and should be moved out from this header (not visible
> through odp.h)


Not including the @defs for ODP_SCHED_GROUP_ALL and ODP_SCHED_GROUP_WORKER
in schedule.h was an oversight in the original patch, as was not defining
ODP_SCHED_GROUP_INVALID and ODP_SCHED_GROUP_CONTROL.  I'll address this in
v4. ODP_SCHED_GROUP_NAMED will be renamed to _ODP_SCHED_GROUP_NAMED to make
it clear it's for internal use. However, implementation-specific things
needed to support the public API do live in the include/odp/plat files,
e.g., include/odp/api/strong_types.h.  It could be moved, but I think it's
clearer to have it here since this is under platform/linux-generic.
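For reference, a rough sketch of how the v4 @def documentation for these
constants might look in the public odp/api/schedule_types.h (the wording
below is illustrative only, not the actual v4 text):

/**
 * @def ODP_SCHED_GROUP_INVALID
 * Invalid scheduler group
 */

/**
 * @def ODP_SCHED_GROUP_ALL
 * Group of all threads. All active worker and control threads belong to
 * this group.
 */

/**
 * @def ODP_SCHED_GROUP_WORKER
 * Group of all worker threads. All active worker threads belong to this
 * group.
 */

/**
 * @def ODP_SCHED_GROUP_CONTROL
 * Group of all control threads. All active control threads belong to
 * this group.
 */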


>


> >
> >  #define ODP_SCHED_GROUP_NAME_LEN 32
> >
> > diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-
> > generic/odp_schedule.c
> > index 5d32c81..c195ba5 100644
> > --- a/platform/linux-generic/odp_schedule.c
> > +++ b/platform/linux-generic/odp_schedule.c
> > @@ -23,6 +23,8 @@
> >  #include <odp_queue_internal.h>
> >  #include <odp_packet_io_internal.h>
> >
> > +odp_thrmask_t sched_mask_all;
> > +
> >  /* Number of schedule commands.
> >   * One per scheduled queue and packet interface */
> >  #define NUM_SCHED_CMD (ODP_CONFIG_QUEUES + ODP_CONFIG_PKTIO_ENTRIES)
> > @@ -48,6 +50,11 @@ typedef struct {
> >       odp_pool_t     pool;
> >       odp_shm_t      shm;
> >       uint32_t
> > pri_count[ODP_CONFIG_SCHED_PRIOS][QUEUES_PER_PRIO];
> > +     odp_spinlock_t grp_lock;
> > +     struct {
> > +             char           name[ODP_SCHED_GROUP_NAME_LEN];
> > +             odp_thrmask_t *mask;
> > +     } sched_grp[ODP_CONFIG_SCHED_GRPS];
> >  } sched_t;
> >
> >  /* Schedule command */
> > @@ -87,6 +94,9 @@ static sched_t *sched;
> >  /* Thread local scheduler context */
> >  static __thread sched_local_t sched_local;
> >
> > +/* Internal routine to get scheduler thread mask addrs */
> > +odp_thrmask_t *thread_sched_grp_mask(int index);
> > +
> >  static void sched_local_init(void)
> >  {
> >       int i;
> > @@ -123,6 +133,7 @@ int odp_schedule_init_global(void)
> >
> >       memset(sched, 0, sizeof(sched_t));
> >
> > +     odp_pool_param_init(&params);
> >       params.buf.size  = sizeof(sched_cmd_t);
> >       params.buf.align = 0;
> >       params.buf.num   = NUM_SCHED_CMD;
> > @@ -163,6 +174,15 @@ int odp_schedule_init_global(void)
> >               }
> >       }
> >
> > +     odp_spinlock_init(&sched->grp_lock);
> > +
> > +     for (i = 0; i < ODP_CONFIG_SCHED_GRPS; i++) {
> > +             memset(&sched->sched_grp[i].name, 0,
> > ODP_SCHED_GROUP_NAME_LEN);
> > +             sched->sched_grp[i].mask = thread_sched_grp_mask(i);
> > +     }
> > +
> > +     odp_thrmask_setall(&sched_mask_all);
> > +
> >       ODP_DBG("done\n");
> >
> >       return 0;
> > @@ -466,6 +486,18 @@ static int schedule(odp_queue_t *out_queue,
> > odp_event_t out_ev[],
> >                       }
> >
> >                       qe  = sched_cmd->qe;
> > +
> > +                     if (qe->s.param.sched.group > ODP_SCHED_GROUP_ALL
> &&
> > +                         !odp_thrmask_isset(sched->sched_grp
> > +
> [qe->s.param.sched.group].mask,
>
> Is this style OK? Hard to read if line breaks on array brackets. Use temp
> variable instead to fit in 80 chars.
>

As mentioned elsewhere, when you have a line length of 80 chars and 8 char
tabs, you're going to get the
occasional awkward-looking fold.  Artificially creating temps has its own
ugliness and I don't think it makes sense to do that for the occasional
one-liner like this.


>
> > +                                            thr)) {
> > +                             /* This thread is not eligible for work
> from
> > +                              * this queue, so continue scheduling it.
> > +                              */
> > +                             if (odp_queue_enq(pri_q, ev))
> > +                                     ODP_ABORT("schedule failed\n");
> > +                             continue;
>
> This logic is not optimal. It would be better to arrange scheduler queues
> in groups and poll only on those groups that this thread belongs to. The
> current logic causes unnecessary ping-pong of queues in and out of the
> scheduler queues. However, for the sake of time I'm OK to put this in and
> optimize afterwards.
>

Agreed; however, rewriting the scheduler was beyond the scope of this
patch.  I originally looked at doing what you suggest but quickly realized
this was a much larger task and not relevant to the goal of adding group
support as a first step.  Having a more parametric scheduler is something
we should pursue as a separate work item.
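To illustrate the arrangement Petri describes, here is a purely
illustrative sketch (not ODP code; the ring type, thr_in_group() and
grp_pri_ring[][] are invented names): command rings would be indexed by
group as well as priority, so a thread only polls the rings of groups it
belongs to and never dequeues a command it must immediately re-enqueue.

/* Illustrative sketch only -- not part of this patch */
#include <stddef.h>

#define NUM_GRPS  16	/* would be ODP_CONFIG_SCHED_GRPS */
#define NUM_PRIOS 8	/* would be ODP_CONFIG_SCHED_PRIOS */

typedef struct cmd_ring_t cmd_ring_t;			/* opaque ring type */
extern cmd_ring_t *grp_pri_ring[NUM_GRPS][NUM_PRIOS];	/* per (grp, prio) */
extern void *ring_deq(cmd_ring_t *ring);		/* NULL when empty */
extern int thr_in_group(int thr, int grp);		/* thrmask lookup */

/* Poll only the rings of groups this thread belongs to, so commands for
 * other groups are never dequeued and re-enqueued. */
static void *next_cmd_for_thread(int thr)
{
	int prio, grp;

	for (prio = 0; prio < NUM_PRIOS; prio++) {
		for (grp = 0; grp < NUM_GRPS; grp++) {
			void *cmd;

			if (!thr_in_group(thr, grp))
				continue;

			cmd = ring_deq(grp_pri_ring[grp][prio]);
			if (cmd != NULL)
				return cmd;
		}
	}

	return NULL;
}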


>
> -Petri
>
> > +                     }
> >                       num = queue_deq_multi(qe, sched_local.buf_hdr,
> > max_deq);
> >
> >                       if (num < 0) {
> > @@ -587,3 +619,131 @@ int odp_schedule_num_prio(void)
> >  {
> >       return ODP_CONFIG_SCHED_PRIOS;
> >  }
>
Mike Holmes Aug. 28, 2015, 7:04 p.m. UTC | #2
On 28 August 2015 at 13:00, Bill Fischofer <bill.fischofer@linaro.org>
wrote:

>> Is this style OK? Hard to read if line breaks on array brackets. Use temp
>> variable instead to fit in 80 chars.
>>
>
> As mentioned elsewhere, when you have a line length of 80 chars and 8 char
> tabs, you're going to get the
> occasional awkward-looking fold.  Artificially creating temps has its own
> ugliness and I don't think it makes sense to do that for the occasional
> one-liner like this.
>

Agree that 80 chars is a challenge (ugly), but I like the pseudo code below
better; to me it says what it is doing more clearly in the final comparison.

group = qe->s.param.sched.group;
group_mask_is_set = odp_thrmask_isset(sched->sched_grp[group].mask, thr);

if (group > ODP_SCHED_GROUP_ALL && !group_mask_is_set) {


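Applied to the hunk in question, that suggestion might look roughly like
this (a sketch only; the local variable name is illustrative):

			int grp = qe->s.param.sched.group;

			if (grp > ODP_SCHED_GROUP_ALL &&
			    !odp_thrmask_isset(sched->sched_grp[grp].mask,
					       thr)) {
				/* Not eligible for work from this queue,
				 * so continue scheduling it. */
				if (odp_queue_enq(pri_q, ev))
					ODP_ABORT("schedule failed\n");
				continue;
			}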


Patch

diff --git a/platform/linux-generic/include/odp/plat/schedule_types.h b/platform/linux-generic/include/odp/plat/schedule_types.h
index 87f9c11..9ab90f4 100644
--- a/platform/linux-generic/include/odp/plat/schedule_types.h
+++ b/platform/linux-generic/include/odp/plat/schedule_types.h
@@ -43,8 +43,12 @@  typedef int odp_schedule_sync_t;
 
 typedef int odp_schedule_group_t;
 
+/* These must be kept in sync with thread_globals_t in odp_thread.c */
+#define ODP_SCHED_GROUP_INVALID -1
 #define ODP_SCHED_GROUP_ALL     0
 #define ODP_SCHED_GROUP_WORKER  1
+#define ODP_SCHED_GROUP_CONTROL 2
+#define ODP_SCHED_GROUP_NAMED   3
 
 #define ODP_SCHED_GROUP_NAME_LEN 32
 
diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-generic/odp_schedule.c
index 5d32c81..c195ba5 100644
--- a/platform/linux-generic/odp_schedule.c
+++ b/platform/linux-generic/odp_schedule.c
@@ -23,6 +23,8 @@ 
 #include <odp_queue_internal.h>
 #include <odp_packet_io_internal.h>
 
+odp_thrmask_t sched_mask_all;
+
 /* Number of schedule commands.
  * One per scheduled queue and packet interface */
 #define NUM_SCHED_CMD (ODP_CONFIG_QUEUES + ODP_CONFIG_PKTIO_ENTRIES)
@@ -48,6 +50,11 @@  typedef struct {
 	odp_pool_t     pool;
 	odp_shm_t      shm;
 	uint32_t       pri_count[ODP_CONFIG_SCHED_PRIOS][QUEUES_PER_PRIO];
+	odp_spinlock_t grp_lock;
+	struct {
+		char           name[ODP_SCHED_GROUP_NAME_LEN];
+		odp_thrmask_t *mask;
+	} sched_grp[ODP_CONFIG_SCHED_GRPS];
 } sched_t;
 
 /* Schedule command */
@@ -87,6 +94,9 @@  static sched_t *sched;
 /* Thread local scheduler context */
 static __thread sched_local_t sched_local;
 
+/* Internal routine to get scheduler thread mask addrs */
+odp_thrmask_t *thread_sched_grp_mask(int index);
+
 static void sched_local_init(void)
 {
 	int i;
@@ -123,6 +133,7 @@  int odp_schedule_init_global(void)
 
 	memset(sched, 0, sizeof(sched_t));
 
+	odp_pool_param_init(&params);
 	params.buf.size  = sizeof(sched_cmd_t);
 	params.buf.align = 0;
 	params.buf.num   = NUM_SCHED_CMD;
@@ -163,6 +174,15 @@  int odp_schedule_init_global(void)
 		}
 	}
 
+	odp_spinlock_init(&sched->grp_lock);
+
+	for (i = 0; i < ODP_CONFIG_SCHED_GRPS; i++) {
+		memset(&sched->sched_grp[i].name, 0, ODP_SCHED_GROUP_NAME_LEN);
+		sched->sched_grp[i].mask = thread_sched_grp_mask(i);
+	}
+
+	odp_thrmask_setall(&sched_mask_all);
+
 	ODP_DBG("done\n");
 
 	return 0;
@@ -466,6 +486,18 @@  static int schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
 			}
 
 			qe  = sched_cmd->qe;
+
+			if (qe->s.param.sched.group > ODP_SCHED_GROUP_ALL &&
+			    !odp_thrmask_isset(sched->sched_grp
+					       [qe->s.param.sched.group].mask,
+					       thr)) {
+				/* This thread is not eligible for work from
+				 * this queue, so continue scheduling it.
+				 */
+				if (odp_queue_enq(pri_q, ev))
+					ODP_ABORT("schedule failed\n");
+				continue;
+			}
 			num = queue_deq_multi(qe, sched_local.buf_hdr, max_deq);
 
 			if (num < 0) {
@@ -587,3 +619,131 @@  int odp_schedule_num_prio(void)
 {
 	return ODP_CONFIG_SCHED_PRIOS;
 }
+
+odp_schedule_group_t odp_schedule_group_create(const char *name,
+					       const odp_thrmask_t *mask)
+{
+	odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
+	int i;
+
+	odp_spinlock_lock(&sched->grp_lock);
+
+	for (i = ODP_SCHED_GROUP_NAMED; i < ODP_CONFIG_SCHED_GRPS; i++) {
+		if (sched->sched_grp[i].name[0] == 0) {
+			strncpy(sched->sched_grp[i].name, name,
+				ODP_SCHED_GROUP_NAME_LEN - 1);
+			odp_thrmask_copy(sched->sched_grp[i].mask, mask);
+			group = (odp_schedule_group_t)i;
+			break;
+		}
+	}
+
+	odp_spinlock_unlock(&sched->grp_lock);
+	return group;
+}
+
+int odp_schedule_group_destroy(odp_schedule_group_t group)
+{
+	int ret;
+
+	odp_spinlock_lock(&sched->grp_lock);
+
+	if (group < ODP_CONFIG_SCHED_GRPS &&
+	    group >= ODP_SCHED_GROUP_NAMED &&
+	    sched->sched_grp[group].name[0] != 0) {
+		odp_thrmask_zero(sched->sched_grp[group].mask);
+		memset(&sched->sched_grp[group].name, 0,
+		       ODP_SCHED_GROUP_NAME_LEN);
+		ret = 0;
+	} else {
+		ret = -1;
+	}
+
+	odp_spinlock_unlock(&sched->grp_lock);
+	return ret;
+}
+
+odp_schedule_group_t odp_schedule_group_lookup(const char *name)
+{
+	odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
+	int i;
+
+	odp_spinlock_lock(&sched->grp_lock);
+
+	for (i = ODP_SCHED_GROUP_NAMED; i < ODP_CONFIG_SCHED_GRPS; i++) {
+		if (strcmp(name, sched->sched_grp[i].name) == 0) {
+			group = (odp_schedule_group_t)i;
+			break;
+		}
+	}
+
+	odp_spinlock_unlock(&sched->grp_lock);
+	return group;
+}
+
+int odp_schedule_group_join(odp_schedule_group_t group,
+			    const odp_thrmask_t *mask)
+{
+	int ret;
+
+	odp_spinlock_lock(&sched->grp_lock);
+
+	if (group < ODP_CONFIG_SCHED_GRPS &&
+	    group >= ODP_SCHED_GROUP_NAMED &&
+	    sched->sched_grp[group].name[0] != 0) {
+		odp_thrmask_or(sched->sched_grp[group].mask,
+			       sched->sched_grp[group].mask,
+			       mask);
+		ret = 0;
+	} else {
+		ret = -1;
+	}
+
+	odp_spinlock_unlock(&sched->grp_lock);
+	return ret;
+}
+
+int odp_schedule_group_leave(odp_schedule_group_t group,
+			     const odp_thrmask_t *mask)
+{
+	int ret;
+
+	odp_spinlock_lock(&sched->grp_lock);
+
+	if (group < ODP_CONFIG_SCHED_GRPS &&
+	    group >= ODP_SCHED_GROUP_NAMED &&
+	    sched->sched_grp[group].name[0] != 0) {
+		odp_thrmask_t leavemask;
+
+		odp_thrmask_xor(&leavemask, mask, &sched_mask_all);
+		odp_thrmask_and(sched->sched_grp[group].mask,
+				sched->sched_grp[group].mask,
+				&leavemask);
+		ret = 0;
+	} else {
+		ret = -1;
+	}
+
+	odp_spinlock_unlock(&sched->grp_lock);
+	return ret;
+}
+
+int odp_schedule_group_thrmask(odp_schedule_group_t group,
+			       odp_thrmask_t *thrmask)
+{
+	int ret;
+
+	odp_spinlock_lock(&sched->grp_lock);
+
+	if (group < ODP_CONFIG_SCHED_GRPS &&
+	    group >= ODP_SCHED_GROUP_NAMED &&
+	    sched->sched_grp[group].name[0] != 0) {
+		*thrmask = *sched->sched_grp[group].mask;
+		ret = 0;
+	} else {
+		ret = -1;
+	}
+
+	odp_spinlock_unlock(&sched->grp_lock);
+	return ret;
+}
diff --git a/platform/linux-generic/odp_thread.c b/platform/linux-generic/odp_thread.c
index 9905c78..770c64e 100644
--- a/platform/linux-generic/odp_thread.c
+++ b/platform/linux-generic/odp_thread.c
@@ -32,9 +32,15 @@  typedef struct {
 
 typedef struct {
 	thread_state_t thr[ODP_CONFIG_MAX_THREADS];
-	odp_thrmask_t  all;
-	odp_thrmask_t  worker;
-	odp_thrmask_t  control;
+	union {
+		/* struct order must be kept in sync with schedule_types.h */
+		struct {
+			odp_thrmask_t  all;
+			odp_thrmask_t  worker;
+			odp_thrmask_t  control;
+		};
+		odp_thrmask_t sched_grp_mask[ODP_CONFIG_SCHED_GRPS];
+	};
 	uint32_t       num;
 	uint32_t       num_worker;
 	uint32_t       num_control;
@@ -53,6 +59,7 @@  static __thread thread_state_t *this_thread;
 int odp_thread_init_global(void)
 {
 	odp_shm_t shm;
+	int i;
 
 	shm = odp_shm_reserve("odp_thread_globals",
 			      sizeof(thread_globals_t),
@@ -65,13 +72,19 @@  int odp_thread_init_global(void)
 
 	memset(thread_globals, 0, sizeof(thread_globals_t));
 	odp_spinlock_init(&thread_globals->lock);
-	odp_thrmask_zero(&thread_globals->all);
-	odp_thrmask_zero(&thread_globals->worker);
-	odp_thrmask_zero(&thread_globals->control);
+
+	for (i = 0; i < ODP_CONFIG_SCHED_GRPS; i++)
+		odp_thrmask_zero(&thread_globals->sched_grp_mask[i]);
 
 	return 0;
 }
 
+odp_thrmask_t *thread_sched_grp_mask(int index);
+odp_thrmask_t *thread_sched_grp_mask(int index)
+{
+	return &thread_globals->sched_grp_mask[index];
+}
+
 int odp_thread_term_global(void)
 {
 	int ret;
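
For context, a hedged usage sketch (not part of the patch) showing how an
application might exercise the new group API; the function names and the
"rx-workers" group name are made up, and error handling is trimmed:

#include <odp.h>

/* Create a named group that initially contains only the calling thread.
 * Scheduled queues created with param.sched.group set to the returned
 * handle are dispatched only to threads present in the group's mask. */
static odp_schedule_group_t rx_group_create(void)
{
	odp_thrmask_t mask;

	odp_thrmask_zero(&mask);
	odp_thrmask_set(&mask, odp_thread_id());

	return odp_schedule_group_create("rx-workers", &mask);
}

/* A worker thread adds itself to the group; odp_schedule_group_leave()
 * and odp_schedule_group_destroy() undo this at shutdown. */
static int rx_group_join(odp_schedule_group_t grp)
{
	odp_thrmask_t me;

	odp_thrmask_zero(&me);
	odp_thrmask_set(&me, odp_thread_id());

	return odp_schedule_group_join(grp, &me);
}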