[API-NEXT] linux-gen: sched: fix SP scheduler hang in process mode

Message ID 1481254476-15124-1-git-send-email-yi.he@linaro.org
State New

Commit Message

Yi He Dec. 9, 2016, 3:34 a.m. UTC
SP scheduler hangs in the process mode performance test
because its global data structure was not created in a
shared memory region.

Signed-off-by: Yi He <yi.he@linaro.org>

---
Fixes: https://bugs.linaro.org/show_bug.cgi?id=2464
After Petri's and Matias' pool and ordered queue patch sets, the SP
scheduler passes the validation test suite. This patch fixes the last
reported issue, after which the SP scheduler also passes make check.
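For context, this is the standard process-mode pitfall: after fork() each
process gets its own copy-on-write copy of the data segment, so a plain
static global such as the old sched_global is updated privately per
process and the workers stop seeing each other's state. The fix keeps only
a pointer static and reserves the state itself from shared memory. Below
is a minimal sketch of that pattern, using the same ODP shm calls as the
patch but with illustrative names (my_global_t, my_init_global and
my_term_global are not from the patch):

/* Minimal sketch of the shared-memory pattern (illustrative names, not
 * from the patch): keep only the pointer static, put the data itself in
 * an odp_shm_t region so forked worker processes share it.
 */
#include <string.h>
#include <odp_api.h>

typedef struct {
	odp_shm_t shm;     /* saved so my_term_global() can free the region */
	int       counter; /* stands in for the real scheduler state */
} my_global_t;

/* Only this pointer is per-process; the data behind it is shared. */
static my_global_t *my_global;

static int my_init_global(void)
{
	odp_shm_t shm = odp_shm_reserve("my_global", sizeof(my_global_t),
					ODP_CACHE_LINE_SIZE, 0);

	my_global = odp_shm_addr(shm);
	if (my_global == NULL)
		return -1;

	memset(my_global, 0, sizeof(my_global_t));
	my_global->shm = shm;
	return 0;
}

static int my_term_global(void)
{
	return odp_shm_free(my_global->shm); /* 0 on success, <0 on failure */
}

Note that the shm handle is stored inside the shared structure itself,
which is why the patch adds an odp_shm_t field to sched_global_t.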

 platform/linux-generic/odp_schedule_sp.c | 89 ++++++++++++++++++++------------
 1 file changed, 56 insertions(+), 33 deletions(-)

-- 
2.7.4

Comments

Savolainen, Petri (Nokia - FI/Espoo) Dec. 9, 2016, 10:56 a.m. UTC | #1
> -----Original Message-----
> From: Yi He [mailto:yi.he@linaro.org]
> Sent: Friday, December 09, 2016 5:35 AM
> To: Savolainen, Petri (Nokia - FI/Espoo) <petri.savolainen@nokia-bell-labs.com>;
> Elo, Matias (Nokia - FI/Espoo) <matias.elo@nokia-bell-labs.com>;
> lng-odp@lists.linaro.org
> Cc: Yi He <yi.he@linaro.org>
> Subject: [lng-odp] [API-NEXT PATCH] linux-gen: sched: fix SP scheduler
> hang in process mode
>
> SP scheduler hangs in the process mode performance test
> because its global data structure was not created in a
> shared memory region.
>
> Signed-off-by: Yi He <yi.he@linaro.org>

This will conflict with my "linux-gen: schedule_sp: use ring as priority queue" patch sent yesterday. Could you rebase and send it again after my patch is merged?

-Petri
Yi He Dec. 12, 2016, 12:51 a.m. UTC | #2
Oh, sorry, I missed that patch. I'll rebase and send it again.

Best Regards, Yi

On 9 December 2016 at 18:56, Savolainen, Petri (Nokia - FI/Espoo) <
petri.savolainen@nokia-bell-labs.com> wrote:

> > -----Original Message-----
> > From: Yi He [mailto:yi.he@linaro.org]
> > Sent: Friday, December 09, 2016 5:35 AM
> > To: Savolainen, Petri (Nokia - FI/Espoo) <petri.savolainen@nokia-bell-labs.com>;
> > Elo, Matias (Nokia - FI/Espoo) <matias.elo@nokia-bell-labs.com>;
> > lng-odp@lists.linaro.org
> > Cc: Yi He <yi.he@linaro.org>
> > Subject: [lng-odp] [API-NEXT PATCH] linux-gen: sched: fix SP scheduler
> > hang in process mode
> >
> > SP scheduler hangs in the process mode performance test
> > because its global data structure was not created in a
> > shared memory region.
> >
> > Signed-off-by: Yi He <yi.he@linaro.org>
>
> This will conflict with my "linux-gen: schedule_sp: use ring as priority
> queue" patch sent yesterday. Could you rebase and send it again after my
> patch is merged?
>
> -Petri

Patch

diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c
index 76d1357..d9f861f 100644
--- a/platform/linux-generic/odp_schedule_sp.c
+++ b/platform/linux-generic/odp_schedule_sp.c
@@ -9,6 +9,7 @@ 
 #include <odp/api/thread.h>
 #include <odp/api/time.h>
 #include <odp/api/schedule.h>
+#include <odp/api/shared_memory.h>
 #include <odp_schedule_if.h>
 #include <odp_debug_internal.h>
 #include <odp_align_internal.h>
@@ -85,6 +86,7 @@  typedef struct {
 	sched_cmd_t   pktio_cmd[NUM_PKTIO];
 	prio_queue_t  prio_queue[NUM_PRIO];
 	sched_group_t sched_group;
+	odp_shm_t     shm;
 } sched_global_t;
 
 typedef struct {
@@ -93,32 +95,47 @@  typedef struct {
 	int          thr_id;
 } sched_local_t;
 
-static sched_global_t sched_global;
+static sched_global_t *sched_global;
 static __thread sched_local_t sched_local;
 
 static int init_global(void)
 {
 	int i;
-	sched_group_t *sched_group = &sched_global.sched_group;
+	odp_shm_t shm;
+	sched_group_t *sched_group = NULL;
 
 	ODP_DBG("Using SP scheduler\n");
 
-	memset(&sched_global, 0, sizeof(sched_global_t));
+	shm = odp_shm_reserve("sp_scheduler",
+			      sizeof(sched_global_t),
+			      ODP_CACHE_LINE_SIZE, 0);
+
+	sched_global = odp_shm_addr(shm);
+
+	if (sched_global == NULL) {
+		ODP_ERR("Schedule init: Shm reserve failed.\n");
+		return -1;
+	}
+
+	memset(sched_global, 0, sizeof(sched_global_t));
+
+	sched_global->shm = shm;
 
 	for (i = 0; i < NUM_QUEUE; i++) {
-		sched_global.queue_cmd[i].s.type  = CMD_QUEUE;
-		sched_global.queue_cmd[i].s.index = i;
+		sched_global->queue_cmd[i].s.type  = CMD_QUEUE;
+		sched_global->queue_cmd[i].s.index = i;
 	}
 
 	for (i = 0; i < NUM_PKTIO; i++) {
-		sched_global.pktio_cmd[i].s.type  = CMD_PKTIO;
-		sched_global.pktio_cmd[i].s.index = i;
-		sched_global.pktio_cmd[i].s.prio  = PKTIN_PRIO;
+		sched_global->pktio_cmd[i].s.type  = CMD_PKTIO;
+		sched_global->pktio_cmd[i].s.index = i;
+		sched_global->pktio_cmd[i].s.prio  = PKTIN_PRIO;
 	}
 
 	for (i = 0; i < NUM_PRIO; i++)
-		odp_ticketlock_init(&sched_global.prio_queue[i].s.lock);
+		odp_ticketlock_init(&sched_global->prio_queue[i].s.lock);
 
+	sched_group = &sched_global->sched_group;
 	odp_ticketlock_init(&sched_group->s.lock);
 
 	strncpy(sched_group->s.group[GROUP_ALL].name, "__group_all",
@@ -149,16 +166,22 @@  static int init_local(void)
 
 static int term_global(void)
 {
-	int qi;
+	int qi, ret = 0;
 
 	for (qi = 0; qi < NUM_QUEUE; qi++) {
-		if (sched_global.queue_cmd[qi].s.init) {
+		if (sched_global->queue_cmd[qi].s.init) {
 			/* todo: dequeue until empty ? */
 			sched_cb_queue_destroy_finalize(qi);
 		}
 	}
 
-	return 0;
+	ret = odp_shm_free(sched_global->shm);
+	if (ret < 0) {
+		ODP_ERR("Shm free failed for sp_scheduler\n");
+		ret = -1;
+	}
+
+	return ret;
 }
 
 static int term_local(void)
@@ -173,7 +196,7 @@  static unsigned max_ordered_locks(void)
 
 static int thr_add(odp_schedule_group_t group, int thr)
 {
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 
 	if (group < 0 || group >= NUM_GROUP)
 		return -1;
@@ -194,7 +217,7 @@  static int thr_add(odp_schedule_group_t group, int thr)
 
 static int thr_rem(odp_schedule_group_t group, int thr)
 {
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 
 	if (group < 0 || group >= NUM_GROUP)
 		return -1;
@@ -220,7 +243,7 @@  static int num_grps(void)
 
 static int init_queue(uint32_t qi, const odp_schedule_param_t *sched_param)
 {
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 	odp_schedule_group_t group = sched_param->group;
 	int prio = 0;
 
@@ -233,25 +256,25 @@  static int init_queue(uint32_t qi, const odp_schedule_param_t *sched_param)
 	if (sched_param->prio > 0)
 		prio = LOWEST_QUEUE_PRIO;
 
-	sched_global.queue_cmd[qi].s.prio  = prio;
-	sched_global.queue_cmd[qi].s.group = group;
-	sched_global.queue_cmd[qi].s.init  = 1;
+	sched_global->queue_cmd[qi].s.prio  = prio;
+	sched_global->queue_cmd[qi].s.group = group;
+	sched_global->queue_cmd[qi].s.init  = 1;
 
 	return 0;
 }
 
 static void destroy_queue(uint32_t qi)
 {
-	sched_global.queue_cmd[qi].s.prio  = 0;
-	sched_global.queue_cmd[qi].s.group = 0;
-	sched_global.queue_cmd[qi].s.init  = 0;
+	sched_global->queue_cmd[qi].s.prio  = 0;
+	sched_global->queue_cmd[qi].s.group = 0;
+	sched_global->queue_cmd[qi].s.init  = 0;
 }
 
 static inline void add_tail(sched_cmd_t *cmd)
 {
 	prio_queue_t *prio_queue;
 
-	prio_queue   = &sched_global.prio_queue[cmd->s.prio];
+	prio_queue   = &sched_global->prio_queue[cmd->s.prio];
 	cmd->s.next  = NULL;
 
 	odp_ticketlock_lock(&prio_queue->s.lock);
@@ -271,14 +294,14 @@  static inline sched_cmd_t *rem_head(int prio)
 	prio_queue_t *prio_queue;
 	sched_cmd_t *cmd;
 
-	prio_queue = &sched_global.prio_queue[prio];
+	prio_queue = &sched_global->prio_queue[prio];
 
 	odp_ticketlock_lock(&prio_queue->s.lock);
 
 	if (prio_queue->s.head == NULL) {
 		cmd = NULL;
 	} else {
-		sched_group_t *sched_group = &sched_global.sched_group;
+		sched_group_t *sched_group = &sched_global->sched_group;
 
 		cmd = prio_queue->s.head;
 
@@ -301,7 +324,7 @@  static int sched_queue(uint32_t qi)
 {
 	sched_cmd_t *cmd;
 
-	cmd = &sched_global.queue_cmd[qi];
+	cmd = &sched_global->queue_cmd[qi];
 	add_tail(cmd);
 
 	return 0;
@@ -327,7 +350,7 @@  static void pktio_start(int pktio_index, int num, int pktin_idx[])
 	ODP_DBG("pktio index: %i, %i pktin queues %i\n",
 		pktio_index, num, pktin_idx[0]);
 
-	cmd = &sched_global.pktio_cmd[pktio_index];
+	cmd = &sched_global->pktio_cmd[pktio_index];
 
 	if (num > NUM_PKTIN)
 		ODP_ABORT("Supports only %i pktin queues per interface\n",
@@ -491,7 +514,7 @@  static odp_schedule_group_t schedule_group_create(const char *name,
 						  const odp_thrmask_t *thrmask)
 {
 	odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 	int i;
 
 	odp_ticketlock_lock(&sched_group->s.lock);
@@ -522,7 +545,7 @@  static odp_schedule_group_t schedule_group_create(const char *name,
 
 static int schedule_group_destroy(odp_schedule_group_t group)
 {
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 
 	if (group < NUM_STATIC_GROUP || group >= NUM_GROUP)
 		return -1;
@@ -545,7 +568,7 @@  static int schedule_group_destroy(odp_schedule_group_t group)
 static odp_schedule_group_t schedule_group_lookup(const char *name)
 {
 	odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 	int i;
 
 	odp_ticketlock_lock(&sched_group->s.lock);
@@ -565,7 +588,7 @@  static odp_schedule_group_t schedule_group_lookup(const char *name)
 static int schedule_group_join(odp_schedule_group_t group,
 			       const odp_thrmask_t *thrmask)
 {
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 
 	if (group < 0 || group >= NUM_GROUP)
 		return -1;
@@ -589,7 +612,7 @@  static int schedule_group_join(odp_schedule_group_t group,
 static int schedule_group_leave(odp_schedule_group_t group,
 				const odp_thrmask_t *thrmask)
 {
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 	odp_thrmask_t *all = &sched_group->s.group[GROUP_ALL].mask;
 	odp_thrmask_t not;
 
@@ -616,7 +639,7 @@  static int schedule_group_leave(odp_schedule_group_t group,
 static int schedule_group_thrmask(odp_schedule_group_t group,
 				  odp_thrmask_t *thrmask)
 {
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 
 	if (group < 0 || group >= NUM_GROUP)
 		return -1;
@@ -638,7 +661,7 @@  static int schedule_group_thrmask(odp_schedule_group_t group,
 static int schedule_group_info(odp_schedule_group_t group,
 			       odp_schedule_group_info_t *info)
 {
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 
 	if (group < 0 || group >= NUM_GROUP)
 		return -1;