
validation: schedule: initial testcases

Message ID 1417692865-3370-1-git-send-email-ciprian.barbu@linaro.org
State New

Commit Message

Ciprian Barbu Dec. 4, 2014, 11:34 a.m. UTC
Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
---
This patch relies on the work Taras did to harmonize the CUnit tests; it will
not apply otherwise.

Also, as in the RFC sent earlier, I tried to follow the list Taras provided.
All the testcases use the same schedule_common_ routine to check scheduling.
I varied the number of threads, number of queues, sync type, number of prios and
the schedule API (odp_schedule and odp_schedule_multi) for a total of 24
combinations.
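
For reference, each of those combinations boils down to a single call of the
common helper; a rough sketch of one case (parameter order as in the patch
below: sync type, number of queues, number of priorities, use_barrier, multi,
excl):

	/* ATOMIC sync, many queues, all priorities, single thread,
	 * odp_schedule_multi() path, no exclusivity check */
	schedule_common(ODP_SCHED_SYNC_ATOMIC, QUEUES_PER_PRIO,
			odp_schedule_num_prio(), 0, 1, 0);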

Two other tests verify exclusive access to ATOMIC queues. They use Alex's
approach of pairing a lock with an atomic queue and checking that
odp_spinlock_trylock always succeeds. When I ran the same test with parallel
queues it reported trylock failures, which shows there was enough contention,
while for ATOMIC queues mutual exclusion was maintained correctly. This type
of test should be enough for our initial purpose.
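
The core of that check is small; simplified from the schedule_common_ loop in
the patch below:

	/* After a dequeue from an ATOMIC queue no other thread can hold
	 * the shared lock, so a trylock must always succeed */
	locked = odp_spinlock_trylock(&globals->atomic_lock);
	CU_ASSERT(locked == 1);
	if (locked) {
		/* do some work to keep the thread busy */
		odp_spinlock_unlock(&globals->atomic_lock);
	}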

NOT included are tests that check that order is preserved for ORDERED queues,
or tests with timeouts. All ODP schedule calls are performed with
ODP_SCHED_NO_WAIT because we don't have an API to stop threads blocked in
odp_schedule once all buffers have been consumed. In other words, since
scheduler fairness cannot be assumed, we cannot call odp_schedule in blocking
mode and still cleanly stop those threads once all buffers have been dequeued.
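
Instead, each worker polls with ODP_SCHED_NO_WAIT and uses a shared buffer
counter as its exit condition, roughly:

	/* stop once every enqueued buffer has been accounted for */
	odp_spinlock_lock(&globals->count_lock);
	if (globals->buf_count == args->num_bufs * num_queues * num_prio) {
		odp_spinlock_unlock(&globals->count_lock);
		break;
	}
	odp_spinlock_unlock(&globals->count_lock);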

 test/validation/Makefile.am    |   5 +-
 test/validation/odp_schedule.c | 599 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 603 insertions(+), 1 deletion(-)
 create mode 100644 test/validation/odp_schedule.c

Patch

diff --git a/test/validation/Makefile.am b/test/validation/Makefile.am
index ecb1170..fe10297 100644
--- a/test/validation/Makefile.am
+++ b/test/validation/Makefile.am
@@ -6,13 +6,15 @@  AM_LDFLAGS += -static
 if ODP_CUNIT_ENABLED
 TESTS = ${bin_PROGRAMS}
 check_PROGRAMS = ${bin_PROGRAMS}
-bin_PROGRAMS = odp_init odp_queue odp_crypto odp_shm
+bin_PROGRAMS = odp_init odp_queue odp_crypto odp_shm odp_schedule
 odp_init_LDFLAGS = $(AM_LDFLAGS)
 odp_queue_LDFLAGS = $(AM_LDFLAGS)
 odp_crypto_CFLAGS = $(AM_CFLAGS) -I$(srcdir)/crypto
 odp_crypto_LDFLAGS = $(AM_LDFLAGS)
 odp_shm_CFLAGS = $(AM_CFLAGS)
 odp_shm_LDFLAGS = $(AM_LDFLAGS)
+odp_schedule_CFLAGS = $(AM_CFLAGS)
+odp_schedule_LDFLAGS = $(AM_LDFLAGS)
 endif
 
 dist_odp_init_SOURCES = odp_init.c
@@ -21,3 +23,4 @@  dist_odp_crypto_SOURCES = crypto/odp_crypto_test_async_inp.c \
 			  crypto/odp_crypto_test_sync_inp.c \
 			  odp_crypto.c common/odp_cunit_common.c
 dist_odp_shm_SOURCES = odp_shm.c common/odp_cunit_common.c
+dist_odp_schedule_SOURCES = odp_schedule.c common/odp_cunit_common.c
diff --git a/test/validation/odp_schedule.c b/test/validation/odp_schedule.c
new file mode 100644
index 0000000..b8b5177
--- /dev/null
+++ b/test/validation/odp_schedule.c
@@ -0,0 +1,599 @@ 
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+#include "odp.h"
+#include "odp_cunit_common.h"
+
+#define MAX_WORKERS		32            /* Max worker threads */
+#define MSG_POOL_SIZE           (4*1024*1024)
+#define QUEUES_PER_PRIO		16            /* Queues per priority */
+#define BUF_SIZE		64
+#define TEST_NUM_BUFS		100
+#define MULTI_BUFS_MAX		4             /**< Buffer burst size */
+#define TEST_NUM_BUFS_EXCL	10000
+
+#define GLOBALS_SHM_NAME	"test_globals"
+#define MSG_POOL_NAME		"msg_pool"
+#define SHM_MSG_POOL_NAME	"shm_msg_pool"
+#define SHM_THR_ARGS_NAME	"shm_thr_args"
+
+
+/** Test global variables */
+typedef struct {
+	int core_count;
+	odp_barrier_t barrier;		/* Barrier for test synchronisation */
+	odp_schedule_prio_t prio;	/* Current prio */
+	int buf_count;			/* Number of bufs for current prio */
+	odp_spinlock_t count_lock;	/* Used for accessing prio counters */
+	odp_spinlock_t atomic_lock;	/* Used to verify schedule on ATOMIC qs */
+} test_globals_t;
+
+typedef struct ODP_PACKED {
+	pthrd_arg thrdarg;
+	odp_schedule_sync_t sync;
+	int num_queues;
+	int num_prio;
+	int num_bufs;			/* Number of buffers to enqueue */
+	int use_barrier;
+	int multi;			/* Flag for using odp_schedule_multi */
+	int excl;			/* Test ATOMIC exclusive access */
+} thread_args_t;
+
+odp_buffer_pool_t pool;
+
+static void test_schedule_wait_time(void)
+{
+	uint64_t wait_time;
+
+	wait_time = odp_schedule_wait_time(0);
+	CU_ASSERT(wait_time > 0);
+
+	wait_time = odp_schedule_wait_time(1);
+	CU_ASSERT(wait_time > 0);
+
+	wait_time = odp_schedule_wait_time((uint64_t)-1LL);
+	CU_ASSERT(wait_time > 0);
+}
+
+static void test_schedule_num_prio(void)
+{
+	int prio;
+
+	prio = odp_schedule_num_prio();
+
+	CU_ASSERT(prio > 0);
+	/* Sanity check: the result should not change between calls */
+	CU_ASSERT(prio == odp_schedule_num_prio());
+}
+
+static void *schedule_common_(void *arg)
+{
+	thread_args_t *args = (thread_args_t *)arg;
+	odp_schedule_sync_t sync;
+	int num_queues, num_prio, use_barrier;
+	odp_shm_t shm;
+	test_globals_t *globals;
+
+	sync = args->sync;
+	num_queues = args->num_queues;
+	num_prio = args->num_prio;
+	use_barrier = args->use_barrier;
+
+	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+	globals = odp_shm_addr(shm);
+	CU_ASSERT_FATAL(globals != NULL);
+
+
+	if (use_barrier)
+		odp_barrier_wait(&globals->barrier);
+
+	while (1) {
+		odp_buffer_t buf;
+		odp_queue_t from;
+		int num = 0;
+		int locked;
+
+		odp_spinlock_lock(&globals->count_lock);
+		if (globals->buf_count ==
+		    args->num_bufs * num_queues * num_prio) {
+			odp_spinlock_unlock(&globals->count_lock);
+			break;
+		}
+		odp_spinlock_unlock(&globals->count_lock);
+		if (args->multi) {
+			odp_buffer_t bufs[MULTI_BUFS_MAX];
+			int j;
+			num = odp_schedule_multi(&from, ODP_SCHED_NO_WAIT, bufs,
+						 MULTI_BUFS_MAX);
+			CU_ASSERT(num >= 0);
+			CU_ASSERT(num <= MULTI_BUFS_MAX);
+			if (num == 0)
+				continue;
+			for (j = 0; j < num; j++)
+				odp_buffer_free(bufs[j]);
+		} else {
+			buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+			if (buf == ODP_BUFFER_INVALID)
+				continue;
+			num = 1;
+			odp_buffer_free(buf);
+		}
+		if (args->excl) {
+			locked = odp_spinlock_trylock(&globals->atomic_lock);
+			CU_ASSERT(locked == 1);
+			CU_ASSERT(from != ODP_QUEUE_INVALID);
+			if (locked) {
+				int cnt;
+				uint64_t cycles = 0;
+				/* Do some work here to keep the thread busy */
+				for (cnt = 0; cnt < 1000; cnt++)
+					cycles += odp_time_cycles();
+
+				odp_spinlock_unlock(&globals->atomic_lock);
+			}
+		}
+
+		/* TODO: odp_queue_sched_prio not in yet, so we can't
+		   check that the priority is the one we expected */
+		odp_spinlock_lock(&globals->count_lock);
+		globals->buf_count += num;
+
+		if (globals->buf_count % (args->num_bufs * num_queues) == 0)
+			globals->prio++;
+
+		if (sync == ODP_SCHED_SYNC_ATOMIC)
+			odp_schedule_release_atomic();
+
+		if (globals->buf_count ==
+		    args->num_bufs * num_queues * num_prio) {
+			odp_spinlock_unlock(&globals->count_lock);
+			break;
+		}
+		odp_spinlock_unlock(&globals->count_lock);
+	}
+
+	return NULL;
+}
+
+static void fill_queues(thread_args_t *args)
+{
+	odp_schedule_sync_t sync;
+	int num_queues, num_prio;
+	odp_buffer_pool_t pool;
+	int i, j, k;
+	char name[32];
+
+	sync = args->sync;
+	num_queues = args->num_queues;
+	num_prio = args->num_prio;
+
+	pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
+	CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
+
+	for (i = 0; i < num_prio; i++) {
+		for (j = 0; j < num_queues; j++) {
+			odp_queue_t queue;
+
+			switch (sync) {
+			case ODP_SCHED_SYNC_NONE:
+				snprintf(name, sizeof(name),
+					 "sched_%d_%d_n", i, j);
+				break;
+			case ODP_SCHED_SYNC_ATOMIC:
+				snprintf(name, sizeof(name),
+					 "sched_%d_%d_a", i, j);
+				break;
+			case ODP_SCHED_SYNC_ORDERED:
+				snprintf(name, sizeof(name),
+					 "sched_%d_%d_o", i, j);
+				break;
+			default:
+				CU_ASSERT(0);
+				break;
+			}
+
+			queue = odp_queue_lookup(name);
+			CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+			for (k = 0; k < args->num_bufs; k++) {
+				odp_buffer_t buf;
+				buf = odp_buffer_alloc(pool);
+				CU_ASSERT(buf != ODP_BUFFER_INVALID);
+				CU_ASSERT(odp_queue_enq(queue, buf) == 0);
+			}
+		}
+	}
+}
+
+static void schedule_common(odp_schedule_sync_t sync, int num_queues,
+			    int num_prio, int use_barrier, int multi, int excl)
+{
+	thread_args_t args;
+	odp_shm_t shm;
+	test_globals_t *globals;
+
+	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+	globals = odp_shm_addr(shm);
+	CU_ASSERT_FATAL(globals != NULL);
+
+	globals->prio = ODP_SCHED_PRIO_HIGHEST;
+	globals->buf_count = 0;
+
+	args.sync = sync;
+	args.num_queues = num_queues;
+	args.num_prio = num_prio;
+	args.num_bufs = TEST_NUM_BUFS;
+	args.use_barrier = use_barrier;
+	args.multi = multi;
+	args.excl = excl;
+
+	fill_queues(&args);
+
+	schedule_common_(&args);
+}
+
+static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
+			     int num_prio, int multi, int excl)
+{
+	odp_shm_t shm;
+	test_globals_t *globals;
+	thread_args_t *thr_args;
+
+	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+	globals = odp_shm_addr(shm);
+	CU_ASSERT_FATAL(globals != NULL);
+
+	shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
+	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+	thr_args = odp_shm_addr(shm);
+	CU_ASSERT_FATAL(thr_args != NULL);
+
+	thr_args->sync = sync;
+	thr_args->num_queues = num_queues;
+	thr_args->num_prio = num_prio;
+	if (excl)
+		thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
+	else
+		thr_args->num_bufs = TEST_NUM_BUFS;
+	thr_args->use_barrier = 1;
+	thr_args->multi = multi;
+	thr_args->excl = excl;
+
+	fill_queues(thr_args);
+
+	/* Reset buffer counters from the main thread */
+	globals->prio = ODP_SCHED_PRIO_HIGHEST;
+	globals->buf_count = 0;
+
+	/* Create and launch worker threads */
+	thr_args->thrdarg.numthrds = globals->core_count;
+	odp_cunit_thread_create(schedule_common_, &thr_args->thrdarg);
+
+	/* Wait for worker threads to terminate */
+	odp_cunit_thread_exit(&thr_args->thrdarg);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_NONE */
+static void test_schedule_1q_1t_none(void)
+{
+	schedule_common(ODP_SCHED_SYNC_NONE, 1, 1, 0, 0, 0);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC */
+static void test_schedule_1q_1t_atomic(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ATOMIC, 1, 1, 0, 0, 0);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED */
+static void test_schedule_1q_1t_ordered(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ORDERED, 1, 1, 0, 0, 0);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_NONE */
+static void test_schedule_mq_1t_none(void)
+{
+	/* Only one priority involved in these tests, but use
+	   the same number of queues as the more general case */
+	schedule_common(ODP_SCHED_SYNC_NONE, QUEUES_PER_PRIO, 1, 0, 0, 0);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC */
+static void test_schedule_mq_1t_atomic(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ATOMIC, QUEUES_PER_PRIO, 1, 0, 0, 0);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED */
+static void test_schedule_mq_1t_ordered(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ORDERED, QUEUES_PER_PRIO, 1, 0, 0, 0);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_NONE */
+static void test_schedule_mq_1t_prio_none(void)
+{
+	int prio = odp_schedule_num_prio();
+	schedule_common(ODP_SCHED_SYNC_NONE, QUEUES_PER_PRIO, prio, 0, 0, 0);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC */
+static void test_schedule_mq_1t_prio_atomic(void)
+{
+	int prio = odp_schedule_num_prio();
+	schedule_common(ODP_SCHED_SYNC_ATOMIC, QUEUES_PER_PRIO, prio, 0, 0, 0);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED */
+static void test_schedule_mq_1t_prio_ordered(void)
+{
+	int prio = odp_schedule_num_prio();
+	schedule_common(ODP_SCHED_SYNC_ORDERED, QUEUES_PER_PRIO, prio, 0, 0, 0);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_NONE */
+static void test_schedule_mq_mt_prio_none(void)
+{
+	int prio = odp_schedule_num_prio();
+	parallel_execute(ODP_SCHED_SYNC_NONE, QUEUES_PER_PRIO, prio, 0, 0);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC */
+static void test_schedule_mq_mt_prio_atomic(void)
+{
+	int prio = odp_schedule_num_prio();
+	parallel_execute(ODP_SCHED_SYNC_ATOMIC, QUEUES_PER_PRIO, prio, 0, 0);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED */
+static void test_schedule_mq_mt_prio_ordered(void)
+{
+	int prio = odp_schedule_num_prio();
+	parallel_execute(ODP_SCHED_SYNC_ORDERED, QUEUES_PER_PRIO, prio, 0, 0);
+}
+
+/* 1 queue many threads check exclusive access on ATOMIC queues */
+static void test_schedule_1q_mt_atomic_excl(void)
+{
+	parallel_execute(ODP_SCHED_SYNC_ATOMIC, 1, 1, 0, 1);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_NONE multi */
+static void test_schedulem_1q_1t_none(void)
+{
+	schedule_common(ODP_SCHED_SYNC_NONE, 1, 1, 0, 1, 0);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC multi */
+static void test_schedulem_1q_1t_atomic(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ATOMIC, 1, 1, 0, 1, 0);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED multi */
+static void test_schedulem_1q_1t_ordered(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ORDERED, 1, 1, 0, 1, 0);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_NONE multi */
+static void test_schedulem_mq_1t_none(void)
+{
+	/* Only one priority involved in these tests, but use
+	   the same number of queues as the more general case */
+	schedule_common(ODP_SCHED_SYNC_NONE, QUEUES_PER_PRIO, 1, 0, 1, 0);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC multi */
+static void test_schedulem_mq_1t_atomic(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ATOMIC, QUEUES_PER_PRIO, 1, 0, 1, 0);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED multi */
+static void test_schedulem_mq_1t_ordered(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ORDERED, QUEUES_PER_PRIO, 1, 0, 1, 0);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_NONE multi */
+static void test_schedulem_mq_1t_prio_none(void)
+{
+	int prio = odp_schedule_num_prio();
+	schedule_common(ODP_SCHED_SYNC_NONE, QUEUES_PER_PRIO, prio, 0, 1, 0);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC multi */
+static void test_schedulem_mq_1t_prio_atomic(void)
+{
+	int prio = odp_schedule_num_prio();
+	schedule_common(ODP_SCHED_SYNC_ATOMIC, QUEUES_PER_PRIO, prio, 0, 1, 0);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED multi */
+static void test_schedulem_mq_1t_prio_ordered(void)
+{
+	int prio = odp_schedule_num_prio();
+	schedule_common(ODP_SCHED_SYNC_ORDERED, QUEUES_PER_PRIO, prio, 0, 1, 0);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_NONE multi */
+static void test_schedulem_mq_mt_prio_none(void)
+{
+	int prio = odp_schedule_num_prio();
+	parallel_execute(ODP_SCHED_SYNC_NONE, QUEUES_PER_PRIO, prio, 1, 0);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC multi */
+static void test_schedulem_mq_mt_prio_atomic(void)
+{
+	int prio = odp_schedule_num_prio();
+	parallel_execute(ODP_SCHED_SYNC_ATOMIC, QUEUES_PER_PRIO, prio, 1, 0);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED multi */
+static void test_schedulem_mq_mt_prio_ordered(void)
+{
+	int prio = odp_schedule_num_prio();
+	parallel_execute(ODP_SCHED_SYNC_ORDERED, QUEUES_PER_PRIO, prio, 1, 0);
+}
+
+/* 1 queue many threads check exclusive access on ATOMIC queues multi */
+static void test_schedule_mq_mt_atomic_excl(void)
+{
+	parallel_execute(ODP_SCHED_SYNC_ATOMIC, 1, 1, 1, 1);
+}
+
+
+static int schedule_test_init(void)
+{
+	odp_shm_t shm;
+	void *pool_base;
+	odp_buffer_pool_t pool;
+	test_globals_t *globals;
+	thread_args_t *thr_args;
+	int i, j;
+	int prios;
+
+	shm = odp_shm_reserve(SHM_MSG_POOL_NAME, MSG_POOL_SIZE,
+			      ODP_CACHE_LINE_SIZE, 0);
+	pool_base = odp_shm_addr(shm);
+	if (pool_base == NULL) {
+		printf("Shared memory reserve failed.\n");
+		return -1;
+	}
+
+	pool = odp_buffer_pool_create(MSG_POOL_NAME, pool_base, MSG_POOL_SIZE,
+				      BUF_SIZE, ODP_CACHE_LINE_SIZE,
+				      ODP_BUFFER_TYPE_RAW);
+	if (pool == ODP_BUFFER_POOL_INVALID) {
+		printf("Pool creation failed (msg).\n");
+		return -1;
+	}
+
+	shm = odp_shm_reserve(GLOBALS_SHM_NAME,
+			      sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
+
+	globals = odp_shm_addr(shm);
+
+	if (globals == NULL) {
+		printf("Shared memory reserve failed (globals).\n");
+		return -1;
+	}
+
+	memset(globals, 0, sizeof(test_globals_t));
+
+	globals->core_count = odp_sys_core_count();
+	if (globals->core_count > MAX_WORKERS)
+		globals->core_count = MAX_WORKERS;
+
+	shm = odp_shm_reserve(SHM_THR_ARGS_NAME, sizeof(thread_args_t),
+			      ODP_CACHE_LINE_SIZE, 0);
+	thr_args = odp_shm_addr(shm);
+
+	if (thr_args == NULL) {
+		printf("Shared memory reserve failed (thr_args).\n");
+		return -1;
+	}
+
+	memset(thr_args, 0, sizeof(thread_args_t));
+
+	/* Barrier to sync test case execution */
+	odp_barrier_init(&globals->barrier, globals->core_count);
+	odp_spinlock_init(&globals->count_lock);
+	odp_spinlock_init(&globals->atomic_lock);
+
+	prios = odp_schedule_num_prio();
+
+	for (i = 0; i < prios; i++) {
+		odp_queue_param_t p;
+		p.sched.prio  = i;
+		p.sched.group = ODP_SCHED_GROUP_DEFAULT;
+
+		for (j = 0; j < QUEUES_PER_PRIO; j++) {
+			/* Per sched sync type */
+			char name[32];
+			odp_queue_t q;
+
+			snprintf(name, sizeof(name), "sched_%d_%d_n", i, j);
+			p.sched.sync = ODP_SCHED_SYNC_NONE;
+			q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
+
+			if (q == ODP_QUEUE_INVALID) {
+				printf("Schedule queue create failed.\n");
+				return -1;
+			}
+
+			snprintf(name, sizeof(name), "sched_%d_%d_a", i, j);
+			p.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+			q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
+
+			if (q == ODP_QUEUE_INVALID) {
+				printf("Schedule queue create failed.\n");
+				return -1;
+			}
+
+			snprintf(name, sizeof(name), "sched_%d_%d_o", i, j);
+			p.sched.sync = ODP_SCHED_SYNC_ORDERED;
+			q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
+
+			if (q == ODP_QUEUE_INVALID) {
+				printf("Schedule queue create failed.\n");
+				return -1;
+			}
+		}
+	}
+	return 0;
+}
+
+static int schedule_test_finalize(void)
+{
+	odp_term_local();
+	odp_term_global();
+	return 0;
+}
+
+struct CU_TestInfo test_odp_schedule[] = {
+	{"schedule_wait_time", test_schedule_wait_time},
+	{"schedule_num_prio", test_schedule_num_prio},
+	{"schedule_1q_1t_none", test_schedule_1q_1t_none},
+	{"schedule_1q_1t_atomic", test_schedule_1q_1t_atomic},
+	{"schedule_1q_1t_ordered", test_schedule_1q_1t_ordered},
+	{"schedule_mq_1t_none", test_schedule_mq_1t_none},
+	{"schedule_mq_1t_atomic", test_schedule_mq_1t_atomic},
+	{"schedule_mq_1t_ordered", test_schedule_mq_1t_ordered},
+	{"schedule_mq_1t_prio_none", test_schedule_mq_1t_prio_none},
+	{"schedule_mq_1t_prio_atomic", test_schedule_mq_1t_prio_atomic},
+	{"schedule_mq_1t_prio_ordered", test_schedule_mq_1t_prio_ordered},
+	{"schedule_mq_mt_prio_none", test_schedule_mq_mt_prio_none},
+	{"schedule_mq_mt_prio_atomic", test_schedule_mq_mt_prio_atomic},
+	{"schedule_mq_mt_prio_ordered", test_schedule_mq_mt_prio_ordered},
+	{"schedule_1q_mt_atomic_excl", test_schedule_1q_mt_atomic_excl},
+	{"schedulem_1q_1t_none", test_schedulem_1q_1t_none},
+	{"schedulem_1q_1t_atomic", test_schedulem_1q_1t_atomic},
+	{"schedulem_1q_1t_ordered", test_schedulem_1q_1t_ordered},
+	{"schedulem_mq_1t_none", test_schedulem_mq_1t_none},
+	{"schedulem_mq_1t_atomic", test_schedulem_mq_1t_atomic},
+	{"schedulem_mq_1t_ordered", test_schedulem_mq_1t_ordered},
+	{"schedulem_mq_1t_prio_none", test_schedulem_mq_1t_prio_none},
+	{"schedulem_mq_1t_prio_atomic", test_schedulem_mq_1t_prio_atomic},
+	{"schedulem_mq_1t_prio_ordered", test_schedulem_mq_1t_prio_ordered},
+	{"schedulem_mq_mt_prio_none", test_schedulem_mq_mt_prio_none},
+	{"schedulem_mq_mt_prio_atomic", test_schedulem_mq_mt_prio_atomic},
+	{"schedulem_mq_mt_prio_ordered", test_schedulem_mq_mt_prio_ordered},
+	{"schedule_mq_mt_atomic_excl", test_schedule_mq_mt_atomic_excl},
+	CU_TEST_INFO_NULL,
+};
+
+CU_SuiteInfo odp_testsuites[] = {
+	{"Scheduler", schedule_test_init, schedule_test_finalize,
+		NULL, NULL, test_odp_schedule},
+	CU_SUITE_INFO_NULL,
+};