
[PATCHv2] validation: schedule: initial testcases

Message ID: 1418129379-32658-1-git-send-email-ciprian.barbu@linaro.org
State: New

Commit Message

Ciprian Barbu Dec. 9, 2014, 12:49 p.m. UTC
Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
---
v2:
 - rebased against ODP tip
 - fixed some bugs
 - added some defines to clearly see the testcase parameters


 test/validation/Makefile.am    |   5 +-
 test/validation/odp_schedule.c | 605 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 609 insertions(+), 1 deletion(-)
 create mode 100644 test/validation/odp_schedule.c

Comments

Mike Holmes Dec. 9, 2014, 9:24 p.m. UTC | #1
Need to add .gitignore odp_schedule

comments inline below


On 9 December 2014 at 07:49, Ciprian Barbu <ciprian.barbu@linaro.org> wrote:

> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
> ---
> v2:
>  - rebased against ODP tip
>  - fixed some bugs
>  - added some defines to clearly see the testcase parameters
>
>
>  test/validation/Makefile.am    |   5 +-
>  test/validation/odp_schedule.c | 605
> +++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 609 insertions(+), 1 deletion(-)
>  create mode 100644 test/validation/odp_schedule.c
>
> diff --git a/test/validation/Makefile.am b/test/validation/Makefile.am
> index 8547085..3670c76 100644
> --- a/test/validation/Makefile.am
> +++ b/test/validation/Makefile.am
> @@ -6,13 +6,15 @@ AM_LDFLAGS += -static
>  if ODP_CUNIT_ENABLED
>  TESTS = ${bin_PROGRAMS}
>  check_PROGRAMS = ${bin_PROGRAMS}
> -bin_PROGRAMS = odp_init odp_queue odp_crypto odp_shm
> +bin_PROGRAMS = odp_init odp_queue odp_crypto odp_shm odp_schedule
>  odp_init_LDFLAGS = $(AM_LDFLAGS)
>  odp_queue_LDFLAGS = $(AM_LDFLAGS)
>  odp_crypto_CFLAGS = $(AM_CFLAGS) -I$(srcdir)/crypto
>  odp_crypto_LDFLAGS = $(AM_LDFLAGS)
>  odp_shm_CFLAGS = $(AM_CFLAGS)
>  odp_shm_LDFLAGS = $(AM_LDFLAGS)
> +odp_schedule_CFLAGS = $(AM_CFLAGS)
> +odp_schedule_LDFLAGS = $(AM_LDFLAGS)
>  endif
>
>  dist_odp_init_SOURCES = odp_init.c
> @@ -22,3 +24,4 @@ dist_odp_crypto_SOURCES =
> crypto/odp_crypto_test_async_inp.c \
>                           crypto/odp_crypto_test_rng.c \
>                           odp_crypto.c common/odp_cunit_common.c
>  dist_odp_shm_SOURCES = odp_shm.c common/odp_cunit_common.c
> +dist_odp_schedule_SOURCES = odp_schedule.c common/odp_cunit_common.c
> diff --git a/test/validation/odp_schedule.c
> b/test/validation/odp_schedule.c
> new file mode 100644
> index 0000000..ce9e9f8
> --- /dev/null
> +++ b/test/validation/odp_schedule.c
> @@ -0,0 +1,605 @@
> +/* Copyright (c) 2014, Linaro Limited
> + * All rights reserved.
> + *
> + * SPDX-License-Identifier:     BSD-3-Clause
> + */
> +
> +#include "odp.h"
>

Should be #include <odp.h>


> +#include "odp_cunit_common.h"
> +
> +#define MAX_WORKERS            32            /* Max worker threads */
>

Should the define be MAX_WORKER_THREADS so the comment can be dropped?

> +#define MSG_POOL_SIZE           (4*1024*1024)
>

You have spaces above but tabs elsewhere.


> +#define QUEUES_PER_PRIO                16            /* Queue per
> priority */
>

Does the comment add value? The define looks fine on its own.


> +#define BUF_SIZE               64
> +#define TEST_NUM_BUFS          100
> +#define MULTI_BUFS_MAX         4             /* Buffer burst size */
>

Should the define be BUFF_BURST_SIZE? Then you don't need the comment.

In general, can the comments be dropped when the variable or define has a
better name?
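For illustration, the block of defines might then read like this (a sketch
only; the renamed values are suggestions, not part of the patch):

#define MAX_WORKER_THREADS     32
#define MSG_POOL_SIZE          (4*1024*1024)
#define QUEUES_PER_PRIO        16
#define BUF_SIZE               64
#define TEST_NUM_BUFS          100
#define BUFF_BURST_SIZE        4       /* was MULTI_BUFS_MAX */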




> +#define TEST_NUM_BUFS_EXCL     10000
> +
> +#define ONE_Q                  1
> +#define MANY_QS                        QUEUES_PER_PRIO
> +#define ONE_PRIO               1
> +#define SCHD_ONE               0
> +#define SCHD_MULTI             1
> +
> +#define GLOBALS_SHM_NAME       "test_globals"
> +#define MSG_POOL_NAME          "msg_pool"
> +#define SHM_MSG_POOL_NAME      "shm_msg_pool"
> +#define SHM_THR_ARGS_NAME      "shm_thr_args"
> +
> +
> +/* Test global variables */
> +typedef struct {
> +       int core_count;
> +       odp_barrier_t barrier;          /* Barrier for test
> synchronisation */
> +       odp_schedule_prio_t prio;       /* Current prio */
> +       int buf_count;                  /* Number of bufs for current prio
> */
> +       odp_spinlock_t count_lock;      /* Used for accessing prio
> counters */
> +       odp_spinlock_t atomic_lock;     /* Used verify schedule on ATOMIC
> qs */
> +} test_globals_t;
> +
> +typedef struct ODP_PACKED {
> +       pthrd_arg thrdarg;
> +       odp_schedule_sync_t sync;
> +       int num_queues;
> +       int num_prio;
> +       int num_bufs;                   /* Number of buffers to enqueue */
>
> +       int num_cores;                  /* Number of cores used for the
> test */
> +       int multi;                      /* Flag for using
> odp_schedule_multi */
>

int enable_sched_multi; makes more sense to readers - then no comment is needed?


> +       int excl;                       /* Test ATOMIC exclusive access */
>

int enable_excl_atomic; makes more sense to readers - then no comment is needed?

> +} thread_args_t;
> +
> +odp_buffer_pool_t pool;
> +
> +static void test_schedule_wait_time(void)
> +{
> +       uint64_t wait_time;
> +
> +       wait_time = odp_schedule_wait_time(0);
> +       CU_ASSERT(wait_time > 0);
> +
> +       wait_time = odp_schedule_wait_time(1);
> +       CU_ASSERT(wait_time > 0);
> +
> +       wait_time = odp_schedule_wait_time((uint64_t)-1LL);
> +       CU_ASSERT(wait_time > 0);
> +}
> +
> +static void test_schedule_num_prio(void)
> +{
> +       int prio;
> +
> +       prio = odp_schedule_num_prio();
> +
> +       CU_ASSERT(prio > 0);
> +       /* Just common sense but why not */
>

Is this a question to the reviewer? If it is needed, don't comment; if not,
delete it :)


> +       CU_ASSERT(prio == odp_schedule_num_prio());
> +}
> +
> +static void *schedule_common_(void *arg)
> +{
> +       thread_args_t *args = (thread_args_t *)arg;
> +       odp_schedule_sync_t sync;
> +       int num_queues, num_prio, num_bufs, num_cores;
> +       odp_shm_t shm;
> +       test_globals_t *globals;
> +
> +       sync = args->sync;
> +       num_queues = args->num_queues;
> +       num_prio = args->num_prio;
> +       num_bufs = args->num_bufs;
> +       num_cores = args->num_cores;
> +
> +       shm = odp_shm_lookup(GLOBALS_SHM_NAME);
> +       CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
> +       globals = odp_shm_addr(shm);
> +       CU_ASSERT_FATAL(globals != NULL);
> +
> +
> +       if (num_cores == globals->core_count)
> +               odp_barrier_wait(&globals->barrier);
> +
> +       while (1) {
> +               odp_buffer_t buf;
> +               odp_queue_t from;
> +               int num = 0;
> +               int locked;
> +
> +               odp_spinlock_lock(&globals->count_lock);
> +               if (globals->buf_count ==
> +                   num_bufs * num_queues * num_prio) {
> +                       odp_spinlock_unlock(&globals->count_lock);
> +                       break;
> +               }
> +               odp_spinlock_unlock(&globals->count_lock);
> +               if (args->multi) {
> +                       odp_buffer_t bufs[MULTI_BUFS_MAX];
> +                       int j;
> +                       num = odp_schedule_multi(&from, ODP_SCHED_NO_WAIT,
> bufs,
> +                                                MULTI_BUFS_MAX);
> +                       CU_ASSERT(num >= 0);
> +                       CU_ASSERT(num <= MULTI_BUFS_MAX);
> +                       if (num == 0)
> +                               continue;
> +                       for (j = 0; j < num; j++)
> +                               odp_buffer_free(bufs[j]);
> +               } else {
> +                       buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
> +                       if (buf == ODP_BUFFER_INVALID)
> +                               continue;
> +                       num = 1;
> +                       odp_buffer_free(buf);
> +               }
> +               if (args->excl) {
> +                       locked =
> odp_spinlock_trylock(&globals->atomic_lock);
> +                       CU_ASSERT(locked == 1);
> +                       CU_ASSERT(from != ODP_QUEUE_INVALID);
> +                       if (locked) {
> +                               int cnt;
> +                               uint64_t cycles = 0;
> +                               /* Do some work here to keep the thread
> busy */
> +                               for (cnt = 0; cnt < 1000; cnt++)
> +                                       cycles += odp_time_cycles();
> +
> +                               odp_spinlock_unlock(&globals->atomic_lock);
> +                       }
> +               }
> +
> +
> +               /* TODO: odp_queue_sched_prio not in yet so we can't
>

All TODOs need to reference an ODP bug for the task.


> +                check the priority is the one we expected */
> +
> +               odp_spinlock_lock(&globals->count_lock);
> +               globals->buf_count += num;
> +
> +               if (sync == ODP_SCHED_SYNC_ATOMIC)
> +                       odp_schedule_release_atomic();
> +
> +               if (globals->buf_count ==
> +                   num_bufs * num_queues * num_prio) {
> +                       odp_spinlock_unlock(&globals->count_lock);
> +                       break;
> +               }
> +               odp_spinlock_unlock(&globals->count_lock);
> +       }
> +
> +       return NULL;
> +}
> +
> +static void fill_queues(thread_args_t *args)
> +{
> +       odp_schedule_sync_t sync;
> +       int num_queues, num_prio;
> +       odp_buffer_pool_t pool;
> +       int i, j, k;
> +       char name[32];
> +
> +       sync = args->sync;
> +       num_queues = args->num_queues;
> +       num_prio = args->num_prio;
> +
> +       pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
> +       CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
> +
> +       for (i = 0; i < num_prio; i++) {
> +               for (j = 0; j < num_queues; j++) {
> +                       odp_queue_t queue;
> +
> +                       switch (sync) {
> +                       case ODP_SCHED_SYNC_NONE:
> +                               snprintf(name, sizeof(name),
> +                                        "sched_%d_%d_n", i, j);
> +                               break;
> +                       case ODP_SCHED_SYNC_ATOMIC:
> +                               snprintf(name, sizeof(name),
> +                                        "sched_%d_%d_a", i, j);
> +                               break;
> +                       case ODP_SCHED_SYNC_ORDERED:
> +                               snprintf(name, sizeof(name),
> +                                        "sched_%d_%d_o", i, j);
> +                               break;
> +                       default:
> +                               CU_ASSERT(0);
> +                               break;
> +                       }
> +
> +                       queue = odp_queue_lookup(name);
> +                       CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
> +
> +                       for (k = 0; k < args->num_bufs; k++) {
> +                               odp_buffer_t buf;
> +                               buf = odp_buffer_alloc(pool);
> +                               CU_ASSERT(buf != ODP_BUFFER_INVALID);
> +                               CU_ASSERT(odp_queue_enq(queue, buf) == 0);
> +                       }
> +               }
> +       }
> +}
> +
> +static void schedule_common(odp_schedule_sync_t sync, int num_queues,
> +                           int num_prio, int multi)
> +{
> +       thread_args_t args;
> +       odp_shm_t shm;
> +       test_globals_t *globals;
> +
> +       shm = odp_shm_lookup(GLOBALS_SHM_NAME);
> +       CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
> +       globals = odp_shm_addr(shm);
> +       CU_ASSERT_FATAL(globals != NULL);
> +
> +       globals->prio = ODP_SCHED_PRIO_HIGHEST;
> +       globals->buf_count = 0;
> +
> +       args.sync = sync;
> +       args.num_queues = num_queues;
> +       args.num_prio = num_prio;
> +       args.num_bufs = TEST_NUM_BUFS;
> +       args.num_cores = 1;
> +       args.multi = multi;
> +       args.excl = 0;  /* Don't test exclusive access with a single core
> */
>

Make it clear with code rather than comments?
#define DISABLE_EXCLUSIVE_ACCESS 0
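The single-core path would then document itself, e.g. (sketch only):

args.excl = DISABLE_EXCLUSIVE_ACCESS;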


> +
> +       fill_queues(&args);
> +
> +       schedule_common_(&args);
> +}
> +
> +static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
> +                            int num_prio, int multi, int excl)
> +{
> +       odp_shm_t shm;
> +       test_globals_t *globals;
> +       thread_args_t *thr_args;
> +
> +       shm = odp_shm_lookup(GLOBALS_SHM_NAME);
> +       CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
> +       globals = odp_shm_addr(shm);
> +       CU_ASSERT_FATAL(globals != NULL);
> +
> +       shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
> +       CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
> +       thr_args = odp_shm_addr(shm);
> +       CU_ASSERT_FATAL(thr_args != NULL);
> +
> +       thr_args->sync = sync;
> +       thr_args->num_queues = num_queues;
> +       thr_args->num_prio = num_prio;
> +       if (excl)
> +               thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
> +       else
> +               thr_args->num_bufs = TEST_NUM_BUFS;
> +       thr_args->num_cores = globals->core_count;
> +       thr_args->multi = multi;
> +       thr_args->excl = excl;
> +
> +       fill_queues(thr_args);
> +
> +       /* Reset buffer counters from the main thread */
> +       globals->prio = ODP_SCHED_PRIO_HIGHEST;
> +       globals->buf_count = 0;
> +
> +       /* Create and launch worker threads */
> +       thr_args->thrdarg.numthrds = globals->core_count;
> +       odp_cunit_thread_create(schedule_common_, &thr_args->thrdarg);
> +
> +       /* Wait for worker threads to terminate */
> +       odp_cunit_thread_exit(&thr_args->thrdarg);
> +}
> +
> +/* 1 queue 1 thread ODP_SCHED_SYNC_NONE */
> +static void test_schedule_1q_1t_none(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_NONE, ONE_Q, ONE_PRIO, SCHD_ONE);
> +}
> +
> +/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC */
> +static void test_schedule_1q_1t_atomic(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_ONE);
> +}
> +
> +/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED */
> +static void test_schedule_1q_1t_ordered(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO, SCHD_ONE);
> +}
> +
> +/* Many queues 1 thread ODP_SCHED_SYNC_NONE */
> +static void test_schedule_mq_1t_none(void)
> +{
> +       /* Only one priority involved in these tests, but use
> +          the same number of queues the more general case uses */
> +       schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, ONE_PRIO, SCHD_ONE);
> +}
> +
> +/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC */
> +static void test_schedule_mq_1t_atomic(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, ONE_PRIO,
> SCHD_ONE);
> +}
> +
> +/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED */
> +static void test_schedule_mq_1t_ordered(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO,
> SCHD_ONE);
> +}
> +
> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_NONE */
> +static void test_schedule_mq_1t_prio_none(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_ONE);
> +}
> +
> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC */
> +static void test_schedule_mq_1t_prio_atomic(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_ONE);
> +}
> +
> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED */
> +static void test_schedule_mq_1t_prio_ordered(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_ONE);
> +}
> +
> +/* Many queues many threads check priority ODP_SCHED_SYNC_NONE */
> +static void test_schedule_mq_mt_prio_none(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       parallel_execute(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_ONE, 0);
>

What is 0? This occurs in multiple cases.


> +}
> +
> +/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC */
> +static void test_schedule_mq_mt_prio_atomic(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       parallel_execute(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_ONE,
> 0);
> +}
> +
> +/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED */
> +static void test_schedule_mq_mt_prio_ordered(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       parallel_execute(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_ONE,
> 0);
> +}
> +
> +/* 1 queue many threads check exclusive access on ATOMIC queues */
> +static void test_schedule_1q_mt_atomic_excl(void)
> +{
> +       parallel_execute(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_ONE,
> 1);
>

What is 1? This occurs in multiple cases.
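Reusing the DISABLE_EXCLUSIVE_ACCESS idea from above, an ENABLE_ counterpart
(names are only suggestions) would make these call sites self-documenting,
e.g.:

#define ENABLE_EXCLUSIVE_ACCESS        1
#define DISABLE_EXCLUSIVE_ACCESS       0

parallel_execute(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_ONE,
                 ENABLE_EXCLUSIVE_ACCESS);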


> +}
> +
> +/* 1 queue 1 thread ODP_SCHED_SYNC_NONE multi */
> +static void test_schedulem_1q_1t_none(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_NONE, ONE_Q, ONE_PRIO, SCHD_MULTI);
> +}
> +
> +/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC multi */
> +static void test_schedulem_1q_1t_atomic(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO,
> SCHD_MULTI);
> +}
> +
> +/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED multi */
> +static void test_schedulem_1q_1t_ordered(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO,
> SCHD_MULTI);
> +}
> +
> +/* Many queues 1 thread ODP_SCHED_SYNC_NONE multi */
> +static void test_schedulem_mq_1t_none(void)
> +{
> +       /* Only one priority involved in these tests, but use
> +          the same number of queues the more general case uses */
> +       schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, ONE_PRIO,
> SCHD_MULTI);
> +}
> +
> +/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC multi */
> +static void test_schedulem_mq_1t_atomic(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, ONE_PRIO,
> SCHD_MULTI);
> +}
> +
> +/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED multi */
> +static void test_schedulem_mq_1t_ordered(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO,
> SCHD_MULTI);
> +}
> +
> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_NONE multi */
> +static void test_schedulem_mq_1t_prio_none(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_MULTI);
> +}
> +
> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC multi */
> +static void test_schedulem_mq_1t_prio_atomic(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_MULTI);
> +}
> +
> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED multi */
> +static void test_schedulem_mq_1t_prio_ordered(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_MULTI);
> +}
> +
> +/* Many queues many threads check priority ODP_SCHED_SYNC_NONE multi */
> +static void test_schedulem_mq_mt_prio_none(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       parallel_execute(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_MULTI,
> 0);
> +}
> +
> +/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC multi */
> +static void test_schedulem_mq_mt_prio_atomic(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       parallel_execute(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_MULTI,
> 0);
> +}
> +
> +/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED multi */
> +static void test_schedulem_mq_mt_prio_ordered(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       parallel_execute(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio,
> SCHD_MULTI, 0);
> +}
> +
> +/* 1 queue many threads check exclusive access on ATOMIC queues multi */
> +static void test_schedulem_1q_mt_atomic_excl(void)
> +{
> +       parallel_execute(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO,
> SCHD_MULTI, 1);
> +}
> +
> +
> +static int schedule_test_init(void)
> +{
> +       odp_shm_t shm;
> +       void *pool_base;
> +       odp_buffer_pool_t pool;
> +       test_globals_t *globals;
> +       thread_args_t *thr_args;
> +       int i, j;
> +       int prios;
> +
> +       shm = odp_shm_reserve(SHM_MSG_POOL_NAME, MSG_POOL_SIZE,
> +                             ODP_CACHE_LINE_SIZE, 0);
> +       pool_base = odp_shm_addr(shm);
> +       if (pool_base == NULL) {
> +               printf("Shared memory reserve failed.\n");
> +               return -1;
> +       }
> +
> +       pool = odp_buffer_pool_create(MSG_POOL_NAME, pool_base,
> MSG_POOL_SIZE,
> +                                     BUF_SIZE, ODP_CACHE_LINE_SIZE,
> +                                     ODP_BUFFER_TYPE_RAW);
> +       if (pool == ODP_BUFFER_POOL_INVALID) {
> +               printf("Pool creation failed (msg).\n");
> +               return -1;
> +       }
> +
> +       shm = odp_shm_reserve(GLOBALS_SHM_NAME,
> +                             sizeof(test_globals_t), ODP_CACHE_LINE_SIZE,
> 0);
> +
> +       globals = odp_shm_addr(shm);
> +
> +       if (globals == NULL) {
> +               printf("Shared memory reserve failed (globals).\n");
> +               return -1;
> +       }
> +
> +       memset(globals, 0, sizeof(test_globals_t));
> +
> +       globals->core_count = odp_sys_core_count();
> +       if (globals->core_count > MAX_WORKERS)
> +               globals->core_count = MAX_WORKERS;
> +
> +       shm = odp_shm_reserve(SHM_THR_ARGS_NAME, sizeof(thread_args_t),
> +                             ODP_CACHE_LINE_SIZE, 0);
> +       thr_args = odp_shm_addr(shm);
> +
> +       if (thr_args == NULL) {
> +               printf("Shared memory reserve failed (thr_args).\n");
> +               return -1;
> +       }
> +
> +       memset(thr_args, 0, sizeof(thread_args_t));
> +
> +       /* Barrier to sync test case execution */
> +       odp_barrier_init(&globals->barrier, globals->core_count);
> +       odp_spinlock_init(&globals->count_lock);
> +       odp_spinlock_init(&globals->atomic_lock);
> +
> +       prios = odp_schedule_num_prio();
> +
> +       for (i = 0; i < prios; i++) {
> +               odp_queue_param_t p;
> +               p.sched.prio  = i;
> +               p.sched.group = ODP_SCHED_GROUP_DEFAULT;
> +
> +               for (j = 0; j < QUEUES_PER_PRIO; j++) {
> +                       /* Per sched sync type */
> +                       char name[32];
> +                       odp_queue_t q;
> +
> +                       snprintf(name, sizeof(name), "sched_%d_%d_n", i,
> j);
> +                       p.sched.sync = ODP_SCHED_SYNC_NONE;
> +                       q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED,
> &p);
> +
> +                       if (q == ODP_QUEUE_INVALID) {
> +                               printf("Schedule queue create failed.\n");
> +                               return -1;
> +                       }
> +
> +                       snprintf(name, sizeof(name), "sched_%d_%d_a", i,
> j);
> +                       p.sched.sync = ODP_SCHED_SYNC_ATOMIC;
> +                       q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED,
> &p);
> +
> +                       if (q == ODP_QUEUE_INVALID) {
> +                               printf("Schedule queue create failed.\n");
> +                               return -1;
> +                       }
> +
> +                       snprintf(name, sizeof(name), "sched_%d_%d_o", i,
> j);
> +                       p.sched.sync = ODP_SCHED_SYNC_ORDERED;
> +                       q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED,
> &p);
> +
> +                       if (q == ODP_QUEUE_INVALID) {
> +                               printf("Schedule queue create failed.\n");
> +                               return -1;
> +                       }
> +               }
> +       }
>

I find the detail obscures the intent with these nested loops. A comment
would help, but better still it could be self-documenting: a function that
creates a given number of queues with matching names. Maybe that would
become something used by other tests?
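For example, a helper roughly like the following (a sketch only; the name and
parameter list are illustrative, not part of the patch):

/* Create QUEUES_PER_PRIO scheduled queues for one priority and one sync
 * type, named "sched_<prio>_<index>_<suffix>". Returns 0 on success. */
static int create_queues_per_prio(int prio, odp_schedule_sync_t sync,
                                  const char *suffix)
{
        odp_queue_param_t p;
        char name[32];
        int i;

        p.sched.prio  = prio;
        p.sched.group = ODP_SCHED_GROUP_DEFAULT;
        p.sched.sync  = sync;

        for (i = 0; i < QUEUES_PER_PRIO; i++) {
                odp_queue_t q;

                snprintf(name, sizeof(name), "sched_%d_%d_%s",
                         prio, i, suffix);
                q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
                if (q == ODP_QUEUE_INVALID) {
                        printf("Schedule queue create failed.\n");
                        return -1;
                }
        }

        return 0;
}

schedule_test_init() would then reduce to one call per priority and sync type.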

> +       return 0;
> +}
> +
> +static int schedule_test_finalize(void)
> +{
> +       odp_term_local();
> +       odp_term_global();
> +       return 0;
> +}
> +
> +struct CU_TestInfo test_odp_schedule[] = {
> +       {"schedule_wait_time", test_schedule_wait_time},
> +       {"schedule_num_prio", test_schedule_num_prio},
> +       {"schedule_1q_1t_none", test_schedule_1q_1t_none},
> +       {"schedule_1q_1t_atomic", test_schedule_1q_1t_atomic},
> +       {"schedule_1q_1t_ordered", test_schedule_1q_1t_ordered},
> +       {"schedule_mq_1t_none", test_schedule_mq_1t_none},
> +       {"schedule_mq_1t_atomic", test_schedule_mq_1t_atomic},
> +       {"schedule_mq_1t_ordered", test_schedule_mq_1t_ordered},
> +       {"schedule_mq_1t_prio_none", test_schedule_mq_1t_prio_none},
> +       {"schedule_mq_1t_prio_atomic", test_schedule_mq_1t_prio_atomic},
> +       {"schedule_mq_1t_prio_ordered", test_schedule_mq_1t_prio_ordered},
> +       {"schedule_mq_mt_prio_none", test_schedule_mq_mt_prio_none},
> +       {"schedule_mq_mt_prio_atomic", test_schedule_mq_mt_prio_atomic},
> +       {"schedule_mq_mt_prio_ordered", test_schedule_mq_mt_prio_ordered},
> +       {"schedule_1q_mt_atomic_excl", test_schedule_1q_mt_atomic_excl},
> +       {"schedulem_1q_1t_none", test_schedulem_1q_1t_none},
>

what does the "m" mean on schedulem ? could it be clearer ?


> +       {"schedulem_1q_1t_atomic", test_schedulem_1q_1t_atomic},
> +       {"schedulem_1q_1t_ordered", test_schedulem_1q_1t_ordered},
> +       {"schedulem_mq_1t_none", test_schedulem_mq_1t_none},
> +       {"schedulem_mq_1t_atomic", test_schedulem_mq_1t_atomic},
> +       {"schedulem_mq_1t_ordered", test_schedulem_mq_1t_ordered},
> +       {"schedulem_mq_1t_prio_none", test_schedulem_mq_1t_prio_none},
> +       {"schedulem_mq_1t_prio_atomic", test_schedulem_mq_1t_prio_atomic},
> +       {"schedulem_mq_1t_prio_ordered",
> test_schedulem_mq_1t_prio_ordered},
> +       {"schedulem_mq_mt_prio_none", test_schedulem_mq_mt_prio_none},
> +       {"schedulem_mq_mt_prio_atomic", test_schedulem_mq_mt_prio_atomic},
> +       {"schedulem_mq_mt_prio_ordered",
> test_schedulem_mq_mt_prio_ordered},
> +       {"schedulem_1q_mt_atomic_excl", test_schedulem_1q_mt_atomic_excl},
> +       CU_TEST_INFO_NULL,
> +};
> +
> +CU_SuiteInfo odp_testsuites[] = {
> +       {"Scheduler", schedule_test_init, schedule_test_finalize,
>

The main function has a hook for the init called tests_global_init, and main
does the terminate for you.
Please rework schedule_test_init and schedule_test_finalize accordingly.
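A minimal sketch of that rework, assuming the common main() invokes an
int tests_global_init(void) hook and performs odp_term_local()/odp_term_global()
itself as described:

/* Sketch only: move the setup into the hook the common main() calls,
 * and drop schedule_test_finalize since main() handles termination. */
int tests_global_init(void)
{
        return schedule_test_init();
}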


> +               NULL, NULL, test_odp_schedule},
>

Does not align under open parens
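With the setup moved into tests_global_init() and the explicit finalize
dropped, the suite table could shrink to something like this (again a sketch),
which also sidesteps the alignment problem because the entry fits on one line:

CU_SuiteInfo odp_testsuites[] = {
        {"Scheduler", NULL, NULL, NULL, NULL, test_odp_schedule},
        CU_SUITE_INFO_NULL,
};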


> +       CU_SUITE_INFO_NULL,
> +};
> --
> 1.8.3.2
>
>
Ciprian Barbu Dec. 10, 2014, 4:34 p.m. UTC | #2
On Tue, Dec 9, 2014 at 11:24 PM, Mike Holmes <mike.holmes@linaro.org> wrote:
> Need to add .gitignore odp_schedule
>
> comments inline below
>
>
> On 9 December 2014 at 07:49, Ciprian Barbu <ciprian.barbu@linaro.org> wrote:
>>
>> [snip - full quote of the patch and earlier inline comments trimmed]
>>
>> +       for (i = 0; i < prios; i++) {
>> +               odp_queue_param_t p;
>> +               p.sched.prio  = i;
>> +               p.sched.group = ODP_SCHED_GROUP_DEFAULT;
>> +
>> +               for (j = 0; j < QUEUES_PER_PRIO; j++) {
>> +                       /* Per sched sync type */
>> +                       char name[32];
>> +                       odp_queue_t q;
>> +
>> +                       snprintf(name, sizeof(name), "sched_%d_%d_n", i,
>> j);
>> +                       p.sched.sync = ODP_SCHED_SYNC_NONE;
>> +                       q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED,
>> &p);
>> +
>> +                       if (q == ODP_QUEUE_INVALID) {
>> +                               printf("Schedule queue create failed.\n");
>> +                               return -1;
>> +                       }
>> +
>> +                       snprintf(name, sizeof(name), "sched_%d_%d_a", i,
>> j);
>> +                       p.sched.sync = ODP_SCHED_SYNC_ATOMIC;
>> +                       q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED,
>> &p);
>> +
>> +                       if (q == ODP_QUEUE_INVALID) {
>> +                               printf("Schedule queue create failed.\n");
>> +                               return -1;
>> +                       }
>> +
>> +                       snprintf(name, sizeof(name), "sched_%d_%d_o", i,
>> j);
>> +                       p.sched.sync = ODP_SCHED_SYNC_ORDERED;
>> +                       q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED,
>> &p);
>> +
>> +                       if (q == ODP_QUEUE_INVALID) {
>> +                               printf("Schedule queue create failed.\n");
>> +                               return -1;
>> +                       }
>> +               }
>> +       }
>
>
> I find the detail obscures the intent with these nested loops. A comment
> would help, but better still it could be self-documenting: a function that
> creates a given number of queues with matching names. Maybe that would
> become something used by other tests?

I don't think I catch your drift; do you mean adding a new function that
creates a queue, to which you pass the sched type and name?

>
> [snip - remainder of quoted patch trimmed]
Mike Holmes Dec. 10, 2014, 5:26 p.m. UTC | #3
>
> snip
>
.....

> >> +
> >> +       for (i = 0; i < prios; i++) {
> >> +               odp_queue_param_t p;
> >> +               p.sched.prio  = i;
> >> +               p.sched.group = ODP_SCHED_GROUP_DEFAULT;
> >> +
> >> +               for (j = 0; j < QUEUES_PER_PRIO; j++) {
> >> +                       /* Per sched sync type */
> >> +                       char name[32];
> >> +                       odp_queue_t q;
> >> +
> >> +                       snprintf(name, sizeof(name), "sched_%d_%d_n", i,
> >> j);
> >> +                       p.sched.sync = ODP_SCHED_SYNC_NONE;
> >> +                       q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED,
> >> &p);
> >> +
> >> +                       if (q == ODP_QUEUE_INVALID) {
> >> +                               printf("Schedule queue create
> failed.\n");
> >> +                               return -1;
> >> +                       }
> >> +
> >> +                       snprintf(name, sizeof(name), "sched_%d_%d_a", i,
> >> j);
> >> +                       p.sched.sync = ODP_SCHED_SYNC_ATOMIC;
> >> +                       q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED,
> >> &p);
> >> +
> >> +                       if (q == ODP_QUEUE_INVALID) {
> >> +                               printf("Schedule queue create
> failed.\n");
> >> +                               return -1;
> >> +                       }
> >> +
> >> +                       snprintf(name, sizeof(name), "sched_%d_%d_o", i,
> >> j);
> >> +                       p.sched.sync = ODP_SCHED_SYNC_ORDERED;
> >> +                       q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED,
> >> &p);
> >> +
> >> +                       if (q == ODP_QUEUE_INVALID) {
> >> +                               printf("Schedule queue create
> failed.\n");
> >> +                               return -1;
> >> +                       }
> >> +               }
> >> +       }
> >
> >
> > I find the detail obscures the intent with these nested loops. A comment
> > would help, but better still it could be self-documenting: a function that
> > creates a given number of queues with matching names. Maybe that would
> > become something used by other tests?
>
> I don't think I catch your drift - do you mean adding a new function
> that creates a queue, to which you pass the sched type and name?
>


I meant that the program flow would be clearer with a subroutine.

The function could stay as it is now, calling ODP functions that describe at a
high level what you intend to happen, but rather than starting 30 lines of
nested loops with

for (i = 0; i < prios; i++) {
....
....

it would instead say create_queues_for_tests(args ......),

making the overall function easier to comprehend at a glance - just a
suggestion.
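
To make that concrete, here is a sketch of such a helper, distilled from the
queue-creation loop in schedule_test_init(); the name create_queues_for_tests()
and its parameters are placeholders, not something the patch or ODP defines:

/* Hypothetical helper: create one NONE, one ATOMIC and one ORDERED scheduled
 * queue per (priority, index) pair, named "sched_<prio>_<index>_<sync>". */
static int create_queues_for_tests(int prios, int queues_per_prio)
{
	const char suffix[] = {'n', 'a', 'o'};
	const odp_schedule_sync_t sync[] = {
		ODP_SCHED_SYNC_NONE,
		ODP_SCHED_SYNC_ATOMIC,
		ODP_SCHED_SYNC_ORDERED
	};
	int i, j, k;

	for (i = 0; i < prios; i++) {
		odp_queue_param_t p;

		p.sched.prio  = i;
		p.sched.group = ODP_SCHED_GROUP_DEFAULT;

		for (j = 0; j < queues_per_prio; j++) {
			/* One queue per scheduling sync type */
			for (k = 0; k < 3; k++) {
				char name[32];
				odp_queue_t q;

				snprintf(name, sizeof(name), "sched_%d_%d_%c",
					 i, j, suffix[k]);
				p.sched.sync = sync[k];
				q = odp_queue_create(name,
						     ODP_QUEUE_TYPE_SCHED, &p);
				if (q == ODP_QUEUE_INVALID) {
					printf("Schedule queue create failed.\n");
					return -1;
				}
			}
		}
	}
	return 0;
}

schedule_test_init() would then end with a single
"return create_queues_for_tests(odp_schedule_num_prio(), QUEUES_PER_PRIO);"
so the intent reads at a glance while the queue-creation details live in one
place.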


>
> snip .......
>
diff mbox

Patch

diff --git a/test/validation/Makefile.am b/test/validation/Makefile.am
index 8547085..3670c76 100644
--- a/test/validation/Makefile.am
+++ b/test/validation/Makefile.am
@@ -6,13 +6,15 @@  AM_LDFLAGS += -static
 if ODP_CUNIT_ENABLED
 TESTS = ${bin_PROGRAMS}
 check_PROGRAMS = ${bin_PROGRAMS}
-bin_PROGRAMS = odp_init odp_queue odp_crypto odp_shm
+bin_PROGRAMS = odp_init odp_queue odp_crypto odp_shm odp_schedule
 odp_init_LDFLAGS = $(AM_LDFLAGS)
 odp_queue_LDFLAGS = $(AM_LDFLAGS)
 odp_crypto_CFLAGS = $(AM_CFLAGS) -I$(srcdir)/crypto
 odp_crypto_LDFLAGS = $(AM_LDFLAGS)
 odp_shm_CFLAGS = $(AM_CFLAGS)
 odp_shm_LDFLAGS = $(AM_LDFLAGS)
+odp_schedule_CFLAGS = $(AM_CFLAGS)
+odp_schedule_LDFLAGS = $(AM_LDFLAGS)
 endif
 
 dist_odp_init_SOURCES = odp_init.c
@@ -22,3 +24,4 @@  dist_odp_crypto_SOURCES = crypto/odp_crypto_test_async_inp.c \
 			  crypto/odp_crypto_test_rng.c \
 			  odp_crypto.c common/odp_cunit_common.c
 dist_odp_shm_SOURCES = odp_shm.c common/odp_cunit_common.c
+dist_odp_schedule_SOURCES = odp_schedule.c common/odp_cunit_common.c
diff --git a/test/validation/odp_schedule.c b/test/validation/odp_schedule.c
new file mode 100644
index 0000000..ce9e9f8
--- /dev/null
+++ b/test/validation/odp_schedule.c
@@ -0,0 +1,605 @@ 
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+#include "odp.h"
+#include "odp_cunit_common.h"
+
+#define MAX_WORKERS		32            /* Max worker threads */
+#define MSG_POOL_SIZE           (4*1024*1024)
+#define QUEUES_PER_PRIO		16            /* Queue per priority */
+#define BUF_SIZE		64
+#define TEST_NUM_BUFS		100
+#define MULTI_BUFS_MAX		4             /* Buffer burst size */
+#define TEST_NUM_BUFS_EXCL	10000
+
+#define ONE_Q			1
+#define MANY_QS			QUEUES_PER_PRIO
+#define ONE_PRIO		1
+#define SCHD_ONE		0
+#define SCHD_MULTI		1
+
+#define GLOBALS_SHM_NAME	"test_globals"
+#define MSG_POOL_NAME		"msg_pool"
+#define SHM_MSG_POOL_NAME	"shm_msg_pool"
+#define SHM_THR_ARGS_NAME	"shm_thr_args"
+
+
+/* Test global variables */
+typedef struct {
+	int core_count;
+	odp_barrier_t barrier;		/* Barrier for test synchronisation */
+	odp_schedule_prio_t prio;	/* Current prio */
+	int buf_count;			/* Number of bufs for current prio */
+	odp_spinlock_t count_lock;	/* Used for accessing prio counters */
+	odp_spinlock_t atomic_lock;	/* Used verify schedule on ATOMIC qs */
+} test_globals_t;
+
+typedef struct ODP_PACKED {
+	pthrd_arg thrdarg;
+	odp_schedule_sync_t sync;
+	int num_queues;
+	int num_prio;
+	int num_bufs;			/* Number of buffers to enqueue */
+	int num_cores;			/* Number of cores used for the test */
+	int multi;			/* Flag for using odp_schedule_multi */
+	int excl;			/* Test ATOMIC exclusive access */
+} thread_args_t;
+
+odp_buffer_pool_t pool;
+
+static void test_schedule_wait_time(void)
+{
+	uint64_t wait_time;
+
+	wait_time = odp_schedule_wait_time(0);
+	CU_ASSERT(wait_time > 0);
+
+	wait_time = odp_schedule_wait_time(1);
+	CU_ASSERT(wait_time > 0);
+
+	wait_time = odp_schedule_wait_time((uint64_t)-1LL);
+	CU_ASSERT(wait_time > 0);
+}
+
+static void test_schedule_num_prio(void)
+{
+	int prio;
+
+	prio = odp_schedule_num_prio();
+
+	CU_ASSERT(prio > 0);
+	/* Just common sense but why not */
+	CU_ASSERT(prio == odp_schedule_num_prio());
+}
+
+static void *schedule_common_(void *arg)
+{
+	thread_args_t *args = (thread_args_t *)arg;
+	odp_schedule_sync_t sync;
+	int num_queues, num_prio, num_bufs, num_cores;
+	odp_shm_t shm;
+	test_globals_t *globals;
+
+	sync = args->sync;
+	num_queues = args->num_queues;
+	num_prio = args->num_prio;
+	num_bufs = args->num_bufs;
+	num_cores = args->num_cores;
+
+	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+	globals = odp_shm_addr(shm);
+	CU_ASSERT_FATAL(globals != NULL);
+
+
+	if (num_cores == globals->core_count)
+		odp_barrier_wait(&globals->barrier);
+
+	while (1) {
+		odp_buffer_t buf;
+		odp_queue_t from;
+		int num = 0;
+		int locked;
+
+		odp_spinlock_lock(&globals->count_lock);
+		if (globals->buf_count ==
+		    num_bufs * num_queues * num_prio) {
+			odp_spinlock_unlock(&globals->count_lock);
+			break;
+		}
+		odp_spinlock_unlock(&globals->count_lock);
+		if (args->multi) {
+			odp_buffer_t bufs[MULTI_BUFS_MAX];
+			int j;
+			num = odp_schedule_multi(&from, ODP_SCHED_NO_WAIT, bufs,
+						 MULTI_BUFS_MAX);
+			CU_ASSERT(num >= 0);
+			CU_ASSERT(num <= MULTI_BUFS_MAX);
+			if (num == 0)
+				continue;
+			for (j = 0; j < num; j++)
+				odp_buffer_free(bufs[j]);
+		} else {
+			buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+			if (buf == ODP_BUFFER_INVALID)
+				continue;
+			num = 1;
+			odp_buffer_free(buf);
+		}
+		if (args->excl) {
+			locked = odp_spinlock_trylock(&globals->atomic_lock);
+			CU_ASSERT(locked == 1);
+			CU_ASSERT(from != ODP_QUEUE_INVALID);
+			if (locked) {
+				int cnt;
+				uint64_t cycles = 0;
+				/* Do some work here to keep the thread busy */
+				for (cnt = 0; cnt < 1000; cnt++)
+					cycles += odp_time_cycles();
+
+				odp_spinlock_unlock(&globals->atomic_lock);
+			}
+		}
+
+
+		/* TODO: odp_queue_sched_prio not in yet so we can't
+		 check the priority is the one we expected */
+
+		odp_spinlock_lock(&globals->count_lock);
+		globals->buf_count += num;
+
+		if (sync == ODP_SCHED_SYNC_ATOMIC)
+			odp_schedule_release_atomic();
+
+		if (globals->buf_count ==
+		    num_bufs * num_queues * num_prio) {
+			odp_spinlock_unlock(&globals->count_lock);
+			break;
+		}
+		odp_spinlock_unlock(&globals->count_lock);
+	}
+
+	return NULL;
+}
+
+static void fill_queues(thread_args_t *args)
+{
+	odp_schedule_sync_t sync;
+	int num_queues, num_prio;
+	odp_buffer_pool_t pool;
+	int i, j, k;
+	char name[32];
+
+	sync = args->sync;
+	num_queues = args->num_queues;
+	num_prio = args->num_prio;
+
+	pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
+	CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
+
+	for (i = 0; i < num_prio; i++) {
+		for (j = 0; j < num_queues; j++) {
+			odp_queue_t queue;
+
+			switch (sync) {
+			case ODP_SCHED_SYNC_NONE:
+				snprintf(name, sizeof(name),
+					 "sched_%d_%d_n", i, j);
+				break;
+			case ODP_SCHED_SYNC_ATOMIC:
+				snprintf(name, sizeof(name),
+					 "sched_%d_%d_a", i, j);
+				break;
+			case ODP_SCHED_SYNC_ORDERED:
+				snprintf(name, sizeof(name),
+					 "sched_%d_%d_o", i, j);
+				break;
+			default:
+				CU_ASSERT(0);
+				break;
+			}
+
+			queue = odp_queue_lookup(name);
+			CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+			for (k = 0; k < args->num_bufs; k++) {
+				odp_buffer_t buf;
+				buf = odp_buffer_alloc(pool);
+				CU_ASSERT(buf != ODP_BUFFER_INVALID);
+				CU_ASSERT(odp_queue_enq(queue, buf) == 0);
+			}
+		}
+	}
+}
+
+static void schedule_common(odp_schedule_sync_t sync, int num_queues,
+			    int num_prio, int multi)
+{
+	thread_args_t args;
+	odp_shm_t shm;
+	test_globals_t *globals;
+
+	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+	globals = odp_shm_addr(shm);
+	CU_ASSERT_FATAL(globals != NULL);
+
+	globals->prio = ODP_SCHED_PRIO_HIGHEST;
+	globals->buf_count = 0;
+
+	args.sync = sync;
+	args.num_queues = num_queues;
+	args.num_prio = num_prio;
+	args.num_bufs = TEST_NUM_BUFS;
+	args.num_cores = 1;
+	args.multi = multi;
+	args.excl = 0;	/* Don't test exclusive access with a single core */
+
+	fill_queues(&args);
+
+	schedule_common_(&args);
+}
+
+static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
+			     int num_prio, int multi, int excl)
+{
+	odp_shm_t shm;
+	test_globals_t *globals;
+	thread_args_t *thr_args;
+
+	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+	globals = odp_shm_addr(shm);
+	CU_ASSERT_FATAL(globals != NULL);
+
+	shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
+	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+	thr_args = odp_shm_addr(shm);
+	CU_ASSERT_FATAL(thr_args != NULL);
+
+	thr_args->sync = sync;
+	thr_args->num_queues = num_queues;
+	thr_args->num_prio = num_prio;
+	if (excl)
+		thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
+	else
+		thr_args->num_bufs = TEST_NUM_BUFS;
+	thr_args->num_cores = globals->core_count;
+	thr_args->multi = multi;
+	thr_args->excl = excl;
+
+	fill_queues(thr_args);
+
+	/* Reset buffer counters from the main thread */
+	globals->prio = ODP_SCHED_PRIO_HIGHEST;
+	globals->buf_count = 0;
+
+	/* Create and launch worker threads */
+	thr_args->thrdarg.numthrds = globals->core_count;
+	odp_cunit_thread_create(schedule_common_, &thr_args->thrdarg);
+
+	/* Wait for worker threads to terminate */
+	odp_cunit_thread_exit(&thr_args->thrdarg);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_NONE */
+static void test_schedule_1q_1t_none(void)
+{
+	schedule_common(ODP_SCHED_SYNC_NONE, ONE_Q, ONE_PRIO, SCHD_ONE);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC */
+static void test_schedule_1q_1t_atomic(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_ONE);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED */
+static void test_schedule_1q_1t_ordered(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO, SCHD_ONE);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_NONE */
+static void test_schedule_mq_1t_none(void)
+{
+	/* Only one priority involved in these tests, but use
+	   the same number of queues the more general case uses */
+	schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, ONE_PRIO, SCHD_ONE);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC */
+static void test_schedule_mq_1t_atomic(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, ONE_PRIO, SCHD_ONE);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED */
+static void test_schedule_mq_1t_ordered(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO, SCHD_ONE);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_NONE */
+static void test_schedule_mq_1t_prio_none(void)
+{
+	int prio = odp_schedule_num_prio();
+	schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_ONE);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC */
+static void test_schedule_mq_1t_prio_atomic(void)
+{
+	int prio = odp_schedule_num_prio();
+	schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_ONE);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED */
+static void test_schedule_mq_1t_prio_ordered(void)
+{
+	int prio = odp_schedule_num_prio();
+	schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_ONE);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_NONE */
+static void test_schedule_mq_mt_prio_none(void)
+{
+	int prio = odp_schedule_num_prio();
+	parallel_execute(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_ONE, 0);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC */
+static void test_schedule_mq_mt_prio_atomic(void)
+{
+	int prio = odp_schedule_num_prio();
+	parallel_execute(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_ONE, 0);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED */
+static void test_schedule_mq_mt_prio_ordered(void)
+{
+	int prio = odp_schedule_num_prio();
+	parallel_execute(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_ONE, 0);
+}
+
+/* 1 queue many threads check exclusive access on ATOMIC queues */
+static void test_schedule_1q_mt_atomic_excl(void)
+{
+	parallel_execute(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_ONE, 1);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_NONE multi */
+static void test_schedulem_1q_1t_none(void)
+{
+	schedule_common(ODP_SCHED_SYNC_NONE, ONE_Q, ONE_PRIO, SCHD_MULTI);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC multi */
+static void test_schedulem_1q_1t_atomic(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_MULTI);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED multi */
+static void test_schedulem_1q_1t_ordered(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO, SCHD_MULTI);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_NONE multi */
+static void test_schedulem_mq_1t_none(void)
+{
+	/* Only one priority involved in these tests, but use
+	   the same number of queues the more general case uses */
+	schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, ONE_PRIO, SCHD_MULTI);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC multi */
+static void test_schedulem_mq_1t_atomic(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, ONE_PRIO, SCHD_MULTI);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED multi */
+static void test_schedulem_mq_1t_ordered(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO, SCHD_MULTI);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_NONE multi */
+static void test_schedulem_mq_1t_prio_none(void)
+{
+	int prio = odp_schedule_num_prio();
+	schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_MULTI);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC multi */
+static void test_schedulem_mq_1t_prio_atomic(void)
+{
+	int prio = odp_schedule_num_prio();
+	schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_MULTI);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED multi */
+static void test_schedulem_mq_1t_prio_ordered(void)
+{
+	int prio = odp_schedule_num_prio();
+	schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_MULTI);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_NONE multi */
+static void test_schedulem_mq_mt_prio_none(void)
+{
+	int prio = odp_schedule_num_prio();
+	parallel_execute(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_MULTI, 0);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC multi */
+static void test_schedulem_mq_mt_prio_atomic(void)
+{
+	int prio = odp_schedule_num_prio();
+	parallel_execute(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_MULTI, 0);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED multi */
+static void test_schedulem_mq_mt_prio_ordered(void)
+{
+	int prio = odp_schedule_num_prio();
+	parallel_execute(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_MULTI, 0);
+}
+
+/* 1 queue many threads check exclusive access on ATOMIC queues multi */
+static void test_schedulem_1q_mt_atomic_excl(void)
+{
+	parallel_execute(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_MULTI, 1);
+}
+
+
+static int schedule_test_init(void)
+{
+	odp_shm_t shm;
+	void *pool_base;
+	odp_buffer_pool_t pool;
+	test_globals_t *globals;
+	thread_args_t *thr_args;
+	int i, j;
+	int prios;
+
+	shm = odp_shm_reserve(SHM_MSG_POOL_NAME, MSG_POOL_SIZE,
+			      ODP_CACHE_LINE_SIZE, 0);
+	pool_base = odp_shm_addr(shm);
+	if (pool_base == NULL) {
+		printf("Shared memory reserve failed.\n");
+		return -1;
+	}
+
+	pool = odp_buffer_pool_create(MSG_POOL_NAME, pool_base, MSG_POOL_SIZE,
+				      BUF_SIZE, ODP_CACHE_LINE_SIZE,
+				      ODP_BUFFER_TYPE_RAW);
+	if (pool == ODP_BUFFER_POOL_INVALID) {
+		printf("Pool creation failed (msg).\n");
+		return -1;
+	}
+
+	shm = odp_shm_reserve(GLOBALS_SHM_NAME,
+			      sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
+
+	globals = odp_shm_addr(shm);
+
+	if (globals == NULL) {
+		printf("Shared memory reserve failed (globals).\n");
+		return -1;
+	}
+
+	memset(globals, 0, sizeof(test_globals_t));
+
+	globals->core_count = odp_sys_core_count();
+	if (globals->core_count > MAX_WORKERS)
+		globals->core_count = MAX_WORKERS;
+
+	shm = odp_shm_reserve(SHM_THR_ARGS_NAME, sizeof(thread_args_t),
+			      ODP_CACHE_LINE_SIZE, 0);
+	thr_args = odp_shm_addr(shm);
+
+	if (thr_args == NULL) {
+		printf("Shared memory reserve failed (thr_args).\n");
+		return -1;
+	}
+
+	memset(thr_args, 0, sizeof(thread_args_t));
+
+	/* Barrier to sync test case execution */
+	odp_barrier_init(&globals->barrier, globals->core_count);
+	odp_spinlock_init(&globals->count_lock);
+	odp_spinlock_init(&globals->atomic_lock);
+
+	prios = odp_schedule_num_prio();
+
+	for (i = 0; i < prios; i++) {
+		odp_queue_param_t p;
+		p.sched.prio  = i;
+		p.sched.group = ODP_SCHED_GROUP_DEFAULT;
+
+		for (j = 0; j < QUEUES_PER_PRIO; j++) {
+			/* Per sched sync type */
+			char name[32];
+			odp_queue_t q;
+
+			snprintf(name, sizeof(name), "sched_%d_%d_n", i, j);
+			p.sched.sync = ODP_SCHED_SYNC_NONE;
+			q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
+
+			if (q == ODP_QUEUE_INVALID) {
+				printf("Schedule queue create failed.\n");
+				return -1;
+			}
+
+			snprintf(name, sizeof(name), "sched_%d_%d_a", i, j);
+			p.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+			q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
+
+			if (q == ODP_QUEUE_INVALID) {
+				printf("Schedule queue create failed.\n");
+				return -1;
+			}
+
+			snprintf(name, sizeof(name), "sched_%d_%d_o", i, j);
+			p.sched.sync = ODP_SCHED_SYNC_ORDERED;
+			q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
+
+			if (q == ODP_QUEUE_INVALID) {
+				printf("Schedule queue create failed.\n");
+				return -1;
+			}
+		}
+	}
+	return 0;
+}
+
+static int schedule_test_finalize(void)
+{
+	odp_term_local();
+	odp_term_global();
+	return 0;
+}
+
+struct CU_TestInfo test_odp_schedule[] = {
+	{"schedule_wait_time", test_schedule_wait_time},
+	{"schedule_num_prio", test_schedule_num_prio},
+	{"schedule_1q_1t_none", test_schedule_1q_1t_none},
+	{"schedule_1q_1t_atomic", test_schedule_1q_1t_atomic},
+	{"schedule_1q_1t_ordered", test_schedule_1q_1t_ordered},
+	{"schedule_mq_1t_none", test_schedule_mq_1t_none},
+	{"schedule_mq_1t_atomic", test_schedule_mq_1t_atomic},
+	{"schedule_mq_1t_ordered", test_schedule_mq_1t_ordered},
+	{"schedule_mq_1t_prio_none", test_schedule_mq_1t_prio_none},
+	{"schedule_mq_1t_prio_atomic", test_schedule_mq_1t_prio_atomic},
+	{"schedule_mq_1t_prio_ordered", test_schedule_mq_1t_prio_ordered},
+	{"schedule_mq_mt_prio_none", test_schedule_mq_mt_prio_none},
+	{"schedule_mq_mt_prio_atomic", test_schedule_mq_mt_prio_atomic},
+	{"schedule_mq_mt_prio_ordered", test_schedule_mq_mt_prio_ordered},
+	{"schedule_1q_mt_atomic_excl", test_schedule_1q_mt_atomic_excl},
+	{"schedulem_1q_1t_none", test_schedulem_1q_1t_none},
+	{"schedulem_1q_1t_atomic", test_schedulem_1q_1t_atomic},
+	{"schedulem_1q_1t_ordered", test_schedulem_1q_1t_ordered},
+	{"schedulem_mq_1t_none", test_schedulem_mq_1t_none},
+	{"schedulem_mq_1t_atomic", test_schedulem_mq_1t_atomic},
+	{"schedulem_mq_1t_ordered", test_schedulem_mq_1t_ordered},
+	{"schedulem_mq_1t_prio_none", test_schedulem_mq_1t_prio_none},
+	{"schedulem_mq_1t_prio_atomic", test_schedulem_mq_1t_prio_atomic},
+	{"schedulem_mq_1t_prio_ordered", test_schedulem_mq_1t_prio_ordered},
+	{"schedulem_mq_mt_prio_none", test_schedulem_mq_mt_prio_none},
+	{"schedulem_mq_mt_prio_atomic", test_schedulem_mq_mt_prio_atomic},
+	{"schedulem_mq_mt_prio_ordered", test_schedulem_mq_mt_prio_ordered},
+	{"schedulem_1q_mt_atomic_excl", test_schedulem_1q_mt_atomic_excl},
+	CU_TEST_INFO_NULL,
+};
+
+CU_SuiteInfo odp_testsuites[] = {
+	{"Scheduler", schedule_test_init, schedule_test_finalize,
+		NULL, NULL, test_odp_schedule},
+	CU_SUITE_INFO_NULL,
+};