
[PATCHv4] validation: schedule: initial testcases

Message ID 1418312632-32328-1-git-send-email-ciprian.barbu@linaro.org
State Accepted
Commit 09a53445c5f9eb1eb21b8bd2738e5aef998b7410

Commit Message

Ciprian Barbu Dec. 11, 2014, 3:43 p.m. UTC
Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
---
v4:
 - fixes after Jerin's comments
 - removed tests_global_init and made it the suite init function
v3:
 - changes after Mike's review
 - removed the duplicate end-of-test check in schedule_common_
v2:
 - rebased against ODP tip
 - fixed some bugs
 - added some defines to clearly see the testcase parameters

 test/validation/.gitignore     |   1 +
 test/validation/Makefile.am    |   5 +-
 test/validation/odp_schedule.c | 607 +++++++++++++++++++++++++++++++++++++++++
 3 files changed, 612 insertions(+), 1 deletion(-)
 create mode 100644 test/validation/odp_schedule.c

Comments

Mike Holmes Dec. 11, 2014, 6:07 p.m. UTC | #1
On 11 December 2014 at 10:43, Ciprian Barbu <ciprian.barbu@linaro.org> wrote:

> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
>

Reviewed-by: Mike Holmes <mike.holmes@linaro.org>

My issues are covered. We will need to implement the cleanup to complement
schd_suite_init once those APIs are in the repo.
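
A minimal sketch of what that cleanup hook could look like, assuming
hypothetical odp_queue_destroy() and odp_shm_free() calls land with these
shapes (neither is in the repo yet, which is the point above):

static int schd_suite_term(void)
{
	int i, j, prios = odp_schedule_num_prio();
	char name[32];

	/* Destroy the queues created by create_queues(); assumes a
	 * hypothetical odp_queue_destroy(odp_queue_t) API. */
	for (i = 0; i < prios; i++) {
		for (j = 0; j < QUEUES_PER_PRIO; j++) {
			snprintf(name, sizeof(name), "sched_%d_%d_n", i, j);
			odp_queue_destroy(odp_queue_lookup(name));
			snprintf(name, sizeof(name), "sched_%d_%d_a", i, j);
			odp_queue_destroy(odp_queue_lookup(name));
			snprintf(name, sizeof(name), "sched_%d_%d_o", i, j);
			odp_queue_destroy(odp_queue_lookup(name));
		}
	}

	/* Release the shared memory reserved in schd_suite_init();
	 * assumes a hypothetical odp_shm_free(odp_shm_t) API. */
	odp_shm_free(odp_shm_lookup(SHM_THR_ARGS_NAME));
	odp_shm_free(odp_shm_lookup(GLOBALS_SHM_NAME));
	odp_shm_free(odp_shm_lookup(SHM_MSG_POOL_NAME));

	return 0;
}

Such a function would then be registered as the suite cleanup callback,
i.e. in place of the first NULL in the "Scheduler" entry of odp_testsuites[].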

Mike



> ---
> v4:
>  - fixes after Jerin's comments
>  - removed tests_global_init and made it the suite init function
> v3:
>  - changes after Mike's review
>  - removed the duplicate end-of-test check in schedule_common_
> v2:
>  - rebased against ODP tip
>  - fixed some bugs
>  - added some defines to clearly see the testcase parameters
>
>  test/validation/.gitignore     |   1 +
>  test/validation/Makefile.am    |   5 +-
>  test/validation/odp_schedule.c | 607 +++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 612 insertions(+), 1 deletion(-)
>  create mode 100644 test/validation/odp_schedule.c
>
> diff --git a/test/validation/.gitignore b/test/validation/.gitignore
> index 37e2594..32834ae 100644
> --- a/test/validation/.gitignore
> +++ b/test/validation/.gitignore
> @@ -3,4 +3,5 @@
>  odp_init
>  odp_queue
>  odp_crypto
> +odp_schedule
>  odp_shm
> diff --git a/test/validation/Makefile.am b/test/validation/Makefile.am
> index 8547085..3670c76 100644
> --- a/test/validation/Makefile.am
> +++ b/test/validation/Makefile.am
> @@ -6,13 +6,15 @@ AM_LDFLAGS += -static
>  if ODP_CUNIT_ENABLED
>  TESTS = ${bin_PROGRAMS}
>  check_PROGRAMS = ${bin_PROGRAMS}
> -bin_PROGRAMS = odp_init odp_queue odp_crypto odp_shm
> +bin_PROGRAMS = odp_init odp_queue odp_crypto odp_shm odp_schedule
>  odp_init_LDFLAGS = $(AM_LDFLAGS)
>  odp_queue_LDFLAGS = $(AM_LDFLAGS)
>  odp_crypto_CFLAGS = $(AM_CFLAGS) -I$(srcdir)/crypto
>  odp_crypto_LDFLAGS = $(AM_LDFLAGS)
>  odp_shm_CFLAGS = $(AM_CFLAGS)
>  odp_shm_LDFLAGS = $(AM_LDFLAGS)
> +odp_schedule_CFLAGS = $(AM_CFLAGS)
> +odp_schedule_LDFLAGS = $(AM_LDFLAGS)
>  endif
>
>  dist_odp_init_SOURCES = odp_init.c
> @@ -22,3 +24,4 @@ dist_odp_crypto_SOURCES = crypto/odp_crypto_test_async_inp.c \
>                           crypto/odp_crypto_test_rng.c \
>                           odp_crypto.c common/odp_cunit_common.c
>  dist_odp_shm_SOURCES = odp_shm.c common/odp_cunit_common.c
> +dist_odp_schedule_SOURCES = odp_schedule.c common/odp_cunit_common.c
> diff --git a/test/validation/odp_schedule.c b/test/validation/odp_schedule.c
> new file mode 100644
> index 0000000..9d410e4
> --- /dev/null
> +++ b/test/validation/odp_schedule.c
> @@ -0,0 +1,607 @@
> +/* Copyright (c) 2014, Linaro Limited
> + * All rights reserved.
> + *
> + * SPDX-License-Identifier:     BSD-3-Clause
> + */
> +
> +#include <odp.h>
> +#include "odp_cunit_common.h"
> +
> +#define MAX_WORKERS_THREADS    32
> +#define MSG_POOL_SIZE          (4*1024*1024)
> +#define QUEUES_PER_PRIO                16
> +#define BUF_SIZE               64
> +#define TEST_NUM_BUFS          100
> +#define BURST_BUF_SIZE         4
> +#define TEST_NUM_BUFS_EXCL     10000
> +
> +#define GLOBALS_SHM_NAME       "test_globals"
> +#define MSG_POOL_NAME          "msg_pool"
> +#define SHM_MSG_POOL_NAME      "shm_msg_pool"
> +#define SHM_THR_ARGS_NAME      "shm_thr_args"
> +
> +#define ONE_Q                  1
> +#define MANY_QS                        QUEUES_PER_PRIO
> +
> +#define ONE_PRIO               1
> +
> +#define SCHD_ONE               0
> +#define SCHD_MULTI             1
> +
> +#define DISABLE_EXCL_ATOMIC    0
> +#define ENABLE_EXCL_ATOMIC     1
> +
> +
> +/* Test global variables */
> +typedef struct {
> +       int core_count;
> +       odp_barrier_t barrier;
> +       odp_schedule_prio_t current_prio;
> +       int prio_buf_count;
> +       odp_ticketlock_t count_lock;
> +       odp_spinlock_t atomic_lock;
> +} test_globals_t;
> +
> +typedef struct ODP_PACKED {
> +       pthrd_arg thrdarg;
> +       odp_schedule_sync_t sync;
> +       int num_queues;
> +       int num_prio;
> +       int num_bufs;
> +       int num_cores;
> +       int enable_schd_multi;
> +       int enable_excl_atomic;
> +} thread_args_t;
> +
> +odp_buffer_pool_t pool;
> +
> +static void test_schedule_wait_time(void)
> +{
> +       uint64_t wait_time;
> +
> +       wait_time = odp_schedule_wait_time(0);
> +
> +       wait_time = odp_schedule_wait_time(1);
> +       CU_ASSERT(wait_time > 0);
> +
> +       wait_time = odp_schedule_wait_time((uint64_t)-1LL);
> +       CU_ASSERT(wait_time > 0);
> +}
> +
> +static void test_schedule_num_prio(void)
> +{
> +       int prio;
> +
> +       prio = odp_schedule_num_prio();
> +
> +       CU_ASSERT(prio > 0);
> +       CU_ASSERT(prio == odp_schedule_num_prio());
> +}
> +
> +static void *schedule_common_(void *arg)
> +{
> +       thread_args_t *args = (thread_args_t *)arg;
> +       odp_schedule_sync_t sync;
> +       int num_queues, num_prio, num_bufs, num_cores;
> +       odp_shm_t shm;
> +       test_globals_t *globals;
> +
> +       sync = args->sync;
> +       num_queues = args->num_queues;
> +       num_prio = args->num_prio;
> +       num_bufs = args->num_bufs;
> +       num_cores = args->num_cores;
> +
> +       shm = odp_shm_lookup(GLOBALS_SHM_NAME);
> +       CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
> +       globals = odp_shm_addr(shm);
> +       CU_ASSERT_FATAL(globals != NULL);
> +
> +
> +       if (num_cores == globals->core_count)
> +               odp_barrier_wait(&globals->barrier);
> +
> +       while (1) {
> +               odp_buffer_t buf;
> +               odp_queue_t from;
> +               int num = 0;
> +               int locked;
> +
> +               odp_ticketlock_lock(&globals->count_lock);
> +               if (globals->prio_buf_count ==
> +                   num_bufs * num_queues * num_prio) {
> +                       odp_ticketlock_unlock(&globals->count_lock);
> +                       break;
> +               }
> +               odp_ticketlock_unlock(&globals->count_lock);
> +
> +               if (args->enable_schd_multi) {
> +                       odp_buffer_t bufs[BURST_BUF_SIZE];
> +                       int j;
> +                       num = odp_schedule_multi(&from, ODP_SCHED_NO_WAIT, bufs,
> +                                                BURST_BUF_SIZE);
> +                       CU_ASSERT(num >= 0);
> +                       CU_ASSERT(num <= BURST_BUF_SIZE);
> +                       if (num == 0)
> +                               continue;
> +                       for (j = 0; j < num; j++)
> +                               odp_buffer_free(bufs[j]);
> +               } else {
> +                       buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
> +                       if (buf == ODP_BUFFER_INVALID)
> +                               continue;
> +                       num = 1;
> +                       odp_buffer_free(buf);
> +               }
> +
> +               if (args->enable_excl_atomic) {
> +                       locked = odp_spinlock_trylock(&globals->atomic_lock);
> +                       CU_ASSERT(locked == 1);
> +                       CU_ASSERT(from != ODP_QUEUE_INVALID);
> +                       if (locked) {
> +                               int cnt;
> +                               uint64_t cycles = 0;
> +                               /* Do some work here to keep the thread busy */
> +                               for (cnt = 0; cnt < 1000; cnt++)
> +                                       cycles += odp_time_cycles();
> +
> +                               odp_spinlock_unlock(&globals->atomic_lock);
> +                       }
> +               }
> +
> +               odp_ticketlock_lock(&globals->count_lock);
> +               globals->prio_buf_count += num;
> +
> +               if (sync == ODP_SCHED_SYNC_ATOMIC)
> +                       odp_schedule_release_atomic();
> +
> +               odp_ticketlock_unlock(&globals->count_lock);
> +       }
> +
> +       return NULL;
> +}
> +
> +static void fill_queues(thread_args_t *args)
> +{
> +       odp_schedule_sync_t sync;
> +       int num_queues, num_prio;
> +       odp_buffer_pool_t pool;
> +       int i, j, k;
> +       char name[32];
> +
> +       sync = args->sync;
> +       num_queues = args->num_queues;
> +       num_prio = args->num_prio;
> +
> +       pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
> +       CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
> +
> +       for (i = 0; i < num_prio; i++) {
> +               for (j = 0; j < num_queues; j++) {
> +                       odp_queue_t queue;
> +
> +                       switch (sync) {
> +                       case ODP_SCHED_SYNC_NONE:
> +                               snprintf(name, sizeof(name),
> +                                        "sched_%d_%d_n", i, j);
> +                               break;
> +                       case ODP_SCHED_SYNC_ATOMIC:
> +                               snprintf(name, sizeof(name),
> +                                        "sched_%d_%d_a", i, j);
> +                               break;
> +                       case ODP_SCHED_SYNC_ORDERED:
> +                               snprintf(name, sizeof(name),
> +                                        "sched_%d_%d_o", i, j);
> +                               break;
> +                       default:
> +                               CU_ASSERT(0);
> +                               break;
> +                       }
> +
> +                       queue = odp_queue_lookup(name);
> +                       CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
> +
> +                       for (k = 0; k < args->num_bufs; k++) {
> +                               odp_buffer_t buf;
> +                               buf = odp_buffer_alloc(pool);
> +                               CU_ASSERT(buf != ODP_BUFFER_INVALID);
> +                               CU_ASSERT(odp_queue_enq(queue, buf) == 0);
> +                       }
> +               }
> +       }
> +}
> +
> +static void schedule_common(odp_schedule_sync_t sync, int num_queues,
> +                           int num_prio, int enable_schd_multi)
> +{
> +       thread_args_t args;
> +       odp_shm_t shm;
> +       test_globals_t *globals;
> +
> +       shm = odp_shm_lookup(GLOBALS_SHM_NAME);
> +       CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
> +       globals = odp_shm_addr(shm);
> +       CU_ASSERT_FATAL(globals != NULL);
> +
> +       globals->current_prio = ODP_SCHED_PRIO_HIGHEST;
> +       globals->prio_buf_count = 0;
> +
> +       args.sync = sync;
> +       args.num_queues = num_queues;
> +       args.num_prio = num_prio;
> +       args.num_bufs = TEST_NUM_BUFS;
> +       args.num_cores = 1;
> +       args.enable_schd_multi = enable_schd_multi;
> +       args.enable_excl_atomic = 0;    /* Not needed with a single core */
> +
> +       fill_queues(&args);
> +
> +       schedule_common_(&args);
> +}
> +
> +static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
> +                            int num_prio, int enable_schd_multi,
> +                            int enable_excl_atomic)
> +{
> +       odp_shm_t shm;
> +       test_globals_t *globals;
> +       thread_args_t *thr_args;
> +
> +       shm = odp_shm_lookup(GLOBALS_SHM_NAME);
> +       CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
> +       globals = odp_shm_addr(shm);
> +       CU_ASSERT_FATAL(globals != NULL);
> +
> +       shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
> +       CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
> +       thr_args = odp_shm_addr(shm);
> +       CU_ASSERT_FATAL(thr_args != NULL);
> +
> +       thr_args->sync = sync;
> +       thr_args->num_queues = num_queues;
> +       thr_args->num_prio = num_prio;
> +       if (enable_excl_atomic)
> +               thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
> +       else
> +               thr_args->num_bufs = TEST_NUM_BUFS;
> +       thr_args->num_cores = globals->core_count;
> +       thr_args->enable_schd_multi = enable_schd_multi;
> +       thr_args->enable_excl_atomic = enable_excl_atomic;
> +
> +       fill_queues(thr_args);
> +
> +       /* Reset buffer counters from the main thread */
> +       globals->current_prio = ODP_SCHED_PRIO_HIGHEST;
> +       globals->prio_buf_count = 0;
> +
> +       /* Create and launch worker threads */
> +       thr_args->thrdarg.numthrds = globals->core_count;
> +       odp_cunit_thread_create(schedule_common_, &thr_args->thrdarg);
> +
> +       /* Wait for worker threads to terminate */
> +       odp_cunit_thread_exit(&thr_args->thrdarg);
> +}
> +
> +/* 1 queue 1 thread ODP_SCHED_SYNC_NONE */
> +static void test_schedule_1q_1t_n(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_NONE, ONE_Q, ONE_PRIO, SCHD_ONE);
> +}
> +
> +/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC */
> +static void test_schedule_1q_1t_a(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_ONE);
> +}
> +
> +/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED */
> +static void test_schedule_1q_1t_o(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO, SCHD_ONE);
> +}
> +
> +/* Many queues 1 thread ODP_SCHED_SYNC_NONE */
> +static void test_schedule_mq_1t_n(void)
> +{
> +       /* Only one priority involved in these tests, but use
> +          the same number of queues the more general case uses */
> +       schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, ONE_PRIO, SCHD_ONE);
> +}
> +
> +/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC */
> +static void test_schedule_mq_1t_a(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, ONE_PRIO, SCHD_ONE);
> +}
> +
> +/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED */
> +static void test_schedule_mq_1t_o(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO, SCHD_ONE);
> +}
> +
> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_NONE */
> +static void test_schedule_mq_1t_prio_n(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_ONE);
> +}
> +
> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC */
> +static void test_schedule_mq_1t_prio_a(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_ONE);
> +}
> +
> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED */
> +static void test_schedule_mq_1t_prio_o(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_ONE);
> +}
> +
> +/* Many queues many threads check priority ODP_SCHED_SYNC_NONE */
> +static void test_schedule_mq_mt_prio_n(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       parallel_execute(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_ONE,
> +                        DISABLE_EXCL_ATOMIC);
> +}
> +
> +/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC */
> +static void test_schedule_mq_mt_prio_a(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       parallel_execute(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_ONE,
> +                        DISABLE_EXCL_ATOMIC);
> +}
> +
> +/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED */
> +static void test_schedule_mq_mt_prio_o(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       parallel_execute(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_ONE,
> +                        DISABLE_EXCL_ATOMIC);
> +}
> +
> +/* 1 queue many threads check exclusive access on ATOMIC queues */
> +static void test_schedule_1q_mt_a_excl(void)
> +{
> +       parallel_execute(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_ONE,
> +                        ENABLE_EXCL_ATOMIC);
> +}
> +
> +/* 1 queue 1 thread ODP_SCHED_SYNC_NONE multi */
> +static void test_schedule_multi_1q_1t_n(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_NONE, ONE_Q, ONE_PRIO, SCHD_MULTI);
> +}
> +
> +/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC multi */
> +static void test_schedule_multi_1q_1t_a(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_MULTI);
> +}
> +
> +/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED multi */
> +static void test_schedule_multi_1q_1t_o(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO, SCHD_MULTI);
> +}
> +
> +/* Many queues 1 thread ODP_SCHED_SYNC_NONE multi */
> +static void test_schedule_multi_mq_1t_n(void)
> +{
> +       /* Only one priority involved in these tests, but use
> +          the same number of queues the more general case uses */
> +       schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, ONE_PRIO, SCHD_MULTI);
> +}
> +
> +/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC multi */
> +static void test_schedule_multi_mq_1t_a(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, ONE_PRIO, SCHD_MULTI);
> +}
> +
> +/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED multi */
> +static void test_schedule_multi_mq_1t_o(void)
> +{
> +       schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO, SCHD_MULTI);
> +}
> +
> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_NONE multi */
> +static void test_schedule_multi_mq_1t_prio_n(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_MULTI);
> +}
> +
> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC multi */
> +static void test_schedule_multi_mq_1t_prio_a(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_MULTI);
> +}
> +
> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED multi */
> +static void test_schedule_multi_mq_1t_prio_o(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_MULTI);
> +}
> +
> +/* Many queues many threads check priority ODP_SCHED_SYNC_NONE multi */
> +static void test_schedule_multi_mq_mt_prio_n(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       parallel_execute(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_MULTI, 0);
> +}
> +
> +/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC multi */
> +static void test_schedule_multi_mq_mt_prio_a(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       parallel_execute(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_MULTI, 0);
> +}
> +
> +/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED multi */
> +static void test_schedule_multi_mq_mt_prio_o(void)
> +{
> +       int prio = odp_schedule_num_prio();
> +       parallel_execute(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_MULTI, 0);
> +}
> +
> +/* 1 queue many threads check exclusive access on ATOMIC queues multi */
> +static void test_schedule_multi_1q_mt_a_excl(void)
> +{
> +       parallel_execute(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_MULTI,
> +                        ENABLE_EXCL_ATOMIC);
> +}
> +
> +static int create_queues(void)
> +{
> +       int i, j, prios;
> +
> +       prios = odp_schedule_num_prio();
> +
> +       for (i = 0; i < prios; i++) {
> +               odp_queue_param_t p;
> +               p.sched.prio  = i;
> +               p.sched.group = ODP_SCHED_GROUP_DEFAULT;
> +
> +               for (j = 0; j < QUEUES_PER_PRIO; j++) {
> +                       /* Per sched sync type */
> +                       char name[32];
> +                       odp_queue_t q;
> +
> +                       snprintf(name, sizeof(name), "sched_%d_%d_n", i,
> j);
> +                       p.sched.sync = ODP_SCHED_SYNC_NONE;
> +                       q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
> +
> +                       if (q == ODP_QUEUE_INVALID) {
> +                               printf("Schedule queue create failed.\n");
> +                               return -1;
> +                       }
> +
> +                       snprintf(name, sizeof(name), "sched_%d_%d_a", i,
> j);
> +                       p.sched.sync = ODP_SCHED_SYNC_ATOMIC;
> +                       q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
> +
> +                       if (q == ODP_QUEUE_INVALID) {
> +                               printf("Schedule queue create failed.\n");
> +                               return -1;
> +                       }
> +
> +                       snprintf(name, sizeof(name), "sched_%d_%d_o", i,
> j);
> +                       p.sched.sync = ODP_SCHED_SYNC_ORDERED;
> +                       q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
> +
> +                       if (q == ODP_QUEUE_INVALID) {
> +                               printf("Schedule queue create failed.\n");
> +                               return -1;
> +                       }
> +               }
> +       }
> +
> +       return 0;
> +}
> +
> +static int schd_suite_init(void)
> +{
> +       odp_shm_t shm;
> +       void *pool_base;
> +       odp_buffer_pool_t pool;
> +       test_globals_t *globals;
> +       thread_args_t *thr_args;
> +
> +       shm = odp_shm_reserve(SHM_MSG_POOL_NAME, MSG_POOL_SIZE,
> +                             ODP_CACHE_LINE_SIZE, 0);
> +       pool_base = odp_shm_addr(shm);
> +       if (pool_base == NULL) {
> +               printf("Shared memory reserve failed.\n");
> +               return -1;
> +       }
> +
> +       pool = odp_buffer_pool_create(MSG_POOL_NAME, pool_base, MSG_POOL_SIZE,
> +                                     BUF_SIZE, ODP_CACHE_LINE_SIZE,
> +                                     ODP_BUFFER_TYPE_RAW);
> +       if (pool == ODP_BUFFER_POOL_INVALID) {
> +               printf("Pool creation failed (msg).\n");
> +               return -1;
> +       }
> +
> +       shm = odp_shm_reserve(GLOBALS_SHM_NAME,
> +                             sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
> +
> +       globals = odp_shm_addr(shm);
> +
> +       if (globals == NULL) {
> +               printf("Shared memory reserve failed (globals).\n");
> +               return -1;
> +       }
> +
> +       memset(globals, 0, sizeof(test_globals_t));
> +
> +       globals->core_count = odp_sys_core_count();
> +       if (globals->core_count > MAX_WORKERS)
> +               globals->core_count = MAX_WORKERS;
> +
> +       shm = odp_shm_reserve(SHM_THR_ARGS_NAME, sizeof(thread_args_t),
> +                             ODP_CACHE_LINE_SIZE, 0);
> +       thr_args = odp_shm_addr(shm);
> +
> +       if (thr_args == NULL) {
> +               printf("Shared memory reserve failed (thr_args).\n");
> +               return -1;
> +       }
> +
> +       memset(thr_args, 0, sizeof(thread_args_t));
> +
> +       /* Barrier to sync test case execution */
> +       odp_barrier_init(&globals->barrier, globals->core_count);
> +       odp_ticketlock_init(&globals->count_lock);
> +       odp_spinlock_init(&globals->atomic_lock);
> +
> +       if (create_queues() != 0)
> +               return -1;
> +
> +       return 0;
> +}
> +
> +struct CU_TestInfo test_odp_schedule[] = {
> +       {"schedule_wait_time",          test_schedule_wait_time},
> +       {"schedule_num_prio",           test_schedule_num_prio},
> +       {"schedule_1q_1t_n",            test_schedule_1q_1t_n},
> +       {"schedule_1q_1t_a",            test_schedule_1q_1t_a},
> +       {"schedule_1q_1t_o",            test_schedule_1q_1t_o},
> +       {"schedule_mq_1t_n",            test_schedule_mq_1t_n},
> +       {"schedule_mq_1t_a",            test_schedule_mq_1t_a},
> +       {"schedule_mq_1t_o",            test_schedule_mq_1t_o},
> +       {"schedule_mq_1t_prio_n",       test_schedule_mq_1t_prio_n},
> +       {"schedule_mq_1t_prio_a",       test_schedule_mq_1t_prio_a},
> +       {"schedule_mq_1t_prio_o",       test_schedule_mq_1t_prio_o},
> +       {"schedule_mq_mt_prio_n",       test_schedule_mq_mt_prio_n},
> +       {"schedule_mq_mt_prio_a",       test_schedule_mq_mt_prio_a},
> +       {"schedule_mq_mt_prio_o",       test_schedule_mq_mt_prio_o},
> +       {"schedule_1q_mt_a_excl",       test_schedule_1q_mt_a_excl},
> +       {"schedule_multi_1q_1t_n",      test_schedule_multi_1q_1t_n},
> +       {"schedule_multi_1q_1t_a",      test_schedule_multi_1q_1t_a},
> +       {"schedule_multi_1q_1t_o",      test_schedule_multi_1q_1t_o},
> +       {"schedule_multi_mq_1t_n",      test_schedule_multi_mq_1t_n},
> +       {"schedule_multi_mq_1t_a",      test_schedule_multi_mq_1t_a},
> +       {"schedule_multi_mq_1t_o",      test_schedule_multi_mq_1t_o},
> +       {"schedule_multi_mq_1t_prio_n", test_schedule_multi_mq_1t_prio_n},
> +       {"schedule_multi_mq_1t_prio_a", test_schedule_multi_mq_1t_prio_a},
> +       {"schedule_multi_mq_1t_prio_o", test_schedule_multi_mq_1t_prio_o},
> +       {"schedule_multi_mq_mt_prio_n", test_schedule_multi_mq_mt_prio_n},
> +       {"schedule_multi_mq_mt_prio_a", test_schedule_multi_mq_mt_prio_a},
> +       {"schedule_multi_mq_mt_prio_o", test_schedule_multi_mq_mt_prio_o},
> +       {"schedule_multi_1q_mt_a_excl", test_schedule_multi_1q_mt_a_excl},
> +       CU_TEST_INFO_NULL,
> +};
> +
> +CU_SuiteInfo odp_testsuites[] = {
> +       {"Scheduler", schd_suite_init, NULL, NULL, NULL,
> test_odp_schedule},
> +       CU_SUITE_INFO_NULL,
> +};
> --
> 1.8.3.2
>
Maxim Uvarov Dec. 12, 2014, 4:15 p.m. UTC | #2
Merged,
Maxim.

Mike Holmes Dec. 12, 2014, 4:40 p.m. UTC | #3
That improved things :)

http://docs.opendataplane.org/linux-generic-gcov-html/linux-generic/odp_schedule.c.func.html

But still missing:

odp_schedule_one
<http://docs.opendataplane.org/linux-generic-gcov-html/linux-generic/odp_schedule.c.gcov.html#386>
odp_schedule_pause
<http://docs.opendataplane.org/linux-generic-gcov-html/linux-generic/odp_schedule.c.gcov.html#405>
odp_schedule_resume
<http://docs.opendataplane.org/linux-generic-gcov-html/linux-generic/odp_schedule.c.gcov.html#411>
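
For the pause/resume pair, a rough sketch of a case in the style of the
suite above (hypothetical: it assumes odp_schedule_pause() and
odp_schedule_resume() take no arguments and that the scheduled queues were
filled beforehand, e.g. via fill_queues(); odp_schedule_one would need a
similar case of its own):

static void test_schedule_pause_resume(void)
{
	odp_queue_t from;
	odp_buffer_t buf;

	/* Consume one buffer through the normal path first */
	buf = odp_schedule(&from, ODP_SCHED_WAIT);
	CU_ASSERT(buf != ODP_BUFFER_INVALID);
	odp_buffer_free(buf);

	/* Pause global scheduling on this thread; buffers already
	 * pre-scheduled locally may still be returned */
	odp_schedule_pause();
	buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
	if (buf != ODP_BUFFER_INVALID)
		odp_buffer_free(buf);

	/* Resume and check that scheduling proceeds again */
	odp_schedule_resume();
	buf = odp_schedule(&from, ODP_SCHED_WAIT);
	CU_ASSERT(buf != ODP_BUFFER_INVALID);
	odp_buffer_free(buf);
}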


On 12 December 2014 at 11:15, Maxim Uvarov <maxim.uvarov@linaro.org> wrote:
>
> Merged,
> Maxim.
>
>
> On 12/11/2014 06:43 PM, Ciprian Barbu wrote:
>
>> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
>> ---
>> v4:
>>   - fixes after Jerin's comments
>>   - removed tests_global_init and made it suite init function
>> v3:
>>   - changes after Mike's review
>>   - removed duplicate check of end of test in schedule_common_
>> v2:
>>   - rebased against ODP tip
>>   - fixed some bugs
>>   - added some defines to clearly see the testcase parameters
>>
>>   test/validation/.gitignore     |   1 +
>>   test/validation/Makefile.am    |   5 +-
>>   test/validation/odp_schedule.c | 607 ++++++++++++++++++++++++++++++
>> +++++++++++
>>   3 files changed, 612 insertions(+), 1 deletion(-)
>>   create mode 100644 test/validation/odp_schedule.c
>>
>> diff --git a/test/validation/.gitignore b/test/validation/.gitignore
>> index 37e2594..32834ae 100644
>> --- a/test/validation/.gitignore
>> +++ b/test/validation/.gitignore
>> @@ -3,4 +3,5 @@
>>   odp_init
>>   odp_queue
>>   odp_crypto
>> +odp_schedule
>>   odp_shm
>> diff --git a/test/validation/Makefile.am b/test/validation/Makefile.am
>> index 8547085..3670c76 100644
>> --- a/test/validation/Makefile.am
>> +++ b/test/validation/Makefile.am
>> @@ -6,13 +6,15 @@ AM_LDFLAGS += -static
>>   if ODP_CUNIT_ENABLED
>>   TESTS = ${bin_PROGRAMS}
>>   check_PROGRAMS = ${bin_PROGRAMS}
>> -bin_PROGRAMS = odp_init odp_queue odp_crypto odp_shm
>> +bin_PROGRAMS = odp_init odp_queue odp_crypto odp_shm odp_schedule
>>   odp_init_LDFLAGS = $(AM_LDFLAGS)
>>   odp_queue_LDFLAGS = $(AM_LDFLAGS)
>>   odp_crypto_CFLAGS = $(AM_CFLAGS) -I$(srcdir)/crypto
>>   odp_crypto_LDFLAGS = $(AM_LDFLAGS)
>>   odp_shm_CFLAGS = $(AM_CFLAGS)
>>   odp_shm_LDFLAGS = $(AM_LDFLAGS)
>> +odp_schedule_CFLAGS = $(AM_CFLAGS)
>> +odp_schedule_LDFLAGS = $(AM_LDFLAGS)
>>   endif
>>     dist_odp_init_SOURCES = odp_init.c
>> @@ -22,3 +24,4 @@ dist_odp_crypto_SOURCES = crypto/odp_crypto_test_async_inp.c
>> \
>>                           crypto/odp_crypto_test_rng.c \
>>                           odp_crypto.c common/odp_cunit_common.c
>>   dist_odp_shm_SOURCES = odp_shm.c common/odp_cunit_common.c
>> +dist_odp_schedule_SOURCES = odp_schedule.c common/odp_cunit_common.c
>> diff --git a/test/validation/odp_schedule.c b/test/validation/odp_
>> schedule.c
>> new file mode 100644
>> index 0000000..9d410e4
>> --- /dev/null
>> +++ b/test/validation/odp_schedule.c
>> @@ -0,0 +1,607 @@
>> +/* Copyright (c) 2014, Linaro Limited
>> + * All rights reserved.
>> + *
>> + * SPDX-License-Identifier:     BSD-3-Clause
>> + */
>> +
>> +#include <odp.h>
>> +#include "odp_cunit_common.h"
>> +
>> +#define MAX_WORKERS_THREADS    32
>> +#define MSG_POOL_SIZE          (4*1024*1024)
>> +#define QUEUES_PER_PRIO                16
>> +#define BUF_SIZE               64
>> +#define TEST_NUM_BUFS          100
>> +#define BURST_BUF_SIZE         4
>> +#define TEST_NUM_BUFS_EXCL     10000
>> +
>> +#define GLOBALS_SHM_NAME       "test_globals"
>> +#define MSG_POOL_NAME          "msg_pool"
>> +#define SHM_MSG_POOL_NAME      "shm_msg_pool"
>> +#define SHM_THR_ARGS_NAME      "shm_thr_args"
>> +
>> +#define ONE_Q                  1
>> +#define MANY_QS                        QUEUES_PER_PRIO
>> +
>> +#define ONE_PRIO               1
>> +
>> +#define SCHD_ONE               0
>> +#define SCHD_MULTI             1
>> +
>> +#define DISABLE_EXCL_ATOMIC    0
>> +#define ENABLE_EXCL_ATOMIC     1
>> +
>> +
>> +/* Test global variables */
>> +typedef struct {
>> +       int core_count;
>> +       odp_barrier_t barrier;
>> +       odp_schedule_prio_t current_prio;
>> +       int prio_buf_count;
>> +       odp_ticketlock_t count_lock;
>> +       odp_spinlock_t atomic_lock;
>> +} test_globals_t;
>> +
>> +typedef struct ODP_PACKED {
>> +       pthrd_arg thrdarg;
>> +       odp_schedule_sync_t sync;
>> +       int num_queues;
>> +       int num_prio;
>> +       int num_bufs;
>> +       int num_cores;
>> +       int enable_schd_multi;
>> +       int enable_excl_atomic;
>> +} thread_args_t;
>> +
>> +odp_buffer_pool_t pool;
>> +
>> +static void test_schedule_wait_time(void)
>> +{
>> +       uint64_t wait_time;
>> +
>> +       wait_time = odp_schedule_wait_time(0);
>> +
>> +       wait_time = odp_schedule_wait_time(1);
>> +       CU_ASSERT(wait_time > 0);
>> +
>> +       wait_time = odp_schedule_wait_time((uint64_t)-1LL);
>> +       CU_ASSERT(wait_time > 0);
>> +}
>> +
>> +static void test_schedule_num_prio(void)
>> +{
>> +       int prio;
>> +
>> +       prio = odp_schedule_num_prio();
>> +
>> +       CU_ASSERT(prio > 0);
>> +       CU_ASSERT(prio == odp_schedule_num_prio());
>> +}
>> +
>> +static void *schedule_common_(void *arg)
>> +{
>> +       thread_args_t *args = (thread_args_t *)arg;
>> +       odp_schedule_sync_t sync;
>> +       int num_queues, num_prio, num_bufs, num_cores;
>> +       odp_shm_t shm;
>> +       test_globals_t *globals;
>> +
>> +       sync = args->sync;
>> +       num_queues = args->num_queues;
>> +       num_prio = args->num_prio;
>> +       num_bufs = args->num_bufs;
>> +       num_cores = args->num_cores;
>> +
>> +       shm = odp_shm_lookup(GLOBALS_SHM_NAME);
>> +       CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
>> +       globals = odp_shm_addr(shm);
>> +       CU_ASSERT_FATAL(globals != NULL);
>> +
>> +
>> +       if (num_cores == globals->core_count)
>> +               odp_barrier_wait(&globals->barrier);
>> +
>> +       while (1) {
>> +               odp_buffer_t buf;
>> +               odp_queue_t from;
>> +               int num = 0;
>> +               int locked;
>> +
>> +               odp_ticketlock_lock(&globals->count_lock);
>> +               if (globals->prio_buf_count ==
>> +                   num_bufs * num_queues * num_prio) {
>> +                       odp_ticketlock_unlock(&globals->count_lock);
>> +                       break;
>> +               }
>> +               odp_ticketlock_unlock(&globals->count_lock);
>> +
>> +               if (args->enable_schd_multi) {
>> +                       odp_buffer_t bufs[BURST_BUF_SIZE];
>> +                       int j;
>> +                       num = odp_schedule_multi(&from,
>> ODP_SCHED_NO_WAIT, bufs,
>> +                                                BURST_BUF_SIZE);
>> +                       CU_ASSERT(num >= 0);
>> +                       CU_ASSERT(num <= BURST_BUF_SIZE);
>> +                       if (num == 0)
>> +                               continue;
>> +                       for (j = 0; j < num; j++)
>> +                               odp_buffer_free(bufs[j]);
>> +               } else {
>> +                       buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>> +                       if (buf == ODP_BUFFER_INVALID)
>> +                               continue;
>> +                       num = 1;
>> +                       odp_buffer_free(buf);
>> +               }
>> +
>> +               if (args->enable_excl_atomic) {
>> +                       locked = odp_spinlock_trylock(&globals-
>> >atomic_lock);
>> +                       CU_ASSERT(locked == 1);
>> +                       CU_ASSERT(from != ODP_QUEUE_INVALID);
>> +                       if (locked) {
>> +                               int cnt;
>> +                               uint64_t cycles = 0;
>> +                               /* Do some work here to keep the thread
>> busy */
>> +                               for (cnt = 0; cnt < 1000; cnt++)
>> +                                       cycles += odp_time_cycles();
>> +
>> +                               odp_spinlock_unlock(&globals->
>> atomic_lock);
>> +                       }
>> +               }
>> +
>> +               odp_ticketlock_lock(&globals->count_lock);
>> +               globals->prio_buf_count += num;
>> +
>> +               if (sync == ODP_SCHED_SYNC_ATOMIC)
>> +                       odp_schedule_release_atomic();
>> +
>> +               odp_ticketlock_unlock(&globals->count_lock);
>> +       }
>> +
>> +       return NULL;
>> +}
>> +
>> +static void fill_queues(thread_args_t *args)
>> +{
>> +       odp_schedule_sync_t sync;
>> +       int num_queues, num_prio;
>> +       odp_buffer_pool_t pool;
>> +       int i, j, k;
>> +       char name[32];
>> +
>> +       sync = args->sync;
>> +       num_queues = args->num_queues;
>> +       num_prio = args->num_prio;
>> +
>> +       pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
>> +       CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
>> +
>> +       for (i = 0; i < num_prio; i++) {
>> +               for (j = 0; j < num_queues; j++) {
>> +                       odp_queue_t queue;
>> +
>> +                       switch (sync) {
>> +                       case ODP_SCHED_SYNC_NONE:
>> +                               snprintf(name, sizeof(name),
>> +                                        "sched_%d_%d_n", i, j);
>> +                               break;
>> +                       case ODP_SCHED_SYNC_ATOMIC:
>> +                               snprintf(name, sizeof(name),
>> +                                        "sched_%d_%d_a", i, j);
>> +                               break;
>> +                       case ODP_SCHED_SYNC_ORDERED:
>> +                               snprintf(name, sizeof(name),
>> +                                        "sched_%d_%d_o", i, j);
>> +                               break;
>> +                       default:
>> +                               CU_ASSERT(0);
>> +                               break;
>> +                       }
>> +
>> +                       queue = odp_queue_lookup(name);
>> +                       CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
>> +
>> +                       for (k = 0; k < args->num_bufs; k++) {
>> +                               odp_buffer_t buf;
>> +                               buf = odp_buffer_alloc(pool);
>> +                               CU_ASSERT(buf != ODP_BUFFER_INVALID);
>> +                               CU_ASSERT(odp_queue_enq(queue, buf) == 0);
>> +                       }
>> +               }
>> +       }
>> +}
>> +
>> +static void schedule_common(odp_schedule_sync_t sync, int num_queues,
>> +                           int num_prio, int enable_schd_multi)
>> +{
>> +       thread_args_t args;
>> +       odp_shm_t shm;
>> +       test_globals_t *globals;
>> +
>> +       shm = odp_shm_lookup(GLOBALS_SHM_NAME);
>> +       CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
>> +       globals = odp_shm_addr(shm);
>> +       CU_ASSERT_FATAL(globals != NULL);
>> +
>> +       globals->current_prio = ODP_SCHED_PRIO_HIGHEST;
>> +       globals->prio_buf_count = 0;
>> +
>> +       args.sync = sync;
>> +       args.num_queues = num_queues;
>> +       args.num_prio = num_prio;
>> +       args.num_bufs = TEST_NUM_BUFS;
>> +       args.num_cores = 1;
>> +       args.enable_schd_multi = enable_schd_multi;
>> +       args.enable_excl_atomic = 0;    /* Not needed with a single core */
>> +
>> +       fill_queues(&args);
>> +
>> +       schedule_common_(&args);
>> +}
>> +
>> +static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
>> +                            int num_prio, int enable_schd_multi,
>> +                            int enable_excl_atomic)
>> +{
>> +       odp_shm_t shm;
>> +       test_globals_t *globals;
>> +       thread_args_t *thr_args;
>> +
>> +       shm = odp_shm_lookup(GLOBALS_SHM_NAME);
>> +       CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
>> +       globals = odp_shm_addr(shm);
>> +       CU_ASSERT_FATAL(globals != NULL);
>> +
>> +       shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
>> +       CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
>> +       thr_args = odp_shm_addr(shm);
>> +       CU_ASSERT_FATAL(thr_args != NULL);
>> +
>> +       thr_args->sync = sync;
>> +       thr_args->num_queues = num_queues;
>> +       thr_args->num_prio = num_prio;
>> +       if (enable_excl_atomic)
>> +               thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
>> +       else
>> +               thr_args->num_bufs = TEST_NUM_BUFS;
>> +       thr_args->num_cores = globals->core_count;
>> +       thr_args->enable_schd_multi = enable_schd_multi;
>> +       thr_args->enable_excl_atomic = enable_excl_atomic;
>> +
>> +       fill_queues(thr_args);
>> +
>> +       /* Reset buffer counters from the main thread */
>> +       globals->current_prio = ODP_SCHED_PRIO_HIGHEST;
>> +       globals->prio_buf_count = 0;
>> +
>> +       /* Create and launch worker threads */
>> +       thr_args->thrdarg.numthrds = globals->core_count;
>> +       odp_cunit_thread_create(schedule_common_, &thr_args->thrdarg);
>> +
>> +       /* Wait for worker threads to terminate */
>> +       odp_cunit_thread_exit(&thr_args->thrdarg);
>> +}
>> +
>> +/* 1 queue 1 thread ODP_SCHED_SYNC_NONE */
>> +static void test_schedule_1q_1t_n(void)
>> +{
>> +       schedule_common(ODP_SCHED_SYNC_NONE, ONE_Q, ONE_PRIO, SCHD_ONE);
>> +}
>> +
>> +/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC */
>> +static void test_schedule_1q_1t_a(void)
>> +{
>> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_ONE);
>> +}
>> +
>> +/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED */
>> +static void test_schedule_1q_1t_o(void)
>> +{
>> +       schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO, SCHD_ONE);
>> +}
>> +
>> +/* Many queues 1 thread ODP_SCHED_SYNC_NONE */
>> +static void test_schedule_mq_1t_n(void)
>> +{
>> +       /* Only one priority involved in these tests, but use
>> +          the same number of queues the more general case uses */
>> +       schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, ONE_PRIO, SCHD_ONE);
>> +}
>> +
>> +/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC */
>> +static void test_schedule_mq_1t_a(void)
>> +{
>> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, ONE_PRIO, SCHD_ONE);
>> +}
>> +
>> +/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED */
>> +static void test_schedule_mq_1t_o(void)
>> +{
>> +       schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO, SCHD_ONE);
>> +}
>> +
>> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_NONE */
>> +static void test_schedule_mq_1t_prio_n(void)
>> +{
>> +       int prio = odp_schedule_num_prio();
>> +       schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_ONE);
>> +}
>> +
>> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC */
>> +static void test_schedule_mq_1t_prio_a(void)
>> +{
>> +       int prio = odp_schedule_num_prio();
>> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_ONE);
>> +}
>> +
>> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED */
>> +static void test_schedule_mq_1t_prio_o(void)
>> +{
>> +       int prio = odp_schedule_num_prio();
>> +       schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_ONE);
>> +}
>> +
>> +/* Many queues many threads check priority ODP_SCHED_SYNC_NONE */
>> +static void test_schedule_mq_mt_prio_n(void)
>> +{
>> +       int prio = odp_schedule_num_prio();
>> +       parallel_execute(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_ONE,
>> +                        DISABLE_EXCL_ATOMIC);
>> +}
>> +
>> +/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC */
>> +static void test_schedule_mq_mt_prio_a(void)
>> +{
>> +       int prio = odp_schedule_num_prio();
>> +       parallel_execute(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_ONE,
>> +                        DISABLE_EXCL_ATOMIC);
>> +}
>> +
>> +/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED */
>> +static void test_schedule_mq_mt_prio_o(void)
>> +{
>> +       int prio = odp_schedule_num_prio();
>> +       parallel_execute(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_ONE,
>> +                        DISABLE_EXCL_ATOMIC);
>> +}
>> +
>> +/* 1 queue many threads check exclusive access on ATOMIC queues */
>> +static void test_schedule_1q_mt_a_excl(void)
>> +{
>> +       parallel_execute(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_ONE,
>> +                        ENABLE_EXCL_ATOMIC);
>> +}
>> +
>> +/* 1 queue 1 thread ODP_SCHED_SYNC_NONE multi */
>> +static void test_schedule_multi_1q_1t_n(void)
>> +{
>> +       schedule_common(ODP_SCHED_SYNC_NONE, ONE_Q, ONE_PRIO, SCHD_MULTI);
>> +}
>> +
>> +/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC multi */
>> +static void test_schedule_multi_1q_1t_a(void)
>> +{
>> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_MULTI);
>> +}
>> +
>> +/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED multi */
>> +static void test_schedule_multi_1q_1t_o(void)
>> +{
>> +       schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO, SCHD_MULTI);
>> +}
>> +
>> +/* Many queues 1 thread ODP_SCHED_SYNC_NONE multi */
>> +static void test_schedule_multi_mq_1t_n(void)
>> +{
>> +       /* Only one priority involved in these tests, but use
>> +          the same number of queues the more general case uses */
>> +       schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, ONE_PRIO, SCHD_MULTI);
>> +}
>> +
>> +/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC multi */
>> +static void test_schedule_multi_mq_1t_a(void)
>> +{
>> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, ONE_PRIO, SCHD_MULTI);
>> +}
>> +
>> +/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED multi */
>> +static void test_schedule_multi_mq_1t_o(void)
>> +{
>> +       schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO, SCHD_MULTI);
>> +}
>> +
>> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_NONE multi */
>> +static void test_schedule_multi_mq_1t_prio_n(void)
>> +{
>> +       int prio = odp_schedule_num_prio();
>> +       schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_MULTI);
>> +}
>> +
>> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC multi */
>> +static void test_schedule_multi_mq_1t_prio_a(void)
>> +{
>> +       int prio = odp_schedule_num_prio();
>> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_MULTI);
>> +}
>> +
>> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED multi */
>> +static void test_schedule_multi_mq_1t_prio_o(void)
>> +{
>> +       int prio = odp_schedule_num_prio();
>> +       schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_MULTI);
>> +}
>> +
>> +/* Many queues many threads check priority ODP_SCHED_SYNC_NONE multi */
>> +static void test_schedule_multi_mq_mt_prio_n(void)
>> +{
>> +       int prio = odp_schedule_num_prio();
>> +       parallel_execute(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_MULTI, 0);
>> +}
>> +
>> +/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC multi */
>> +static void test_schedule_multi_mq_mt_prio_a(void)
>> +{
>> +       int prio = odp_schedule_num_prio();
>> +       parallel_execute(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_MULTI, 0);
>> +}
>> +
>> +/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED multi */
>> +static void test_schedule_multi_mq_mt_prio_o(void)
>> +{
>> +       int prio = odp_schedule_num_prio();
>> +       parallel_execute(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_MULTI, 0);
>> +}
>> +
>> +/* 1 queue many threads check exclusive access on ATOMIC queues multi */
>> +static void test_schedule_multi_1q_mt_a_excl(void)
>> +{
>> +       parallel_execute(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_MULTI,
>> +                        ENABLE_EXCL_ATOMIC);
>> +}
>> +
>> +static int create_queues(void)
>> +{
>> +       int i, j, prios;
>> +
>> +       prios = odp_schedule_num_prio();
>> +
>> +       for (i = 0; i < prios; i++) {
>> +               odp_queue_param_t p;
>> +               p.sched.prio  = i;
>> +               p.sched.group = ODP_SCHED_GROUP_DEFAULT;
>> +
>> +               for (j = 0; j < QUEUES_PER_PRIO; j++) {
>> +                       /* Per sched sync type */
>> +                       char name[32];
>> +                       odp_queue_t q;
>> +
>> +                       snprintf(name, sizeof(name), "sched_%d_%d_n", i,
>> j);
>> +                       p.sched.sync = ODP_SCHED_SYNC_NONE;
>> +                       q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
>> +
>> +                       if (q == ODP_QUEUE_INVALID) {
>> +                               printf("Schedule queue create failed.\n");
>> +                               return -1;
>> +                       }
>> +
>> +                       snprintf(name, sizeof(name), "sched_%d_%d_a", i,
>> j);
>> +                       p.sched.sync = ODP_SCHED_SYNC_ATOMIC;
>> +                       q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
>> +
>> +                       if (q == ODP_QUEUE_INVALID) {
>> +                               printf("Schedule queue create failed.\n");
>> +                               return -1;
>> +                       }
>> +
>> +                       snprintf(name, sizeof(name), "sched_%d_%d_o", i,
>> j);
>> +                       p.sched.sync = ODP_SCHED_SYNC_ORDERED;
>> +                       q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
>> +
>> +                       if (q == ODP_QUEUE_INVALID) {
>> +                               printf("Schedule queue create failed.\n");
>> +                               return -1;
>> +                       }
>> +               }
>> +       }
>> +
>> +       return 0;
>> +}
>> +
>> +static int schd_suite_init(void)
>> +{
>> +       odp_shm_t shm;
>> +       void *pool_base;
>> +       odp_buffer_pool_t pool;
>> +       test_globals_t *globals;
>> +       thread_args_t *thr_args;
>> +
>> +       shm = odp_shm_reserve(SHM_MSG_POOL_NAME, MSG_POOL_SIZE,
>> +                             ODP_CACHE_LINE_SIZE, 0);
>> +       pool_base = odp_shm_addr(shm);
>> +       if (pool_base == NULL) {
>> +               printf("Shared memory reserve failed.\n");
>> +               return -1;
>> +       }
>> +
>> +       pool = odp_buffer_pool_create(MSG_POOL_NAME, pool_base, MSG_POOL_SIZE,
>> +                                     BUF_SIZE, ODP_CACHE_LINE_SIZE,
>> +                                     ODP_BUFFER_TYPE_RAW);
>> +       if (pool == ODP_BUFFER_POOL_INVALID) {
>> +               printf("Pool creation failed (msg).\n");
>> +               return -1;
>> +       }
>> +
>> +       shm = odp_shm_reserve(GLOBALS_SHM_NAME,
>> +                             sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
>> +
>> +       globals = odp_shm_addr(shm);
>> +
>> +       if (globals == NULL) {
>> +               printf("Shared memory reserve failed (globals).\n");
>> +               return -1;
>> +       }
>> +
>> +       memset(globals, 0, sizeof(test_globals_t));
>> +
>> +       globals->core_count = odp_sys_core_count();
>> +       if (globals->core_count > MAX_WORKERS)
>> +               globals->core_count = MAX_WORKERS;
>> +
>> +       shm = odp_shm_reserve(SHM_THR_ARGS_NAME, sizeof(thread_args_t),
>> +                             ODP_CACHE_LINE_SIZE, 0);
>> +       thr_args = odp_shm_addr(shm);
>> +
>> +       if (thr_args == NULL) {
>> +               printf("Shared memory reserve failed (thr_args).\n");
>> +               return -1;
>> +       }
>> +
>> +       memset(thr_args, 0, sizeof(thread_args_t));
>> +
>> +       /* Barrier to sync test case execution */
>> +       odp_barrier_init(&globals->barrier, globals->core_count);
>> +       odp_ticketlock_init(&globals->count_lock);
>> +       odp_spinlock_init(&globals->atomic_lock);
>> +
>> +       if (create_queues() != 0)
>> +               return -1;
>> +
>> +       return 0;
>> +}
>> +
>> +struct CU_TestInfo test_odp_schedule[] = {
>> +       {"schedule_wait_time",          test_schedule_wait_time},
>> +       {"schedule_num_prio",           test_schedule_num_prio},
>> +       {"schedule_1q_1t_n",            test_schedule_1q_1t_n},
>> +       {"schedule_1q_1t_a",            test_schedule_1q_1t_a},
>> +       {"schedule_1q_1t_o",            test_schedule_1q_1t_o},
>> +       {"schedule_mq_1t_n",            test_schedule_mq_1t_n},
>> +       {"schedule_mq_1t_a",            test_schedule_mq_1t_a},
>> +       {"schedule_mq_1t_o",            test_schedule_mq_1t_o},
>> +       {"schedule_mq_1t_prio_n",       test_schedule_mq_1t_prio_n},
>> +       {"schedule_mq_1t_prio_a",       test_schedule_mq_1t_prio_a},
>> +       {"schedule_mq_1t_prio_o",       test_schedule_mq_1t_prio_o},
>> +       {"schedule_mq_mt_prio_n",       test_schedule_mq_mt_prio_n},
>> +       {"schedule_mq_mt_prio_a",       test_schedule_mq_mt_prio_a},
>> +       {"schedule_mq_mt_prio_o",       test_schedule_mq_mt_prio_o},
>> +       {"schedule_1q_mt_a_excl",       test_schedule_1q_mt_a_excl},
>> +       {"schedule_multi_1q_1t_n",      test_schedule_multi_1q_1t_n},
>> +       {"schedule_multi_1q_1t_a",      test_schedule_multi_1q_1t_a},
>> +       {"schedule_multi_1q_1t_o",      test_schedule_multi_1q_1t_o},
>> +       {"schedule_multi_mq_1t_n",      test_schedule_multi_mq_1t_n},
>> +       {"schedule_multi_mq_1t_a",      test_schedule_multi_mq_1t_a},
>> +       {"schedule_multi_mq_1t_o",      test_schedule_multi_mq_1t_o},
>> +       {"schedule_multi_mq_1t_prio_n", test_schedule_multi_mq_1t_
>> prio_n},
>> +       {"schedule_multi_mq_1t_prio_a", test_schedule_multi_mq_1t_
>> prio_a},
>> +       {"schedule_multi_mq_1t_prio_o", test_schedule_multi_mq_1t_
>> prio_o},
>> +       {"schedule_multi_mq_mt_prio_n", test_schedule_multi_mq_mt_
>> prio_n},
>> +       {"schedule_multi_mq_mt_prio_a", test_schedule_multi_mq_mt_
>> prio_a},
>> +       {"schedule_multi_mq_mt_prio_o", test_schedule_multi_mq_mt_
>> prio_o},
>> +       {"schedule_multi_1q_mt_a_excl", test_schedule_multi_1q_mt_a_
>> excl},
>> +       CU_TEST_INFO_NULL,
>> +};
>> +
>> +CU_SuiteInfo odp_testsuites[] = {
>> +       {"Scheduler", schd_suite_init, NULL, NULL, NULL,
>> test_odp_schedule},
>> +       CU_SUITE_INFO_NULL,
>> +};
>>
Bill Fischofer Dec. 12, 2014, 4:43 p.m. UTC | #4
I recall Petri indicated that odp_schedule_one() was being dropped from
v1.0.

On Fri, Dec 12, 2014 at 10:40 AM, Mike Holmes <mike.holmes@linaro.org>
wrote:
>
> That improved things :)
>
>
> http://docs.opendataplane.org/linux-generic-gcov-html/linux-generic/odp_schedule.c.func.html
>
> But still missing:
>
> odp_schedule_one
> <http://docs.opendataplane.org/linux-generic-gcov-html/linux-generic/odp_schedule.c.gcov.html#386>
> odp_schedule_pause
> <http://docs.opendataplane.org/linux-generic-gcov-html/linux-generic/odp_schedule.c.gcov.html#405>
> odp_schedule_resume
> <http://docs.opendataplane.org/linux-generic-gcov-html/linux-generic/odp_schedule.c.gcov.html#411>
>
>
> On 12 December 2014 at 11:15, Maxim Uvarov <maxim.uvarov@linaro.org>
> wrote:
>>
>> Merged,
>> Maxim.
>>
>
> --
> *Mike Holmes*
> Linaro  Sr Technical Manager
> LNG - ODP
>
Mike Holmes Dec. 12, 2014, 4:45 p.m. UTC | #5
I just checked the Delta doc; you are correct. I will send a patch for that.
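
That still leaves odp_schedule_pause/odp_schedule_resume without coverage. A
case along the following lines could be a starting point -- only a sketch,
untested: it reuses the pool and one of the queues that schd_suite_init
already creates, and it assumes the intended semantics that a paused thread
may still receive events already pre-scheduled to it locally, while
odp_schedule_resume() makes the remaining ones schedulable again
(ODP_SCHED_WAIT blocking until an event is available):

static void test_schedule_pause_resume(void)
{
        odp_queue_t queue, from;
        odp_buffer_pool_t pool;
        odp_buffer_t buf;
        int i, num = 0;

        /* One of the queues created by create_queues() in suite init */
        queue = odp_queue_lookup("sched_0_0_n");
        CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);

        pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
        CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);

        for (i = 0; i < TEST_NUM_BUFS; i++) {
                buf = odp_buffer_alloc(pool);
                CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
                CU_ASSERT(odp_queue_enq(queue, buf) == 0);
        }

        odp_schedule_pause();

        /* Drain whatever was already pre-scheduled to this thread */
        while ((buf = odp_schedule(&from, ODP_SCHED_NO_WAIT))
               != ODP_BUFFER_INVALID) {
                num++;
                odp_buffer_free(buf);
        }

        odp_schedule_resume();

        /* After resume the rest of the buffers must show up again */
        for (; num < TEST_NUM_BUFS; num++) {
                buf = odp_schedule(&from, ODP_SCHED_WAIT);
                CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
                odp_buffer_free(buf);
        }
}

The defines and the queue name come straight from odp_schedule.c above, so a
case like this would slot directly into the test_odp_schedule[] table.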

On 12 December 2014 at 11:43, Bill Fischofer <bill.fischofer@linaro.org>
wrote:
>
> I recall Petri indicated that odp_schedule_one() was being dropped from
> v1.0.
>
> On Fri, Dec 12, 2014 at 10:40 AM, Mike Holmes <mike.holmes@linaro.org>
> wrote:
>>
>> That improved things :)
>>
>>
>> http://docs.opendataplane.org/linux-generic-gcov-html/linux-generic/odp_schedule.c.func.html
>>
>> But still missing:
>>
>> odp_schedule_one
>> <http://docs.opendataplane.org/linux-generic-gcov-html/linux-generic/odp_schedule.c.gcov.html#386>
>> odp_schedule_pause
>> <http://docs.opendataplane.org/linux-generic-gcov-html/linux-generic/odp_schedule.c.gcov.html#405>
>> odp_schedule_resume
>> <http://docs.opendataplane.org/linux-generic-gcov-html/linux-generic/odp_schedule.c.gcov.html#411>
>>
>>
>> On 12 December 2014 at 11:15, Maxim Uvarov <maxim.uvarov@linaro.org>
>> wrote:
>>>
>>> Merged,
>>> Maxim.
>>>
>>>
>>> On 12/11/2014 06:43 PM, Ciprian Barbu wrote:
>>>
>>>> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
>>>> ---
>>>> v4:
>>>>   - fixes after Jerin's comments
>>>>   - removed tests_global_init and made it suite init function
>>>> v3:
>>>>   - changes after Mike's review
>>>>   - removed duplicate check of end of test in schedule_common_
>>>> v2:
>>>>   - rebased against ODP tip
>>>>   - fixed some bugs
>>>>   - added some defines to clearly see the testcase parameters
>>>>
>>>>   test/validation/.gitignore     |   1 +
>>>>   test/validation/Makefile.am    |   5 +-
>>>>   test/validation/odp_schedule.c | 607 +++++++++++++++++++++++++++++++++++++++++
>>>>   3 files changed, 612 insertions(+), 1 deletion(-)
>>>>   create mode 100644 test/validation/odp_schedule.c
>>>>
>>>> diff --git a/test/validation/.gitignore b/test/validation/.gitignore
>>>> index 37e2594..32834ae 100644
>>>> --- a/test/validation/.gitignore
>>>> +++ b/test/validation/.gitignore
>>>> @@ -3,4 +3,5 @@
>>>>   odp_init
>>>>   odp_queue
>>>>   odp_crypto
>>>> +odp_schedule
>>>>   odp_shm
>>>> diff --git a/test/validation/Makefile.am b/test/validation/Makefile.am
>>>> index 8547085..3670c76 100644
>>>> --- a/test/validation/Makefile.am
>>>> +++ b/test/validation/Makefile.am
>>>> @@ -6,13 +6,15 @@ AM_LDFLAGS += -static
>>>>   if ODP_CUNIT_ENABLED
>>>>   TESTS = ${bin_PROGRAMS}
>>>>   check_PROGRAMS = ${bin_PROGRAMS}
>>>> -bin_PROGRAMS = odp_init odp_queue odp_crypto odp_shm
>>>> +bin_PROGRAMS = odp_init odp_queue odp_crypto odp_shm odp_schedule
>>>>   odp_init_LDFLAGS = $(AM_LDFLAGS)
>>>>   odp_queue_LDFLAGS = $(AM_LDFLAGS)
>>>>   odp_crypto_CFLAGS = $(AM_CFLAGS) -I$(srcdir)/crypto
>>>>   odp_crypto_LDFLAGS = $(AM_LDFLAGS)
>>>>   odp_shm_CFLAGS = $(AM_CFLAGS)
>>>>   odp_shm_LDFLAGS = $(AM_LDFLAGS)
>>>> +odp_schedule_CFLAGS = $(AM_CFLAGS)
>>>> +odp_schedule_LDFLAGS = $(AM_LDFLAGS)
>>>>   endif
>>>>     dist_odp_init_SOURCES = odp_init.c
>>>> @@ -22,3 +24,4 @@ dist_odp_crypto_SOURCES = crypto/odp_crypto_test_async_inp.c \
>>>>                           crypto/odp_crypto_test_rng.c \
>>>>                           odp_crypto.c common/odp_cunit_common.c
>>>>   dist_odp_shm_SOURCES = odp_shm.c common/odp_cunit_common.c
>>>> +dist_odp_schedule_SOURCES = odp_schedule.c common/odp_cunit_common.c
>>>> diff --git a/test/validation/odp_schedule.c b/test/validation/odp_schedule.c
>>>> new file mode 100644
>>>> index 0000000..9d410e4
>>>> --- /dev/null
>>>> +++ b/test/validation/odp_schedule.c
>>>> @@ -0,0 +1,607 @@
>>>> +/* Copyright (c) 2014, Linaro Limited
>>>> + * All rights reserved.
>>>> + *
>>>> + * SPDX-License-Identifier:     BSD-3-Clause
>>>> + */
>>>> +
>>>> +#include <odp.h>
>>>> +#include "odp_cunit_common.h"
>>>> +
>>>> +#define MAX_WORKERS_THREADS    32
>>>> +#define MSG_POOL_SIZE          (4*1024*1024)
>>>> +#define QUEUES_PER_PRIO                16
>>>> +#define BUF_SIZE               64
>>>> +#define TEST_NUM_BUFS          100
>>>> +#define BURST_BUF_SIZE         4
>>>> +#define TEST_NUM_BUFS_EXCL     10000
>>>> +
>>>> +#define GLOBALS_SHM_NAME       "test_globals"
>>>> +#define MSG_POOL_NAME          "msg_pool"
>>>> +#define SHM_MSG_POOL_NAME      "shm_msg_pool"
>>>> +#define SHM_THR_ARGS_NAME      "shm_thr_args"
>>>> +
>>>> +#define ONE_Q                  1
>>>> +#define MANY_QS                        QUEUES_PER_PRIO
>>>> +
>>>> +#define ONE_PRIO               1
>>>> +
>>>> +#define SCHD_ONE               0
>>>> +#define SCHD_MULTI             1
>>>> +
>>>> +#define DISABLE_EXCL_ATOMIC    0
>>>> +#define ENABLE_EXCL_ATOMIC     1
>>>> +
>>>> +
>>>> +/* Test global variables */
>>>> +typedef struct {
>>>> +       int core_count;
>>>> +       odp_barrier_t barrier;
>>>> +       odp_schedule_prio_t current_prio;
>>>> +       int prio_buf_count;
>>>> +       odp_ticketlock_t count_lock;
>>>> +       odp_spinlock_t atomic_lock;
>>>> +} test_globals_t;
>>>> +
>>>> +typedef struct ODP_PACKED {
>>>> +       pthrd_arg thrdarg;
>>>> +       odp_schedule_sync_t sync;
>>>> +       int num_queues;
>>>> +       int num_prio;
>>>> +       int num_bufs;
>>>> +       int num_cores;
>>>> +       int enable_schd_multi;
>>>> +       int enable_excl_atomic;
>>>> +} thread_args_t;
>>>> +
>>>> +odp_buffer_pool_t pool;
>>>> +
>>>> +static void test_schedule_wait_time(void)
>>>> +{
>>>> +       uint64_t wait_time;
>>>> +
>>>> +       wait_time = odp_schedule_wait_time(0);
>>>> +
>>>> +       wait_time = odp_schedule_wait_time(1);
>>>> +       CU_ASSERT(wait_time > 0);
>>>> +
>>>> +       wait_time = odp_schedule_wait_time((uint64_t)-1LL);
>>>> +       CU_ASSERT(wait_time > 0);
>>>> +}
>>>> +
>>>> +static void test_schedule_num_prio(void)
>>>> +{
>>>> +       int prio;
>>>> +
>>>> +       prio = odp_schedule_num_prio();
>>>> +
>>>> +       CU_ASSERT(prio > 0);
>>>> +       CU_ASSERT(prio == odp_schedule_num_prio());
>>>> +}
>>>> +
>>>> +static void *schedule_common_(void *arg)
>>>> +{
>>>> +       thread_args_t *args = (thread_args_t *)arg;
>>>> +       odp_schedule_sync_t sync;
>>>> +       int num_queues, num_prio, num_bufs, num_cores;
>>>> +       odp_shm_t shm;
>>>> +       test_globals_t *globals;
>>>> +
>>>> +       sync = args->sync;
>>>> +       num_queues = args->num_queues;
>>>> +       num_prio = args->num_prio;
>>>> +       num_bufs = args->num_bufs;
>>>> +       num_cores = args->num_cores;
>>>> +
>>>> +       shm = odp_shm_lookup(GLOBALS_SHM_NAME);
>>>> +       CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
>>>> +       globals = odp_shm_addr(shm);
>>>> +       CU_ASSERT_FATAL(globals != NULL);
>>>> +
>>>> +
>>>> +       if (num_cores == globals->core_count)
>>>> +               odp_barrier_wait(&globals->barrier);
>>>> +
>>>> +       while (1) {
>>>> +               odp_buffer_t buf;
>>>> +               odp_queue_t from;
>>>> +               int num = 0;
>>>> +               int locked;
>>>> +
>>>> +               odp_ticketlock_lock(&globals->count_lock);
>>>> +               if (globals->prio_buf_count ==
>>>> +                   num_bufs * num_queues * num_prio) {
>>>> +                       odp_ticketlock_unlock(&globals->count_lock);
>>>> +                       break;
>>>> +               }
>>>> +               odp_ticketlock_unlock(&globals->count_lock);
>>>> +
>>>> +               if (args->enable_schd_multi) {
>>>> +                       odp_buffer_t bufs[BURST_BUF_SIZE];
>>>> +                       int j;
>>>> +                       num = odp_schedule_multi(&from, ODP_SCHED_NO_WAIT, bufs,
>>>> +                                                BURST_BUF_SIZE);
>>>> +                       CU_ASSERT(num >= 0);
>>>> +                       CU_ASSERT(num <= BURST_BUF_SIZE);
>>>> +                       if (num == 0)
>>>> +                               continue;
>>>> +                       for (j = 0; j < num; j++)
>>>> +                               odp_buffer_free(bufs[j]);
>>>> +               } else {
>>>> +                       buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>>>> +                       if (buf == ODP_BUFFER_INVALID)
>>>> +                               continue;
>>>> +                       num = 1;
>>>> +                       odp_buffer_free(buf);
>>>> +               }
>>>> +
>>>> +               if (args->enable_excl_atomic) {
>>>> +                       locked = odp_spinlock_trylock(&globals->atomic_lock);
>>>> +                       CU_ASSERT(locked == 1);
>>>> +                       CU_ASSERT(from != ODP_QUEUE_INVALID);
>>>> +                       if (locked) {
>>>> +                               int cnt;
>>>> +                               uint64_t cycles = 0;
>>>> +                               /* Do some work here to keep the thread busy */
>>>> +                               for (cnt = 0; cnt < 1000; cnt++)
>>>> +                                       cycles += odp_time_cycles();
>>>> +
>>>> +                               odp_spinlock_unlock(&globals->atomic_lock);
>>>> +                       }
>>>> +               }
>>>> +
>>>> +               odp_ticketlock_lock(&globals->count_lock);
>>>> +               globals->prio_buf_count += num;
>>>> +
>>>> +               if (sync == ODP_SCHED_SYNC_ATOMIC)
>>>> +                       odp_schedule_release_atomic();
>>>> +
>>>> +               odp_ticketlock_unlock(&globals->count_lock);
>>>> +       }
>>>> +
>>>> +       return NULL;
>>>> +}
>>>> +
>>>> +static void fill_queues(thread_args_t *args)
>>>> +{
>>>> +       odp_schedule_sync_t sync;
>>>> +       int num_queues, num_prio;
>>>> +       odp_buffer_pool_t pool;
>>>> +       int i, j, k;
>>>> +       char name[32];
>>>> +
>>>> +       sync = args->sync;
>>>> +       num_queues = args->num_queues;
>>>> +       num_prio = args->num_prio;
>>>> +
>>>> +       pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
>>>> +       CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
>>>> +
>>>> +       for (i = 0; i < num_prio; i++) {
>>>> +               for (j = 0; j < num_queues; j++) {
>>>> +                       odp_queue_t queue;
>>>> +
>>>> +                       switch (sync) {
>>>> +                       case ODP_SCHED_SYNC_NONE:
>>>> +                               snprintf(name, sizeof(name),
>>>> +                                        "sched_%d_%d_n", i, j);
>>>> +                               break;
>>>> +                       case ODP_SCHED_SYNC_ATOMIC:
>>>> +                               snprintf(name, sizeof(name),
>>>> +                                        "sched_%d_%d_a", i, j);
>>>> +                               break;
>>>> +                       case ODP_SCHED_SYNC_ORDERED:
>>>> +                               snprintf(name, sizeof(name),
>>>> +                                        "sched_%d_%d_o", i, j);
>>>> +                               break;
>>>> +                       default:
>>>> +                               CU_ASSERT(0);
>>>> +                               break;
>>>> +                       }
>>>> +
>>>> +                       queue = odp_queue_lookup(name);
>>>> +                       CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
>>>> +
>>>> +                       for (k = 0; k < args->num_bufs; k++) {
>>>> +                               odp_buffer_t buf;
>>>> +                               buf = odp_buffer_alloc(pool);
>>>> +                               CU_ASSERT(buf != ODP_BUFFER_INVALID);
>>>> +                               CU_ASSERT(odp_queue_enq(queue, buf) == 0);
>>>> +                       }
>>>> +               }
>>>> +       }
>>>> +}
>>>> +
>>>> +static void schedule_common(odp_schedule_sync_t sync, int num_queues,
>>>> +                           int num_prio, int enable_schd_multi)
>>>> +{
>>>> +       thread_args_t args;
>>>> +       odp_shm_t shm;
>>>> +       test_globals_t *globals;
>>>> +
>>>> +       shm = odp_shm_lookup(GLOBALS_SHM_NAME);
>>>> +       CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
>>>> +       globals = odp_shm_addr(shm);
>>>> +       CU_ASSERT_FATAL(globals != NULL);
>>>> +
>>>> +       globals->current_prio = ODP_SCHED_PRIO_HIGHEST;
>>>> +       globals->prio_buf_count = 0;
>>>> +
>>>> +       args.sync = sync;
>>>> +       args.num_queues = num_queues;
>>>> +       args.num_prio = num_prio;
>>>> +       args.num_bufs = TEST_NUM_BUFS;
>>>> +       args.num_cores = 1;
>>>> +       args.enable_schd_multi = enable_schd_multi;
>>>> +       args.enable_excl_atomic = 0;    /* Not needed with a single core */
>>>> +
>>>> +       fill_queues(&args);
>>>> +
>>>> +       schedule_common_(&args);
>>>> +}
>>>> +
>>>> +static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
>>>> +                            int num_prio, int enable_schd_multi,
>>>> +                            int enable_excl_atomic)
>>>> +{
>>>> +       odp_shm_t shm;
>>>> +       test_globals_t *globals;
>>>> +       thread_args_t *thr_args;
>>>> +
>>>> +       shm = odp_shm_lookup(GLOBALS_SHM_NAME);
>>>> +       CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
>>>> +       globals = odp_shm_addr(shm);
>>>> +       CU_ASSERT_FATAL(globals != NULL);
>>>> +
>>>> +       shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
>>>> +       CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
>>>> +       thr_args = odp_shm_addr(shm);
>>>> +       CU_ASSERT_FATAL(thr_args != NULL);
>>>> +
>>>> +       thr_args->sync = sync;
>>>> +       thr_args->num_queues = num_queues;
>>>> +       thr_args->num_prio = num_prio;
>>>> +       if (enable_excl_atomic)
>>>> +               thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
>>>> +       else
>>>> +               thr_args->num_bufs = TEST_NUM_BUFS;
>>>> +       thr_args->num_cores = globals->core_count;
>>>> +       thr_args->enable_schd_multi = enable_schd_multi;
>>>> +       thr_args->enable_excl_atomic = enable_excl_atomic;
>>>> +
>>>> +       fill_queues(thr_args);
>>>> +
>>>> +       /* Reset buffer counters from the main thread */
>>>> +       globals->current_prio = ODP_SCHED_PRIO_HIGHEST;
>>>> +       globals->prio_buf_count = 0;
>>>> +
>>>> +       /* Create and launch worker threads */
>>>> +       thr_args->thrdarg.numthrds = globals->core_count;
>>>> +       odp_cunit_thread_create(schedule_common_, &thr_args->thrdarg);
>>>> +
>>>> +       /* Wait for worker threads to terminate */
>>>> +       odp_cunit_thread_exit(&thr_args->thrdarg);
>>>> +}
>>>> +
>>>> +/* 1 queue 1 thread ODP_SCHED_SYNC_NONE */
>>>> +static void test_schedule_1q_1t_n(void)
>>>> +{
>>>> +       schedule_common(ODP_SCHED_SYNC_NONE, ONE_Q, ONE_PRIO,
>>>> SCHD_ONE);
>>>> +}
>>>> +
>>>> +/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC */
>>>> +static void test_schedule_1q_1t_a(void)
>>>> +{
>>>> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO,
>>>> SCHD_ONE);
>>>> +}
>>>> +
>>>> +/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED */
>>>> +static void test_schedule_1q_1t_o(void)
>>>> +{
>>>> +       schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO,
>>>> SCHD_ONE);
>>>> +}
>>>> +
>>>> +/* Many queues 1 thread ODP_SCHED_SYNC_NONE */
>>>> +static void test_schedule_mq_1t_n(void)
>>>> +{
>>>> +       /* Only one priority involved in these tests, but use
>>>> +          the same number of queues the more general case uses */
>>>> +       schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, ONE_PRIO,
>>>> SCHD_ONE);
>>>> +}
>>>> +
>>>> +/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC */
>>>> +static void test_schedule_mq_1t_a(void)
>>>> +{
>>>> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, ONE_PRIO,
>>>> SCHD_ONE);
>>>> +}
>>>> +
>>>> +/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED */
>>>> +static void test_schedule_mq_1t_o(void)
>>>> +{
>>>> +       schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO,
>>>> SCHD_ONE);
>>>> +}
>>>> +
>>>> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_NONE */
>>>> +static void test_schedule_mq_1t_prio_n(void)
>>>> +{
>>>> +       int prio = odp_schedule_num_prio();
>>>> +       schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_ONE);
>>>> +}
>>>> +
>>>> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC */
>>>> +static void test_schedule_mq_1t_prio_a(void)
>>>> +{
>>>> +       int prio = odp_schedule_num_prio();
>>>> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio,
>>>> SCHD_ONE);
>>>> +}
>>>> +
>>>> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED */
>>>> +static void test_schedule_mq_1t_prio_o(void)
>>>> +{
>>>> +       int prio = odp_schedule_num_prio();
>>>> +       schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio,
>>>> SCHD_ONE);
>>>> +}
>>>> +
>>>> +/* Many queues many threads check priority ODP_SCHED_SYNC_NONE */
>>>> +static void test_schedule_mq_mt_prio_n(void)
>>>> +{
>>>> +       int prio = odp_schedule_num_prio();
>>>> +       parallel_execute(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_ONE,
>>>> +                        DISABLE_EXCL_ATOMIC);
>>>> +}
>>>> +
>>>> +/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC */
>>>> +static void test_schedule_mq_mt_prio_a(void)
>>>> +{
>>>> +       int prio = odp_schedule_num_prio();
>>>> +       parallel_execute(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio,
>>>> SCHD_ONE,
>>>> +                        DISABLE_EXCL_ATOMIC);
>>>> +}
>>>> +
>>>> +/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED */
>>>> +static void test_schedule_mq_mt_prio_o(void)
>>>> +{
>>>> +       int prio = odp_schedule_num_prio();
>>>> +       parallel_execute(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio,
>>>> SCHD_ONE,
>>>> +                        DISABLE_EXCL_ATOMIC);
>>>> +}
>>>> +
>>>> +/* 1 queue many threads check exclusive access on ATOMIC queues */
>>>> +static void test_schedule_1q_mt_a_excl(void)
>>>> +{
>>>> +       parallel_execute(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO,
>>>> SCHD_ONE,
>>>> +                        ENABLE_EXCL_ATOMIC);
>>>> +}
>>>> +
>>>> +/* 1 queue 1 thread ODP_SCHED_SYNC_NONE multi */
>>>> +static void test_schedule_multi_1q_1t_n(void)
>>>> +{
>>>> +       schedule_common(ODP_SCHED_SYNC_NONE, ONE_Q, ONE_PRIO,
>>>> SCHD_MULTI);
>>>> +}
>>>> +
>>>> +/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC multi */
>>>> +static void test_schedule_multi_1q_1t_a(void)
>>>> +{
>>>> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO,
>>>> SCHD_MULTI);
>>>> +}
>>>> +
>>>> +/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED multi */
>>>> +static void test_schedule_multi_1q_1t_o(void)
>>>> +{
>>>> +       schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO,
>>>> SCHD_MULTI);
>>>> +}
>>>> +
>>>> +/* Many queues 1 thread ODP_SCHED_SYNC_NONE multi */
>>>> +static void test_schedule_multi_mq_1t_n(void)
>>>> +{
>>>> +       /* Only one priority involved in these tests, but use
>>>> +          the same number of queues the more general case uses */
>>>> +       schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, ONE_PRIO,
>>>> SCHD_MULTI);
>>>> +}
>>>> +
>>>> +/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC multi */
>>>> +static void test_schedule_multi_mq_1t_a(void)
>>>> +{
>>>> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, ONE_PRIO,
>>>> SCHD_MULTI);
>>>> +}
>>>> +
>>>> +/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED multi */
>>>> +static void test_schedule_multi_mq_1t_o(void)
>>>> +{
>>>> +       schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO,
>>>> SCHD_MULTI);
>>>> +}
>>>> +
>>>> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_NONE multi */
>>>> +static void test_schedule_multi_mq_1t_prio_n(void)
>>>> +{
>>>> +       int prio = odp_schedule_num_prio();
>>>> +       schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, prio,
>>>> SCHD_MULTI);
>>>> +}
>>>> +
>>>> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC multi */
>>>> +static void test_schedule_multi_mq_1t_prio_a(void)
>>>> +{
>>>> +       int prio = odp_schedule_num_prio();
>>>> +       schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio,
>>>> SCHD_MULTI);
>>>> +}
>>>> +
>>>> +/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED multi */
>>>> +static void test_schedule_multi_mq_1t_prio_o(void)
>>>> +{
>>>> +       int prio = odp_schedule_num_prio();
>>>> +       schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio,
>>>> SCHD_MULTI);
>>>> +}
>>>> +
>>>> +/* Many queues many threads check priority ODP_SCHED_SYNC_NONE multi */
>>>> +static void test_schedule_multi_mq_mt_prio_n(void)
>>>> +{
>>>> +       int prio = odp_schedule_num_prio();
>>>> +       parallel_execute(ODP_SCHED_SYNC_NONE, MANY_QS, prio,
>>>> SCHD_MULTI, 0);
>>>> +}
>>>> +
>>>> +/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC multi
>>>> */
>>>> +static void test_schedule_multi_mq_mt_prio_a(void)
>>>> +{
>>>> +       int prio = odp_schedule_num_prio();
>>>> +       parallel_execute(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio,
>>>> SCHD_MULTI, 0);
>>>> +}
>>>> +
>>>> +/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED
>>>> multi */
>>>> +static void test_schedule_multi_mq_mt_prio_o(void)
>>>> +{
>>>> +       int prio = odp_schedule_num_prio();
>>>> +       parallel_execute(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio,
>>>> SCHD_MULTI, 0);
>>>> +}
>>>> +
>>>> +/* 1 queue many threads check exclusive access on ATOMIC queues multi
>>>> */
>>>> +static void test_schedule_multi_1q_mt_a_excl(void)
>>>> +{
>>>> +       parallel_execute(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO,
>>>> SCHD_MULTI,
>>>> +                        ENABLE_EXCL_ATOMIC);
>>>> +}
>>>> +
>>>> +static int create_queues(void)
>>>> +{
>>>> +       int i, j, prios;
>>>> +
>>>> +       prios = odp_schedule_num_prio();
>>>> +
>>>> +       for (i = 0; i < prios; i++) {
>>>> +               odp_queue_param_t p;
>>>> +               p.sched.prio  = i;
>>>> +               p.sched.group = ODP_SCHED_GROUP_DEFAULT;
>>>> +
>>>> +               for (j = 0; j < QUEUES_PER_PRIO; j++) {
>>>> +                       /* Per sched sync type */
>>>> +                       char name[32];
>>>> +                       odp_queue_t q;
>>>> +
>>>> +                       snprintf(name, sizeof(name), "sched_%d_%d_n",
>>>> i, j);
>>>> +                       p.sched.sync = ODP_SCHED_SYNC_NONE;
>>>> +                       q = odp_queue_create(name,
>>>> ODP_QUEUE_TYPE_SCHED, &p);
>>>> +
>>>> +                       if (q == ODP_QUEUE_INVALID) {
>>>> +                               printf("Schedule queue create
>>>> failed.\n");
>>>> +                               return -1;
>>>> +                       }
>>>> +
>>>> +                       snprintf(name, sizeof(name), "sched_%d_%d_a",
>>>> i, j);
>>>> +                       p.sched.sync = ODP_SCHED_SYNC_ATOMIC;
>>>> +                       q = odp_queue_create(name,
>>>> ODP_QUEUE_TYPE_SCHED, &p);
>>>> +
>>>> +                       if (q == ODP_QUEUE_INVALID) {
>>>> +                               printf("Schedule queue create
>>>> failed.\n");
>>>> +                               return -1;
>>>> +                       }
>>>> +
>>>> +                       snprintf(name, sizeof(name), "sched_%d_%d_o",
>>>> i, j);
>>>> +                       p.sched.sync = ODP_SCHED_SYNC_ORDERED;
>>>> +                       q = odp_queue_create(name,
>>>> ODP_QUEUE_TYPE_SCHED, &p);
>>>> +
>>>> +                       if (q == ODP_QUEUE_INVALID) {
>>>> +                               printf("Schedule queue create
>>>> failed.\n");
>>>> +                               return -1;
>>>> +                       }
>>>> +               }
>>>> +       }
>>>> +
>>>> +       return 0;
>>>> +}
>>>> +
>>>> +static int schd_suite_init(void)
>>>> +{
>>>> +       odp_shm_t shm;
>>>> +       void *pool_base;
>>>> +       odp_buffer_pool_t pool;
>>>> +       test_globals_t *globals;
>>>> +       thread_args_t *thr_args;
>>>> +
>>>> +       shm = odp_shm_reserve(SHM_MSG_POOL_NAME, MSG_POOL_SIZE,
>>>> +                             ODP_CACHE_LINE_SIZE, 0);
>>>> +       pool_base = odp_shm_addr(shm);
>>>> +       if (pool_base == NULL) {
>>>> +               printf("Shared memory reserve failed.\n");
>>>> +               return -1;
>>>> +       }
>>>> +
>>>> +       pool = odp_buffer_pool_create(MSG_POOL_NAME, pool_base,
>>>> MSG_POOL_SIZE,
>>>> +                                     BUF_SIZE, ODP_CACHE_LINE_SIZE,
>>>> +                                     ODP_BUFFER_TYPE_RAW);
>>>> +       if (pool == ODP_BUFFER_POOL_INVALID) {
>>>> +               printf("Pool creation failed (msg).\n");
>>>> +               return -1;
>>>> +       }
>>>> +
>>>> +       shm = odp_shm_reserve(GLOBALS_SHM_NAME,
>>>> +                             sizeof(test_globals_t),
>>>> ODP_CACHE_LINE_SIZE, 0);
>>>> +
>>>> +       globals = odp_shm_addr(shm);
>>>> +
>>>> +       if (globals == NULL) {
>>>> +               printf("Shared memory reserve failed (globals).\n");
>>>> +               return -1;
>>>> +       }
>>>> +
>>>> +       memset(globals, 0, sizeof(test_globals_t));
>>>> +
>>>> +       globals->core_count = odp_sys_core_count();
>>>> +       if (globals->core_count > MAX_WORKERS)
>>>> +               globals->core_count = MAX_WORKERS;
>>>> +
>>>> +       shm = odp_shm_reserve(SHM_THR_ARGS_NAME, sizeof(thread_args_t),
>>>> +                             ODP_CACHE_LINE_SIZE, 0);
>>>> +       thr_args = odp_shm_addr(shm);
>>>> +
>>>> +       if (thr_args == NULL) {
>>>> +               printf("Shared memory reserve failed (thr_args).\n");
>>>> +               return -1;
>>>> +       }
>>>> +
>>>> +       memset(thr_args, 0, sizeof(thread_args_t));
>>>> +
>>>> +       /* Barrier to sync test case execution */
>>>> +       odp_barrier_init(&globals->barrier, globals->core_count);
>>>> +       odp_ticketlock_init(&globals->count_lock);
>>>> +       odp_spinlock_init(&globals->atomic_lock);
>>>> +
>>>> +       if (create_queues() != 0)
>>>> +               return -1;
>>>> +
>>>> +       return 0;
>>>> +}
>>>> +
>>>> +struct CU_TestInfo test_odp_schedule[] = {
>>>> +       {"schedule_wait_time",          test_schedule_wait_time},
>>>> +       {"schedule_num_prio",           test_schedule_num_prio},
>>>> +       {"schedule_1q_1t_n",            test_schedule_1q_1t_n},
>>>> +       {"schedule_1q_1t_a",            test_schedule_1q_1t_a},
>>>> +       {"schedule_1q_1t_o",            test_schedule_1q_1t_o},
>>>> +       {"schedule_mq_1t_n",            test_schedule_mq_1t_n},
>>>> +       {"schedule_mq_1t_a",            test_schedule_mq_1t_a},
>>>> +       {"schedule_mq_1t_o",            test_schedule_mq_1t_o},
>>>> +       {"schedule_mq_1t_prio_n",       test_schedule_mq_1t_prio_n},
>>>> +       {"schedule_mq_1t_prio_a",       test_schedule_mq_1t_prio_a},
>>>> +       {"schedule_mq_1t_prio_o",       test_schedule_mq_1t_prio_o},
>>>> +       {"schedule_mq_mt_prio_n",       test_schedule_mq_mt_prio_n},
>>>> +       {"schedule_mq_mt_prio_a",       test_schedule_mq_mt_prio_a},
>>>> +       {"schedule_mq_mt_prio_o",       test_schedule_mq_mt_prio_o},
>>>> +       {"schedule_1q_mt_a_excl",       test_schedule_1q_mt_a_excl},
>>>> +       {"schedule_multi_1q_1t_n",      test_schedule_multi_1q_1t_n},
>>>> +       {"schedule_multi_1q_1t_a",      test_schedule_multi_1q_1t_a},
>>>> +       {"schedule_multi_1q_1t_o",      test_schedule_multi_1q_1t_o},
>>>> +       {"schedule_multi_mq_1t_n",      test_schedule_multi_mq_1t_n},
>>>> +       {"schedule_multi_mq_1t_a",      test_schedule_multi_mq_1t_a},
>>>> +       {"schedule_multi_mq_1t_o",      test_schedule_multi_mq_1t_o},
>>>> +       {"schedule_multi_mq_1t_prio_n", test_schedule_multi_mq_1t_
>>>> prio_n},
>>>> +       {"schedule_multi_mq_1t_prio_a", test_schedule_multi_mq_1t_
>>>> prio_a},
>>>> +       {"schedule_multi_mq_1t_prio_o", test_schedule_multi_mq_1t_
>>>> prio_o},
>>>> +       {"schedule_multi_mq_mt_prio_n", test_schedule_multi_mq_mt_
>>>> prio_n},
>>>> +       {"schedule_multi_mq_mt_prio_a", test_schedule_multi_mq_mt_
>>>> prio_a},
>>>> +       {"schedule_multi_mq_mt_prio_o", test_schedule_multi_mq_mt_
>>>> prio_o},
>>>> +       {"schedule_multi_1q_mt_a_excl", test_schedule_multi_1q_mt_a_
>>>> excl},
>>>> +       CU_TEST_INFO_NULL,
>>>> +};
>>>> +
>>>> +CU_SuiteInfo odp_testsuites[] = {
>>>> +       {"Scheduler", schd_suite_init, NULL, NULL, NULL,
>>>> test_odp_schedule},
>>>> +       CU_SUITE_INFO_NULL,
>>>> +};
>>>>
>>>
>>>
>>> _______________________________________________
>>> lng-odp mailing list
>>> lng-odp@lists.linaro.org
>>> http://lists.linaro.org/mailman/listinfo/lng-odp
>>>
>>
>>
>> --
>> *Mike Holmes*
>> Linaro  Sr Technical Manager
>> LNG - ODP
>>
>> _______________________________________________
>> lng-odp mailing list
>> lng-odp@lists.linaro.org
>> http://lists.linaro.org/mailman/listinfo/lng-odp
>>
>>
Ciprian Barbu Dec. 12, 2014, 5:34 p.m. UTC | #6
On Fri, Dec 12, 2014 at 6:45 PM, Mike Holmes <mike.holmes@linaro.org> wrote:
> I just checked the Delta doc; you are correct. I will send a patch for that.
>
> On 12 December 2014 at 11:43, Bill Fischofer <bill.fischofer@linaro.org>
> wrote:
>>
>> I recall Petri indicated that odp_schedule_one() was being dropped from
>> v1.0.
>>
>> On Fri, Dec 12, 2014 at 10:40 AM, Mike Holmes <mike.holmes@linaro.org>
>> wrote:
>>>
>>> That improved things :)
>>>
>>>
>>> http://docs.opendataplane.org/linux-generic-gcov-html/linux-generic/odp_schedule.c.func.html
>>>
>>> But still missing:
>>>
>>> odp_schedule_one
>>> odp_schedule_pause
>>> odp_schedule_resume

I can add pause and resume; it shouldn't be hard. But at least I got
something merged in to get things going.
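
Something along these lines, perhaps — a minimal sketch only, assuming the
v1.0 prototypes stay void odp_schedule_pause(void) / void
odp_schedule_resume(void). It reuses this file's conventions (the
"sched_0_0_n" queue and MSG_POOL_NAME pool set up in schd_suite_init);
the NUM_BUFS_PAUSE / NUM_BUFS_BEFORE_PAUSE defines are new:

#define NUM_BUFS_PAUSE		1000
#define NUM_BUFS_BEFORE_PAUSE	10

static void test_schedule_pause_resume(void)
{
	odp_queue_t queue, from;
	odp_buffer_pool_t pool;
	odp_buffer_t buf;
	int i, local_bufs = 0;

	queue = odp_queue_lookup("sched_0_0_n");
	CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);

	pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
	CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);

	for (i = 0; i < NUM_BUFS_PAUSE; i++) {
		buf = odp_buffer_alloc(pool);
		CU_ASSERT(buf != ODP_BUFFER_INVALID);
		CU_ASSERT(odp_queue_enq(queue, buf) == 0);
	}

	/* Drain a few buffers normally before pausing */
	for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
		buf = odp_schedule(&from, ODP_SCHED_WAIT);
		CU_ASSERT(buf != ODP_BUFFER_INVALID);
		CU_ASSERT(from == queue);
		odp_buffer_free(buf);
	}

	odp_schedule_pause();

	/* While paused, only locally pre-dispatched buffers may still
	 * come out; the scheduler must stop handing out new work */
	while (1) {
		buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
		if (buf == ODP_BUFFER_INVALID)
			break;
		CU_ASSERT(from == queue);
		odp_buffer_free(buf);
		local_bufs++;
	}
	CU_ASSERT(local_bufs < NUM_BUFS_PAUSE - NUM_BUFS_BEFORE_PAUSE);

	odp_schedule_resume();

	/* After resume, all remaining buffers must be delivered */
	for (i = NUM_BUFS_BEFORE_PAUSE + local_bufs;
	     i < NUM_BUFS_PAUSE; i++) {
		buf = odp_schedule(&from, ODP_SCHED_WAIT);
		CU_ASSERT(buf != ODP_BUFFER_INVALID);
		CU_ASSERT(from == queue);
		odp_buffer_free(buf);
	}
}

How many locally cached events may still appear after the pause call is
implementation defined, so the exact assertions would need to follow
whatever the final API spec ends up saying.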

>>>
>>>
>>> On 12 December 2014 at 11:15, Maxim Uvarov <maxim.uvarov@linaro.org>
>>> wrote:
>>>>
>>>> Merged,
>>>> Maxim.
>>>>
>>>>
>>>> On 12/11/2014 06:43 PM, Ciprian Barbu wrote:
>>>>> [full quoted patch and mailing-list footers trimmed; see the Patch section below]

diff mbox

Patch

diff --git a/test/validation/.gitignore b/test/validation/.gitignore
index 37e2594..32834ae 100644
--- a/test/validation/.gitignore
+++ b/test/validation/.gitignore
@@ -3,4 +3,5 @@ 
 odp_init
 odp_queue
 odp_crypto
+odp_schedule
 odp_shm
diff --git a/test/validation/Makefile.am b/test/validation/Makefile.am
index 8547085..3670c76 100644
--- a/test/validation/Makefile.am
+++ b/test/validation/Makefile.am
@@ -6,13 +6,15 @@  AM_LDFLAGS += -static
 if ODP_CUNIT_ENABLED
 TESTS = ${bin_PROGRAMS}
 check_PROGRAMS = ${bin_PROGRAMS}
-bin_PROGRAMS = odp_init odp_queue odp_crypto odp_shm
+bin_PROGRAMS = odp_init odp_queue odp_crypto odp_shm odp_schedule
 odp_init_LDFLAGS = $(AM_LDFLAGS)
 odp_queue_LDFLAGS = $(AM_LDFLAGS)
 odp_crypto_CFLAGS = $(AM_CFLAGS) -I$(srcdir)/crypto
 odp_crypto_LDFLAGS = $(AM_LDFLAGS)
 odp_shm_CFLAGS = $(AM_CFLAGS)
 odp_shm_LDFLAGS = $(AM_LDFLAGS)
+odp_schedule_CFLAGS = $(AM_CFLAGS)
+odp_schedule_LDFLAGS = $(AM_LDFLAGS)
 endif
 
 dist_odp_init_SOURCES = odp_init.c
@@ -22,3 +24,4 @@  dist_odp_crypto_SOURCES = crypto/odp_crypto_test_async_inp.c \
 			  crypto/odp_crypto_test_rng.c \
 			  odp_crypto.c common/odp_cunit_common.c
 dist_odp_shm_SOURCES = odp_shm.c common/odp_cunit_common.c
+dist_odp_schedule_SOURCES = odp_schedule.c common/odp_cunit_common.c
diff --git a/test/validation/odp_schedule.c b/test/validation/odp_schedule.c
new file mode 100644
index 0000000..9d410e4
--- /dev/null
+++ b/test/validation/odp_schedule.c
@@ -0,0 +1,607 @@ 
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+#include <odp.h>
+#include "odp_cunit_common.h"
+
+#define MAX_WORKERS_THREADS	32
+#define MSG_POOL_SIZE		(4*1024*1024)
+#define QUEUES_PER_PRIO		16
+#define BUF_SIZE		64
+#define TEST_NUM_BUFS		100
+#define BURST_BUF_SIZE		4
+#define TEST_NUM_BUFS_EXCL	10000
+
+#define GLOBALS_SHM_NAME	"test_globals"
+#define MSG_POOL_NAME		"msg_pool"
+#define SHM_MSG_POOL_NAME	"shm_msg_pool"
+#define SHM_THR_ARGS_NAME	"shm_thr_args"
+
+#define ONE_Q			1
+#define MANY_QS			QUEUES_PER_PRIO
+
+#define ONE_PRIO		1
+
+#define SCHD_ONE		0
+#define SCHD_MULTI		1
+
+#define DISABLE_EXCL_ATOMIC	0
+#define ENABLE_EXCL_ATOMIC	1
+
+
+/* Test global variables */
+typedef struct {
+	int core_count;
+	odp_barrier_t barrier;
+	odp_schedule_prio_t current_prio;
+	int prio_buf_count;
+	odp_ticketlock_t count_lock;
+	odp_spinlock_t atomic_lock;
+} test_globals_t;
+
+typedef struct ODP_PACKED {
+	pthrd_arg thrdarg;
+	odp_schedule_sync_t sync;
+	int num_queues;
+	int num_prio;
+	int num_bufs;
+	int num_cores;
+	int enable_schd_multi;
+	int enable_excl_atomic;
+} thread_args_t;
+
+odp_buffer_pool_t pool;
+
+static void test_schedule_wait_time(void)
+{
+	uint64_t wait_time;
+
+	wait_time = odp_schedule_wait_time(0);
+
+	wait_time = odp_schedule_wait_time(1);
+	CU_ASSERT(wait_time > 0);
+
+	wait_time = odp_schedule_wait_time((uint64_t)-1LL);
+	CU_ASSERT(wait_time > 0);
+}
+
+static void test_schedule_num_prio(void)
+{
+	int prio;
+
+	prio = odp_schedule_num_prio();
+
+	CU_ASSERT(prio > 0);
+	CU_ASSERT(prio == odp_schedule_num_prio());
+}
+
+static void *schedule_common_(void *arg)
+{
+	thread_args_t *args = (thread_args_t *)arg;
+	odp_schedule_sync_t sync;
+	int num_queues, num_prio, num_bufs, num_cores;
+	odp_shm_t shm;
+	test_globals_t *globals;
+
+	sync = args->sync;
+	num_queues = args->num_queues;
+	num_prio = args->num_prio;
+	num_bufs = args->num_bufs;
+	num_cores = args->num_cores;
+
+	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+	globals = odp_shm_addr(shm);
+	CU_ASSERT_FATAL(globals != NULL);
+
+
+	if (num_cores == globals->core_count)
+		odp_barrier_wait(&globals->barrier);
+
+	while (1) {
+		odp_buffer_t buf;
+		odp_queue_t from;
+		int num = 0;
+		int locked;
+
+		odp_ticketlock_lock(&globals->count_lock);
+		if (globals->prio_buf_count ==
+		    num_bufs * num_queues * num_prio) {
+			odp_ticketlock_unlock(&globals->count_lock);
+			break;
+		}
+		odp_ticketlock_unlock(&globals->count_lock);
+
+		if (args->enable_schd_multi) {
+			odp_buffer_t bufs[BURST_BUF_SIZE];
+			int j;
+			num = odp_schedule_multi(&from, ODP_SCHED_NO_WAIT, bufs,
+						 BURST_BUF_SIZE);
+			CU_ASSERT(num >= 0);
+			CU_ASSERT(num <= BURST_BUF_SIZE);
+			if (num == 0)
+				continue;
+			for (j = 0; j < num; j++)
+				odp_buffer_free(bufs[j]);
+		} else {
+			buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+			if (buf == ODP_BUFFER_INVALID)
+				continue;
+			num = 1;
+			odp_buffer_free(buf);
+		}
+
+		if (args->enable_excl_atomic) {
+			locked = odp_spinlock_trylock(&globals->atomic_lock);
+			CU_ASSERT(locked == 1);
+			CU_ASSERT(from != ODP_QUEUE_INVALID);
+			if (locked) {
+				int cnt;
+				uint64_t cycles = 0;
+				/* Do some work here to keep the thread busy */
+				for (cnt = 0; cnt < 1000; cnt++)
+					cycles += odp_time_cycles();
+
+				odp_spinlock_unlock(&globals->atomic_lock);
+			}
+		}
+
+		odp_ticketlock_lock(&globals->count_lock);
+		globals->prio_buf_count += num;
+
+		if (sync == ODP_SCHED_SYNC_ATOMIC)
+			odp_schedule_release_atomic();
+
+		odp_ticketlock_unlock(&globals->count_lock);
+	}
+
+	return NULL;
+}
+
+static void fill_queues(thread_args_t *args)
+{
+	odp_schedule_sync_t sync;
+	int num_queues, num_prio;
+	odp_buffer_pool_t pool;
+	int i, j, k;
+	char name[32];
+
+	sync = args->sync;
+	num_queues = args->num_queues;
+	num_prio = args->num_prio;
+
+	pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
+	CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
+
+	for (i = 0; i < num_prio; i++) {
+		for (j = 0; j < num_queues; j++) {
+			odp_queue_t queue;
+
+			switch (sync) {
+			case ODP_SCHED_SYNC_NONE:
+				snprintf(name, sizeof(name),
+					 "sched_%d_%d_n", i, j);
+				break;
+			case ODP_SCHED_SYNC_ATOMIC:
+				snprintf(name, sizeof(name),
+					 "sched_%d_%d_a", i, j);
+				break;
+			case ODP_SCHED_SYNC_ORDERED:
+				snprintf(name, sizeof(name),
+					 "sched_%d_%d_o", i, j);
+				break;
+			default:
+				CU_ASSERT(0);
+				break;
+			}
+
+			queue = odp_queue_lookup(name);
+			CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+			for (k = 0; k < args->num_bufs; k++) {
+				odp_buffer_t buf;
+				buf = odp_buffer_alloc(pool);
+				CU_ASSERT(buf != ODP_BUFFER_INVALID);
+				CU_ASSERT(odp_queue_enq(queue, buf) == 0);
+			}
+		}
+	}
+}
+
+static void schedule_common(odp_schedule_sync_t sync, int num_queues,
+			    int num_prio, int enable_schd_multi)
+{
+	thread_args_t args;
+	odp_shm_t shm;
+	test_globals_t *globals;
+
+	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+	globals = odp_shm_addr(shm);
+	CU_ASSERT_FATAL(globals != NULL);
+
+	globals->current_prio = ODP_SCHED_PRIO_HIGHEST;
+	globals->prio_buf_count = 0;
+
+	args.sync = sync;
+	args.num_queues = num_queues;
+	args.num_prio = num_prio;
+	args.num_bufs = TEST_NUM_BUFS;
+	args.num_cores = 1;
+	args.enable_schd_multi = enable_schd_multi;
+	args.enable_excl_atomic = 0;	/* Not needed with a single core */
+
+	fill_queues(&args);
+
+	schedule_common_(&args);
+}
+
+static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
+			     int num_prio, int enable_schd_multi,
+			     int enable_excl_atomic)
+{
+	odp_shm_t shm;
+	test_globals_t *globals;
+	thread_args_t *thr_args;
+
+	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+	globals = odp_shm_addr(shm);
+	CU_ASSERT_FATAL(globals != NULL);
+
+	shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
+	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+	thr_args = odp_shm_addr(shm);
+	CU_ASSERT_FATAL(thr_args != NULL);
+
+	thr_args->sync = sync;
+	thr_args->num_queues = num_queues;
+	thr_args->num_prio = num_prio;
+	if (enable_excl_atomic)
+		thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
+	else
+		thr_args->num_bufs = TEST_NUM_BUFS;
+	thr_args->num_cores = globals->core_count;
+	thr_args->enable_schd_multi = enable_schd_multi;
+	thr_args->enable_excl_atomic = enable_excl_atomic;
+
+	fill_queues(thr_args);
+
+	/* Reset buffer counters from the main thread */
+	globals->current_prio = ODP_SCHED_PRIO_HIGHEST;
+	globals->prio_buf_count = 0;
+
+	/* Create and launch worker threads */
+	thr_args->thrdarg.numthrds = globals->core_count;
+	odp_cunit_thread_create(schedule_common_, &thr_args->thrdarg);
+
+	/* Wait for worker threads to terminate */
+	odp_cunit_thread_exit(&thr_args->thrdarg);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_NONE */
+static void test_schedule_1q_1t_n(void)
+{
+	schedule_common(ODP_SCHED_SYNC_NONE, ONE_Q, ONE_PRIO, SCHD_ONE);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC */
+static void test_schedule_1q_1t_a(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_ONE);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED */
+static void test_schedule_1q_1t_o(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO, SCHD_ONE);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_NONE */
+static void test_schedule_mq_1t_n(void)
+{
+	/* Only one priority involved in these tests, but use
+	   the same number of queues the more general case uses */
+	schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, ONE_PRIO, SCHD_ONE);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC */
+static void test_schedule_mq_1t_a(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, ONE_PRIO, SCHD_ONE);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED */
+static void test_schedule_mq_1t_o(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO, SCHD_ONE);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_NONE */
+static void test_schedule_mq_1t_prio_n(void)
+{
+	int prio = odp_schedule_num_prio();
+	schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_ONE);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC */
+static void test_schedule_mq_1t_prio_a(void)
+{
+	int prio = odp_schedule_num_prio();
+	schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_ONE);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED */
+static void test_schedule_mq_1t_prio_o(void)
+{
+	int prio = odp_schedule_num_prio();
+	schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_ONE);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_NONE */
+static void test_schedule_mq_mt_prio_n(void)
+{
+	int prio = odp_schedule_num_prio();
+	parallel_execute(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_ONE,
+			 DISABLE_EXCL_ATOMIC);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC */
+static void test_schedule_mq_mt_prio_a(void)
+{
+	int prio = odp_schedule_num_prio();
+	parallel_execute(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_ONE,
+			 DISABLE_EXCL_ATOMIC);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED */
+static void test_schedule_mq_mt_prio_o(void)
+{
+	int prio = odp_schedule_num_prio();
+	parallel_execute(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_ONE,
+			 DISABLE_EXCL_ATOMIC);
+}
+
+/* 1 queue many threads check exclusive access on ATOMIC queues */
+static void test_schedule_1q_mt_a_excl(void)
+{
+	parallel_execute(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_ONE,
+			 ENABLE_EXCL_ATOMIC);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_NONE multi */
+static void test_schedule_multi_1q_1t_n(void)
+{
+	schedule_common(ODP_SCHED_SYNC_NONE, ONE_Q, ONE_PRIO, SCHD_MULTI);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC multi */
+static void test_schedule_multi_1q_1t_a(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_MULTI);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED multi */
+static void test_schedule_multi_1q_1t_o(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO, SCHD_MULTI);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_NONE multi */
+static void test_schedule_multi_mq_1t_n(void)
+{
+	/* Only one priority involved in these tests, but use
+	   the same number of queues the more general case uses */
+	schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, ONE_PRIO, SCHD_MULTI);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC multi */
+static void test_schedule_multi_mq_1t_a(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, ONE_PRIO, SCHD_MULTI);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED multi */
+static void test_schedule_multi_mq_1t_o(void)
+{
+	schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO, SCHD_MULTI);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_NONE multi */
+static void test_schedule_multi_mq_1t_prio_n(void)
+{
+	int prio = odp_schedule_num_prio();
+	schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_MULTI);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC multi */
+static void test_schedule_multi_mq_1t_prio_a(void)
+{
+	int prio = odp_schedule_num_prio();
+	schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_MULTI);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED multi */
+static void test_schedule_multi_mq_1t_prio_o(void)
+{
+	int prio = odp_schedule_num_prio();
+	schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_MULTI);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_NONE multi */
+static void test_schedule_multi_mq_mt_prio_n(void)
+{
+	int prio = odp_schedule_num_prio();
+	parallel_execute(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_MULTI, 0);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC multi */
+static void test_schedule_multi_mq_mt_prio_a(void)
+{
+	int prio = odp_schedule_num_prio();
+	parallel_execute(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_MULTI, 0);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED multi */
+static void test_schedule_multi_mq_mt_prio_o(void)
+{
+	int prio = odp_schedule_num_prio();
+	parallel_execute(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_MULTI, 0);
+}
+
+/* 1 queue many threads check exclusive access on ATOMIC queues multi */
+static void test_schedule_multi_1q_mt_a_excl(void)
+{
+	parallel_execute(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_MULTI,
+			 ENABLE_EXCL_ATOMIC);
+}
+
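+/* Create one NONE, one ATOMIC and one ORDERED scheduled queue for
+ * every (priority, slot) pair, QUEUES_PER_PRIO slots per priority.
+ * Queue names encode priority, slot index and sync type, e.g.
+ * "sched_0_2_a" is priority 0, slot 2, ODP_SCHED_SYNC_ATOMIC. */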
+static int create_queues(void)
+{
+	int i, j, prios;
+
+	prios = odp_schedule_num_prio();
+
+	for (i = 0; i < prios; i++) {
+		odp_queue_param_t p;
+
+		/* Avoid passing stack garbage in unset fields */
+		memset(&p, 0, sizeof(p));
+		p.sched.prio  = i;
+		p.sched.group = ODP_SCHED_GROUP_DEFAULT;
+
+		for (j = 0; j < QUEUES_PER_PRIO; j++) {
+			/* Per sched sync type */
+			char name[32];
+			odp_queue_t q;
+
+			snprintf(name, sizeof(name), "sched_%d_%d_n", i, j);
+			p.sched.sync = ODP_SCHED_SYNC_NONE;
+			q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
+
+			if (q == ODP_QUEUE_INVALID) {
+				printf("Schedule queue create failed.\n");
+				return -1;
+			}
+
+			snprintf(name, sizeof(name), "sched_%d_%d_a", i, j);
+			p.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+			q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
+
+			if (q == ODP_QUEUE_INVALID) {
+				printf("Schedule queue create failed.\n");
+				return -1;
+			}
+
+			snprintf(name, sizeof(name), "sched_%d_%d_o", i, j);
+			p.sched.sync = ODP_SCHED_SYNC_ORDERED;
+			q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
+
+			if (q == ODP_QUEUE_INVALID) {
+				printf("Schedule queue create failed.\n");
+				return -1;
+			}
+		}
+	}
+
+	return 0;
+}
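+
+/* A possible teardown counterpart to create_queues(), left compiled
+ * out as a sketch: it assumes a queue destroy API (e.g. an
+ * odp_queue_destroy() call) that is not yet available in the repo.
+ * It looks up each queue by the same names create_queues() used. */
+#if 0
+static int destroy_queues(void)
+{
+	int i, j, k, prios;
+	const char sync[] = {'n', 'a', 'o'};
+
+	prios = odp_schedule_num_prio();
+
+	for (i = 0; i < prios; i++) {
+		for (j = 0; j < QUEUES_PER_PRIO; j++) {
+			for (k = 0; k < 3; k++) {
+				char name[32];
+				odp_queue_t q;
+
+				snprintf(name, sizeof(name), "sched_%d_%d_%c",
+					 i, j, sync[k]);
+				q = odp_queue_lookup(name);
+
+				if (q == ODP_QUEUE_INVALID ||
+				    odp_queue_destroy(q) != 0) {
+					printf("Queue destroy failed.\n");
+					return -1;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+#endif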
+
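+/* Suite init: reserve the shared memory regions (message pool,
+ * globals, per-thread arguments), create the buffer pool that
+ * carries test messages, initialize the synchronization primitives
+ * and create the scheduled queues exercised by all test cases. */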
+static int schd_suite_init(void)
+{
+	odp_shm_t shm;
+	void *pool_base;
+	odp_buffer_pool_t pool;
+	test_globals_t *globals;
+	thread_args_t *thr_args;
+
+	shm = odp_shm_reserve(SHM_MSG_POOL_NAME, MSG_POOL_SIZE,
+			      ODP_CACHE_LINE_SIZE, 0);
+	pool_base = odp_shm_addr(shm);
+	if (pool_base == NULL) {
+		printf("Shared memory reserve failed.\n");
+		return -1;
+	}
+
+	pool = odp_buffer_pool_create(MSG_POOL_NAME, pool_base, MSG_POOL_SIZE,
+				      BUF_SIZE, ODP_CACHE_LINE_SIZE,
+				      ODP_BUFFER_TYPE_RAW);
+	if (pool == ODP_BUFFER_POOL_INVALID) {
+		printf("Pool creation failed (msg).\n");
+		return -1;
+	}
+
+	shm = odp_shm_reserve(GLOBALS_SHM_NAME,
+			      sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
+
+	globals = odp_shm_addr(shm);
+
+	if (globals == NULL) {
+		printf("Shared memory reserve failed (globals).\n");
+		return -1;
+	}
+
+	memset(globals, 0, sizeof(test_globals_t));
+
+	globals->core_count = odp_sys_core_count();
+	if (globals->core_count > MAX_WORKERS)
+		globals->core_count = MAX_WORKERS;
+
+	shm = odp_shm_reserve(SHM_THR_ARGS_NAME, sizeof(thread_args_t),
+			      ODP_CACHE_LINE_SIZE, 0);
+	thr_args = odp_shm_addr(shm);
+
+	if (thr_args == NULL) {
+		printf("Shared memory reserve failed (thr_args).\n");
+		return -1;
+	}
+
+	memset(thr_args, 0, sizeof(thread_args_t));
+
+	/* Barrier to sync test case execution */
+	odp_barrier_init(&globals->barrier, globals->core_count);
+	odp_ticketlock_init(&globals->count_lock);
+	odp_spinlock_init(&globals->atomic_lock);
+
+	if (create_queues() != 0)
+		return -1;
+
+	return 0;
+}
+
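+/* Test case names encode the configuration: queue count (1q/mq),
+ * thread count (1t/mt), whether all priorities are exercised (prio)
+ * and the sync type (n = NONE, a = ATOMIC, o = ORDERED); the "multi"
+ * variants dequeue in batches via odp_schedule_multi(). */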
+struct CU_TestInfo test_odp_schedule[] = {
+	{"schedule_wait_time",		test_schedule_wait_time},
+	{"schedule_num_prio",		test_schedule_num_prio},
+	{"schedule_1q_1t_n",		test_schedule_1q_1t_n},
+	{"schedule_1q_1t_a",		test_schedule_1q_1t_a},
+	{"schedule_1q_1t_o",		test_schedule_1q_1t_o},
+	{"schedule_mq_1t_n",		test_schedule_mq_1t_n},
+	{"schedule_mq_1t_a",		test_schedule_mq_1t_a},
+	{"schedule_mq_1t_o",		test_schedule_mq_1t_o},
+	{"schedule_mq_1t_prio_n",	test_schedule_mq_1t_prio_n},
+	{"schedule_mq_1t_prio_a",	test_schedule_mq_1t_prio_a},
+	{"schedule_mq_1t_prio_o",	test_schedule_mq_1t_prio_o},
+	{"schedule_mq_mt_prio_n",	test_schedule_mq_mt_prio_n},
+	{"schedule_mq_mt_prio_a",	test_schedule_mq_mt_prio_a},
+	{"schedule_mq_mt_prio_o",	test_schedule_mq_mt_prio_o},
+	{"schedule_1q_mt_a_excl",	test_schedule_1q_mt_a_excl},
+	{"schedule_multi_1q_1t_n",	test_schedule_multi_1q_1t_n},
+	{"schedule_multi_1q_1t_a",	test_schedule_multi_1q_1t_a},
+	{"schedule_multi_1q_1t_o",	test_schedule_multi_1q_1t_o},
+	{"schedule_multi_mq_1t_n",	test_schedule_multi_mq_1t_n},
+	{"schedule_multi_mq_1t_a",	test_schedule_multi_mq_1t_a},
+	{"schedule_multi_mq_1t_o",	test_schedule_multi_mq_1t_o},
+	{"schedule_multi_mq_1t_prio_n",	test_schedule_multi_mq_1t_prio_n},
+	{"schedule_multi_mq_1t_prio_a",	test_schedule_multi_mq_1t_prio_a},
+	{"schedule_multi_mq_1t_prio_o",	test_schedule_multi_mq_1t_prio_o},
+	{"schedule_multi_mq_mt_prio_n",	test_schedule_multi_mq_mt_prio_n},
+	{"schedule_multi_mq_mt_prio_a",	test_schedule_multi_mq_mt_prio_a},
+	{"schedule_multi_mq_mt_prio_o",	test_schedule_multi_mq_mt_prio_o},
+	{"schedule_multi_1q_mt_a_excl",	test_schedule_multi_1q_mt_a_excl},
+	CU_TEST_INFO_NULL,
+};
+
+CU_SuiteInfo odp_testsuites[] = {
+	{"Scheduler", schd_suite_init, NULL, NULL, NULL, test_odp_schedule},
+	CU_SUITE_INFO_NULL,
+};