[RFC] cunit: add tests for scheduler API

Message ID 1416510144-24926-1-git-send-email-ciprian.barbu@linaro.org
State New

Commit Message

Ciprian Barbu Nov. 20, 2014, 7:02 p.m. UTC
Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
---
The test cases are based almost entirely on the odp_example.
There are no alloc tests, and I added a test case for odp_schedule_wait_time.
The major difference between the odp_example and this CUnit suite is the split
into test cases; the odp_example calls every test case from one big function.

I had to work some magic (a small argument struct handed to a common template
routine) in order to be able to pass arguments to test cases; I hope it is not
too hard to follow.

 configure.ac                                  |   1 +
 test/cunit/Makefile.am                        |   2 +
 test/cunit/schedule/Makefile.am               |  10 +
 test/cunit/schedule/odp_schedule_test.c       | 844 ++++++++++++++++++++++++++
 test/cunit/schedule/odp_schedule_testsuites.c |  35 ++
 test/cunit/schedule/odp_schedule_testsuites.h |  21 +
 6 files changed, 913 insertions(+)
 create mode 100644 test/cunit/schedule/Makefile.am
 create mode 100644 test/cunit/schedule/odp_schedule_test.c
 create mode 100644 test/cunit/schedule/odp_schedule_testsuites.c
 create mode 100644 test/cunit/schedule/odp_schedule_testsuites.h

Comments

Alexandru Badicioiu Nov. 21, 2014, 10:18 a.m. UTC | #1
Hi,
the scheduling tests in odp_example were discussed some time ago, and there
was an agreement, at least for the FSL and TI platforms, that the fair
scheduling assumed by the following loop:
for (i = 0; i < QUEUE_ROUNDS; i++) {
        buf = odp_schedule_one(&queue, ODP_SCHED_WAIT);

        if (odp_queue_enq(queue, buf)) {
                ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
                return -1;
        }
}

does not make sense for an ATOMIC queue, because an ATOMIC hardware queue
keeps being scheduled to the same core as long as there are packets in the
queue and that core has room to dequeue them (other platforms, please confirm
or refute). On my specific platform I can force this behaviour with a
particular HW configuration, but that configuration effectively disables
the use of POLL queues.
I think we need scheduling tests for the most general case (SYNC_NONE and
no assumption about how many buffers are scheduled to a particular core).
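
To make that concrete, here is a rough sketch (not part of the patch) of a
worker loop that fits SYNC_NONE queues and makes no per-core assumption: each
thread just counts whatever the scheduler hands it until the scheduler stays
empty for a bounded wait, and only the sum over all threads is checked.

/* Hedged sketch: no thread expects any particular share of the buffers */
static uint32_t worker_drain_count(void)
{
        odp_buffer_t buf;
        uint32_t count = 0;
        /* bounded wait (~100 ms) instead of ODP_SCHED_WAIT */
        uint64_t wait = odp_schedule_wait_time(100 * 1000 * 1000);

        for (;;) {
                buf = odp_schedule(NULL, wait);
                if (buf == ODP_BUFFER_INVALID)
                        break;  /* scheduler stayed empty for 'wait' */
                count++;
                odp_buffer_free(buf);
        }

        return count;
}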

Thanks,
Alex


On 20 November 2014 21:02, Ciprian Barbu <ciprian.barbu@linaro.org> wrote:

> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
> ---
> The testcases are based almost entirely on the odp_example.
> There are no alloc tests and I added a test case for
> odp_schedule_wait_time.
> The major differencs between the odp_example and this cunit is the
> partition
> into testcases, the odp_example calls every test case from one big
> function.
>
> I had to work some magic in order to be able to pass arguments to test
> cases,
> I hope is not too hard to follow.
>
>  configure.ac                                  |   1 +
>  test/cunit/Makefile.am                        |   2 +
>  test/cunit/schedule/Makefile.am               |  10 +
>  test/cunit/schedule/odp_schedule_test.c       | 844
> ++++++++++++++++++++++++++
>  test/cunit/schedule/odp_schedule_testsuites.c |  35 ++
>  test/cunit/schedule/odp_schedule_testsuites.h |  21 +
>  6 files changed, 913 insertions(+)
>  create mode 100644 test/cunit/schedule/Makefile.am
>  create mode 100644 test/cunit/schedule/odp_schedule_test.c
>  create mode 100644 test/cunit/schedule/odp_schedule_testsuites.c
>  create mode 100644 test/cunit/schedule/odp_schedule_testsuites.h
>
> diff --git a/configure.ac b/configure.ac
> index fcd7279..a47db72 100644
> --- a/configure.ac
> +++ b/configure.ac
> @@ -173,6 +173,7 @@ AC_CONFIG_FILES([Makefile
>                  test/Makefile
>                  test/api_test/Makefile
>                   test/cunit/Makefile
> +                 test/cunit/schedule/Makefile
>                  pkgconfig/libodp.pc])
>
>  AC_SEARCH_LIBS([timer_create],[rt posix4])
> diff --git a/test/cunit/Makefile.am b/test/cunit/Makefile.am
> index 439e134..b6033ee 100644
> --- a/test/cunit/Makefile.am
> +++ b/test/cunit/Makefile.am
> @@ -3,6 +3,8 @@ include $(top_srcdir)/test/Makefile.inc
>  AM_CFLAGS += -I$(CUNIT_PATH)/include
>  AM_LDFLAGS += -L$(CUNIT_PATH)/lib -static -lcunit
>
> +SUBDIRS = schedule
> +
>  if ODP_CUNIT_ENABLED
>  TESTS = ${bin_PROGRAMS}
>  check_PROGRAMS = ${bin_PROGRAMS}
> diff --git a/test/cunit/schedule/Makefile.am
> b/test/cunit/schedule/Makefile.am
> new file mode 100644
> index 0000000..ad68b03
> --- /dev/null
> +++ b/test/cunit/schedule/Makefile.am
> @@ -0,0 +1,10 @@
> +include $(top_srcdir)/test/Makefile.inc
> +
> +if ODP_CUNIT_ENABLED
> +bin_PROGRAMS = odp_schedule_test
> +odp_schedule_test_LDFLAGS = $(AM_LDFLAGS) -L$(CUNIT_PATH)/lib -static
> -lcunit
> +odp_schedule_test_CFLAGS = $(AM_CFLAGS) -I$(CUNIT_PATH)/include
> +endif
> +
> +dist_odp_schedule_test_SOURCES = odp_schedule_test.c \
> +                                odp_schedule_testsuites.c
> diff --git a/test/cunit/schedule/odp_schedule_test.c
> b/test/cunit/schedule/odp_schedule_test.c
> new file mode 100644
> index 0000000..fa67f6e
> --- /dev/null
> +++ b/test/cunit/schedule/odp_schedule_test.c
> @@ -0,0 +1,844 @@
> +/* Copyright (c) 2014, Linaro Limited
> + * All rights reserved.
> + *
> + * SPDX-License-Identifier:     BSD-3-Clause
> + */
> +
> +#include "odp_schedule_testsuites.h"
> +#include <odph_linux.h>
> +
> +#define MAX_WORKERS            32            /**< Max worker threads */
> +#define MSG_POOL_SIZE           (4*1024*1024)
> +#define QUEUES_PER_PRIO                64            /**< Queue per
> priority */
> +#define QUEUE_ROUNDS           (512*1024)    /**< Queue test rounds */
> +#define MULTI_BUFS_MAX         4             /**< Buffer burst size */
> +#define BUF_SIZE               64
> +
> +#define SCHED_MSG "Test_buff_FOR_simple_schedule"
> +
> +/** Test arguments */
> +typedef struct {
> +       int core_count; /**< Core count */
> +       int proc_mode;  /**< Process mode */
> +} test_args_t;
> +
> +typedef int (*test_case_routine)(const char *, int, odp_buffer_pool_t,
> +                                int, odp_barrier_t *);
> +
> +/** Scheduler test case arguments */
> +typedef struct {
> +       char name[64];  /**< test case name */
> +       int prio;
> +       test_case_routine func;
> +} test_case_args_t;
> +
> +/** Test global variables */
> +typedef struct {
> +       odp_barrier_t barrier;/**< @private Barrier for test
> synchronisation */
> +       test_args_t test_args;/**< @private Test case function and
> arguments */
> +} test_globals_t;
> +
> +static void execute_parallel(void *(*func) (void *), test_case_args_t *);
> +static int num_workers;
> +
> +/**
> + * @internal CUnit test case for verifying functionality of
> + *           schedule_wait_time
> + */
> +static void schedule_wait_time(void)
> +{
> +       uint64_t wait_time;
> +
> +       wait_time = odp_schedule_wait_time(0);
> +       CU_ASSERT(wait_time > 0);
> +       CU_PASS("schedule_wait_time(0)");
> +
> +       wait_time = odp_schedule_wait_time(1);
> +       CU_ASSERT(wait_time > 0);
> +       CU_PASS("schedule_wait_time(1)");
> +
> +       wait_time = odp_schedule_wait_time((uint64_t)-1LL);
> +       CU_ASSERT(wait_time > 0);
> +       CU_PASS("schedule_wait_time(MAX_LONG_INT)");
> +}
> +
> +/**
> + * @internal Clear all scheduled queues. Retry to be sure that all
> + * buffers have been scheduled.
> + */
> +static void clear_sched_queues(void)
> +{
> +       odp_buffer_t buf;
> +
> +       while (1) {
> +               buf = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
> +
> +               if (buf == ODP_BUFFER_INVALID)
> +                       break;
> +
> +               odp_buffer_free(buf);
> +       }
> +}
> +
> +/**
> + * @internal Create multiple queues from a pool of buffers
> + *
> + * @param thr  Thread
> + * @param msg_pool  Buffer pool
> + * @param prio   Queue priority
> + *
> + * @return 0 if successful
> + */
> +static int create_queues(int thr, odp_buffer_pool_t msg_pool, int prio)
> +{
> +       char name[] = "sched_XX_YY";
> +       odp_buffer_t buf;
> +       odp_queue_t queue;
> +       int i;
> +
> +       name[6] = '0' + prio/10;
> +       name[7] = '0' + prio - 10*(prio/10);
> +
> +       /* Alloc and enqueue a buffer per queue */
> +       for (i = 0; i < QUEUES_PER_PRIO; i++) {
> +               name[9]  = '0' + i/10;
> +               name[10] = '0' + i - 10*(i/10);
> +
> +               queue = odp_queue_lookup(name);
> +
> +               if (queue == ODP_QUEUE_INVALID) {
> +                       ODP_ERR("  [%i] Queue %s lookup failed.\n", thr,
> name);
> +                       return -1;
> +               }
> +
> +               buf = odp_buffer_alloc(msg_pool);
> +
> +               if (!odp_buffer_is_valid(buf)) {
> +                       ODP_ERR("  [%i] msg_pool alloc failed\n", thr);
> +                       return -1;
> +               }
> +
> +               if (odp_queue_enq(queue, buf)) {
> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +                       return -1;
> +               }
> +       }
> +
> +       return 0;
> +}
> +
> +/**
> + * @internal Create a single queue from a pool of buffers
> + *
> + * @param thr  Thread
> + * @param msg_pool  Buffer pool
> + * @param prio   Queue priority
> + *
> + * @return 0 if successful
> + */
> +static int create_queue(int thr, odp_buffer_pool_t msg_pool, int prio)
> +{
> +       char name[] = "sched_XX_00";
> +       odp_buffer_t buf;
> +       odp_queue_t queue;
> +
> +       buf = odp_buffer_alloc(msg_pool);
> +
> +       if (!odp_buffer_is_valid(buf)) {
> +               ODP_ERR("  [%i] msg_pool alloc failed\n", thr);
> +               return -1;
> +       }
> +
> +       name[6] = '0' + prio/10;
> +       name[7] = '0' + prio - 10*(prio/10);
> +
> +       queue = odp_queue_lookup(name);
> +
> +       if (queue == ODP_QUEUE_INVALID) {
> +               ODP_ERR("  [%i] Queue %s lookup failed.\n", thr, name);
> +               return -1;
> +       }
> +
> +       if (odp_queue_enq(queue, buf)) {
> +               ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +               return -1;
> +       }
> +
> +       return 0;
> +}
> +
> +/**
> + * @internal Test scheduling of a single queue - with odp_schedule_one()
> + *
> + * Enqueue a buffer to the shared queue. Schedule and enqueue the received
> + * buffer back into the queue.
> + *
> + * @param str      Test case name string
> + * @param thr      Thread
> + * @param msg_pool Buffer pool
> + * @param prio     Priority
> + * @param barrier  Barrier
> + *
> + * @return 0 if successful
> + */
> +static int test_schedule_one_single(const char *str, int thr,
> +                                   odp_buffer_pool_t msg_pool,
> +                                   int prio, odp_barrier_t *barrier)
> +{
> +       odp_buffer_t buf;
> +       odp_queue_t queue;
> +       uint64_t t1, t2, cycles, ns;
> +       uint32_t i;
> +       uint32_t tot = 0;
> +
> +       if (create_queue(thr, msg_pool, prio)) {
> +               CU_FAIL_FATAL("lookup queue");
> +               return -1;
> +       }
> +
> +       t1 = odp_time_get_cycles();
> +
> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
> +               buf = odp_schedule_one(&queue, ODP_SCHED_WAIT);
> +
> +               if (odp_queue_enq(queue, buf)) {
> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +                       return -1;
> +               }
> +       }
> +
> +       if (odp_queue_sched_type(queue) == ODP_SCHED_SYNC_ATOMIC)
> +               odp_schedule_release_atomic();
> +
> +       t2     = odp_time_get_cycles();
> +       cycles = odp_time_diff_cycles(t1, t2);
> +       ns     = odp_time_cycles_to_ns(cycles);
> +       tot    = i;
> +
> +       odp_barrier_sync(barrier);
> +       clear_sched_queues();
> +
> +       cycles = cycles/tot;
> +       ns     = ns/tot;
> +
> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
> +              thr, str, cycles, ns);
> +
> +       return 0;
> +}
> +
> +/**
> + * @internal Test scheduling of multiple queues - with odp_schedule_one()
> + *
> + * Enqueue a buffer to each queue. Schedule and enqueue the received
> + * buffer back into the queue it came from.
> + *
> + * @param str      Test case name string
> + * @param thr      Thread
> + * @param msg_pool Buffer pool
> + * @param prio     Priority
> + * @param barrier  Barrier
> + *
> + * @return 0 if successful
> + */
> +static int test_schedule_one_many(const char *str, int thr,
> +                                 odp_buffer_pool_t msg_pool,
> +                                 int prio, odp_barrier_t *barrier)
> +{
> +       odp_buffer_t buf;
> +       odp_queue_t queue;
> +       uint64_t t1 = 0;
> +       uint64_t t2 = 0;
> +       uint64_t cycles, ns;
> +       uint32_t i;
> +       uint32_t tot = 0;
> +
> +       if (create_queues(thr, msg_pool, prio))
> +               return -1;
> +
> +       /* Start sched-enq loop */
> +       t1 = odp_time_get_cycles();
> +
> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
> +               buf = odp_schedule_one(&queue, ODP_SCHED_WAIT);
> +
> +               if (odp_queue_enq(queue, buf)) {
> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +                       return -1;
> +               }
> +       }
> +
> +       if (odp_queue_sched_type(queue) == ODP_SCHED_SYNC_ATOMIC)
> +               odp_schedule_release_atomic();
> +
> +       t2     = odp_time_get_cycles();
> +       cycles = odp_time_diff_cycles(t1, t2);
> +       ns     = odp_time_cycles_to_ns(cycles);
> +       tot    = i;
> +
> +       odp_barrier_sync(barrier);
> +       clear_sched_queues();
> +
> +       cycles = cycles/tot;
> +       ns     = ns/tot;
> +
> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
> +              thr, str, cycles, ns);
> +
> +       return 0;
> +}
> +
> +/**
> + * @internal Test scheduling of a single queue - with odp_schedule()
> + *
> + * Enqueue a buffer to the shared queue. Schedule and enqueue the received
> + * buffer back into the queue.
> + *
> + * @param str      Test case name string
> + * @param thr      Thread
> + * @param msg_pool Buffer pool
> + * @param prio     Priority
> + * @param barrier  Barrier
> + *
> + * @return 0 if successful
> + */
> +static int test_schedule_single(const char *str, int thr,
> +                               odp_buffer_pool_t msg_pool,
> +                               int prio, odp_barrier_t *barrier)
> +{
> +       odp_buffer_t buf;
> +       odp_queue_t queue;
> +       uint64_t t1, t2, cycles, ns;
> +       uint32_t i;
> +       uint32_t tot = 0;
> +
> +       if (create_queue(thr, msg_pool, prio))
> +               return -1;
> +
> +       t1 = odp_time_get_cycles();
> +
> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
> +               buf = odp_schedule(&queue, ODP_SCHED_WAIT);
> +
> +               if (odp_queue_enq(queue, buf)) {
> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +                       return -1;
> +               }
> +       }
> +
> +       /* Clear possible locally stored buffers */
> +       odp_schedule_pause();
> +
> +       tot = i;
> +
> +       while (1) {
> +               buf = odp_schedule(&queue, ODP_SCHED_NO_WAIT);
> +
> +               if (buf == ODP_BUFFER_INVALID)
> +                       break;
> +
> +               tot++;
> +
> +               if (odp_queue_enq(queue, buf)) {
> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +                       return -1;
> +               }
> +       }
> +
> +       odp_schedule_resume();
> +
> +       t2     = odp_time_get_cycles();
> +       cycles = odp_time_diff_cycles(t1, t2);
> +       ns     = odp_time_cycles_to_ns(cycles);
> +
> +       odp_barrier_sync(barrier);
> +       clear_sched_queues();
> +
> +       cycles = cycles/tot;
> +       ns     = ns/tot;
> +
> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
> +              thr, str, cycles, ns);
> +
> +       return 0;
> +}
> +
> +/**
> + * @internal Test scheduling of multiple queues - with odp_schedule()
> + *
> + * Enqueue a buffer to each queue. Schedule and enqueue the received
> + * buffer back into the queue it came from.
> + *
> + * @param str      Test case name string
> + * @param thr      Thread
> + * @param msg_pool Buffer pool
> + * @param prio     Priority
> + * @param barrier  Barrier
> + *
> + * @return 0 if successful
> + */
> +static int test_schedule_many(const char *str, int thr,
> +                             odp_buffer_pool_t msg_pool,
> +                             int prio, odp_barrier_t *barrier)
> +{
> +       odp_buffer_t buf;
> +       odp_queue_t queue;
> +       uint64_t t1 = 0;
> +       uint64_t t2 = 0;
> +       uint64_t cycles, ns;
> +       uint32_t i;
> +       uint32_t tot = 0;
> +
> +       if (create_queues(thr, msg_pool, prio))
> +               return -1;
> +
> +       /* Start sched-enq loop */
> +       t1 = odp_time_get_cycles();
> +
> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
> +               buf = odp_schedule(&queue, ODP_SCHED_WAIT);
> +
> +               if (odp_queue_enq(queue, buf)) {
> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +                       return -1;
> +               }
> +       }
> +
> +       /* Clear possible locally stored buffers */
> +       odp_schedule_pause();
> +
> +       tot = i;
> +
> +       while (1) {
> +               buf = odp_schedule(&queue, ODP_SCHED_NO_WAIT);
> +
> +               if (buf == ODP_BUFFER_INVALID)
> +                       break;
> +
> +               tot++;
> +
> +               if (odp_queue_enq(queue, buf)) {
> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +                       return -1;
> +               }
> +       }
> +
> +       odp_schedule_resume();
> +
> +       t2     = odp_time_get_cycles();
> +       cycles = odp_time_diff_cycles(t1, t2);
> +       ns     = odp_time_cycles_to_ns(cycles);
> +
> +       odp_barrier_sync(barrier);
> +       clear_sched_queues();
> +
> +       cycles = cycles/tot;
> +       ns     = ns/tot;
> +
> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
> +              thr, str, cycles, ns);
> +
> +       return 0;
> +}
> +
> +/**
> + * @internal Test scheduling of multiple queues with multi_sched and
> multi_enq
> + *
> + * @param str      Test case name string
> + * @param thr      Thread
> + * @param msg_pool Buffer pool
> + * @param prio     Priority
> + * @param barrier  Barrier
> + *
> + * @return 0 if successful
> + */
> +static int test_schedule_multi(const char *str, int thr,
> +                              odp_buffer_pool_t msg_pool,
> +                              int prio, odp_barrier_t *barrier)
> +{
> +       odp_buffer_t buf[MULTI_BUFS_MAX];
> +       odp_queue_t queue;
> +       uint64_t t1 = 0;
> +       uint64_t t2 = 0;
> +       uint64_t cycles, ns;
> +       int i, j;
> +       int num;
> +       uint32_t tot = 0;
> +       char name[] = "sched_XX_YY";
> +
> +       name[6] = '0' + prio/10;
> +       name[7] = '0' + prio - 10*(prio/10);
> +
> +       /* Alloc and enqueue a buffer per queue */
> +       for (i = 0; i < QUEUES_PER_PRIO; i++) {
> +               name[9]  = '0' + i/10;
> +               name[10] = '0' + i - 10*(i/10);
> +
> +               queue = odp_queue_lookup(name);
> +
> +               if (queue == ODP_QUEUE_INVALID) {
> +                       ODP_ERR("  [%i] Queue %s lookup failed.\n", thr,
> name);
> +                       return -1;
> +               }
> +
> +               for (j = 0; j < MULTI_BUFS_MAX; j++) {
> +                       buf[j] = odp_buffer_alloc(msg_pool);
> +
> +                       if (!odp_buffer_is_valid(buf[j])) {
> +                               ODP_ERR("  [%i] msg_pool alloc failed\n",
> thr);
> +                               return -1;
> +                       }
> +               }
> +
> +               if (odp_queue_enq_multi(queue, buf, MULTI_BUFS_MAX)) {
> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +                       return -1;
> +               }
> +       }
> +
> +       /* Start sched-enq loop */
> +       t1 = odp_time_get_cycles();
> +
> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
> +               num = odp_schedule_multi(&queue, ODP_SCHED_WAIT, buf,
> +                                        MULTI_BUFS_MAX);
> +
> +               tot += num;
> +
> +               if (odp_queue_enq_multi(queue, buf, num)) {
> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +                       return -1;
> +               }
> +       }
> +
> +       /* Clear possible locally stored buffers */
> +       odp_schedule_pause();
> +
> +       while (1) {
> +               num = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT, buf,
> +                                        MULTI_BUFS_MAX);
> +
> +               if (num == 0)
> +                       break;
> +
> +               tot += num;
> +
> +               if (odp_queue_enq_multi(queue, buf, num)) {
> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +                       return -1;
> +               }
> +       }
> +
> +       odp_schedule_resume();
> +
> +
> +       t2     = odp_time_get_cycles();
> +       cycles = odp_time_diff_cycles(t1, t2);
> +       ns     = odp_time_cycles_to_ns(cycles);
> +
> +       odp_barrier_sync(barrier);
> +       clear_sched_queues();
> +
> +       if (tot) {
> +               cycles = cycles/tot;
> +               ns     = ns/tot;
> +       } else {
> +               cycles = 0;
> +               ns     = 0;
> +       }
> +
> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
> +              thr, str, cycles, ns);
> +
> +       return 0;
> +}
> +
> +/**
> + * Template function for running the scheduler tests.
> + * The main reason for having this function is that CUnit does not offer
> a way
> + * to pass arguments to a testcase function.
> + * The other reason is that there are common steps for all testcases.
> + */
> +static void *exec_template(void *arg)
> +{
> +       odp_buffer_pool_t msg_pool;
> +       odp_shm_t shm;
> +       test_globals_t *globals;
> +       odp_barrier_t *barrier;
> +       test_case_args_t *args = (test_case_args_t*) arg;
> +
> +       shm     = odp_shm_lookup("test_globals");
> +       globals = odp_shm_addr(shm);
> +
> +       CU_ASSERT(globals != NULL);
> +
> +       barrier = &globals->barrier;
> +
> +       /*
> +        * Sync before start
> +        */
> +       odp_barrier_sync(barrier);
> +
> +       /*
> +        * Find the buffer pool
> +        */
> +       msg_pool = odp_buffer_pool_lookup("msg_pool");
> +
> +       CU_ASSERT(msg_pool != ODP_BUFFER_POOL_INVALID);
> +
> +       odp_barrier_sync(barrier);
> +
> +       /*
> +        * Now run the testcase routine passing the arguments
> +        */
> +       args->func(args->name, odp_thread_id(), msg_pool,
> +                  args->prio, barrier);
> +
> +       return arg;
> +}
> +
> +/* Low prio */
> +
> +static void schedule_one_single_lo(void)
> +{
> +       test_case_args_t args;
> +       snprintf(args.name, sizeof(args.name), "sched_one_s_lo");
> +       args.prio = ODP_SCHED_PRIO_LOWEST;
> +       args.func = test_schedule_one_single;
> +       execute_parallel(exec_template, &args);
> +}
> +
> +static void schedule_single_lo(void)
> +{
> +       test_case_args_t args;
> +       snprintf(args.name, sizeof(args.name), "sched_____s_lo");
> +       args.prio = ODP_SCHED_PRIO_LOWEST;
> +       args.func = test_schedule_single;
> +       execute_parallel(exec_template, &args);
> +}
> +
> +static void schedule_one_many_lo(void)
> +{
> +       test_case_args_t args;
> +       snprintf(args.name, sizeof(args.name), "sched_one_m_lo");
> +       args.prio = ODP_SCHED_PRIO_LOWEST;
> +       args.func = test_schedule_one_many;
> +       execute_parallel(exec_template, &args);
> +}
> +
> +static void schedule_many_lo(void)
> +{
> +       test_case_args_t args;
> +       snprintf(args.name, sizeof(args.name), "sched_____m_lo");
> +       args.prio = ODP_SCHED_PRIO_LOWEST;
> +       args.func = test_schedule_many;
> +       execute_parallel(exec_template, &args);
> +}
> +
> +static void schedule_multi_lo(void)
> +{
> +       test_case_args_t args;
> +       snprintf(args.name, sizeof(args.name), "sched_____m_lo");
> +       args.prio = ODP_SCHED_PRIO_LOWEST;
> +       args.func = test_schedule_multi;
> +       execute_parallel(exec_template, &args);
> +}
> +
> +/* High prio */
> +
> +static void schedule_one_single_hi(void)
> +{
> +       test_case_args_t args;
> +       snprintf(args.name, sizeof(args.name), "sched_one_s_hi");
> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
> +       args.func = test_schedule_single;
> +       execute_parallel(exec_template, &args);
> +}
> +
> +static void schedule_single_hi(void)
> +{
> +       test_case_args_t args;
> +       snprintf(args.name, sizeof(args.name), "sched_____s_hi");
> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
> +       args.func = test_schedule_single;
> +       execute_parallel(exec_template, &args);
> +}
> +
> +static void schedule_one_many_hi(void)
> +{
> +       test_case_args_t args;
> +       snprintf(args.name, sizeof(args.name), "sched_one_m_hi");
> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
> +       args.func = test_schedule_one_many;
> +       execute_parallel(exec_template, &args);
> +}
> +
> +static void schedule_many_hi(void)
> +{
> +       test_case_args_t args;
> +       snprintf(args.name, sizeof(args.name), "sched_____m_hi");
> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
> +       args.func = test_schedule_many;
> +       execute_parallel(exec_template, &args);
> +}
> +
> +static void schedule_multi_hi(void)
> +{
> +       test_case_args_t args;
> +       snprintf(args.name, sizeof(args.name), "sched_multi_hi");
> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
> +       args.func = test_schedule_multi;
> +       execute_parallel(exec_template, &args);
> +}
> +
> +static void execute_parallel(void *(*start_routine) (void *),
> +                            test_case_args_t *test_case_args)
> +{
> +       odph_linux_pthread_t thread_tbl[MAX_WORKERS];
> +       int first_core;
> +
> +       memset(thread_tbl, 0, sizeof(thread_tbl));
> +
> +       /*
> +        * By default core #0 runs Linux kernel background tasks.
> +        * Start mapping thread from core #1
> +        */
> +       first_core = 1;
> +
> +       if (odp_sys_core_count() == 1)
> +               first_core = 0;
> +
> +       odph_linux_pthread_create(thread_tbl, num_workers, first_core,
> +                                       start_routine, test_case_args);
> +
> +       /* Wait for worker threads to terminate */
> +       odph_linux_pthread_join(thread_tbl, num_workers);
> +}
> +
> +static odp_buffer_pool_t test_odp_buffer_pool_init(void)
> +{
> +       void *pool_base;
> +       odp_shm_t shm;
> +       odp_buffer_pool_t pool;
> +
> +       shm = odp_shm_reserve("msg_pool",
> +                             MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
> +
> +       pool_base = odp_shm_addr(shm);
> +
> +       if (NULL == pool_base) {
> +               printf("Shared memory reserve failed.\n");
> +               return -1;
> +       }
> +
> +       pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE,
> +                                     BUF_SIZE, ODP_CACHE_LINE_SIZE,
> +                                     ODP_BUFFER_TYPE_RAW);
> +
> +       if (ODP_BUFFER_POOL_INVALID == pool) {
> +               printf("Pool create failed.\n");
> +               return -1;
> +       }
> +       return pool;
> +}
> +
> +int schedule_test_init(void)
> +{
> +       test_args_t args;
> +       odp_shm_t shm;
> +       test_globals_t *globals;
> +       int i, j;
> +       int prios;
> +
> +       if (0 != odp_init_global(NULL, NULL)) {
> +               printf("odp_init_global fail.\n");
> +               return -1;
> +       }
> +       if (0 != odp_init_local()) {
> +               printf("odp_init_local fail.\n");
> +               return -1;
> +       }
> +       if (ODP_BUFFER_POOL_INVALID == test_odp_buffer_pool_init()) {
> +               printf("test_odp_buffer_pool_init fail.\n");
> +               return -1;
> +       }
> +
> +       /* A worker thread per core */
> +       num_workers = odp_sys_core_count();
> +
> +       if (args.core_count)
> +               num_workers = args.core_count;
> +
> +       /* force to max core count */
> +       if (num_workers > MAX_WORKERS)
> +               num_workers = MAX_WORKERS;
> +       shm = odp_shm_reserve("test_globals",
> +                             sizeof(test_globals_t), ODP_CACHE_LINE_SIZE,
> 0);
> +
> +       globals = odp_shm_addr(shm);
> +
> +       if (globals == NULL) {
> +               ODP_ERR("Shared memory reserve failed.\n");
> +               return -1;
> +       }
> +
> +       memset(globals, 0, sizeof(test_globals_t));
> +
> +       /* Barrier to sync test case execution */
> +       odp_barrier_init_count(&globals->barrier, num_workers);
> +
> +       prios = odp_schedule_num_prio();
> +
> +       for (i = 0; i < prios; i++) {
> +               odp_queue_param_t param;
> +               odp_queue_t queue;
> +               char name[] = "sched_XX_YY";
> +
> +               if (i != ODP_SCHED_PRIO_HIGHEST &&
> +                   i != ODP_SCHED_PRIO_LOWEST)
> +                       continue;
> +
> +               name[6] = '0' + i/10;
> +               name[7] = '0' + i - 10*(i/10);
> +
> +               param.sched.prio  = i;
> +               param.sched.sync  = ODP_SCHED_SYNC_ATOMIC;
> +               param.sched.group = ODP_SCHED_GROUP_DEFAULT;
> +
> +               for (j = 0; j < QUEUES_PER_PRIO; j++) {
> +                       name[9]  = '0' + j/10;
> +                       name[10] = '0' + j - 10*(j/10);
> +
> +                       queue = odp_queue_create(name,
> ODP_QUEUE_TYPE_SCHED,
> +                                                &param);
> +
> +                       if (queue == ODP_QUEUE_INVALID) {
> +                               ODP_ERR("Schedule queue create failed.\n");
> +                               return -1;
> +                       }
> +               }
> +       }
> +       return 0;
> +}
> +
> +int schedule_test_finalize(void)
> +{
> +       odp_term_local();
> +       odp_term_global();
> +       return 0;
> +}
> +
> +struct CU_TestInfo schedule_tests[] = {
> +       _CU_TEST_INFO(schedule_wait_time),
> +       _CU_TEST_INFO(schedule_one_single_lo),
> +       _CU_TEST_INFO(schedule_single_lo),
> +       _CU_TEST_INFO(schedule_one_many_lo),
> +       _CU_TEST_INFO(schedule_many_lo),
> +       _CU_TEST_INFO(schedule_multi_lo),
> +       _CU_TEST_INFO(schedule_one_single_hi),
> +       _CU_TEST_INFO(schedule_single_hi),
> +       _CU_TEST_INFO(schedule_one_many_hi),
> +       _CU_TEST_INFO(schedule_many_hi),
> +       _CU_TEST_INFO(schedule_multi_hi),
> +       CU_TEST_INFO_NULL,
> +};
> diff --git a/test/cunit/schedule/odp_schedule_testsuites.c
> b/test/cunit/schedule/odp_schedule_testsuites.c
> new file mode 100644
> index 0000000..1053069
> --- /dev/null
> +++ b/test/cunit/schedule/odp_schedule_testsuites.c
> @@ -0,0 +1,35 @@
> +/* Copyright (c) 2014, Linaro Limited
> + * All rights reserved.
> + *
> + * SPDX-License-Identifier:     BSD-3-Clause
> + */
> +
> +#include "odp_schedule_testsuites.h"
> +
> +static CU_SuiteInfo suites[] = {
> +       {
> +               "Scheduler tests" ,
> +               schedule_test_init,
> +               schedule_test_finalize,
> +               NULL,
> +               NULL,
> +               schedule_tests
> +       },
> +       CU_SUITE_INFO_NULL,
> +};
> +
> +int main(void)
> +{
> +       /* initialize the CUnit test registry */
> +       if (CUE_SUCCESS != CU_initialize_registry())
> +               return CU_get_error();
> +
> +       /* register suites */
> +       CU_register_suites(suites);
> +       /* Run all tests using the CUnit Basic interface */
> +       CU_basic_set_mode(CU_BRM_VERBOSE);
> +       CU_basic_run_tests();
> +       CU_cleanup_registry();
> +
> +       return CU_get_error();
> +}
> diff --git a/test/cunit/schedule/odp_schedule_testsuites.h
> b/test/cunit/schedule/odp_schedule_testsuites.h
> new file mode 100644
> index 0000000..67a2a69
> --- /dev/null
> +++ b/test/cunit/schedule/odp_schedule_testsuites.h
> @@ -0,0 +1,21 @@
> +/* Copyright (c) 2014, Linaro Limited
> + * All rights reserved.
> + *
> + * SPDX-License-Identifier:     BSD-3-Clause
> + */
> +
> +#ifndef ODP_SCHEDULE_TESTSUITES_H_
> +#define ODP_SCHEDULE_TESTSUITES_H_
> +
> +#include "odp.h"
> +#include <CUnit/Basic.h>
> +
> +/* Helper macro for CU_TestInfo initialization */
> +#define _CU_TEST_INFO(test_func) {#test_func, test_func}
> +
> +extern struct CU_TestInfo schedule_tests[];
> +
> +extern int schedule_test_init(void);
> +extern int schedule_test_finalize(void);
> +
> +#endif /* ODP_SCHEDULE_TESTSUITES_H_ */
> --
> 1.8.3.2
>
>
> _______________________________________________
> lng-odp mailing list
> lng-odp@lists.linaro.org
> http://lists.linaro.org/mailman/listinfo/lng-odp
>
Taras Kondratiuk Nov. 21, 2014, 10:25 a.m. UTC | #2
On 11/21/2014 12:18 PM, Alexandru Badicioiu wrote:
> Hi,
> the scheduling tests in odp_example were discussed some time ago and
> there was an agreement, at least for FSL and TI platforms, that fair
> scheduling assumed by the following loop:
> for (i = 0; i < QUEUE_ROUNDS; i++) {
>                 buf = odp_schedule_one(&queue, ODP_SCHED_WAIT);
>
>                 if (odp_queue_enq(queue, buf)) {
>                         ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>                         return -1;
>                 }
>         }
>
> for an ATOMIC queue doesn't make sense as the behavior of an ATOMIC
> hardware queue is to be scheduled to the same core as long as there are
> packets in the queue and the core has room to dequeue them (other
> platforms please confirm or infirm). On my specific platform I can force
> this with a particular HW configuration, but this configuration
> effectively disables the use of POLL queues.
> I think we need scheduling tests for the most general case (SYNC_NONE
> and no assumption about how many buffers are scheduled to a particular
> core).

I agree. Tests should not assume fair scheduling.
Balasubramanian Manoharan Nov. 21, 2014, 10:27 a.m. UTC | #3
I agree. The test should only check whether all the buffers that have been
enqueued have been dispatched by the scheduler.
Fair scheduling cannot be tested with CUnit; that should be covered during
performance evaluation if needed.
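
A sketch of that completeness check, building on a per-worker counting loop
like the one sketched in #1 (per_worker_count and TOTAL_ENQUEUED are
hypothetical names here, not from the patch):

/* Hedged sketch: sum what every worker actually received and compare it
 * with what the test enqueued; no per-core expectation involved. */
uint32_t total_scheduled = 0;
int w;

for (w = 0; w < num_workers; w++)
        total_scheduled += per_worker_count[w]; /* filled in by the workers */

CU_ASSERT(total_scheduled == TOTAL_ENQUEUED);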

Regards,
Bala

On 21 November 2014 15:55, Taras Kondratiuk <taras.kondratiuk@linaro.org>
wrote:

> On 11/21/2014 12:18 PM, Alexandru Badicioiu wrote:
>
>> Hi,
>> the scheduling tests in odp_example were discussed some time ago and
>> there was an agreement, at least for FSL and TI platforms, that fair
>> scheduling assumed by the following loop:
>> for (i = 0; i < QUEUE_ROUNDS; i++) {
>>                 buf = odp_schedule_one(&queue, ODP_SCHED_WAIT);
>>
>>                 if (odp_queue_enq(queue, buf)) {
>>                         ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>>                         return -1;
>>                 }
>>         }
>>
>> for an ATOMIC queue doesn't make sense as the behavior of an ATOMIC
>> hardware queue is to be scheduled to the same core as long as there are
>> packets in the queue and the core has room to dequeue them (other
>> platforms please confirm or infirm). On my specific platform I can force
>> this with a particular HW configuration, but this configuration
>> effectively disables the use of POLL queues.
>> I think we need scheduling tests for the most general case (SYNC_NONE
>> and no assumption about how many buffers are scheduled to a particular
>> core).
>>
>
> I agree. Tests should not assume fair scheduling.
>
>
> _______________________________________________
> lng-odp mailing list
> lng-odp@lists.linaro.org
> http://lists.linaro.org/mailman/listinfo/lng-odp
>
Taras Kondratiuk Nov. 21, 2014, 11:31 a.m. UTC | #4
On 11/20/2014 09:02 PM, Ciprian Barbu wrote:
> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
> ---
> The testcases are based almost entirely on the odp_example.
> There are no alloc tests and I added a test case for odp_schedule_wait_time.
> The major differencs between the odp_example and this cunit is the partition
> into testcases, the odp_example calls every test case from one big function.
>
> I had to work some magic in order to be able to pass arguments to test cases,
> I hope is not too hard to follow.
>
>   configure.ac                                  |   1 +
>   test/cunit/Makefile.am                        |   2 +
>   test/cunit/schedule/Makefile.am               |  10 +
>   test/cunit/schedule/odp_schedule_test.c       | 844 ++++++++++++++++++++++++++
>   test/cunit/schedule/odp_schedule_testsuites.c |  35 ++
>   test/cunit/schedule/odp_schedule_testsuites.h |  21 +
>   6 files changed, 913 insertions(+)
>   create mode 100644 test/cunit/schedule/Makefile.am
>   create mode 100644 test/cunit/schedule/odp_schedule_test.c
>   create mode 100644 test/cunit/schedule/odp_schedule_testsuites.c
>   create mode 100644 test/cunit/schedule/odp_schedule_testsuites.h
>
> diff --git a/configure.ac b/configure.ac
> index fcd7279..a47db72 100644
> --- a/configure.ac
> +++ b/configure.ac
> @@ -173,6 +173,7 @@ AC_CONFIG_FILES([Makefile
>   		 test/Makefile
>   		 test/api_test/Makefile
>                    test/cunit/Makefile
> +                 test/cunit/schedule/Makefile
>   		 pkgconfig/libodp.pc])
>
>   AC_SEARCH_LIBS([timer_create],[rt posix4])
> diff --git a/test/cunit/Makefile.am b/test/cunit/Makefile.am
> index 439e134..b6033ee 100644
> --- a/test/cunit/Makefile.am
> +++ b/test/cunit/Makefile.am
> @@ -3,6 +3,8 @@ include $(top_srcdir)/test/Makefile.inc
>   AM_CFLAGS += -I$(CUNIT_PATH)/include
>   AM_LDFLAGS += -L$(CUNIT_PATH)/lib -static -lcunit
>
> +SUBDIRS = schedule
> +

Don't create a separate Makefile, so that all test binaries end up in the
same directory.

>   if ODP_CUNIT_ENABLED
>   TESTS = ${bin_PROGRAMS}
>   check_PROGRAMS = ${bin_PROGRAMS}

> +
> +struct CU_TestInfo schedule_tests[] = {
> +	_CU_TEST_INFO(schedule_wait_time),
> +	_CU_TEST_INFO(schedule_one_single_lo),
> +	_CU_TEST_INFO(schedule_single_lo),
> +	_CU_TEST_INFO(schedule_one_many_lo),
> +	_CU_TEST_INFO(schedule_many_lo),
> +	_CU_TEST_INFO(schedule_multi_lo),
> +	_CU_TEST_INFO(schedule_one_single_hi),
> +	_CU_TEST_INFO(schedule_single_hi),
> +	_CU_TEST_INFO(schedule_one_many_hi),
> +	_CU_TEST_INFO(schedule_many_hi),
> +	_CU_TEST_INFO(schedule_multi_hi),
> +	CU_TEST_INFO_NULL,
> +};

schedule_one() will be dropped, so these tests can also be dropped.

I think odp_example is not a good base for scheduler tests. It was
written as a benchmark, not as a verification test.

Basic scheduler tests are missing:
- verify that priority works correctly (e.g. as sketched below).
- verify that atomicity works correctly for ATOMIC queues.
- verify ordering for ORDERED queues.
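
As a starting point for the priority item, a single-threaded sketch (hi_queue
and lo_queue are assumed to have been created at ODP_SCHED_PRIO_HIGHEST and
ODP_SCHED_PRIO_LOWEST; this is not from the patch):

/* Hedged sketch: with one buffer in a low-prio queue and one in a
 * high-prio queue, the scheduler should hand out the high-prio one first. */
static void check_prio_order(odp_queue_t hi_queue, odp_queue_t lo_queue,
                             odp_buffer_pool_t pool)
{
        odp_buffer_t buf;
        odp_queue_t from;

        CU_ASSERT(odp_queue_enq(lo_queue, odp_buffer_alloc(pool)) == 0);
        CU_ASSERT(odp_queue_enq(hi_queue, odp_buffer_alloc(pool)) == 0);

        buf = odp_schedule(&from, ODP_SCHED_WAIT);
        CU_ASSERT(from == hi_queue);    /* high priority comes out first */
        odp_buffer_free(buf);

        buf = odp_schedule(&from, ODP_SCHED_WAIT);
        CU_ASSERT(from == lo_queue);
        odp_buffer_free(buf);
}
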
Ciprian Barbu Nov. 21, 2014, 1:33 p.m. UTC | #5
On Fri, Nov 21, 2014 at 12:18 PM, Alexandru Badicioiu
<alexandru.badicioiu@linaro.org> wrote:
> Hi,
> the scheduling tests in odp_example were discussed some time ago and there
> was an agreement, at least for FSL and TI platforms, that fair scheduling
> assumed by the following loop:
> for (i = 0; i < QUEUE_ROUNDS; i++) {
>                buf = odp_schedule_one(&queue, ODP_SCHED_WAIT);
>
>                if (odp_queue_enq(queue, buf)) {
>                        ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>                        return -1;
>                }
>        }
>
> for an ATOMIC queue doesn't make sense as the behavior of an ATOMIC hardware
> queue is to be scheduled to the same core as long as there are packets in
> the queue and the core has room to dequeue them (other platforms please
> confirm or infirm). On my specific platform I can force this with a
> particular HW configuration, but this configuration effectively disables the
> use of POLL queues.
> I think we need scheduling tests for the most general case (SYNC_NONE and no
> assumption about how many buffers are scheduled to a particular core).

Ok, I remember that discussion; I didn't understand it at the time.

So here is a question: is it better to drop the multi-threading
structure and do everything in a single thread, or to keep multiple
threads but use ODP_SCHED_NO_WAIT? Or both?

>
> Thanks,
> Alex
>
>
> On 20 November 2014 21:02, Ciprian Barbu <ciprian.barbu@linaro.org> wrote:
>>
>> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
>> ---
>> The testcases are based almost entirely on the odp_example.
>> There are no alloc tests and I added a test case for
>> odp_schedule_wait_time.
>> The major differencs between the odp_example and this cunit is the
>> partition
>> into testcases, the odp_example calls every test case from one big
>> function.
>>
>> I had to work some magic in order to be able to pass arguments to test
>> cases,
>> I hope is not too hard to follow.
>>
>>  configure.ac                                  |   1 +
>>  test/cunit/Makefile.am                        |   2 +
>>  test/cunit/schedule/Makefile.am               |  10 +
>>  test/cunit/schedule/odp_schedule_test.c       | 844
>> ++++++++++++++++++++++++++
>>  test/cunit/schedule/odp_schedule_testsuites.c |  35 ++
>>  test/cunit/schedule/odp_schedule_testsuites.h |  21 +
>>  6 files changed, 913 insertions(+)
>>  create mode 100644 test/cunit/schedule/Makefile.am
>>  create mode 100644 test/cunit/schedule/odp_schedule_test.c
>>  create mode 100644 test/cunit/schedule/odp_schedule_testsuites.c
>>  create mode 100644 test/cunit/schedule/odp_schedule_testsuites.h
>>
>> diff --git a/configure.ac b/configure.ac
>> index fcd7279..a47db72 100644
>> --- a/configure.ac
>> +++ b/configure.ac
>> @@ -173,6 +173,7 @@ AC_CONFIG_FILES([Makefile
>>                  test/Makefile
>>                  test/api_test/Makefile
>>                   test/cunit/Makefile
>> +                 test/cunit/schedule/Makefile
>>                  pkgconfig/libodp.pc])
>>
>>  AC_SEARCH_LIBS([timer_create],[rt posix4])
>> diff --git a/test/cunit/Makefile.am b/test/cunit/Makefile.am
>> index 439e134..b6033ee 100644
>> --- a/test/cunit/Makefile.am
>> +++ b/test/cunit/Makefile.am
>> @@ -3,6 +3,8 @@ include $(top_srcdir)/test/Makefile.inc
>>  AM_CFLAGS += -I$(CUNIT_PATH)/include
>>  AM_LDFLAGS += -L$(CUNIT_PATH)/lib -static -lcunit
>>
>> +SUBDIRS = schedule
>> +
>>  if ODP_CUNIT_ENABLED
>>  TESTS = ${bin_PROGRAMS}
>>  check_PROGRAMS = ${bin_PROGRAMS}
>> diff --git a/test/cunit/schedule/Makefile.am
>> b/test/cunit/schedule/Makefile.am
>> new file mode 100644
>> index 0000000..ad68b03
>> --- /dev/null
>> +++ b/test/cunit/schedule/Makefile.am
>> @@ -0,0 +1,10 @@
>> +include $(top_srcdir)/test/Makefile.inc
>> +
>> +if ODP_CUNIT_ENABLED
>> +bin_PROGRAMS = odp_schedule_test
>> +odp_schedule_test_LDFLAGS = $(AM_LDFLAGS) -L$(CUNIT_PATH)/lib -static
>> -lcunit
>> +odp_schedule_test_CFLAGS = $(AM_CFLAGS) -I$(CUNIT_PATH)/include
>> +endif
>> +
>> +dist_odp_schedule_test_SOURCES = odp_schedule_test.c \
>> +                                odp_schedule_testsuites.c
>> diff --git a/test/cunit/schedule/odp_schedule_test.c
>> b/test/cunit/schedule/odp_schedule_test.c
>> new file mode 100644
>> index 0000000..fa67f6e
>> --- /dev/null
>> +++ b/test/cunit/schedule/odp_schedule_test.c
>> @@ -0,0 +1,844 @@
>> +/* Copyright (c) 2014, Linaro Limited
>> + * All rights reserved.
>> + *
>> + * SPDX-License-Identifier:     BSD-3-Clause
>> + */
>> +
>> +#include "odp_schedule_testsuites.h"
>> +#include <odph_linux.h>
>> +
>> +#define MAX_WORKERS            32            /**< Max worker threads */
>> +#define MSG_POOL_SIZE           (4*1024*1024)
>> +#define QUEUES_PER_PRIO                64            /**< Queue per
>> priority */
>> +#define QUEUE_ROUNDS           (512*1024)    /**< Queue test rounds */
>> +#define MULTI_BUFS_MAX         4             /**< Buffer burst size */
>> +#define BUF_SIZE               64
>> +
>> +#define SCHED_MSG "Test_buff_FOR_simple_schedule"
>> +
>> +/** Test arguments */
>> +typedef struct {
>> +       int core_count; /**< Core count */
>> +       int proc_mode;  /**< Process mode */
>> +} test_args_t;
>> +
>> +typedef int (*test_case_routine)(const char *, int, odp_buffer_pool_t,
>> +                                int, odp_barrier_t *);
>> +
>> +/** Scheduler test case arguments */
>> +typedef struct {
>> +       char name[64];  /**< test case name */
>> +       int prio;
>> +       test_case_routine func;
>> +} test_case_args_t;
>> +
>> +/** Test global variables */
>> +typedef struct {
>> +       odp_barrier_t barrier;/**< @private Barrier for test
>> synchronisation */
>> +       test_args_t test_args;/**< @private Test case function and
>> arguments */
>> +} test_globals_t;
>> +
>> +static void execute_parallel(void *(*func) (void *), test_case_args_t *);
>> +static int num_workers;
>> +
>> +/**
>> + * @internal CUnit test case for verifying functionality of
>> + *           schedule_wait_time
>> + */
>> +static void schedule_wait_time(void)
>> +{
>> +       uint64_t wait_time;
>> +
>> +       wait_time = odp_schedule_wait_time(0);
>> +       CU_ASSERT(wait_time > 0);
>> +       CU_PASS("schedule_wait_time(0)");
>> +
>> +       wait_time = odp_schedule_wait_time(1);
>> +       CU_ASSERT(wait_time > 0);
>> +       CU_PASS("schedule_wait_time(1)");
>> +
>> +       wait_time = odp_schedule_wait_time((uint64_t)-1LL);
>> +       CU_ASSERT(wait_time > 0);
>> +       CU_PASS("schedule_wait_time(MAX_LONG_INT)");
>> +}
>> +
>> +/**
>> + * @internal Clear all scheduled queues. Retry to be sure that all
>> + * buffers have been scheduled.
>> + */
>> +static void clear_sched_queues(void)
>> +{
>> +       odp_buffer_t buf;
>> +
>> +       while (1) {
>> +               buf = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
>> +
>> +               if (buf == ODP_BUFFER_INVALID)
>> +                       break;
>> +
>> +               odp_buffer_free(buf);
>> +       }
>> +}
>> +
>> +/**
>> + * @internal Create multiple queues from a pool of buffers
>> + *
>> + * @param thr  Thread
>> + * @param msg_pool  Buffer pool
>> + * @param prio   Queue priority
>> + *
>> + * @return 0 if successful
>> + */
>> +static int create_queues(int thr, odp_buffer_pool_t msg_pool, int prio)
>> +{
>> +       char name[] = "sched_XX_YY";
>> +       odp_buffer_t buf;
>> +       odp_queue_t queue;
>> +       int i;
>> +
>> +       name[6] = '0' + prio/10;
>> +       name[7] = '0' + prio - 10*(prio/10);
>> +
>> +       /* Alloc and enqueue a buffer per queue */
>> +       for (i = 0; i < QUEUES_PER_PRIO; i++) {
>> +               name[9]  = '0' + i/10;
>> +               name[10] = '0' + i - 10*(i/10);
>> +
>> +               queue = odp_queue_lookup(name);
>> +
>> +               if (queue == ODP_QUEUE_INVALID) {
>> +                       ODP_ERR("  [%i] Queue %s lookup failed.\n", thr,
>> name);
>> +                       return -1;
>> +               }
>> +
>> +               buf = odp_buffer_alloc(msg_pool);
>> +
>> +               if (!odp_buffer_is_valid(buf)) {
>> +                       ODP_ERR("  [%i] msg_pool alloc failed\n", thr);
>> +                       return -1;
>> +               }
>> +
>> +               if (odp_queue_enq(queue, buf)) {
>> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> +                       return -1;
>> +               }
>> +       }
>> +
>> +       return 0;
>> +}
>> +
>> +/**
>> + * @internal Create a single queue from a pool of buffers
>> + *
>> + * @param thr  Thread
>> + * @param msg_pool  Buffer pool
>> + * @param prio   Queue priority
>> + *
>> + * @return 0 if successful
>> + */
>> +static int create_queue(int thr, odp_buffer_pool_t msg_pool, int prio)
>> +{
>> +       char name[] = "sched_XX_00";
>> +       odp_buffer_t buf;
>> +       odp_queue_t queue;
>> +
>> +       buf = odp_buffer_alloc(msg_pool);
>> +
>> +       if (!odp_buffer_is_valid(buf)) {
>> +               ODP_ERR("  [%i] msg_pool alloc failed\n", thr);
>> +               return -1;
>> +       }
>> +
>> +       name[6] = '0' + prio/10;
>> +       name[7] = '0' + prio - 10*(prio/10);
>> +
>> +       queue = odp_queue_lookup(name);
>> +
>> +       if (queue == ODP_QUEUE_INVALID) {
>> +               ODP_ERR("  [%i] Queue %s lookup failed.\n", thr, name);
>> +               return -1;
>> +       }
>> +
>> +       if (odp_queue_enq(queue, buf)) {
>> +               ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> +               return -1;
>> +       }
>> +
>> +       return 0;
>> +}
>> +
>> +/**
>> + * @internal Test scheduling of a single queue - with odp_schedule_one()
>> + *
>> + * Enqueue a buffer to the shared queue. Schedule and enqueue the
>> received
>> + * buffer back into the queue.
>> + *
>> + * @param str      Test case name string
>> + * @param thr      Thread
>> + * @param msg_pool Buffer pool
>> + * @param prio     Priority
>> + * @param barrier  Barrier
>> + *
>> + * @return 0 if successful
>> + */
>> +static int test_schedule_one_single(const char *str, int thr,
>> +                                   odp_buffer_pool_t msg_pool,
>> +                                   int prio, odp_barrier_t *barrier)
>> +{
>> +       odp_buffer_t buf;
>> +       odp_queue_t queue;
>> +       uint64_t t1, t2, cycles, ns;
>> +       uint32_t i;
>> +       uint32_t tot = 0;
>> +
>> +       if (create_queue(thr, msg_pool, prio)) {
>> +               CU_FAIL_FATAL("lookup queue");
>> +               return -1;
>> +       }
>> +
>> +       t1 = odp_time_get_cycles();
>> +
>> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
>> +               buf = odp_schedule_one(&queue, ODP_SCHED_WAIT);
>> +
>> +               if (odp_queue_enq(queue, buf)) {
>> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> +                       return -1;
>> +               }
>> +       }
>> +
>> +       if (odp_queue_sched_type(queue) == ODP_SCHED_SYNC_ATOMIC)
>> +               odp_schedule_release_atomic();
>> +
>> +       t2     = odp_time_get_cycles();
>> +       cycles = odp_time_diff_cycles(t1, t2);
>> +       ns     = odp_time_cycles_to_ns(cycles);
>> +       tot    = i;
>> +
>> +       odp_barrier_sync(barrier);
>> +       clear_sched_queues();
>> +
>> +       cycles = cycles/tot;
>> +       ns     = ns/tot;
>> +
>> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
>> +              thr, str, cycles, ns);
>> +
>> +       return 0;
>> +}
>> +
>> +/**
>> + * @internal Test scheduling of multiple queues - with odp_schedule_one()
>> + *
>> + * Enqueue a buffer to each queue. Schedule and enqueue the received
>> + * buffer back into the queue it came from.
>> + *
>> + * @param str      Test case name string
>> + * @param thr      Thread
>> + * @param msg_pool Buffer pool
>> + * @param prio     Priority
>> + * @param barrier  Barrier
>> + *
>> + * @return 0 if successful
>> + */
>> +static int test_schedule_one_many(const char *str, int thr,
>> +                                 odp_buffer_pool_t msg_pool,
>> +                                 int prio, odp_barrier_t *barrier)
>> +{
>> +       odp_buffer_t buf;
>> +       odp_queue_t queue;
>> +       uint64_t t1 = 0;
>> +       uint64_t t2 = 0;
>> +       uint64_t cycles, ns;
>> +       uint32_t i;
>> +       uint32_t tot = 0;
>> +
>> +       if (create_queues(thr, msg_pool, prio))
>> +               return -1;
>> +
>> +       /* Start sched-enq loop */
>> +       t1 = odp_time_get_cycles();
>> +
>> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
>> +               buf = odp_schedule_one(&queue, ODP_SCHED_WAIT);
>> +
>> +               if (odp_queue_enq(queue, buf)) {
>> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> +                       return -1;
>> +               }
>> +       }
>> +
>> +       if (odp_queue_sched_type(queue) == ODP_SCHED_SYNC_ATOMIC)
>> +               odp_schedule_release_atomic();
>> +
>> +       t2     = odp_time_get_cycles();
>> +       cycles = odp_time_diff_cycles(t1, t2);
>> +       ns     = odp_time_cycles_to_ns(cycles);
>> +       tot    = i;
>> +
>> +       odp_barrier_sync(barrier);
>> +       clear_sched_queues();
>> +
>> +       cycles = cycles/tot;
>> +       ns     = ns/tot;
>> +
>> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
>> +              thr, str, cycles, ns);
>> +
>> +       return 0;
>> +}
>> +
>> +/**
>> + * @internal Test scheduling of a single queue - with odp_schedule()
>> + *
>> + * Enqueue a buffer to the shared queue. Schedule and enqueue the
>> received
>> + * buffer back into the queue.
>> + *
>> + * @param str      Test case name string
>> + * @param thr      Thread
>> + * @param msg_pool Buffer pool
>> + * @param prio     Priority
>> + * @param barrier  Barrier
>> + *
>> + * @return 0 if successful
>> + */
>> +static int test_schedule_single(const char *str, int thr,
>> +                               odp_buffer_pool_t msg_pool,
>> +                               int prio, odp_barrier_t *barrier)
>> +{
>> +       odp_buffer_t buf;
>> +       odp_queue_t queue;
>> +       uint64_t t1, t2, cycles, ns;
>> +       uint32_t i;
>> +       uint32_t tot = 0;
>> +
>> +       if (create_queue(thr, msg_pool, prio))
>> +               return -1;
>> +
>> +       t1 = odp_time_get_cycles();
>> +
>> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
>> +               buf = odp_schedule(&queue, ODP_SCHED_WAIT);
>> +
>> +               if (odp_queue_enq(queue, buf)) {
>> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> +                       return -1;
>> +               }
>> +       }
>> +
>> +       /* Clear possible locally stored buffers */
>> +       odp_schedule_pause();
>> +
>> +       tot = i;
>> +
>> +       while (1) {
>> +               buf = odp_schedule(&queue, ODP_SCHED_NO_WAIT);
>> +
>> +               if (buf == ODP_BUFFER_INVALID)
>> +                       break;
>> +
>> +               tot++;
>> +
>> +               if (odp_queue_enq(queue, buf)) {
>> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> +                       return -1;
>> +               }
>> +       }
>> +
>> +       odp_schedule_resume();
>> +
>> +       t2     = odp_time_get_cycles();
>> +       cycles = odp_time_diff_cycles(t1, t2);
>> +       ns     = odp_time_cycles_to_ns(cycles);
>> +
>> +       odp_barrier_sync(barrier);
>> +       clear_sched_queues();
>> +
>> +       cycles = cycles/tot;
>> +       ns     = ns/tot;
>> +
>> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
>> +              thr, str, cycles, ns);
>> +
>> +       return 0;
>> +}
>> +
>> +/**
>> + * @internal Test scheduling of multiple queues - with odp_schedule()
>> + *
>> + * Enqueue a buffer to each queue. Schedule and enqueue the received
>> + * buffer back into the queue it came from.
>> + *
>> + * @param str      Test case name string
>> + * @param thr      Thread
>> + * @param msg_pool Buffer pool
>> + * @param prio     Priority
>> + * @param barrier  Barrier
>> + *
>> + * @return 0 if successful
>> + */
>> +static int test_schedule_many(const char *str, int thr,
>> +                             odp_buffer_pool_t msg_pool,
>> +                             int prio, odp_barrier_t *barrier)
>> +{
>> +       odp_buffer_t buf;
>> +       odp_queue_t queue;
>> +       uint64_t t1 = 0;
>> +       uint64_t t2 = 0;
>> +       uint64_t cycles, ns;
>> +       uint32_t i;
>> +       uint32_t tot = 0;
>> +
>> +       if (create_queues(thr, msg_pool, prio))
>> +               return -1;
>> +
>> +       /* Start sched-enq loop */
>> +       t1 = odp_time_get_cycles();
>> +
>> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
>> +               buf = odp_schedule(&queue, ODP_SCHED_WAIT);
>> +
>> +               if (odp_queue_enq(queue, buf)) {
>> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> +                       return -1;
>> +               }
>> +       }
>> +
>> +       /* Clear possible locally stored buffers */
>> +       odp_schedule_pause();
>> +
>> +       tot = i;
>> +
>> +       while (1) {
>> +               buf = odp_schedule(&queue, ODP_SCHED_NO_WAIT);
>> +
>> +               if (buf == ODP_BUFFER_INVALID)
>> +                       break;
>> +
>> +               tot++;
>> +
>> +               if (odp_queue_enq(queue, buf)) {
>> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> +                       return -1;
>> +               }
>> +       }
>> +
>> +       odp_schedule_resume();
>> +
>> +       t2     = odp_time_get_cycles();
>> +       cycles = odp_time_diff_cycles(t1, t2);
>> +       ns     = odp_time_cycles_to_ns(cycles);
>> +
>> +       odp_barrier_sync(barrier);
>> +       clear_sched_queues();
>> +
>> +       cycles = cycles/tot;
>> +       ns     = ns/tot;
>> +
>> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
>> +              thr, str, cycles, ns);
>> +
>> +       return 0;
>> +}
>> +
>> +/**
>> + * @internal Test scheduling of multiple queues with multi_sched and multi_enq
>> + *
>> + * @param str      Test case name string
>> + * @param thr      Thread
>> + * @param msg_pool Buffer pool
>> + * @param prio     Priority
>> + * @param barrier  Barrier
>> + *
>> + * @return 0 if successful
>> + */
>> +static int test_schedule_multi(const char *str, int thr,
>> +                              odp_buffer_pool_t msg_pool,
>> +                              int prio, odp_barrier_t *barrier)
>> +{
>> +       odp_buffer_t buf[MULTI_BUFS_MAX];
>> +       odp_queue_t queue;
>> +       uint64_t t1 = 0;
>> +       uint64_t t2 = 0;
>> +       uint64_t cycles, ns;
>> +       int i, j;
>> +       int num;
>> +       uint32_t tot = 0;
>> +       char name[] = "sched_XX_YY";
>> +
>> +       name[6] = '0' + prio/10;
>> +       name[7] = '0' + prio - 10*(prio/10);
>> +
>> +       /* Alloc and enqueue a buffer per queue */
>> +       for (i = 0; i < QUEUES_PER_PRIO; i++) {
>> +               name[9]  = '0' + i/10;
>> +               name[10] = '0' + i - 10*(i/10);
>> +
>> +               queue = odp_queue_lookup(name);
>> +
>> +               if (queue == ODP_QUEUE_INVALID) {
>> +                       ODP_ERR("  [%i] Queue %s lookup failed.\n", thr, name);
>> +                       return -1;
>> +               }
>> +
>> +               for (j = 0; j < MULTI_BUFS_MAX; j++) {
>> +                       buf[j] = odp_buffer_alloc(msg_pool);
>> +
>> +                       if (!odp_buffer_is_valid(buf[j])) {
>> +                               ODP_ERR("  [%i] msg_pool alloc failed\n", thr);
>> +                               return -1;
>> +                       }
>> +               }
>> +
>> +               if (odp_queue_enq_multi(queue, buf, MULTI_BUFS_MAX)) {
>> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> +                       return -1;
>> +               }
>> +       }
>> +
>> +       /* Start sched-enq loop */
>> +       t1 = odp_time_get_cycles();
>> +
>> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
>> +               num = odp_schedule_multi(&queue, ODP_SCHED_WAIT, buf,
>> +                                        MULTI_BUFS_MAX);
>> +
>> +               tot += num;
>> +
>> +               if (odp_queue_enq_multi(queue, buf, num)) {
>> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> +                       return -1;
>> +               }
>> +       }
>> +
>> +       /* Clear possible locally stored buffers */
>> +       odp_schedule_pause();
>> +
>> +       while (1) {
>> +               num = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT, buf,
>> +                                        MULTI_BUFS_MAX);
>> +
>> +               if (num == 0)
>> +                       break;
>> +
>> +               tot += num;
>> +
>> +               if (odp_queue_enq_multi(queue, buf, num)) {
>> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> +                       return -1;
>> +               }
>> +       }
>> +
>> +       odp_schedule_resume();
>> +
>> +
>> +       t2     = odp_time_get_cycles();
>> +       cycles = odp_time_diff_cycles(t1, t2);
>> +       ns     = odp_time_cycles_to_ns(cycles);
>> +
>> +       odp_barrier_sync(barrier);
>> +       clear_sched_queues();
>> +
>> +       if (tot) {
>> +               cycles = cycles/tot;
>> +               ns     = ns/tot;
>> +       } else {
>> +               cycles = 0;
>> +               ns     = 0;
>> +       }
>> +
>> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
>> +              thr, str, cycles, ns);
>> +
>> +       return 0;
>> +}
>> +
>> +/**
>> + * Template function for running the scheduler tests.
>> + * The main reason for having this function is that CUnit does not offer a way
>> + * to pass arguments to a testcase function.
>> + * The other reason is that there are common steps for all testcases.
>> + */
>> +static void *exec_template(void *arg)
>> +{
>> +       odp_buffer_pool_t msg_pool;
>> +       odp_shm_t shm;
>> +       test_globals_t *globals;
>> +       odp_barrier_t *barrier;
>> +       test_case_args_t *args = (test_case_args_t*) arg;
>> +
>> +       shm     = odp_shm_lookup("test_globals");
>> +       globals = odp_shm_addr(shm);
>> +
>> +       CU_ASSERT(globals != NULL);
>> +
>> +       barrier = &globals->barrier;
>> +
>> +       /*
>> +        * Sync before start
>> +        */
>> +       odp_barrier_sync(barrier);
>> +
>> +       /*
>> +        * Find the buffer pool
>> +        */
>> +       msg_pool = odp_buffer_pool_lookup("msg_pool");
>> +
>> +       CU_ASSERT(msg_pool != ODP_BUFFER_POOL_INVALID);
>> +
>> +       odp_barrier_sync(barrier);
>> +
>> +       /*
>> +        * Now run the testcase routine passing the arguments
>> +        */
>> +       args->func(args->name, odp_thread_id(), msg_pool,
>> +                  args->prio, barrier);
>> +
>> +       return arg;
>> +}
>> +
>> +/* Low prio */
>> +
>> +static void schedule_one_single_lo(void)
>> +{
>> +       test_case_args_t args;
>> +       snprintf(args.name, sizeof(args.name), "sched_one_s_lo");
>> +       args.prio = ODP_SCHED_PRIO_LOWEST;
>> +       args.func = test_schedule_one_single;
>> +       execute_parallel(exec_template, &args);
>> +}
>> +
>> +static void schedule_single_lo(void)
>> +{
>> +       test_case_args_t args;
>> +       snprintf(args.name, sizeof(args.name), "sched_____s_lo");
>> +       args.prio = ODP_SCHED_PRIO_LOWEST;
>> +       args.func = test_schedule_single;
>> +       execute_parallel(exec_template, &args);
>> +}
>> +
>> +static void schedule_one_many_lo(void)
>> +{
>> +       test_case_args_t args;
>> +       snprintf(args.name, sizeof(args.name), "sched_one_m_lo");
>> +       args.prio = ODP_SCHED_PRIO_LOWEST;
>> +       args.func = test_schedule_one_many;
>> +       execute_parallel(exec_template, &args);
>> +}
>> +
>> +static void schedule_many_lo(void)
>> +{
>> +       test_case_args_t args;
>> +       snprintf(args.name, sizeof(args.name), "sched_____m_lo");
>> +       args.prio = ODP_SCHED_PRIO_LOWEST;
>> +       args.func = test_schedule_many;
>> +       execute_parallel(exec_template, &args);
>> +}
>> +
>> +static void schedule_multi_lo(void)
>> +{
>> +       test_case_args_t args;
>> +       snprintf(args.name, sizeof(args.name), "sched_multi_lo");
>> +       args.prio = ODP_SCHED_PRIO_LOWEST;
>> +       args.func = test_schedule_multi;
>> +       execute_parallel(exec_template, &args);
>> +}
>> +
>> +/* High prio */
>> +
>> +static void schedule_one_single_hi(void)
>> +{
>> +       test_case_args_t args;
>> +       snprintf(args.name, sizeof(args.name), "sched_one_s_hi");
>> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
>> +       args.func = test_schedule_one_single;
>> +       execute_parallel(exec_template, &args);
>> +}
>> +
>> +static void schedule_single_hi(void)
>> +{
>> +       test_case_args_t args;
>> +       snprintf(args.name, sizeof(args.name), "sched_____s_hi");
>> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
>> +       args.func = test_schedule_single;
>> +       execute_parallel(exec_template, &args);
>> +}
>> +
>> +static void schedule_one_many_hi(void)
>> +{
>> +       test_case_args_t args;
>> +       snprintf(args.name, sizeof(args.name), "sched_one_m_hi");
>> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
>> +       args.func = test_schedule_one_many;
>> +       execute_parallel(exec_template, &args);
>> +}
>> +
>> +static void schedule_many_hi(void)
>> +{
>> +       test_case_args_t args;
>> +       snprintf(args.name, sizeof(args.name), "sched_____m_hi");
>> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
>> +       args.func = test_schedule_many;
>> +       execute_parallel(exec_template, &args);
>> +}
>> +
>> +static void schedule_multi_hi(void)
>> +{
>> +       test_case_args_t args;
>> +       snprintf(args.name, sizeof(args.name), "sched_multi_hi");
>> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
>> +       args.func = test_schedule_multi;
>> +       execute_parallel(exec_template, &args);
>> +}
>> +
>> +static void execute_parallel(void *(*start_routine) (void *),
>> +                            test_case_args_t *test_case_args)
>> +{
>> +       odph_linux_pthread_t thread_tbl[MAX_WORKERS];
>> +       int first_core;
>> +
>> +       memset(thread_tbl, 0, sizeof(thread_tbl));
>> +
>> +       /*
>> +        * By default core #0 runs Linux kernel background tasks.
>> +        * Start mapping thread from core #1
>> +        */
>> +       first_core = 1;
>> +
>> +       if (odp_sys_core_count() == 1)
>> +               first_core = 0;
>> +
>> +       odph_linux_pthread_create(thread_tbl, num_workers, first_core,
>> +                                       start_routine, test_case_args);
>> +
>> +       /* Wait for worker threads to terminate */
>> +       odph_linux_pthread_join(thread_tbl, num_workers);
>> +}
>> +
>> +static odp_buffer_pool_t test_odp_buffer_pool_init(void)
>> +{
>> +       void *pool_base;
>> +       odp_shm_t shm;
>> +       odp_buffer_pool_t pool;
>> +
>> +       shm = odp_shm_reserve("msg_pool",
>> +                             MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
>> +
>> +       pool_base = odp_shm_addr(shm);
>> +
>> +       if (NULL == pool_base) {
>> +               printf("Shared memory reserve failed.\n");
>> +               return ODP_BUFFER_POOL_INVALID;
>> +       }
>> +
>> +       pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE,
>> +                                     BUF_SIZE, ODP_CACHE_LINE_SIZE,
>> +                                     ODP_BUFFER_TYPE_RAW);
>> +
>> +       if (ODP_BUFFER_POOL_INVALID == pool) {
>> +               printf("Pool create failed.\n");
>> +               return ODP_BUFFER_POOL_INVALID;
>> +       }
>> +       return pool;
>> +}
>> +
>> +int schedule_test_init(void)
>> +{
>> +       test_args_t args = {0, 0};      /* no CLI args in the CUnit runner */
>> +       odp_shm_t shm;
>> +       test_globals_t *globals;
>> +       int i, j;
>> +       int prios;
>> +
>> +       if (0 != odp_init_global(NULL, NULL)) {
>> +               printf("odp_init_global fail.\n");
>> +               return -1;
>> +       }
>> +       if (0 != odp_init_local()) {
>> +               printf("odp_init_local fail.\n");
>> +               return -1;
>> +       }
>> +       if (ODP_BUFFER_POOL_INVALID == test_odp_buffer_pool_init()) {
>> +               printf("test_odp_buffer_pool_init fail.\n");
>> +               return -1;
>> +       }
>> +
>> +       /* A worker thread per core */
>> +       num_workers = odp_sys_core_count();
>> +
>> +       if (args.core_count)
>> +               num_workers = args.core_count;
>> +
>> +       /* force to max core count */
>> +       if (num_workers > MAX_WORKERS)
>> +               num_workers = MAX_WORKERS;
>> +       shm = odp_shm_reserve("test_globals",
>> +                             sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
>> +
>> +       globals = odp_shm_addr(shm);
>> +
>> +       if (globals == NULL) {
>> +               ODP_ERR("Shared memory reserve failed.\n");
>> +               return -1;
>> +       }
>> +
>> +       memset(globals, 0, sizeof(test_globals_t));
>> +
>> +       /* Barrier to sync test case execution */
>> +       odp_barrier_init_count(&globals->barrier, num_workers);
>> +
>> +       prios = odp_schedule_num_prio();
>> +
>> +       for (i = 0; i < prios; i++) {
>> +               odp_queue_param_t param;
>> +               odp_queue_t queue;
>> +               char name[] = "sched_XX_YY";
>> +
>> +               if (i != ODP_SCHED_PRIO_HIGHEST &&
>> +                   i != ODP_SCHED_PRIO_LOWEST)
>> +                       continue;
>> +
>> +               name[6] = '0' + i/10;
>> +               name[7] = '0' + i - 10*(i/10);
>> +
>> +               param.sched.prio  = i;
>> +               param.sched.sync  = ODP_SCHED_SYNC_ATOMIC;
>> +               param.sched.group = ODP_SCHED_GROUP_DEFAULT;
>> +
>> +               for (j = 0; j < QUEUES_PER_PRIO; j++) {
>> +                       name[9]  = '0' + j/10;
>> +                       name[10] = '0' + j - 10*(j/10);
>> +
>> +                       queue = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED,
>> +                                                &param);
>> +
>> +                       if (queue == ODP_QUEUE_INVALID) {
>> +                               ODP_ERR("Schedule queue create failed.\n");
>> +                               return -1;
>> +                       }
>> +               }
>> +       }
>> +       return 0;
>> +}
>> +
>> +int schedule_test_finalize(void)
>> +{
>> +       odp_term_local();
>> +       odp_term_global();
>> +       return 0;
>> +}
>> +
>> +struct CU_TestInfo schedule_tests[] = {
>> +       _CU_TEST_INFO(schedule_wait_time),
>> +       _CU_TEST_INFO(schedule_one_single_lo),
>> +       _CU_TEST_INFO(schedule_single_lo),
>> +       _CU_TEST_INFO(schedule_one_many_lo),
>> +       _CU_TEST_INFO(schedule_many_lo),
>> +       _CU_TEST_INFO(schedule_multi_lo),
>> +       _CU_TEST_INFO(schedule_one_single_hi),
>> +       _CU_TEST_INFO(schedule_single_hi),
>> +       _CU_TEST_INFO(schedule_one_many_hi),
>> +       _CU_TEST_INFO(schedule_many_hi),
>> +       _CU_TEST_INFO(schedule_multi_hi),
>> +       CU_TEST_INFO_NULL,
>> +};
>> diff --git a/test/cunit/schedule/odp_schedule_testsuites.c b/test/cunit/schedule/odp_schedule_testsuites.c
>> new file mode 100644
>> index 0000000..1053069
>> --- /dev/null
>> +++ b/test/cunit/schedule/odp_schedule_testsuites.c
>> @@ -0,0 +1,35 @@
>> +/* Copyright (c) 2014, Linaro Limited
>> + * All rights reserved.
>> + *
>> + * SPDX-License-Identifier:     BSD-3-Clause
>> + */
>> +
>> +#include "odp_schedule_testsuites.h"
>> +
>> +static CU_SuiteInfo suites[] = {
>> +       {
>> +               "Scheduler tests" ,
>> +               schedule_test_init,
>> +               schedule_test_finalize,
>> +               NULL,
>> +               NULL,
>> +               schedule_tests
>> +       },
>> +       CU_SUITE_INFO_NULL,
>> +};
>> +
>> +int main(void)
>> +{
>> +       /* initialize the CUnit test registry */
>> +       if (CUE_SUCCESS != CU_initialize_registry())
>> +               return CU_get_error();
>> +
>> +       /* register suites */
>> +       CU_register_suites(suites);
>> +       /* Run all tests using the CUnit Basic interface */
>> +       CU_basic_set_mode(CU_BRM_VERBOSE);
>> +       CU_basic_run_tests();
>> +       CU_cleanup_registry();
>> +
>> +       return CU_get_error();
>> +}
>> diff --git a/test/cunit/schedule/odp_schedule_testsuites.h b/test/cunit/schedule/odp_schedule_testsuites.h
>> new file mode 100644
>> index 0000000..67a2a69
>> --- /dev/null
>> +++ b/test/cunit/schedule/odp_schedule_testsuites.h
>> @@ -0,0 +1,21 @@
>> +/* Copyright (c) 2014, Linaro Limited
>> + * All rights reserved.
>> + *
>> + * SPDX-License-Identifier:     BSD-3-Clause
>> + */
>> +
>> +#ifndef ODP_SCHEDULE_TESTSUITES_H_
>> +#define ODP_SCHEDULE_TESTSUITES_H_
>> +
>> +#include "odp.h"
>> +#include <CUnit/Basic.h>
>> +
>> +/* Helper macro for CU_TestInfo initialization */
>> +#define _CU_TEST_INFO(test_func) {#test_func, test_func}
>> +
>> +extern struct CU_TestInfo schedule_tests[];
>> +
>> +extern int schedule_test_init(void);
>> +extern int schedule_test_finalize(void);
>> +
>> +#endif /* ODP_SCHEDULE_TESTSUITES_H_ */
>> --
>> 1.8.3.2
>>
>>
>
>
Ciprian Barbu Nov. 21, 2014, 1:44 p.m. UTC | #6
On Fri, Nov 21, 2014 at 1:31 PM, Taras Kondratiuk
<taras.kondratiuk@linaro.org> wrote:
> On 11/20/2014 09:02 PM, Ciprian Barbu wrote:
>>
>> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
>> ---
>> The testcases are based almost entirely on the odp_example.
>> There are no alloc tests and I added a test case for
>> odp_schedule_wait_time.
>> The major differencs between the odp_example and this cunit is the
>> partition
>> into testcases, the odp_example calls every test case from one big
>> function.
>>
>> I had to work some magic in order to be able to pass arguments to test
>> cases,
>> I hope is not too hard to follow.
>>
>>   configure.ac                                  |   1 +
>>   test/cunit/Makefile.am                        |   2 +
>>   test/cunit/schedule/Makefile.am               |  10 +
>>   test/cunit/schedule/odp_schedule_test.c       | 844
>> ++++++++++++++++++++++++++
>>   test/cunit/schedule/odp_schedule_testsuites.c |  35 ++
>>   test/cunit/schedule/odp_schedule_testsuites.h |  21 +
>>   6 files changed, 913 insertions(+)
>>   create mode 100644 test/cunit/schedule/Makefile.am
>>   create mode 100644 test/cunit/schedule/odp_schedule_test.c
>>   create mode 100644 test/cunit/schedule/odp_schedule_testsuites.c
>>   create mode 100644 test/cunit/schedule/odp_schedule_testsuites.h
>>
>> diff --git a/configure.ac b/configure.ac
>> index fcd7279..a47db72 100644
>> --- a/configure.ac
>> +++ b/configure.ac
>> @@ -173,6 +173,7 @@ AC_CONFIG_FILES([Makefile
>>                  test/Makefile
>>                  test/api_test/Makefile
>>                    test/cunit/Makefile
>> +                 test/cunit/schedule/Makefile
>>                  pkgconfig/libodp.pc])
>>
>>   AC_SEARCH_LIBS([timer_create],[rt posix4])
>> diff --git a/test/cunit/Makefile.am b/test/cunit/Makefile.am
>> index 439e134..b6033ee 100644
>> --- a/test/cunit/Makefile.am
>> +++ b/test/cunit/Makefile.am
>> @@ -3,6 +3,8 @@ include $(top_srcdir)/test/Makefile.inc
>>   AM_CFLAGS += -I$(CUNIT_PATH)/include
>>   AM_LDFLAGS += -L$(CUNIT_PATH)/lib -static -lcunit
>>
>> +SUBDIRS = schedule
>> +
>
>
> Don't create a separate makefile, so all test binaries will be the same
> directory.

Did you get that feedback in private? I don't see it in the comments.
Anyway, I can drop the extra Makefile, no problem.

>
>>   if ODP_CUNIT_ENABLED
>>   TESTS = ${bin_PROGRAMS}
>>   check_PROGRAMS = ${bin_PROGRAMS}
>
>
>> +
>> +struct CU_TestInfo schedule_tests[] = {
>> +       _CU_TEST_INFO(schedule_wait_time),
>> +       _CU_TEST_INFO(schedule_one_single_lo),
>> +       _CU_TEST_INFO(schedule_single_lo),
>> +       _CU_TEST_INFO(schedule_one_many_lo),
>> +       _CU_TEST_INFO(schedule_many_lo),
>> +       _CU_TEST_INFO(schedule_multi_lo),
>> +       _CU_TEST_INFO(schedule_one_single_hi),
>> +       _CU_TEST_INFO(schedule_single_hi),
>> +       _CU_TEST_INFO(schedule_one_many_hi),
>> +       _CU_TEST_INFO(schedule_many_hi),
>> +       _CU_TEST_INFO(schedule_multi_hi),
>> +       CU_TEST_INFO_NULL,
>> +};
>
>
> schedule_one() will be dropped, so these tests also can be dropped.

Yes, I know they have to be dropped. I kept them in this RFC for easy
comparison against odp_example.

>
> I think odp_example is not a good base for scheduler tests. It was
> written as benchmark, but not as a verification test.

That's actually not the feedback I got from Mike, correct me if I'm wrong.

>
> Basic scheduler tests are missing:
> - verify that priority works correctly.
> - verify that atomicity works correctly for ATOMIC queues.
> - verify order for ORDERED queues.

That's good input, thank you.
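
To make the suggestion concrete, here is a rough, single-threaded sketch of
the priority part. It reuses the msg_pool and the sched_XX_YY queue naming
from the patch; the function name, the prio digits in the queue names and the
strict "high prio comes out first" assertion are assumptions, and on platforms
that only guarantee priority under sustained load the check would need to be
relaxed.

static void schedule_prio_order(void)
{
        odp_buffer_pool_t pool = odp_buffer_pool_lookup("msg_pool");
        odp_queue_t hi, lo, from;
        odp_buffer_t buf_hi, buf_lo, buf;

        /* Queue names follow the patch's sched_XX_YY scheme; the digits for
         * the HIGHEST/LOWEST prio queues are illustrative only. */
        hi = odp_queue_lookup("sched_00_00");
        lo = odp_queue_lookup("sched_07_00");

        CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
        CU_ASSERT_FATAL(hi != ODP_QUEUE_INVALID);
        CU_ASSERT_FATAL(lo != ODP_QUEUE_INVALID);

        buf_lo = odp_buffer_alloc(pool);
        buf_hi = odp_buffer_alloc(pool);
        CU_ASSERT_FATAL(odp_buffer_is_valid(buf_lo));
        CU_ASSERT_FATAL(odp_buffer_is_valid(buf_hi));

        CU_ASSERT(odp_queue_enq(lo, buf_lo) == 0);
        CU_ASSERT(odp_queue_enq(hi, buf_hi) == 0);

        /* The first buffer handed back should come from the high prio queue */
        buf = odp_schedule(&from, ODP_SCHED_WAIT);
        CU_ASSERT(from == hi);
        odp_buffer_free(buf);

        /* Drain the remaining low prio buffer */
        buf = odp_schedule(&from, ODP_SCHED_WAIT);
        CU_ASSERT(from == lo);
        odp_buffer_free(buf);
}

Atomicity and ordering checks could follow the same pattern: stamp a per-queue
sequence number into each buffer before enqueuing and assert on the order (or
on exclusive access) when the buffers come back from odp_schedule().
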
Alexandru Badicioiu Nov. 21, 2014, 1:45 p.m. UTC | #7
I think multithreading should be kept, to test the essential ability of the
scheduler to deliver to multiple cores. The thing that should IMO be
dropped is the fairness assumption (each core gets as many buffers as it
enqueued). The most general thing that should be tested for the scheduler
is that it is able to deliver all enqueued buffers (as Bala suggested too).

Alex
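
A minimal sketch of a check written that way, with no fairness assumption:
enqueue a known number of buffers and only assert that the same number
eventually comes back out of odp_schedule(). The helper is single-threaded for
clarity; with workers, each thread would return its own count and the sum
would be compared after the join. The function names, the sched_00_00 queue
name and the assumption that the queue was created with ODP_SCHED_SYNC_NONE
(the patch currently creates ATOMIC queues) are mine, not the patch's.

static int drain_and_count(void)
{
        odp_buffer_t buf;
        int count = 0;

        /* Take whatever the scheduler gives this thread; make no assumption
         * about which queue or how many buffers land here. */
        for (;;) {
                buf = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
                if (buf == ODP_BUFFER_INVALID)
                        break;
                odp_buffer_free(buf);
                count++;
        }
        return count;
}

static void schedule_deliver_all(void)
{
        odp_buffer_pool_t pool = odp_buffer_pool_lookup("msg_pool");
        odp_queue_t queue = odp_queue_lookup("sched_00_00");
        odp_buffer_t buf;
        int i, enqueued = 0;

        CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
        CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);

        for (i = 0; i < 64; i++) {
                buf = odp_buffer_alloc(pool);
                if (!odp_buffer_is_valid(buf))
                        break;
                CU_ASSERT(odp_queue_enq(queue, buf) == 0);
                enqueued++;
        }

        /* The only property asserted: everything enqueued is delivered. */
        CU_ASSERT(drain_and_count() == enqueued);
}

ODP_SCHED_NO_WAIT keeps the sketch short; a bounded wait (see the 500 ms
discussion further down the thread) would be friendlier to platforms where
delivery is not immediate.
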

On 21 November 2014 15:33, Ciprian Barbu <ciprian.barbu@linaro.org> wrote:

> On Fri, Nov 21, 2014 at 12:18 PM, Alexandru Badicioiu
> <alexandru.badicioiu@linaro.org> wrote:
> > Hi,
> > the scheduling tests in odp_example were discussed some time ago and
> there
> > was an agreement, at least for FSL and TI platforms, that fair scheduling
> > assumed by the following loop:
> > for (i = 0; i < QUEUE_ROUNDS; i++) {
> >                buf = odp_schedule_one(&queue, ODP_SCHED_WAIT);
> >
> >                if (odp_queue_enq(queue, buf)) {
> >                        ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> >                        return -1;
> >                }
> >        }
> >
> > for an ATOMIC queue doesn't make sense as the behavior of an ATOMIC
> hardware
> > queue is to be scheduled to the same core as long as there are packets in
> > the queue and the core has room to dequeue them (other platforms please
> > confirm or infirm). On my specific platform I can force this with a
> > particular HW configuration, but this configuration effectively disables
> the
> > use of POLL queues.
> > I think we need scheduling tests for the most general case (SYNC_NONE
> and no
> > assumption about how many buffers are scheduled to a particular core).
>
> Ok, I remember about that discussion, I didn't understand it at the time.
>
> So here is a question, is it better to drop the multi-threading
> structure and do everything in a single thread? Or keep multiple
> threads but use ODP_SCHED_NO_WAIT? Or both?
>
> >
> > Thanks,
> > Alex
> >
> >
> > On 20 November 2014 21:02, Ciprian Barbu <ciprian.barbu@linaro.org> wrote:
> >> [snip: full patch quoted verbatim]
> >
>
Ciprian Barbu Nov. 21, 2014, 2 p.m. UTC | #8
On Fri, Nov 21, 2014 at 3:45 PM, Alexandru Badicioiu
<alexandru.badicioiu@linaro.org> wrote:
> I think multithreading should be kept, to test the essential ability of the
> scheduler to deliver to multiple cores. The thing that should IMO be dropped
> is the fairness assumption (each core gets as many buffers as it enqueued).
> The most general thing that should be tested for the scheduler is that it is
> able to deliver all enqueued buffers (as Bala suggested too).

OK, then I will modify the logic there, not relying on each core
getting a buffer from the scheduler. I will use a 500 ms wait time; is
that OK?
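
Roughly what I have in mind for the drain loop with the 500 ms bound (the
helper name is made up; the only API assumed is odp_schedule_wait_time(),
which the suite already exercises):

static int drain_with_timeout(void)
{
        /* 500 ms expressed in ns, converted once into scheduler wait time */
        const uint64_t wait = odp_schedule_wait_time(500 * 1000 * 1000ULL);
        odp_buffer_t buf;
        int count = 0;

        for (;;) {
                buf = odp_schedule(NULL, wait);
                if (buf == ODP_BUFFER_INVALID)
                        break;  /* nothing arrived within 500 ms: assume done */
                odp_buffer_free(buf);
                count++;
        }
        return count;
}

That way the test never hangs if buffers go missing; a lost buffer simply
shows up as a count mismatch.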

>
> Alex
>
> On 21 November 2014 15:33, Ciprian Barbu <ciprian.barbu@linaro.org> wrote:
>>
>> On Fri, Nov 21, 2014 at 12:18 PM, Alexandru Badicioiu
>> <alexandru.badicioiu@linaro.org> wrote:
>> > Hi,
>> > the scheduling tests in odp_example were discussed some time ago and
>> > there
>> > was an agreement, at least for FSL and TI platforms, that fair
>> > scheduling
>> > assumed by the following loop:
>> > for (i = 0; i < QUEUE_ROUNDS; i++) {
>> >                buf = odp_schedule_one(&queue, ODP_SCHED_WAIT);
>> >
>> >                if (odp_queue_enq(queue, buf)) {
>> >                        ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> >                        return -1;
>> >                }
>> >        }
>> >
>> > for an ATOMIC queue doesn't make sense as the behavior of an ATOMIC
>> > hardware
>> > queue is to be scheduled to the same core as long as there are packets
>> > in
>> > the queue and the core has room to dequeue them (other platforms please
>> > confirm or infirm). On my specific platform I can force this with a
>> > particular HW configuration, but this configuration effectively disables
>> > the
>> > use of POLL queues.
>> > I think we need scheduling tests for the most general case (SYNC_NONE
>> > and no
>> > assumption about how many buffers are scheduled to a particular core).
>>
>> Ok, I remember about that discussion, I didn't understand it at the time.
>>
>> So here is a question, is it better to drop the multi-threading
>> structure and do everything in a single thread? Or keep multiple
>> threads but use ODP_SCHED_NO_WAIT? Or both?
>>
>> >
>> > Thanks,
>> > Alex
>> >
>> >
>> > On 20 November 2014 21:02, Ciprian Barbu <ciprian.barbu@linaro.org>
>> > wrote:
>> >>
>> >> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
>> >> ---
>> >> The testcases are based almost entirely on the odp_example.
>> >> There are no alloc tests and I added a test case for
>> >> odp_schedule_wait_time.
>> >> The major differencs between the odp_example and this cunit is the
>> >> partition
>> >> into testcases, the odp_example calls every test case from one big
>> >> function.
>> >>
>> >> I had to work some magic in order to be able to pass arguments to test
>> >> cases,
>> >> I hope is not too hard to follow.
>> >>
>> >>  configure.ac                                  |   1 +
>> >>  test/cunit/Makefile.am                        |   2 +
>> >>  test/cunit/schedule/Makefile.am               |  10 +
>> >>  test/cunit/schedule/odp_schedule_test.c       | 844
>> >> ++++++++++++++++++++++++++
>> >>  test/cunit/schedule/odp_schedule_testsuites.c |  35 ++
>> >>  test/cunit/schedule/odp_schedule_testsuites.h |  21 +
>> >>  6 files changed, 913 insertions(+)
>> >>  create mode 100644 test/cunit/schedule/Makefile.am
>> >>  create mode 100644 test/cunit/schedule/odp_schedule_test.c
>> >>  create mode 100644 test/cunit/schedule/odp_schedule_testsuites.c
>> >>  create mode 100644 test/cunit/schedule/odp_schedule_testsuites.h
>> >>
>> >> diff --git a/configure.ac b/configure.ac
>> >> index fcd7279..a47db72 100644
>> >> --- a/configure.ac
>> >> +++ b/configure.ac
>> >> @@ -173,6 +173,7 @@ AC_CONFIG_FILES([Makefile
>> >>                  test/Makefile
>> >>                  test/api_test/Makefile
>> >>                   test/cunit/Makefile
>> >> +                 test/cunit/schedule/Makefile
>> >>                  pkgconfig/libodp.pc])
>> >>
>> >>  AC_SEARCH_LIBS([timer_create],[rt posix4])
>> >> diff --git a/test/cunit/Makefile.am b/test/cunit/Makefile.am
>> >> index 439e134..b6033ee 100644
>> >> --- a/test/cunit/Makefile.am
>> >> +++ b/test/cunit/Makefile.am
>> >> @@ -3,6 +3,8 @@ include $(top_srcdir)/test/Makefile.inc
>> >>  AM_CFLAGS += -I$(CUNIT_PATH)/include
>> >>  AM_LDFLAGS += -L$(CUNIT_PATH)/lib -static -lcunit
>> >>
>> >> +SUBDIRS = schedule
>> >> +
>> >>  if ODP_CUNIT_ENABLED
>> >>  TESTS = ${bin_PROGRAMS}
>> >>  check_PROGRAMS = ${bin_PROGRAMS}
>> >> diff --git a/test/cunit/schedule/Makefile.am
>> >> b/test/cunit/schedule/Makefile.am
>> >> new file mode 100644
>> >> index 0000000..ad68b03
>> >> --- /dev/null
>> >> +++ b/test/cunit/schedule/Makefile.am
>> >> @@ -0,0 +1,10 @@
>> >> +include $(top_srcdir)/test/Makefile.inc
>> >> +
>> >> +if ODP_CUNIT_ENABLED
>> >> +bin_PROGRAMS = odp_schedule_test
>> >> +odp_schedule_test_LDFLAGS = $(AM_LDFLAGS) -L$(CUNIT_PATH)/lib -static
>> >> -lcunit
>> >> +odp_schedule_test_CFLAGS = $(AM_CFLAGS) -I$(CUNIT_PATH)/include
>> >> +endif
>> >> +
>> >> +dist_odp_schedule_test_SOURCES = odp_schedule_test.c \
>> >> +                                odp_schedule_testsuites.c
>> >> diff --git a/test/cunit/schedule/odp_schedule_test.c
>> >> b/test/cunit/schedule/odp_schedule_test.c
>> >> new file mode 100644
>> >> index 0000000..fa67f6e
>> >> --- /dev/null
>> >> +++ b/test/cunit/schedule/odp_schedule_test.c
>> >> @@ -0,0 +1,844 @@
>> >> +/* Copyright (c) 2014, Linaro Limited
>> >> + * All rights reserved.
>> >> + *
>> >> + * SPDX-License-Identifier:     BSD-3-Clause
>> >> + */
>> >> +
>> >> +#include "odp_schedule_testsuites.h"
>> >> +#include <odph_linux.h>
>> >> +
>> >> +#define MAX_WORKERS            32            /**< Max worker threads
>> >> */
>> >> +#define MSG_POOL_SIZE           (4*1024*1024)
>> >> +#define QUEUES_PER_PRIO                64            /**< Queue per
>> >> priority */
>> >> +#define QUEUE_ROUNDS           (512*1024)    /**< Queue test rounds */
>> >> +#define MULTI_BUFS_MAX         4             /**< Buffer burst size */
>> >> +#define BUF_SIZE               64
>> >> +
>> >> +#define SCHED_MSG "Test_buff_FOR_simple_schedule"
>> >> +
>> >> +/** Test arguments */
>> >> +typedef struct {
>> >> +       int core_count; /**< Core count */
>> >> +       int proc_mode;  /**< Process mode */
>> >> +} test_args_t;
>> >> +
>> >> +typedef int (*test_case_routine)(const char *, int, odp_buffer_pool_t,
>> >> +                                int, odp_barrier_t *);
>> >> +
>> >> +/** Scheduler test case arguments */
>> >> +typedef struct {
>> >> +       char name[64];  /**< test case name */
>> >> +       int prio;
>> >> +       test_case_routine func;
>> >> +} test_case_args_t;
>> >> +
>> >> +/** Test global variables */
>> >> +typedef struct {
>> >> +       odp_barrier_t barrier;/**< @private Barrier for test
>> >> synchronisation */
>> >> +       test_args_t test_args;/**< @private Test case function and
>> >> arguments */
>> >> +} test_globals_t;
>> >> +
>> >> +static void execute_parallel(void *(*func) (void *), test_case_args_t
>> >> *);
>> >> +static int num_workers;
>> >> +
>> >> +/**
>> >> + * @internal CUnit test case for verifying functionality of
>> >> + *           schedule_wait_time
>> >> + */
>> >> +static void schedule_wait_time(void)
>> >> +{
>> >> +       uint64_t wait_time;
>> >> +
>> >> +       wait_time = odp_schedule_wait_time(0);
>> >> +       CU_ASSERT(wait_time > 0);
>> >> +       CU_PASS("schedule_wait_time(0)");
>> >> +
>> >> +       wait_time = odp_schedule_wait_time(1);
>> >> +       CU_ASSERT(wait_time > 0);
>> >> +       CU_PASS("schedule_wait_time(1)");
>> >> +
>> >> +       wait_time = odp_schedule_wait_time((uint64_t)-1LL);
>> >> +       CU_ASSERT(wait_time > 0);
>> >> +       CU_PASS("schedule_wait_time(MAX_LONG_INT)");
>> >> +}
>> >> +
>> >> +/**
>> >> + * @internal Clear all scheduled queues. Retry to be sure that all
>> >> + * buffers have been scheduled.
>> >> + */
>> >> +static void clear_sched_queues(void)
>> >> +{
>> >> +       odp_buffer_t buf;
>> >> +
>> >> +       while (1) {
>> >> +               buf = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
>> >> +
>> >> +               if (buf == ODP_BUFFER_INVALID)
>> >> +                       break;
>> >> +
>> >> +               odp_buffer_free(buf);
>> >> +       }
>> >> +}
>> >> +
>> >> +/**
>> >> + * @internal Create multiple queues from a pool of buffers
>> >> + *
>> >> + * @param thr  Thread
>> >> + * @param msg_pool  Buffer pool
>> >> + * @param prio   Queue priority
>> >> + *
>> >> + * @return 0 if successful
>> >> + */
>> >> +static int create_queues(int thr, odp_buffer_pool_t msg_pool, int
>> >> prio)
>> >> +{
>> >> +       char name[] = "sched_XX_YY";
>> >> +       odp_buffer_t buf;
>> >> +       odp_queue_t queue;
>> >> +       int i;
>> >> +
>> >> +       name[6] = '0' + prio/10;
>> >> +       name[7] = '0' + prio - 10*(prio/10);
>> >> +
>> >> +       /* Alloc and enqueue a buffer per queue */
>> >> +       for (i = 0; i < QUEUES_PER_PRIO; i++) {
>> >> +               name[9]  = '0' + i/10;
>> >> +               name[10] = '0' + i - 10*(i/10);
>> >> +
>> >> +               queue = odp_queue_lookup(name);
>> >> +
>> >> +               if (queue == ODP_QUEUE_INVALID) {
>> >> +                       ODP_ERR("  [%i] Queue %s lookup failed.\n",
>> >> thr,
>> >> name);
>> >> +                       return -1;
>> >> +               }
>> >> +
>> >> +               buf = odp_buffer_alloc(msg_pool);
>> >> +
>> >> +               if (!odp_buffer_is_valid(buf)) {
>> >> +                       ODP_ERR("  [%i] msg_pool alloc failed\n", thr);
>> >> +                       return -1;
>> >> +               }
>> >> +
>> >> +               if (odp_queue_enq(queue, buf)) {
>> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> >> +                       return -1;
>> >> +               }
>> >> +       }
>> >> +
>> >> +       return 0;
>> >> +}
>> >> +
>> >> +/**
>> >> + * @internal Create a single queue from a pool of buffers
>> >> + *
>> >> + * @param thr  Thread
>> >> + * @param msg_pool  Buffer pool
>> >> + * @param prio   Queue priority
>> >> + *
>> >> + * @return 0 if successful
>> >> + */
>> >> +static int create_queue(int thr, odp_buffer_pool_t msg_pool, int prio)
>> >> +{
>> >> +       char name[] = "sched_XX_00";
>> >> +       odp_buffer_t buf;
>> >> +       odp_queue_t queue;
>> >> +
>> >> +       buf = odp_buffer_alloc(msg_pool);
>> >> +
>> >> +       if (!odp_buffer_is_valid(buf)) {
>> >> +               ODP_ERR("  [%i] msg_pool alloc failed\n", thr);
>> >> +               return -1;
>> >> +       }
>> >> +
>> >> +       name[6] = '0' + prio/10;
>> >> +       name[7] = '0' + prio - 10*(prio/10);
>> >> +
>> >> +       queue = odp_queue_lookup(name);
>> >> +
>> >> +       if (queue == ODP_QUEUE_INVALID) {
>> >> +               ODP_ERR("  [%i] Queue %s lookup failed.\n", thr, name);
>> >> +               return -1;
>> >> +       }
>> >> +
>> >> +       if (odp_queue_enq(queue, buf)) {
>> >> +               ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> >> +               return -1;
>> >> +       }
>> >> +
>> >> +       return 0;
>> >> +}
>> >> +
>> >> +/**
>> >> + * @internal Test scheduling of a single queue - with
>> >> odp_schedule_one()
>> >> + *
>> >> + * Enqueue a buffer to the shared queue. Schedule and enqueue the
>> >> received
>> >> + * buffer back into the queue.
>> >> + *
>> >> + * @param str      Test case name string
>> >> + * @param thr      Thread
>> >> + * @param msg_pool Buffer pool
>> >> + * @param prio     Priority
>> >> + * @param barrier  Barrier
>> >> + *
>> >> + * @return 0 if successful
>> >> + */
>> >> +static int test_schedule_one_single(const char *str, int thr,
>> >> +                                   odp_buffer_pool_t msg_pool,
>> >> +                                   int prio, odp_barrier_t *barrier)
>> >> +{
>> >> +       odp_buffer_t buf;
>> >> +       odp_queue_t queue;
>> >> +       uint64_t t1, t2, cycles, ns;
>> >> +       uint32_t i;
>> >> +       uint32_t tot = 0;
>> >> +
>> >> +       if (create_queue(thr, msg_pool, prio)) {
>> >> +               CU_FAIL_FATAL("lookup queue");
>> >> +               return -1;
>> >> +       }
>> >> +
>> >> +       t1 = odp_time_get_cycles();
>> >> +
>> >> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
>> >> +               buf = odp_schedule_one(&queue, ODP_SCHED_WAIT);
>> >> +
>> >> +               if (odp_queue_enq(queue, buf)) {
>> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> >> +                       return -1;
>> >> +               }
>> >> +       }
>> >> +
>> >> +       if (odp_queue_sched_type(queue) == ODP_SCHED_SYNC_ATOMIC)
>> >> +               odp_schedule_release_atomic();
>> >> +
>> >> +       t2     = odp_time_get_cycles();
>> >> +       cycles = odp_time_diff_cycles(t1, t2);
>> >> +       ns     = odp_time_cycles_to_ns(cycles);
>> >> +       tot    = i;
>> >> +
>> >> +       odp_barrier_sync(barrier);
>> >> +       clear_sched_queues();
>> >> +
>> >> +       cycles = cycles/tot;
>> >> +       ns     = ns/tot;
>> >> +
>> >> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
>> >> +              thr, str, cycles, ns);
>> >> +
>> >> +       return 0;
>> >> +}
>> >> +
>> >> +/**
>> >> + * @internal Test scheduling of multiple queues - with
>> >> odp_schedule_one()
>> >> + *
>> >> + * Enqueue a buffer to each queue. Schedule and enqueue the received
>> >> + * buffer back into the queue it came from.
>> >> + *
>> >> + * @param str      Test case name string
>> >> + * @param thr      Thread
>> >> + * @param msg_pool Buffer pool
>> >> + * @param prio     Priority
>> >> + * @param barrier  Barrier
>> >> + *
>> >> + * @return 0 if successful
>> >> + */
>> >> +static int test_schedule_one_many(const char *str, int thr,
>> >> +                                 odp_buffer_pool_t msg_pool,
>> >> +                                 int prio, odp_barrier_t *barrier)
>> >> +{
>> >> +       odp_buffer_t buf;
>> >> +       odp_queue_t queue;
>> >> +       uint64_t t1 = 0;
>> >> +       uint64_t t2 = 0;
>> >> +       uint64_t cycles, ns;
>> >> +       uint32_t i;
>> >> +       uint32_t tot = 0;
>> >> +
>> >> +       if (create_queues(thr, msg_pool, prio))
>> >> +               return -1;
>> >> +
>> >> +       /* Start sched-enq loop */
>> >> +       t1 = odp_time_get_cycles();
>> >> +
>> >> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
>> >> +               buf = odp_schedule_one(&queue, ODP_SCHED_WAIT);
>> >> +
>> >> +               if (odp_queue_enq(queue, buf)) {
>> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> >> +                       return -1;
>> >> +               }
>> >> +       }
>> >> +
>> >> +       if (odp_queue_sched_type(queue) == ODP_SCHED_SYNC_ATOMIC)
>> >> +               odp_schedule_release_atomic();
>> >> +
>> >> +       t2     = odp_time_get_cycles();
>> >> +       cycles = odp_time_diff_cycles(t1, t2);
>> >> +       ns     = odp_time_cycles_to_ns(cycles);
>> >> +       tot    = i;
>> >> +
>> >> +       odp_barrier_sync(barrier);
>> >> +       clear_sched_queues();
>> >> +
>> >> +       cycles = cycles/tot;
>> >> +       ns     = ns/tot;
>> >> +
>> >> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
>> >> +              thr, str, cycles, ns);
>> >> +
>> >> +       return 0;
>> >> +}
>> >> +
>> >> +/**
>> >> + * @internal Test scheduling of a single queue - with odp_schedule()
>> >> + *
>> >> + * Enqueue a buffer to the shared queue. Schedule and enqueue the
>> >> received
>> >> + * buffer back into the queue.
>> >> + *
>> >> + * @param str      Test case name string
>> >> + * @param thr      Thread
>> >> + * @param msg_pool Buffer pool
>> >> + * @param prio     Priority
>> >> + * @param barrier  Barrier
>> >> + *
>> >> + * @return 0 if successful
>> >> + */
>> >> +static int test_schedule_single(const char *str, int thr,
>> >> +                               odp_buffer_pool_t msg_pool,
>> >> +                               int prio, odp_barrier_t *barrier)
>> >> +{
>> >> +       odp_buffer_t buf;
>> >> +       odp_queue_t queue;
>> >> +       uint64_t t1, t2, cycles, ns;
>> >> +       uint32_t i;
>> >> +       uint32_t tot = 0;
>> >> +
>> >> +       if (create_queue(thr, msg_pool, prio))
>> >> +               return -1;
>> >> +
>> >> +       t1 = odp_time_get_cycles();
>> >> +
>> >> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
>> >> +               buf = odp_schedule(&queue, ODP_SCHED_WAIT);
>> >> +
>> >> +               if (odp_queue_enq(queue, buf)) {
>> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> >> +                       return -1;
>> >> +               }
>> >> +       }
>> >> +
>> >> +       /* Clear possible locally stored buffers */
>> >> +       odp_schedule_pause();
>> >> +
>> >> +       tot = i;
>> >> +
>> >> +       while (1) {
>> >> +               buf = odp_schedule(&queue, ODP_SCHED_NO_WAIT);
>> >> +
>> >> +               if (buf == ODP_BUFFER_INVALID)
>> >> +                       break;
>> >> +
>> >> +               tot++;
>> >> +
>> >> +               if (odp_queue_enq(queue, buf)) {
>> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> >> +                       return -1;
>> >> +               }
>> >> +       }
>> >> +
>> >> +       odp_schedule_resume();
>> >> +
>> >> +       t2     = odp_time_get_cycles();
>> >> +       cycles = odp_time_diff_cycles(t1, t2);
>> >> +       ns     = odp_time_cycles_to_ns(cycles);
>> >> +
>> >> +       odp_barrier_sync(barrier);
>> >> +       clear_sched_queues();
>> >> +
>> >> +       cycles = cycles/tot;
>> >> +       ns     = ns/tot;
>> >> +
>> >> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
>> >> +              thr, str, cycles, ns);
>> >> +
>> >> +       return 0;
>> >> +}
>> >> +
>> >> +/**
>> >> + * @internal Test scheduling of multiple queues - with odp_schedule()
>> >> + *
>> >> + * Enqueue a buffer to each queue. Schedule and enqueue the received
>> >> + * buffer back into the queue it came from.
>> >> + *
>> >> + * @param str      Test case name string
>> >> + * @param thr      Thread
>> >> + * @param msg_pool Buffer pool
>> >> + * @param prio     Priority
>> >> + * @param barrier  Barrier
>> >> + *
>> >> + * @return 0 if successful
>> >> + */
>> >> +static int test_schedule_many(const char *str, int thr,
>> >> +                             odp_buffer_pool_t msg_pool,
>> >> +                             int prio, odp_barrier_t *barrier)
>> >> +{
>> >> +       odp_buffer_t buf;
>> >> +       odp_queue_t queue;
>> >> +       uint64_t t1 = 0;
>> >> +       uint64_t t2 = 0;
>> >> +       uint64_t cycles, ns;
>> >> +       uint32_t i;
>> >> +       uint32_t tot = 0;
>> >> +
>> >> +       if (create_queues(thr, msg_pool, prio))
>> >> +               return -1;
>> >> +
>> >> +       /* Start sched-enq loop */
>> >> +       t1 = odp_time_get_cycles();
>> >> +
>> >> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
>> >> +               buf = odp_schedule(&queue, ODP_SCHED_WAIT);
>> >> +
>> >> +               if (odp_queue_enq(queue, buf)) {
>> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> >> +                       return -1;
>> >> +               }
>> >> +       }
>> >> +
>> >> +       /* Clear possible locally stored buffers */
>> >> +       odp_schedule_pause();
>> >> +
>> >> +       tot = i;
>> >> +
>> >> +       while (1) {
>> >> +               buf = odp_schedule(&queue, ODP_SCHED_NO_WAIT);
>> >> +
>> >> +               if (buf == ODP_BUFFER_INVALID)
>> >> +                       break;
>> >> +
>> >> +               tot++;
>> >> +
>> >> +               if (odp_queue_enq(queue, buf)) {
>> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> >> +                       return -1;
>> >> +               }
>> >> +       }
>> >> +
>> >> +       odp_schedule_resume();
>> >> +
>> >> +       t2     = odp_time_get_cycles();
>> >> +       cycles = odp_time_diff_cycles(t1, t2);
>> >> +       ns     = odp_time_cycles_to_ns(cycles);
>> >> +
>> >> +       odp_barrier_sync(barrier);
>> >> +       clear_sched_queues();
>> >> +
>> >> +       cycles = cycles/tot;
>> >> +       ns     = ns/tot;
>> >> +
>> >> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
>> >> +              thr, str, cycles, ns);
>> >> +
>> >> +       return 0;
>> >> +}
>> >> +
>> >> +/**
>> >> + * @internal Test scheduling of multiple queues with multi_sched and
>> >> multi_enq
>> >> + *
>> >> + * @param str      Test case name string
>> >> + * @param thr      Thread
>> >> + * @param msg_pool Buffer pool
>> >> + * @param prio     Priority
>> >> + * @param barrier  Barrier
>> >> + *
>> >> + * @return 0 if successful
>> >> + */
>> >> +static int test_schedule_multi(const char *str, int thr,
>> >> +                              odp_buffer_pool_t msg_pool,
>> >> +                              int prio, odp_barrier_t *barrier)
>> >> +{
>> >> +       odp_buffer_t buf[MULTI_BUFS_MAX];
>> >> +       odp_queue_t queue;
>> >> +       uint64_t t1 = 0;
>> >> +       uint64_t t2 = 0;
>> >> +       uint64_t cycles, ns;
>> >> +       int i, j;
>> >> +       int num;
>> >> +       uint32_t tot = 0;
>> >> +       char name[] = "sched_XX_YY";
>> >> +
>> >> +       name[6] = '0' + prio/10;
>> >> +       name[7] = '0' + prio - 10*(prio/10);
>> >> +
>> >> +       /* Alloc and enqueue a buffer per queue */
>> >> +       for (i = 0; i < QUEUES_PER_PRIO; i++) {
>> >> +               name[9]  = '0' + i/10;
>> >> +               name[10] = '0' + i - 10*(i/10);
>> >> +
>> >> +               queue = odp_queue_lookup(name);
>> >> +
>> >> +               if (queue == ODP_QUEUE_INVALID) {
>> >> +                       ODP_ERR("  [%i] Queue %s lookup failed.\n",
>> >> thr,
>> >> name);
>> >> +                       return -1;
>> >> +               }
>> >> +
>> >> +               for (j = 0; j < MULTI_BUFS_MAX; j++) {
>> >> +                       buf[j] = odp_buffer_alloc(msg_pool);
>> >> +
>> >> +                       if (!odp_buffer_is_valid(buf[j])) {
>> >> +                               ODP_ERR("  [%i] msg_pool alloc
>> >> failed\n",
>> >> thr);
>> >> +                               return -1;
>> >> +                       }
>> >> +               }
>> >> +
>> >> +               if (odp_queue_enq_multi(queue, buf, MULTI_BUFS_MAX)) {
>> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> >> +                       return -1;
>> >> +               }
>> >> +       }
>> >> +
>> >> +       /* Start sched-enq loop */
>> >> +       t1 = odp_time_get_cycles();
>> >> +
>> >> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
>> >> +               num = odp_schedule_multi(&queue, ODP_SCHED_WAIT, buf,
>> >> +                                        MULTI_BUFS_MAX);
>> >> +
>> >> +               tot += num;
>> >> +
>> >> +               if (odp_queue_enq_multi(queue, buf, num)) {
>> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> >> +                       return -1;
>> >> +               }
>> >> +       }
>> >> +
>> >> +       /* Clear possible locally stored buffers */
>> >> +       odp_schedule_pause();
>> >> +
>> >> +       while (1) {
>> >> +               num = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT,
>> >> buf,
>> >> +                                        MULTI_BUFS_MAX);
>> >> +
>> >> +               if (num == 0)
>> >> +                       break;
>> >> +
>> >> +               tot += num;
>> >> +
>> >> +               if (odp_queue_enq_multi(queue, buf, num)) {
>> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
>> >> +                       return -1;
>> >> +               }
>> >> +       }
>> >> +
>> >> +       odp_schedule_resume();
>> >> +
>> >> +
>> >> +       t2     = odp_time_get_cycles();
>> >> +       cycles = odp_time_diff_cycles(t1, t2);
>> >> +       ns     = odp_time_cycles_to_ns(cycles);
>> >> +
>> >> +       odp_barrier_sync(barrier);
>> >> +       clear_sched_queues();
>> >> +
>> >> +       if (tot) {
>> >> +               cycles = cycles/tot;
>> >> +               ns     = ns/tot;
>> >> +       } else {
>> >> +               cycles = 0;
>> >> +               ns     = 0;
>> >> +       }
>> >> +
>> >> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
>> >> +              thr, str, cycles, ns);
>> >> +
>> >> +       return 0;
>> >> +}
>> >> +
>> >> +/**
>> >> + * Template function for running the scheduler tests.
>> >> + * The main reason for having this function is that CUnit does not
>> >> offer
>> >> a way
>> >> + * to pass arguments to a testcase function.
>> >> + * The other reason is that there are common steps for all testcases.
>> >> + */
>> >> +static void *exec_template(void *arg)
>> >> +{
>> >> +       odp_buffer_pool_t msg_pool;
>> >> +       odp_shm_t shm;
>> >> +       test_globals_t *globals;
>> >> +       odp_barrier_t *barrier;
>> >> +       test_case_args_t *args = (test_case_args_t*) arg;
>> >> +
>> >> +       shm     = odp_shm_lookup("test_globals");
>> >> +       globals = odp_shm_addr(shm);
>> >> +
>> >> +       CU_ASSERT(globals != NULL);
>> >> +
>> >> +       barrier = &globals->barrier;
>> >> +
>> >> +       /*
>> >> +        * Sync before start
>> >> +        */
>> >> +       odp_barrier_sync(barrier);
>> >> +
>> >> +       /*
>> >> +        * Find the buffer pool
>> >> +        */
>> >> +       msg_pool = odp_buffer_pool_lookup("msg_pool");
>> >> +
>> >> +       CU_ASSERT(msg_pool != ODP_BUFFER_POOL_INVALID);
>> >> +
>> >> +       odp_barrier_sync(barrier);
>> >> +
>> >> +       /*
>> >> +        * Now run the testcase routine passing the arguments
>> >> +        */
>> >> +       args->func(args->name, odp_thread_id(), msg_pool,
>> >> +                  args->prio, barrier);
>> >> +
>> >> +       return arg;
>> >> +}
>> >> +
>> >> +/* Low prio */
>> >> +
>> >> +static void schedule_one_single_lo(void)
>> >> +{
>> >> +       test_case_args_t args;
>> >> +       snprintf(args.name, sizeof(args.name), "sched_one_s_lo");
>> >> +       args.prio = ODP_SCHED_PRIO_LOWEST;
>> >> +       args.func = test_schedule_one_single;
>> >> +       execute_parallel(exec_template, &args);
>> >> +}
>> >> +
>> >> +static void schedule_single_lo(void)
>> >> +{
>> >> +       test_case_args_t args;
>> >> +       snprintf(args.name, sizeof(args.name), "sched_____s_lo");
>> >> +       args.prio = ODP_SCHED_PRIO_LOWEST;
>> >> +       args.func = test_schedule_single;
>> >> +       execute_parallel(exec_template, &args);
>> >> +}
>> >> +
>> >> +static void schedule_one_many_lo(void)
>> >> +{
>> >> +       test_case_args_t args;
>> >> +       snprintf(args.name, sizeof(args.name), "sched_one_m_lo");
>> >> +       args.prio = ODP_SCHED_PRIO_LOWEST;
>> >> +       args.func = test_schedule_one_many;
>> >> +       execute_parallel(exec_template, &args);
>> >> +}
>> >> +
>> >> +static void schedule_many_lo(void)
>> >> +{
>> >> +       test_case_args_t args;
>> >> +       snprintf(args.name, sizeof(args.name), "sched_____m_lo");
>> >> +       args.prio = ODP_SCHED_PRIO_LOWEST;
>> >> +       args.func = test_schedule_many;
>> >> +       execute_parallel(exec_template, &args);
>> >> +}
>> >> +
>> >> +static void schedule_multi_lo(void)
>> >> +{
>> >> +       test_case_args_t args;
>> >> +       snprintf(args.name, sizeof(args.name), "sched_____m_lo");
>> >> +       args.prio = ODP_SCHED_PRIO_LOWEST;
>> >> +       args.func = test_schedule_multi;
>> >> +       execute_parallel(exec_template, &args);
>> >> +}
>> >> +
>> >> +/* High prio */
>> >> +
>> >> +static void schedule_one_single_hi(void)
>> >> +{
>> >> +       test_case_args_t args;
>> >> +       snprintf(args.name, sizeof(args.name), "sched_one_s_hi");
>> >> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
>> >> +       args.func = test_schedule_single;
>> >> +       execute_parallel(exec_template, &args);
>> >> +}
>> >> +
>> >> +static void schedule_single_hi(void)
>> >> +{
>> >> +       test_case_args_t args;
>> >> +       snprintf(args.name, sizeof(args.name), "sched_____s_hi");
>> >> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
>> >> +       args.func = test_schedule_single;
>> >> +       execute_parallel(exec_template, &args);
>> >> +}
>> >> +
>> >> +static void schedule_one_many_hi(void)
>> >> +{
>> >> +       test_case_args_t args;
>> >> +       snprintf(args.name, sizeof(args.name), "sched_one_m_hi");
>> >> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
>> >> +       args.func = test_schedule_one_many;
>> >> +       execute_parallel(exec_template, &args);
>> >> +}
>> >> +
>> >> +static void schedule_many_hi(void)
>> >> +{
>> >> +       test_case_args_t args;
>> >> +       snprintf(args.name, sizeof(args.name), "sched_____m_hi");
>> >> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
>> >> +       args.func = test_schedule_many;
>> >> +       execute_parallel(exec_template, &args);
>> >> +}
>> >> +
>> >> +static void schedule_multi_hi(void)
>> >> +{
>> >> +       test_case_args_t args;
>> >> +       snprintf(args.name, sizeof(args.name), "sched_multi_hi");
>> >> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
>> >> +       args.func = test_schedule_multi;
>> >> +       execute_parallel(exec_template, &args);
>> >> +}
>> >> +
>> >> +static void execute_parallel(void *(*start_routine) (void *),
>> >> +                            test_case_args_t *test_case_args)
>> >> +{
>> >> +       odph_linux_pthread_t thread_tbl[MAX_WORKERS];
>> >> +       int first_core;
>> >> +
>> >> +       memset(thread_tbl, 0, sizeof(thread_tbl));
>> >> +
>> >> +       /*
>> >> +        * By default core #0 runs Linux kernel background tasks.
>> >> +        * Start mapping thread from core #1
>> >> +        */
>> >> +       first_core = 1;
>> >> +
>> >> +       if (odp_sys_core_count() == 1)
>> >> +               first_core = 0;
>> >> +
>> >> +       odph_linux_pthread_create(thread_tbl, num_workers, first_core,
>> >> +                                       start_routine, test_case_args);
>> >> +
>> >> +       /* Wait for worker threads to terminate */
>> >> +       odph_linux_pthread_join(thread_tbl, num_workers);
>> >> +}
>> >> +
>> >> +static odp_buffer_pool_t test_odp_buffer_pool_init(void)
>> >> +{
>> >> +       void *pool_base;
>> >> +       odp_shm_t shm;
>> >> +       odp_buffer_pool_t pool;
>> >> +
>> >> +       shm = odp_shm_reserve("msg_pool",
>> >> +                             MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
>> >> +
>> >> +       pool_base = odp_shm_addr(shm);
>> >> +
>> >> +       if (NULL == pool_base) {
>> >> +               printf("Shared memory reserve failed.\n");
>> >> +               return -1;
>> >> +       }
>> >> +
>> >> +       pool = odp_buffer_pool_create("msg_pool", pool_base,
>> >> MSG_POOL_SIZE,
>> >> +                                     BUF_SIZE, ODP_CACHE_LINE_SIZE,
>> >> +                                     ODP_BUFFER_TYPE_RAW);
>> >> +
>> >> +       if (ODP_BUFFER_POOL_INVALID == pool) {
>> >> +               printf("Pool create failed.\n");
>> >> +               return -1;
>> >> +       }
>> >> +       return pool;
>> >> +}
>> >> +
>> >> +int schedule_test_init(void)
>> >> +{
>> >> +       test_args_t args;
>> >> +       odp_shm_t shm;
>> >> +       test_globals_t *globals;
>> >> +       int i, j;
>> >> +       int prios;
>> >> +
>> >> +       if (0 != odp_init_global(NULL, NULL)) {
>> >> +               printf("odp_init_global fail.\n");
>> >> +               return -1;
>> >> +       }
>> >> +       if (0 != odp_init_local()) {
>> >> +               printf("odp_init_local fail.\n");
>> >> +               return -1;
>> >> +       }
>> >> +       if (ODP_BUFFER_POOL_INVALID == test_odp_buffer_pool_init()) {
>> >> +               printf("test_odp_buffer_pool_init fail.\n");
>> >> +               return -1;
>> >> +       }
>> >> +
>> >> +       /* A worker thread per core */
>> >> +       num_workers = odp_sys_core_count();
>> >> +
>> >> +       if (args.core_count)
>> >> +               num_workers = args.core_count;
>> >> +
>> >> +       /* force to max core count */
>> >> +       if (num_workers > MAX_WORKERS)
>> >> +               num_workers = MAX_WORKERS;
>> >> +       shm = odp_shm_reserve("test_globals",
>> >> +                             sizeof(test_globals_t),
>> >> ODP_CACHE_LINE_SIZE,
>> >> 0);
>> >> +
>> >> +       globals = odp_shm_addr(shm);
>> >> +
>> >> +       if (globals == NULL) {
>> >> +               ODP_ERR("Shared memory reserve failed.\n");
>> >> +               return -1;
>> >> +       }
>> >> +
>> >> +       memset(globals, 0, sizeof(test_globals_t));
>> >> +
>> >> +       /* Barrier to sync test case execution */
>> >> +       odp_barrier_init_count(&globals->barrier, num_workers);
>> >> +
>> >> +       prios = odp_schedule_num_prio();
>> >> +
>> >> +       for (i = 0; i < prios; i++) {
>> >> +               odp_queue_param_t param;
>> >> +               odp_queue_t queue;
>> >> +               char name[] = "sched_XX_YY";
>> >> +
>> >> +               if (i != ODP_SCHED_PRIO_HIGHEST &&
>> >> +                   i != ODP_SCHED_PRIO_LOWEST)
>> >> +                       continue;
>> >> +
>> >> +               name[6] = '0' + i/10;
>> >> +               name[7] = '0' + i - 10*(i/10);
>> >> +
>> >> +               param.sched.prio  = i;
>> >> +               param.sched.sync  = ODP_SCHED_SYNC_ATOMIC;
>> >> +               param.sched.group = ODP_SCHED_GROUP_DEFAULT;
>> >> +
>> >> +               for (j = 0; j < QUEUES_PER_PRIO; j++) {
>> >> +                       name[9]  = '0' + j/10;
>> >> +                       name[10] = '0' + j - 10*(j/10);
>> >> +
>> >> +                       queue = odp_queue_create(name,
>> >> ODP_QUEUE_TYPE_SCHED,
>> >> +                                                &param);
>> >> +
>> >> +                       if (queue == ODP_QUEUE_INVALID) {
>> >> +                               ODP_ERR("Schedule queue create
>> >> failed.\n");
>> >> +                               return -1;
>> >> +                       }
>> >> +               }
>> >> +       }
>> >> +       return 0;
>> >> +}
>> >> +
>> >> +int schedule_test_finalize(void)
>> >> +{
>> >> +       odp_term_local();
>> >> +       odp_term_global();
>> >> +       return 0;
>> >> +}
>> >> +
>> >> +struct CU_TestInfo schedule_tests[] = {
>> >> +       _CU_TEST_INFO(schedule_wait_time),
>> >> +       _CU_TEST_INFO(schedule_one_single_lo),
>> >> +       _CU_TEST_INFO(schedule_single_lo),
>> >> +       _CU_TEST_INFO(schedule_one_many_lo),
>> >> +       _CU_TEST_INFO(schedule_many_lo),
>> >> +       _CU_TEST_INFO(schedule_multi_lo),
>> >> +       _CU_TEST_INFO(schedule_one_single_hi),
>> >> +       _CU_TEST_INFO(schedule_single_hi),
>> >> +       _CU_TEST_INFO(schedule_one_many_hi),
>> >> +       _CU_TEST_INFO(schedule_many_hi),
>> >> +       _CU_TEST_INFO(schedule_multi_hi),
>> >> +       CU_TEST_INFO_NULL,
>> >> +};
>> >> diff --git a/test/cunit/schedule/odp_schedule_testsuites.c
>> >> b/test/cunit/schedule/odp_schedule_testsuites.c
>> >> new file mode 100644
>> >> index 0000000..1053069
>> >> --- /dev/null
>> >> +++ b/test/cunit/schedule/odp_schedule_testsuites.c
>> >> @@ -0,0 +1,35 @@
>> >> +/* Copyright (c) 2014, Linaro Limited
>> >> + * All rights reserved.
>> >> + *
>> >> + * SPDX-License-Identifier:     BSD-3-Clause
>> >> + */
>> >> +
>> >> +#include "odp_schedule_testsuites.h"
>> >> +
>> >> +static CU_SuiteInfo suites[] = {
>> >> +       {
>> >> +               "Scheduler tests" ,
>> >> +               schedule_test_init,
>> >> +               schedule_test_finalize,
>> >> +               NULL,
>> >> +               NULL,
>> >> +               schedule_tests
>> >> +       },
>> >> +       CU_SUITE_INFO_NULL,
>> >> +};
>> >> +
>> >> +int main(void)
>> >> +{
>> >> +       /* initialize the CUnit test registry */
>> >> +       if (CUE_SUCCESS != CU_initialize_registry())
>> >> +               return CU_get_error();
>> >> +
>> >> +       /* register suites */
>> >> +       CU_register_suites(suites);
>> >> +       /* Run all tests using the CUnit Basic interface */
>> >> +       CU_basic_set_mode(CU_BRM_VERBOSE);
>> >> +       CU_basic_run_tests();
>> >> +       CU_cleanup_registry();
>> >> +
>> >> +       return CU_get_error();
>> >> +}
>> >> diff --git a/test/cunit/schedule/odp_schedule_testsuites.h
>> >> b/test/cunit/schedule/odp_schedule_testsuites.h
>> >> new file mode 100644
>> >> index 0000000..67a2a69
>> >> --- /dev/null
>> >> +++ b/test/cunit/schedule/odp_schedule_testsuites.h
>> >> @@ -0,0 +1,21 @@
>> >> +/* Copyright (c) 2014, Linaro Limited
>> >> + * All rights reserved.
>> >> + *
>> >> + * SPDX-License-Identifier:     BSD-3-Clause
>> >> + */
>> >> +
>> >> +#ifndef ODP_SCHEDULE_TESTSUITES_H_
>> >> +#define ODP_SCHEDULE_TESTSUITES_H_
>> >> +
>> >> +#include "odp.h"
>> >> +#include <CUnit/Basic.h>
>> >> +
>> >> +/* Helper macro for CU_TestInfo initialization */
>> >> +#define _CU_TEST_INFO(test_func) {#test_func, test_func}
>> >> +
>> >> +extern struct CU_TestInfo schedule_tests[];
>> >> +
>> >> +extern int schedule_test_init(void);
>> >> +extern int schedule_test_finalize(void);
>> >> +
>> >> +#endif /* ODP_SCHEDULE_TESTSUITES_H_ */
>> >> --
>> >> 1.8.3.2
>> >>
>> >>
>> >> _______________________________________________
>> >> lng-odp mailing list
>> >> lng-odp@lists.linaro.org
>> >> http://lists.linaro.org/mailman/listinfo/lng-odp
>> >
>> >
>
>
Alexandru Badicioiu Nov. 21, 2014, 2:24 p.m. UTC | #9
I think it is not OK to hardcode time intervals like 500 ms, not even
for a single given platform.
I would imagine a basic test like this, based on the pktio example:
core 0 (which does not run a schedule loop) allocates buffers from a
pool and enqueues a given number of them to one or more scheduled
queues. The other cores run the schedule loop with NO_WAIT; each time a
core gets a buffer, it releases the buffer back into the pool and
atomically increments a global counter.
At each schedule loop iteration the counter is checked against the total
number of enqueued buffers. The pass condition would be that every core
has seen the global counter reach the number of enqueued buffers.
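
A rough sketch of that flow, purely for illustration (TEST_NUM_BUFS,
bufs_seen and the worker function below are made up for this mail, and a
GCC __atomic builtin stands in for whatever atomic counter helper the
test ends up using):

#define TEST_NUM_BUFS 128                /* buffers enqueued by core 0 */

static uint32_t bufs_seen;               /* shared, zeroed before the run */

static int worker_schedule_loop(void)
{
	odp_buffer_t buf;

	/* A real test would likely put an upper bound on the iterations
	 * instead of spinning until the counter is reached. */
	while (__atomic_load_n(&bufs_seen, __ATOMIC_ACQUIRE) < TEST_NUM_BUFS) {
		buf = odp_schedule(NULL, ODP_SCHED_NO_WAIT);

		if (buf == ODP_BUFFER_INVALID)
			continue;        /* nothing for this core yet */

		odp_buffer_free(buf);    /* release back into the pool */
		__atomic_add_fetch(&bufs_seen, 1, __ATOMIC_ACQ_REL);
	}

	/* Pass: this core observed the counter reach the total enqueued. */
	return 0;
}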

Hope this helps.
Alex

On 21 November 2014 16:00, Ciprian Barbu <ciprian.barbu@linaro.org> wrote:

> On Fri, Nov 21, 2014 at 3:45 PM, Alexandru Badicioiu
> <alexandru.badicioiu@linaro.org> wrote:
> > I think multithreading should be kept, to test the essential ability of
> the
> > scheduler to deliver to multiple cores. The thing that should IMO be
> dropped
> > is the fairness assumption (each core get as many buffers as it
> enqueued).
> > The most general thing that should be tested for the scheduler is that is
> > able to deliver all enqueued buffers (as Bala suggested too).
>
> OK, then I will modify the logic there, not relying on each core
> getting a buffer from the scheduler. I will use a 500 ms wait time, is
> that ok?
>
> >
> > Alex
> >
> > On 21 November 2014 15:33, Ciprian Barbu <ciprian.barbu@linaro.org>
> wrote:
> >>
> >> On Fri, Nov 21, 2014 at 12:18 PM, Alexandru Badicioiu
> >> <alexandru.badicioiu@linaro.org> wrote:
> >> > Hi,
> >> > the scheduling tests in odp_example were discussed some time ago and
> >> > there
> >> > was an agreement, at least for FSL and TI platforms, that fair
> >> > scheduling
> >> > assumed by the following loop:
> >> > for (i = 0; i < QUEUE_ROUNDS; i++) {
> >> >                buf = odp_schedule_one(&queue, ODP_SCHED_WAIT);
> >> >
> >> >                if (odp_queue_enq(queue, buf)) {
> >> >                        ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> >> >                        return -1;
> >> >                }
> >> >        }
> >> >
> >> > for an ATOMIC queue doesn't make sense as the behavior of an ATOMIC
> >> > hardware
> >> > queue is to be scheduled to the same core as long as there are packets
> >> > in
> >> > the queue and the core has room to dequeue them (other platforms
> please
> >> > confirm or infirm). On my specific platform I can force this with a
> >> > particular HW configuration, but this configuration effectively
> disables
> >> > the
> >> > use of POLL queues.
> >> > I think we need scheduling tests for the most general case (SYNC_NONE
> >> > and no
> >> > assumption about how many buffers are scheduled to a particular core).
> >>
> >> Ok, I remember about that discussion, I didn't understand it at the
> time.
> >>
> >> So here is a question, is it better to drop the multi-threading
> >> structure and do everything in a single thread? Or keep multiple
> >> threads but use ODP_SCHED_NO_WAIT? Or both?
> >>
> >> >
> >> > Thanks,
> >> > Alex
> >> >
> >> >
> >> > On 20 November 2014 21:02, Ciprian Barbu <ciprian.barbu@linaro.org>
> >> > wrote:
> >> >>
> >> >> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
> >> >> ---
> >> >> The testcases are based almost entirely on the odp_example.
> >> >> There are no alloc tests and I added a test case for
> >> >> odp_schedule_wait_time.
> >> >> The major differencs between the odp_example and this cunit is the
> >> >> partition
> >> >> into testcases, the odp_example calls every test case from one big
> >> >> function.
> >> >>
> >> >> I had to work some magic in order to be able to pass arguments to
> test
> >> >> cases,
> >> >> I hope is not too hard to follow.
> >> >>
> >> >>  configure.ac                                  |   1 +
> >> >>  test/cunit/Makefile.am                        |   2 +
> >> >>  test/cunit/schedule/Makefile.am               |  10 +
> >> >>  test/cunit/schedule/odp_schedule_test.c       | 844
> >> >> ++++++++++++++++++++++++++
> >> >>  test/cunit/schedule/odp_schedule_testsuites.c |  35 ++
> >> >>  test/cunit/schedule/odp_schedule_testsuites.h |  21 +
> >> >>  6 files changed, 913 insertions(+)
> >> >>  create mode 100644 test/cunit/schedule/Makefile.am
> >> >>  create mode 100644 test/cunit/schedule/odp_schedule_test.c
> >> >>  create mode 100644 test/cunit/schedule/odp_schedule_testsuites.c
> >> >>  create mode 100644 test/cunit/schedule/odp_schedule_testsuites.h
> >> >>
> >> >> diff --git a/configure.ac b/configure.ac
> >> >> index fcd7279..a47db72 100644
> >> >> --- a/configure.ac
> >> >> +++ b/configure.ac
> >> >> @@ -173,6 +173,7 @@ AC_CONFIG_FILES([Makefile
> >> >>                  test/Makefile
> >> >>                  test/api_test/Makefile
> >> >>                   test/cunit/Makefile
> >> >> +                 test/cunit/schedule/Makefile
> >> >>                  pkgconfig/libodp.pc])
> >> >>
> >> >>  AC_SEARCH_LIBS([timer_create],[rt posix4])
> >> >> diff --git a/test/cunit/Makefile.am b/test/cunit/Makefile.am
> >> >> index 439e134..b6033ee 100644
> >> >> --- a/test/cunit/Makefile.am
> >> >> +++ b/test/cunit/Makefile.am
> >> >> @@ -3,6 +3,8 @@ include $(top_srcdir)/test/Makefile.inc
> >> >>  AM_CFLAGS += -I$(CUNIT_PATH)/include
> >> >>  AM_LDFLAGS += -L$(CUNIT_PATH)/lib -static -lcunit
> >> >>
> >> >> +SUBDIRS = schedule
> >> >> +
> >> >>  if ODP_CUNIT_ENABLED
> >> >>  TESTS = ${bin_PROGRAMS}
> >> >>  check_PROGRAMS = ${bin_PROGRAMS}
> >> >> diff --git a/test/cunit/schedule/Makefile.am
> >> >> b/test/cunit/schedule/Makefile.am
> >> >> new file mode 100644
> >> >> index 0000000..ad68b03
> >> >> --- /dev/null
> >> >> +++ b/test/cunit/schedule/Makefile.am
> >> >> @@ -0,0 +1,10 @@
> >> >> +include $(top_srcdir)/test/Makefile.inc
> >> >> +
> >> >> +if ODP_CUNIT_ENABLED
> >> >> +bin_PROGRAMS = odp_schedule_test
> >> >> +odp_schedule_test_LDFLAGS = $(AM_LDFLAGS) -L$(CUNIT_PATH)/lib
> -static
> >> >> -lcunit
> >> >> +odp_schedule_test_CFLAGS = $(AM_CFLAGS) -I$(CUNIT_PATH)/include
> >> >> +endif
> >> >> +
> >> >> +dist_odp_schedule_test_SOURCES = odp_schedule_test.c \
> >> >> +                                odp_schedule_testsuites.c
> >> >> diff --git a/test/cunit/schedule/odp_schedule_test.c
> >> >> b/test/cunit/schedule/odp_schedule_test.c
> >> >> new file mode 100644
> >> >> index 0000000..fa67f6e
> >> >> --- /dev/null
> >> >> +++ b/test/cunit/schedule/odp_schedule_test.c
> >> >> @@ -0,0 +1,844 @@
> >> >> +/* Copyright (c) 2014, Linaro Limited
> >> >> + * All rights reserved.
> >> >> + *
> >> >> + * SPDX-License-Identifier:     BSD-3-Clause
> >> >> + */
> >> >> +
> >> >> +#include "odp_schedule_testsuites.h"
> >> >> +#include <odph_linux.h>
> >> >> +
> >> >> +#define MAX_WORKERS            32            /**< Max worker threads
> >> >> */
> >> >> +#define MSG_POOL_SIZE           (4*1024*1024)
> >> >> +#define QUEUES_PER_PRIO                64            /**< Queue per
> >> >> priority */
> >> >> +#define QUEUE_ROUNDS           (512*1024)    /**< Queue test rounds
> */
> >> >> +#define MULTI_BUFS_MAX         4             /**< Buffer burst size
> */
> >> >> +#define BUF_SIZE               64
> >> >> +
> >> >> +#define SCHED_MSG "Test_buff_FOR_simple_schedule"
> >> >> +
> >> >> +/** Test arguments */
> >> >> +typedef struct {
> >> >> +       int core_count; /**< Core count */
> >> >> +       int proc_mode;  /**< Process mode */
> >> >> +} test_args_t;
> >> >> +
> >> >> +typedef int (*test_case_routine)(const char *, int,
> odp_buffer_pool_t,
> >> >> +                                int, odp_barrier_t *);
> >> >> +
> >> >> +/** Scheduler test case arguments */
> >> >> +typedef struct {
> >> >> +       char name[64];  /**< test case name */
> >> >> +       int prio;
> >> >> +       test_case_routine func;
> >> >> +} test_case_args_t;
> >> >> +
> >> >> +/** Test global variables */
> >> >> +typedef struct {
> >> >> +       odp_barrier_t barrier;/**< @private Barrier for test
> >> >> synchronisation */
> >> >> +       test_args_t test_args;/**< @private Test case function and
> >> >> arguments */
> >> >> +} test_globals_t;
> >> >> +
> >> >> +static void execute_parallel(void *(*func) (void *),
> test_case_args_t
> >> >> *);
> >> >> +static int num_workers;
> >> >> +
> >> >> +/**
> >> >> + * @internal CUnit test case for verifying functionality of
> >> >> + *           schedule_wait_time
> >> >> + */
> >> >> +static void schedule_wait_time(void)
> >> >> +{
> >> >> +       uint64_t wait_time;
> >> >> +
> >> >> +       wait_time = odp_schedule_wait_time(0);
> >> >> +       CU_ASSERT(wait_time > 0);
> >> >> +       CU_PASS("schedule_wait_time(0)");
> >> >> +
> >> >> +       wait_time = odp_schedule_wait_time(1);
> >> >> +       CU_ASSERT(wait_time > 0);
> >> >> +       CU_PASS("schedule_wait_time(1)");
> >> >> +
> >> >> +       wait_time = odp_schedule_wait_time((uint64_t)-1LL);
> >> >> +       CU_ASSERT(wait_time > 0);
> >> >> +       CU_PASS("schedule_wait_time(MAX_LONG_INT)");
> >> >> +}
> >> >> +
> >> >> +/**
> >> >> + * @internal Clear all scheduled queues. Retry to be sure that all
> >> >> + * buffers have been scheduled.
> >> >> + */
> >> >> +static void clear_sched_queues(void)
> >> >> +{
> >> >> +       odp_buffer_t buf;
> >> >> +
> >> >> +       while (1) {
> >> >> +               buf = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
> >> >> +
> >> >> +               if (buf == ODP_BUFFER_INVALID)
> >> >> +                       break;
> >> >> +
> >> >> +               odp_buffer_free(buf);
> >> >> +       }
> >> >> +}
> >> >> +
> >> >> +/**
> >> >> + * @internal Create multiple queues from a pool of buffers
> >> >> + *
> >> >> + * @param thr  Thread
> >> >> + * @param msg_pool  Buffer pool
> >> >> + * @param prio   Queue priority
> >> >> + *
> >> >> + * @return 0 if successful
> >> >> + */
> >> >> +static int create_queues(int thr, odp_buffer_pool_t msg_pool, int
> >> >> prio)
> >> >> +{
> >> >> +       char name[] = "sched_XX_YY";
> >> >> +       odp_buffer_t buf;
> >> >> +       odp_queue_t queue;
> >> >> +       int i;
> >> >> +
> >> >> +       name[6] = '0' + prio/10;
> >> >> +       name[7] = '0' + prio - 10*(prio/10);
> >> >> +
> >> >> +       /* Alloc and enqueue a buffer per queue */
> >> >> +       for (i = 0; i < QUEUES_PER_PRIO; i++) {
> >> >> +               name[9]  = '0' + i/10;
> >> >> +               name[10] = '0' + i - 10*(i/10);
> >> >> +
> >> >> +               queue = odp_queue_lookup(name);
> >> >> +
> >> >> +               if (queue == ODP_QUEUE_INVALID) {
> >> >> +                       ODP_ERR("  [%i] Queue %s lookup failed.\n",
> >> >> thr,
> >> >> name);
> >> >> +                       return -1;
> >> >> +               }
> >> >> +
> >> >> +               buf = odp_buffer_alloc(msg_pool);
> >> >> +
> >> >> +               if (!odp_buffer_is_valid(buf)) {
> >> >> +                       ODP_ERR("  [%i] msg_pool alloc failed\n",
> thr);
> >> >> +                       return -1;
> >> >> +               }
> >> >> +
> >> >> +               if (odp_queue_enq(queue, buf)) {
> >> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n",
> thr);
> >> >> +                       return -1;
> >> >> +               }
> >> >> +       }
> >> >> +
> >> >> +       return 0;
> >> >> +}
> >> >> +
> >> >> +/**
> >> >> + * @internal Create a single queue from a pool of buffers
> >> >> + *
> >> >> + * @param thr  Thread
> >> >> + * @param msg_pool  Buffer pool
> >> >> + * @param prio   Queue priority
> >> >> + *
> >> >> + * @return 0 if successful
> >> >> + */
> >> >> +static int create_queue(int thr, odp_buffer_pool_t msg_pool, int
> prio)
> >> >> +{
> >> >> +       char name[] = "sched_XX_00";
> >> >> +       odp_buffer_t buf;
> >> >> +       odp_queue_t queue;
> >> >> +
> >> >> +       buf = odp_buffer_alloc(msg_pool);
> >> >> +
> >> >> +       if (!odp_buffer_is_valid(buf)) {
> >> >> +               ODP_ERR("  [%i] msg_pool alloc failed\n", thr);
> >> >> +               return -1;
> >> >> +       }
> >> >> +
> >> >> +       name[6] = '0' + prio/10;
> >> >> +       name[7] = '0' + prio - 10*(prio/10);
> >> >> +
> >> >> +       queue = odp_queue_lookup(name);
> >> >> +
> >> >> +       if (queue == ODP_QUEUE_INVALID) {
> >> >> +               ODP_ERR("  [%i] Queue %s lookup failed.\n", thr,
> name);
> >> >> +               return -1;
> >> >> +       }
> >> >> +
> >> >> +       if (odp_queue_enq(queue, buf)) {
> >> >> +               ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> >> >> +               return -1;
> >> >> +       }
> >> >> +
> >> >> +       return 0;
> >> >> +}
> >> >> +
> >> >> +/**
> >> >> + * @internal Test scheduling of a single queue - with
> >> >> odp_schedule_one()
> >> >> + *
> >> >> + * Enqueue a buffer to the shared queue. Schedule and enqueue the
> >> >> received
> >> >> + * buffer back into the queue.
> >> >> + *
> >> >> + * @param str      Test case name string
> >> >> + * @param thr      Thread
> >> >> + * @param msg_pool Buffer pool
> >> >> + * @param prio     Priority
> >> >> + * @param barrier  Barrier
> >> >> + *
> >> >> + * @return 0 if successful
> >> >> + */
> >> >> +static int test_schedule_one_single(const char *str, int thr,
> >> >> +                                   odp_buffer_pool_t msg_pool,
> >> >> +                                   int prio, odp_barrier_t *barrier)
> >> >> +{
> >> >> +       odp_buffer_t buf;
> >> >> +       odp_queue_t queue;
> >> >> +       uint64_t t1, t2, cycles, ns;
> >> >> +       uint32_t i;
> >> >> +       uint32_t tot = 0;
> >> >> +
> >> >> +       if (create_queue(thr, msg_pool, prio)) {
> >> >> +               CU_FAIL_FATAL("lookup queue");
> >> >> +               return -1;
> >> >> +       }
> >> >> +
> >> >> +       t1 = odp_time_get_cycles();
> >> >> +
> >> >> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
> >> >> +               buf = odp_schedule_one(&queue, ODP_SCHED_WAIT);
> >> >> +
> >> >> +               if (odp_queue_enq(queue, buf)) {
> >> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n",
> thr);
> >> >> +                       return -1;
> >> >> +               }
> >> >> +       }
> >> >> +
> >> >> +       if (odp_queue_sched_type(queue) == ODP_SCHED_SYNC_ATOMIC)
> >> >> +               odp_schedule_release_atomic();
> >> >> +
> >> >> +       t2     = odp_time_get_cycles();
> >> >> +       cycles = odp_time_diff_cycles(t1, t2);
> >> >> +       ns     = odp_time_cycles_to_ns(cycles);
> >> >> +       tot    = i;
> >> >> +
> >> >> +       odp_barrier_sync(barrier);
> >> >> +       clear_sched_queues();
> >> >> +
> >> >> +       cycles = cycles/tot;
> >> >> +       ns     = ns/tot;
> >> >> +
> >> >> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
> >> >> +              thr, str, cycles, ns);
> >> >> +
> >> >> +       return 0;
> >> >> +}
> >> >> +
> >> >> +/**
> >> >> + * @internal Test scheduling of multiple queues - with
> >> >> odp_schedule_one()
> >> >> + *
> >> >> + * Enqueue a buffer to each queue. Schedule and enqueue the received
> >> >> + * buffer back into the queue it came from.
> >> >> + *
> >> >> + * @param str      Test case name string
> >> >> + * @param thr      Thread
> >> >> + * @param msg_pool Buffer pool
> >> >> + * @param prio     Priority
> >> >> + * @param barrier  Barrier
> >> >> + *
> >> >> + * @return 0 if successful
> >> >> + */
> >> >> +static int test_schedule_one_many(const char *str, int thr,
> >> >> +                                 odp_buffer_pool_t msg_pool,
> >> >> +                                 int prio, odp_barrier_t *barrier)
> >> >> +{
> >> >> +       odp_buffer_t buf;
> >> >> +       odp_queue_t queue;
> >> >> +       uint64_t t1 = 0;
> >> >> +       uint64_t t2 = 0;
> >> >> +       uint64_t cycles, ns;
> >> >> +       uint32_t i;
> >> >> +       uint32_t tot = 0;
> >> >> +
> >> >> +       if (create_queues(thr, msg_pool, prio))
> >> >> +               return -1;
> >> >> +
> >> >> +       /* Start sched-enq loop */
> >> >> +       t1 = odp_time_get_cycles();
> >> >> +
> >> >> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
> >> >> +               buf = odp_schedule_one(&queue, ODP_SCHED_WAIT);
> >> >> +
> >> >> +               if (odp_queue_enq(queue, buf)) {
> >> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n",
> thr);
> >> >> +                       return -1;
> >> >> +               }
> >> >> +       }
> >> >> +
> >> >> +       if (odp_queue_sched_type(queue) == ODP_SCHED_SYNC_ATOMIC)
> >> >> +               odp_schedule_release_atomic();
> >> >> +
> >> >> +       t2     = odp_time_get_cycles();
> >> >> +       cycles = odp_time_diff_cycles(t1, t2);
> >> >> +       ns     = odp_time_cycles_to_ns(cycles);
> >> >> +       tot    = i;
> >> >> +
> >> >> +       odp_barrier_sync(barrier);
> >> >> +       clear_sched_queues();
> >> >> +
> >> >> +       cycles = cycles/tot;
> >> >> +       ns     = ns/tot;
> >> >> +
> >> >> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
> >> >> +              thr, str, cycles, ns);
> >> >> +
> >> >> +       return 0;
> >> >> +}
> >> >> +
> >> >> +/**
> >> >> + * @internal Test scheduling of a single queue - with odp_schedule()
> >> >> + *
> >> >> + * Enqueue a buffer to the shared queue. Schedule and enqueue the
> >> >> received
> >> >> + * buffer back into the queue.
> >> >> + *
> >> >> + * @param str      Test case name string
> >> >> + * @param thr      Thread
> >> >> + * @param msg_pool Buffer pool
> >> >> + * @param prio     Priority
> >> >> + * @param barrier  Barrier
> >> >> + *
> >> >> + * @return 0 if successful
> >> >> + */
> >> >> +static int test_schedule_single(const char *str, int thr,
> >> >> +                               odp_buffer_pool_t msg_pool,
> >> >> +                               int prio, odp_barrier_t *barrier)
> >> >> +{
> >> >> +       odp_buffer_t buf;
> >> >> +       odp_queue_t queue;
> >> >> +       uint64_t t1, t2, cycles, ns;
> >> >> +       uint32_t i;
> >> >> +       uint32_t tot = 0;
> >> >> +
> >> >> +       if (create_queue(thr, msg_pool, prio))
> >> >> +               return -1;
> >> >> +
> >> >> +       t1 = odp_time_get_cycles();
> >> >> +
> >> >> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
> >> >> +               buf = odp_schedule(&queue, ODP_SCHED_WAIT);
> >> >> +
> >> >> +               if (odp_queue_enq(queue, buf)) {
> >> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n",
> thr);
> >> >> +                       return -1;
> >> >> +               }
> >> >> +       }
> >> >> +
> >> >> +       /* Clear possible locally stored buffers */
> >> >> +       odp_schedule_pause();
> >> >> +
> >> >> +       tot = i;
> >> >> +
> >> >> +       while (1) {
> >> >> +               buf = odp_schedule(&queue, ODP_SCHED_NO_WAIT);
> >> >> +
> >> >> +               if (buf == ODP_BUFFER_INVALID)
> >> >> +                       break;
> >> >> +
> >> >> +               tot++;
> >> >> +
> >> >> +               if (odp_queue_enq(queue, buf)) {
> >> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n",
> thr);
> >> >> +                       return -1;
> >> >> +               }
> >> >> +       }
> >> >> +
> >> >> +       odp_schedule_resume();
> >> >> +
> >> >> +       t2     = odp_time_get_cycles();
> >> >> +       cycles = odp_time_diff_cycles(t1, t2);
> >> >> +       ns     = odp_time_cycles_to_ns(cycles);
> >> >> +
> >> >> +       odp_barrier_sync(barrier);
> >> >> +       clear_sched_queues();
> >> >> +
> >> >> +       cycles = cycles/tot;
> >> >> +       ns     = ns/tot;
> >> >> +
> >> >> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
> >> >> +              thr, str, cycles, ns);
> >> >> +
> >> >> +       return 0;
> >> >> +}
> >> >> +
> >> >> +/**
> >> >> + * @internal Test scheduling of multiple queues - with
> odp_schedule()
> >> >> + *
> >> >> + * Enqueue a buffer to each queue. Schedule and enqueue the received
> >> >> + * buffer back into the queue it came from.
> >> >> + *
> >> >> + * @param str      Test case name string
> >> >> + * @param thr      Thread
> >> >> + * @param msg_pool Buffer pool
> >> >> + * @param prio     Priority
> >> >> + * @param barrier  Barrier
> >> >> + *
> >> >> + * @return 0 if successful
> >> >> + */
> >> >> +static int test_schedule_many(const char *str, int thr,
> >> >> +                             odp_buffer_pool_t msg_pool,
> >> >> +                             int prio, odp_barrier_t *barrier)
> >> >> +{
> >> >> +       odp_buffer_t buf;
> >> >> +       odp_queue_t queue;
> >> >> +       uint64_t t1 = 0;
> >> >> +       uint64_t t2 = 0;
> >> >> +       uint64_t cycles, ns;
> >> >> +       uint32_t i;
> >> >> +       uint32_t tot = 0;
> >> >> +
> >> >> +       if (create_queues(thr, msg_pool, prio))
> >> >> +               return -1;
> >> >> +
> >> >> +       /* Start sched-enq loop */
> >> >> +       t1 = odp_time_get_cycles();
> >> >> +
> >> >> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
> >> >> +               buf = odp_schedule(&queue, ODP_SCHED_WAIT);
> >> >> +
> >> >> +               if (odp_queue_enq(queue, buf)) {
> >> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n",
> thr);
> >> >> +                       return -1;
> >> >> +               }
> >> >> +       }
> >> >> +
> >> >> +       /* Clear possible locally stored buffers */
> >> >> +       odp_schedule_pause();
> >> >> +
> >> >> +       tot = i;
> >> >> +
> >> >> +       while (1) {
> >> >> +               buf = odp_schedule(&queue, ODP_SCHED_NO_WAIT);
> >> >> +
> >> >> +               if (buf == ODP_BUFFER_INVALID)
> >> >> +                       break;
> >> >> +
> >> >> +               tot++;
> >> >> +
> >> >> +               if (odp_queue_enq(queue, buf)) {
> >> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n",
> thr);
> >> >> +                       return -1;
> >> >> +               }
> >> >> +       }
> >> >> +
> >> >> +       odp_schedule_resume();
> >> >> +
> >> >> +       t2     = odp_time_get_cycles();
> >> >> +       cycles = odp_time_diff_cycles(t1, t2);
> >> >> +       ns     = odp_time_cycles_to_ns(cycles);
> >> >> +
> >> >> +       odp_barrier_sync(barrier);
> >> >> +       clear_sched_queues();
> >> >> +
> >> >> +       cycles = cycles/tot;
> >> >> +       ns     = ns/tot;
> >> >> +
> >> >> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
> >> >> +              thr, str, cycles, ns);
> >> >> +
> >> >> +       return 0;
> >> >> +}
> >> >> +
> >> >> +/**
> >> >> + * @internal Test scheduling of multiple queues with multi_sched and
> >> >> multi_enq
> >> >> + *
> >> >> + * @param str      Test case name string
> >> >> + * @param thr      Thread
> >> >> + * @param msg_pool Buffer pool
> >> >> + * @param prio     Priority
> >> >> + * @param barrier  Barrier
> >> >> + *
> >> >> + * @return 0 if successful
> >> >> + */
> >> >> +static int test_schedule_multi(const char *str, int thr,
> >> >> +                              odp_buffer_pool_t msg_pool,
> >> >> +                              int prio, odp_barrier_t *barrier)
> >> >> +{
> >> >> +       odp_buffer_t buf[MULTI_BUFS_MAX];
> >> >> +       odp_queue_t queue;
> >> >> +       uint64_t t1 = 0;
> >> >> +       uint64_t t2 = 0;
> >> >> +       uint64_t cycles, ns;
> >> >> +       int i, j;
> >> >> +       int num;
> >> >> +       uint32_t tot = 0;
> >> >> +       char name[] = "sched_XX_YY";
> >> >> +
> >> >> +       name[6] = '0' + prio/10;
> >> >> +       name[7] = '0' + prio - 10*(prio/10);
> >> >> +
> >> >> +       /* Alloc and enqueue a buffer per queue */
> >> >> +       for (i = 0; i < QUEUES_PER_PRIO; i++) {
> >> >> +               name[9]  = '0' + i/10;
> >> >> +               name[10] = '0' + i - 10*(i/10);
> >> >> +
> >> >> +               queue = odp_queue_lookup(name);
> >> >> +
> >> >> +               if (queue == ODP_QUEUE_INVALID) {
> >> >> +                       ODP_ERR("  [%i] Queue %s lookup failed.\n",
> >> >> thr,
> >> >> name);
> >> >> +                       return -1;
> >> >> +               }
> >> >> +
> >> >> +               for (j = 0; j < MULTI_BUFS_MAX; j++) {
> >> >> +                       buf[j] = odp_buffer_alloc(msg_pool);
> >> >> +
> >> >> +                       if (!odp_buffer_is_valid(buf[j])) {
> >> >> +                               ODP_ERR("  [%i] msg_pool alloc
> >> >> failed\n",
> >> >> thr);
> >> >> +                               return -1;
> >> >> +                       }
> >> >> +               }
> >> >> +
> >> >> +               if (odp_queue_enq_multi(queue, buf, MULTI_BUFS_MAX))
> {
> >> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n",
> thr);
> >> >> +                       return -1;
> >> >> +               }
> >> >> +       }
> >> >> +
> >> >> +       /* Start sched-enq loop */
> >> >> +       t1 = odp_time_get_cycles();
> >> >> +
> >> >> +       for (i = 0; i < QUEUE_ROUNDS; i++) {
> >> >> +               num = odp_schedule_multi(&queue, ODP_SCHED_WAIT, buf,
> >> >> +                                        MULTI_BUFS_MAX);
> >> >> +
> >> >> +               tot += num;
> >> >> +
> >> >> +               if (odp_queue_enq_multi(queue, buf, num)) {
> >> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n",
> thr);
> >> >> +                       return -1;
> >> >> +               }
> >> >> +       }
> >> >> +
> >> >> +       /* Clear possible locally stored buffers */
> >> >> +       odp_schedule_pause();
> >> >> +
> >> >> +       while (1) {
> >> >> +               num = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT,
> >> >> buf,
> >> >> +                                        MULTI_BUFS_MAX);
> >> >> +
> >> >> +               if (num == 0)
> >> >> +                       break;
> >> >> +
> >> >> +               tot += num;
> >> >> +
> >> >> +               if (odp_queue_enq_multi(queue, buf, num)) {
> >> >> +                       ODP_ERR("  [%i] Queue enqueue failed.\n",
> thr);
> >> >> +                       return -1;
> >> >> +               }
> >> >> +       }
> >> >> +
> >> >> +       odp_schedule_resume();
> >> >> +
> >> >> +
> >> >> +       t2     = odp_time_get_cycles();
> >> >> +       cycles = odp_time_diff_cycles(t1, t2);
> >> >> +       ns     = odp_time_cycles_to_ns(cycles);
> >> >> +
> >> >> +       odp_barrier_sync(barrier);
> >> >> +       clear_sched_queues();
> >> >> +
> >> >> +       if (tot) {
> >> >> +               cycles = cycles/tot;
> >> >> +               ns     = ns/tot;
> >> >> +       } else {
> >> >> +               cycles = 0;
> >> >> +               ns     = 0;
> >> >> +       }
> >> >> +
> >> >> +       printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
> >> >> +              thr, str, cycles, ns);
> >> >> +
> >> >> +       return 0;
> >> >> +}
> >> >> +
> >> >> +/**
> >> >> + * Template function for running the scheduler tests.
> >> >> + * The main reason for having this function is that CUnit does not
> >> >> offer
> >> >> a way
> >> >> + * to pass arguments to a testcase function.
> >> >> + * The other reason is that there are common steps for all
> testcases.
> >> >> + */
> >> >> +static void *exec_template(void *arg)
> >> >> +{
> >> >> +       odp_buffer_pool_t msg_pool;
> >> >> +       odp_shm_t shm;
> >> >> +       test_globals_t *globals;
> >> >> +       odp_barrier_t *barrier;
> >> >> +       test_case_args_t *args = (test_case_args_t*) arg;
> >> >> +
> >> >> +       shm     = odp_shm_lookup("test_globals");
> >> >> +       globals = odp_shm_addr(shm);
> >> >> +
> >> >> +       CU_ASSERT(globals != NULL);
> >> >> +
> >> >> +       barrier = &globals->barrier;
> >> >> +
> >> >> +       /*
> >> >> +        * Sync before start
> >> >> +        */
> >> >> +       odp_barrier_sync(barrier);
> >> >> +
> >> >> +       /*
> >> >> +        * Find the buffer pool
> >> >> +        */
> >> >> +       msg_pool = odp_buffer_pool_lookup("msg_pool");
> >> >> +
> >> >> +       CU_ASSERT(msg_pool != ODP_BUFFER_POOL_INVALID);
> >> >> +
> >> >> +       odp_barrier_sync(barrier);
> >> >> +
> >> >> +       /*
> >> >> +        * Now run the testcase routine passing the arguments
> >> >> +        */
> >> >> +       args->func(args->name, odp_thread_id(), msg_pool,
> >> >> +                  args->prio, barrier);
> >> >> +
> >> >> +       return arg;
> >> >> +}
> >> >> +
> >> >> +/* Low prio */
> >> >> +
> >> >> +static void schedule_one_single_lo(void)
> >> >> +{
> >> >> +       test_case_args_t args;
> >> >> +       snprintf(args.name, sizeof(args.name), "sched_one_s_lo");
> >> >> +       args.prio = ODP_SCHED_PRIO_LOWEST;
> >> >> +       args.func = test_schedule_one_single;
> >> >> +       execute_parallel(exec_template, &args);
> >> >> +}
> >> >> +
> >> >> +static void schedule_single_lo(void)
> >> >> +{
> >> >> +       test_case_args_t args;
> >> >> +       snprintf(args.name, sizeof(args.name), "sched_____s_lo");
> >> >> +       args.prio = ODP_SCHED_PRIO_LOWEST;
> >> >> +       args.func = test_schedule_single;
> >> >> +       execute_parallel(exec_template, &args);
> >> >> +}
> >> >> +
> >> >> +static void schedule_one_many_lo(void)
> >> >> +{
> >> >> +       test_case_args_t args;
> >> >> +       snprintf(args.name, sizeof(args.name), "sched_one_m_lo");
> >> >> +       args.prio = ODP_SCHED_PRIO_LOWEST;
> >> >> +       args.func = test_schedule_one_many;
> >> >> +       execute_parallel(exec_template, &args);
> >> >> +}
> >> >> +
> >> >> +static void schedule_many_lo(void)
> >> >> +{
> >> >> +       test_case_args_t args;
> >> >> +       snprintf(args.name, sizeof(args.name), "sched_____m_lo");
> >> >> +       args.prio = ODP_SCHED_PRIO_LOWEST;
> >> >> +       args.func = test_schedule_many;
> >> >> +       execute_parallel(exec_template, &args);
> >> >> +}
> >> >> +
> >> >> +static void schedule_multi_lo(void)
> >> >> +{
> >> >> +       test_case_args_t args;
> >> >> +       snprintf(args.name, sizeof(args.name), "sched_____m_lo");
> >> >> +       args.prio = ODP_SCHED_PRIO_LOWEST;
> >> >> +       args.func = test_schedule_multi;
> >> >> +       execute_parallel(exec_template, &args);
> >> >> +}
> >> >> +
> >> >> +/* High prio */
> >> >> +
> >> >> +static void schedule_one_single_hi(void)
> >> >> +{
> >> >> +       test_case_args_t args;
> >> >> +       snprintf(args.name, sizeof(args.name), "sched_one_s_hi");
> >> >> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
> >> >> +       args.func = test_schedule_single;
> >> >> +       execute_parallel(exec_template, &args);
> >> >> +}
> >> >> +
> >> >> +static void schedule_single_hi(void)
> >> >> +{
> >> >> +       test_case_args_t args;
> >> >> +       snprintf(args.name, sizeof(args.name), "sched_____s_hi");
> >> >> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
> >> >> +       args.func = test_schedule_single;
> >> >> +       execute_parallel(exec_template, &args);
> >> >> +}
> >> >> +
> >> >> +static void schedule_one_many_hi(void)
> >> >> +{
> >> >> +       test_case_args_t args;
> >> >> +       snprintf(args.name, sizeof(args.name), "sched_one_m_hi");
> >> >> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
> >> >> +       args.func = test_schedule_one_many;
> >> >> +       execute_parallel(exec_template, &args);
> >> >> +}
> >> >> +
> >> >> +static void schedule_many_hi(void)
> >> >> +{
> >> >> +       test_case_args_t args;
> >> >> +       snprintf(args.name, sizeof(args.name), "sched_____m_hi");
> >> >> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
> >> >> +       args.func = test_schedule_many;
> >> >> +       execute_parallel(exec_template, &args);
> >> >> +}
> >> >> +
> >> >> +static void schedule_multi_hi(void)
> >> >> +{
> >> >> +       test_case_args_t args;
> >> >> +       snprintf(args.name, sizeof(args.name), "sched_multi_hi");
> >> >> +       args.prio = ODP_SCHED_PRIO_HIGHEST;
> >> >> +       args.func = test_schedule_multi;
> >> >> +       execute_parallel(exec_template, &args);
> >> >> +}
> >> >> +
> >> >> +static void execute_parallel(void *(*start_routine) (void *),
> >> >> +                            test_case_args_t *test_case_args)
> >> >> +{
> >> >> +       odph_linux_pthread_t thread_tbl[MAX_WORKERS];
> >> >> +       int first_core;
> >> >> +
> >> >> +       memset(thread_tbl, 0, sizeof(thread_tbl));
> >> >> +
> >> >> +       /*
> >> >> +        * By default core #0 runs Linux kernel background tasks.
> >> >> +        * Start mapping thread from core #1
> >> >> +        */
> >> >> +       first_core = 1;
> >> >> +
> >> >> +       if (odp_sys_core_count() == 1)
> >> >> +               first_core = 0;
> >> >> +
> >> >> +       odph_linux_pthread_create(thread_tbl, num_workers,
> first_core,
> >> >> +                                       start_routine,
> test_case_args);
> >> >> +
> >> >> +       /* Wait for worker threads to terminate */
> >> >> +       odph_linux_pthread_join(thread_tbl, num_workers);
> >> >> +}
> >> >> +
> >> >> +static odp_buffer_pool_t test_odp_buffer_pool_init(void)
> >> >> +{
> >> >> +       void *pool_base;
> >> >> +       odp_shm_t shm;
> >> >> +       odp_buffer_pool_t pool;
> >> >> +
> >> >> +       shm = odp_shm_reserve("msg_pool",
> >> >> +                             MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
> >> >> +
> >> >> +       pool_base = odp_shm_addr(shm);
> >> >> +
> >> >> +       if (NULL == pool_base) {
> >> >> +               printf("Shared memory reserve failed.\n");
> >> >> +               return -1;
> >> >> +       }
> >> >> +
> >> >> +       pool = odp_buffer_pool_create("msg_pool", pool_base,
> >> >> MSG_POOL_SIZE,
> >> >> +                                     BUF_SIZE, ODP_CACHE_LINE_SIZE,
> >> >> +                                     ODP_BUFFER_TYPE_RAW);
> >> >> +
> >> >> +       if (ODP_BUFFER_POOL_INVALID == pool) {
> >> >> +               printf("Pool create failed.\n");
> >> >> +               return -1;
> >> >> +       }
> >> >> +       return pool;
> >> >> +}
> >> >> +
> >> >> +int schedule_test_init(void)
> >> >> +{
> >> >> +       test_args_t args;
> >> >> +       odp_shm_t shm;
> >> >> +       test_globals_t *globals;
> >> >> +       int i, j;
> >> >> +       int prios;
> >> >> +
> >> >> +       if (0 != odp_init_global(NULL, NULL)) {
> >> >> +               printf("odp_init_global fail.\n");
> >> >> +               return -1;
> >> >> +       }
> >> >> +       if (0 != odp_init_local()) {
> >> >> +               printf("odp_init_local fail.\n");
> >> >> +               return -1;
> >> >> +       }
> >> >> +       if (ODP_BUFFER_POOL_INVALID == test_odp_buffer_pool_init()) {
> >> >> +               printf("test_odp_buffer_pool_init fail.\n");
> >> >> +               return -1;
> >> >> +       }
> >> >> +
> >> >> +       /* A worker thread per core */
> >> >> +       num_workers = odp_sys_core_count();
> >> >> +
> >> >> +       if (args.core_count)
> >> >> +               num_workers = args.core_count;
> >> >> +
> >> >> +       /* force to max core count */
> >> >> +       if (num_workers > MAX_WORKERS)
> >> >> +               num_workers = MAX_WORKERS;
> >> >> +       shm = odp_shm_reserve("test_globals",
> >> >> +                             sizeof(test_globals_t),
> >> >> ODP_CACHE_LINE_SIZE,
> >> >> 0);
> >> >> +
> >> >> +       globals = odp_shm_addr(shm);
> >> >> +
> >> >> +       if (globals == NULL) {
> >> >> +               ODP_ERR("Shared memory reserve failed.\n");
> >> >> +               return -1;
> >> >> +       }
> >> >> +
> >> >> +       memset(globals, 0, sizeof(test_globals_t));
> >> >> +
> >> >> +       /* Barrier to sync test case execution */
> >> >> +       odp_barrier_init_count(&globals->barrier, num_workers);
> >> >> +
> >> >> +       prios = odp_schedule_num_prio();
> >> >> +
> >> >> +       for (i = 0; i < prios; i++) {
> >> >> +               odp_queue_param_t param;
> >> >> +               odp_queue_t queue;
> >> >> +               char name[] = "sched_XX_YY";
> >> >> +
> >> >> +               if (i != ODP_SCHED_PRIO_HIGHEST &&
> >> >> +                   i != ODP_SCHED_PRIO_LOWEST)
> >> >> +                       continue;
> >> >> +
> >> >> +               name[6] = '0' + i/10;
> >> >> +               name[7] = '0' + i - 10*(i/10);
> >> >> +
> >> >> +               param.sched.prio  = i;
> >> >> +               param.sched.sync  = ODP_SCHED_SYNC_ATOMIC;
> >> >> +               param.sched.group = ODP_SCHED_GROUP_DEFAULT;
> >> >> +
> >> >> +               for (j = 0; j < QUEUES_PER_PRIO; j++) {
> >> >> +                       name[9]  = '0' + j/10;
> >> >> +                       name[10] = '0' + j - 10*(j/10);
> >> >> +
> >> >> +                       queue = odp_queue_create(name,
> >> >> ODP_QUEUE_TYPE_SCHED,
> >> >> +                                                &param);
> >> >> +
> >> >> +                       if (queue == ODP_QUEUE_INVALID) {
> >> >> +                               ODP_ERR("Schedule queue create
> >> >> failed.\n");
> >> >> +                               return -1;
> >> >> +                       }
> >> >> +               }
> >> >> +       }
> >> >> +       return 0;
> >> >> +}
> >> >> +
> >> >> +int schedule_test_finalize(void)
> >> >> +{
> >> >> +       odp_term_local();
> >> >> +       odp_term_global();
> >> >> +       return 0;
> >> >> +}
> >> >> +
> >> >> +struct CU_TestInfo schedule_tests[] = {
> >> >> +       _CU_TEST_INFO(schedule_wait_time),
> >> >> +       _CU_TEST_INFO(schedule_one_single_lo),
> >> >> +       _CU_TEST_INFO(schedule_single_lo),
> >> >> +       _CU_TEST_INFO(schedule_one_many_lo),
> >> >> +       _CU_TEST_INFO(schedule_many_lo),
> >> >> +       _CU_TEST_INFO(schedule_multi_lo),
> >> >> +       _CU_TEST_INFO(schedule_one_single_hi),
> >> >> +       _CU_TEST_INFO(schedule_single_hi),
> >> >> +       _CU_TEST_INFO(schedule_one_many_hi),
> >> >> +       _CU_TEST_INFO(schedule_many_hi),
> >> >> +       _CU_TEST_INFO(schedule_multi_hi),
> >> >> +       CU_TEST_INFO_NULL,
> >> >> +};
> >> >> diff --git a/test/cunit/schedule/odp_schedule_testsuites.c
> >> >> b/test/cunit/schedule/odp_schedule_testsuites.c
> >> >> new file mode 100644
> >> >> index 0000000..1053069
> >> >> --- /dev/null
> >> >> +++ b/test/cunit/schedule/odp_schedule_testsuites.c
> >> >> @@ -0,0 +1,35 @@
> >> >> +/* Copyright (c) 2014, Linaro Limited
> >> >> + * All rights reserved.
> >> >> + *
> >> >> + * SPDX-License-Identifier:     BSD-3-Clause
> >> >> + */
> >> >> +
> >> >> +#include "odp_schedule_testsuites.h"
> >> >> +
> >> >> +static CU_SuiteInfo suites[] = {
> >> >> +       {
> >> >> +               "Scheduler tests" ,
> >> >> +               schedule_test_init,
> >> >> +               schedule_test_finalize,
> >> >> +               NULL,
> >> >> +               NULL,
> >> >> +               schedule_tests
> >> >> +       },
> >> >> +       CU_SUITE_INFO_NULL,
> >> >> +};
> >> >> +
> >> >> +int main(void)
> >> >> +{
> >> >> +       /* initialize the CUnit test registry */
> >> >> +       if (CUE_SUCCESS != CU_initialize_registry())
> >> >> +               return CU_get_error();
> >> >> +
> >> >> +       /* register suites */
> >> >> +       CU_register_suites(suites);
> >> >> +       /* Run all tests using the CUnit Basic interface */
> >> >> +       CU_basic_set_mode(CU_BRM_VERBOSE);
> >> >> +       CU_basic_run_tests();
> >> >> +       CU_cleanup_registry();
> >> >> +
> >> >> +       return CU_get_error();
> >> >> +}
> >> >> diff --git a/test/cunit/schedule/odp_schedule_testsuites.h
> >> >> b/test/cunit/schedule/odp_schedule_testsuites.h
> >> >> new file mode 100644
> >> >> index 0000000..67a2a69
> >> >> --- /dev/null
> >> >> +++ b/test/cunit/schedule/odp_schedule_testsuites.h
> >> >> @@ -0,0 +1,21 @@
> >> >> +/* Copyright (c) 2014, Linaro Limited
> >> >> + * All rights reserved.
> >> >> + *
> >> >> + * SPDX-License-Identifier:     BSD-3-Clause
> >> >> + */
> >> >> +
> >> >> +#ifndef ODP_SCHEDULE_TESTSUITES_H_
> >> >> +#define ODP_SCHEDULE_TESTSUITES_H_
> >> >> +
> >> >> +#include "odp.h"
> >> >> +#include <CUnit/Basic.h>
> >> >> +
> >> >> +/* Helper macro for CU_TestInfo initialization */
> >> >> +#define _CU_TEST_INFO(test_func) {#test_func, test_func}
> >> >> +
> >> >> +extern struct CU_TestInfo schedule_tests[];
> >> >> +
> >> >> +extern int schedule_test_init(void);
> >> >> +extern int schedule_test_finalize(void);
> >> >> +
> >> >> +#endif /* ODP_SCHEDULE_TESTSUITES_H_ */
> >> >> --
> >> >> 1.8.3.2
> >> >>
> >> >>
> >> >
> >> >
> >
> >
>
Taras Kondratiuk Nov. 21, 2014, 2:25 p.m. UTC | #10
On 11/21/2014 03:44 PM, Ciprian Barbu wrote:
> On Fri, Nov 21, 2014 at 1:31 PM, Taras Kondratiuk
> <taras.kondratiuk@linaro.org> wrote:
>> On 11/20/2014 09:02 PM, Ciprian Barbu wrote:
>>>
>>> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
>>> ---
>>> The testcases are based almost entirely on the odp_example.
>>> There are no alloc tests and I added a test case for
>>> odp_schedule_wait_time.
>>> The major differencs between the odp_example and this cunit is the
>>> partition
>>> into testcases, the odp_example calls every test case from one big
>>> function.
>>>
>>> I had to work some magic in order to be able to pass arguments to test
>>> cases,
>>> I hope is not too hard to follow.
>>>
>>>    configure.ac                                  |   1 +
>>>    test/cunit/Makefile.am                        |   2 +
>>>    test/cunit/schedule/Makefile.am               |  10 +
>>>    test/cunit/schedule/odp_schedule_test.c       | 844
>>> ++++++++++++++++++++++++++
>>>    test/cunit/schedule/odp_schedule_testsuites.c |  35 ++
>>>    test/cunit/schedule/odp_schedule_testsuites.h |  21 +
>>>    6 files changed, 913 insertions(+)
>>>    create mode 100644 test/cunit/schedule/Makefile.am
>>>    create mode 100644 test/cunit/schedule/odp_schedule_test.c
>>>    create mode 100644 test/cunit/schedule/odp_schedule_testsuites.c
>>>    create mode 100644 test/cunit/schedule/odp_schedule_testsuites.h
>>>
>>> diff --git a/configure.ac b/configure.ac
>>> index fcd7279..a47db72 100644
>>> --- a/configure.ac
>>> +++ b/configure.ac
>>> @@ -173,6 +173,7 @@ AC_CONFIG_FILES([Makefile
>>>                   test/Makefile
>>>                   test/api_test/Makefile
>>>                     test/cunit/Makefile
>>> +                 test/cunit/schedule/Makefile
>>>                   pkgconfig/libodp.pc])
>>>
>>>    AC_SEARCH_LIBS([timer_create],[rt posix4])
>>> diff --git a/test/cunit/Makefile.am b/test/cunit/Makefile.am
>>> index 439e134..b6033ee 100644
>>> --- a/test/cunit/Makefile.am
>>> +++ b/test/cunit/Makefile.am
>>> @@ -3,6 +3,8 @@ include $(top_srcdir)/test/Makefile.inc
>>>    AM_CFLAGS += -I$(CUNIT_PATH)/include
>>>    AM_LDFLAGS += -L$(CUNIT_PATH)/lib -static -lcunit
>>>
>>> +SUBDIRS = schedule
>>> +
>>
>>
>> Don't create a separate makefile, so all test binaries will be the same
>> directory.
>
> Did you get that feedback on private? I don't see it in the comments.
> Anyway, I can drop the extra Makefile no problem.

Anders complained that 'make check' has some issues with this, and I've
noticed that Alex has changed the Crypto tests in this way.

>
>>
>>>    if ODP_CUNIT_ENABLED
>>>    TESTS = ${bin_PROGRAMS}
>>>    check_PROGRAMS = ${bin_PROGRAMS}
>>
>>
>>> +
>>> +struct CU_TestInfo schedule_tests[] = {
>>> +       _CU_TEST_INFO(schedule_wait_time),
>>> +       _CU_TEST_INFO(schedule_one_single_lo),
>>> +       _CU_TEST_INFO(schedule_single_lo),
>>> +       _CU_TEST_INFO(schedule_one_many_lo),
>>> +       _CU_TEST_INFO(schedule_many_lo),
>>> +       _CU_TEST_INFO(schedule_multi_lo),
>>> +       _CU_TEST_INFO(schedule_one_single_hi),
>>> +       _CU_TEST_INFO(schedule_single_hi),
>>> +       _CU_TEST_INFO(schedule_one_many_hi),
>>> +       _CU_TEST_INFO(schedule_many_hi),
>>> +       _CU_TEST_INFO(schedule_multi_hi),
>>> +       CU_TEST_INFO_NULL,
>>> +};
>>
>>
>> schedule_one() will be dropped, so these tests also can be dropped.
>
> Yes I know I had to drop them. I kept them in for this RFC for easy
> comparison against odp_example
>
>>
>> I think odp_example is not a good base for scheduler tests. It was
>> written as benchmark, but not as a verification test.
>
> That's actually not the feedback I got from Mike, correct me if I'm wrong.

My main concerns about odp_example:
- it does millions of iterations, which is not necessary for functional
verification (at least not for the basic tests).
- it makes a wrong assumption about scheduler fairness.
- it doesn't check the main functional features mentioned in the quote below.

It would be better to start from scratch than to try to modify
odp_example.

I imagine basic scheduler testing in the following steps (a minimal sketch
of step 1 is included after the list):
1. One queue, single thread. odp_schedule() returns all enqueued buffers.
    Test each queue type (PARALLEL, ATOMIC, ORDERED).
2. Many queues, single thread. odp_schedule() returns all enqueued
    buffers. Verify the buffer source queue. Test each queue type.
3. Queues with different priorities, single thread. odp_schedule()
    returns all buffers according to queue priorities. Test each queue
    type.
4. Same as 3 but multi-threaded.
5. One ATOMIC queue, several threads. Verify that only one thread at a
    time can get a buffer from that queue.
6. Same as 5, but use odp_schedule_release_atomic() and check that one
    more thread can get an event. This is an optional test, because
    odp_schedule_release_atomic() is a hint and may be ignored by the
    platform.
7. Two queues, one of them ORDERED, several threads. Verify that buffers
    scheduled from the ORDERED queue are enqueued into the second queue in
    the correct order.
8. Test scheduler timeout APIs.
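
A minimal sketch of step 1 could look like the code below. It reuses only
calls already present in this patch; the queue name and buffer count are
illustrative, and the sync type is passed as a plain int to avoid assuming
a particular typedef:

static int test_one_queue_single_thread(odp_buffer_pool_t pool, int sync)
{
        odp_queue_param_t param;
        odp_queue_t queue;
        odp_buffer_t buf;
        const int num_bufs = 4;
        int i;

        param.sched.prio  = ODP_SCHED_PRIO_LOWEST;
        param.sched.sync  = sync; /* NONE, ATOMIC or ORDERED */
        param.sched.group = ODP_SCHED_GROUP_DEFAULT;

        queue = odp_queue_create("sched_basic", ODP_QUEUE_TYPE_SCHED, &param);
        if (queue == ODP_QUEUE_INVALID)
                return -1;

        /* Enqueue a handful of buffers */
        for (i = 0; i < num_bufs; i++) {
                buf = odp_buffer_alloc(pool);
                if (!odp_buffer_is_valid(buf))
                        return -1;
                if (odp_queue_enq(queue, buf))
                        return -1;
        }

        /* The scheduler should hand back exactly that many buffers ... */
        for (i = 0; i < num_bufs; i++) {
                buf = odp_schedule(NULL, ODP_SCHED_WAIT);
                CU_ASSERT(buf != ODP_BUFFER_INVALID);
                odp_buffer_free(buf);
        }

        /* ... and nothing more */
        buf = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
        CU_ASSERT(buf == ODP_BUFFER_INVALID);

        return 0;
}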


>
>>
>> Basic scheduler tests are missing:
>> - verify that priority works correctly.
>> - verify that atomicity works correctly for ATOMIC queues.
>> - verify order for ORDERED queues.
>
> That's good input, thank you.
>
Alexandru Badicioiu Nov. 21, 2014, 2:38 p.m. UTC | #11
On 21 November 2014 16:25, Taras Kondratiuk <taras.kondratiuk@linaro.org>
wrote:

> On 11/21/2014 03:44 PM, Ciprian Barbu wrote:
>
>> On Fri, Nov 21, 2014 at 1:31 PM, Taras Kondratiuk
>> <taras.kondratiuk@linaro.org> wrote:
>>
>>> On 11/20/2014 09:02 PM, Ciprian Barbu wrote:
>>>
>>>>
>>>> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
>>>> ---
>>>> The testcases are based almost entirely on the odp_example.
>>>> There are no alloc tests and I added a test case for
>>>> odp_schedule_wait_time.
>>>> The major differencs between the odp_example and this cunit is the
>>>> partition
>>>> into testcases, the odp_example calls every test case from one big
>>>> function.
>>>>
>>>> I had to work some magic in order to be able to pass arguments to test
>>>> cases,
>>>> I hope is not too hard to follow.
>>>>
>>>>    configure.ac                                  |   1 +
>>>>    test/cunit/Makefile.am                        |   2 +
>>>>    test/cunit/schedule/Makefile.am               |  10 +
>>>>    test/cunit/schedule/odp_schedule_test.c       | 844
>>>> ++++++++++++++++++++++++++
>>>>    test/cunit/schedule/odp_schedule_testsuites.c |  35 ++
>>>>    test/cunit/schedule/odp_schedule_testsuites.h |  21 +
>>>>    6 files changed, 913 insertions(+)
>>>>    create mode 100644 test/cunit/schedule/Makefile.am
>>>>    create mode 100644 test/cunit/schedule/odp_schedule_test.c
>>>>    create mode 100644 test/cunit/schedule/odp_schedule_testsuites.c
>>>>    create mode 100644 test/cunit/schedule/odp_schedule_testsuites.h
>>>>
>>>> diff --git a/configure.ac b/configure.ac
>>>> index fcd7279..a47db72 100644
>>>> --- a/configure.ac
>>>> +++ b/configure.ac
>>>> @@ -173,6 +173,7 @@ AC_CONFIG_FILES([Makefile
>>>>                   test/Makefile
>>>>                   test/api_test/Makefile
>>>>                     test/cunit/Makefile
>>>> +                 test/cunit/schedule/Makefile
>>>>                   pkgconfig/libodp.pc])
>>>>
>>>>    AC_SEARCH_LIBS([timer_create],[rt posix4])
>>>> diff --git a/test/cunit/Makefile.am b/test/cunit/Makefile.am
>>>> index 439e134..b6033ee 100644
>>>> --- a/test/cunit/Makefile.am
>>>> +++ b/test/cunit/Makefile.am
>>>> @@ -3,6 +3,8 @@ include $(top_srcdir)/test/Makefile.inc
>>>>    AM_CFLAGS += -I$(CUNIT_PATH)/include
>>>>    AM_LDFLAGS += -L$(CUNIT_PATH)/lib -static -lcunit
>>>>
>>>> +SUBDIRS = schedule
>>>> +
>>>>
>>>
>>>
>>> Don't create a separate makefile, so all test binaries will be the same
>>> directory.
>>>
>>
>> Did you get that feedback on private? I don't see it in the comments.
>> Anyway, I can drop the extra Makefile no problem.
>>
>
> Anders complained that 'make check' has some issues with this. And I've
> noticed that Alex have changed Crypto tests in this way.
>
>
>>
>>>     if ODP_CUNIT_ENABLED
>>>>    TESTS = ${bin_PROGRAMS}
>>>>    check_PROGRAMS = ${bin_PROGRAMS}
>>>>
>>>
>>>
>>>  +
>>>> +struct CU_TestInfo schedule_tests[] = {
>>>> +       _CU_TEST_INFO(schedule_wait_time),
>>>> +       _CU_TEST_INFO(schedule_one_single_lo),
>>>> +       _CU_TEST_INFO(schedule_single_lo),
>>>> +       _CU_TEST_INFO(schedule_one_many_lo),
>>>> +       _CU_TEST_INFO(schedule_many_lo),
>>>> +       _CU_TEST_INFO(schedule_multi_lo),
>>>> +       _CU_TEST_INFO(schedule_one_single_hi),
>>>> +       _CU_TEST_INFO(schedule_single_hi),
>>>> +       _CU_TEST_INFO(schedule_one_many_hi),
>>>> +       _CU_TEST_INFO(schedule_many_hi),
>>>> +       _CU_TEST_INFO(schedule_multi_hi),
>>>> +       CU_TEST_INFO_NULL,
>>>> +};
>>>>
>>>
>>>
>>> schedule_one() will be dropped, so these tests also can be dropped.
>>>
>>
>> Yes I know I had to drop them. I kept them in for this RFC for easy
>> comparison against odp_example
>>
>>
>>> I think odp_example is not a good base for scheduler tests. It was
>>> written as benchmark, but not as a verification test.
>>>
>>
>> That's actually not the feedback I got from Mike, correct me if I'm wrong.
>>
>
> My main concerns about odp_example:
> - it does millions of iterations which is not necessary for functional
> verification (at least for basic).
> - it makes wrong assumption about scheduler fairness.
> - it doesn't check main functional features mentioned in a quote below.
>
> It would be better to start from scratch, that try to modify
> odp_example.
>
> I imagine basic scheduler testing in following steps:
> 1. One queue, single thread. odp_schedule() return all enqueued buffers.
>    Test each queue type (PARALLEL, ATOMIC, ORDERED).
> 2. Many queues, single thread. odp_schedule() return all enqueued
>    buffers. Verify buffer source queue. Test each queue type.
> 3. Queues with different priorities, single thread. odp_schedule()
>    return all buffers according to queue priorities. Test each queue
>    type.
> 4. Same as 3 but multi-threaded.
> 5. One ATOMIC queue, several threads. Verify that only one thread at a
>    time can get a buffer from that queue.
> 6. Same as 5, but use odp_schedule_release_atomic() and check that one
>    more thread could get an event. That is an optional test, because
>    odp_schedule_release_atomic() is a hint and may be ignored by
>    platform.
> 7. Two queues one of them ORDERED, several threads. Verify that buffers
>    scheduled from ORDERED queue are enqueue into the second queue in
>    correct order.
>
I think you meant "Verify that buffers scheduled from ORDERED queue are
_dequeued_ from the second queue in the correct order".

> 8. Test scheduler timeout APIs.
>
>
>
>
>>
>>> Basic scheduler tests are missing:
>>> - verify that priority works correctly.
>>> - verify that atomicity works correctly for ATOMIC queues.
>>> - verify order for ORDERED queues.
>>>
>>
>> That's good input, thank you.
>>
>>
>
>
Taras Kondratiuk Nov. 21, 2014, 2:58 p.m. UTC | #12
On 11/21/2014 04:38 PM, Alexandru Badicioiu wrote:
>
>
> On 21 November 2014 16:25, Taras Kondratiuk <taras.kondratiuk@linaro.org
>     7. Two queues one of them ORDERED, several threads. Verify that buffers
>         scheduled from ORDERED queue are enqueue into the second queue in
>         correct order.
>
> I think you meant "Verify that buffers scheduled from ORDERED queue are
> _dequeued_ from the second queue in the correct order".

Right. To be clearer (a rough sketch of the verification stage follows the
list):
- buffers are pushed to the ORDERED queue in some order.
- then they are scheduled from the ORDERED queue and enqueued to a second
   queue by several threads.
- at the end, buffers dequeued from the second queue should have the
   initial order.
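
A rough sketch of that verification stage, assuming each buffer got a
sequence number written into its payload before the initial enqueue and
that the workers push the scheduled buffers into a plain poll queue
("out_queue" and TOTAL_BUFS below are made-up names):

        /* Drained by a single thread after all workers have joined, so
         * the check itself cannot reorder anything */
        uint32_t expected = 0;
        odp_buffer_t buf;

        while ((buf = odp_queue_deq(out_queue)) != ODP_BUFFER_INVALID) {
                /* sequence number stored at the start of the payload */
                CU_ASSERT(*(uint32_t *)odp_buffer_addr(buf) == expected);
                expected++;
                odp_buffer_free(buf);
        }

        CU_ASSERT(expected == TOTAL_BUFS);
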
Jerin Jacob Nov. 21, 2014, 3:52 p.m. UTC | #13
On Thu, Nov 20, 2014 at 09:02:24PM +0200, Ciprian Barbu wrote:
> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
> ---
> The testcases are based almost entirely on the odp_example.
> There are no alloc tests and I added a test case for odp_schedule_wait_time.
> The major differencs between the odp_example and this cunit is the partition
> into testcases, the odp_example calls every test case from one big function.
> 
> I had to work some magic in order to be able to pass arguments to test cases,
> I hope is not too hard to follow.
> 
>  configure.ac                                  |   1 +
>  test/cunit/Makefile.am                        |   2 +
>  test/cunit/schedule/Makefile.am               |  10 +
>  test/cunit/schedule/odp_schedule_test.c       | 844 ++++++++++++++++++++++++++
>  test/cunit/schedule/odp_schedule_testsuites.c |  35 ++
>  test/cunit/schedule/odp_schedule_testsuites.h |  21 +
>  6 files changed, 913 insertions(+)
>  create mode 100644 test/cunit/schedule/Makefile.am
>  create mode 100644 test/cunit/schedule/odp_schedule_test.c
>  create mode 100644 test/cunit/schedule/odp_schedule_testsuites.c
>  create mode 100644 test/cunit/schedule/odp_schedule_testsuites.h
> 
> diff --git a/configure.ac b/configure.ac
> index fcd7279..a47db72 100644
> --- a/configure.ac
> +++ b/configure.ac
> @@ -173,6 +173,7 @@ AC_CONFIG_FILES([Makefile
>  		 test/Makefile
>  		 test/api_test/Makefile
>                   test/cunit/Makefile
> +                 test/cunit/schedule/Makefile
>  		 pkgconfig/libodp.pc])
>  
>  AC_SEARCH_LIBS([timer_create],[rt posix4])
> diff --git a/test/cunit/Makefile.am b/test/cunit/Makefile.am
> index 439e134..b6033ee 100644
> --- a/test/cunit/Makefile.am
> +++ b/test/cunit/Makefile.am
> @@ -3,6 +3,8 @@ include $(top_srcdir)/test/Makefile.inc
>  AM_CFLAGS += -I$(CUNIT_PATH)/include
>  AM_LDFLAGS += -L$(CUNIT_PATH)/lib -static -lcunit
>  
> +SUBDIRS = schedule
> +
>  if ODP_CUNIT_ENABLED
>  TESTS = ${bin_PROGRAMS}
>  check_PROGRAMS = ${bin_PROGRAMS}
> diff --git a/test/cunit/schedule/Makefile.am b/test/cunit/schedule/Makefile.am
> new file mode 100644
> index 0000000..ad68b03
> --- /dev/null
> +++ b/test/cunit/schedule/Makefile.am
> @@ -0,0 +1,10 @@
> +include $(top_srcdir)/test/Makefile.inc
> +
> +if ODP_CUNIT_ENABLED
> +bin_PROGRAMS = odp_schedule_test
> +odp_schedule_test_LDFLAGS = $(AM_LDFLAGS) -L$(CUNIT_PATH)/lib -static -lcunit
> +odp_schedule_test_CFLAGS = $(AM_CFLAGS) -I$(CUNIT_PATH)/include
> +endif
> +
> +dist_odp_schedule_test_SOURCES = odp_schedule_test.c \
> +				 odp_schedule_testsuites.c
> diff --git a/test/cunit/schedule/odp_schedule_test.c b/test/cunit/schedule/odp_schedule_test.c
> new file mode 100644
> index 0000000..fa67f6e
> --- /dev/null
> +++ b/test/cunit/schedule/odp_schedule_test.c
> @@ -0,0 +1,844 @@
> +/* Copyright (c) 2014, Linaro Limited
> + * All rights reserved.
> + *
> + * SPDX-License-Identifier:     BSD-3-Clause
> + */
> +
> +#include "odp_schedule_testsuites.h"
> +#include <odph_linux.h>
> +
> +#define MAX_WORKERS		32            /**< Max worker threads */
> +#define MSG_POOL_SIZE           (4*1024*1024)
> +#define QUEUES_PER_PRIO		64            /**< Queue per priority */
> +#define QUEUE_ROUNDS		(512*1024)    /**< Queue test rounds */
> +#define MULTI_BUFS_MAX		4             /**< Buffer burst size */
> +#define BUF_SIZE		64
> +
> +#define SCHED_MSG "Test_buff_FOR_simple_schedule"
> +
> +/** Test arguments */
> +typedef struct {
> +	int core_count; /**< Core count */
> +	int proc_mode;  /**< Process mode */
> +} test_args_t;
> +
> +typedef int (*test_case_routine)(const char *, int, odp_buffer_pool_t,
> +				 int, odp_barrier_t *);
> +
> +/** Scheduler test case arguments */
> +typedef struct {
> +	char name[64];	/**< test case name */
> +	int prio;
> +	test_case_routine func;
> +} test_case_args_t;
> +
> +/** Test global variables */
> +typedef struct {
> +	odp_barrier_t barrier;/**< @private Barrier for test synchronisation */
> +	test_args_t test_args;/**< @private Test case function and arguments */
> +} test_globals_t;
> +
> +static void execute_parallel(void *(*func) (void *), test_case_args_t *);
> +static int num_workers;
> +
> +/**
> + * @internal CUnit test case for verifying functionality of
> + *           schedule_wait_time
> + */
> +static void schedule_wait_time(void)
> +{
> +	uint64_t wait_time;
> +
> +	wait_time = odp_schedule_wait_time(0);
> +	CU_ASSERT(wait_time > 0);

wait_time can be zero for odp_schedule_wait_time(0).
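
A relaxed check that doesn't assume a non-zero result for 0 ns could look
something like this (sketch only; that the conversion never decreases with
the ns argument is itself an assumption):

        uint64_t t_zero  = odp_schedule_wait_time(0);
        uint64_t t_short = odp_schedule_wait_time(1000); /* 1 us */
        uint64_t t_long  = odp_schedule_wait_time((uint64_t)-1LL);

        CU_ASSERT(t_short >= t_zero);
        CU_ASSERT(t_long >= t_short);
        CU_ASSERT(t_long > 0);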

> +	CU_PASS("schedule_wait_time(0)");
> +
> +	wait_time = odp_schedule_wait_time(1);
> +	CU_ASSERT(wait_time > 0);
> +	CU_PASS("schedule_wait_time(1)");
> +
> +	wait_time = odp_schedule_wait_time((uint64_t)-1LL);
> +	CU_ASSERT(wait_time > 0);
> +	CU_PASS("schedule_wait_time(MAX_LONG_INT)");
> +}
> +
> +/**
> + * @internal Clear all scheduled queues. Retry to be sure that all
> + * buffers have been scheduled.
> + */
> +static void clear_sched_queues(void)
> +{
> +	odp_buffer_t buf;
> +
> +	while (1) {
> +		buf = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
> +
> +		if (buf == ODP_BUFFER_INVALID)
> +			break;
> +
> +		odp_buffer_free(buf);
> +	}
> +}
> +
> +/**
> + * @internal Create multiple queues from a pool of buffers
> + *
> + * @param thr  Thread
> + * @param msg_pool  Buffer pool
> + * @param prio   Queue priority
> + *
> + * @return 0 if successful
> + */
> +static int create_queues(int thr, odp_buffer_pool_t msg_pool, int prio)
> +{
> +	char name[] = "sched_XX_YY";
> +	odp_buffer_t buf;
> +	odp_queue_t queue;
> +	int i;
> +
> +	name[6] = '0' + prio/10;
> +	name[7] = '0' + prio - 10*(prio/10);
> +
> +	/* Alloc and enqueue a buffer per queue */
> +	for (i = 0; i < QUEUES_PER_PRIO; i++) {
> +		name[9]  = '0' + i/10;
> +		name[10] = '0' + i - 10*(i/10);
> +
> +		queue = odp_queue_lookup(name);
> +
> +		if (queue == ODP_QUEUE_INVALID) {
> +			ODP_ERR("  [%i] Queue %s lookup failed.\n", thr, name);
> +			return -1;
> +		}
> +
> +		buf = odp_buffer_alloc(msg_pool);
> +
> +		if (!odp_buffer_is_valid(buf)) {
> +			ODP_ERR("  [%i] msg_pool alloc failed\n", thr);
> +			return -1;
> +		}
> +
> +		if (odp_queue_enq(queue, buf)) {
> +			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +			return -1;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +/**
> + * @internal Create a single queue from a pool of buffers
> + *
> + * @param thr  Thread
> + * @param msg_pool  Buffer pool
> + * @param prio   Queue priority
> + *
> + * @return 0 if successful
> + */
> +static int create_queue(int thr, odp_buffer_pool_t msg_pool, int prio)
> +{
> +	char name[] = "sched_XX_00";
> +	odp_buffer_t buf;
> +	odp_queue_t queue;
> +
> +	buf = odp_buffer_alloc(msg_pool);
> +
> +	if (!odp_buffer_is_valid(buf)) {
> +		ODP_ERR("  [%i] msg_pool alloc failed\n", thr);
> +		return -1;
> +	}
> +
> +	name[6] = '0' + prio/10;
> +	name[7] = '0' + prio - 10*(prio/10);
> +
> +	queue = odp_queue_lookup(name);
> +
> +	if (queue == ODP_QUEUE_INVALID) {
> +		ODP_ERR("  [%i] Queue %s lookup failed.\n", thr, name);
> +		return -1;
> +	}
> +
> +	if (odp_queue_enq(queue, buf)) {
> +		ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +		return -1;
> +	}
> +
> +	return 0;
> +}
> +
> +/**
> + * @internal Test scheduling of a single queue - with odp_schedule_one()
> + *
> + * Enqueue a buffer to the shared queue. Schedule and enqueue the received
> + * buffer back into the queue.
> + *
> + * @param str      Test case name string
> + * @param thr      Thread
> + * @param msg_pool Buffer pool
> + * @param prio     Priority
> + * @param barrier  Barrier
> + *
> + * @return 0 if successful
> + */
> +static int test_schedule_one_single(const char *str, int thr,
> +				    odp_buffer_pool_t msg_pool,
> +				    int prio, odp_barrier_t *barrier)
> +{
> +	odp_buffer_t buf;
> +	odp_queue_t queue;
> +	uint64_t t1, t2, cycles, ns;
> +	uint32_t i;
> +	uint32_t tot = 0;
> +
> +	if (create_queue(thr, msg_pool, prio)) {
> +		CU_FAIL_FATAL("lookup queue");
> +		return -1;
> +	}
> +
> +	t1 = odp_time_get_cycles();
> +
> +	for (i = 0; i < QUEUE_ROUNDS; i++) {
> +		buf = odp_schedule_one(&queue, ODP_SCHED_WAIT);
> +
> +		if (odp_queue_enq(queue, buf)) {
> +			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +			return -1;
> +		}
> +	}
> +
> +	if (odp_queue_sched_type(queue) == ODP_SCHED_SYNC_ATOMIC)
> +		odp_schedule_release_atomic();
> +
> +	t2     = odp_time_get_cycles();
> +	cycles = odp_time_diff_cycles(t1, t2);
> +	ns     = odp_time_cycles_to_ns(cycles);
> +	tot    = i;
> +
> +	odp_barrier_sync(barrier);
> +	clear_sched_queues();
> +
> +	cycles = cycles/tot;
> +	ns     = ns/tot;
> +
> +	printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
> +	       thr, str, cycles, ns);
> +
> +	return 0;
> +}
> +
> +/**
> + * @internal Test scheduling of multiple queues - with odp_schedule_one()
> + *
> + * Enqueue a buffer to each queue. Schedule and enqueue the received
> + * buffer back into the queue it came from.
> + *
> + * @param str      Test case name string
> + * @param thr      Thread
> + * @param msg_pool Buffer pool
> + * @param prio     Priority
> + * @param barrier  Barrier
> + *
> + * @return 0 if successful
> + */
> +static int test_schedule_one_many(const char *str, int thr,
> +				  odp_buffer_pool_t msg_pool,
> +				  int prio, odp_barrier_t *barrier)
> +{
> +	odp_buffer_t buf;
> +	odp_queue_t queue;
> +	uint64_t t1 = 0;
> +	uint64_t t2 = 0;
> +	uint64_t cycles, ns;
> +	uint32_t i;
> +	uint32_t tot = 0;
> +
> +	if (create_queues(thr, msg_pool, prio))
> +		return -1;
> +
> +	/* Start sched-enq loop */
> +	t1 = odp_time_get_cycles();
> +
> +	for (i = 0; i < QUEUE_ROUNDS; i++) {
> +		buf = odp_schedule_one(&queue, ODP_SCHED_WAIT);
> +
> +		if (odp_queue_enq(queue, buf)) {
> +			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +			return -1;
> +		}
> +	}
> +
> +	if (odp_queue_sched_type(queue) == ODP_SCHED_SYNC_ATOMIC)
> +		odp_schedule_release_atomic();
> +
> +	t2     = odp_time_get_cycles();
> +	cycles = odp_time_diff_cycles(t1, t2);
> +	ns     = odp_time_cycles_to_ns(cycles);
> +	tot    = i;
> +
> +	odp_barrier_sync(barrier);
> +	clear_sched_queues();
> +
> +	cycles = cycles/tot;
> +	ns     = ns/tot;
> +
> +	printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
> +	       thr, str, cycles, ns);
> +
> +	return 0;
> +}
> +
> +/**
> + * @internal Test scheduling of a single queue - with odp_schedule()
> + *
> + * Enqueue a buffer to the shared queue. Schedule and enqueue the received
> + * buffer back into the queue.
> + *
> + * @param str      Test case name string
> + * @param thr      Thread
> + * @param msg_pool Buffer pool
> + * @param prio     Priority
> + * @param barrier  Barrier
> + *
> + * @return 0 if successful
> + */
> +static int test_schedule_single(const char *str, int thr,
> +				odp_buffer_pool_t msg_pool,
> +				int prio, odp_barrier_t *barrier)
> +{
> +	odp_buffer_t buf;
> +	odp_queue_t queue;
> +	uint64_t t1, t2, cycles, ns;
> +	uint32_t i;
> +	uint32_t tot = 0;
> +
> +	if (create_queue(thr, msg_pool, prio))
> +		return -1;
> +
> +	t1 = odp_time_get_cycles();
> +
> +	for (i = 0; i < QUEUE_ROUNDS; i++) {
> +		buf = odp_schedule(&queue, ODP_SCHED_WAIT);
> +
> +		if (odp_queue_enq(queue, buf)) {
> +			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +			return -1;
> +		}
> +	}
> +
> +	/* Clear possible locally stored buffers */
> +	odp_schedule_pause();
> +
> +	tot = i;
> +
> +	while (1) {
> +		buf = odp_schedule(&queue, ODP_SCHED_NO_WAIT);
> +
> +		if (buf == ODP_BUFFER_INVALID)
> +			break;
> +
> +		tot++;
> +
> +		if (odp_queue_enq(queue, buf)) {
> +			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +			return -1;
> +		}
> +	}
> +
> +	odp_schedule_resume();
> +
> +	t2     = odp_time_get_cycles();
> +	cycles = odp_time_diff_cycles(t1, t2);
> +	ns     = odp_time_cycles_to_ns(cycles);
> +
> +	odp_barrier_sync(barrier);
> +	clear_sched_queues();
> +
> +	cycles = cycles/tot;
> +	ns     = ns/tot;
> +
> +	printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
> +	       thr, str, cycles, ns);
> +
> +	return 0;
> +}
> +
> +/**
> + * @internal Test scheduling of multiple queues - with odp_schedule()
> + *
> + * Enqueue a buffer to each queue. Schedule and enqueue the received
> + * buffer back into the queue it came from.
> + *
> + * @param str      Test case name string
> + * @param thr      Thread
> + * @param msg_pool Buffer pool
> + * @param prio     Priority
> + * @param barrier  Barrier
> + *
> + * @return 0 if successful
> + */
> +static int test_schedule_many(const char *str, int thr,
> +			      odp_buffer_pool_t msg_pool,
> +			      int prio, odp_barrier_t *barrier)
> +{
> +	odp_buffer_t buf;
> +	odp_queue_t queue;
> +	uint64_t t1 = 0;
> +	uint64_t t2 = 0;
> +	uint64_t cycles, ns;
> +	uint32_t i;
> +	uint32_t tot = 0;
> +
> +	if (create_queues(thr, msg_pool, prio))
> +		return -1;
> +
> +	/* Start sched-enq loop */
> +	t1 = odp_time_get_cycles();
> +
> +	for (i = 0; i < QUEUE_ROUNDS; i++) {
> +		buf = odp_schedule(&queue, ODP_SCHED_WAIT);
> +
> +		if (odp_queue_enq(queue, buf)) {
> +			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +			return -1;
> +		}
> +	}
> +
> +	/* Clear possible locally stored buffers */
> +	odp_schedule_pause();
> +
> +	tot = i;
> +
> +	while (1) {
> +		buf = odp_schedule(&queue, ODP_SCHED_NO_WAIT);
> +
> +		if (buf == ODP_BUFFER_INVALID)
> +			break;
> +
> +		tot++;
> +
> +		if (odp_queue_enq(queue, buf)) {
> +			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +			return -1;
> +		}
> +	}
> +
> +	odp_schedule_resume();
> +
> +	t2     = odp_time_get_cycles();
> +	cycles = odp_time_diff_cycles(t1, t2);
> +	ns     = odp_time_cycles_to_ns(cycles);
> +
> +	odp_barrier_sync(barrier);
> +	clear_sched_queues();
> +
> +	cycles = cycles/tot;
> +	ns     = ns/tot;
> +
> +	printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
> +	       thr, str, cycles, ns);
> +
> +	return 0;
> +}
> +
> +/**
> + * @internal Test scheduling of multiple queues with multi_sched and multi_enq
> + *
> + * @param str      Test case name string
> + * @param thr      Thread
> + * @param msg_pool Buffer pool
> + * @param prio     Priority
> + * @param barrier  Barrier
> + *
> + * @return 0 if successful
> + */
> +static int test_schedule_multi(const char *str, int thr,
> +			       odp_buffer_pool_t msg_pool,
> +			       int prio, odp_barrier_t *barrier)
> +{
> +	odp_buffer_t buf[MULTI_BUFS_MAX];
> +	odp_queue_t queue;
> +	uint64_t t1 = 0;
> +	uint64_t t2 = 0;
> +	uint64_t cycles, ns;
> +	int i, j;
> +	int num;
> +	uint32_t tot = 0;
> +	char name[] = "sched_XX_YY";
> +
> +	name[6] = '0' + prio/10;
> +	name[7] = '0' + prio - 10*(prio/10);
> +
> +	/* Alloc and enqueue a buffer per queue */
> +	for (i = 0; i < QUEUES_PER_PRIO; i++) {
> +		name[9]  = '0' + i/10;
> +		name[10] = '0' + i - 10*(i/10);
> +
> +		queue = odp_queue_lookup(name);
> +
> +		if (queue == ODP_QUEUE_INVALID) {
> +			ODP_ERR("  [%i] Queue %s lookup failed.\n", thr, name);
> +			return -1;
> +		}
> +
> +		for (j = 0; j < MULTI_BUFS_MAX; j++) {
> +			buf[j] = odp_buffer_alloc(msg_pool);
> +
> +			if (!odp_buffer_is_valid(buf[j])) {
> +				ODP_ERR("  [%i] msg_pool alloc failed\n", thr);
> +				return -1;
> +			}
> +		}
> +
> +		if (odp_queue_enq_multi(queue, buf, MULTI_BUFS_MAX)) {
> +			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +			return -1;
> +		}
> +	}
> +
> +	/* Start sched-enq loop */
> +	t1 = odp_time_get_cycles();
> +
> +	for (i = 0; i < QUEUE_ROUNDS; i++) {
> +		num = odp_schedule_multi(&queue, ODP_SCHED_WAIT, buf,
> +					 MULTI_BUFS_MAX);
> +
> +		tot += num;
> +
> +		if (odp_queue_enq_multi(queue, buf, num)) {
> +			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +			return -1;
> +		}
> +	}
> +
> +	/* Clear possible locally stored buffers */
> +	odp_schedule_pause();
> +
> +	while (1) {
> +		num = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT, buf,
> +					 MULTI_BUFS_MAX);
> +
> +		if (num == 0)
> +			break;
> +
> +		tot += num;
> +
> +		if (odp_queue_enq_multi(queue, buf, num)) {
> +			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
> +			return -1;
> +		}
> +	}
> +
> +	odp_schedule_resume();
> +
> +
> +	t2     = odp_time_get_cycles();
> +	cycles = odp_time_diff_cycles(t1, t2);
> +	ns     = odp_time_cycles_to_ns(cycles);
> +
> +	odp_barrier_sync(barrier);
> +	clear_sched_queues();
> +
> +	if (tot) {
> +		cycles = cycles/tot;
> +		ns     = ns/tot;
> +	} else {
> +		cycles = 0;
> +		ns     = 0;
> +	}
> +
> +	printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
> +	       thr, str, cycles, ns);
> +
> +	return 0;
> +}
> +
> +/**
> + * Template function for running the scheduler tests.
> + * The main reason for having this function is that CUnit does not offer a way
> + * to pass arguments to a testcase function.
> + * The other reason is that there are common steps for all testcases.
> + */
> +static void *exec_template(void *arg)
> +{
> +	odp_buffer_pool_t msg_pool;
> +	odp_shm_t shm;
> +	test_globals_t *globals;
> +	odp_barrier_t *barrier;
> +	test_case_args_t *args = (test_case_args_t*) arg;
> +
> +	shm     = odp_shm_lookup("test_globals");
> +	globals = odp_shm_addr(shm);
> +
> +	CU_ASSERT(globals != NULL);
> +
> +	barrier = &globals->barrier;
> +
> +	/*
> +	 * Sync before start
> +	 */
> +	odp_barrier_sync(barrier);
> +	
> +	/*
> +	 * Find the buffer pool
> +	 */
> +	msg_pool = odp_buffer_pool_lookup("msg_pool");
> +
> +	CU_ASSERT(msg_pool != ODP_BUFFER_POOL_INVALID);
> +
> +	odp_barrier_sync(barrier);
> +
> +	/*
> +	 * Now run the testcase routine passing the arguments
> +	 */
> +	args->func(args->name, odp_thread_id(), msg_pool,
> +		   args->prio, barrier);
> +
> +	return arg;
> +}
> +
> +/* Low prio */
> +
> +static void schedule_one_single_lo(void)
> +{
> +	test_case_args_t args;
> +	snprintf(args.name, sizeof(args.name), "sched_one_s_lo");
> +	args.prio = ODP_SCHED_PRIO_LOWEST;
> +	args.func = test_schedule_one_single;
> +	execute_parallel(exec_template, &args);
> +}
> +
> +static void schedule_single_lo(void)
> +{
> +	test_case_args_t args;
> +	snprintf(args.name, sizeof(args.name), "sched_____s_lo");
> +	args.prio = ODP_SCHED_PRIO_LOWEST;
> +	args.func = test_schedule_single;
> +	execute_parallel(exec_template, &args);
> +}
> +
> +static void schedule_one_many_lo(void)
> +{
> +	test_case_args_t args;
> +	snprintf(args.name, sizeof(args.name), "sched_one_m_lo");
> +	args.prio = ODP_SCHED_PRIO_LOWEST;
> +	args.func = test_schedule_one_many;
> +	execute_parallel(exec_template, &args);
> +}
> +
> +static void schedule_many_lo(void)
> +{
> +	test_case_args_t args;
> +	snprintf(args.name, sizeof(args.name), "sched_____m_lo");
> +	args.prio = ODP_SCHED_PRIO_LOWEST;
> +	args.func = test_schedule_many;
> +	execute_parallel(exec_template, &args);
> +}
> +
> +static void schedule_multi_lo(void)
> +{
> +	test_case_args_t args;
> +	snprintf(args.name, sizeof(args.name), "sched_____m_lo");
> +	args.prio = ODP_SCHED_PRIO_LOWEST;
> +	args.func = test_schedule_multi;
> +	execute_parallel(exec_template, &args);
> +}
> +
> +/* High prio */
> +
> +static void schedule_one_single_hi(void)
> +{
> +	test_case_args_t args;
> +	snprintf(args.name, sizeof(args.name), "sched_one_s_hi");
> +	args.prio = ODP_SCHED_PRIO_HIGHEST;
> +	args.func = test_schedule_single;
> +	execute_parallel(exec_template, &args);
> +}
> +
> +static void schedule_single_hi(void)
> +{
> +	test_case_args_t args;
> +	snprintf(args.name, sizeof(args.name), "sched_____s_hi");
> +	args.prio = ODP_SCHED_PRIO_HIGHEST;
> +	args.func = test_schedule_single;
> +	execute_parallel(exec_template, &args);
> +}
> +
> +static void schedule_one_many_hi(void)
> +{
> +	test_case_args_t args;
> +	snprintf(args.name, sizeof(args.name), "sched_one_m_hi");
> +	args.prio = ODP_SCHED_PRIO_HIGHEST;
> +	args.func = test_schedule_one_many;
> +	execute_parallel(exec_template, &args);
> +}
> +
> +static void schedule_many_hi(void)
> +{
> +	test_case_args_t args;
> +	snprintf(args.name, sizeof(args.name), "sched_____m_hi");
> +	args.prio = ODP_SCHED_PRIO_HIGHEST;
> +	args.func = test_schedule_many;
> +	execute_parallel(exec_template, &args);
> +}
> +
> +static void schedule_multi_hi(void)
> +{
> +	test_case_args_t args;

IMO, we should avoid using variables allocated from the stack to share
data between different execution contexts.
It will work in a Linux threaded run-time environment, but it will
have issues in other run-time environments such as bare-metal.
IMO, any memory that needs to be shared across different execution
environments should be allocated from ODP shared memory.
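
A minimal sketch of the shared-memory variant, reusing the odp_shm calls
already present in this patch (the "test_case_args" shm name is made up);
in practice the reservation would probably be done once in
schedule_test_init() and only filled in per test case:

        test_case_args_t *args;
        odp_shm_t shm;

        shm = odp_shm_reserve("test_case_args", sizeof(test_case_args_t),
                              ODP_CACHE_LINE_SIZE, 0);
        args = odp_shm_addr(shm);
        CU_ASSERT_FATAL(args != NULL);

        snprintf(args->name, sizeof(args->name), "sched_multi_hi");
        args->prio = ODP_SCHED_PRIO_HIGHEST;
        args->func = test_schedule_multi;
        execute_parallel(exec_template, args);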


> +	snprintf(args.name, sizeof(args.name), "sched_multi_hi");
> +	args.prio = ODP_SCHED_PRIO_HIGHEST;
> +	args.func = test_schedule_multi;
> +	execute_parallel(exec_template, &args);
> +}
> +
> +static void execute_parallel(void *(*start_routine) (void *),
> +			     test_case_args_t *test_case_args)
> +{
> +	odph_linux_pthread_t thread_tbl[MAX_WORKERS];
> +	int first_core;
> +
> +	memset(thread_tbl, 0, sizeof(thread_tbl));
> +
> +	/*
> +	 * By default core #0 runs Linux kernel background tasks.
> +	 * Start mapping thread from core #1
> +	 */
> +	first_core = 1;
> +
> +	if (odp_sys_core_count() == 1)
> +		first_core = 0;
> +
> +	odph_linux_pthread_create(thread_tbl, num_workers, first_core,
> +					start_routine, test_case_args);
> +
> +	/* Wait for worker threads to terminate */
> +	odph_linux_pthread_join(thread_tbl, num_workers);
> +}
> +
> +static odp_buffer_pool_t test_odp_buffer_pool_init(void)
> +{
> +	void *pool_base;
> +	odp_shm_t shm;
> +	odp_buffer_pool_t pool;
> +
> +	shm = odp_shm_reserve("msg_pool",
> +			      MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
> +
> +	pool_base = odp_shm_addr(shm);
> +
> +	if (NULL == pool_base) {
> +		printf("Shared memory reserve failed.\n");
> +		return -1;
> +	}
> +
> +	pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE,
> +				      BUF_SIZE, ODP_CACHE_LINE_SIZE,
> +				      ODP_BUFFER_TYPE_RAW);
> +
> +	if (ODP_BUFFER_POOL_INVALID == pool) {
> +		printf("Pool create failed.\n");
> +		return -1;
> +	}
> +	return pool;
> +}
> +
> +int schedule_test_init(void)
> +{
> +	test_args_t args;
> +	odp_shm_t shm;
> +	test_globals_t *globals;
> +	int i, j;
> +	int prios;
> +
> +	if (0 != odp_init_global(NULL, NULL)) {
> +		printf("odp_init_global fail.\n");
> +		return -1;
> +	}
> +	if (0 != odp_init_local()) {
> +		printf("odp_init_local fail.\n");
> +		return -1;
> +	}
> +	if (ODP_BUFFER_POOL_INVALID == test_odp_buffer_pool_init()) {
> +		printf("test_odp_buffer_pool_init fail.\n");
> +		return -1;
> +	}
> +
> +	/* A worker thread per core */
> +	num_workers = odp_sys_core_count();
> +
> +	if (args.core_count)

args.core_count is uninitialized
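
Clearing it before use (or dropping the local test_args_t entirely, since
nothing fills it in here) would avoid reading garbage, e.g.:

        test_args_t args;

        memset(&args, 0, sizeof(args)); /* core_count == 0: use all cores */

        num_workers = odp_sys_core_count();
        if (args.core_count)
                num_workers = args.core_count;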

> +		num_workers = args.core_count;
> +
> +	/* force to max core count */
> +	if (num_workers > MAX_WORKERS)
> +		num_workers = MAX_WORKERS;
> +	shm = odp_shm_reserve("test_globals",
> +			      sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
> +
> +	globals = odp_shm_addr(shm);
> +
> +	if (globals == NULL) {
> +		ODP_ERR("Shared memory reserve failed.\n");
> +		return -1;
> +	}
> +
> +	memset(globals, 0, sizeof(test_globals_t));
> +
> +	/* Barrier to sync test case execution */
> +	odp_barrier_init_count(&globals->barrier, num_workers);
> +
> +	prios = odp_schedule_num_prio();
> +
> +	for (i = 0; i < prios; i++) {
> +		odp_queue_param_t param;
> +		odp_queue_t queue;
> +		char name[] = "sched_XX_YY";
> +
> +		if (i != ODP_SCHED_PRIO_HIGHEST &&
> +		    i != ODP_SCHED_PRIO_LOWEST)
> +			continue;
> +
> +		name[6] = '0' + i/10;
> +		name[7] = '0' + i - 10*(i/10);
> +
> +		param.sched.prio  = i;
> +		param.sched.sync  = ODP_SCHED_SYNC_ATOMIC;
> +		param.sched.group = ODP_SCHED_GROUP_DEFAULT;
> +
> +		for (j = 0; j < QUEUES_PER_PRIO; j++) {
> +			name[9]  = '0' + j/10;
> +			name[10] = '0' + j - 10*(j/10);
> +
> +			queue = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED,
> +						 &param);
> +
> +			if (queue == ODP_QUEUE_INVALID) {
> +				ODP_ERR("Schedule queue create failed.\n");
> +				return -1;
> +			}
> +		}
> +	}
> +	return 0;
> +}
> +
> +int schedule_test_finalize(void)
> +{
> +	odp_term_local();
> +	odp_term_global();
> +	return 0;
> +}
> +
> +struct CU_TestInfo schedule_tests[] = {
> +	_CU_TEST_INFO(schedule_wait_time),
> +	_CU_TEST_INFO(schedule_one_single_lo),
> +	_CU_TEST_INFO(schedule_single_lo),
> +	_CU_TEST_INFO(schedule_one_many_lo),
> +	_CU_TEST_INFO(schedule_many_lo),
> +	_CU_TEST_INFO(schedule_multi_lo),
> +	_CU_TEST_INFO(schedule_one_single_hi),
> +	_CU_TEST_INFO(schedule_single_hi),
> +	_CU_TEST_INFO(schedule_one_many_hi),
> +	_CU_TEST_INFO(schedule_many_hi),
> +	_CU_TEST_INFO(schedule_multi_hi),
> +	CU_TEST_INFO_NULL,
> +};
> diff --git a/test/cunit/schedule/odp_schedule_testsuites.c b/test/cunit/schedule/odp_schedule_testsuites.c
> new file mode 100644
> index 0000000..1053069
> --- /dev/null
> +++ b/test/cunit/schedule/odp_schedule_testsuites.c
> @@ -0,0 +1,35 @@
> +/* Copyright (c) 2014, Linaro Limited
> + * All rights reserved.
> + *
> + * SPDX-License-Identifier:     BSD-3-Clause
> + */
> +
> +#include "odp_schedule_testsuites.h"
> +
> +static CU_SuiteInfo suites[] = {
> +	{
> +		"Scheduler tests" ,
> +		schedule_test_init,
> +		schedule_test_finalize,
> +		NULL,
> +		NULL,
> +		schedule_tests
> +	},
> +	CU_SUITE_INFO_NULL,
> +};
> +
> +int main(void)
> +{
> +	/* initialize the CUnit test registry */
> +	if (CUE_SUCCESS != CU_initialize_registry())
> +		return CU_get_error();
> +
> +	/* register suites */
> +	CU_register_suites(suites);
> +	/* Run all tests using the CUnit Basic interface */
> +	CU_basic_set_mode(CU_BRM_VERBOSE);
> +	CU_basic_run_tests();
> +	CU_cleanup_registry();
> +
> +	return CU_get_error();
> +}
> diff --git a/test/cunit/schedule/odp_schedule_testsuites.h b/test/cunit/schedule/odp_schedule_testsuites.h
> new file mode 100644
> index 0000000..67a2a69
> --- /dev/null
> +++ b/test/cunit/schedule/odp_schedule_testsuites.h
> @@ -0,0 +1,21 @@
> +/* Copyright (c) 2014, Linaro Limited
> + * All rights reserved.
> + *
> + * SPDX-License-Identifier:     BSD-3-Clause
> + */
> +
> +#ifndef ODP_SCHEDULE_TESTSUITES_H_
> +#define ODP_SCHEDULE_TESTSUITES_H_
> +
> +#include "odp.h"
> +#include <CUnit/Basic.h>
> +
> +/* Helper macro for CU_TestInfo initialization */
> +#define _CU_TEST_INFO(test_func) {#test_func, test_func}
> +
> +extern struct CU_TestInfo schedule_tests[];
> +
> +extern int schedule_test_init(void);
> +extern int schedule_test_finalize(void);
> +
> +#endif /* ODP_SCHEDULE_TESTSUITES_H_ */
> -- 
> 1.8.3.2
> 
> 
> _______________________________________________
> lng-odp mailing list
> lng-odp@lists.linaro.org
> http://lists.linaro.org/mailman/listinfo/lng-odp
Balasubramanian Manoharan Nov. 21, 2014, 3:54 p.m. UTC | #14
A few points:

* In order to check the ordered state of buffers from the second queue, they
should be dequeued by a single thread,
as the scheduler will dispatch the buffers from an ORDERED queue in the
initial order, but more than one thread can get buffers from the same queue
at the same time.

Regards,
Bala

On 21 November 2014 20:28, Taras Kondratiuk <taras.kondratiuk@linaro.org>
wrote:

> On 11/21/2014 04:38 PM, Alexandru Badicioiu wrote:
>
>>
>>
>> On 21 November 2014 16:25, Taras Kondratiuk <taras.kondratiuk@linaro.org
>>     7. Two queues one of them ORDERED, several threads. Verify that
>> buffers
>>         scheduled from ORDERED queue are enqueue into the second queue in
>>         correct order.
>>
>> I think you meant "Verify that buffers scheduled from ORDERED queue are
>> _dequeued_ from the second queue in the correct order".
>>
>
> Right. To be more clear.
> - buffers are pushed to ORDERED queues in some order.
> - then they are scheduled from ORDERED queue and enqueued to the second
>   queue by several threads.
> - at the end buffers dequeued from the second queue should have the
>   initial order.
>
>
> _______________________________________________
> lng-odp mailing list
> lng-odp@lists.linaro.org
> http://lists.linaro.org/mailman/listinfo/lng-odp
>
Ciprian Barbu Nov. 21, 2014, 4:13 p.m. UTC | #15
>> +
>> +static void schedule_multi_hi(void)
>> +{
>> +     test_case_args_t args;
>
> IMO, We should avoid using variables allocated from stack to share
> the data between different execution context.
> It will work in linux threaded run-time environment.But it will
> have issues in different run-time environment like bare-metal.
> IMO if any memory needs to be shared across different
> execution environment should use the memory allocated from odp shared mem.

Ok, noted.
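
A minimal sketch of the reviewer's suggestion, reusing the shared-memory
calls already present in this patch; the "test_case_args" shm name is only
illustrative, not something the patch defines:

static void schedule_multi_hi(void)
{
	odp_shm_t shm;
	test_case_args_t *args;

	/* Keep the per-test arguments in ODP shared memory instead of on
	 * the caller's stack, so other execution contexts can see them. */
	shm = odp_shm_reserve("test_case_args", sizeof(test_case_args_t),
			      ODP_CACHE_LINE_SIZE, 0);
	args = odp_shm_addr(shm);

	if (args == NULL) {
		CU_FAIL("Shared memory reserve failed");
		return;
	}

	snprintf(args->name, sizeof(args->name), "sched_multi_hi");
	args->prio = ODP_SCHED_PRIO_HIGHEST;
	args->func = test_schedule_multi;
	execute_parallel(exec_template, args);
}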

>
>
>> +     snprintf(args.name, sizeof(args.name), "sched_multi_hi");
>> +     args.prio = ODP_SCHED_PRIO_HIGHEST;
>> +     args.func = test_schedule_multi;
>> +     execute_parallel(exec_template, &args);
>> +}
>> +
>> +static void execute_parallel(void *(*start_routine) (void *),
>> +                          test_case_args_t *test_case_args)
>> +{
>> +     odph_linux_pthread_t thread_tbl[MAX_WORKERS];
>> +     int first_core;
>> +
>> +     memset(thread_tbl, 0, sizeof(thread_tbl));
>> +
>> +     /*
>> +      * By default core #0 runs Linux kernel background tasks.
>> +      * Start mapping thread from core #1
>> +      */
>> +     first_core = 1;
>> +
>> +     if (odp_sys_core_count() == 1)
>> +             first_core = 0;
>> +
>> +     odph_linux_pthread_create(thread_tbl, num_workers, first_core,
>> +                                     start_routine, test_case_args);
>> +
>> +     /* Wait for worker threads to terminate */
>> +     odph_linux_pthread_join(thread_tbl, num_workers);
>> +}
>> +
>> +static odp_buffer_pool_t test_odp_buffer_pool_init(void)
>> +{
>> +     void *pool_base;
>> +     odp_shm_t shm;
>> +     odp_buffer_pool_t pool;
>> +
>> +     shm = odp_shm_reserve("msg_pool",
>> +                           MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
>> +
>> +     pool_base = odp_shm_addr(shm);
>> +
>> +     if (NULL == pool_base) {
>> +             printf("Shared memory reserve failed.\n");
>> +             return -1;
>> +     }
>> +
>> +     pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE,
>> +                                   BUF_SIZE, ODP_CACHE_LINE_SIZE,
>> +                                   ODP_BUFFER_TYPE_RAW);
>> +
>> +     if (ODP_BUFFER_POOL_INVALID == pool) {
>> +             printf("Pool create failed.\n");
>> +             return -1;
>> +     }
>> +     return pool;
>> +}
>> +
>> +int schedule_test_init(void)
>> +{
>> +     test_args_t args;
>> +     odp_shm_t shm;
>> +     test_globals_t *globals;
>> +     int i, j;
>> +     int prios;
>> +
>> +     if (0 != odp_init_global(NULL, NULL)) {
>> +             printf("odp_init_global fail.\n");
>> +             return -1;
>> +     }
>> +     if (0 != odp_init_local()) {
>> +             printf("odp_init_local fail.\n");
>> +             return -1;
>> +     }
>> +     if (ODP_BUFFER_POOL_INVALID == test_odp_buffer_pool_init()) {
>> +             printf("test_odp_buffer_pool_init fail.\n");
>> +             return -1;
>> +     }
>> +
>> +     /* A worker thread per core */
>> +     num_workers = odp_sys_core_count();
>> +
>> +     if (args.core_count)
>
> args.core_count is uninitialized

Yes, I missed that at first. I got it fixed now.
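
The fix itself is not shown in the thread; one minimal shape it could take
(an assumption, not necessarily the change that was applied) is to zero the
structure before reading core_count:

	test_args_t args;

	memset(&args, 0, sizeof(args));	/* core_count stays 0 unless set later */

	/* A worker thread per core, unless a core count was requested */
	num_workers = odp_sys_core_count();

	if (args.core_count)
		num_workers = args.core_count;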
Ciprian Barbu Nov. 21, 2014, 4:16 p.m. UTC | #16
On Fri, Nov 21, 2014 at 5:54 PM, Bala Manoharan
<bala.manoharan@linaro.org> wrote:
>
> Few points,
>
> * Inorder to check ordered state of buffers from second queue they should be dequeued by a single thread
> as scheduler will despatch the buffers from ORDERED queue in initial order but more than one thread can get the buffer from the same queue at the same time.

I was thinking something like this: q1 and q2 ORDERED queues. Buffers
will first be pushed to q1 to have something to work with. Then
all buffers are dequeued and enqueued into q2 in, say, reverse order.
Then the buffers are dequeued from q2 and the order should match the
order in which they were pushed to q1. Did I get that right?

>
>
> Begards,
> Bala
>
> On 21 November 2014 20:28, Taras Kondratiuk <taras.kondratiuk@linaro.org> wrote:
>>
>> On 11/21/2014 04:38 PM, Alexandru Badicioiu wrote:
>>>
>>>
>>>
>>> On 21 November 2014 16:25, Taras Kondratiuk <taras.kondratiuk@linaro.org
>>>     7. Two queues one of them ORDERED, several threads. Verify that buffers
>>>         scheduled from ORDERED queue are enqueue into the second queue in
>>>         correct order.
>>>
>>> I think you meant "Verify that buffers scheduled from ORDERED queue are
>>> _dequeued_ from the second queue in the correct order".
>>
>>
>> Right. To be more clear.
>> - buffers are pushed to ORDERED queues in some order.
>> - then they are scheduled from ORDERED queue and enqueued to the second
>>   queue by several threads.
>> - at the end buffers dequeued from the second queue should have the
>>   initial order.
>>
>>
>> _______________________________________________
>> lng-odp mailing list
>> lng-odp@lists.linaro.org
>> http://lists.linaro.org/mailman/listinfo/lng-odp
>
>
>
> _______________________________________________
> lng-odp mailing list
> lng-odp@lists.linaro.org
> http://lists.linaro.org/mailman/listinfo/lng-odp
>
Taras Kondratiuk Nov. 21, 2014, 5:30 p.m. UTC | #17
On 11/21/2014 06:16 PM, Ciprian Barbu wrote:
> On Fri, Nov 21, 2014 at 5:54 PM, Bala Manoharan
> <bala.manoharan@linaro.org> wrote:
>>
>> Few points,
>>
>> * Inorder to check ordered state of buffers from second queue they should be dequeued by a single thread
>> as scheduler will despatch the buffers from ORDERED queue in initial order but more than one thread can get the buffer from the same queue at the same time.
>
> I was thinking something like this: q1 and q2 ORDERED queues. Buffers
> will first be pushed to the q1 to have something to work with. Then
> all buffers are dequeued and enqueued in q2 in, say, reverse order.
> Then the buffers are dequeued from q1 and the order should match the
> order in which they were pushed to q1. Did I get that right?

That is actually more than you normally need from a scheduler.
Usually reordering happens because of packet processing parallelization 
on several cores, but not because one core reorders packets.

Petri, I don't remember if we discussed scenario described by Ciprian,
but previously you mentioned that ORDERED queues can be substituted by 
ATOMIC if ORDERED are not supported by platform. But that won't work 
when core reorders buffer intentionally.
Bill Fischofer Nov. 23, 2014, 1:55 a.m. UTC | #18
The semantics of ordered queues still need to be fully (and rigorously)
defined.  Otherwise it's impossible to ensure that different
implementations will yield the same results.  Once we get past the "sunny
day" tests, its the job of the test writer to be devious in trying to trick
implementations into doing something that the spec says they shouldn't do.
So Ciprian's scenario is a good one.

On Fri, Nov 21, 2014 at 11:30 AM, Taras Kondratiuk <
taras.kondratiuk@linaro.org> wrote:

> On 11/21/2014 06:16 PM, Ciprian Barbu wrote:
>
>> On Fri, Nov 21, 2014 at 5:54 PM, Bala Manoharan
>> <bala.manoharan@linaro.org> wrote:
>>
>>>
>>> Few points,
>>>
>>> * Inorder to check ordered state of buffers from second queue they
>>> should be dequeued by a single thread
>>> as scheduler will despatch the buffers from ORDERED queue in initial
>>> order but more than one thread can get the buffer from the same queue at
>>> the same time.
>>>
>>
>> I was thinking something like this: q1 and q2 ORDERED queues. Buffers
>> will first be pushed to the q1 to have something to work with. Then
>> all buffers are dequeued and enqueued in q2 in, say, reverse order.
>> Then the buffers are dequeued from q1 and the order should match the
>> order in which they were pushed to q1. Did I get that right?
>>
>
> That is actually more than you normally need from a scheduler.
> Usually reordering happens because of packet processing parallelization on
> several cores, but not because one core reorders packets.
>
> Petri, I don't remember if we discussed scenario described by Ciprian,
> but previously you mentioned that ORDERED queues can be substituted by
> ATOMIC if ORDERED are not supported by platform. But that won't work when
> core reorders buffer intentionally.
>
>
> _______________________________________________
> lng-odp mailing list
> lng-odp@lists.linaro.org
> http://lists.linaro.org/mailman/listinfo/lng-odp
>
Balasubramanian Manoharan Nov. 24, 2014, 5:34 a.m. UTC | #19
Hi Ciprian,

The scenario which you have described makes sense when a core dequeues from
an ORDERED queue and enqueues into an ATOMIC queue. I think we need to
properly describe the definitions of ORDERED and ATOMIC queues before writing
TCs for the scenario you have defined.

I would prefer to have only sunny-day TCs for the scheduler for the time being.

Regards,
Bala

On 23 November 2014 07:25, Bill Fischofer <bill.fischofer@linaro.org> wrote:

> The semantics of ordered queues still need to be fully (and rigorously)
> defined.  Otherwise it's impossible to ensure that different
> implementations will yield the same results.  Once we get past the "sunny
> day" tests, its the job of the test writer to be devious in trying to trick
> implementations into doing something that the spec says they shouldn't do.
> So Ciprian's scenario is a good one.
>
> On Fri, Nov 21, 2014 at 11:30 AM, Taras Kondratiuk <
> taras.kondratiuk@linaro.org> wrote:
>
>> On 11/21/2014 06:16 PM, Ciprian Barbu wrote:
>>
>>> On Fri, Nov 21, 2014 at 5:54 PM, Bala Manoharan
>>> <bala.manoharan@linaro.org> wrote:
>>>
>>>>
>>>> Few points,
>>>>
>>>> * Inorder to check ordered state of buffers from second queue they
>>>> should be dequeued by a single thread
>>>> as scheduler will despatch the buffers from ORDERED queue in initial
>>>> order but more than one thread can get the buffer from the same queue at
>>>> the same time.
>>>>
>>>
>>> I was thinking something like this: q1 and q2 ORDERED queues. Buffers
>>> will first be pushed to the q1 to have something to work with. Then
>>> all buffers are dequeued and enqueued in q2 in, say, reverse order.
>>> Then the buffers are dequeued from q1 and the order should match the
>>> order in which they were pushed to q1. Did I get that right?
>>>
>>
>> That is actually more than you normally need from a scheduler.
>> Usually reordering happens because of packet processing parallelization
>> on several cores, but not because one core reorders packets.
>>
>> Petri, I don't remember if we discussed scenario described by Ciprian,
>> but previously you mentioned that ORDERED queues can be substituted by
>> ATOMIC if ORDERED are not supported by platform. But that won't work when
>> core reorders buffer intentionally.
>>
>>
>> _______________________________________________
>> lng-odp mailing list
>> lng-odp@lists.linaro.org
>> http://lists.linaro.org/mailman/listinfo/lng-odp
>>
>
>
Alexandru Badicioiu Nov. 24, 2014, 7:47 a.m. UTC | #20
If I understand correctly, this is the scenario Ciprian described:

Core : enqueue 1..n  ----> q1 (ORDERED) ------> Core : dequeue 1..n,
enqueue n..1 ------> q2 -----> Core : dequeue 1..n.

This scenario matches the definition of ORDERED queues, as proposed by
Petri, regardless of what kind of queue q2 is. Ordering should be restored
even if q2 is a POLL queue, so in fact ordering is more a feature of
enqueue than of the scheduler (dequeue).
IMO this is a valid test of ORDERED queues (not ATOMIC). There is a key
difference in how ordering is ensured by the two queue types - ATOMIC
preserves the order, ORDERED restores the order.

Alex



On 24 November 2014 at 07:34, Bala Manoharan <bala.manoharan@linaro.org>
wrote:

> Hi Ciprian,
>
> The scenario which you have described makes sense when a core dequeues
> from an ORDERED queue and enqueues into an ATOMIC queue. I think we need to
> properly describe the definition of ORDERED and ATOMIC queue before writing
> TCs for the scenario you have defined.
>
> I would prefer to have only sunny day TC's for scheduler for the time
> being.
>
> Regards,
> Bala
>
> On 23 November 2014 07:25, Bill Fischofer <bill.fischofer@linaro.org>
> wrote:
>
>> The semantics of ordered queues still need to be fully (and rigorously)
>> defined.  Otherwise it's impossible to ensure that different
>> implementations will yield the same results.  Once we get past the "sunny
>> day" tests, its the job of the test writer to be devious in trying to trick
>> implementations into doing something that the spec says they shouldn't do.
>> So Ciprian's scenario is a good one.
>>
>> On Fri, Nov 21, 2014 at 11:30 AM, Taras Kondratiuk <
>> taras.kondratiuk@linaro.org> wrote:
>>
>>> On 11/21/2014 06:16 PM, Ciprian Barbu wrote:
>>>
>>>> On Fri, Nov 21, 2014 at 5:54 PM, Bala Manoharan
>>>> <bala.manoharan@linaro.org> wrote:
>>>>
>>>>>
>>>>> Few points,
>>>>>
>>>>> * Inorder to check ordered state of buffers from second queue they
>>>>> should be dequeued by a single thread
>>>>> as scheduler will despatch the buffers from ORDERED queue in initial
>>>>> order but more than one thread can get the buffer from the same queue at
>>>>> the same time.
>>>>>
>>>>
>>>> I was thinking something like this: q1 and q2 ORDERED queues. Buffers
>>>> will first be pushed to the q1 to have something to work with. Then
>>>> all buffers are dequeued and enqueued in q2 in, say, reverse order.
>>>> Then the buffers are dequeued from q1 and the order should match the
>>>> order in which they were pushed to q1. Did I get that right?
>>>>
>>>
>>> That is actually more than you normally need from a scheduler.
>>> Usually reordering happens because of packet processing parallelization
>>> on several cores, but not because one core reorders packets.
>>>
>>> Petri, I don't remember if we discussed scenario described by Ciprian,
>>> but previously you mentioned that ORDERED queues can be substituted by
>>> ATOMIC if ORDERED are not supported by platform. But that won't work when
>>> core reorders buffer intentionally.
>>>
>>>
>>> _______________________________________________
>>> lng-odp mailing list
>>> lng-odp@lists.linaro.org
>>> http://lists.linaro.org/mailman/listinfo/lng-odp
>>>
>>
>>
>
> _______________________________________________
> lng-odp mailing list
> lng-odp@lists.linaro.org
> http://lists.linaro.org/mailman/listinfo/lng-odp
>
>
Ciprian Barbu Nov. 24, 2014, 8:59 a.m. UTC | #21
On Mon, Nov 24, 2014 at 9:47 AM, Alexandru Badicioiu
<alexandru.badicioiu@linaro.org> wrote:
> If I understand correctly, this is the scenario Ciprian described:
>
> Core : enqueue 1..n  ----> q1 (ORDERED) ------> Core : dequeue 1..n, enqueue
> n..1 ------> q2 -----> Core : dequeue 1..n.
>
> This scenario matches the definition of ORDERED queues, as proposed by
> Petri, regardless of what kind of queue is q2. Ordering should be restored
> even if q2 is a POLL queue, so in fact ordering is more a feature of enqueue
> rather than scheduler (dequeue).
> IMO this is a valid test of ORDERED queues (not ATOMIC). There is a main
> difference of how ordering is insured by the two queue types - ATOMIC
> preserves the order, ORDERED restores the order.

Yeah, this was my understanding of ORDERED queues as well. We need to
talk more about this, but if this is not the definition of ORDERED
queues that we need to agree on for v1.0, then I need to properly
understand how to do a "sunny day" test for restoring the order. Simply
queuing packets in the same order by a single thread matches the
definition for ATOMIC, right?

/Ciprian

>
> Alex
>
>
>
> On 24 November 2014 at 07:34, Bala Manoharan <bala.manoharan@linaro.org>
> wrote:
>>
>> Hi Ciprian,
>>
>> The scenario which you have described makes sense when a core dequeues
>> from an ORDERED queue and enqueues into an ATOMIC queue. I think we need to
>> properly describe the definition of ORDERED and ATOMIC queue before writing
>> TCs for the scenario you have defined.
>>
>> I would prefer to have only sunny day TC's for scheduler for the time
>> being.
>>
>> Regards,
>> Bala
>>
>> On 23 November 2014 07:25, Bill Fischofer <bill.fischofer@linaro.org>
>> wrote:
>>>
>>> The semantics of ordered queues still need to be fully (and rigorously)
>>> defined.  Otherwise it's impossible to ensure that different implementations
>>> will yield the same results.  Once we get past the "sunny day" tests, its
>>> the job of the test writer to be devious in trying to trick implementations
>>> into doing something that the spec says they shouldn't do.  So Ciprian's
>>> scenario is a good one.
>>>
>>> On Fri, Nov 21, 2014 at 11:30 AM, Taras Kondratiuk
>>> <taras.kondratiuk@linaro.org> wrote:
>>>>
>>>> On 11/21/2014 06:16 PM, Ciprian Barbu wrote:
>>>>>
>>>>> On Fri, Nov 21, 2014 at 5:54 PM, Bala Manoharan
>>>>> <bala.manoharan@linaro.org> wrote:
>>>>>>
>>>>>>
>>>>>> Few points,
>>>>>>
>>>>>> * Inorder to check ordered state of buffers from second queue they
>>>>>> should be dequeued by a single thread
>>>>>> as scheduler will despatch the buffers from ORDERED queue in initial
>>>>>> order but more than one thread can get the buffer from the same queue at the
>>>>>> same time.
>>>>>
>>>>>
>>>>> I was thinking something like this: q1 and q2 ORDERED queues. Buffers
>>>>> will first be pushed to the q1 to have something to work with. Then
>>>>> all buffers are dequeued and enqueued in q2 in, say, reverse order.
>>>>> Then the buffers are dequeued from q1 and the order should match the
>>>>> order in which they were pushed to q1. Did I get that right?
>>>>
>>>>
>>>> That is actually more than you normally need from a scheduler.
>>>> Usually reordering happens because of packet processing parallelization
>>>> on several cores, but not because one core reorders packets.
>>>>
>>>> Petri, I don't remember if we discussed scenario described by Ciprian,
>>>> but previously you mentioned that ORDERED queues can be substituted by
>>>> ATOMIC if ORDERED are not supported by platform. But that won't work when
>>>> core reorders buffer intentionally.
>>>>
>>>>
>>>> _______________________________________________
>>>> lng-odp mailing list
>>>> lng-odp@lists.linaro.org
>>>> http://lists.linaro.org/mailman/listinfo/lng-odp
>>>
>>>
>>
>>
>> _______________________________________________
>> lng-odp mailing list
>> lng-odp@lists.linaro.org
>> http://lists.linaro.org/mailman/listinfo/lng-odp
>>
>
>
> _______________________________________________
> lng-odp mailing list
> lng-odp@lists.linaro.org
> http://lists.linaro.org/mailman/listinfo/lng-odp
>
Taras Kondratiuk Nov. 24, 2014, 9:10 a.m. UTC | #22
On 11/23/2014 03:55 AM, Bill Fischofer wrote:
> The semantics of ordered queues still need to be fully (and rigorously)
> defined.  Otherwise it's impossible to ensure that different
> implementations will yield the same results.  Once we get past the
> "sunny day" tests, its the job of the test writer to be devious in
> trying to trick implementations into doing something that the spec says
> they shouldn't do.  So Ciprian's scenario is a good one.

It is not clear how to interpret the results of this test scenario, because
the exact behavior is not specified.
Alexandru Badicioiu Nov. 24, 2014, 9:14 a.m. UTC | #23
We can't talk about "atomicity" in a single-threaded context.
I verified my implementation of ATOMIC queues this way - I modified
odp_pktio to associate a POSIX binary semaphore, initialized to 1, with each
queue.
When returning from odp_schedule() a thread does sem_trywait() on the queue
semaphore, and after enqueuing to the output queue it does sem_post() if it
had locked the semaphore. For an atomic queue the trywait should always
succeed. With the queue sync changed to NONE, trywait() failures should show
up at some traffic injection rate into the device.

Alex
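
A rough sketch of that check, using the POSIX sem_trywait()/sem_post() calls;
sem_tbl[] and queue_index() are hypothetical helpers (one semaphore per
queue, each sem_init()'d to 1 at setup time):

#include <semaphore.h>

extern sem_t sem_tbl[];			/* hypothetical: one semaphore per queue */
extern int queue_index(odp_queue_t q);	/* hypothetical: queue -> table index */

static void check_atomic_once(odp_queue_t dst)
{
	odp_queue_t src;
	odp_buffer_t buf;
	sem_t *sem;
	int held;

	buf = odp_schedule(&src, ODP_SCHED_WAIT);
	sem = &sem_tbl[queue_index(src)];

	/* Must always succeed while src is ATOMIC; with SYNC_NONE it can
	 * fail, because another thread may hold a buffer from the same
	 * queue at the same time. */
	held = (sem_trywait(sem) == 0);
	CU_ASSERT(held);

	if (odp_queue_enq(dst, buf))
		CU_FAIL("Queue enqueue failed");

	if (held)
		sem_post(sem);
}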


On 24 November 2014 at 10:59, Ciprian Barbu <ciprian.barbu@linaro.org>
wrote:

> On Mon, Nov 24, 2014 at 9:47 AM, Alexandru Badicioiu
> <alexandru.badicioiu@linaro.org> wrote:
> > If I understand correctly, this is the scenario Ciprian described:
> >
> > Core : enqueue 1..n  ----> q1 (ORDERED) ------> Core : dequeue 1..n,
> enqueue
> > n..1 ------> q2 -----> Core : dequeue 1..n.
> >
> > This scenario matches the definition of ORDERED queues, as proposed by
> > Petri, regardless of what kind of queue is q2. Ordering should be
> restored
> > even if q2 is a POLL queue, so in fact ordering is more a feature of
> enqueue
> > rather than scheduler (dequeue).
> > IMO this is a valid test of ORDERED queues (not ATOMIC). There is a main
> > difference of how ordering is insured by the two queue types - ATOMIC
> > preserves the order, ORDERED restores the order.
>
> Yeah, this was my understanding of ORDERED queues as well. We need to
> talk more about this, but if this is not the definition of ORDERED
> queues as we need to agree on for v1.0 then I need to properly
> understand how to do a "sunnyday" test for restoring the order. Simply
> queuing packets in the same order by a single thread matches the
> definition for ATOMIC, right?
>
> /Ciprian
>
> >
> > Alex
> >
> >
> >
> > On 24 November 2014 at 07:34, Bala Manoharan <bala.manoharan@linaro.org>
> > wrote:
> >>
> >> Hi Ciprian,
> >>
> >> The scenario which you have described makes sense when a core dequeues
> >> from an ORDERED queue and enqueues into an ATOMIC queue. I think we
> need to
> >> properly describe the definition of ORDERED and ATOMIC queue before
> writing
> >> TCs for the scenario you have defined.
> >>
> >> I would prefer to have only sunny day TC's for scheduler for the time
> >> being.
> >>
> >> Regards,
> >> Bala
> >>
> >> On 23 November 2014 07:25, Bill Fischofer <bill.fischofer@linaro.org>
> >> wrote:
> >>>
> >>> The semantics of ordered queues still need to be fully (and rigorously)
> >>> defined.  Otherwise it's impossible to ensure that different
> implementations
> >>> will yield the same results.  Once we get past the "sunny day" tests,
> its
> >>> the job of the test writer to be devious in trying to trick
> implementations
> >>> into doing something that the spec says they shouldn't do.  So
> Ciprian's
> >>> scenario is a good one.
> >>>
> >>> On Fri, Nov 21, 2014 at 11:30 AM, Taras Kondratiuk
> >>> <taras.kondratiuk@linaro.org> wrote:
> >>>>
> >>>> On 11/21/2014 06:16 PM, Ciprian Barbu wrote:
> >>>>>
> >>>>> On Fri, Nov 21, 2014 at 5:54 PM, Bala Manoharan
> >>>>> <bala.manoharan@linaro.org> wrote:
> >>>>>>
> >>>>>>
> >>>>>> Few points,
> >>>>>>
> >>>>>> * Inorder to check ordered state of buffers from second queue they
> >>>>>> should be dequeued by a single thread
> >>>>>> as scheduler will despatch the buffers from ORDERED queue in initial
> >>>>>> order but more than one thread can get the buffer from the same
> queue at the
> >>>>>> same time.
> >>>>>
> >>>>>
> >>>>> I was thinking something like this: q1 and q2 ORDERED queues. Buffers
> >>>>> will first be pushed to the q1 to have something to work with. Then
> >>>>> all buffers are dequeued and enqueued in q2 in, say, reverse order.
> >>>>> Then the buffers are dequeued from q1 and the order should match the
> >>>>> order in which they were pushed to q1. Did I get that right?
> >>>>
> >>>>
> >>>> That is actually more than you normally need from a scheduler.
> >>>> Usually reordering happens because of packet processing
> parallelization
> >>>> on several cores, but not because one core reorders packets.
> >>>>
> >>>> Petri, I don't remember if we discussed scenario described by Ciprian,
> >>>> but previously you mentioned that ORDERED queues can be substituted by
> >>>> ATOMIC if ORDERED are not supported by platform. But that won't work
> when
> >>>> core reorders buffer intentionally.
> >>>>
> >>>>
> >>>> _______________________________________________
> >>>> lng-odp mailing list
> >>>> lng-odp@lists.linaro.org
> >>>> http://lists.linaro.org/mailman/listinfo/lng-odp
> >>>
> >>>
> >>
> >>
> >> _______________________________________________
> >> lng-odp mailing list
> >> lng-odp@lists.linaro.org
> >> http://lists.linaro.org/mailman/listinfo/lng-odp
> >>
> >
> >
> > _______________________________________________
> > lng-odp mailing list
> > lng-odp@lists.linaro.org
> > http://lists.linaro.org/mailman/listinfo/lng-odp
> >
>
Balasubramanian Manoharan Nov. 24, 2014, 9:17 a.m. UTC | #24
Hi,

The concern I have with the above testing scenario is that IMO it does not
match an application use case perfectly. The test for an ORDERED queue, in my
opinion, could be:

1. Dequeue work W1-W10 from ORDERED queue Q1
2. Enqueue work W1-W5 to ORDERED queue Q2
3. Enqueue work W6-W10 to ORDERED queue Q3

Now if Q2 and Q3 have the same priority, the scheduler, on calling
odp_schedule(), should dispatch work W1 and W6 to different cores, as they
now come from different queues with the same priority.
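
A sketch of that sequence, assuming Q1 is ORDERED, Q2/Q3 are ORDERED queues
of equal priority, and W1..W10 are ten buffers pulled by a single thread:

static void fan_out_q1(odp_queue_t q2, odp_queue_t q3)
{
	odp_queue_t from;
	odp_buffer_t buf;
	int i;

	/* Dequeue W1..W10 from Q1 via the scheduler, then fan them out:
	 * the first five to Q2, the rest to Q3. */
	for (i = 0; i < 10; i++) {
		buf = odp_schedule(&from, ODP_SCHED_WAIT);

		if (odp_queue_enq(i < 5 ? q2 : q3, buf))
			CU_FAIL("Queue enqueue failed");
	}

	/* Workers then call odp_schedule() concurrently; work from Q2 and
	 * Q3 (same priority) may be dispatched to different cores. */
}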

Yes. I agree we need to define the semantics of ORDERED and ATOMIC queues
more clearly.

Regards,
Bala

On 24 November 2014 14:40, Taras Kondratiuk <taras.kondratiuk@linaro.org>
wrote:

> On 11/23/2014 03:55 AM, Bill Fischofer wrote:
>
>> The semantics of ordered queues still need to be fully (and rigorously)
>> defined.  Otherwise it's impossible to ensure that different
>> implementations will yield the same results.  Once we get past the
>> "sunny day" tests, its the job of the test writer to be devious in
>> trying to trick implementations into doing something that the spec says
>> they shouldn't do.  So Ciprian's scenario is a good one.
>>
>
> It is not clear how to interpret results of this test scenario, because
> exact behavior is not specified.
>
diff mbox

Patch

diff --git a/configure.ac b/configure.ac
index fcd7279..a47db72 100644
--- a/configure.ac
+++ b/configure.ac
@@ -173,6 +173,7 @@  AC_CONFIG_FILES([Makefile
 		 test/Makefile
 		 test/api_test/Makefile
                  test/cunit/Makefile
+                 test/cunit/schedule/Makefile
 		 pkgconfig/libodp.pc])
 
 AC_SEARCH_LIBS([timer_create],[rt posix4])
diff --git a/test/cunit/Makefile.am b/test/cunit/Makefile.am
index 439e134..b6033ee 100644
--- a/test/cunit/Makefile.am
+++ b/test/cunit/Makefile.am
@@ -3,6 +3,8 @@  include $(top_srcdir)/test/Makefile.inc
 AM_CFLAGS += -I$(CUNIT_PATH)/include
 AM_LDFLAGS += -L$(CUNIT_PATH)/lib -static -lcunit
 
+SUBDIRS = schedule
+
 if ODP_CUNIT_ENABLED
 TESTS = ${bin_PROGRAMS}
 check_PROGRAMS = ${bin_PROGRAMS}
diff --git a/test/cunit/schedule/Makefile.am b/test/cunit/schedule/Makefile.am
new file mode 100644
index 0000000..ad68b03
--- /dev/null
+++ b/test/cunit/schedule/Makefile.am
@@ -0,0 +1,10 @@ 
+include $(top_srcdir)/test/Makefile.inc
+
+if ODP_CUNIT_ENABLED
+bin_PROGRAMS = odp_schedule_test
+odp_schedule_test_LDFLAGS = $(AM_LDFLAGS) -L$(CUNIT_PATH)/lib -static -lcunit
+odp_schedule_test_CFLAGS = $(AM_CFLAGS) -I$(CUNIT_PATH)/include
+endif
+
+dist_odp_schedule_test_SOURCES = odp_schedule_test.c \
+				 odp_schedule_testsuites.c
diff --git a/test/cunit/schedule/odp_schedule_test.c b/test/cunit/schedule/odp_schedule_test.c
new file mode 100644
index 0000000..fa67f6e
--- /dev/null
+++ b/test/cunit/schedule/odp_schedule_test.c
@@ -0,0 +1,844 @@ 
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+#include "odp_schedule_testsuites.h"
+#include <odph_linux.h>
+
+#define MAX_WORKERS		32            /**< Max worker threads */
+#define MSG_POOL_SIZE           (4*1024*1024)
+#define QUEUES_PER_PRIO		64            /**< Queue per priority */
+#define QUEUE_ROUNDS		(512*1024)    /**< Queue test rounds */
+#define MULTI_BUFS_MAX		4             /**< Buffer burst size */
+#define BUF_SIZE		64
+
+#define SCHED_MSG "Test_buff_FOR_simple_schedule"
+
+/** Test arguments */
+typedef struct {
+	int core_count; /**< Core count */
+	int proc_mode;  /**< Process mode */
+} test_args_t;
+
+typedef int (*test_case_routine)(const char *, int, odp_buffer_pool_t,
+				 int, odp_barrier_t *);
+
+/** Scheduler test case arguments */
+typedef struct {
+	char name[64];	/**< test case name */
+	int prio;
+	test_case_routine func;
+} test_case_args_t;
+
+/** Test global variables */
+typedef struct {
+	odp_barrier_t barrier;/**< @private Barrier for test synchronisation */
+	test_args_t test_args;/**< @private Test case function and arguments */
+} test_globals_t;
+
+static void execute_parallel(void *(*func) (void *), test_case_args_t *);
+static int num_workers;
+
+/**
+ * @internal CUnit test case for verifying functionality of
+ *           schedule_wait_time
+ */
+static void schedule_wait_time(void)
+{
+	uint64_t wait_time;
+
+	wait_time = odp_schedule_wait_time(0);
+	CU_ASSERT(wait_time > 0);
+	CU_PASS("schedule_wait_time(0)");
+
+	wait_time = odp_schedule_wait_time(1);
+	CU_ASSERT(wait_time > 0);
+	CU_PASS("schedule_wait_time(1)");
+
+	wait_time = odp_schedule_wait_time((uint64_t)-1LL);
+	CU_ASSERT(wait_time > 0);
+	CU_PASS("schedule_wait_time(MAX_LONG_INT)");
+}
+
+/**
+ * @internal Clear all scheduled queues. Retry to be sure that all
+ * buffers have been scheduled.
+ */
+static void clear_sched_queues(void)
+{
+	odp_buffer_t buf;
+
+	while (1) {
+		buf = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+
+		if (buf == ODP_BUFFER_INVALID)
+			break;
+
+		odp_buffer_free(buf);
+	}
+}
+
+/**
+ * @internal Create multiple queues from a pool of buffers
+ *
+ * @param thr  Thread
+ * @param msg_pool  Buffer pool
+ * @param prio   Queue priority
+ *
+ * @return 0 if successful
+ */
+static int create_queues(int thr, odp_buffer_pool_t msg_pool, int prio)
+{
+	char name[] = "sched_XX_YY";
+	odp_buffer_t buf;
+	odp_queue_t queue;
+	int i;
+
+	name[6] = '0' + prio/10;
+	name[7] = '0' + prio - 10*(prio/10);
+
+	/* Alloc and enqueue a buffer per queue */
+	for (i = 0; i < QUEUES_PER_PRIO; i++) {
+		name[9]  = '0' + i/10;
+		name[10] = '0' + i - 10*(i/10);
+
+		queue = odp_queue_lookup(name);
+
+		if (queue == ODP_QUEUE_INVALID) {
+			ODP_ERR("  [%i] Queue %s lookup failed.\n", thr, name);
+			return -1;
+		}
+
+		buf = odp_buffer_alloc(msg_pool);
+
+		if (!odp_buffer_is_valid(buf)) {
+			ODP_ERR("  [%i] msg_pool alloc failed\n", thr);
+			return -1;
+		}
+
+		if (odp_queue_enq(queue, buf)) {
+			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * @internal Create a single queue from a pool of buffers
+ *
+ * @param thr  Thread
+ * @param msg_pool  Buffer pool
+ * @param prio   Queue priority
+ *
+ * @return 0 if successful
+ */
+static int create_queue(int thr, odp_buffer_pool_t msg_pool, int prio)
+{
+	char name[] = "sched_XX_00";
+	odp_buffer_t buf;
+	odp_queue_t queue;
+
+	buf = odp_buffer_alloc(msg_pool);
+
+	if (!odp_buffer_is_valid(buf)) {
+		ODP_ERR("  [%i] msg_pool alloc failed\n", thr);
+		return -1;
+	}
+
+	name[6] = '0' + prio/10;
+	name[7] = '0' + prio - 10*(prio/10);
+
+	queue = odp_queue_lookup(name);
+
+	if (queue == ODP_QUEUE_INVALID) {
+		ODP_ERR("  [%i] Queue %s lookup failed.\n", thr, name);
+		return -1;
+	}
+
+	if (odp_queue_enq(queue, buf)) {
+		ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * @internal Test scheduling of a single queue - with odp_schedule_one()
+ *
+ * Enqueue a buffer to the shared queue. Schedule and enqueue the received
+ * buffer back into the queue.
+ *
+ * @param str      Test case name string
+ * @param thr      Thread
+ * @param msg_pool Buffer pool
+ * @param prio     Priority
+ * @param barrier  Barrier
+ *
+ * @return 0 if successful
+ */
+static int test_schedule_one_single(const char *str, int thr,
+				    odp_buffer_pool_t msg_pool,
+				    int prio, odp_barrier_t *barrier)
+{
+	odp_buffer_t buf;
+	odp_queue_t queue;
+	uint64_t t1, t2, cycles, ns;
+	uint32_t i;
+	uint32_t tot = 0;
+
+	if (create_queue(thr, msg_pool, prio)) {
+		CU_FAIL_FATAL("lookup queue");
+		return -1;
+	}
+
+	t1 = odp_time_get_cycles();
+
+	for (i = 0; i < QUEUE_ROUNDS; i++) {
+		buf = odp_schedule_one(&queue, ODP_SCHED_WAIT);
+
+		if (odp_queue_enq(queue, buf)) {
+			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
+			return -1;
+		}
+	}
+
+	if (odp_queue_sched_type(queue) == ODP_SCHED_SYNC_ATOMIC)
+		odp_schedule_release_atomic();
+
+	t2     = odp_time_get_cycles();
+	cycles = odp_time_diff_cycles(t1, t2);
+	ns     = odp_time_cycles_to_ns(cycles);
+	tot    = i;
+
+	odp_barrier_sync(barrier);
+	clear_sched_queues();
+
+	cycles = cycles/tot;
+	ns     = ns/tot;
+
+	printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
+	       thr, str, cycles, ns);
+
+	return 0;
+}
+
+/**
+ * @internal Test scheduling of multiple queues - with odp_schedule_one()
+ *
+ * Enqueue a buffer to each queue. Schedule and enqueue the received
+ * buffer back into the queue it came from.
+ *
+ * @param str      Test case name string
+ * @param thr      Thread
+ * @param msg_pool Buffer pool
+ * @param prio     Priority
+ * @param barrier  Barrier
+ *
+ * @return 0 if successful
+ */
+static int test_schedule_one_many(const char *str, int thr,
+				  odp_buffer_pool_t msg_pool,
+				  int prio, odp_barrier_t *barrier)
+{
+	odp_buffer_t buf;
+	odp_queue_t queue;
+	uint64_t t1 = 0;
+	uint64_t t2 = 0;
+	uint64_t cycles, ns;
+	uint32_t i;
+	uint32_t tot = 0;
+
+	if (create_queues(thr, msg_pool, prio))
+		return -1;
+
+	/* Start sched-enq loop */
+	t1 = odp_time_get_cycles();
+
+	for (i = 0; i < QUEUE_ROUNDS; i++) {
+		buf = odp_schedule_one(&queue, ODP_SCHED_WAIT);
+
+		if (odp_queue_enq(queue, buf)) {
+			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
+			return -1;
+		}
+	}
+
+	if (odp_queue_sched_type(queue) == ODP_SCHED_SYNC_ATOMIC)
+		odp_schedule_release_atomic();
+
+	t2     = odp_time_get_cycles();
+	cycles = odp_time_diff_cycles(t1, t2);
+	ns     = odp_time_cycles_to_ns(cycles);
+	tot    = i;
+
+	odp_barrier_sync(barrier);
+	clear_sched_queues();
+
+	cycles = cycles/tot;
+	ns     = ns/tot;
+
+	printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
+	       thr, str, cycles, ns);
+
+	return 0;
+}
+
+/**
+ * @internal Test scheduling of a single queue - with odp_schedule()
+ *
+ * Enqueue a buffer to the shared queue. Schedule and enqueue the received
+ * buffer back into the queue.
+ *
+ * @param str      Test case name string
+ * @param thr      Thread
+ * @param msg_pool Buffer pool
+ * @param prio     Priority
+ * @param barrier  Barrier
+ *
+ * @return 0 if successful
+ */
+static int test_schedule_single(const char *str, int thr,
+				odp_buffer_pool_t msg_pool,
+				int prio, odp_barrier_t *barrier)
+{
+	odp_buffer_t buf;
+	odp_queue_t queue;
+	uint64_t t1, t2, cycles, ns;
+	uint32_t i;
+	uint32_t tot = 0;
+
+	if (create_queue(thr, msg_pool, prio))
+		return -1;
+
+	t1 = odp_time_get_cycles();
+
+	for (i = 0; i < QUEUE_ROUNDS; i++) {
+		buf = odp_schedule(&queue, ODP_SCHED_WAIT);
+
+		if (odp_queue_enq(queue, buf)) {
+			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
+			return -1;
+		}
+	}
+
+	/* Clear possible locally stored buffers */
+	odp_schedule_pause();
+
+	tot = i;
+
+	while (1) {
+		buf = odp_schedule(&queue, ODP_SCHED_NO_WAIT);
+
+		if (buf == ODP_BUFFER_INVALID)
+			break;
+
+		tot++;
+
+		if (odp_queue_enq(queue, buf)) {
+			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
+			return -1;
+		}
+	}
+
+	odp_schedule_resume();
+
+	t2     = odp_time_get_cycles();
+	cycles = odp_time_diff_cycles(t1, t2);
+	ns     = odp_time_cycles_to_ns(cycles);
+
+	odp_barrier_sync(barrier);
+	clear_sched_queues();
+
+	cycles = cycles/tot;
+	ns     = ns/tot;
+
+	printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
+	       thr, str, cycles, ns);
+
+	return 0;
+}
+
+/**
+ * @internal Test scheduling of multiple queues - with odp_schedule()
+ *
+ * Enqueue a buffer to each queue. Schedule and enqueue the received
+ * buffer back into the queue it came from.
+ *
+ * @param str      Test case name string
+ * @param thr      Thread
+ * @param msg_pool Buffer pool
+ * @param prio     Priority
+ * @param barrier  Barrier
+ *
+ * @return 0 if successful
+ */
+static int test_schedule_many(const char *str, int thr,
+			      odp_buffer_pool_t msg_pool,
+			      int prio, odp_barrier_t *barrier)
+{
+	odp_buffer_t buf;
+	odp_queue_t queue;
+	uint64_t t1 = 0;
+	uint64_t t2 = 0;
+	uint64_t cycles, ns;
+	uint32_t i;
+	uint32_t tot = 0;
+
+	if (create_queues(thr, msg_pool, prio))
+		return -1;
+
+	/* Start sched-enq loop */
+	t1 = odp_time_get_cycles();
+
+	for (i = 0; i < QUEUE_ROUNDS; i++) {
+		buf = odp_schedule(&queue, ODP_SCHED_WAIT);
+
+		if (odp_queue_enq(queue, buf)) {
+			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
+			return -1;
+		}
+	}
+
+	/* Clear possible locally stored buffers */
+	odp_schedule_pause();
+
+	tot = i;
+
+	while (1) {
+		buf = odp_schedule(&queue, ODP_SCHED_NO_WAIT);
+
+		if (buf == ODP_BUFFER_INVALID)
+			break;
+
+		tot++;
+
+		if (odp_queue_enq(queue, buf)) {
+			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
+			return -1;
+		}
+	}
+
+	odp_schedule_resume();
+
+	t2     = odp_time_get_cycles();
+	cycles = odp_time_diff_cycles(t1, t2);
+	ns     = odp_time_cycles_to_ns(cycles);
+
+	odp_barrier_sync(barrier);
+	clear_sched_queues();
+
+	cycles = cycles/tot;
+	ns     = ns/tot;
+
+	printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
+	       thr, str, cycles, ns);
+
+	return 0;
+}
+
+/**
+ * @internal Test scheduling of multiple queues with multi_sched and multi_enq
+ *
+ * @param str      Test case name string
+ * @param thr      Thread
+ * @param msg_pool Buffer pool
+ * @param prio     Priority
+ * @param barrier  Barrier
+ *
+ * @return 0 if successful
+ */
+static int test_schedule_multi(const char *str, int thr,
+			       odp_buffer_pool_t msg_pool,
+			       int prio, odp_barrier_t *barrier)
+{
+	odp_buffer_t buf[MULTI_BUFS_MAX];
+	odp_queue_t queue;
+	uint64_t t1 = 0;
+	uint64_t t2 = 0;
+	uint64_t cycles, ns;
+	int i, j;
+	int num;
+	uint32_t tot = 0;
+	char name[] = "sched_XX_YY";
+
+	name[6] = '0' + prio/10;
+	name[7] = '0' + prio - 10*(prio/10);
+
+	/* Alloc and enqueue a buffer per queue */
+	for (i = 0; i < QUEUES_PER_PRIO; i++) {
+		name[9]  = '0' + i/10;
+		name[10] = '0' + i - 10*(i/10);
+
+		queue = odp_queue_lookup(name);
+
+		if (queue == ODP_QUEUE_INVALID) {
+			ODP_ERR("  [%i] Queue %s lookup failed.\n", thr, name);
+			return -1;
+		}
+
+		for (j = 0; j < MULTI_BUFS_MAX; j++) {
+			buf[j] = odp_buffer_alloc(msg_pool);
+
+			if (!odp_buffer_is_valid(buf[j])) {
+				ODP_ERR("  [%i] msg_pool alloc failed\n", thr);
+				return -1;
+			}
+		}
+
+		if (odp_queue_enq_multi(queue, buf, MULTI_BUFS_MAX)) {
+			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
+			return -1;
+		}
+	}
+
+	/* Start sched-enq loop */
+	t1 = odp_time_get_cycles();
+
+	for (i = 0; i < QUEUE_ROUNDS; i++) {
+		num = odp_schedule_multi(&queue, ODP_SCHED_WAIT, buf,
+					 MULTI_BUFS_MAX);
+
+		tot += num;
+
+		if (odp_queue_enq_multi(queue, buf, num)) {
+			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
+			return -1;
+		}
+	}
+
+	/* Clear possible locally stored buffers */
+	odp_schedule_pause();
+
+	while (1) {
+		num = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT, buf,
+					 MULTI_BUFS_MAX);
+
+		if (num == 0)
+			break;
+
+		tot += num;
+
+		if (odp_queue_enq_multi(queue, buf, num)) {
+			ODP_ERR("  [%i] Queue enqueue failed.\n", thr);
+			return -1;
+		}
+	}
+
+	odp_schedule_resume();
+
+
+	t2     = odp_time_get_cycles();
+	cycles = odp_time_diff_cycles(t1, t2);
+	ns     = odp_time_cycles_to_ns(cycles);
+
+	odp_barrier_sync(barrier);
+	clear_sched_queues();
+
+	if (tot) {
+		cycles = cycles/tot;
+		ns     = ns/tot;
+	} else {
+		cycles = 0;
+		ns     = 0;
+	}
+
+	printf("  [%i] %s enq+deq %"PRIu64" cycles, %"PRIu64" ns\n",
+	       thr, str, cycles, ns);
+
+	return 0;
+}
+
+/**
+ * Template function for running the scheduler tests.
+ * The main reason for having this function is that CUnit does not offer a way
+ * to pass arguments to a testcase function.
+ * The other reason is that there are common steps for all testcases.
+ */
+static void *exec_template(void *arg)
+{
+	odp_buffer_pool_t msg_pool;
+	odp_shm_t shm;
+	test_globals_t *globals;
+	odp_barrier_t *barrier;
+	test_case_args_t *args = (test_case_args_t*) arg;
+
+	shm     = odp_shm_lookup("test_globals");
+	globals = odp_shm_addr(shm);
+
+	CU_ASSERT(globals != NULL);
+
+	barrier = &globals->barrier;
+
+	/*
+	 * Sync before start
+	 */
+	odp_barrier_sync(barrier);
+	
+	/*
+	 * Find the buffer pool
+	 */
+	msg_pool = odp_buffer_pool_lookup("msg_pool");
+
+	CU_ASSERT(msg_pool != ODP_BUFFER_POOL_INVALID);
+
+	odp_barrier_sync(barrier);
+
+	/*
+	 * Now run the testcase routine passing the arguments
+	 */
+	args->func(args->name, odp_thread_id(), msg_pool,
+		   args->prio, barrier);
+
+	return arg;
+}
+
+/* Low prio */
+
+static void schedule_one_single_lo(void)
+{
+	test_case_args_t args;
+	snprintf(args.name, sizeof(args.name), "sched_one_s_lo");
+	args.prio = ODP_SCHED_PRIO_LOWEST;
+	args.func = test_schedule_one_single;
+	execute_parallel(exec_template, &args);
+}
+
+static void schedule_single_lo(void)
+{
+	test_case_args_t args;
+	snprintf(args.name, sizeof(args.name), "sched_____s_lo");
+	args.prio = ODP_SCHED_PRIO_LOWEST;
+	args.func = test_schedule_single;
+	execute_parallel(exec_template, &args);
+}
+
+static void schedule_one_many_lo(void)
+{
+	test_case_args_t args;
+	snprintf(args.name, sizeof(args.name), "sched_one_m_lo");
+	args.prio = ODP_SCHED_PRIO_LOWEST;
+	args.func = test_schedule_one_many;
+	execute_parallel(exec_template, &args);
+}
+
+static void schedule_many_lo(void)
+{
+	test_case_args_t args;
+	snprintf(args.name, sizeof(args.name), "sched_____m_lo");
+	args.prio = ODP_SCHED_PRIO_LOWEST;
+	args.func = test_schedule_many;
+	execute_parallel(exec_template, &args);
+}
+
+static void schedule_multi_lo(void)
+{
+	test_case_args_t args;
+	snprintf(args.name, sizeof(args.name), "sched_____m_lo");
+	args.prio = ODP_SCHED_PRIO_LOWEST;
+	args.func = test_schedule_multi;
+	execute_parallel(exec_template, &args);
+}
+
+/* High prio */
+
+static void schedule_one_single_hi(void)
+{
+	test_case_args_t args;
+	snprintf(args.name, sizeof(args.name), "sched_one_s_hi");
+	args.prio = ODP_SCHED_PRIO_HIGHEST;
+	args.func = test_schedule_single;
+	execute_parallel(exec_template, &args);
+}
+
+static void schedule_single_hi(void)
+{
+	test_case_args_t args;
+	snprintf(args.name, sizeof(args.name), "sched_____s_hi");
+	args.prio = ODP_SCHED_PRIO_HIGHEST;
+	args.func = test_schedule_single;
+	execute_parallel(exec_template, &args);
+}
+
+static void schedule_one_many_hi(void)
+{
+	test_case_args_t args;
+	snprintf(args.name, sizeof(args.name), "sched_one_m_hi");
+	args.prio = ODP_SCHED_PRIO_HIGHEST;
+	args.func = test_schedule_one_many;
+	execute_parallel(exec_template, &args);
+}
+
+static void schedule_many_hi(void)
+{
+	test_case_args_t args;
+	snprintf(args.name, sizeof(args.name), "sched_____m_hi");
+	args.prio = ODP_SCHED_PRIO_HIGHEST;
+	args.func = test_schedule_many;
+	execute_parallel(exec_template, &args);
+}
+
+static void schedule_multi_hi(void)
+{
+	test_case_args_t args;
+	snprintf(args.name, sizeof(args.name), "sched_multi_hi");
+	args.prio = ODP_SCHED_PRIO_HIGHEST;
+	args.func = test_schedule_multi;
+	execute_parallel(exec_template, &args);
+}
+
+static void execute_parallel(void *(*start_routine) (void *),
+			     test_case_args_t *test_case_args)
+{
+	odph_linux_pthread_t thread_tbl[MAX_WORKERS];
+	int first_core;
+
+	memset(thread_tbl, 0, sizeof(thread_tbl));
+
+	/*
+	 * By default core #0 runs Linux kernel background tasks.
+	 * Start mapping thread from core #1
+	 */
+	first_core = 1;
+
+	if (odp_sys_core_count() == 1)
+		first_core = 0;
+
+	odph_linux_pthread_create(thread_tbl, num_workers, first_core,
+					start_routine, test_case_args);
+
+	/* Wait for worker threads to terminate */
+	odph_linux_pthread_join(thread_tbl, num_workers);
+}
+
+static odp_buffer_pool_t test_odp_buffer_pool_init(void)
+{
+	void *pool_base;
+	odp_shm_t shm;
+	odp_buffer_pool_t pool;
+
+	shm = odp_shm_reserve("msg_pool",
+			      MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
+
+	pool_base = odp_shm_addr(shm);
+
+	if (NULL == pool_base) {
+		printf("Shared memory reserve failed.\n");
+		return -1;
+	}
+
+	pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE,
+				      BUF_SIZE, ODP_CACHE_LINE_SIZE,
+				      ODP_BUFFER_TYPE_RAW);
+
+	if (ODP_BUFFER_POOL_INVALID == pool) {
+		printf("Pool create failed.\n");
+		return -1;
+	}
+	return pool;
+}
+
+int schedule_test_init(void)
+{
+	test_args_t args;
+	odp_shm_t shm;
+	test_globals_t *globals;
+	int i, j;
+	int prios;
+
+	if (0 != odp_init_global(NULL, NULL)) {
+		printf("odp_init_global fail.\n");
+		return -1;
+	}
+	if (0 != odp_init_local()) {
+		printf("odp_init_local fail.\n");
+		return -1;
+	}
+	if (ODP_BUFFER_POOL_INVALID == test_odp_buffer_pool_init()) {
+		printf("test_odp_buffer_pool_init fail.\n");
+		return -1;
+	}
+
+	/* A worker thread per core */
+	num_workers = odp_sys_core_count();
+
+	if (args.core_count)
+		num_workers = args.core_count;
+
+	/* force to max core count */
+	if (num_workers > MAX_WORKERS)
+		num_workers = MAX_WORKERS;
+	shm = odp_shm_reserve("test_globals",
+			      sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
+
+	globals = odp_shm_addr(shm);
+
+	if (globals == NULL) {
+		ODP_ERR("Shared memory reserve failed.\n");
+		return -1;
+	}
+
+	memset(globals, 0, sizeof(test_globals_t));
+
+	/* Barrier to sync test case execution */
+	odp_barrier_init_count(&globals->barrier, num_workers);
+
+	prios = odp_schedule_num_prio();
+
+	for (i = 0; i < prios; i++) {
+		odp_queue_param_t param;
+		odp_queue_t queue;
+		char name[] = "sched_XX_YY";
+
+		if (i != ODP_SCHED_PRIO_HIGHEST &&
+		    i != ODP_SCHED_PRIO_LOWEST)
+			continue;
+
+		name[6] = '0' + i/10;
+		name[7] = '0' + i - 10*(i/10);
+
+		param.sched.prio  = i;
+		param.sched.sync  = ODP_SCHED_SYNC_ATOMIC;
+		param.sched.group = ODP_SCHED_GROUP_DEFAULT;
+
+		for (j = 0; j < QUEUES_PER_PRIO; j++) {
+			name[9]  = '0' + j/10;
+			name[10] = '0' + j - 10*(j/10);
+
+			queue = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED,
+						 &param);
+
+			if (queue == ODP_QUEUE_INVALID) {
+				ODP_ERR("Schedule queue create failed.\n");
+				return -1;
+			}
+		}
+	}
+	return 0;
+}
+
+int schedule_test_finalize(void)
+{
+	odp_term_local();
+	odp_term_global();
+	return 0;
+}
+
+struct CU_TestInfo schedule_tests[] = {
+	_CU_TEST_INFO(schedule_wait_time),
+	_CU_TEST_INFO(schedule_one_single_lo),
+	_CU_TEST_INFO(schedule_single_lo),
+	_CU_TEST_INFO(schedule_one_many_lo),
+	_CU_TEST_INFO(schedule_many_lo),
+	_CU_TEST_INFO(schedule_multi_lo),
+	_CU_TEST_INFO(schedule_one_single_hi),
+	_CU_TEST_INFO(schedule_single_hi),
+	_CU_TEST_INFO(schedule_one_many_hi),
+	_CU_TEST_INFO(schedule_many_hi),
+	_CU_TEST_INFO(schedule_multi_hi),
+	CU_TEST_INFO_NULL,
+};
diff --git a/test/cunit/schedule/odp_schedule_testsuites.c b/test/cunit/schedule/odp_schedule_testsuites.c
new file mode 100644
index 0000000..1053069
--- /dev/null
+++ b/test/cunit/schedule/odp_schedule_testsuites.c
@@ -0,0 +1,35 @@ 
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+#include "odp_schedule_testsuites.h"
+
+static CU_SuiteInfo suites[] = {
+	{
+		"Scheduler tests" ,
+		schedule_test_init,
+		schedule_test_finalize,
+		NULL,
+		NULL,
+		schedule_tests
+	},
+	CU_SUITE_INFO_NULL,
+};
+
+int main(void)
+{
+	/* initialize the CUnit test registry */
+	if (CUE_SUCCESS != CU_initialize_registry())
+		return CU_get_error();
+
+	/* register suites */
+	CU_register_suites(suites);
+	/* Run all tests using the CUnit Basic interface */
+	CU_basic_set_mode(CU_BRM_VERBOSE);
+	CU_basic_run_tests();
+	CU_cleanup_registry();
+
+	return CU_get_error();
+}
diff --git a/test/cunit/schedule/odp_schedule_testsuites.h b/test/cunit/schedule/odp_schedule_testsuites.h
new file mode 100644
index 0000000..67a2a69
--- /dev/null
+++ b/test/cunit/schedule/odp_schedule_testsuites.h
@@ -0,0 +1,21 @@ 
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+#ifndef ODP_SCHEDULE_TESTSUITES_H_
+#define ODP_SCHEDULE_TESTSUITES_H_
+
+#include "odp.h"
+#include <CUnit/Basic.h>
+
+/* Helper macro for CU_TestInfo initialization */
+#define _CU_TEST_INFO(test_func) {#test_func, test_func}
+
+extern struct CU_TestInfo schedule_tests[];
+
+extern int schedule_test_init(void);
+extern int schedule_test_finalize(void);
+
+#endif /* ODP_SCHEDULE_TESTSUITES_H_ */