diff mbox

validation: add odp_schedule_pause and odp_schedule_resume tests

Message ID 1418821811-11734-1-git-send-email-ciprian.barbu@linaro.org
State New

Commit Message

Ciprian Barbu Dec. 17, 2014, 1:10 p.m. UTC
Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
---
 test/validation/odp_schedule.c | 63 ++++++++++++++++++++++++++++++++++++++----
 1 file changed, 58 insertions(+), 5 deletions(-)

Comments

Ciprian Barbu Jan. 6, 2015, 1:17 p.m. UTC | #1
On Wed, Dec 17, 2014 at 5:09 PM, Jerin Jacob
<jerin.jacob@caviumnetworks.com> wrote:
> On Wed, Dec 17, 2014 at 03:10:11PM +0200, Ciprian Barbu wrote:
>> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
>> ---
>>  test/validation/odp_schedule.c | 63 ++++++++++++++++++++++++++++++++++++++----
>>  1 file changed, 58 insertions(+), 5 deletions(-)
>>
>> diff --git a/test/validation/odp_schedule.c b/test/validation/odp_schedule.c
>> index 31be742..bdbcf77 100644
>> --- a/test/validation/odp_schedule.c
>> +++ b/test/validation/odp_schedule.c
>> @@ -11,9 +11,11 @@
>>  #define MSG_POOL_SIZE                (4*1024*1024)
>>  #define QUEUES_PER_PRIO              16
>>  #define BUF_SIZE             64
>> -#define TEST_NUM_BUFS                100
>> +#define NUM_BUFS             100
>>  #define BURST_BUF_SIZE               4
>> -#define TEST_NUM_BUFS_EXCL   10000
>> +#define NUM_BUFS_EXCL                10000
>> +#define NUM_BUFS_PAUSE               1000
>> +#define NUM_BUFS_BEFORE_PAUSE        10
>>
>>  #define GLOBALS_SHM_NAME     "test_globals"
>>  #define MSG_POOL_NAME                "msg_pool"
>> @@ -229,7 +231,7 @@ static void schedule_common(odp_schedule_sync_t sync, int num_queues,
>>       args.sync = sync;
>>       args.num_queues = num_queues;
>>       args.num_prio = num_prio;
>> -     args.num_bufs = TEST_NUM_BUFS;
>> +     args.num_bufs = NUM_BUFS;
>>       args.num_cores = 1;
>>       args.enable_schd_multi = enable_schd_multi;
>>       args.enable_excl_atomic = 0;    /* Not needed with a single core */
>> @@ -261,9 +263,9 @@ static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
>>       thr_args->num_queues = num_queues;
>>       thr_args->num_prio = num_prio;
>>       if (enable_excl_atomic)
>> -             thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
>> +             thr_args->num_bufs = NUM_BUFS_EXCL;
>>       else
>> -             thr_args->num_bufs = TEST_NUM_BUFS;
>> +             thr_args->num_bufs = NUM_BUFS;
>>       thr_args->num_cores = globals->core_count;
>>       thr_args->enable_schd_multi = enable_schd_multi;
>>       thr_args->enable_excl_atomic = enable_excl_atomic;
>> @@ -459,6 +461,56 @@ static void test_schedule_multi_1q_mt_a_excl(void)
>>                        ENABLE_EXCL_ATOMIC);
>>  }
>>
>> +static void test_schedule_pause_resume(void)
>> +{
>> +     odp_queue_t queue;
>> +     odp_buffer_t buf;
>> +     odp_queue_t from;
>> +     int i;
>> +     int local_bufs = 0;
>> +
>> +     queue = odp_queue_lookup("sched_0_0_n");
>> +     CU_ASSERT(queue != ODP_QUEUE_INVALID);
>> +
>> +     pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
>> +     CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
>> +
>> +
>> +     for (i = 0; i < NUM_BUFS_PAUSE; i++) {
>> +             buf = odp_buffer_alloc(pool);
>> +             CU_ASSERT(buf != ODP_BUFFER_INVALID);
>> +             odp_queue_enq(queue, buf);
>> +     }
>> +
>> +     for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
>> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>> +             CU_ASSERT(from == queue);
>> +             odp_buffer_free(buf);
>> +     }
>> +
>> +     odp_schedule_pause();
>> +
>> +     while (1) {
>> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>> +             if (buf == ODP_BUFFER_INVALID)
>> +                     break;
>> +
>> +             CU_ASSERT(from == queue);
>> +             odp_buffer_free(buf);
>> +             local_bufs++;
>> +     }
>> +
>> +     CU_ASSERT(local_bufs < NUM_BUFS_PAUSE - NUM_BUFS_BEFORE_PAUSE);
>
> What is the expected behavior here? Shouldn't it be CU_ASSERT(local_bufs == 0),
> meaning a complete pause?

Sorry about the delay, I've been playing around with mutt and I must
have accidentally marked this email as read.
The explanation here is that after pausing the scheduling there might
still be locally reserved buffers (see the odp_schedule_pause
documentation). In linux-generic, for instance, the scheduler dequeues
buffers in bursts; odp_schedule_pause only stops further dequeues, so
buffers may still sit in those local 'reservoirs'. With that in mind,
the check above verifies that after pausing only a limited number of
buffers are still scheduled; in other words, pausing works even though
not every buffer has been drained yet (a condensed sketch of this
drain step follows at the end of this message).

>
>> +
>> +     odp_schedule_resume();
>> +
>> +     for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i < NUM_BUFS_PAUSE; i++) {
>> +             buf = odp_schedule(&from, ODP_SCHED_WAIT);
>> +             CU_ASSERT(from == queue);
>> +             odp_buffer_free(buf);
>> +     }
>> +}
>> +
>>  static int create_queues(void)
>>  {
>>       int i, j, prios;
>> @@ -594,6 +646,7 @@ struct CU_TestInfo test_odp_schedule[] = {
>>       {"schedule_multi_mq_mt_prio_a", test_schedule_multi_mq_mt_prio_a},
>>       {"schedule_multi_mq_mt_prio_o", test_schedule_multi_mq_mt_prio_o},
>>       {"schedule_multi_1q_mt_a_excl", test_schedule_multi_1q_mt_a_excl},
>> +     {"schedule_pause_resume",       test_schedule_pause_resume},
>>       CU_TEST_INFO_NULL,
>>  };
>>
>> --
>> 1.8.3.2
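
For reference, the drain step described above (and exercised by the test)
can be condensed into a small helper like the sketch below. The helper name
is illustrative; only ODP calls that already appear in the patch are used:

/* Drain events the scheduler has already pre-fetched into its
 * thread-local cache. After odp_schedule_pause() no new events are
 * handed out, so the loop ends once the cache is empty. Returns the
 * number of cached events that were still pending. */
static int drain_sched_cache(void)
{
        odp_queue_t from;
        odp_buffer_t buf;
        int num = 0;

        while ((buf = odp_schedule(&from, ODP_SCHED_NO_WAIT)) !=
               ODP_BUFFER_INVALID) {
                odp_buffer_free(buf);
                num++;
        }

        return num;
}

In the test this count is local_bufs; the assertion only bounds it from
above because how many buffers end up in the cache, if any, is
implementation specific.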
Bill Fischofer Jan. 6, 2015, 1:40 p.m. UTC | #2
Caches should be transparent.  While this may be needed here, it's a poor
set of semantics to expose as part of the formal APIs.  This is definitely
something we need to address.  My suggestion is that an odp_schedule_pause()
call should cause an implicit cache flush if the implementation is using a
scheduling cache.  That way any cache being used is truly transparent, and
moreover there won't be unnecessary delays in event processing, since who
knows how long a pause may last?  Clearly it won't be brief, since otherwise
the application would not have bothered with a pause/resume in the first
place.
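
To make the contrast concrete, under the proposed semantics (not current
API behaviour, purely a sketch of the suggestion above) a thread leaving
the scheduler would need no drain loop at all:

        odp_schedule_pause();   /* proposed: implicitly flushes any thread-local scheduling cache */
        /* ... arbitrarily long non-scheduling work ... */
        odp_schedule_resume();

Today, by contrast, the application must keep calling odp_schedule() with
ODP_SCHED_NO_WAIT after the pause until ODP_BUFFER_INVALID is returned, as
the test does.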

Mike Holmes Jan. 6, 2015, 1:48 p.m. UTC | #3
Should a bug be filed to track the needed change, or is it important for 1.0
and does it need to be in the delta doc?

Bill Fischofer Jan. 6, 2015, 2:03 p.m. UTC | #4
I think it's something we need to discuss during the sync call.

Ciprian Barbu Jan. 7, 2015, 9:39 a.m. UTC | #5
Sorry, I couldn't join you on the ODP call yesterday; would you mind giving
a brief update on what was decided?

Mike Holmes Jan. 7, 2015, 7:41 p.m. UTC | #6
I am unsure whether I need to pay attention to this for 0.7.0.

Ciprian Barbu Jan. 8, 2015, 12:27 p.m. UTC | #7
It's still a bit unclear after the discussion we had with Petri, but I think
we need to keep the behavior as it is, meaning applications need to
take care of the scheduling "cache" themselves and consume everything after
issuing an odp_schedule_pause call. That would also mean my test case
behaves as expected.

Bill Fischofer Jan. 8, 2015, 12:37 p.m. UTC | #8
That certainly seems to be the upshot for now from yesterday's
discussions.  Whether this is something that will persist longer-term
remains to be seen.

The net is that if you want to pause, there is a certain amount of
choreography that the application needs to go through to perform such
role-switching gracefully.  A good reason, perhaps, to consider designing
the application so that pauses are not needed.
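
A rough sketch of that choreography under the current semantics, where
do_other_work() is a hypothetical placeholder for whatever the thread
pauses for:

extern void do_other_work(void);   /* hypothetical placeholder */

static void worker_switch_role(void)
{
        odp_queue_t from;
        odp_buffer_t buf;

        /* 1. Ask the scheduler to stop handing this thread new work. */
        odp_schedule_pause();

        /* 2. Consume (or otherwise dispose of) everything already
         *    pre-fetched into the thread-local cache. */
        while ((buf = odp_schedule(&from, ODP_SCHED_NO_WAIT)) !=
               ODP_BUFFER_INVALID)
                odp_buffer_free(buf);

        /* 3. Do the non-scheduling role. */
        do_other_work();

        /* 4. Rejoin event scheduling. */
        odp_schedule_resume();
}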

We still need to discuss how to deal with potentially cached work if we
want to be able to support a more robust programming model in which
threads/processes can fail without killing the entire application.

On Thu, Jan 8, 2015 at 6:27 AM, Ciprian Barbu <ciprian.barbu@linaro.org>
wrote:

> On Wed, Jan 7, 2015 at 9:41 PM, Mike Holmes <mike.holmes@linaro.org>
> wrote:
> > I am unsure if I need to pay attention to this for 0.7.0
>
> Still abit unclear after the discussion we had with Petri, but I think
> we need to keep the behavior as it is, meaning applications need to
> take care of the scheduling "cache", and consume everything after
> issuing an odp_schedule_pause call. This would also mean my test case
> behaves as expected.
>
> >
> > On 7 January 2015 at 04:39, Ciprian Barbu <ciprian.barbu@linaro.org>
> wrote:
> >>
> >> On Tue, Jan 6, 2015 at 4:03 PM, Bill Fischofer
> >> <bill.fischofer@linaro.org> wrote:
> >> > I think it's something we need to discuss during the sync call.
> >> >
> >> > On Tue, Jan 6, 2015 at 7:48 AM, Mike Holmes <mike.holmes@linaro.org>
> >> > wrote:
> >> >>
> >> >> Should a bug be made to track a needed change or is it important for
> >> >> 1.0
> >> >> and needs to be in the delta doc ?
> >> >>
> >> >> On 6 January 2015 at 08:40, Bill Fischofer <
> bill.fischofer@linaro.org>
> >> >> wrote:
> >> >>>
> >> >>> Caches should be transparent.  While this may be needed here, it's a
> >> >>> poor
> >> >>> set of semantics to expose as part of the formal APIs.  This is
> >> >>> definitely
> >> >>> something we need to address.  My suggestion is that a
> >> >>> odp_schedule_pause()
> >> >>> should cause an implicit cache flush if the implementation is using
> a
> >> >>> scheduling cache.  That way any cache being used is truly
> transparent
> >> >>> and
> >> >>> moreover there won't be unnecessary delays in event processing since
> >> >>> who
> >> >>> knows how long a pause may last?  Clearly it won't be brief since
> >> >>> otherwise
> >> >>> the application would not have bothered with a pause/resume in the
> >> >>> first
> >> >>> place.
> >>
> >> Sorry, I couldn't join you in the ODP call yesterday, mind if you give
> >> a brief update on what was decided?
> >>
> >> >>>
> >> >>> On Tue, Jan 6, 2015 at 7:17 AM, Ciprian Barbu
> >> >>> <ciprian.barbu@linaro.org>
> >> >>> wrote:
> >> >>>>
> >> >>>> On Wed, Dec 17, 2014 at 5:09 PM, Jerin Jacob
> >> >>>> <jerin.jacob@caviumnetworks.com> wrote:
> >> >>>> > On Wed, Dec 17, 2014 at 03:10:11PM +0200, Ciprian Barbu wrote:
> >> >>>> >> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
> >> >>>> >> ---
> >> >>>> >>  test/validation/odp_schedule.c | 63
> >> >>>> >> ++++++++++++++++++++++++++++++++++++++----
> >> >>>> >>  1 file changed, 58 insertions(+), 5 deletions(-)
> >> >>>> >>
> >> >>>> >> diff --git a/test/validation/odp_schedule.c
> >> >>>> >> b/test/validation/odp_schedule.c
> >> >>>> >> index 31be742..bdbcf77 100644
> >> >>>> >> --- a/test/validation/odp_schedule.c
> >> >>>> >> +++ b/test/validation/odp_schedule.c
> >> >>>> >> @@ -11,9 +11,11 @@
> >> >>>> >>  #define MSG_POOL_SIZE                (4*1024*1024)
> >> >>>> >>  #define QUEUES_PER_PRIO              16
> >> >>>> >>  #define BUF_SIZE             64
> >> >>>> >> -#define TEST_NUM_BUFS                100
> >> >>>> >> +#define NUM_BUFS             100
> >> >>>> >>  #define BURST_BUF_SIZE               4
> >> >>>> >> -#define TEST_NUM_BUFS_EXCL   10000
> >> >>>> >> +#define NUM_BUFS_EXCL                10000
> >> >>>> >> +#define NUM_BUFS_PAUSE               1000
> >> >>>> >> +#define NUM_BUFS_BEFORE_PAUSE        10
> >> >>>> >>
> >> >>>> >>  #define GLOBALS_SHM_NAME     "test_globals"
> >> >>>> >>  #define MSG_POOL_NAME                "msg_pool"
> >> >>>> >> @@ -229,7 +231,7 @@ static void
> >> >>>> >> schedule_common(odp_schedule_sync_t
> >> >>>> >> sync, int num_queues,
> >> >>>> >>       args.sync = sync;
> >> >>>> >>       args.num_queues = num_queues;
> >> >>>> >>       args.num_prio = num_prio;
> >> >>>> >> -     args.num_bufs = TEST_NUM_BUFS;
> >> >>>> >> +     args.num_bufs = NUM_BUFS;
> >> >>>> >>       args.num_cores = 1;
> >> >>>> >>       args.enable_schd_multi = enable_schd_multi;
> >> >>>> >>       args.enable_excl_atomic = 0;    /* Not needed with a
> single
> >> >>>> >> core */
> >> >>>> >> @@ -261,9 +263,9 @@ static void
> >> >>>> >> parallel_execute(odp_schedule_sync_t
> >> >>>> >> sync, int num_queues,
> >> >>>> >>       thr_args->num_queues = num_queues;
> >> >>>> >>       thr_args->num_prio = num_prio;
> >> >>>> >>       if (enable_excl_atomic)
> >> >>>> >> -             thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
> >> >>>> >> +             thr_args->num_bufs = NUM_BUFS_EXCL;
> >> >>>> >>       else
> >> >>>> >> -             thr_args->num_bufs = TEST_NUM_BUFS;
> >> >>>> >> +             thr_args->num_bufs = NUM_BUFS;
> >> >>>> >>       thr_args->num_cores = globals->core_count;
> >> >>>> >>       thr_args->enable_schd_multi = enable_schd_multi;
> >> >>>> >>       thr_args->enable_excl_atomic = enable_excl_atomic;
> >> >>>> >> @@ -459,6 +461,56 @@ static void
> >> >>>> >> test_schedule_multi_1q_mt_a_excl(void)
> >> >>>> >>                        ENABLE_EXCL_ATOMIC);
> >> >>>> >>  }
> >> >>>> >>
> >> >>>> >> +static void test_schedule_pause_resume(void)
> >> >>>> >> +{
> >> >>>> >> +     odp_queue_t queue;
> >> >>>> >> +     odp_buffer_t buf;
> >> >>>> >> +     odp_queue_t from;
> >> >>>> >> +     int i;
> >> >>>> >> +     int local_bufs = 0;
> >> >>>> >> +
> >> >>>> >> +     queue = odp_queue_lookup("sched_0_0_n");
> >> >>>> >> +     CU_ASSERT(queue != ODP_QUEUE_INVALID);
> >> >>>> >> +
> >> >>>> >> +     pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
> >> >>>> >> +     CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
> >> >>>> >> +
> >> >>>> >> +
> >> >>>> >> +     for (i = 0; i < NUM_BUFS_PAUSE; i++) {
> >> >>>> >> +             buf = odp_buffer_alloc(pool);
> >> >>>> >> +             CU_ASSERT(buf != ODP_BUFFER_INVALID);
> >> >>>> >> +             odp_queue_enq(queue, buf);
> >> >>>> >> +     }
> >> >>>> >> +
> >> >>>> >> +     for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
> >> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
> >> >>>> >> +             CU_ASSERT(from == queue);
> >> >>>> >> +             odp_buffer_free(buf);
> >> >>>> >> +     }
> >> >>>> >> +
> >> >>>> >> +     odp_schedule_pause();
> >> >>>> >> +
> >> >>>> >> +     while (1) {
> >> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
> >> >>>> >> +             if (buf == ODP_BUFFER_INVALID)
> >> >>>> >> +                     break;
> >> >>>> >> +
> >> >>>> >> +             CU_ASSERT(from == queue);
> >> >>>> >> +             odp_buffer_free(buf);
> >> >>>> >> +             local_bufs++;
> >> >>>> >> +     }
> >> >>>> >> +
> >> >>>> >> +     CU_ASSERT(local_bufs < NUM_BUFS_PAUSE -
> >> >>>> >> NUM_BUFS_BEFORE_PAUSE);
> >> >>>> >
> >> >>>> > What is the expected behavior here? Shouldn't it be
> >> >>>> > CU_ASSERT(local_bufs == 0) ?
> >> >>>> > meaning, the complete pause ?
> >> >>>>
> >> >>>> Sorry about the delay, I've been playing around with mutt and I
> must
> >> >>>> have accidentally marked this email as read.
> >> >>>> The explanation here is that after pausing the scheduling, there
> >> >>>> might
> >> >>>> still be locally reserved buffers (see the odp_schedule_pause
> >> >>>> documentation). For linux-generic for instance the scheduler
> dequeues
> >> >>>> buffers in bursts, odp_schedule_pause only stops further dequeues,
> >> >>>> buffers may still be in the 'reservoirs'. With that in mind, the
> >> >>>> check
> >> >>>> above makes sure that after pausing only a limited number of
> packets
> >> >>>> are still scheduled; in other words, pausing seems to work, not all
> >> >>>> packets being drained.
> >> >>>>
> >> >>>> >
> >> >>>> >> +
> >> >>>> >> +     odp_schedule_resume();
> >> >>>> >> +
> >> >>>> >> +     for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i <
> >> >>>> >> NUM_BUFS_PAUSE; i++) {
> >> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_WAIT);
> >> >>>> >> +             CU_ASSERT(from == queue);
> >> >>>> >> +             odp_buffer_free(buf);
> >> >>>> >> +     }
> >> >>>> >> +}
> >> >>>> >> +
> >> >>>> >>  static int create_queues(void)
> >> >>>> >>  {
> >> >>>> >>       int i, j, prios;
> >> >>>> >> @@ -594,6 +646,7 @@ struct CU_TestInfo test_odp_schedule[] = {
> >> >>>> >>       {"schedule_multi_mq_mt_prio_a",
> >> >>>> >> test_schedule_multi_mq_mt_prio_a},
> >> >>>> >>       {"schedule_multi_mq_mt_prio_o",
> >> >>>> >> test_schedule_multi_mq_mt_prio_o},
> >> >>>> >>       {"schedule_multi_1q_mt_a_excl",
> >> >>>> >> test_schedule_multi_1q_mt_a_excl},
> >> >>>> >> +     {"schedule_pause_resume",
>  test_schedule_pause_resume},
> >> >>>> >>       CU_TEST_INFO_NULL,
> >> >>>> >>  };
> >> >>>> >>
> >> >>>> >> --
> >> >>>> >> 1.8.3.2
> >> >>>> >>
> >> >>>> >>
> >> >>>> >> _______________________________________________
> >> >>>> >> lng-odp mailing list
> >> >>>> >> lng-odp@lists.linaro.org
> >> >>>> >> http://lists.linaro.org/mailman/listinfo/lng-odp
> >> >>>>
> >> >>>> _______________________________________________
> >> >>>> lng-odp mailing list
> >> >>>> lng-odp@lists.linaro.org
> >> >>>> http://lists.linaro.org/mailman/listinfo/lng-odp
> >> >>>
> >> >>>
> >> >>>
> >> >>> _______________________________________________
> >> >>> lng-odp mailing list
> >> >>> lng-odp@lists.linaro.org
> >> >>> http://lists.linaro.org/mailman/listinfo/lng-odp
> >> >>>
> >> >>
> >> >>
> >> >>
> >> >> --
> >> >> Mike Holmes
> >> >> Linaro  Sr Technical Manager
> >> >> LNG - ODP
> >> >
> >> >
> >
> >
> >
> >
> > --
> > Mike Holmes
> > Linaro  Sr Technical Manager
> > LNG - ODP
>
Ciprian Barbu Jan. 14, 2015, 1:22 p.m. UTC | #9
ping

On Thu, Jan 8, 2015 at 2:37 PM, Bill Fischofer
<bill.fischofer@linaro.org> wrote:
> That certainly seems to be the upshot for now from yesterday's discussions.
> Whether this is something that will persist longer-term remains to be seen.
>
> The net is that if you want to pause there is a certain amount of choreography
> that the application needs to do to perform such role-switching in a
> graceful manner.  A good reason, perhaps, to consider designing the
> application so that pauses are not needed.
>
> We still need to discuss how to deal with potentially cached work if we want
> to be able to support a more robust programming model in which
> threads/processes can fail without killing the entire application.
>
> On Thu, Jan 8, 2015 at 6:27 AM, Ciprian Barbu <ciprian.barbu@linaro.org>
> wrote:
>>
>> On Wed, Jan 7, 2015 at 9:41 PM, Mike Holmes <mike.holmes@linaro.org>
>> wrote:
>> > I am unsure if I need to pay attention to this for 0.7.0
>>
>> Still a bit unclear after the discussion we had with Petri, but I think
>> we need to keep the behavior as it is, meaning applications need to
>> take care of the scheduling "cache", and consume everything after
>> issuing an odp_schedule_pause call. This would also mean my test case
>> behaves as expected.
>>
>> >
>> > On 7 January 2015 at 04:39, Ciprian Barbu <ciprian.barbu@linaro.org>
>> > wrote:
>> >>
>> >> On Tue, Jan 6, 2015 at 4:03 PM, Bill Fischofer
>> >> <bill.fischofer@linaro.org> wrote:
>> >> > I think it's something we need to discuss during the sync call.
>> >> >
>> >> > On Tue, Jan 6, 2015 at 7:48 AM, Mike Holmes <mike.holmes@linaro.org>
>> >> > wrote:
>> >> >>
>> >> >> Should a bug be made to track a needed change or is it important for
>> >> >> 1.0
>> >> >> and needs to be in the delta doc ?
>> >> >>
>> >> >> On 6 January 2015 at 08:40, Bill Fischofer
>> >> >> <bill.fischofer@linaro.org>
>> >> >> wrote:
>> >> >>>
>> >> >>> Caches should be transparent.  While this may be needed here, it's
>> >> >>> a
>> >> >>> poor
>> >> >>> set of semantics to expose as part of the formal APIs.  This is
>> >> >>> definitely
>> >> >>> something we need to address.  My suggestion is that a
>> >> >>> odp_schedule_pause()
>> >> >>> should cause an implicit cache flush if the implementation is using
>> >> >>> a
>> >> >>> scheduling cache.  That way any cache being used is truly
>> >> >>> transparent
>> >> >>> and
>> >> >>> moreover there won't be unnecessary delays in event processing
>> >> >>> since
>> >> >>> who
>> >> >>> knows how long a pause may last?  Clearly it won't be brief since
>> >> >>> otherwise
>> >> >>> the application would not have bothered with a pause/resume in the
>> >> >>> first
>> >> >>> place.
>> >>
>> >> Sorry, I couldn't join you in the ODP call yesterday, mind if you give
>> >> a brief update on what was decided?
>> >>
>> >> >>>
>> >> >>> On Tue, Jan 6, 2015 at 7:17 AM, Ciprian Barbu
>> >> >>> <ciprian.barbu@linaro.org>
>> >> >>> wrote:
>> >> >>>>
>> >> >>>> On Wed, Dec 17, 2014 at 5:09 PM, Jerin Jacob
>> >> >>>> <jerin.jacob@caviumnetworks.com> wrote:
>> >> >>>> > On Wed, Dec 17, 2014 at 03:10:11PM +0200, Ciprian Barbu wrote:
>> >> >>>> >> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
>> >> >>>> >> ---
>> >> >>>> >>  test/validation/odp_schedule.c | 63
>> >> >>>> >> ++++++++++++++++++++++++++++++++++++++----
>> >> >>>> >>  1 file changed, 58 insertions(+), 5 deletions(-)
>> >> >>>> >>
>> >> >>>> >> diff --git a/test/validation/odp_schedule.c
>> >> >>>> >> b/test/validation/odp_schedule.c
>> >> >>>> >> index 31be742..bdbcf77 100644
>> >> >>>> >> --- a/test/validation/odp_schedule.c
>> >> >>>> >> +++ b/test/validation/odp_schedule.c
>> >> >>>> >> @@ -11,9 +11,11 @@
>> >> >>>> >>  #define MSG_POOL_SIZE                (4*1024*1024)
>> >> >>>> >>  #define QUEUES_PER_PRIO              16
>> >> >>>> >>  #define BUF_SIZE             64
>> >> >>>> >> -#define TEST_NUM_BUFS                100
>> >> >>>> >> +#define NUM_BUFS             100
>> >> >>>> >>  #define BURST_BUF_SIZE               4
>> >> >>>> >> -#define TEST_NUM_BUFS_EXCL   10000
>> >> >>>> >> +#define NUM_BUFS_EXCL                10000
>> >> >>>> >> +#define NUM_BUFS_PAUSE               1000
>> >> >>>> >> +#define NUM_BUFS_BEFORE_PAUSE        10
>> >> >>>> >>
>> >> >>>> >>  #define GLOBALS_SHM_NAME     "test_globals"
>> >> >>>> >>  #define MSG_POOL_NAME                "msg_pool"
>> >> >>>> >> @@ -229,7 +231,7 @@ static void
>> >> >>>> >> schedule_common(odp_schedule_sync_t
>> >> >>>> >> sync, int num_queues,
>> >> >>>> >>       args.sync = sync;
>> >> >>>> >>       args.num_queues = num_queues;
>> >> >>>> >>       args.num_prio = num_prio;
>> >> >>>> >> -     args.num_bufs = TEST_NUM_BUFS;
>> >> >>>> >> +     args.num_bufs = NUM_BUFS;
>> >> >>>> >>       args.num_cores = 1;
>> >> >>>> >>       args.enable_schd_multi = enable_schd_multi;
>> >> >>>> >>       args.enable_excl_atomic = 0;    /* Not needed with a
>> >> >>>> >> single
>> >> >>>> >> core */
>> >> >>>> >> @@ -261,9 +263,9 @@ static void
>> >> >>>> >> parallel_execute(odp_schedule_sync_t
>> >> >>>> >> sync, int num_queues,
>> >> >>>> >>       thr_args->num_queues = num_queues;
>> >> >>>> >>       thr_args->num_prio = num_prio;
>> >> >>>> >>       if (enable_excl_atomic)
>> >> >>>> >> -             thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
>> >> >>>> >> +             thr_args->num_bufs = NUM_BUFS_EXCL;
>> >> >>>> >>       else
>> >> >>>> >> -             thr_args->num_bufs = TEST_NUM_BUFS;
>> >> >>>> >> +             thr_args->num_bufs = NUM_BUFS;
>> >> >>>> >>       thr_args->num_cores = globals->core_count;
>> >> >>>> >>       thr_args->enable_schd_multi = enable_schd_multi;
>> >> >>>> >>       thr_args->enable_excl_atomic = enable_excl_atomic;
>> >> >>>> >> @@ -459,6 +461,56 @@ static void
>> >> >>>> >> test_schedule_multi_1q_mt_a_excl(void)
>> >> >>>> >>                        ENABLE_EXCL_ATOMIC);
>> >> >>>> >>  }
>> >> >>>> >>
>> >> >>>> >> +static void test_schedule_pause_resume(void)
>> >> >>>> >> +{
>> >> >>>> >> +     odp_queue_t queue;
>> >> >>>> >> +     odp_buffer_t buf;
>> >> >>>> >> +     odp_queue_t from;
>> >> >>>> >> +     int i;
>> >> >>>> >> +     int local_bufs = 0;
>> >> >>>> >> +
>> >> >>>> >> +     queue = odp_queue_lookup("sched_0_0_n");
>> >> >>>> >> +     CU_ASSERT(queue != ODP_QUEUE_INVALID);
>> >> >>>> >> +
>> >> >>>> >> +     pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
>> >> >>>> >> +     CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
>> >> >>>> >> +
>> >> >>>> >> +
>> >> >>>> >> +     for (i = 0; i < NUM_BUFS_PAUSE; i++) {
>> >> >>>> >> +             buf = odp_buffer_alloc(pool);
>> >> >>>> >> +             CU_ASSERT(buf != ODP_BUFFER_INVALID);
>> >> >>>> >> +             odp_queue_enq(queue, buf);
>> >> >>>> >> +     }
>> >> >>>> >> +
>> >> >>>> >> +     for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
>> >> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>> >> >>>> >> +             CU_ASSERT(from == queue);
>> >> >>>> >> +             odp_buffer_free(buf);
>> >> >>>> >> +     }
>> >> >>>> >> +
>> >> >>>> >> +     odp_schedule_pause();
>> >> >>>> >> +
>> >> >>>> >> +     while (1) {
>> >> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>> >> >>>> >> +             if (buf == ODP_BUFFER_INVALID)
>> >> >>>> >> +                     break;
>> >> >>>> >> +
>> >> >>>> >> +             CU_ASSERT(from == queue);
>> >> >>>> >> +             odp_buffer_free(buf);
>> >> >>>> >> +             local_bufs++;
>> >> >>>> >> +     }
>> >> >>>> >> +
>> >> >>>> >> +     CU_ASSERT(local_bufs < NUM_BUFS_PAUSE -
>> >> >>>> >> NUM_BUFS_BEFORE_PAUSE);
>> >> >>>> >
>> >> >>>> > What is the expected behavior here? Shouldn't it be
>> >> >>>> > CU_ASSERT(local_bufs == 0) ?
>> >> >>>> > meaning, the complete pause ?
>> >> >>>>
>> >> >>>> Sorry about the delay, I've been playing around with mutt and I
>> >> >>>> must
>> >> >>>> have accidentally marked this email as read.
>> >> >>>> The explanation here is that after pausing the scheduling, there
>> >> >>>> might
>> >> >>>> still be locally reserved buffers (see the odp_schedule_pause
>> >> >>>> documentation). For linux-generic for instance the scheduler
>> >> >>>> dequeues
>> >> >>>> buffers in bursts, odp_schedule_pause only stops further
>> >> >>>> dequeues,
>> >> >>>> buffers may still be in the 'reservoirs'. With that in mind, the
>> >> >>>> check
>> >> >>>> above makes sure that after pausing only a limited number of
>> >> >>>> packets
>> >> >>>> are still scheduled; in other words, pausing seems to work, not all
>> >> >>>> packets being drained.
>> >> >>>>
>> >> >>>> >
>> >> >>>> >> +
>> >> >>>> >> +     odp_schedule_resume();
>> >> >>>> >> +
>> >> >>>> >> +     for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i <
>> >> >>>> >> NUM_BUFS_PAUSE; i++) {
>> >> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_WAIT);
>> >> >>>> >> +             CU_ASSERT(from == queue);
>> >> >>>> >> +             odp_buffer_free(buf);
>> >> >>>> >> +     }
>> >> >>>> >> +}
>> >> >>>> >> +
>> >> >>>> >>  static int create_queues(void)
>> >> >>>> >>  {
>> >> >>>> >>       int i, j, prios;
>> >> >>>> >> @@ -594,6 +646,7 @@ struct CU_TestInfo test_odp_schedule[] = {
>> >> >>>> >>       {"schedule_multi_mq_mt_prio_a",
>> >> >>>> >> test_schedule_multi_mq_mt_prio_a},
>> >> >>>> >>       {"schedule_multi_mq_mt_prio_o",
>> >> >>>> >> test_schedule_multi_mq_mt_prio_o},
>> >> >>>> >>       {"schedule_multi_1q_mt_a_excl",
>> >> >>>> >> test_schedule_multi_1q_mt_a_excl},
>> >> >>>> >> +     {"schedule_pause_resume",
>> >> >>>> >> test_schedule_pause_resume},
>> >> >>>> >>       CU_TEST_INFO_NULL,
>> >> >>>> >>  };
>> >> >>>> >>
>> >> >>>> >> --
>> >> >>>> >> 1.8.3.2
>> >> >>>> >>
>> >> >>>> >>
>> >> >>>> >> _______________________________________________
>> >> >>>> >> lng-odp mailing list
>> >> >>>> >> lng-odp@lists.linaro.org
>> >> >>>> >> http://lists.linaro.org/mailman/listinfo/lng-odp
>> >> >>>>
>> >> >>>> _______________________________________________
>> >> >>>> lng-odp mailing list
>> >> >>>> lng-odp@lists.linaro.org
>> >> >>>> http://lists.linaro.org/mailman/listinfo/lng-odp
>> >> >>>
>> >> >>>
>> >> >>>
>> >> >>> _______________________________________________
>> >> >>> lng-odp mailing list
>> >> >>> lng-odp@lists.linaro.org
>> >> >>> http://lists.linaro.org/mailman/listinfo/lng-odp
>> >> >>>
>> >> >>
>> >> >>
>> >> >>
>> >> >> --
>> >> >> Mike Holmes
>> >> >> Linaro  Sr Technical Manager
>> >> >> LNG - ODP
>> >> >
>> >> >
>> >
>> >
>> >
>> >
>> > --
>> > Mike Holmes
>> > Linaro  Sr Technical Manager
>> > LNG - ODP
>
>
Ola Liljedahl Jan. 14, 2015, 1:28 p.m. UTC | #10
On 7 January 2015 at 20:41, Mike Holmes <mike.holmes@linaro.org> wrote:
> I am unsure if I need to pay attention to this for 0.7.0
We need to have a decision (and implementation) for ODP 1.0 though.
Scheduling and its semantics are important aspects of ODP.

>
> On 7 January 2015 at 04:39, Ciprian Barbu <ciprian.barbu@linaro.org> wrote:
>>
>> On Tue, Jan 6, 2015 at 4:03 PM, Bill Fischofer
>> <bill.fischofer@linaro.org> wrote:
>> > I think it's something we need to discuss during the sync call.
>> >
>> > On Tue, Jan 6, 2015 at 7:48 AM, Mike Holmes <mike.holmes@linaro.org>
>> > wrote:
>> >>
>> >> Should a bug be made to track a needed change or is it important for
>> >> 1.0
>> >> and needs to be in the delta doc ?
>> >>
>> >> On 6 January 2015 at 08:40, Bill Fischofer <bill.fischofer@linaro.org>
>> >> wrote:
>> >>>
>> >>> Caches should be transparent.  While this may be needed here, it's a
>> >>> poor
>> >>> set of semantics to expose as part of the formal APIs.  This is
>> >>> definitely
>> >>> something we need to address.  My suggestion is that a
>> >>> odp_schedule_pause()
>> >>> should cause an implicit cache flush if the implementation is using a
>> >>> scheduling cache.  That way any cache being used is truly transparent
>> >>> and
>> >>> moreover there won't be unnecessary delays in event processing since
>> >>> who
>> >>> knows how long a pause may last?  Clearly it won't be brief since
>> >>> otherwise
>> >>> the application would not have bothered with a pause/resume in the
>> >>> first
>> >>> place.
>>
>> Sorry, I couldn't join you in the ODP call yesterday, mind if you give
>> a brief update on what was decided?
>>
>> >>>
>> >>> On Tue, Jan 6, 2015 at 7:17 AM, Ciprian Barbu
>> >>> <ciprian.barbu@linaro.org>
>> >>> wrote:
>> >>>>
>> >>>> On Wed, Dec 17, 2014 at 5:09 PM, Jerin Jacob
>> >>>> <jerin.jacob@caviumnetworks.com> wrote:
>> >>>> > On Wed, Dec 17, 2014 at 03:10:11PM +0200, Ciprian Barbu wrote:
>> >>>> >> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
>> >>>> >> ---
>> >>>> >>  test/validation/odp_schedule.c | 63
>> >>>> >> ++++++++++++++++++++++++++++++++++++++----
>> >>>> >>  1 file changed, 58 insertions(+), 5 deletions(-)
>> >>>> >>
>> >>>> >> diff --git a/test/validation/odp_schedule.c
>> >>>> >> b/test/validation/odp_schedule.c
>> >>>> >> index 31be742..bdbcf77 100644
>> >>>> >> --- a/test/validation/odp_schedule.c
>> >>>> >> +++ b/test/validation/odp_schedule.c
>> >>>> >> @@ -11,9 +11,11 @@
>> >>>> >>  #define MSG_POOL_SIZE                (4*1024*1024)
>> >>>> >>  #define QUEUES_PER_PRIO              16
>> >>>> >>  #define BUF_SIZE             64
>> >>>> >> -#define TEST_NUM_BUFS                100
>> >>>> >> +#define NUM_BUFS             100
>> >>>> >>  #define BURST_BUF_SIZE               4
>> >>>> >> -#define TEST_NUM_BUFS_EXCL   10000
>> >>>> >> +#define NUM_BUFS_EXCL                10000
>> >>>> >> +#define NUM_BUFS_PAUSE               1000
>> >>>> >> +#define NUM_BUFS_BEFORE_PAUSE        10
>> >>>> >>
>> >>>> >>  #define GLOBALS_SHM_NAME     "test_globals"
>> >>>> >>  #define MSG_POOL_NAME                "msg_pool"
>> >>>> >> @@ -229,7 +231,7 @@ static void
>> >>>> >> schedule_common(odp_schedule_sync_t
>> >>>> >> sync, int num_queues,
>> >>>> >>       args.sync = sync;
>> >>>> >>       args.num_queues = num_queues;
>> >>>> >>       args.num_prio = num_prio;
>> >>>> >> -     args.num_bufs = TEST_NUM_BUFS;
>> >>>> >> +     args.num_bufs = NUM_BUFS;
>> >>>> >>       args.num_cores = 1;
>> >>>> >>       args.enable_schd_multi = enable_schd_multi;
>> >>>> >>       args.enable_excl_atomic = 0;    /* Not needed with a single
>> >>>> >> core */
>> >>>> >> @@ -261,9 +263,9 @@ static void
>> >>>> >> parallel_execute(odp_schedule_sync_t
>> >>>> >> sync, int num_queues,
>> >>>> >>       thr_args->num_queues = num_queues;
>> >>>> >>       thr_args->num_prio = num_prio;
>> >>>> >>       if (enable_excl_atomic)
>> >>>> >> -             thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
>> >>>> >> +             thr_args->num_bufs = NUM_BUFS_EXCL;
>> >>>> >>       else
>> >>>> >> -             thr_args->num_bufs = TEST_NUM_BUFS;
>> >>>> >> +             thr_args->num_bufs = NUM_BUFS;
>> >>>> >>       thr_args->num_cores = globals->core_count;
>> >>>> >>       thr_args->enable_schd_multi = enable_schd_multi;
>> >>>> >>       thr_args->enable_excl_atomic = enable_excl_atomic;
>> >>>> >> @@ -459,6 +461,56 @@ static void
>> >>>> >> test_schedule_multi_1q_mt_a_excl(void)
>> >>>> >>                        ENABLE_EXCL_ATOMIC);
>> >>>> >>  }
>> >>>> >>
>> >>>> >> +static void test_schedule_pause_resume(void)
>> >>>> >> +{
>> >>>> >> +     odp_queue_t queue;
>> >>>> >> +     odp_buffer_t buf;
>> >>>> >> +     odp_queue_t from;
>> >>>> >> +     int i;
>> >>>> >> +     int local_bufs = 0;
>> >>>> >> +
>> >>>> >> +     queue = odp_queue_lookup("sched_0_0_n");
>> >>>> >> +     CU_ASSERT(queue != ODP_QUEUE_INVALID);
>> >>>> >> +
>> >>>> >> +     pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
>> >>>> >> +     CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
>> >>>> >> +
>> >>>> >> +
>> >>>> >> +     for (i = 0; i < NUM_BUFS_PAUSE; i++) {
>> >>>> >> +             buf = odp_buffer_alloc(pool);
>> >>>> >> +             CU_ASSERT(buf != ODP_BUFFER_INVALID);
>> >>>> >> +             odp_queue_enq(queue, buf);
>> >>>> >> +     }
>> >>>> >> +
>> >>>> >> +     for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
>> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>> >>>> >> +             CU_ASSERT(from == queue);
>> >>>> >> +             odp_buffer_free(buf);
>> >>>> >> +     }
>> >>>> >> +
>> >>>> >> +     odp_schedule_pause();
>> >>>> >> +
>> >>>> >> +     while (1) {
>> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>> >>>> >> +             if (buf == ODP_BUFFER_INVALID)
>> >>>> >> +                     break;
>> >>>> >> +
>> >>>> >> +             CU_ASSERT(from == queue);
>> >>>> >> +             odp_buffer_free(buf);
>> >>>> >> +             local_bufs++;
>> >>>> >> +     }
>> >>>> >> +
>> >>>> >> +     CU_ASSERT(local_bufs < NUM_BUFS_PAUSE -
>> >>>> >> NUM_BUFS_BEFORE_PAUSE);
>> >>>> >
>> >>>> > What is the expected behavior here? Shouldn't it be
>> >>>> > CU_ASSERT(local_bufs == 0) ?
>> >>>> > meaning, the complete pause ?
>> >>>>
>> >>>> Sorry about the delay, I've been playing around with mutt and I must
>> >>>> have accidentally marked this email as read.
>> >>>> The explanation here is that after pausing the scheduling, there
>> >>>> might
>> >>>> still be locally reserved buffers (see the odp_schedule_pause
>> >>>> documentation). For linux-generic for instance the scheduler dequeues
>> >>>> buffers in bursts, odp_schedule_pause only stops further dequeues,
>> >>>> buffers may still be in the 'reservoirs'. With that in mind, the
>> >>>> check
>> >>>> above makes sure that after pausing only a limited number of packets
>> >>>> are still scheduled; in other words, pausing seems to work, not all
>> >>>> packets being drained.
>> >>>>
>> >>>> >
>> >>>> >> +
>> >>>> >> +     odp_schedule_resume();
>> >>>> >> +
>> >>>> >> +     for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i <
>> >>>> >> NUM_BUFS_PAUSE; i++) {
>> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_WAIT);
>> >>>> >> +             CU_ASSERT(from == queue);
>> >>>> >> +             odp_buffer_free(buf);
>> >>>> >> +     }
>> >>>> >> +}
>> >>>> >> +
>> >>>> >>  static int create_queues(void)
>> >>>> >>  {
>> >>>> >>       int i, j, prios;
>> >>>> >> @@ -594,6 +646,7 @@ struct CU_TestInfo test_odp_schedule[] = {
>> >>>> >>       {"schedule_multi_mq_mt_prio_a",
>> >>>> >> test_schedule_multi_mq_mt_prio_a},
>> >>>> >>       {"schedule_multi_mq_mt_prio_o",
>> >>>> >> test_schedule_multi_mq_mt_prio_o},
>> >>>> >>       {"schedule_multi_1q_mt_a_excl",
>> >>>> >> test_schedule_multi_1q_mt_a_excl},
>> >>>> >> +     {"schedule_pause_resume",       test_schedule_pause_resume},
>> >>>> >>       CU_TEST_INFO_NULL,
>> >>>> >>  };
>> >>>> >>
>> >>>> >> --
>> >>>> >> 1.8.3.2
>> >>>> >>
>> >>>> >>
>> >>>> >> _______________________________________________
>> >>>> >> lng-odp mailing list
>> >>>> >> lng-odp@lists.linaro.org
>> >>>> >> http://lists.linaro.org/mailman/listinfo/lng-odp
>> >>>>
>> >>>> _______________________________________________
>> >>>> lng-odp mailing list
>> >>>> lng-odp@lists.linaro.org
>> >>>> http://lists.linaro.org/mailman/listinfo/lng-odp
>> >>>
>> >>>
>> >>>
>> >>> _______________________________________________
>> >>> lng-odp mailing list
>> >>> lng-odp@lists.linaro.org
>> >>> http://lists.linaro.org/mailman/listinfo/lng-odp
>> >>>
>> >>
>> >>
>> >>
>> >> --
>> >> Mike Holmes
>> >> Linaro  Sr Technical Manager
>> >> LNG - ODP
>> >
>> >
>
>
>
>
> --
> Mike Holmes
> Linaro  Sr Technical Manager
> LNG - ODP
>
> _______________________________________________
> lng-odp mailing list
> lng-odp@lists.linaro.org
> http://lists.linaro.org/mailman/listinfo/lng-odp
>
Ciprian Barbu Jan. 14, 2015, 1:35 p.m. UTC | #11
On Wed, Jan 14, 2015 at 3:28 PM, Ola Liljedahl <ola.liljedahl@linaro.org> wrote:
> On 7 January 2015 at 20:41, Mike Holmes <mike.holmes@linaro.org> wrote:
>> I am unsure if I need to pay attention to this for 0.7.0
> We need to have a decision (and implementation) for ODP 1.0 though.
> Scheduling and its semantics are important aspects of ODP.

The odp_schedule_pause API is already documented and implemented, I
didn't exactly catch from Petri if we will keep the behavior for 1.0,
but what is the problem with covering this API in its current form for
at least 0.7 and 0.8?
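
To make the current semantics concrete, the pattern an application is
expected to follow looks roughly like this (just a sketch of my reading of
the linux-generic behavior, not code from the patch; it assumes the usual
<odp.h> umbrella header and the buffer-based odp_schedule() used in the
test, and the function names are made up):

#include <odp.h>

/* Worker thread stops taking new scheduled work (sketch, no error handling). */
static void worker_quiesce(void)
{
	odp_queue_t from;
	odp_buffer_t buf;

	/* Stop further dequeues from the scheduler... */
	odp_schedule_pause();

	/* ...but events may still sit in the thread-local scheduling cache,
	 * so with the current semantics the application drains them itself. */
	while (1) {
		buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
		if (buf == ODP_BUFFER_INVALID)
			break;
		/* Process or free the event; the sketch just frees it. */
		odp_buffer_free(buf);
	}
}

/* Later, when the worker wants scheduled work again. */
static void worker_restart(void)
{
	odp_schedule_resume();
}

If we later go with Bill's suggestion of an implicit flush on pause, only
the drain loop above would become unnecessary, so covering the current form
in the validation suite still looks worthwhile to me.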

>
>>
>> On 7 January 2015 at 04:39, Ciprian Barbu <ciprian.barbu@linaro.org> wrote:
>>>
>>> On Tue, Jan 6, 2015 at 4:03 PM, Bill Fischofer
>>> <bill.fischofer@linaro.org> wrote:
>>> > I think it's something we need to discuss during the sync call.
>>> >
>>> > On Tue, Jan 6, 2015 at 7:48 AM, Mike Holmes <mike.holmes@linaro.org>
>>> > wrote:
>>> >>
>>> >> Should a bug be made to track a needed change or is it important for
>>> >> 1.0
>>> >> and needs to be in the delta doc ?
>>> >>
>>> >> On 6 January 2015 at 08:40, Bill Fischofer <bill.fischofer@linaro.org>
>>> >> wrote:
>>> >>>
>>> >>> Caches should be transparent.  While this may be needed here, it's a
>>> >>> poor
>>> >>> set of semantics to expose as part of the formal APIs.  This is
>>> >>> definitely
>>> >>> something we need to address.  My suggestion is that a
>>> >>> odp_schedule_pause()
>>> >>> should cause an implicit cache flush if the implementation is using a
>>> >>> scheduling cache.  That way any cache being used is truly transparent
>>> >>> and
>>> >>> moreover there won't be unnecessary delays in event processing since
>>> >>> who
>>> >>> knows how long a pause may last?  Clearly it won't be brief since
>>> >>> otherwise
>>> >>> the application would not have bothered with a pause/resume in the
>>> >>> first
>>> >>> place.
>>>
>>> Sorry, I couldn't join you in the ODP call yesterday, mind if you give
>>> a brief update on what was decided?
>>>
>>> >>>
>>> >>> On Tue, Jan 6, 2015 at 7:17 AM, Ciprian Barbu
>>> >>> <ciprian.barbu@linaro.org>
>>> >>> wrote:
>>> >>>>
>>> >>>> On Wed, Dec 17, 2014 at 5:09 PM, Jerin Jacob
>>> >>>> <jerin.jacob@caviumnetworks.com> wrote:
>>> >>>> > On Wed, Dec 17, 2014 at 03:10:11PM +0200, Ciprian Barbu wrote:
>>> >>>> >> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
>>> >>>> >> ---
>>> >>>> >>  test/validation/odp_schedule.c | 63
>>> >>>> >> ++++++++++++++++++++++++++++++++++++++----
>>> >>>> >>  1 file changed, 58 insertions(+), 5 deletions(-)
>>> >>>> >>
>>> >>>> >> diff --git a/test/validation/odp_schedule.c
>>> >>>> >> b/test/validation/odp_schedule.c
>>> >>>> >> index 31be742..bdbcf77 100644
>>> >>>> >> --- a/test/validation/odp_schedule.c
>>> >>>> >> +++ b/test/validation/odp_schedule.c
>>> >>>> >> @@ -11,9 +11,11 @@
>>> >>>> >>  #define MSG_POOL_SIZE                (4*1024*1024)
>>> >>>> >>  #define QUEUES_PER_PRIO              16
>>> >>>> >>  #define BUF_SIZE             64
>>> >>>> >> -#define TEST_NUM_BUFS                100
>>> >>>> >> +#define NUM_BUFS             100
>>> >>>> >>  #define BURST_BUF_SIZE               4
>>> >>>> >> -#define TEST_NUM_BUFS_EXCL   10000
>>> >>>> >> +#define NUM_BUFS_EXCL                10000
>>> >>>> >> +#define NUM_BUFS_PAUSE               1000
>>> >>>> >> +#define NUM_BUFS_BEFORE_PAUSE        10
>>> >>>> >>
>>> >>>> >>  #define GLOBALS_SHM_NAME     "test_globals"
>>> >>>> >>  #define MSG_POOL_NAME                "msg_pool"
>>> >>>> >> @@ -229,7 +231,7 @@ static void
>>> >>>> >> schedule_common(odp_schedule_sync_t
>>> >>>> >> sync, int num_queues,
>>> >>>> >>       args.sync = sync;
>>> >>>> >>       args.num_queues = num_queues;
>>> >>>> >>       args.num_prio = num_prio;
>>> >>>> >> -     args.num_bufs = TEST_NUM_BUFS;
>>> >>>> >> +     args.num_bufs = NUM_BUFS;
>>> >>>> >>       args.num_cores = 1;
>>> >>>> >>       args.enable_schd_multi = enable_schd_multi;
>>> >>>> >>       args.enable_excl_atomic = 0;    /* Not needed with a single
>>> >>>> >> core */
>>> >>>> >> @@ -261,9 +263,9 @@ static void
>>> >>>> >> parallel_execute(odp_schedule_sync_t
>>> >>>> >> sync, int num_queues,
>>> >>>> >>       thr_args->num_queues = num_queues;
>>> >>>> >>       thr_args->num_prio = num_prio;
>>> >>>> >>       if (enable_excl_atomic)
>>> >>>> >> -             thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
>>> >>>> >> +             thr_args->num_bufs = NUM_BUFS_EXCL;
>>> >>>> >>       else
>>> >>>> >> -             thr_args->num_bufs = TEST_NUM_BUFS;
>>> >>>> >> +             thr_args->num_bufs = NUM_BUFS;
>>> >>>> >>       thr_args->num_cores = globals->core_count;
>>> >>>> >>       thr_args->enable_schd_multi = enable_schd_multi;
>>> >>>> >>       thr_args->enable_excl_atomic = enable_excl_atomic;
>>> >>>> >> @@ -459,6 +461,56 @@ static void
>>> >>>> >> test_schedule_multi_1q_mt_a_excl(void)
>>> >>>> >>                        ENABLE_EXCL_ATOMIC);
>>> >>>> >>  }
>>> >>>> >>
>>> >>>> >> +static void test_schedule_pause_resume(void)
>>> >>>> >> +{
>>> >>>> >> +     odp_queue_t queue;
>>> >>>> >> +     odp_buffer_t buf;
>>> >>>> >> +     odp_queue_t from;
>>> >>>> >> +     int i;
>>> >>>> >> +     int local_bufs = 0;
>>> >>>> >> +
>>> >>>> >> +     queue = odp_queue_lookup("sched_0_0_n");
>>> >>>> >> +     CU_ASSERT(queue != ODP_QUEUE_INVALID);
>>> >>>> >> +
>>> >>>> >> +     pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
>>> >>>> >> +     CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
>>> >>>> >> +
>>> >>>> >> +
>>> >>>> >> +     for (i = 0; i < NUM_BUFS_PAUSE; i++) {
>>> >>>> >> +             buf = odp_buffer_alloc(pool);
>>> >>>> >> +             CU_ASSERT(buf != ODP_BUFFER_INVALID);
>>> >>>> >> +             odp_queue_enq(queue, buf);
>>> >>>> >> +     }
>>> >>>> >> +
>>> >>>> >> +     for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
>>> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>>> >>>> >> +             CU_ASSERT(from == queue);
>>> >>>> >> +             odp_buffer_free(buf);
>>> >>>> >> +     }
>>> >>>> >> +
>>> >>>> >> +     odp_schedule_pause();
>>> >>>> >> +
>>> >>>> >> +     while (1) {
>>> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>>> >>>> >> +             if (buf == ODP_BUFFER_INVALID)
>>> >>>> >> +                     break;
>>> >>>> >> +
>>> >>>> >> +             CU_ASSERT(from == queue);
>>> >>>> >> +             odp_buffer_free(buf);
>>> >>>> >> +             local_bufs++;
>>> >>>> >> +     }
>>> >>>> >> +
>>> >>>> >> +     CU_ASSERT(local_bufs < NUM_BUFS_PAUSE -
>>> >>>> >> NUM_BUFS_BEFORE_PAUSE);
>>> >>>> >
>>> >>>> > What is the expected behavior here? Shouldn't it be
>>> >>>> > CU_ASSERT(local_bufs == 0) ?
>>> >>>> > meaning, the complete pause ?
>>> >>>>
>>> >>>> Sorry about the delay, I've been playing around with mutt and I must
>>> >>>> have accidentally marked this email as read.
>>> >>>> The explanation here is that after pausing the scheduling, there
>>> >>>> might
>>> >>>> still be locally reserved buffers (see the odp_schedule_pause
>>> >>>> documentation). For linux-generic for instance the scheduler dequeues
>>> >>>> buffers in bursts, odp_schedule_pause only stops further dequeues,
>>> >>>> buffers may still be in the 'reservoirs'. With that in mind, the
>>> >>>> check
>>> >>>> above makes sure that after pausing only a limited number of packets
>>> >>>> are still scheduled; in other words, pausing seems to work, not all
>>> >>>> packets being drained.
>>> >>>>
>>> >>>> >
>>> >>>> >> +
>>> >>>> >> +     odp_schedule_resume();
>>> >>>> >> +
>>> >>>> >> +     for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i <
>>> >>>> >> NUM_BUFS_PAUSE; i++) {
>>> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_WAIT);
>>> >>>> >> +             CU_ASSERT(from == queue);
>>> >>>> >> +             odp_buffer_free(buf);
>>> >>>> >> +     }
>>> >>>> >> +}
>>> >>>> >> +
>>> >>>> >>  static int create_queues(void)
>>> >>>> >>  {
>>> >>>> >>       int i, j, prios;
>>> >>>> >> @@ -594,6 +646,7 @@ struct CU_TestInfo test_odp_schedule[] = {
>>> >>>> >>       {"schedule_multi_mq_mt_prio_a",
>>> >>>> >> test_schedule_multi_mq_mt_prio_a},
>>> >>>> >>       {"schedule_multi_mq_mt_prio_o",
>>> >>>> >> test_schedule_multi_mq_mt_prio_o},
>>> >>>> >>       {"schedule_multi_1q_mt_a_excl",
>>> >>>> >> test_schedule_multi_1q_mt_a_excl},
>>> >>>> >> +     {"schedule_pause_resume",       test_schedule_pause_resume},
>>> >>>> >>       CU_TEST_INFO_NULL,
>>> >>>> >>  };
>>> >>>> >>
>>> >>>> >> --
>>> >>>> >> 1.8.3.2
>>> >>>> >>
>>> >>>> >>
>>> >>>> >> _______________________________________________
>>> >>>> >> lng-odp mailing list
>>> >>>> >> lng-odp@lists.linaro.org
>>> >>>> >> http://lists.linaro.org/mailman/listinfo/lng-odp
>>> >>>>
>>> >>>> _______________________________________________
>>> >>>> lng-odp mailing list
>>> >>>> lng-odp@lists.linaro.org
>>> >>>> http://lists.linaro.org/mailman/listinfo/lng-odp
>>> >>>
>>> >>>
>>> >>>
>>> >>> _______________________________________________
>>> >>> lng-odp mailing list
>>> >>> lng-odp@lists.linaro.org
>>> >>> http://lists.linaro.org/mailman/listinfo/lng-odp
>>> >>>
>>> >>
>>> >>
>>> >>
>>> >> --
>>> >> Mike Holmes
>>> >> Linaro  Sr Technical Manager
>>> >> LNG - ODP
>>> >
>>> >
>>
>>
>>
>>
>> --
>> Mike Holmes
>> Linaro  Sr Technical Manager
>> LNG - ODP
>>
>> _______________________________________________
>> lng-odp mailing list
>> lng-odp@lists.linaro.org
>> http://lists.linaro.org/mailman/listinfo/lng-odp
>>
Mike Holmes Jan. 14, 2015, 5:14 p.m. UTC | #12
Without any clear change in sight, let's test what we have; this has been
on the list for a month

On 14 January 2015 at 08:35, Ciprian Barbu <ciprian.barbu@linaro.org> wrote:

> On Wed, Jan 14, 2015 at 3:28 PM, Ola Liljedahl <ola.liljedahl@linaro.org>
> wrote:
> > On 7 January 2015 at 20:41, Mike Holmes <mike.holmes@linaro.org> wrote:
> >> I am unsure if I need to pay attention to this for 0.7.0
> > We need to have a decision (and implementation) for ODP 1.0 though.
> > Scheduling and its semantics are important aspects of ODP.
>
> The odp_schedule_pause API is already documented and implemented, I
> didn't exactly catch from Petri if we will keep the behavior for 1.0,
> but what is the problem with covering this API in its current form for
> at least 0.7 and 0.8?
>
> >
> >>
> >> On 7 January 2015 at 04:39, Ciprian Barbu <ciprian.barbu@linaro.org>
> wrote:
> >>>
> >>> On Tue, Jan 6, 2015 at 4:03 PM, Bill Fischofer
> >>> <bill.fischofer@linaro.org> wrote:
> >>> > I think it's something we need to discuss during the sync call.
> >>> >
> >>> > On Tue, Jan 6, 2015 at 7:48 AM, Mike Holmes <mike.holmes@linaro.org>
> >>> > wrote:
> >>> >>
> >>> >> Should a bug be made to track a needed change or is it important for
> >>> >> 1.0
> >>> >> and needs to be in the delta doc ?
> >>> >>
> >>> >> On 6 January 2015 at 08:40, Bill Fischofer <
> bill.fischofer@linaro.org>
> >>> >> wrote:
> >>> >>>
> >>> >>> Caches should be transparent.  While this may be needed here, it's
> a
> >>> >>> poor
> >>> >>> set of semantics to expose as part of the formal APIs.  This is
> >>> >>> definitely
> >>> >>> something we need to address.  My suggestion is that a
> >>> >>> odp_schedule_pause()
> >>> >>> should cause an implicit cache flush if the implementation is
> using a
> >>> >>> scheduling cache.  That way any cache being used is truly
> transparent
> >>> >>> and
> >>> >>> moreover there won't be unnecessary delays in event processing
> since
> >>> >>> who
> >>> >>> knows how long a pause may last?  Clearly it won't be brief since
> >>> >>> otherwise
> >>> >>> the application would not have bothered with a pause/resume in the
> >>> >>> first
> >>> >>> place.
> >>>
> >>> Sorry, I couldn't join you in the ODP call yesterday, mind if you give
> >>> a brief update on what was decided?
> >>>
> >>> >>>
> >>> >>> On Tue, Jan 6, 2015 at 7:17 AM, Ciprian Barbu
> >>> >>> <ciprian.barbu@linaro.org>
> >>> >>> wrote:
> >>> >>>>
> >>> >>>> On Wed, Dec 17, 2014 at 5:09 PM, Jerin Jacob
> >>> >>>> <jerin.jacob@caviumnetworks.com> wrote:
> >>> >>>> > On Wed, Dec 17, 2014 at 03:10:11PM +0200, Ciprian Barbu wrote:
> >>> >>>> >> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
> >>> >>>> >> ---
> >>> >>>> >>  test/validation/odp_schedule.c | 63
> >>> >>>> >> ++++++++++++++++++++++++++++++++++++++----
> >>> >>>> >>  1 file changed, 58 insertions(+), 5 deletions(-)
> >>> >>>> >>
> >>> >>>> >> diff --git a/test/validation/odp_schedule.c
> >>> >>>> >> b/test/validation/odp_schedule.c
> >>> >>>> >> index 31be742..bdbcf77 100644
> >>> >>>> >> --- a/test/validation/odp_schedule.c
> >>> >>>> >> +++ b/test/validation/odp_schedule.c
> >>> >>>> >> @@ -11,9 +11,11 @@
> >>> >>>> >>  #define MSG_POOL_SIZE                (4*1024*1024)
> >>> >>>> >>  #define QUEUES_PER_PRIO              16
> >>> >>>> >>  #define BUF_SIZE             64
> >>> >>>> >> -#define TEST_NUM_BUFS                100
> >>> >>>> >> +#define NUM_BUFS             100
> >>> >>>> >>  #define BURST_BUF_SIZE               4
> >>> >>>> >> -#define TEST_NUM_BUFS_EXCL   10000
> >>> >>>> >> +#define NUM_BUFS_EXCL                10000
> >>> >>>> >> +#define NUM_BUFS_PAUSE               1000
> >>> >>>> >> +#define NUM_BUFS_BEFORE_PAUSE        10
> >>> >>>> >>
> >>> >>>> >>  #define GLOBALS_SHM_NAME     "test_globals"
> >>> >>>> >>  #define MSG_POOL_NAME                "msg_pool"
> >>> >>>> >> @@ -229,7 +231,7 @@ static void
> >>> >>>> >> schedule_common(odp_schedule_sync_t
> >>> >>>> >> sync, int num_queues,
> >>> >>>> >>       args.sync = sync;
> >>> >>>> >>       args.num_queues = num_queues;
> >>> >>>> >>       args.num_prio = num_prio;
> >>> >>>> >> -     args.num_bufs = TEST_NUM_BUFS;
> >>> >>>> >> +     args.num_bufs = NUM_BUFS;
> >>> >>>> >>       args.num_cores = 1;
> >>> >>>> >>       args.enable_schd_multi = enable_schd_multi;
> >>> >>>> >>       args.enable_excl_atomic = 0;    /* Not needed with a
> single
> >>> >>>> >> core */
> >>> >>>> >> @@ -261,9 +263,9 @@ static void
> >>> >>>> >> parallel_execute(odp_schedule_sync_t
> >>> >>>> >> sync, int num_queues,
> >>> >>>> >>       thr_args->num_queues = num_queues;
> >>> >>>> >>       thr_args->num_prio = num_prio;
> >>> >>>> >>       if (enable_excl_atomic)
> >>> >>>> >> -             thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
> >>> >>>> >> +             thr_args->num_bufs = NUM_BUFS_EXCL;
> >>> >>>> >>       else
> >>> >>>> >> -             thr_args->num_bufs = TEST_NUM_BUFS;
> >>> >>>> >> +             thr_args->num_bufs = NUM_BUFS;
> >>> >>>> >>       thr_args->num_cores = globals->core_count;
> >>> >>>> >>       thr_args->enable_schd_multi = enable_schd_multi;
> >>> >>>> >>       thr_args->enable_excl_atomic = enable_excl_atomic;
> >>> >>>> >> @@ -459,6 +461,56 @@ static void
> >>> >>>> >> test_schedule_multi_1q_mt_a_excl(void)
> >>> >>>> >>                        ENABLE_EXCL_ATOMIC);
> >>> >>>> >>  }
> >>> >>>> >>
> >>> >>>> >> +static void test_schedule_pause_resume(void)
> >>> >>>> >> +{
> >>> >>>> >> +     odp_queue_t queue;
> >>> >>>> >> +     odp_buffer_t buf;
> >>> >>>> >> +     odp_queue_t from;
> >>> >>>> >> +     int i;
> >>> >>>> >> +     int local_bufs = 0;
> >>> >>>> >> +
> >>> >>>> >> +     queue = odp_queue_lookup("sched_0_0_n");
> >>> >>>> >> +     CU_ASSERT(queue != ODP_QUEUE_INVALID);
> >>> >>>> >> +
> >>> >>>> >> +     pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
> >>> >>>> >> +     CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
> >>> >>>> >> +
> >>> >>>> >> +
> >>> >>>> >> +     for (i = 0; i < NUM_BUFS_PAUSE; i++) {
> >>> >>>> >> +             buf = odp_buffer_alloc(pool);
> >>> >>>> >> +             CU_ASSERT(buf != ODP_BUFFER_INVALID);
> >>> >>>> >> +             odp_queue_enq(queue, buf);
> >>> >>>> >> +     }
> >>> >>>> >> +
> >>> >>>> >> +     for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
> >>> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
> >>> >>>> >> +             CU_ASSERT(from == queue);
> >>> >>>> >> +             odp_buffer_free(buf);
> >>> >>>> >> +     }
> >>> >>>> >> +
> >>> >>>> >> +     odp_schedule_pause();
> >>> >>>> >> +
> >>> >>>> >> +     while (1) {
> >>> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
> >>> >>>> >> +             if (buf == ODP_BUFFER_INVALID)
> >>> >>>> >> +                     break;
> >>> >>>> >> +
> >>> >>>> >> +             CU_ASSERT(from == queue);
> >>> >>>> >> +             odp_buffer_free(buf);
> >>> >>>> >> +             local_bufs++;
> >>> >>>> >> +     }
> >>> >>>> >> +
> >>> >>>> >> +     CU_ASSERT(local_bufs < NUM_BUFS_PAUSE -
> >>> >>>> >> NUM_BUFS_BEFORE_PAUSE);
> >>> >>>> >
> >>> >>>> > What is the expected behavior here? Shouldn't it be
> >>> >>>> > CU_ASSERT(local_bufs == 0) ?
> >>> >>>> > meaning, the complete pause ?
> >>> >>>>
> >>> >>>> Sorry about the delay, I've been playing around with mutt and I
> must
> >>> >>>> have accidentally marked this email as read.
> >>> >>>> The explanation here is that after pausing the scheduling, there
> >>> >>>> might
> >>> >>>> still be locally reserved buffers (see the odp_schedule_pause
> >>> >>>> documentation). For linux-generic for instance the scheduler
> dequeues
> >>> >>>> buffers in bursts, odp_schedule_pause only stops further
> dequeues,
> >>> >>>> buffers may still be in the 'reservoirs'. With that in mind, the
> >>> >>>> check
> >>> >>>> above makes sure that after pausing only a limited number of
> packets
> >>> >>>> are still scheduled; in other words, pausing seems to work, not all
> >>> >>>> packets being drained.
> >>> >>>>
> >>> >>>> >
> >>> >>>> >> +
> >>> >>>> >> +     odp_schedule_resume();
> >>> >>>> >> +
> >>> >>>> >> +     for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i <
> >>> >>>> >> NUM_BUFS_PAUSE; i++) {
> >>> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_WAIT);
> >>> >>>> >> +             CU_ASSERT(from == queue);
> >>> >>>> >> +             odp_buffer_free(buf);
> >>> >>>> >> +     }
> >>> >>>> >> +}
> >>> >>>> >> +
> >>> >>>> >>  static int create_queues(void)
> >>> >>>> >>  {
> >>> >>>> >>       int i, j, prios;
> >>> >>>> >> @@ -594,6 +646,7 @@ struct CU_TestInfo test_odp_schedule[] = {
> >>> >>>> >>       {"schedule_multi_mq_mt_prio_a",
> >>> >>>> >> test_schedule_multi_mq_mt_prio_a},
> >>> >>>> >>       {"schedule_multi_mq_mt_prio_o",
> >>> >>>> >> test_schedule_multi_mq_mt_prio_o},
> >>> >>>> >>       {"schedule_multi_1q_mt_a_excl",
> >>> >>>> >> test_schedule_multi_1q_mt_a_excl},
> >>> >>>> >> +     {"schedule_pause_resume",
>  test_schedule_pause_resume},
> >>> >>>> >>       CU_TEST_INFO_NULL,
> >>> >>>> >>  };
> >>> >>>> >>
> >>> >>>> >> --
> >>> >>>> >> 1.8.3.2
> >>> >>>> >>
> >>> >>>> >>
> >>> >>>> >> _______________________________________________
> >>> >>>> >> lng-odp mailing list
> >>> >>>> >> lng-odp@lists.linaro.org
> >>> >>>> >> http://lists.linaro.org/mailman/listinfo/lng-odp
> >>> >>>>
> >>> >>>> _______________________________________________
> >>> >>>> lng-odp mailing list
> >>> >>>> lng-odp@lists.linaro.org
> >>> >>>> http://lists.linaro.org/mailman/listinfo/lng-odp
> >>> >>>
> >>> >>>
> >>> >>>
> >>> >>> _______________________________________________
> >>> >>> lng-odp mailing list
> >>> >>> lng-odp@lists.linaro.org
> >>> >>> http://lists.linaro.org/mailman/listinfo/lng-odp
> >>> >>>
> >>> >>
> >>> >>
> >>> >>
> >>> >> --
> >>> >> Mike Holmes
> >>> >> Linaro  Sr Technical Manager
> >>> >> LNG - ODP
> >>> >
> >>> >
> >>
> >>
> >>
> >>
> >> --
> >> Mike Holmes
> >> Linaro  Sr Technical Manager
> >> LNG - ODP
> >>
> >> _______________________________________________
> >> lng-odp mailing list
> >> lng-odp@lists.linaro.org
> >> http://lists.linaro.org/mailman/listinfo/lng-odp
> >>
>
Ciprian Barbu Jan. 20, 2015, 2:23 p.m. UTC | #13
PING!

On Wed, Jan 14, 2015 at 7:14 PM, Mike Holmes <mike.holmes@linaro.org> wrote:
> Without any clear change in sight, let's test what we have; this has been on
> the list for a month
>
> On 14 January 2015 at 08:35, Ciprian Barbu <ciprian.barbu@linaro.org> wrote:
>>
>> On Wed, Jan 14, 2015 at 3:28 PM, Ola Liljedahl <ola.liljedahl@linaro.org>
>> wrote:
>> > On 7 January 2015 at 20:41, Mike Holmes <mike.holmes@linaro.org> wrote:
>> >> I am unsure if I need to pay attention to this for 0.7.0
>> > We need to have a decision (and implementation) for ODP 1.0 though.
>> > Scheduling and its semantics are important aspects of ODP.
>>
>> The odp_schedule_pause API is already documented and implemented, I
>> didn't exactly catch from Petri if we will keep the behavior for 1.0,
>> but what is the problem with covering this API in its current form for
>> at least 0.7 and 0.8?
>>
>> >
>> >>
>> >> On 7 January 2015 at 04:39, Ciprian Barbu <ciprian.barbu@linaro.org>
>> >> wrote:
>> >>>
>> >>> On Tue, Jan 6, 2015 at 4:03 PM, Bill Fischofer
>> >>> <bill.fischofer@linaro.org> wrote:
>> >>> > I think it's something we need to discuss during the sync call.
>> >>> >
>> >>> > On Tue, Jan 6, 2015 at 7:48 AM, Mike Holmes <mike.holmes@linaro.org>
>> >>> > wrote:
>> >>> >>
>> >>> >> Should a bug be made to track a needed change or is it important
>> >>> >> for
>> >>> >> 1.0
>> >>> >> and needs to be in the delta doc ?
>> >>> >>
>> >>> >> On 6 January 2015 at 08:40, Bill Fischofer
>> >>> >> <bill.fischofer@linaro.org>
>> >>> >> wrote:
>> >>> >>>
>> >>> >>> Caches should be transparent.  While this may be needed here, it's
>> >>> >>> a
>> >>> >>> poor
>> >>> >>> set of semantics to expose as part of the formal APIs.  This is
>> >>> >>> definitely
>> >>> >>> something we need to address.  My suggestion is that a
>> >>> >>> odp_schedule_pause()
>> >>> >>> should cause an implicit cache flush if the implementation is
>> >>> >>> using a
>> >>> >>> scheduling cache.  That way any cache being used is truly
>> >>> >>> transparent
>> >>> >>> and
>> >>> >>> moreover there won't be unnecessary delays in event processing
>> >>> >>> since
>> >>> >>> who
>> >>> >>> knows how long a pause may last?  Clearly it won't be brief since
>> >>> >>> otherwise
>> >>> >>> the application would not have bothered with a pause/resume in the
>> >>> >>> first
>> >>> >>> place.
>> >>>
>> >>> Sorry, I couldn't join you in the ODP call yesterday, mind if you give
>> >>> a brief update on what was decided?
>> >>>
>> >>> >>>
>> >>> >>> On Tue, Jan 6, 2015 at 7:17 AM, Ciprian Barbu
>> >>> >>> <ciprian.barbu@linaro.org>
>> >>> >>> wrote:
>> >>> >>>>
>> >>> >>>> On Wed, Dec 17, 2014 at 5:09 PM, Jerin Jacob
>> >>> >>>> <jerin.jacob@caviumnetworks.com> wrote:
>> >>> >>>> > On Wed, Dec 17, 2014 at 03:10:11PM +0200, Ciprian Barbu wrote:
>> >>> >>>> >> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
>> >>> >>>> >> ---
>> >>> >>>> >>  test/validation/odp_schedule.c | 63
>> >>> >>>> >> ++++++++++++++++++++++++++++++++++++++----
>> >>> >>>> >>  1 file changed, 58 insertions(+), 5 deletions(-)
>> >>> >>>> >>
>> >>> >>>> >> diff --git a/test/validation/odp_schedule.c
>> >>> >>>> >> b/test/validation/odp_schedule.c
>> >>> >>>> >> index 31be742..bdbcf77 100644
>> >>> >>>> >> --- a/test/validation/odp_schedule.c
>> >>> >>>> >> +++ b/test/validation/odp_schedule.c
>> >>> >>>> >> @@ -11,9 +11,11 @@
>> >>> >>>> >>  #define MSG_POOL_SIZE                (4*1024*1024)
>> >>> >>>> >>  #define QUEUES_PER_PRIO              16
>> >>> >>>> >>  #define BUF_SIZE             64
>> >>> >>>> >> -#define TEST_NUM_BUFS                100
>> >>> >>>> >> +#define NUM_BUFS             100
>> >>> >>>> >>  #define BURST_BUF_SIZE               4
>> >>> >>>> >> -#define TEST_NUM_BUFS_EXCL   10000
>> >>> >>>> >> +#define NUM_BUFS_EXCL                10000
>> >>> >>>> >> +#define NUM_BUFS_PAUSE               1000
>> >>> >>>> >> +#define NUM_BUFS_BEFORE_PAUSE        10
>> >>> >>>> >>
>> >>> >>>> >>  #define GLOBALS_SHM_NAME     "test_globals"
>> >>> >>>> >>  #define MSG_POOL_NAME                "msg_pool"
>> >>> >>>> >> @@ -229,7 +231,7 @@ static void
>> >>> >>>> >> schedule_common(odp_schedule_sync_t
>> >>> >>>> >> sync, int num_queues,
>> >>> >>>> >>       args.sync = sync;
>> >>> >>>> >>       args.num_queues = num_queues;
>> >>> >>>> >>       args.num_prio = num_prio;
>> >>> >>>> >> -     args.num_bufs = TEST_NUM_BUFS;
>> >>> >>>> >> +     args.num_bufs = NUM_BUFS;
>> >>> >>>> >>       args.num_cores = 1;
>> >>> >>>> >>       args.enable_schd_multi = enable_schd_multi;
>> >>> >>>> >>       args.enable_excl_atomic = 0;    /* Not needed with a
>> >>> >>>> >> single
>> >>> >>>> >> core */
>> >>> >>>> >> @@ -261,9 +263,9 @@ static void
>> >>> >>>> >> parallel_execute(odp_schedule_sync_t
>> >>> >>>> >> sync, int num_queues,
>> >>> >>>> >>       thr_args->num_queues = num_queues;
>> >>> >>>> >>       thr_args->num_prio = num_prio;
>> >>> >>>> >>       if (enable_excl_atomic)
>> >>> >>>> >> -             thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
>> >>> >>>> >> +             thr_args->num_bufs = NUM_BUFS_EXCL;
>> >>> >>>> >>       else
>> >>> >>>> >> -             thr_args->num_bufs = TEST_NUM_BUFS;
>> >>> >>>> >> +             thr_args->num_bufs = NUM_BUFS;
>> >>> >>>> >>       thr_args->num_cores = globals->core_count;
>> >>> >>>> >>       thr_args->enable_schd_multi = enable_schd_multi;
>> >>> >>>> >>       thr_args->enable_excl_atomic = enable_excl_atomic;
>> >>> >>>> >> @@ -459,6 +461,56 @@ static void
>> >>> >>>> >> test_schedule_multi_1q_mt_a_excl(void)
>> >>> >>>> >>                        ENABLE_EXCL_ATOMIC);
>> >>> >>>> >>  }
>> >>> >>>> >>
>> >>> >>>> >> +static void test_schedule_pause_resume(void)
>> >>> >>>> >> +{
>> >>> >>>> >> +     odp_queue_t queue;
>> >>> >>>> >> +     odp_buffer_t buf;
>> >>> >>>> >> +     odp_queue_t from;
>> >>> >>>> >> +     int i;
>> >>> >>>> >> +     int local_bufs = 0;
>> >>> >>>> >> +
>> >>> >>>> >> +     queue = odp_queue_lookup("sched_0_0_n");
>> >>> >>>> >> +     CU_ASSERT(queue != ODP_QUEUE_INVALID);
>> >>> >>>> >> +
>> >>> >>>> >> +     pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
>> >>> >>>> >> +     CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
>> >>> >>>> >> +
>> >>> >>>> >> +
>> >>> >>>> >> +     for (i = 0; i < NUM_BUFS_PAUSE; i++) {
>> >>> >>>> >> +             buf = odp_buffer_alloc(pool);
>> >>> >>>> >> +             CU_ASSERT(buf != ODP_BUFFER_INVALID);
>> >>> >>>> >> +             odp_queue_enq(queue, buf);
>> >>> >>>> >> +     }
>> >>> >>>> >> +
>> >>> >>>> >> +     for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
>> >>> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>> >>> >>>> >> +             CU_ASSERT(from == queue);
>> >>> >>>> >> +             odp_buffer_free(buf);
>> >>> >>>> >> +     }
>> >>> >>>> >> +
>> >>> >>>> >> +     odp_schedule_pause();
>> >>> >>>> >> +
>> >>> >>>> >> +     while (1) {
>> >>> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>> >>> >>>> >> +             if (buf == ODP_BUFFER_INVALID)
>> >>> >>>> >> +                     break;
>> >>> >>>> >> +
>> >>> >>>> >> +             CU_ASSERT(from == queue);
>> >>> >>>> >> +             odp_buffer_free(buf);
>> >>> >>>> >> +             local_bufs++;
>> >>> >>>> >> +     }
>> >>> >>>> >> +
>> >>> >>>> >> +     CU_ASSERT(local_bufs < NUM_BUFS_PAUSE -
>> >>> >>>> >> NUM_BUFS_BEFORE_PAUSE);
>> >>> >>>> >
>> >>> >>>> > What is the expected behavior here? Shouldn't it be
>> >>> >>>> > CU_ASSERT(local_bufs == 0) ?
>> >>> >>>> > meaning, the complete pause ?
>> >>> >>>>
>> >>> >>>> Sorry about the delay, I've been playing around with mutt and I
>> >>> >>>> must
>> >>> >>>> have accidentally marked this email as read.
>> >>> >>>> The explanation here is that after pausing the scheduling, there
>> >>> >>>> might
>> >>> >>>> still be locally reserved buffers (see the odp_schedule_pause
>> >>> >>>> documentation). For linux-generic for instance the scheduler
>> >>> >>>> dequeues
>> >>> >>>> buffers in bursts, odp_schedule_pause only stops further
>> >>> >>>> dequeues,
>> >>> >>>> buffers may still be in the 'reservoirs'. With that in mind, the
>> >>> >>>> check
>> >>> >>>> above makes sure that after pausing only a limited number of
>> >>> >>>> packets
>> >>> >>>> are still scheduled; in other words, pausing seems to work, not all
>> >>> >>>> packets being drained.
>> >>> >>>>
>> >>> >>>> >
>> >>> >>>> >> +
>> >>> >>>> >> +     odp_schedule_resume();
>> >>> >>>> >> +
>> >>> >>>> >> +     for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i <
>> >>> >>>> >> NUM_BUFS_PAUSE; i++) {
>> >>> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_WAIT);
>> >>> >>>> >> +             CU_ASSERT(from == queue);
>> >>> >>>> >> +             odp_buffer_free(buf);
>> >>> >>>> >> +     }
>> >>> >>>> >> +}
>> >>> >>>> >> +
>> >>> >>>> >>  static int create_queues(void)
>> >>> >>>> >>  {
>> >>> >>>> >>       int i, j, prios;
>> >>> >>>> >> @@ -594,6 +646,7 @@ struct CU_TestInfo test_odp_schedule[] = {
>> >>> >>>> >>       {"schedule_multi_mq_mt_prio_a",
>> >>> >>>> >> test_schedule_multi_mq_mt_prio_a},
>> >>> >>>> >>       {"schedule_multi_mq_mt_prio_o",
>> >>> >>>> >> test_schedule_multi_mq_mt_prio_o},
>> >>> >>>> >>       {"schedule_multi_1q_mt_a_excl",
>> >>> >>>> >> test_schedule_multi_1q_mt_a_excl},
>> >>> >>>> >> +     {"schedule_pause_resume",
>> >>> >>>> >> test_schedule_pause_resume},
>> >>> >>>> >>       CU_TEST_INFO_NULL,
>> >>> >>>> >>  };
>> >>> >>>> >>
>> >>> >>>> >> --
>> >>> >>>> >> 1.8.3.2
>> >>> >>>> >>
>> >>> >>>> >>
>> >>> >>>> >> _______________________________________________
>> >>> >>>> >> lng-odp mailing list
>> >>> >>>> >> lng-odp@lists.linaro.org
>> >>> >>>> >> http://lists.linaro.org/mailman/listinfo/lng-odp
>> >>> >>>>
>> >>> >>>> _______________________________________________
>> >>> >>>> lng-odp mailing list
>> >>> >>>> lng-odp@lists.linaro.org
>> >>> >>>> http://lists.linaro.org/mailman/listinfo/lng-odp
>> >>> >>>
>> >>> >>>
>> >>> >>>
>> >>> >>> _______________________________________________
>> >>> >>> lng-odp mailing list
>> >>> >>> lng-odp@lists.linaro.org
>> >>> >>> http://lists.linaro.org/mailman/listinfo/lng-odp
>> >>> >>>
>> >>> >>
>> >>> >>
>> >>> >>
>> >>> >> --
>> >>> >> Mike Holmes
>> >>> >> Linaro  Sr Technical Manager
>> >>> >> LNG - ODP
>> >>> >
>> >>> >
>> >>
>> >>
>> >>
>> >>
>> >> --
>> >> Mike Holmes
>> >> Linaro  Sr Technical Manager
>> >> LNG - ODP
>> >>
>> >> _______________________________________________
>> >> lng-odp mailing list
>> >> lng-odp@lists.linaro.org
>> >> http://lists.linaro.org/mailman/listinfo/lng-odp
>> >>
>
>
>
>
> --
> Mike Holmes
> Linaro  Sr Technical Manager
> LNG - ODP
Mike Holmes Jan. 20, 2015, 2:31 p.m. UTC | #14
Petri, unless you don't think this fits, I suggest we apply it to test what
is in the repo now

On 20 January 2015 at 09:23, Ciprian Barbu <ciprian.barbu@linaro.org> wrote:

> PING!
>
> On Wed, Jan 14, 2015 at 7:14 PM, Mike Holmes <mike.holmes@linaro.org>
> wrote:
> > Without any clear change in sight, let's test what we have; this has
> > been on the list for a month
> >
> > On 14 January 2015 at 08:35, Ciprian Barbu <ciprian.barbu@linaro.org>
> wrote:
> >>
> >> On Wed, Jan 14, 2015 at 3:28 PM, Ola Liljedahl <
> ola.liljedahl@linaro.org>
> >> wrote:
> >> > On 7 January 2015 at 20:41, Mike Holmes <mike.holmes@linaro.org>
> wrote:
> >> >> I am unsure if I need to pay attention to this for 0.7.0
> >> > We need to have a decision (and implementation) for ODP 1.0 though.
> >> > Scheduling and its semantics are important aspects of ODP.
> >>
> >> The odp_schedule_pause API is already documented and implemented, I
> >> didn't exactly catch from Petri if we will keep the behavior for 1.0,
> >> but what is the problem with covering this API in its current form for
> >> at least 0.7 and 0.8?
> >>
> >> >
> >> >>
> >> >> On 7 January 2015 at 04:39, Ciprian Barbu <ciprian.barbu@linaro.org>
> >> >> wrote:
> >> >>>
> >> >>> On Tue, Jan 6, 2015 at 4:03 PM, Bill Fischofer
> >> >>> <bill.fischofer@linaro.org> wrote:
> >> >>> > I think it's something we need to discuss during the sync call.
> >> >>> >
> >> >>> > On Tue, Jan 6, 2015 at 7:48 AM, Mike Holmes <
> mike.holmes@linaro.org>
> >> >>> > wrote:
> >> >>> >>
> >> >>> >> Should a bug be made to track a needed change or is it important
> >> >>> >> for
> >> >>> >> 1.0
> >> >>> >> and needs to be in the delta doc ?
> >> >>> >>
> >> >>> >> On 6 January 2015 at 08:40, Bill Fischofer
> >> >>> >> <bill.fischofer@linaro.org>
> >> >>> >> wrote:
> >> >>> >>>
> >> >>> >>> Caches should be transparent.  While this may be needed here,
> it's
> >> >>> >>> a
> >> >>> >>> poor
> >> >>> >>> set of semantics to expose as part of the formal APIs.  This is
> >> >>> >>> definitely
> >> >>> >>> something we need to address.  My suggestion is that a
> >> >>> >>> odp_schedule_pause()
> >> >>> >>> should cause an implicit cache flush if the implementation is
> >> >>> >>> using a
> >> >>> >>> scheduling cache.  That way any cache being used is truly
> >> >>> >>> transparent
> >> >>> >>> and
> >> >>> >>> moreover there won't be unnecessary delays in event processing
> >> >>> >>> since
> >> >>> >>> who
> >> >>> >>> knows how long a pause may last?  Clearly it won't be brief
> since
> >> >>> >>> otherwise
> >> >>> >>> the application would not have bothered with a pause/resume in
> the
> >> >>> >>> first
> >> >>> >>> place.
> >> >>>
> >> >>> Sorry, I couldn't join you in the ODP call yesterday, mind if you
> give
> >> >>> a brief update on what was decided?
> >> >>>
> >> >>> >>>
> >> >>> >>> On Tue, Jan 6, 2015 at 7:17 AM, Ciprian Barbu
> >> >>> >>> <ciprian.barbu@linaro.org>
> >> >>> >>> wrote:
> >> >>> >>>>
> >> >>> >>>> On Wed, Dec 17, 2014 at 5:09 PM, Jerin Jacob
> >> >>> >>>> <jerin.jacob@caviumnetworks.com> wrote:
> >> >>> >>>> > On Wed, Dec 17, 2014 at 03:10:11PM +0200, Ciprian Barbu
> wrote:
> >> >>> >>>> >> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
> >> >>> >>>> >> ---
> >> >>> >>>> >>  test/validation/odp_schedule.c | 63
> >> >>> >>>> >> ++++++++++++++++++++++++++++++++++++++----
> >> >>> >>>> >>  1 file changed, 58 insertions(+), 5 deletions(-)
> >> >>> >>>> >>
> >> >>> >>>> >> diff --git a/test/validation/odp_schedule.c
> >> >>> >>>> >> b/test/validation/odp_schedule.c
> >> >>> >>>> >> index 31be742..bdbcf77 100644
> >> >>> >>>> >> --- a/test/validation/odp_schedule.c
> >> >>> >>>> >> +++ b/test/validation/odp_schedule.c
> >> >>> >>>> >> @@ -11,9 +11,11 @@
> >> >>> >>>> >>  #define MSG_POOL_SIZE                (4*1024*1024)
> >> >>> >>>> >>  #define QUEUES_PER_PRIO              16
> >> >>> >>>> >>  #define BUF_SIZE             64
> >> >>> >>>> >> -#define TEST_NUM_BUFS                100
> >> >>> >>>> >> +#define NUM_BUFS             100
> >> >>> >>>> >>  #define BURST_BUF_SIZE               4
> >> >>> >>>> >> -#define TEST_NUM_BUFS_EXCL   10000
> >> >>> >>>> >> +#define NUM_BUFS_EXCL                10000
> >> >>> >>>> >> +#define NUM_BUFS_PAUSE               1000
> >> >>> >>>> >> +#define NUM_BUFS_BEFORE_PAUSE        10
> >> >>> >>>> >>
> >> >>> >>>> >>  #define GLOBALS_SHM_NAME     "test_globals"
> >> >>> >>>> >>  #define MSG_POOL_NAME                "msg_pool"
> >> >>> >>>> >> @@ -229,7 +231,7 @@ static void
> >> >>> >>>> >> schedule_common(odp_schedule_sync_t
> >> >>> >>>> >> sync, int num_queues,
> >> >>> >>>> >>       args.sync = sync;
> >> >>> >>>> >>       args.num_queues = num_queues;
> >> >>> >>>> >>       args.num_prio = num_prio;
> >> >>> >>>> >> -     args.num_bufs = TEST_NUM_BUFS;
> >> >>> >>>> >> +     args.num_bufs = NUM_BUFS;
> >> >>> >>>> >>       args.num_cores = 1;
> >> >>> >>>> >>       args.enable_schd_multi = enable_schd_multi;
> >> >>> >>>> >>       args.enable_excl_atomic = 0;    /* Not needed with a
> >> >>> >>>> >> single
> >> >>> >>>> >> core */
> >> >>> >>>> >> @@ -261,9 +263,9 @@ static void
> >> >>> >>>> >> parallel_execute(odp_schedule_sync_t
> >> >>> >>>> >> sync, int num_queues,
> >> >>> >>>> >>       thr_args->num_queues = num_queues;
> >> >>> >>>> >>       thr_args->num_prio = num_prio;
> >> >>> >>>> >>       if (enable_excl_atomic)
> >> >>> >>>> >> -             thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
> >> >>> >>>> >> +             thr_args->num_bufs = NUM_BUFS_EXCL;
> >> >>> >>>> >>       else
> >> >>> >>>> >> -             thr_args->num_bufs = TEST_NUM_BUFS;
> >> >>> >>>> >> +             thr_args->num_bufs = NUM_BUFS;
> >> >>> >>>> >>       thr_args->num_cores = globals->core_count;
> >> >>> >>>> >>       thr_args->enable_schd_multi = enable_schd_multi;
> >> >>> >>>> >>       thr_args->enable_excl_atomic = enable_excl_atomic;
> >> >>> >>>> >> @@ -459,6 +461,56 @@ static void
> >> >>> >>>> >> test_schedule_multi_1q_mt_a_excl(void)
> >> >>> >>>> >>                        ENABLE_EXCL_ATOMIC);
> >> >>> >>>> >>  }
> >> >>> >>>> >>
> >> >>> >>>> >> +static void test_schedule_pause_resume(void)
> >> >>> >>>> >> +{
> >> >>> >>>> >> +     odp_queue_t queue;
> >> >>> >>>> >> +     odp_buffer_t buf;
> >> >>> >>>> >> +     odp_queue_t from;
> >> >>> >>>> >> +     int i;
> >> >>> >>>> >> +     int local_bufs = 0;
> >> >>> >>>> >> +
> >> >>> >>>> >> +     queue = odp_queue_lookup("sched_0_0_n");
> >> >>> >>>> >> +     CU_ASSERT(queue != ODP_QUEUE_INVALID);
> >> >>> >>>> >> +
> >> >>> >>>> >> +     pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
> >> >>> >>>> >> +     CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
> >> >>> >>>> >> +
> >> >>> >>>> >> +
> >> >>> >>>> >> +     for (i = 0; i < NUM_BUFS_PAUSE; i++) {
> >> >>> >>>> >> +             buf = odp_buffer_alloc(pool);
> >> >>> >>>> >> +             CU_ASSERT(buf != ODP_BUFFER_INVALID);
> >> >>> >>>> >> +             odp_queue_enq(queue, buf);
> >> >>> >>>> >> +     }
> >> >>> >>>> >> +
> >> >>> >>>> >> +     for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
> >> >>> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
> >> >>> >>>> >> +             CU_ASSERT(from == queue);
> >> >>> >>>> >> +             odp_buffer_free(buf);
> >> >>> >>>> >> +     }
> >> >>> >>>> >> +
> >> >>> >>>> >> +     odp_schedule_pause();
> >> >>> >>>> >> +
> >> >>> >>>> >> +     while (1) {
> >> >>> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
> >> >>> >>>> >> +             if (buf == ODP_BUFFER_INVALID)
> >> >>> >>>> >> +                     break;
> >> >>> >>>> >> +
> >> >>> >>>> >> +             CU_ASSERT(from == queue);
> >> >>> >>>> >> +             odp_buffer_free(buf);
> >> >>> >>>> >> +             local_bufs++;
> >> >>> >>>> >> +     }
> >> >>> >>>> >> +
> >> >>> >>>> >> +     CU_ASSERT(local_bufs < NUM_BUFS_PAUSE -
> >> >>> >>>> >> NUM_BUFS_BEFORE_PAUSE);
> >> >>> >>>> >
> >> >>> >>>> > Whats is the expected behavior here, Shouldn't it be
> >> >>> >>>> > CU_ASSERT(local_bufs == 0) ?
> >> >>> >>>> > meaning, the complete pause ?
> >> >>> >>>>
> >> >>> >>>> Sorry about the delay, I've been playing around with mutt and I
> >> >>> >>>> must
> >> >>> >>>> have accidentally marked this email as read.
> >> >>> >>>> The explanation here is that after pausing the scheduling,
> there
> >> >>> >>>> might
> >> >>> >>>> still be locally reserved buffers (see the odp_schedule_pause
> >> >>> >>>> documentation). For linux-generic for instance the scheduler
> >> >>> >>>> dequeues
> >> >>> >>>> buffers in bursts, odp_scheduler_pause only stops further
> >> >>> >>>> dequeues,
> >> >>> >>>> buffers may still be in the 'reservoirs'. With that in mind,
> the
> >> >>> >>>> check
> >> >>> >>>> above makes sure that after pausing only a limited number of
> >> >>> >>>> packets
> >> >>> >>>> are still scheduled, or else said pausing seems to work, not
> all
> >> >>> >>>> packets being drained.
> >> >>> >>>>
> >> >>> >>>> >
> >> >>> >>>> >> +
> >> >>> >>>> >> +     odp_schedule_resume();
> >> >>> >>>> >> +
> >> >>> >>>> >> +     for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i <
> >> >>> >>>> >> NUM_BUFS_PAUSE; i++) {
> >> >>> >>>> >> +             buf = odp_schedule(&from, ODP_SCHED_WAIT);
> >> >>> >>>> >> +             CU_ASSERT(from == queue);
> >> >>> >>>> >> +             odp_buffer_free(buf);
> >> >>> >>>> >> +     }
> >> >>> >>>> >> +}
> >> >>> >>>> >> +
> >> >>> >>>> >>  static int create_queues(void)
> >> >>> >>>> >>  {
> >> >>> >>>> >>       int i, j, prios;
> >> >>> >>>> >> @@ -594,6 +646,7 @@ struct CU_TestInfo test_odp_schedule[]
> = {
> >> >>> >>>> >>       {"schedule_multi_mq_mt_prio_a",
> >> >>> >>>> >> test_schedule_multi_mq_mt_prio_a},
> >> >>> >>>> >>       {"schedule_multi_mq_mt_prio_o",
> >> >>> >>>> >> test_schedule_multi_mq_mt_prio_o},
> >> >>> >>>> >>       {"schedule_multi_1q_mt_a_excl",
> >> >>> >>>> >> test_schedule_multi_1q_mt_a_excl},
> >> >>> >>>> >> +     {"schedule_pause_resume",
> >> >>> >>>> >> test_schedule_pause_resume},
> >> >>> >>>> >>       CU_TEST_INFO_NULL,
> >> >>> >>>> >>  };
> >> >>> >>>> >>
> >> >>> >>>> >> --
> >> >>> >>>> >> 1.8.3.2
Maxim Uvarov Jan. 20, 2015, 9:34 p.m. UTC | #15
Whoever reviewed this patch, please add a Reviewed-by.

Mike, please add yours because it's a validation patch.

Maxim.

On 01/20/2015 05:23 PM, Ciprian Barbu wrote:
> PING!
>
> On Wed, Jan 14, 2015 at 7:14 PM, Mike Holmes <mike.holmes@linaro.org> wrote:
>> Without any clear change in sight,  lets test what we have, this has been on
>> the list for a month
>>
>> On 14 January 2015 at 08:35, Ciprian Barbu <ciprian.barbu@linaro.org> wrote:
>>> On Wed, Jan 14, 2015 at 3:28 PM, Ola Liljedahl <ola.liljedahl@linaro.org>
>>> wrote:
>>>> On 7 January 2015 at 20:41, Mike Holmes <mike.holmes@linaro.org> wrote:
>>>>> I am unsure if I need to pay attention to this for 0.7.0
>>>> We need to have a decision (and implementation) for ODP 1.0 though.
>>>> Scheduling and its semantics are important aspects of ODP.
>>> The odp_schedule_pause API is already documented and implemented, I
>>> didn't exactly catch from Petri if we will keep the behavior for 1.0,
>>> but what is the problem with covering this API in its current form for
>>> at least 0.7 and 0.8?
>>>
>>>>> On 7 January 2015 at 04:39, Ciprian Barbu <ciprian.barbu@linaro.org>
>>>>> wrote:
>>>>>> On Tue, Jan 6, 2015 at 4:03 PM, Bill Fischofer
>>>>>> <bill.fischofer@linaro.org> wrote:
>>>>>>> I think it's something we need to discuss during the sync call.
>>>>>>>
>>>>>>> On Tue, Jan 6, 2015 at 7:48 AM, Mike Holmes <mike.holmes@linaro.org>
>>>>>>> wrote:
>>>>>>>> Should a bug be made to track a needed change or is it important
>>>>>>>> for
>>>>>>>> 1.0
>>>>>>>> and needs to be in the delta doc ?
>>>>>>>>
>>>>>>>> On 6 January 2015 at 08:40, Bill Fischofer
>>>>>>>> <bill.fischofer@linaro.org>
>>>>>>>> wrote:
>>>>>>>>> Caches should be transparent.  While this may be needed here, it's
>>>>>>>>> a
>>>>>>>>> poor
>>>>>>>>> set of semantics to expose as part of the formal APIs.  This is
>>>>>>>>> definitely
>>>>>>>>> something we need to address.  My suggestion is that a
>>>>>>>>> odp_schedule_pause()
>>>>>>>>> should cause an implicit cache flush if the implementation is
>>>>>>>>> using a
>>>>>>>>> scheduling cache.  That way any cache being used is truly
>>>>>>>>> transparent
>>>>>>>>> and
>>>>>>>>> moreover there won't be unnecessary delays in event processing
>>>>>>>>> since
>>>>>>>>> who
>>>>>>>>> knows how long a pause may last?  Clearly it won't be brief since
>>>>>>>>> otherwise
>>>>>>>>> the application would not have bothered with a pause/resume in the
>>>>>>>>> first
>>>>>>>>> place.
>>>>>> Sorry, I couldn't join you in the ODP call yesterday, mind if you give
>>>>>> a brief update on what was decided?
>>>>>>
>>>>>>>>> On Tue, Jan 6, 2015 at 7:17 AM, Ciprian Barbu
>>>>>>>>> <ciprian.barbu@linaro.org>
>>>>>>>>> wrote:
>>>>>>>>>> On Wed, Dec 17, 2014 at 5:09 PM, Jerin Jacob
>>>>>>>>>> <jerin.jacob@caviumnetworks.com> wrote:
>>>>>>>>>>> On Wed, Dec 17, 2014 at 03:10:11PM +0200, Ciprian Barbu wrote:
>>>>>>>>>>>> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
>>>>>>>>>>>> ---
>>>>>>>>>>>>   test/validation/odp_schedule.c | 63
>>>>>>>>>>>> ++++++++++++++++++++++++++++++++++++++----
>>>>>>>>>>>>   1 file changed, 58 insertions(+), 5 deletions(-)
>>>>>>>>>>>>
>>>>>>>>>>>> diff --git a/test/validation/odp_schedule.c
>>>>>>>>>>>> b/test/validation/odp_schedule.c
>>>>>>>>>>>> index 31be742..bdbcf77 100644
>>>>>>>>>>>> --- a/test/validation/odp_schedule.c
>>>>>>>>>>>> +++ b/test/validation/odp_schedule.c
>>>>>>>>>>>> @@ -11,9 +11,11 @@
>>>>>>>>>>>>   #define MSG_POOL_SIZE                (4*1024*1024)
>>>>>>>>>>>>   #define QUEUES_PER_PRIO              16
>>>>>>>>>>>>   #define BUF_SIZE             64
>>>>>>>>>>>> -#define TEST_NUM_BUFS                100
>>>>>>>>>>>> +#define NUM_BUFS             100
>>>>>>>>>>>>   #define BURST_BUF_SIZE               4
>>>>>>>>>>>> -#define TEST_NUM_BUFS_EXCL   10000
>>>>>>>>>>>> +#define NUM_BUFS_EXCL                10000
>>>>>>>>>>>> +#define NUM_BUFS_PAUSE               1000
>>>>>>>>>>>> +#define NUM_BUFS_BEFORE_PAUSE        10
>>>>>>>>>>>>
>>>>>>>>>>>>   #define GLOBALS_SHM_NAME     "test_globals"
>>>>>>>>>>>>   #define MSG_POOL_NAME                "msg_pool"
>>>>>>>>>>>> @@ -229,7 +231,7 @@ static void
>>>>>>>>>>>> schedule_common(odp_schedule_sync_t
>>>>>>>>>>>> sync, int num_queues,
>>>>>>>>>>>>        args.sync = sync;
>>>>>>>>>>>>        args.num_queues = num_queues;
>>>>>>>>>>>>        args.num_prio = num_prio;
>>>>>>>>>>>> -     args.num_bufs = TEST_NUM_BUFS;
>>>>>>>>>>>> +     args.num_bufs = NUM_BUFS;
>>>>>>>>>>>>        args.num_cores = 1;
>>>>>>>>>>>>        args.enable_schd_multi = enable_schd_multi;
>>>>>>>>>>>>        args.enable_excl_atomic = 0;    /* Not needed with a
>>>>>>>>>>>> single
>>>>>>>>>>>> core */
>>>>>>>>>>>> @@ -261,9 +263,9 @@ static void
>>>>>>>>>>>> parallel_execute(odp_schedule_sync_t
>>>>>>>>>>>> sync, int num_queues,
>>>>>>>>>>>>        thr_args->num_queues = num_queues;
>>>>>>>>>>>>        thr_args->num_prio = num_prio;
>>>>>>>>>>>>        if (enable_excl_atomic)
>>>>>>>>>>>> -             thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
>>>>>>>>>>>> +             thr_args->num_bufs = NUM_BUFS_EXCL;
>>>>>>>>>>>>        else
>>>>>>>>>>>> -             thr_args->num_bufs = TEST_NUM_BUFS;
>>>>>>>>>>>> +             thr_args->num_bufs = NUM_BUFS;
>>>>>>>>>>>>        thr_args->num_cores = globals->core_count;
>>>>>>>>>>>>        thr_args->enable_schd_multi = enable_schd_multi;
>>>>>>>>>>>>        thr_args->enable_excl_atomic = enable_excl_atomic;
>>>>>>>>>>>> @@ -459,6 +461,56 @@ static void
>>>>>>>>>>>> test_schedule_multi_1q_mt_a_excl(void)
>>>>>>>>>>>>                         ENABLE_EXCL_ATOMIC);
>>>>>>>>>>>>   }
>>>>>>>>>>>>
>>>>>>>>>>>> +static void test_schedule_pause_resume(void)
>>>>>>>>>>>> +{
>>>>>>>>>>>> +     odp_queue_t queue;
>>>>>>>>>>>> +     odp_buffer_t buf;
>>>>>>>>>>>> +     odp_queue_t from;
>>>>>>>>>>>> +     int i;
>>>>>>>>>>>> +     int local_bufs = 0;
>>>>>>>>>>>> +
>>>>>>>>>>>> +     queue = odp_queue_lookup("sched_0_0_n");
>>>>>>>>>>>> +     CU_ASSERT(queue != ODP_QUEUE_INVALID);
>>>>>>>>>>>> +
>>>>>>>>>>>> +     pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
>>>>>>>>>>>> +     CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
>>>>>>>>>>>> +
>>>>>>>>>>>> +
>>>>>>>>>>>> +     for (i = 0; i < NUM_BUFS_PAUSE; i++) {
>>>>>>>>>>>> +             buf = odp_buffer_alloc(pool);
>>>>>>>>>>>> +             CU_ASSERT(buf != ODP_BUFFER_INVALID);
>>>>>>>>>>>> +             odp_queue_enq(queue, buf);
>>>>>>>>>>>> +     }
>>>>>>>>>>>> +
>>>>>>>>>>>> +     for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
>>>>>>>>>>>> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>>>>>>>>>>>> +             CU_ASSERT(from == queue);
>>>>>>>>>>>> +             odp_buffer_free(buf);
>>>>>>>>>>>> +     }
>>>>>>>>>>>> +
>>>>>>>>>>>> +     odp_schedule_pause();
>>>>>>>>>>>> +
>>>>>>>>>>>> +     while (1) {
>>>>>>>>>>>> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>>>>>>>>>>>> +             if (buf == ODP_BUFFER_INVALID)
>>>>>>>>>>>> +                     break;
>>>>>>>>>>>> +
>>>>>>>>>>>> +             CU_ASSERT(from == queue);
>>>>>>>>>>>> +             odp_buffer_free(buf);
>>>>>>>>>>>> +             local_bufs++;
>>>>>>>>>>>> +     }
>>>>>>>>>>>> +
>>>>>>>>>>>> +     CU_ASSERT(local_bufs < NUM_BUFS_PAUSE -
>>>>>>>>>>>> NUM_BUFS_BEFORE_PAUSE);
>>>>>>>>>>> Whats is the expected behavior here, Shouldn't it be
>>>>>>>>>>> CU_ASSERT(local_bufs == 0) ?
>>>>>>>>>>> meaning, the complete pause ?
>>>>>>>>>> Sorry about the delay, I've been playing around with mutt and I
>>>>>>>>>> must
>>>>>>>>>> have accidentally marked this email as read.
>>>>>>>>>> The explanation here is that after pausing the scheduling, there
>>>>>>>>>> might
>>>>>>>>>> still be locally reserved buffers (see the odp_schedule_pause
>>>>>>>>>> documentation). For linux-generic for instance the scheduler
>>>>>>>>>> dequeues
>>>>>>>>>> buffers in bursts, odp_scheduler_pause only stops further
>>>>>>>>>> dequeues,
>>>>>>>>>> buffers may still be in the 'reservoirs'. With that in mind, the
>>>>>>>>>> check
>>>>>>>>>> above makes sure that after pausing only a limited number of
>>>>>>>>>> packets
>>>>>>>>>> are still scheduled, or else said pausing seems to work, not all
>>>>>>>>>> packets being drained.
>>>>>>>>>>
>>>>>>>>>>>> +
>>>>>>>>>>>> +     odp_schedule_resume();
>>>>>>>>>>>> +
>>>>>>>>>>>> +     for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i <
>>>>>>>>>>>> NUM_BUFS_PAUSE; i++) {
>>>>>>>>>>>> +             buf = odp_schedule(&from, ODP_SCHED_WAIT);
>>>>>>>>>>>> +             CU_ASSERT(from == queue);
>>>>>>>>>>>> +             odp_buffer_free(buf);
>>>>>>>>>>>> +     }
>>>>>>>>>>>> +}
>>>>>>>>>>>> +
>>>>>>>>>>>>   static int create_queues(void)
>>>>>>>>>>>>   {
>>>>>>>>>>>>        int i, j, prios;
>>>>>>>>>>>> @@ -594,6 +646,7 @@ struct CU_TestInfo test_odp_schedule[] = {
>>>>>>>>>>>>        {"schedule_multi_mq_mt_prio_a",
>>>>>>>>>>>> test_schedule_multi_mq_mt_prio_a},
>>>>>>>>>>>>        {"schedule_multi_mq_mt_prio_o",
>>>>>>>>>>>> test_schedule_multi_mq_mt_prio_o},
>>>>>>>>>>>>        {"schedule_multi_1q_mt_a_excl",
>>>>>>>>>>>> test_schedule_multi_1q_mt_a_excl},
>>>>>>>>>>>> +     {"schedule_pause_resume",
>>>>>>>>>>>> test_schedule_pause_resume},
>>>>>>>>>>>>        CU_TEST_INFO_NULL,
>>>>>>>>>>>>   };
>>>>>>>>>>>>
>>>>>>>>>>>> --
>>>>>>>>>>>> 1.8.3.2
Mike Holmes Jan. 20, 2015, 9:47 p.m. UTC | #16
It is still not clear to me in writing that we want this. We did discuss it
earlier, but Jerin, Bill and Ola have questions on this thread and I am not
sure they are all addressed.

On 20 January 2015 at 16:34, Maxim Uvarov <maxim.uvarov@linaro.org> wrote:

> Who review this patch please add review-by.
>
> Mike please add yours because it's validation patch.
>
> Maxim.
>
>
> On 01/20/2015 05:23 PM, Ciprian Barbu wrote:
>
>> PING!
>>
>> On Wed, Jan 14, 2015 at 7:14 PM, Mike Holmes <mike.holmes@linaro.org>
>> wrote:
>>
>>> Without any clear change in sight,  lets test what we have, this has
>>> been on
>>> the list for a month
>>>
>>> On 14 January 2015 at 08:35, Ciprian Barbu <ciprian.barbu@linaro.org>
>>> wrote:
>>>
>>>> On Wed, Jan 14, 2015 at 3:28 PM, Ola Liljedahl <
>>>> ola.liljedahl@linaro.org>
>>>> wrote:
>>>>
>>>>> On 7 January 2015 at 20:41, Mike Holmes <mike.holmes@linaro.org>
>>>>> wrote:
>>>>>
>>>>>> I am unsure if I need to pay attention to this for 0.7.0
>>>>>>
>>>>> We need to have a decision (and implementation) for ODP 1.0 though.
>>>>> Scheduling and its semantics are important aspects of ODP.
>>>>>
>>>> The odp_schedule_pause API is already documented and implemented, I
>>>> didn't exactly catch from Petri if we will keep the behavior for 1.0,
>>>> but what is the problem with covering this API in its current form for
>>>> at least 0.7 and 0.8?
>>>>
>>>>  On 7 January 2015 at 04:39, Ciprian Barbu <ciprian.barbu@linaro.org>
>>>>>> wrote:
>>>>>>
>>>>>>> On Tue, Jan 6, 2015 at 4:03 PM, Bill Fischofer
>>>>>>> <bill.fischofer@linaro.org> wrote:
>>>>>>>
>>>>>>>> I think it's something we need to discuss during the sync call.
>>>>>>>>
>>>>>>>> On Tue, Jan 6, 2015 at 7:48 AM, Mike Holmes <mike.holmes@linaro.org
>>>>>>>> >
>>>>>>>> wrote:
>>>>>>>>
>>>>>>>>> Should a bug be made to track a needed change or is it important
>>>>>>>>> for
>>>>>>>>> 1.0
>>>>>>>>> and needs to be in the delta doc ?
>>>>>>>>>
>>>>>>>>> On 6 January 2015 at 08:40, Bill Fischofer
>>>>>>>>> <bill.fischofer@linaro.org>
>>>>>>>>> wrote:
>>>>>>>>>
>>>>>>>>>> Caches should be transparent.  While this may be needed here, it's
>>>>>>>>>> a
>>>>>>>>>> poor
>>>>>>>>>> set of semantics to expose as part of the formal APIs.  This is
>>>>>>>>>> definitely
>>>>>>>>>> something we need to address.  My suggestion is that a
>>>>>>>>>> odp_schedule_pause()
>>>>>>>>>> should cause an implicit cache flush if the implementation is
>>>>>>>>>> using a
>>>>>>>>>> scheduling cache.  That way any cache being used is truly
>>>>>>>>>> transparent
>>>>>>>>>> and
>>>>>>>>>> moreover there won't be unnecessary delays in event processing
>>>>>>>>>> since
>>>>>>>>>> who
>>>>>>>>>> knows how long a pause may last?  Clearly it won't be brief since
>>>>>>>>>> otherwise
>>>>>>>>>> the application would not have bothered with a pause/resume in the
>>>>>>>>>> first
>>>>>>>>>> place.
>>>>>>>>>>
>>>>>>>>> Sorry, I couldn't join you in the ODP call yesterday, mind if you
>>>>>>> give
>>>>>>> a brief update on what was decided?
>>>>>>>
>>>>>>>  On Tue, Jan 6, 2015 at 7:17 AM, Ciprian Barbu
>>>>>>>>>> <ciprian.barbu@linaro.org>
>>>>>>>>>> wrote:
>>>>>>>>>>
>>>>>>>>>>> On Wed, Dec 17, 2014 at 5:09 PM, Jerin Jacob
>>>>>>>>>>> <jerin.jacob@caviumnetworks.com> wrote:
>>>>>>>>>>>
>>>>>>>>>>>> On Wed, Dec 17, 2014 at 03:10:11PM +0200, Ciprian Barbu wrote:
>>>>>>>>>>>>
>>>>>>>>>>>>> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
>>>>>>>>>>>>> ---
>>>>>>>>>>>>>   test/validation/odp_schedule.c | 63
>>>>>>>>>>>>> ++++++++++++++++++++++++++++++++++++++----
>>>>>>>>>>>>>   1 file changed, 58 insertions(+), 5 deletions(-)
>>>>>>>>>>>>>
>>>>>>>>>>>>> diff --git a/test/validation/odp_schedule.c
>>>>>>>>>>>>> b/test/validation/odp_schedule.c
>>>>>>>>>>>>> index 31be742..bdbcf77 100644
>>>>>>>>>>>>> --- a/test/validation/odp_schedule.c
>>>>>>>>>>>>> +++ b/test/validation/odp_schedule.c
>>>>>>>>>>>>> @@ -11,9 +11,11 @@
>>>>>>>>>>>>>   #define MSG_POOL_SIZE                (4*1024*1024)
>>>>>>>>>>>>>   #define QUEUES_PER_PRIO              16
>>>>>>>>>>>>>   #define BUF_SIZE             64
>>>>>>>>>>>>> -#define TEST_NUM_BUFS                100
>>>>>>>>>>>>> +#define NUM_BUFS             100
>>>>>>>>>>>>>   #define BURST_BUF_SIZE               4
>>>>>>>>>>>>> -#define TEST_NUM_BUFS_EXCL   10000
>>>>>>>>>>>>> +#define NUM_BUFS_EXCL                10000
>>>>>>>>>>>>> +#define NUM_BUFS_PAUSE               1000
>>>>>>>>>>>>> +#define NUM_BUFS_BEFORE_PAUSE        10
>>>>>>>>>>>>>
>>>>>>>>>>>>>   #define GLOBALS_SHM_NAME     "test_globals"
>>>>>>>>>>>>>   #define MSG_POOL_NAME                "msg_pool"
>>>>>>>>>>>>> @@ -229,7 +231,7 @@ static void
>>>>>>>>>>>>> schedule_common(odp_schedule_sync_t
>>>>>>>>>>>>> sync, int num_queues,
>>>>>>>>>>>>>        args.sync = sync;
>>>>>>>>>>>>>        args.num_queues = num_queues;
>>>>>>>>>>>>>        args.num_prio = num_prio;
>>>>>>>>>>>>> -     args.num_bufs = TEST_NUM_BUFS;
>>>>>>>>>>>>> +     args.num_bufs = NUM_BUFS;
>>>>>>>>>>>>>        args.num_cores = 1;
>>>>>>>>>>>>>        args.enable_schd_multi = enable_schd_multi;
>>>>>>>>>>>>>        args.enable_excl_atomic = 0;    /* Not needed with a
>>>>>>>>>>>>> single
>>>>>>>>>>>>> core */
>>>>>>>>>>>>> @@ -261,9 +263,9 @@ static void
>>>>>>>>>>>>> parallel_execute(odp_schedule_sync_t
>>>>>>>>>>>>> sync, int num_queues,
>>>>>>>>>>>>>        thr_args->num_queues = num_queues;
>>>>>>>>>>>>>        thr_args->num_prio = num_prio;
>>>>>>>>>>>>>        if (enable_excl_atomic)
>>>>>>>>>>>>> -             thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
>>>>>>>>>>>>> +             thr_args->num_bufs = NUM_BUFS_EXCL;
>>>>>>>>>>>>>        else
>>>>>>>>>>>>> -             thr_args->num_bufs = TEST_NUM_BUFS;
>>>>>>>>>>>>> +             thr_args->num_bufs = NUM_BUFS;
>>>>>>>>>>>>>        thr_args->num_cores = globals->core_count;
>>>>>>>>>>>>>        thr_args->enable_schd_multi = enable_schd_multi;
>>>>>>>>>>>>>        thr_args->enable_excl_atomic = enable_excl_atomic;
>>>>>>>>>>>>> @@ -459,6 +461,56 @@ static void
>>>>>>>>>>>>> test_schedule_multi_1q_mt_a_excl(void)
>>>>>>>>>>>>>                         ENABLE_EXCL_ATOMIC);
>>>>>>>>>>>>>   }
>>>>>>>>>>>>>
>>>>>>>>>>>>> +static void test_schedule_pause_resume(void)
>>>>>>>>>>>>> +{
>>>>>>>>>>>>> +     odp_queue_t queue;
>>>>>>>>>>>>> +     odp_buffer_t buf;
>>>>>>>>>>>>> +     odp_queue_t from;
>>>>>>>>>>>>> +     int i;
>>>>>>>>>>>>> +     int local_bufs = 0;
>>>>>>>>>>>>> +
>>>>>>>>>>>>> +     queue = odp_queue_lookup("sched_0_0_n");
>>>>>>>>>>>>> +     CU_ASSERT(queue != ODP_QUEUE_INVALID);
>>>>>>>>>>>>> +
>>>>>>>>>>>>> +     pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
>>>>>>>>>>>>> +     CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
>>>>>>>>>>>>> +
>>>>>>>>>>>>> +
>>>>>>>>>>>>> +     for (i = 0; i < NUM_BUFS_PAUSE; i++) {
>>>>>>>>>>>>> +             buf = odp_buffer_alloc(pool);
>>>>>>>>>>>>> +             CU_ASSERT(buf != ODP_BUFFER_INVALID);
>>>>>>>>>>>>> +             odp_queue_enq(queue, buf);
>>>>>>>>>>>>> +     }
>>>>>>>>>>>>> +
>>>>>>>>>>>>> +     for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
>>>>>>>>>>>>> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>>>>>>>>>>>>> +             CU_ASSERT(from == queue);
>>>>>>>>>>>>> +             odp_buffer_free(buf);
>>>>>>>>>>>>> +     }
>>>>>>>>>>>>> +
>>>>>>>>>>>>> +     odp_schedule_pause();
>>>>>>>>>>>>> +
>>>>>>>>>>>>> +     while (1) {
>>>>>>>>>>>>> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>>>>>>>>>>>>> +             if (buf == ODP_BUFFER_INVALID)
>>>>>>>>>>>>> +                     break;
>>>>>>>>>>>>> +
>>>>>>>>>>>>> +             CU_ASSERT(from == queue);
>>>>>>>>>>>>> +             odp_buffer_free(buf);
>>>>>>>>>>>>> +             local_bufs++;
>>>>>>>>>>>>> +     }
>>>>>>>>>>>>> +
>>>>>>>>>>>>> +     CU_ASSERT(local_bufs < NUM_BUFS_PAUSE -
>>>>>>>>>>>>> NUM_BUFS_BEFORE_PAUSE);
>>>>>>>>>>>>>
>>>>>>>>>>>> Whats is the expected behavior here, Shouldn't it be
>>>>>>>>>>>> CU_ASSERT(local_bufs == 0) ?
>>>>>>>>>>>> meaning, the complete pause ?
>>>>>>>>>>>>
>>>>>>>>>>> Sorry about the delay, I've been playing around with mutt and I
>>>>>>>>>>> must
>>>>>>>>>>> have accidentally marked this email as read.
>>>>>>>>>>> The explanation here is that after pausing the scheduling, there
>>>>>>>>>>> might
>>>>>>>>>>> still be locally reserved buffers (see the odp_schedule_pause
>>>>>>>>>>> documentation). For linux-generic for instance the scheduler
>>>>>>>>>>> dequeues
>>>>>>>>>>> buffers in bursts, odp_scheduler_pause only stops further
>>>>>>>>>>> dequeues,
>>>>>>>>>>> buffers may still be in the 'reservoirs'. With that in mind, the
>>>>>>>>>>> check
>>>>>>>>>>> above makes sure that after pausing only a limited number of
>>>>>>>>>>> packets
>>>>>>>>>>> are still scheduled, or else said pausing seems to work, not all
>>>>>>>>>>> packets being drained.
>>>>>>>>>>>
>>>>>>>>>>>  +
>>>>>>>>>>>>> +     odp_schedule_resume();
>>>>>>>>>>>>> +
>>>>>>>>>>>>> +     for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i <
>>>>>>>>>>>>> NUM_BUFS_PAUSE; i++) {
>>>>>>>>>>>>> +             buf = odp_schedule(&from, ODP_SCHED_WAIT);
>>>>>>>>>>>>> +             CU_ASSERT(from == queue);
>>>>>>>>>>>>> +             odp_buffer_free(buf);
>>>>>>>>>>>>> +     }
>>>>>>>>>>>>> +}
>>>>>>>>>>>>> +
>>>>>>>>>>>>>   static int create_queues(void)
>>>>>>>>>>>>>   {
>>>>>>>>>>>>>        int i, j, prios;
>>>>>>>>>>>>> @@ -594,6 +646,7 @@ struct CU_TestInfo test_odp_schedule[] = {
>>>>>>>>>>>>>        {"schedule_multi_mq_mt_prio_a",
>>>>>>>>>>>>> test_schedule_multi_mq_mt_prio_a},
>>>>>>>>>>>>>        {"schedule_multi_mq_mt_prio_o",
>>>>>>>>>>>>> test_schedule_multi_mq_mt_prio_o},
>>>>>>>>>>>>>        {"schedule_multi_1q_mt_a_excl",
>>>>>>>>>>>>> test_schedule_multi_1q_mt_a_excl},
>>>>>>>>>>>>> +     {"schedule_pause_resume",
>>>>>>>>>>>>> test_schedule_pause_resume},
>>>>>>>>>>>>>        CU_TEST_INFO_NULL,
>>>>>>>>>>>>>   };
>>>>>>>>>>>>>
>>>>>>>>>>>>> --
>>>>>>>>>>>>> 1.8.3.2
>>>>>>>>>>>>>
>>>>>>>>>>>>>
>>>>>>>>>>>>> _______________________________________________
>>>>>>>>>>>>> lng-odp mailing list
>>>>>>>>>>>>> lng-odp@lists.linaro.org
>>>>>>>>>>>>> http://lists.linaro.org/mailman/listinfo/lng-odp
>>>>>>>>>>>>>
>>>>>>>>>>>> _______________________________________________
>>>>>>>>>>> lng-odp mailing list
>>>>>>>>>>> lng-odp@lists.linaro.org
>>>>>>>>>>> http://lists.linaro.org/mailman/listinfo/lng-odp
>>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> _______________________________________________
>>>>>>>>>> lng-odp mailing list
>>>>>>>>>> lng-odp@lists.linaro.org
>>>>>>>>>> http://lists.linaro.org/mailman/listinfo/lng-odp
>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>
>>>>>>>>> --
>>>>>>>>> Mike Holmes
>>>>>>>>> Linaro  Sr Technical Manager
>>>>>>>>> LNG - ODP
>>>>>>>>>
>>>>>>>>
>>>>>>>>
>>>>>>
>>>>>>
>>>>>> --
>>>>>> Mike Holmes
>>>>>> Linaro  Sr Technical Manager
>>>>>> LNG - ODP
>>>>>>
>>>>>> _______________________________________________
>>>>>> lng-odp mailing list
>>>>>> lng-odp@lists.linaro.org
>>>>>> http://lists.linaro.org/mailman/listinfo/lng-odp
>>>>>>
>>>>>>
>>>
>>>
>>> --
>>> Mike Holmes
>>> Linaro  Sr Technical Manager
>>> LNG - ODP
>>>
>> _______________________________________________
>> lng-odp mailing list
>> lng-odp@lists.linaro.org
>> http://lists.linaro.org/mailman/listinfo/lng-odp
>>
>
>
> _______________________________________________
> lng-odp mailing list
> lng-odp@lists.linaro.org
> http://lists.linaro.org/mailman/listinfo/lng-odp
>
Bill Fischofer Jan. 20, 2015, 10:25 p.m. UTC | #17
My questions were answered.  For now, scheduling caches are non-transparent
and applications wishing to pause scheduling must drain any cached events
prior to exiting the scheduling loop.  We can revisit this post v1.0 when
we discuss various recovery scenarios.
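
For reference, a minimal sketch of that drain pattern, reusing only the
calls exercised by the test in this patch (handle_buf() is a hypothetical
application handler, not an ODP API, and the usual ODP headers are assumed):

static void worker_stop_scheduling(void)
{
        odp_queue_t from;
        odp_buffer_t buf;

        /* Stop the scheduler from dequeuing more work for this thread */
        odp_schedule_pause();

        /* Pause only stops further dequeues; drain events still held in
         * the scheduler's local cache before leaving the schedule loop */
        while (1) {
                buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
                if (buf == ODP_BUFFER_INVALID)
                        break;
                handle_buf(buf); /* hypothetical: process, free or re-enqueue */
        }
}

The thread can later call odp_schedule_resume() before re-entering its
schedule loop.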

On Tue, Jan 20, 2015 at 3:47 PM, Mike Holmes <mike.holmes@linaro.org> wrote:

> It is still not clear to me in writing that we want this, we did discuss
> it earlier but Jerin, Bill and Ola have questions on this thread and I am
> not sure they are all addressed.
>
> On 20 January 2015 at 16:34, Maxim Uvarov <maxim.uvarov@linaro.org> wrote:
>
>> Who review this patch please add review-by.
>>
>> Mike please add yours because it's validation patch.
>>
>> Maxim.
>>
>>
>> On 01/20/2015 05:23 PM, Ciprian Barbu wrote:
>>
>>> PING!
>>>
>>> On Wed, Jan 14, 2015 at 7:14 PM, Mike Holmes <mike.holmes@linaro.org>
>>> wrote:
>>>
>>>> Without any clear change in sight,  lets test what we have, this has
>>>> been on
>>>> the list for a month
>>>>
>>>> On 14 January 2015 at 08:35, Ciprian Barbu <ciprian.barbu@linaro.org>
>>>> wrote:
>>>>
>>>>> On Wed, Jan 14, 2015 at 3:28 PM, Ola Liljedahl <
>>>>> ola.liljedahl@linaro.org>
>>>>> wrote:
>>>>>
>>>>>> On 7 January 2015 at 20:41, Mike Holmes <mike.holmes@linaro.org>
>>>>>> wrote:
>>>>>>
>>>>>>> I am unsure if I need to pay attention to this for 0.7.0
>>>>>>>
>>>>>> We need to have a decision (and implementation) for ODP 1.0 though.
>>>>>> Scheduling and its semantics are important aspects of ODP.
>>>>>>
>>>>> The odp_schedule_pause API is already documented and implemented, I
>>>>> didn't exactly catch from Petri if we will keep the behavior for 1.0,
>>>>> but what is the problem with covering this API in its current form for
>>>>> at least 0.7 and 0.8?
>>>>>
>>>>>  On 7 January 2015 at 04:39, Ciprian Barbu <ciprian.barbu@linaro.org>
>>>>>>> wrote:
>>>>>>>
>>>>>>>> On Tue, Jan 6, 2015 at 4:03 PM, Bill Fischofer
>>>>>>>> <bill.fischofer@linaro.org> wrote:
>>>>>>>>
>>>>>>>>> I think it's something we need to discuss during the sync call.
>>>>>>>>>
>>>>>>>>> On Tue, Jan 6, 2015 at 7:48 AM, Mike Holmes <
>>>>>>>>> mike.holmes@linaro.org>
>>>>>>>>> wrote:
>>>>>>>>>
>>>>>>>>>> Should a bug be made to track a needed change or is it important
>>>>>>>>>> for
>>>>>>>>>> 1.0
>>>>>>>>>> and needs to be in the delta doc ?
>>>>>>>>>>
>>>>>>>>>> On 6 January 2015 at 08:40, Bill Fischofer
>>>>>>>>>> <bill.fischofer@linaro.org>
>>>>>>>>>> wrote:
>>>>>>>>>>
>>>>>>>>>>> Caches should be transparent.  While this may be needed here,
>>>>>>>>>>> it's
>>>>>>>>>>> a
>>>>>>>>>>> poor
>>>>>>>>>>> set of semantics to expose as part of the formal APIs.  This is
>>>>>>>>>>> definitely
>>>>>>>>>>> something we need to address.  My suggestion is that a
>>>>>>>>>>> odp_schedule_pause()
>>>>>>>>>>> should cause an implicit cache flush if the implementation is
>>>>>>>>>>> using a
>>>>>>>>>>> scheduling cache.  That way any cache being used is truly
>>>>>>>>>>> transparent
>>>>>>>>>>> and
>>>>>>>>>>> moreover there won't be unnecessary delays in event processing
>>>>>>>>>>> since
>>>>>>>>>>> who
>>>>>>>>>>> knows how long a pause may last?  Clearly it won't be brief since
>>>>>>>>>>> otherwise
>>>>>>>>>>> the application would not have bothered with a pause/resume in
>>>>>>>>>>> the
>>>>>>>>>>> first
>>>>>>>>>>> place.
>>>>>>>>>>>
>>>>>>>>>> Sorry, I couldn't join you in the ODP call yesterday, mind if you
>>>>>>>> give
>>>>>>>> a brief update on what was decided?
>>>>>>>>
>>>>>>>>  On Tue, Jan 6, 2015 at 7:17 AM, Ciprian Barbu
>>>>>>>>>>> <ciprian.barbu@linaro.org>
>>>>>>>>>>> wrote:
>>>>>>>>>>>
>>>>>>>>>>>> On Wed, Dec 17, 2014 at 5:09 PM, Jerin Jacob
>>>>>>>>>>>> <jerin.jacob@caviumnetworks.com> wrote:
>>>>>>>>>>>>
>>>>>>>>>>>>> On Wed, Dec 17, 2014 at 03:10:11PM +0200, Ciprian Barbu wrote:
>>>>>>>>>>>>>
>>>>>>>>>>>>>> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
>>>>>>>>>>>>>> ---
>>>>>>>>>>>>>>   test/validation/odp_schedule.c | 63
>>>>>>>>>>>>>> ++++++++++++++++++++++++++++++++++++++----
>>>>>>>>>>>>>>   1 file changed, 58 insertions(+), 5 deletions(-)
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> diff --git a/test/validation/odp_schedule.c
>>>>>>>>>>>>>> b/test/validation/odp_schedule.c
>>>>>>>>>>>>>> index 31be742..bdbcf77 100644
>>>>>>>>>>>>>> --- a/test/validation/odp_schedule.c
>>>>>>>>>>>>>> +++ b/test/validation/odp_schedule.c
>>>>>>>>>>>>>> @@ -11,9 +11,11 @@
>>>>>>>>>>>>>>   #define MSG_POOL_SIZE                (4*1024*1024)
>>>>>>>>>>>>>>   #define QUEUES_PER_PRIO              16
>>>>>>>>>>>>>>   #define BUF_SIZE             64
>>>>>>>>>>>>>> -#define TEST_NUM_BUFS                100
>>>>>>>>>>>>>> +#define NUM_BUFS             100
>>>>>>>>>>>>>>   #define BURST_BUF_SIZE               4
>>>>>>>>>>>>>> -#define TEST_NUM_BUFS_EXCL   10000
>>>>>>>>>>>>>> +#define NUM_BUFS_EXCL                10000
>>>>>>>>>>>>>> +#define NUM_BUFS_PAUSE               1000
>>>>>>>>>>>>>> +#define NUM_BUFS_BEFORE_PAUSE        10
>>>>>>>>>>>>>>
>>>>>>>>>>>>>>   #define GLOBALS_SHM_NAME     "test_globals"
>>>>>>>>>>>>>>   #define MSG_POOL_NAME                "msg_pool"
>>>>>>>>>>>>>> @@ -229,7 +231,7 @@ static void
>>>>>>>>>>>>>> schedule_common(odp_schedule_sync_t
>>>>>>>>>>>>>> sync, int num_queues,
>>>>>>>>>>>>>>        args.sync = sync;
>>>>>>>>>>>>>>        args.num_queues = num_queues;
>>>>>>>>>>>>>>        args.num_prio = num_prio;
>>>>>>>>>>>>>> -     args.num_bufs = TEST_NUM_BUFS;
>>>>>>>>>>>>>> +     args.num_bufs = NUM_BUFS;
>>>>>>>>>>>>>>        args.num_cores = 1;
>>>>>>>>>>>>>>        args.enable_schd_multi = enable_schd_multi;
>>>>>>>>>>>>>>        args.enable_excl_atomic = 0;    /* Not needed with a
>>>>>>>>>>>>>> single
>>>>>>>>>>>>>> core */
>>>>>>>>>>>>>> @@ -261,9 +263,9 @@ static void
>>>>>>>>>>>>>> parallel_execute(odp_schedule_sync_t
>>>>>>>>>>>>>> sync, int num_queues,
>>>>>>>>>>>>>>        thr_args->num_queues = num_queues;
>>>>>>>>>>>>>>        thr_args->num_prio = num_prio;
>>>>>>>>>>>>>>        if (enable_excl_atomic)
>>>>>>>>>>>>>> -             thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
>>>>>>>>>>>>>> +             thr_args->num_bufs = NUM_BUFS_EXCL;
>>>>>>>>>>>>>>        else
>>>>>>>>>>>>>> -             thr_args->num_bufs = TEST_NUM_BUFS;
>>>>>>>>>>>>>> +             thr_args->num_bufs = NUM_BUFS;
>>>>>>>>>>>>>>        thr_args->num_cores = globals->core_count;
>>>>>>>>>>>>>>        thr_args->enable_schd_multi = enable_schd_multi;
>>>>>>>>>>>>>>        thr_args->enable_excl_atomic = enable_excl_atomic;
>>>>>>>>>>>>>> @@ -459,6 +461,56 @@ static void
>>>>>>>>>>>>>> test_schedule_multi_1q_mt_a_excl(void)
>>>>>>>>>>>>>>                         ENABLE_EXCL_ATOMIC);
>>>>>>>>>>>>>>   }
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> +static void test_schedule_pause_resume(void)
>>>>>>>>>>>>>> +{
>>>>>>>>>>>>>> +     odp_queue_t queue;
>>>>>>>>>>>>>> +     odp_buffer_t buf;
>>>>>>>>>>>>>> +     odp_queue_t from;
>>>>>>>>>>>>>> +     int i;
>>>>>>>>>>>>>> +     int local_bufs = 0;
>>>>>>>>>>>>>> +
>>>>>>>>>>>>>> +     queue = odp_queue_lookup("sched_0_0_n");
>>>>>>>>>>>>>> +     CU_ASSERT(queue != ODP_QUEUE_INVALID);
>>>>>>>>>>>>>> +
>>>>>>>>>>>>>> +     pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
>>>>>>>>>>>>>> +     CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
>>>>>>>>>>>>>> +
>>>>>>>>>>>>>> +
>>>>>>>>>>>>>> +     for (i = 0; i < NUM_BUFS_PAUSE; i++) {
>>>>>>>>>>>>>> +             buf = odp_buffer_alloc(pool);
>>>>>>>>>>>>>> +             CU_ASSERT(buf != ODP_BUFFER_INVALID);
>>>>>>>>>>>>>> +             odp_queue_enq(queue, buf);
>>>>>>>>>>>>>> +     }
>>>>>>>>>>>>>> +
>>>>>>>>>>>>>> +     for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
>>>>>>>>>>>>>> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>>>>>>>>>>>>>> +             CU_ASSERT(from == queue);
>>>>>>>>>>>>>> +             odp_buffer_free(buf);
>>>>>>>>>>>>>> +     }
>>>>>>>>>>>>>> +
>>>>>>>>>>>>>> +     odp_schedule_pause();
>>>>>>>>>>>>>> +
>>>>>>>>>>>>>> +     while (1) {
>>>>>>>>>>>>>> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>>>>>>>>>>>>>> +             if (buf == ODP_BUFFER_INVALID)
>>>>>>>>>>>>>> +                     break;
>>>>>>>>>>>>>> +
>>>>>>>>>>>>>> +             CU_ASSERT(from == queue);
>>>>>>>>>>>>>> +             odp_buffer_free(buf);
>>>>>>>>>>>>>> +             local_bufs++;
>>>>>>>>>>>>>> +     }
>>>>>>>>>>>>>> +
>>>>>>>>>>>>>> +     CU_ASSERT(local_bufs < NUM_BUFS_PAUSE -
>>>>>>>>>>>>>> NUM_BUFS_BEFORE_PAUSE);
>>>>>>>>>>>>>>
>>>>>>>>>>>>> Whats is the expected behavior here, Shouldn't it be
>>>>>>>>>>>>> CU_ASSERT(local_bufs == 0) ?
>>>>>>>>>>>>> meaning, the complete pause ?
>>>>>>>>>>>>>
>>>>>>>>>>>> Sorry about the delay, I've been playing around with mutt and I
>>>>>>>>>>>> must
>>>>>>>>>>>> have accidentally marked this email as read.
>>>>>>>>>>>> The explanation here is that after pausing the scheduling, there
>>>>>>>>>>>> might
>>>>>>>>>>>> still be locally reserved buffers (see the odp_schedule_pause
>>>>>>>>>>>> documentation). For linux-generic for instance the scheduler
>>>>>>>>>>>> dequeues
>>>>>>>>>>>> buffers in bursts, odp_scheduler_pause only stops further
>>>>>>>>>>>> dequeues,
>>>>>>>>>>>> buffers may still be in the 'reservoirs'. With that in mind, the
>>>>>>>>>>>> check
>>>>>>>>>>>> above makes sure that after pausing only a limited number of
>>>>>>>>>>>> packets
>>>>>>>>>>>> are still scheduled, or else said pausing seems to work, not all
>>>>>>>>>>>> packets being drained.
>>>>>>>>>>>>
>>>>>>>>>>>>  +
>>>>>>>>>>>>>> +     odp_schedule_resume();
>>>>>>>>>>>>>> +
>>>>>>>>>>>>>> +     for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i <
>>>>>>>>>>>>>> NUM_BUFS_PAUSE; i++) {
>>>>>>>>>>>>>> +             buf = odp_schedule(&from, ODP_SCHED_WAIT);
>>>>>>>>>>>>>> +             CU_ASSERT(from == queue);
>>>>>>>>>>>>>> +             odp_buffer_free(buf);
>>>>>>>>>>>>>> +     }
>>>>>>>>>>>>>> +}
>>>>>>>>>>>>>> +
>>>>>>>>>>>>>>   static int create_queues(void)
>>>>>>>>>>>>>>   {
>>>>>>>>>>>>>>        int i, j, prios;
>>>>>>>>>>>>>> @@ -594,6 +646,7 @@ struct CU_TestInfo test_odp_schedule[] = {
>>>>>>>>>>>>>>        {"schedule_multi_mq_mt_prio_a",
>>>>>>>>>>>>>> test_schedule_multi_mq_mt_prio_a},
>>>>>>>>>>>>>>        {"schedule_multi_mq_mt_prio_o",
>>>>>>>>>>>>>> test_schedule_multi_mq_mt_prio_o},
>>>>>>>>>>>>>>        {"schedule_multi_1q_mt_a_excl",
>>>>>>>>>>>>>> test_schedule_multi_1q_mt_a_excl},
>>>>>>>>>>>>>> +     {"schedule_pause_resume",
>>>>>>>>>>>>>> test_schedule_pause_resume},
>>>>>>>>>>>>>>        CU_TEST_INFO_NULL,
>>>>>>>>>>>>>>   };
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> --
>>>>>>>>>>>>>> 1.8.3.2
Ola Liljedahl Jan. 21, 2015, 12:55 p.m. UTC | #18
On 21 January 2015 at 06:23, Jerin Jacob <jerin.jacob@caviumnetworks.com> wrote:
> On Tue, Jan 20, 2015 at 04:25:41PM -0600, Bill Fischofer wrote:
>> My questions were answered.  For now scheduling caches are non-transparent
>> and applications wishing to pause scheduling must drain any cached events
>> prior to exiting the scheduling loop.  We can revisit this post v1.0 when
>> we discuss various recovery scenarios.
>
> +1
++

>
>>
>> On Tue, Jan 20, 2015 at 3:47 PM, Mike Holmes <mike.holmes@linaro.org> wrote:
>>
>> > It is still not clear to me in writing that we want this, we did discuss
>> > it earlier but Jerin, Bill and Ola have questions on this thread and I am
>> > not sure they are all addressed.
>> >
>> > On 20 January 2015 at 16:34, Maxim Uvarov <maxim.uvarov@linaro.org> wrote:
>> >
>> >> Who review this patch please add review-by.
>> >>
>> >> Mike please add yours because it's validation patch.
>> >>
>> >> Maxim.
>> >>
>> >>
>> >> On 01/20/2015 05:23 PM, Ciprian Barbu wrote:
>> >>
>> >>> PING!
>> >>>
>> >>> On Wed, Jan 14, 2015 at 7:14 PM, Mike Holmes <mike.holmes@linaro.org>
>> >>> wrote:
>> >>>
>> >>>> Without any clear change in sight,  lets test what we have, this has
>> >>>> been on
>> >>>> the list for a month
>> >>>>
>> >>>> On 14 January 2015 at 08:35, Ciprian Barbu <ciprian.barbu@linaro.org>
>> >>>> wrote:
>> >>>>
>> >>>>> On Wed, Jan 14, 2015 at 3:28 PM, Ola Liljedahl <
>> >>>>> ola.liljedahl@linaro.org>
>> >>>>> wrote:
>> >>>>>
>> >>>>>> On 7 January 2015 at 20:41, Mike Holmes <mike.holmes@linaro.org>
>> >>>>>> wrote:
>> >>>>>>
>> >>>>>>> I am unsure if I need to pay attention to this for 0.7.0
>> >>>>>>>
>> >>>>>> We need to have a decision (and implementation) for ODP 1.0 though.
>> >>>>>> Scheduling and its semantics are important aspects of ODP.
>> >>>>>>
>> >>>>> The odp_schedule_pause API is already documented and implemented, I
>> >>>>> didn't exactly catch from Petri if we will keep the behavior for 1.0,
>> >>>>> but what is the problem with covering this API in its current form for
>> >>>>> at least 0.7 and 0.8?
>> >>>>>
>> >>>>>  On 7 January 2015 at 04:39, Ciprian Barbu <ciprian.barbu@linaro.org>
>> >>>>>>> wrote:
>> >>>>>>>
>> >>>>>>>> On Tue, Jan 6, 2015 at 4:03 PM, Bill Fischofer
>> >>>>>>>> <bill.fischofer@linaro.org> wrote:
>> >>>>>>>>
>> >>>>>>>>> I think it's something we need to discuss during the sync call.
>> >>>>>>>>>
>> >>>>>>>>> On Tue, Jan 6, 2015 at 7:48 AM, Mike Holmes <
>> >>>>>>>>> mike.holmes@linaro.org>
>> >>>>>>>>> wrote:
>> >>>>>>>>>
>> >>>>>>>>>> Should a bug be made to track a needed change or is it important
>> >>>>>>>>>> for
>> >>>>>>>>>> 1.0
>> >>>>>>>>>> and needs to be in the delta doc ?
>> >>>>>>>>>>
>> >>>>>>>>>> On 6 January 2015 at 08:40, Bill Fischofer
>> >>>>>>>>>> <bill.fischofer@linaro.org>
>> >>>>>>>>>> wrote:
>> >>>>>>>>>>
>> >>>>>>>>>>> Caches should be transparent.  While this may be needed here,
>> >>>>>>>>>>> it's
>> >>>>>>>>>>> a
>> >>>>>>>>>>> poor
>> >>>>>>>>>>> set of semantics to expose as part of the formal APIs.  This is
>> >>>>>>>>>>> definitely
>> >>>>>>>>>>> something we need to address.  My suggestion is that a
>> >>>>>>>>>>> odp_schedule_pause()
>> >>>>>>>>>>> should cause an implicit cache flush if the implementation is
>> >>>>>>>>>>> using a
>> >>>>>>>>>>> scheduling cache.  That way any cache being used is truly
>> >>>>>>>>>>> transparent
>> >>>>>>>>>>> and
>> >>>>>>>>>>> moreover there won't be unnecessary delays in event processing
>> >>>>>>>>>>> since
>> >>>>>>>>>>> who
>> >>>>>>>>>>> knows how long a pause may last?  Clearly it won't be brief since
>> >>>>>>>>>>> otherwise
>> >>>>>>>>>>> the application would not have bothered with a pause/resume in
>> >>>>>>>>>>> the
>> >>>>>>>>>>> first
>> >>>>>>>>>>> place.
>> >>>>>>>>>>>
>> >>>>>>>>>> Sorry, I couldn't join you in the ODP call yesterday, mind if you
>> >>>>>>>> give
>> >>>>>>>> a brief update on what was decided?
>> >>>>>>>>
>> >>>>>>>>  On Tue, Jan 6, 2015 at 7:17 AM, Ciprian Barbu
>> >>>>>>>>>>> <ciprian.barbu@linaro.org>
>> >>>>>>>>>>> wrote:
>> >>>>>>>>>>>
>> >>>>>>>>>>>> On Wed, Dec 17, 2014 at 5:09 PM, Jerin Jacob
>> >>>>>>>>>>>> <jerin.jacob@caviumnetworks.com> wrote:
>> >>>>>>>>>>>>
>> >>>>>>>>>>>>> On Wed, Dec 17, 2014 at 03:10:11PM +0200, Ciprian Barbu wrote:
>> >>>>>>>>>>>>>
>> >>>>>>>>>>>>>> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
>> >>>>>>>>>>>>>> ---
>> >>>>>>>>>>>>>>   test/validation/odp_schedule.c | 63
>> >>>>>>>>>>>>>> ++++++++++++++++++++++++++++++++++++++----
>> >>>>>>>>>>>>>>   1 file changed, 58 insertions(+), 5 deletions(-)
>> >>>>>>>>>>>>>>
>> >>>>>>>>>>>>>> diff --git a/test/validation/odp_schedule.c
>> >>>>>>>>>>>>>> b/test/validation/odp_schedule.c
>> >>>>>>>>>>>>>> index 31be742..bdbcf77 100644
>> >>>>>>>>>>>>>> --- a/test/validation/odp_schedule.c
>> >>>>>>>>>>>>>> +++ b/test/validation/odp_schedule.c
>> >>>>>>>>>>>>>> @@ -11,9 +11,11 @@
>> >>>>>>>>>>>>>>   #define MSG_POOL_SIZE                (4*1024*1024)
>> >>>>>>>>>>>>>>   #define QUEUES_PER_PRIO              16
>> >>>>>>>>>>>>>>   #define BUF_SIZE             64
>> >>>>>>>>>>>>>> -#define TEST_NUM_BUFS                100
>> >>>>>>>>>>>>>> +#define NUM_BUFS             100
>> >>>>>>>>>>>>>>   #define BURST_BUF_SIZE               4
>> >>>>>>>>>>>>>> -#define TEST_NUM_BUFS_EXCL   10000
>> >>>>>>>>>>>>>> +#define NUM_BUFS_EXCL                10000
>> >>>>>>>>>>>>>> +#define NUM_BUFS_PAUSE               1000
>> >>>>>>>>>>>>>> +#define NUM_BUFS_BEFORE_PAUSE        10
>> >>>>>>>>>>>>>>
>> >>>>>>>>>>>>>>   #define GLOBALS_SHM_NAME     "test_globals"
>> >>>>>>>>>>>>>>   #define MSG_POOL_NAME                "msg_pool"
>> >>>>>>>>>>>>>> @@ -229,7 +231,7 @@ static void
>> >>>>>>>>>>>>>> schedule_common(odp_schedule_sync_t
>> >>>>>>>>>>>>>> sync, int num_queues,
>> >>>>>>>>>>>>>>        args.sync = sync;
>> >>>>>>>>>>>>>>        args.num_queues = num_queues;
>> >>>>>>>>>>>>>>        args.num_prio = num_prio;
>> >>>>>>>>>>>>>> -     args.num_bufs = TEST_NUM_BUFS;
>> >>>>>>>>>>>>>> +     args.num_bufs = NUM_BUFS;
>> >>>>>>>>>>>>>>        args.num_cores = 1;
>> >>>>>>>>>>>>>>        args.enable_schd_multi = enable_schd_multi;
>> >>>>>>>>>>>>>>        args.enable_excl_atomic = 0;    /* Not needed with a
>> >>>>>>>>>>>>>> single
>> >>>>>>>>>>>>>> core */
>> >>>>>>>>>>>>>> @@ -261,9 +263,9 @@ static void
>> >>>>>>>>>>>>>> parallel_execute(odp_schedule_sync_t
>> >>>>>>>>>>>>>> sync, int num_queues,
>> >>>>>>>>>>>>>>        thr_args->num_queues = num_queues;
>> >>>>>>>>>>>>>>        thr_args->num_prio = num_prio;
>> >>>>>>>>>>>>>>        if (enable_excl_atomic)
>> >>>>>>>>>>>>>> -             thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
>> >>>>>>>>>>>>>> +             thr_args->num_bufs = NUM_BUFS_EXCL;
>> >>>>>>>>>>>>>>        else
>> >>>>>>>>>>>>>> -             thr_args->num_bufs = TEST_NUM_BUFS;
>> >>>>>>>>>>>>>> +             thr_args->num_bufs = NUM_BUFS;
>> >>>>>>>>>>>>>>        thr_args->num_cores = globals->core_count;
>> >>>>>>>>>>>>>>        thr_args->enable_schd_multi = enable_schd_multi;
>> >>>>>>>>>>>>>>        thr_args->enable_excl_atomic = enable_excl_atomic;
>> >>>>>>>>>>>>>> @@ -459,6 +461,56 @@ static void
>> >>>>>>>>>>>>>> test_schedule_multi_1q_mt_a_excl(void)
>> >>>>>>>>>>>>>>                         ENABLE_EXCL_ATOMIC);
>> >>>>>>>>>>>>>>   }
>> >>>>>>>>>>>>>>
>> >>>>>>>>>>>>>> +static void test_schedule_pause_resume(void)
>> >>>>>>>>>>>>>> +{
>> >>>>>>>>>>>>>> +     odp_queue_t queue;
>> >>>>>>>>>>>>>> +     odp_buffer_t buf;
>> >>>>>>>>>>>>>> +     odp_queue_t from;
>> >>>>>>>>>>>>>> +     int i;
>> >>>>>>>>>>>>>> +     int local_bufs = 0;
>> >>>>>>>>>>>>>> +
>> >>>>>>>>>>>>>> +     queue = odp_queue_lookup("sched_0_0_n");
>> >>>>>>>>>>>>>> +     CU_ASSERT(queue != ODP_QUEUE_INVALID);
>> >>>>>>>>>>>>>> +
>> >>>>>>>>>>>>>> +     pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
>> >>>>>>>>>>>>>> +     CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
>> >>>>>>>>>>>>>> +
>> >>>>>>>>>>>>>> +
>> >>>>>>>>>>>>>> +     for (i = 0; i < NUM_BUFS_PAUSE; i++) {
>> >>>>>>>>>>>>>> +             buf = odp_buffer_alloc(pool);
>> >>>>>>>>>>>>>> +             CU_ASSERT(buf != ODP_BUFFER_INVALID);
>> >>>>>>>>>>>>>> +             odp_queue_enq(queue, buf);
>> >>>>>>>>>>>>>> +     }
>> >>>>>>>>>>>>>> +
>> >>>>>>>>>>>>>> +     for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
>> >>>>>>>>>>>>>> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>> >>>>>>>>>>>>>> +             CU_ASSERT(from == queue);
>> >>>>>>>>>>>>>> +             odp_buffer_free(buf);
>> >>>>>>>>>>>>>> +     }
>> >>>>>>>>>>>>>> +
>> >>>>>>>>>>>>>> +     odp_schedule_pause();
>> >>>>>>>>>>>>>> +
>> >>>>>>>>>>>>>> +     while (1) {
>> >>>>>>>>>>>>>> +             buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
>> >>>>>>>>>>>>>> +             if (buf == ODP_BUFFER_INVALID)
>> >>>>>>>>>>>>>> +                     break;
>> >>>>>>>>>>>>>> +
>> >>>>>>>>>>>>>> +             CU_ASSERT(from == queue);
>> >>>>>>>>>>>>>> +             odp_buffer_free(buf);
>> >>>>>>>>>>>>>> +             local_bufs++;
>> >>>>>>>>>>>>>> +     }
>> >>>>>>>>>>>>>> +
>> >>>>>>>>>>>>>> +     CU_ASSERT(local_bufs < NUM_BUFS_PAUSE -
>> >>>>>>>>>>>>>> NUM_BUFS_BEFORE_PAUSE);
>> >>>>>>>>>>>>>>
>> >>>>>>>>>>>>> Whats is the expected behavior here, Shouldn't it be
>> >>>>>>>>>>>>> CU_ASSERT(local_bufs == 0) ?
>> >>>>>>>>>>>>> meaning, the complete pause ?
>> >>>>>>>>>>>>>
>> >>>>>>>>>>>> Sorry about the delay, I've been playing around with mutt and I
>> >>>>>>>>>>>> must
>> >>>>>>>>>>>> have accidentally marked this email as read.
>> >>>>>>>>>>>> The explanation here is that after pausing the scheduling, there
>> >>>>>>>>>>>> might
>> >>>>>>>>>>>> still be locally reserved buffers (see the odp_schedule_pause
>> >>>>>>>>>>>> documentation). For linux-generic for instance the scheduler
>> >>>>>>>>>>>> dequeues
>> >>>>>>>>>>>> buffers in bursts, odp_scheduler_pause only stops further
>> >>>>>>>>>>>> dequeues,
>> >>>>>>>>>>>> buffers may still be in the 'reservoirs'. With that in mind, the
>> >>>>>>>>>>>> check
>> >>>>>>>>>>>> above makes sure that after pausing only a limited number of
>> >>>>>>>>>>>> packets
>> >>>>>>>>>>>> are still scheduled, or else said pausing seems to work, not all
>> >>>>>>>>>>>> packets being drained.
>> >>>>>>>>>>>>
>> >>>>>>>>>>>>  +
>> >>>>>>>>>>>>>> +     odp_schedule_resume();
>> >>>>>>>>>>>>>> +
>> >>>>>>>>>>>>>> +     for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i <
>> >>>>>>>>>>>>>> NUM_BUFS_PAUSE; i++) {
>> >>>>>>>>>>>>>> +             buf = odp_schedule(&from, ODP_SCHED_WAIT);
>> >>>>>>>>>>>>>> +             CU_ASSERT(from == queue);
>> >>>>>>>>>>>>>> +             odp_buffer_free(buf);
>> >>>>>>>>>>>>>> +     }
>> >>>>>>>>>>>>>> +}
>> >>>>>>>>>>>>>> +
>> >>>>>>>>>>>>>>   static int create_queues(void)
>> >>>>>>>>>>>>>>   {
>> >>>>>>>>>>>>>>        int i, j, prios;
>> >>>>>>>>>>>>>> @@ -594,6 +646,7 @@ struct CU_TestInfo test_odp_schedule[] = {
>> >>>>>>>>>>>>>>        {"schedule_multi_mq_mt_prio_a",
>> >>>>>>>>>>>>>> test_schedule_multi_mq_mt_prio_a},
>> >>>>>>>>>>>>>>        {"schedule_multi_mq_mt_prio_o",
>> >>>>>>>>>>>>>> test_schedule_multi_mq_mt_prio_o},
>> >>>>>>>>>>>>>>        {"schedule_multi_1q_mt_a_excl",
>> >>>>>>>>>>>>>> test_schedule_multi_1q_mt_a_excl},
>> >>>>>>>>>>>>>> +     {"schedule_pause_resume",
>> >>>>>>>>>>>>>> test_schedule_pause_resume},
>> >>>>>>>>>>>>>>        CU_TEST_INFO_NULL,
>> >>>>>>>>>>>>>>   };
>> >>>>>>>>>>>>>>
>> >>>>>>>>>>>>>> --
>> >>>>>>>>>>>>>> 1.8.3.2
>> >>>>>>>>>>>>>>
>> >>>>>>>>>>>>>>
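To make the semantics described above concrete, here is a minimal sketch of the
pause/drain/resume pattern a worker loop might follow. It is illustrative only,
not part of the submitted patch; it assumes the <odp.h> umbrella header and the
same buffer-based scheduling calls the test uses, and the function name is made
up for the example.

#include <odp.h>

/* Sketch: stop scheduling, drain locally reserved events, then resume.
 * Assumes the scheduler may cache a burst of events per thread, as the
 * linux-generic implementation described above does. */
static void worker_pause_sketch(void)
{
	odp_queue_t from;
	odp_buffer_t buf;

	/* Stop further dequeues; locally cached buffers may remain. */
	odp_schedule_pause();

	/* Drain whatever the scheduler already reserved for this thread. */
	while (1) {
		buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
		if (buf == ODP_BUFFER_INVALID)
			break;
		/* Process or return the event; here it is simply freed. */
		odp_buffer_free(buf);
	}

	/* The thread can now safely leave the scheduling loop. */

	/* Call resume before scheduling again. */
	odp_schedule_resume();
}

If odp_schedule_pause() performed an implicit cache flush, as suggested
elsewhere in the thread, the drain loop above would not be needed; that
question was deferred to post v1.0.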
Mike Holmes Jan. 21, 2015, 2:09 p.m. UTC | #19
Petri, you are the scheduling expert; can we get your review? Otherwise, as a
validation test, I can review it with less deep understanding.

On 21 January 2015 at 07:55, Ola Liljedahl <ola.liljedahl@linaro.org> wrote:

> On 21 January 2015 at 06:23, Jerin Jacob <jerin.jacob@caviumnetworks.com>
> wrote:
> > On Tue, Jan 20, 2015 at 04:25:41PM -0600, Bill Fischofer wrote:
> >> My questions were answered.  For now scheduling caches are non-transparent
> >> and applications wishing to pause scheduling must drain any cached events
> >> prior to exiting the scheduling loop.  We can revisit this post v1.0 when
> >> we discuss various recovery scenarios.
> >
> > +1
> ++
>
> >
> >>
> >> On Tue, Jan 20, 2015 at 3:47 PM, Mike Holmes <mike.holmes@linaro.org> wrote:
> >>
> >> > It is still not clear to me in writing that we want this, we did discuss
> >> > it earlier but Jerin, Bill and Ola have questions on this thread and I am
> >> > not sure they are all addressed.
> >> >
> >> > On 20 January 2015 at 16:34, Maxim Uvarov <maxim.uvarov@linaro.org> wrote:
> >> >
> >> >> Who review this patch please add review-by.
> >> >>
> >> >> Mike please add yours because it's validation patch.
> >> >>
> >> >> Maxim.
> >> >>
> >> >>
> >> >> On 01/20/2015 05:23 PM, Ciprian Barbu wrote:
> >> >>
> >> >>> PING!
> >> >>>
> >> >>> On Wed, Jan 14, 2015 at 7:14 PM, Mike Holmes <
> mike.holmes@linaro.org>
> >> >>> wrote:
> >> >>>
> >> >>>> Without any clear change in sight, let's test what we have; this has
> >> >>>> been on the list for a month.
> >> >>>>
> >> >>>> On 14 January 2015 at 08:35, Ciprian Barbu <ciprian.barbu@linaro.org>
> >> >>>> wrote:
> >> >>>>
> >> >>>>> On Wed, Jan 14, 2015 at 3:28 PM, Ola Liljedahl <ola.liljedahl@linaro.org>
> >> >>>>> wrote:
> >> >>>>>
> >> >>>>>> On 7 January 2015 at 20:41, Mike Holmes <mike.holmes@linaro.org>
> >> >>>>>> wrote:
> >> >>>>>>
> >> >>>>>>> I am unsure if I need to pay attention to this for 0.7.0
> >> >>>>>>>
> >> >>>>>> We need to have a decision (and implementation) for ODP 1.0 though.
> >> >>>>>> Scheduling and its semantics are important aspects of ODP.
> >> >>>>>
> >> >>>>> The odp_schedule_pause API is already documented and implemented, I
> >> >>>>> didn't exactly catch from Petri if we will keep the behavior for 1.0,
> >> >>>>> but what is the problem with covering this API in its current form for
> >> >>>>> at least 0.7 and 0.8?
> >> >>>>>
> >> >>>>>>> On 7 January 2015 at 04:39, Ciprian Barbu <ciprian.barbu@linaro.org>
> >> >>>>>>> wrote:
> >> >>>>>>>
> >> >>>>>>>> On Tue, Jan 6, 2015 at 4:03 PM, Bill Fischofer
> >> >>>>>>>> <bill.fischofer@linaro.org> wrote:
> >> >>>>>>>>
> >> >>>>>>>>> I think it's something we need to discuss during the sync call.
> >> >>>>>>>>>
> >> >>>>>>>>> On Tue, Jan 6, 2015 at 7:48 AM, Mike Holmes <mike.holmes@linaro.org>
> >> >>>>>>>>> wrote:
> >> >>>>>>>>>
> >> >>>>>>>>>> Should a bug be made to track a needed change or is it important
> >> >>>>>>>>>> for 1.0 and needs to be in the delta doc ?
> >> >>>>>>>>>>
> >> >>>>>>>>>> On 6 January 2015 at 08:40, Bill Fischofer
> >> >>>>>>>>>> <bill.fischofer@linaro.org>
> >> >>>>>>>>>> wrote:
> >> >>>>>>>>>>
> >> >>>>>>>>>>>>> Caches should be transparent.  While this may be needed here,
> >> >>>>>>>>>>>>> it's a poor set of semantics to expose as part of the formal
> >> >>>>>>>>>>>>> APIs.  This is definitely something we need to address.  My
> >> >>>>>>>>>>>>> suggestion is that an odp_schedule_pause() should cause an
> >> >>>>>>>>>>>>> implicit cache flush if the implementation is using a
> >> >>>>>>>>>>>>> scheduling cache.  That way any cache being used is truly
> >> >>>>>>>>>>>>> transparent and moreover there won't be unnecessary delays in
> >> >>>>>>>>>>>>> event processing, since who knows how long a pause may last?
> >> >>>>>>>>>>>>> Clearly it won't be brief since otherwise the application
> >> >>>>>>>>>>>>> would not have bothered with a pause/resume in the first
> >> >>>>>>>>>>>>> place.
> >> >>>>>>>>>>>
> >> >>>>>>>> Sorry, I couldn't join you in the ODP call yesterday, mind if you
> >> >>>>>>>> give a brief update on what was decided?
> >> >>>>>>>>
> >> >>>>>>>>  On Tue, Jan 6, 2015 at 7:17 AM, Ciprian Barbu
> >> >>>>>>>>>>> <ciprian.barbu@linaro.org>
> >> >>>>>>>>>>> wrote:
> >> >>>>>>>>>>>
> >> >>>>>>>>>>>> On Wed, Dec 17, 2014 at 5:09 PM, Jerin Jacob
> >> >>>>>>>>>>>> <jerin.jacob@caviumnetworks.com> wrote:
> >> >>>>>>>>>>>>
> >> >>>>>>>>>>>>> On Wed, Dec 17, 2014 at 03:10:11PM +0200, Ciprian Barbu
> wrote:
> >> >>>>>>>>>>>>>
> >> >>>>>>>>>>>>>> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
> >> >>>>>>>>>>>>>> ---
> >> >>>>>>>>>>>>>>   test/validation/odp_schedule.c | 63
> >> >>>>>>>>>>>>>> ++++++++++++++++++++++++++++++++++++++----
> >> >>>>>>>>>>>>>>   1 file changed, 58 insertions(+), 5 deletions(-)
> >> >>>>>>>>>>>>>>
> >> >>>>>>>>>>>>>> diff --git a/test/validation/odp_schedule.c
> >> >>>>>>>>>>>>>> b/test/validation/odp_schedule.c
> >> >>>>>>>>>>>>>> index 31be742..bdbcf77 100644
> >> >>>>>>>>>>>>>> --- a/test/validation/odp_schedule.c
> >> >>>>>>>>>>>>>> +++ b/test/validation/odp_schedule.c
> >> >>>>>>>>>>>>>> @@ -11,9 +11,11 @@
> >> >>>>>>>>>>>>>>   #define MSG_POOL_SIZE                (4*1024*1024)
> >> >>>>>>>>>>>>>>   #define QUEUES_PER_PRIO              16
> >> >>>>>>>>>>>>>>   #define BUF_SIZE             64
> >> >>>>>>>>>>>>>> -#define TEST_NUM_BUFS                100
> >> >>>>>>>>>>>>>> +#define NUM_BUFS             100
> >> >>>>>>>>>>>>>>   #define BURST_BUF_SIZE               4
> >> >>>>>>>>>>>>>> -#define TEST_NUM_BUFS_EXCL   10000
> >> >>>>>>>>>>>>>> +#define NUM_BUFS_EXCL                10000
> >> >>>>>>>>>>>>>> +#define NUM_BUFS_PAUSE               1000
> >> >>>>>>>>>>>>>> +#define NUM_BUFS_BEFORE_PAUSE        10
> >> >>>>>>>>>>>>>>
> >> >>>>>>>>>>>>>>   #define GLOBALS_SHM_NAME     "test_globals"
> >> >>>>>>>>>>>>>>   #define MSG_POOL_NAME                "msg_pool"
> >> >>>>>>>>>>>>>> @@ -229,7 +231,7 @@ static void
> >> >>>>>>>>>>>>>> schedule_common(odp_schedule_sync_t
> >> >>>>>>>>>>>>>> sync, int num_queues,
> >> >>>>>>>>>>>>>>        args.sync = sync;
> >> >>>>>>>>>>>>>>        args.num_queues = num_queues;
> >> >>>>>>>>>>>>>>        args.num_prio = num_prio;
> >> >>>>>>>>>>>>>> -     args.num_bufs = TEST_NUM_BUFS;
> >> >>>>>>>>>>>>>> +     args.num_bufs = NUM_BUFS;
> >> >>>>>>>>>>>>>>        args.num_cores = 1;
> >> >>>>>>>>>>>>>>        args.enable_schd_multi = enable_schd_multi;
> >> >>>>>>>>>>>>>>        args.enable_excl_atomic = 0;    /* Not needed
> with a
> >> >>>>>>>>>>>>>> single
> >> >>>>>>>>>>>>>> core */
> >> >>>>>>>>>>>>>> @@ -261,9 +263,9 @@ static void
> >> >>>>>>>>>>>>>> parallel_execute(odp_schedule_sync_t
> >> >>>>>>>>>>>>>> sync, int num_queues,
> >> >>>>>>>>>>>>>>        thr_args->num_queues = num_queues;
> >> >>>>>>>>>>>>>>        thr_args->num_prio = num_prio;
> >> >>>>>>>>>>>>>>        if (enable_excl_atomic)
> >> >>>>>>>>>>>>>> -             thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
> >> >>>>>>>>>>>>>> +             thr_args->num_bufs = NUM_BUFS_EXCL;
> >> >>>>>>>>>>>>>>        else
> >> >>>>>>>>>>>>>> -             thr_args->num_bufs = TEST_NUM_BUFS;
> >> >>>>>>>>>>>>>> +             thr_args->num_bufs = NUM_BUFS;
> >> >>>>>>>>>>>>>>        thr_args->num_cores = globals->core_count;
> >> >>>>>>>>>>>>>>        thr_args->enable_schd_multi = enable_schd_multi;
> >> >>>>>>>>>>>>>>        thr_args->enable_excl_atomic = enable_excl_atomic;
> >> >>>>>>>>>>>>>> @@ -459,6 +461,56 @@ static void
> >> >>>>>>>>>>>>>> test_schedule_multi_1q_mt_a_excl(void)
> >> >>>>>>>>>>>>>>                         ENABLE_EXCL_ATOMIC);
> >> >>>>>>>>>>>>>>   }
> >> >>>>>>>>>>>>>>
> >> >>>>>>>>>>>>>> +static void test_schedule_pause_resume(void)
> >> >>>>>>>>>>>>>> +{
> >> >>>>>>>>>>>>>> +     odp_queue_t queue;
> >> >>>>>>>>>>>>>> +     odp_buffer_t buf;
> >> >>>>>>>>>>>>>> +     odp_queue_t from;
> >> >>>>>>>>>>>>>> +     int i;
> >> >>>>>>>>>>>>>> +     int local_bufs = 0;
> >> >>>>>>>>>>>>>> +
> >> >>>>>>>>>>>>>> +     queue = odp_queue_lookup("sched_0_0_n");
> >> >>>>>>>>>>>>>> +     CU_ASSERT(queue != ODP_QUEUE_INVALID);
> >> >>>>>>>>>>>>>> +
> >> >>>>>>>>>>>>>> +     pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
> >> >>>>>>>>>>>>>> +     CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
> >> >>>>>>>>>>>>>> +
> >> >>>>>>>>>>>>>> +
> >> >>>>>>>>>>>>>> +     for (i = 0; i < NUM_BUFS_PAUSE; i++) {
> >> >>>>>>>>>>>>>> +             buf = odp_buffer_alloc(pool);
> >> >>>>>>>>>>>>>> +             CU_ASSERT(buf != ODP_BUFFER_INVALID);
> >> >>>>>>>>>>>>>> +             odp_queue_enq(queue, buf);
> >> >>>>>>>>>>>>>> +     }
> >> >>>>>>>>>>>>>> +
> >> >>>>>>>>>>>>>> +     for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
> >> >>>>>>>>>>>>>> +             buf = odp_schedule(&from,
> ODP_SCHED_NO_WAIT);
> >> >>>>>>>>>>>>>> +             CU_ASSERT(from == queue);
> >> >>>>>>>>>>>>>> +             odp_buffer_free(buf);
> >> >>>>>>>>>>>>>> +     }
> >> >>>>>>>>>>>>>> +
> >> >>>>>>>>>>>>>> +     odp_schedule_pause();
> >> >>>>>>>>>>>>>> +
> >> >>>>>>>>>>>>>> +     while (1) {
> >> >>>>>>>>>>>>>> +             buf = odp_schedule(&from,
> ODP_SCHED_NO_WAIT);
> >> >>>>>>>>>>>>>> +             if (buf == ODP_BUFFER_INVALID)
> >> >>>>>>>>>>>>>> +                     break;
> >> >>>>>>>>>>>>>> +
> >> >>>>>>>>>>>>>> +             CU_ASSERT(from == queue);
> >> >>>>>>>>>>>>>> +             odp_buffer_free(buf);
> >> >>>>>>>>>>>>>> +             local_bufs++;
> >> >>>>>>>>>>>>>> +     }
> >> >>>>>>>>>>>>>> +
> >> >>>>>>>>>>>>>> +     CU_ASSERT(local_bufs < NUM_BUFS_PAUSE -
> >> >>>>>>>>>>>>>> NUM_BUFS_BEFORE_PAUSE);
> >> >>>>>>>>>>>>>>
> >> >>>>>>>>>>>>> What is the expected behavior here? Shouldn't it be
> >> >>>>>>>>>>>>> CU_ASSERT(local_bufs == 0), meaning a complete pause?
> >> >>>>>>>>>>>>>
> >> >>>>>>>>>>>> Sorry about the delay, I've been playing around with mutt and I
> >> >>>>>>>>>>>> must have accidentally marked this email as read.
> >> >>>>>>>>>>>> The explanation here is that after pausing the scheduling, there
> >> >>>>>>>>>>>> might still be locally reserved buffers (see the
> >> >>>>>>>>>>>> odp_schedule_pause documentation). For linux-generic, for
> >> >>>>>>>>>>>> instance, the scheduler dequeues buffers in bursts;
> >> >>>>>>>>>>>> odp_schedule_pause only stops further dequeues, so buffers may
> >> >>>>>>>>>>>> still be in the 'reservoirs'. With that in mind, the check above
> >> >>>>>>>>>>>> makes sure that after pausing only a limited number of packets
> >> >>>>>>>>>>>> are still scheduled; in other words, pausing works, but not all
> >> >>>>>>>>>>>> packets are drained.
> >> >>>>>>>>>>>>
> >> >>>>>>>>>>>>  +
> >> >>>>>>>>>>>>>> +     odp_schedule_resume();
> >> >>>>>>>>>>>>>> +
> >> >>>>>>>>>>>>>> +     for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i <
> >> >>>>>>>>>>>>>> NUM_BUFS_PAUSE; i++) {
> >> >>>>>>>>>>>>>> +             buf = odp_schedule(&from, ODP_SCHED_WAIT);
> >> >>>>>>>>>>>>>> +             CU_ASSERT(from == queue);
> >> >>>>>>>>>>>>>> +             odp_buffer_free(buf);
> >> >>>>>>>>>>>>>> +     }
> >> >>>>>>>>>>>>>> +}
> >> >>>>>>>>>>>>>> +
> >> >>>>>>>>>>>>>>   static int create_queues(void)
> >> >>>>>>>>>>>>>>   {
> >> >>>>>>>>>>>>>>        int i, j, prios;
> >> >>>>>>>>>>>>>> @@ -594,6 +646,7 @@ struct CU_TestInfo
> test_odp_schedule[] = {
> >> >>>>>>>>>>>>>>        {"schedule_multi_mq_mt_prio_a",
> >> >>>>>>>>>>>>>> test_schedule_multi_mq_mt_prio_a},
> >> >>>>>>>>>>>>>>        {"schedule_multi_mq_mt_prio_o",
> >> >>>>>>>>>>>>>> test_schedule_multi_mq_mt_prio_o},
> >> >>>>>>>>>>>>>>        {"schedule_multi_1q_mt_a_excl",
> >> >>>>>>>>>>>>>> test_schedule_multi_1q_mt_a_excl},
> >> >>>>>>>>>>>>>> +     {"schedule_pause_resume",
> >> >>>>>>>>>>>>>> test_schedule_pause_resume},
> >> >>>>>>>>>>>>>>        CU_TEST_INFO_NULL,
> >> >>>>>>>>>>>>>>   };
> >> >>>>>>>>>>>>>>
> >> >>>>>>>>>>>>>> --
> >> >>>>>>>>>>>>>> 1.8.3.2
> >> >>>>>>>>>>>>>>
> >> >>>>>>>>>>>>>>
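For context on the implicit-flush alternative quoted above, here is a purely
hypothetical sketch of what flushing a thread-local scheduler cache on pause
could look like. Every internal name here (sched_local_t, MAX_DEQ,
schedule_pause_with_flush) is invented for illustration and does not describe
the actual linux-generic scheduler internals.

#include <odp.h>

#define MAX_DEQ 4	/* illustrative burst size, not the real value */

/* Hypothetical per-thread scheduler cache. */
typedef struct {
	odp_buffer_t buf[MAX_DEQ];	/* locally reserved buffers */
	int num;			/* how many are still cached */
	odp_queue_t origin;		/* queue they were dequeued from */
	int paused;
} sched_local_t;

static __thread sched_local_t sched_local;

/* Hypothetical pause variant: push any locally reserved buffers back to
 * their origin queue so that no events stay hidden from the application. */
static void schedule_pause_with_flush(void)
{
	sched_local.paused = 1;

	while (sched_local.num > 0) {
		odp_buffer_t buf = sched_local.buf[--sched_local.num];
		odp_queue_enq(sched_local.origin, buf);
	}
}

The trade-off is extra enqueue work on every pause versus the application-side
drain loop that the current API requires; the thread above keeps the existing
semantics for v1.0 and defers this question.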
Maxim Uvarov Jan. 22, 2015, 11:55 a.m. UTC | #20
On 01/22/2015 12:06 PM, Savolainen, Petri (NSN - FI/Espoo) wrote:
>
>> -----Original Message-----
>> From: lng-odp-bounces@lists.linaro.org [mailto:lng-odp-
>> bounces@lists.linaro.org] On Behalf Of ext Ciprian Barbu
>> Sent: Wednesday, January 14, 2015 3:36 PM
>> To: Ola Liljedahl
>> Cc: lng-odp
>> Subject: Re: [lng-odp] [PATCH] validation: add odp_schedule_pause and
>> odp_schedule_resume tests
>>
>> On Wed, Jan 14, 2015 at 3:28 PM, Ola Liljedahl <ola.liljedahl@linaro.org>
>> wrote:
>>> On 7 January 2015 at 20:41, Mike Holmes <mike.holmes@linaro.org> wrote:
>>>> I am unsure if I need to pay attention to this for 0.7.0
>>> We need to have a decision (and implementation) for ODP 1.0 though.
>>> Scheduling and its semantics are important aspects of ODP.
>> The odp_schedule_pause API is already documented and implemented, I
>> didn't exactly catch from Petri if we will keep the behavior for 1.0,
>> but what is the problem with covering this API in its current form for
>> at least 0.7 and 0.8?
>
> There are no plans to change schedule pause/resume API.
>
> -Petri
So we are adding this patch, right?

Maxim.
Mike Holmes Jan. 22, 2015, 11:57 a.m. UTC | #21
No one appears to be willing to review this one :)

I will review it for validation style.

On 22 January 2015 at 06:55, Maxim Uvarov <maxim.uvarov@linaro.org> wrote:

> On 01/22/2015 12:06 PM, Savolainen, Petri (NSN - FI/Espoo) wrote:
>
>>
>>  -----Original Message-----
>>> From: lng-odp-bounces@lists.linaro.org [mailto:lng-odp-
>>> bounces@lists.linaro.org] On Behalf Of ext Ciprian Barbu
>>> Sent: Wednesday, January 14, 2015 3:36 PM
>>> To: Ola Liljedahl
>>> Cc: lng-odp
>>> Subject: Re: [lng-odp] [PATCH] validation: add odp_schedule_pause and
>>> odp_schedule_resume tests
>>>
>>> On Wed, Jan 14, 2015 at 3:28 PM, Ola Liljedahl <ola.liljedahl@linaro.org>
>>> wrote:
>>>
>>>> On 7 January 2015 at 20:41, Mike Holmes <mike.holmes@linaro.org> wrote:
>>>>
>>>>> I am unsure if I need to pay attention to this for 0.7.0
>>>>>
>>>> We need to have a decision (and implementation) for ODP 1.0 though.
>>>> Scheduling and its semantics are important aspects of ODP.
>>>>
>>> The odp_schedule_pause API is already documented and implemented, I
>>> didn't exactly catch from Petri if we will keep the behavior for 1.0,
>>> but what is the problem with covering this API in its current form for
>>> at least 0.7 and 0.8?
>>>
>>
>> There are no plans to change schedule pause/resume API.
>>
>> -Petri
>>
> So we are adding this patch, right?
>
> Maxim.
>
>
diff mbox

Patch

diff --git a/test/validation/odp_schedule.c b/test/validation/odp_schedule.c
index 31be742..bdbcf77 100644
--- a/test/validation/odp_schedule.c
+++ b/test/validation/odp_schedule.c
@@ -11,9 +11,11 @@ 
 #define MSG_POOL_SIZE		(4*1024*1024)
 #define QUEUES_PER_PRIO		16
 #define BUF_SIZE		64
-#define TEST_NUM_BUFS		100
+#define NUM_BUFS		100
 #define BURST_BUF_SIZE		4
-#define TEST_NUM_BUFS_EXCL	10000
+#define NUM_BUFS_EXCL		10000
+#define NUM_BUFS_PAUSE		1000
+#define NUM_BUFS_BEFORE_PAUSE	10
 
 #define GLOBALS_SHM_NAME	"test_globals"
 #define MSG_POOL_NAME		"msg_pool"
@@ -229,7 +231,7 @@  static void schedule_common(odp_schedule_sync_t sync, int num_queues,
 	args.sync = sync;
 	args.num_queues = num_queues;
 	args.num_prio = num_prio;
-	args.num_bufs = TEST_NUM_BUFS;
+	args.num_bufs = NUM_BUFS;
 	args.num_cores = 1;
 	args.enable_schd_multi = enable_schd_multi;
 	args.enable_excl_atomic = 0;	/* Not needed with a single core */
@@ -261,9 +263,9 @@  static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
 	thr_args->num_queues = num_queues;
 	thr_args->num_prio = num_prio;
 	if (enable_excl_atomic)
-		thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
+		thr_args->num_bufs = NUM_BUFS_EXCL;
 	else
-		thr_args->num_bufs = TEST_NUM_BUFS;
+		thr_args->num_bufs = NUM_BUFS;
 	thr_args->num_cores = globals->core_count;
 	thr_args->enable_schd_multi = enable_schd_multi;
 	thr_args->enable_excl_atomic = enable_excl_atomic;
@@ -459,6 +461,56 @@  static void test_schedule_multi_1q_mt_a_excl(void)
 			 ENABLE_EXCL_ATOMIC);
 }
 
+static void test_schedule_pause_resume(void)
+{
+	odp_queue_t queue;
+	odp_buffer_t buf;
+	odp_queue_t from;
+	int i;
+	int local_bufs = 0;
+
+	queue = odp_queue_lookup("sched_0_0_n");
+	CU_ASSERT(queue != ODP_QUEUE_INVALID);
+
+	pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
+	CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
+
+
+	for (i = 0; i < NUM_BUFS_PAUSE; i++) {
+		buf = odp_buffer_alloc(pool);
+		CU_ASSERT(buf != ODP_BUFFER_INVALID);
+		odp_queue_enq(queue, buf);
+	}
+
+	for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
+		buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+		CU_ASSERT(from == queue);
+		odp_buffer_free(buf);
+	}
+
+	odp_schedule_pause();
+
+	while (1) {
+		buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+		if (buf == ODP_BUFFER_INVALID)
+			break;
+
+		CU_ASSERT(from == queue);
+		odp_buffer_free(buf);
+		local_bufs++;
+	}
+
+	CU_ASSERT(local_bufs < NUM_BUFS_PAUSE - NUM_BUFS_BEFORE_PAUSE);
+
+	odp_schedule_resume();
+
+	for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i < NUM_BUFS_PAUSE; i++) {
+		buf = odp_schedule(&from, ODP_SCHED_WAIT);
+		CU_ASSERT(from == queue);
+		odp_buffer_free(buf);
+	}
+}
+
 static int create_queues(void)
 {
 	int i, j, prios;
@@ -594,6 +646,7 @@  struct CU_TestInfo test_odp_schedule[] = {
 	{"schedule_multi_mq_mt_prio_a",	test_schedule_multi_mq_mt_prio_a},
 	{"schedule_multi_mq_mt_prio_o",	test_schedule_multi_mq_mt_prio_o},
 	{"schedule_multi_1q_mt_a_excl",	test_schedule_multi_1q_mt_a_excl},
+	{"schedule_pause_resume",	test_schedule_pause_resume},
 	CU_TEST_INFO_NULL,
 };