
validation: add odp_schedule_pause and odp_schedule_resume tests

Message ID: 1418821763-11684-1-git-send-email-ciprian.barbu@linaro.org
State: New

Commit Message

Ciprian Barbu Dec. 17, 2014, 1:09 p.m. UTC
Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
---
 test/validation/odp_schedule.c | 63 ++++++++++++++++++++++++++++++++++++++----
 1 file changed, 58 insertions(+), 5 deletions(-)
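
For context: the new test exercises the odp_schedule_pause()/odp_schedule_resume() pair. After odp_schedule_pause() the calling thread should stop receiving newly dispatched work and only drain whatever the scheduler has already staged locally for it; odp_schedule_resume() re-enables normal dispatching. A minimal sketch of that pattern, written against the same pre-1.0 buffer-based scheduling API the patch uses (an illustration only, not taken from the patch itself; error handling omitted):

	/* Sketch: drain locally pre-scheduled buffers around a pause/resume. */
	odp_queue_t from;
	odp_buffer_t buf;

	odp_schedule_pause();	/* stop receiving newly dispatched work */

	/* Only buffers already staged for this thread should still appear. */
	while ((buf = odp_schedule(&from, ODP_SCHED_NO_WAIT)) != ODP_BUFFER_INVALID)
		odp_buffer_free(buf);

	odp_schedule_resume();	/* normal scheduling continues */

The test below follows the same shape and additionally asserts that the number of buffers drained after the pause is strictly smaller than the number still enqueued, i.e. that pausing really did stop global dispatch to the thread.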

Comments

Mike Holmes Jan. 22, 2015, 4:12 p.m. UTC | #1
After rebase for naming of cores to cpu


On 17 December 2014 at 08:09, Ciprian Barbu <ciprian.barbu@linaro.org>
wrote:

> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
>

Reviewed-by: Mike Holmes <mike.holmes@linaro.org>


Ciprian Barbu Jan. 22, 2015, 4:20 p.m. UTC | #2
You beat me to it; I sent v2 before seeing your Reviewed-by.

https://patches.linaro.org/43536/

Thank you!

On Thu, Jan 22, 2015 at 6:12 PM, Mike Holmes <mike.holmes@linaro.org> wrote:
> After rebase for naming of cores to cpu
>
>
> On 17 December 2014 at 08:09, Ciprian Barbu <ciprian.barbu@linaro.org>
> wrote:
>>
>> Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
>
>
> Reviewed-by: Mike Holmes <mike.holmes@linaro.org>
>
> --
> Mike Holmes
> Linaro  Sr Technical Manager
> LNG - ODP

Patch

diff --git a/test/validation/odp_schedule.c b/test/validation/odp_schedule.c
index 31be742..bdbcf77 100644
--- a/test/validation/odp_schedule.c
+++ b/test/validation/odp_schedule.c
@@ -11,9 +11,11 @@ 
 #define MSG_POOL_SIZE		(4*1024*1024)
 #define QUEUES_PER_PRIO		16
 #define BUF_SIZE		64
-#define TEST_NUM_BUFS		100
+#define NUM_BUFS		100
 #define BURST_BUF_SIZE		4
-#define TEST_NUM_BUFS_EXCL	10000
+#define NUM_BUFS_EXCL		10000
+#define NUM_BUFS_PAUSE		1000
+#define NUM_BUFS_BEFORE_PAUSE	10
 
 #define GLOBALS_SHM_NAME	"test_globals"
 #define MSG_POOL_NAME		"msg_pool"
@@ -229,7 +231,7 @@  static void schedule_common(odp_schedule_sync_t sync, int num_queues,
 	args.sync = sync;
 	args.num_queues = num_queues;
 	args.num_prio = num_prio;
-	args.num_bufs = TEST_NUM_BUFS;
+	args.num_bufs = NUM_BUFS;
 	args.num_cores = 1;
 	args.enable_schd_multi = enable_schd_multi;
 	args.enable_excl_atomic = 0;	/* Not needed with a single core */
@@ -261,9 +263,9 @@  static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
 	thr_args->num_queues = num_queues;
 	thr_args->num_prio = num_prio;
 	if (enable_excl_atomic)
-		thr_args->num_bufs = TEST_NUM_BUFS_EXCL;
+		thr_args->num_bufs = NUM_BUFS_EXCL;
 	else
-		thr_args->num_bufs = TEST_NUM_BUFS;
+		thr_args->num_bufs = NUM_BUFS;
 	thr_args->num_cores = globals->core_count;
 	thr_args->enable_schd_multi = enable_schd_multi;
 	thr_args->enable_excl_atomic = enable_excl_atomic;
@@ -459,6 +461,56 @@  static void test_schedule_multi_1q_mt_a_excl(void)
 			 ENABLE_EXCL_ATOMIC);
 }
 
+static void test_schedule_pause_resume(void)
+{
+	odp_queue_t queue;
+	odp_buffer_t buf;
+	odp_queue_t from;
+	int i;
+	int local_bufs = 0;
+
+	queue = odp_queue_lookup("sched_0_0_n");
+	CU_ASSERT(queue != ODP_QUEUE_INVALID);
+
+	pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
+	CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
+
+
+	for (i = 0; i < NUM_BUFS_PAUSE; i++) {
+		buf = odp_buffer_alloc(pool);
+		CU_ASSERT(buf != ODP_BUFFER_INVALID);
+		odp_queue_enq(queue, buf);
+	}
+
+	for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
+		buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+		CU_ASSERT(from == queue);
+		odp_buffer_free(buf);
+	}
+
+	odp_schedule_pause();
+
+	while (1) {
+		buf = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+		if (buf == ODP_BUFFER_INVALID)
+			break;
+
+		CU_ASSERT(from == queue);
+		odp_buffer_free(buf);
+		local_bufs++;
+	}
+
+	CU_ASSERT(local_bufs < NUM_BUFS_PAUSE - NUM_BUFS_BEFORE_PAUSE);
+
+	odp_schedule_resume();
+
+	for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i < NUM_BUFS_PAUSE; i++) {
+		buf = odp_schedule(&from, ODP_SCHED_WAIT);
+		CU_ASSERT(from == queue);
+		odp_buffer_free(buf);
+	}
+}
+
 static int create_queues(void)
 {
 	int i, j, prios;
@@ -594,6 +646,7 @@  struct CU_TestInfo test_odp_schedule[] = {
 	{"schedule_multi_mq_mt_prio_a",	test_schedule_multi_mq_mt_prio_a},
 	{"schedule_multi_mq_mt_prio_o",	test_schedule_multi_mq_mt_prio_o},
 	{"schedule_multi_1q_mt_a_excl",	test_schedule_multi_1q_mt_a_excl},
+	{"schedule_pause_resume",	test_schedule_pause_resume},
 	CU_TEST_INFO_NULL,
 };
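
One caveat worth noting about the test as posted: the loop that dequeues NUM_BUFS_BEFORE_PAUSE buffers before the pause frees the value returned by odp_schedule(&from, ODP_SCHED_NO_WAIT) without checking it, and with ODP_SCHED_NO_WAIT the scheduler may legitimately return ODP_BUFFER_INVALID if dispatch has not yet caught up with the enqueues. A more defensive variant of that loop (an illustration only, not part of the submitted patch) could mirror the post-resume loop and block for each buffer:

	/* Hypothetical variant: block until each pre-pause buffer arrives. */
	for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
		buf = odp_schedule(&from, ODP_SCHED_WAIT);
		CU_ASSERT(buf != ODP_BUFFER_INVALID);
		CU_ASSERT(from == queue);
		odp_buffer_free(buf);
	}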