
[PATCHv2] validation: remove MAX_WORKERS

Message ID 1441710324-11148-1-git-send-email-maxim.uvarov@linaro.org
State New

Commit Message

Maxim Uvarov Sept. 8, 2015, 11:05 a.m. UTC
ODP has an API to request the available number of workers, so there
is no longer any need to limit it inside the application.

Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
---

  v2: timer.c: do not subtract -1 from the number of workers.

 test/validation/common/odp_cunit_common.c     |  7 ++--
 test/validation/common/odp_cunit_common.h     |  2 --
 test/validation/scheduler/scheduler.c         |  3 --
 test/validation/shmem/shmem.c                 |  6 ++--
 test/validation/synchronizers/synchronizers.c | 48 ++++++++++-----------------
 test/validation/timer/timer.c                 | 10 ++----
 6 files changed, 27 insertions(+), 49 deletions(-)
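
In short, every call site replaces a clamped CPU count with the value
reported by the ODP API itself. A condensed before/after sketch of the
pattern (illustrative, not verbatim; the calls are the ones used in the
hunks below):

	odp_cpumask_t mask;
	int num_workers;

	/* before: query the CPU count, then clamp to a compile-time cap */
	num_workers = odp_cpu_count();
	if (num_workers > MAX_WORKERS)
		num_workers = MAX_WORKERS;

	/* after: use the worker count that the ODP API reports */
	num_workers = odp_cpumask_def_worker(&mask, 0);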

Comments

Nicolas Morey-Chaisemartin Sept. 9, 2015, 7:24 a.m. UTC | #1
On 09/08/2015 01:05 PM, Maxim Uvarov wrote:
> ODP has an API to request the available number of workers, so there
> is no longer any need to limit it inside the application.
>
> Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
> ---
>
>   v2: timer.c: do not subtract -1 from the number of workers.
>
>  test/validation/common/odp_cunit_common.c     |  7 ++--
>  test/validation/common/odp_cunit_common.h     |  2 --
>  test/validation/scheduler/scheduler.c         |  3 --
>  test/validation/shmem/shmem.c                 |  6 ++--
>  test/validation/synchronizers/synchronizers.c | 48 ++++++++++-----------------
>  test/validation/timer/timer.c                 | 10 ++----
>  6 files changed, 27 insertions(+), 49 deletions(-)
>
> diff --git a/test/validation/common/odp_cunit_common.c b/test/validation/common/odp_cunit_common.c
> index d995ad3..01ea87b 100644
> --- a/test/validation/common/odp_cunit_common.c
> +++ b/test/validation/common/odp_cunit_common.c
> @@ -9,7 +9,7 @@
>  #include <odp_cunit_common.h>
>  #include <odp/helper/linux.h>
>  /* Globals */
> -static odph_linux_pthread_t thread_tbl[MAX_WORKERS];
> +static odph_linux_pthread_t *thread_tbl;
>  
>  /*
>   * global init/term functions which may be registered
> @@ -26,9 +26,11 @@ static struct {
>  int odp_cunit_thread_create(void *func_ptr(void *), pthrd_arg *arg)
>  {
>  	odp_cpumask_t cpumask;
> +	int num;
>  
>  	/* Create and init additional threads */
> -	odp_cpumask_def_worker(&cpumask, arg->numthrds);
> +	num = odp_cpumask_def_worker(&cpumask, arg->numthrds);
> +	thread_tbl = calloc(sizeof(odph_linux_pthread_t), num);
Maybe add a check of the return value here? Everything else looks good
Reviewed-by: Nicolas Morey-Chaisemartin <nmorey@kalray.eu>

> [snip]

Maxim Uvarov Sept. 9, 2015, 7:49 a.m. UTC | #2
On 09/09/15 10:24, Nicolas Morey-Chaisemartin wrote:
>
> On 09/08/2015 01:05 PM, Maxim Uvarov wrote:
>> [snip]
>>   	/* Create and init additional threads */
>> -	odp_cpumask_def_worker(&cpumask, arg->numthrds);
>> +	num = odp_cpumask_def_worker(&cpumask, arg->numthrds);
>> +	thread_tbl = calloc(sizeof(odph_linux_pthread_t), num);
> Maybe add a check of the return value here? Everything else looks good
> Reviewed-by: Nicolas Morey-Chaisemartin <nmorey@kalray.eu>

I can add it, but usually overcommit is turned on, so all malloc() /
calloc() calls succeed and the real allocation is postponed until the
memory is accessed. So you never know whether you actually got the
memory or whether the kernel will kill the app on access.

Maxim.
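
(A rough illustration of the point above, not from the thread: with Linux
overcommit enabled (vm.overcommit_memory=1, or within the heuristic limits
of mode 0), the allocation call itself rarely fails; it is the first write
to the pages that can invoke the OOM killer.)

	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		size_t sz = (size_t)8 << 30;  /* 8 GiB; pick something larger than free RAM */
		char *p = malloc(sz);         /* typically returns non-NULL under overcommit */

		if (p != NULL)
			memset(p, 0, sz);     /* faulting the pages in is what may get killed */
		return 0;
	}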

Nicolas Morey-Chaisemartin Sept. 9, 2015, 8:02 a.m. UTC | #3
On 09/09/2015 09:49 AM, Maxim Uvarov wrote:
> On 09/09/15 10:24, Nicolas Morey-Chaisemartin wrote:
>>
>> On 09/08/2015 01:05 PM, Maxim Uvarov wrote:
>>> [snip]
>>> +    int num;
>>>         /* Create and init additional threads */
>>> -    odp_cpumask_def_worker(&cpumask, arg->numthrds);
>>> +    num = odp_cpumask_def_worker(&cpumask, arg->numthrds);
>>> +    thread_tbl = calloc(sizeof(odph_linux_pthread_t), num);
>> Maybe add a check of the return value here? Everything else looks good
>> Reviewed-by: Nicolas Morey-Chaisemartin <nmorey@kalray.eu>
>
> I can add it, but usually overcommit is turned on, so all malloc() / calloc() calls succeed
> and the real allocation is postponed until the memory is accessed. So you never know whether
> you actually got the memory or whether the kernel will kill the app on access.
>
> Maxim.
>
On Linux, yes. On smaller HW platforms (like ours) we don't use overcommit.
As it's part of the common validation suite, it'd make sense to check it.

Nicolas
Maxim Uvarov Sept. 9, 2015, 8:42 a.m. UTC | #4
On 09/09/15 11:02, Nicolas Morey-Chaisemartin wrote:
>
> On 09/09/2015 09:49 AM, Maxim Uvarov wrote:
>> On 09/09/15 10:24, Nicolas Morey-Chaisemartin wrote:
>>> On 09/08/2015 01:05 PM, Maxim Uvarov wrote:
>>>> [snip]
>>>>          /* Create and init additional threads */
>>>> -    odp_cpumask_def_worker(&cpumask, arg->numthrds);
>>>> +    num = odp_cpumask_def_worker(&cpumask, arg->numthrds);
>>>> +    thread_tbl = calloc(sizeof(odph_linux_pthread_t), num);
>>> Maybe add a check of the return value here? Everything else looks good
>>> Reviewed-by: Nicolas Morey-Chaisemartin <nmorey@kalray.eu>
>> I can add it, but usually overcommit is turned on, so all malloc() / calloc() calls succeed
>> and the real allocation is postponed until the memory is accessed. So you never know whether
>> you actually got the memory or whether the kernel will kill the app on access.
>>
>> Maxim.
>>
> On Linux, yes. On smaller HW platforms (like ours) we don't use overcommit.
> As it's part of the common validation suite, it'd make sense to check it.
>
> Nicolas
>
OK, in that case it's reasonable to account for that. Will send v3.

Maxim.
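
For reference, the check agreed on above might look like the fragment below,
inside odp_cunit_thread_create(); v3 itself is not part of this page, so
treat this as a sketch only. (As an aside, calloc() conventionally takes the
element count first and the element size second.)

	num = odp_cpumask_def_worker(&cpumask, arg->numthrds);
	thread_tbl = calloc(num, sizeof(odph_linux_pthread_t));
	if (thread_tbl == NULL)
		return -1;

	return odph_linux_pthread_create(thread_tbl, &cpumask, func_ptr,
					 (void *)arg);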

Patch

diff --git a/test/validation/common/odp_cunit_common.c b/test/validation/common/odp_cunit_common.c
index d995ad3..01ea87b 100644
--- a/test/validation/common/odp_cunit_common.c
+++ b/test/validation/common/odp_cunit_common.c
@@ -9,7 +9,7 @@ 
 #include <odp_cunit_common.h>
 #include <odp/helper/linux.h>
 /* Globals */
-static odph_linux_pthread_t thread_tbl[MAX_WORKERS];
+static odph_linux_pthread_t *thread_tbl;
 
 /*
  * global init/term functions which may be registered
@@ -26,9 +26,11 @@  static struct {
 int odp_cunit_thread_create(void *func_ptr(void *), pthrd_arg *arg)
 {
 	odp_cpumask_t cpumask;
+	int num;
 
 	/* Create and init additional threads */
-	odp_cpumask_def_worker(&cpumask, arg->numthrds);
+	num = odp_cpumask_def_worker(&cpumask, arg->numthrds);
+	thread_tbl = calloc(sizeof(odph_linux_pthread_t), num);
 
 	return odph_linux_pthread_create(thread_tbl, &cpumask, func_ptr,
 					 (void *)arg);
@@ -39,6 +41,7 @@  int odp_cunit_thread_exit(pthrd_arg *arg)
 {
 	/* Wait for other threads to exit */
 	odph_linux_pthread_join(thread_tbl, arg->numthrds);
+	free(thread_tbl);
 
 	return 0;
 }
diff --git a/test/validation/common/odp_cunit_common.h b/test/validation/common/odp_cunit_common.h
index 6cafaaa..f94b44e 100644
--- a/test/validation/common/odp_cunit_common.h
+++ b/test/validation/common/odp_cunit_common.h
@@ -16,8 +16,6 @@ 
 #include <stdint.h>
 #include "CUnit/Basic.h"
 
-#define MAX_WORKERS 32 /**< Maximum number of work threads */
-
 /* the function, called by module main(), to run the testsuites: */
 int odp_cunit_run(CU_SuiteInfo testsuites[]);
 
diff --git a/test/validation/scheduler/scheduler.c b/test/validation/scheduler/scheduler.c
index 1874889..e12895d 100644
--- a/test/validation/scheduler/scheduler.c
+++ b/test/validation/scheduler/scheduler.c
@@ -8,7 +8,6 @@ 
 #include "odp_cunit_common.h"
 #include "scheduler.h"
 
-#define MAX_WORKERS_THREADS	32
 #define MSG_POOL_SIZE		(4 * 1024 * 1024)
 #define QUEUES_PER_PRIO		16
 #define BUF_SIZE		64
@@ -1018,8 +1017,6 @@  int scheduler_suite_init(void)
 	memset(globals, 0, sizeof(test_globals_t));
 
 	globals->num_workers = odp_cpumask_def_worker(&mask, 0);
-	if (globals->num_workers > MAX_WORKERS)
-		globals->num_workers = MAX_WORKERS;
 
 	shm = odp_shm_reserve(SHM_THR_ARGS_NAME, sizeof(thread_args_t),
 			      ODP_CACHE_LINE_SIZE, 0);
diff --git a/test/validation/shmem/shmem.c b/test/validation/shmem/shmem.c
index 6dc579a..dfa5310 100644
--- a/test/validation/shmem/shmem.c
+++ b/test/validation/shmem/shmem.c
@@ -49,6 +49,7 @@  void shmem_test_odp_shm_sunnyday(void)
 	pthrd_arg thrdarg;
 	odp_shm_t shm;
 	test_shared_data_t *test_shared_data;
+	odp_cpumask_t mask;
 
 	shm = odp_shm_reserve(TESTNAME,
 			      sizeof(test_shared_data_t), ALIGE_SIZE, 0);
@@ -67,10 +68,7 @@  void shmem_test_odp_shm_sunnyday(void)
 	test_shared_data->foo = TEST_SHARE_FOO;
 	test_shared_data->bar = TEST_SHARE_BAR;
 
-	thrdarg.numthrds = odp_cpu_count();
-
-	if (thrdarg.numthrds > MAX_WORKERS)
-		thrdarg.numthrds = MAX_WORKERS;
+	thrdarg.numthrds = odp_cpumask_def_worker(&mask, 0);
 
 	odp_cunit_thread_create(run_shm_thread, &thrdarg);
 	odp_cunit_thread_exit(&thrdarg);
diff --git a/test/validation/synchronizers/synchronizers.c b/test/validation/synchronizers/synchronizers.c
index 0a31a40..914b37e 100644
--- a/test/validation/synchronizers/synchronizers.c
+++ b/test/validation/synchronizers/synchronizers.c
@@ -45,7 +45,7 @@  typedef struct {
 
 typedef struct {
 	/* Global variables */
-	uint32_t g_num_threads;
+	uint32_t g_num_workers;
 	uint32_t g_iterations;
 	uint32_t g_verbose;
 	uint32_t g_max_num_cores;
@@ -169,7 +169,7 @@  static uint32_t barrier_test(per_thread_mem_t *per_thread_mem,
 
 	thread_num = odp_thread_id();
 	global_mem = per_thread_mem->global_mem;
-	num_threads = global_mem->g_num_threads;
+	num_threads = global_mem->g_num_workers;
 	iterations = BARRIER_ITERATIONS;
 
 	barrier_errs = 0;
@@ -710,7 +710,7 @@  static void barrier_test_init(void)
 {
 	uint32_t num_threads, idx;
 
-	num_threads = global_mem->g_num_threads;
+	num_threads = global_mem->g_num_workers;
 
 	for (idx = 0; idx < NUM_TEST_BARRIERS; idx++) {
 		odp_barrier_init(&global_mem->test_barriers[idx], num_threads);
@@ -924,7 +924,7 @@  void synchronizers_test_no_barrier_functional(void)
 {
 	pthrd_arg arg;
 
-	arg.numthrds = global_mem->g_num_threads;
+	arg.numthrds = global_mem->g_num_workers;
 	barrier_test_init();
 	odp_cunit_thread_create(no_barrier_functional_test, &arg);
 	odp_cunit_thread_exit(&arg);
@@ -934,7 +934,7 @@  void synchronizers_test_barrier_functional(void)
 {
 	pthrd_arg arg;
 
-	arg.numthrds = global_mem->g_num_threads;
+	arg.numthrds = global_mem->g_num_workers;
 	barrier_test_init();
 	odp_cunit_thread_create(barrier_functional_test, &arg);
 	odp_cunit_thread_exit(&arg);
@@ -951,7 +951,7 @@  void synchronizers_test_no_lock_functional(void)
 {
 	pthrd_arg arg;
 
-	arg.numthrds = global_mem->g_num_threads;
+	arg.numthrds = global_mem->g_num_workers;
 	odp_cunit_thread_create(no_lock_functional_test, &arg);
 	odp_cunit_thread_exit(&arg);
 }
@@ -966,7 +966,7 @@  void synchronizers_test_spinlock_api(void)
 {
 	pthrd_arg arg;
 
-	arg.numthrds = global_mem->g_num_threads;
+	arg.numthrds = global_mem->g_num_workers;
 	odp_cunit_thread_create(spinlock_api_tests, &arg);
 	odp_cunit_thread_exit(&arg);
 }
@@ -975,7 +975,7 @@  void synchronizers_test_spinlock_functional(void)
 {
 	pthrd_arg arg;
 
-	arg.numthrds = global_mem->g_num_threads;
+	arg.numthrds = global_mem->g_num_workers;
 	odp_spinlock_init(&global_mem->global_spinlock);
 	odp_cunit_thread_create(spinlock_functional_test, &arg);
 	odp_cunit_thread_exit(&arg);
@@ -992,7 +992,7 @@  void synchronizers_test_ticketlock_api(void)
 {
 	pthrd_arg arg;
 
-	arg.numthrds = global_mem->g_num_threads;
+	arg.numthrds = global_mem->g_num_workers;
 	odp_cunit_thread_create(ticketlock_api_tests, &arg);
 	odp_cunit_thread_exit(&arg);
 }
@@ -1001,7 +1001,7 @@  void synchronizers_test_ticketlock_functional(void)
 {
 	pthrd_arg arg;
 
-	arg.numthrds = global_mem->g_num_threads;
+	arg.numthrds = global_mem->g_num_workers;
 	odp_ticketlock_init(&global_mem->global_ticketlock);
 
 	odp_cunit_thread_create(ticketlock_functional_test, &arg);
@@ -1019,7 +1019,7 @@  void synchronizers_test_rwlock_api(void)
 {
 	pthrd_arg arg;
 
-	arg.numthrds = global_mem->g_num_threads;
+	arg.numthrds = global_mem->g_num_workers;
 	odp_cunit_thread_create(rwlock_api_tests, &arg);
 	odp_cunit_thread_exit(&arg);
 }
@@ -1028,7 +1028,7 @@  void synchronizers_test_rwlock_functional(void)
 {
 	pthrd_arg arg;
 
-	arg.numthrds = global_mem->g_num_threads;
+	arg.numthrds = global_mem->g_num_workers;
 	odp_rwlock_init(&global_mem->global_rwlock);
 	odp_cunit_thread_create(rwlock_functional_test, &arg);
 	odp_cunit_thread_exit(&arg);
@@ -1044,7 +1044,7 @@  int synchronizers_suite_init(void)
 {
 	uint32_t num_threads, idx;
 
-	num_threads = global_mem->g_num_threads;
+	num_threads = global_mem->g_num_workers;
 	odp_barrier_init(&global_mem->global_barrier, num_threads);
 	for (idx = 0; idx < NUM_RESYNC_BARRIERS; idx++)
 		odp_barrier_init(&global_mem->barrier_array[idx], num_threads);
@@ -1054,7 +1054,6 @@  int synchronizers_suite_init(void)
 
 int synchronizers_init(void)
 {
-	uint32_t workers_count, max_threads;
 	int ret = 0;
 	odp_cpumask_t mask;
 
@@ -1078,25 +1077,12 @@  int synchronizers_init(void)
 	global_mem = odp_shm_addr(global_shm);
 	memset(global_mem, 0, sizeof(global_shared_mem_t));
 
-	global_mem->g_num_threads = MAX_WORKERS;
+	global_mem->g_num_workers = odp_cpumask_def_worker(&mask, 0);
 	global_mem->g_iterations = MAX_ITERATIONS;
 	global_mem->g_verbose = VERBOSE;
 
-	workers_count = odp_cpumask_def_worker(&mask, 0);
-
-	max_threads = (workers_count >= MAX_WORKERS) ?
-			MAX_WORKERS : workers_count;
-
-	if (max_threads < global_mem->g_num_threads) {
-		printf("Requested num of threads is too large\n");
-		printf("reducing from %" PRIu32 " to %" PRIu32 "\n",
-		       global_mem->g_num_threads,
-		       max_threads);
-		global_mem->g_num_threads = max_threads;
-	}
-
-	printf("Num of threads used = %" PRIu32 "\n",
-	       global_mem->g_num_threads);
+	printf("Num of workers used = %" PRIu32 "\n",
+	       global_mem->g_num_workers);
 
 	return ret;
 }
@@ -1158,7 +1144,7 @@  static void test_atomic_functional(void *func_ptr(void *))
 {
 	pthrd_arg arg;
 
-	arg.numthrds = global_mem->g_num_threads;
+	arg.numthrds = global_mem->g_num_workers;
 	test_atomic_init();
 	test_atomic_store();
 	odp_cunit_thread_create(func_ptr, &arg);
diff --git a/test/validation/timer/timer.c b/test/validation/timer/timer.c
index 7a8b98a..bcba3d4 100644
--- a/test/validation/timer/timer.c
+++ b/test/validation/timer/timer.c
@@ -34,12 +34,6 @@  static odp_timer_pool_t tp;
 /** @private Count of timeouts delivered too late */
 static odp_atomic_u32_t ndelivtoolate;
 
-/** @private min() function */
-static int min(int a, int b)
-{
-	return a < b ? a : b;
-}
-
 /* @private Timer helper structure */
 struct test_timer {
 	odp_timer_t tim; /* Timer handle */
@@ -441,10 +435,12 @@  void timer_test_odp_timer_all(void)
 	int rc;
 	odp_pool_param_t params;
 	odp_timer_pool_param_t tparam;
+	odp_cpumask_t mask;
+
 	/* Reserve at least one core for running other processes so the timer
 	 * test hopefully can run undisturbed and thus get better timing
 	 * results. */
-	int num_workers = min(odp_cpu_count() - 1, MAX_WORKERS);
+	int num_workers = odp_cpumask_def_worker(&mask, 0) - 1;
 	/* On a single-CPU machine run at least one thread */
 	if (num_workers < 1)
 		num_workers = 1;