diff mbox

[API_NEXT,RFC,V2,01/01] linux-generic: add support for initial cpumasks

Message ID 1452786320-20411-2-git-send-email-gary.robertson@linaro.org
State New
Headers show

Commit Message

gary.robertson@linaro.org Jan. 14, 2016, 3:45 p.m. UTC
These code changes depend on the addition of control and worker cpumasks
to the ODP initialization parameter data structure, and implement the
change in behavior suggested with that patch.  They serve as the 'glue'
between the input of the new ODP API initial cpuset masks and the use of
those new cpumasks by the ODP application or instance.

Specifically: if neither of the new cpumasks is populated prior to calling
odp_init_global(), then the behavior for allocation of control and worker
cpumasks is unchanged from its current (pre-patch) state.

However, if the cpumasks are populated prior to calling odp_init_global()
then that routine saves their contents into global variables
for later reference.  Then when odp_cpumask_default_control() or
odp_cpumask_default_worker() are called they build the caller's cpumasks
based on the saved contents of the pre-populated cpuset input.

The 'hooks' implemented here will enable a helper function to parse
cpuset specification options from the command line, initialize the new
cpuset arguments as specified by those command line options, and
enforce the proper usage of those specified cpusets
by the linux-generic reference platform implementation.

Alternatively some system CPU resource provisioning utility could be used
to monitor available CPU resources dynamically and to orchestrate their
use for ODP applications by manipulating the new cpuset arguments as new
ODP applications or instances were started.
diff mbox

Patch

diff --git a/platform/linux-generic/include/odp_internal.h b/platform/linux-generic/include/odp_internal.h
index 49e23d9..47b63bd 100644
--- a/platform/linux-generic/include/odp_internal.h
+++ b/platform/linux-generic/include/odp_internal.h
@@ -38,6 +38,8 @@  struct odp_global_data_s {
 	odp_log_func_t log_fn;
 	odp_abort_func_t abort_fn;
 	odp_system_info_t system_info;
+	odp_cpumask_t control_cpus;
+	odp_cpumask_t worker_cpus;
 };
 
 extern struct odp_global_data_s odp_global_data;
diff --git a/platform/linux-generic/odp_cpumask_task.c b/platform/linux-generic/odp_cpumask_task.c
index 47cf6ea..e53b001 100644
--- a/platform/linux-generic/odp_cpumask_task.c
+++ b/platform/linux-generic/odp_cpumask_task.c
@@ -18,10 +18,28 @@  int odp_cpumask_default_worker(odp_cpumask_t *mask, int num)
 	int ret, cpu, i;
 	cpu_set_t cpuset;
 
-	ret = pthread_getaffinity_np(pthread_self(),
-				     sizeof(cpu_set_t), &cpuset);
-	if (ret != 0)
-		ODP_ABORT("failed to read CPU affinity value\n");
+	CPU_ZERO(&cpuset);
+
+	/*
+	 * If the available worker cpumask was specified prior to global init,
+	 * then allocate worker CPUs from that cpumask.
+	 */
+	if (odp_cpumask_count(&odp_global_data.worker_cpus)) {
+		for (i = 0; i < CPU_SETSIZE; i++)
+			if (odp_cpumask_isset(&odp_global_data.worker_cpus, i))
+				CPU_SET(i, &cpuset);
+	} else {
+		/*
+		 * No initial worker cpumask was specified... implying that
+		 * CPU isolation is not needed for this ODP instance.
+		 * So allocate worker CPUs from the available CPUs defined
+		 * by the default OS environment.
+		 */
+		ret = pthread_getaffinity_np(pthread_self(),
+					     sizeof(cpu_set_t), &cpuset);
+		if (ret != 0)
+			ODP_ABORT("failed to read CPU affinity value\n");
+	}
 
 	odp_cpumask_zero(mask);
 
@@ -29,7 +47,7 @@  int odp_cpumask_default_worker(odp_cpumask_t *mask, int num)
 	 * If no user supplied number or it's too large, then attempt
 	 * to use all CPUs
 	 */
-	if (0 == num || CPU_SETSIZE < num)
+	if (0 == num || CPU_COUNT(&cpuset) < num)
 		num = CPU_COUNT(&cpuset);
 
 	/* build the mask, allocating down from highest numbered CPU */
@@ -49,10 +67,25 @@  int odp_cpumask_default_worker(odp_cpumask_t *mask, int num)
 
 int odp_cpumask_default_control(odp_cpumask_t *mask, int num ODP_UNUSED)
 {
+	int cpu_count = 0;
+
 	odp_cpumask_zero(mask);
-	/* By default all control threads on CPU 0 */
-	odp_cpumask_set(mask, 0);
-	return 1;
+
+	/*
+	 * If the available control cpumask was specified prior to global init,
+	 * then allocate control CPUs from that cpumask.
+	 */
+	if ((cpu_count = odp_cpumask_count(&odp_global_data.control_cpus))) {
+		odp_cpumask_copy(mask, &odp_global_data.control_cpus);
+	}
+
+	/* CPU 0 must always be usable for control threads */
+	if (!odp_cpumask_isset(mask, 0)) {
+		odp_cpumask_set(mask, 0);
+		cpu_count++;
+	}
+
+	return cpu_count;
 }
 
 int odp_cpumask_all_available(odp_cpumask_t *mask)
diff --git a/platform/linux-generic/odp_init.c b/platform/linux-generic/odp_init.c
index 70a3dbe..7578b84 100644
--- a/platform/linux-generic/odp_init.c
+++ b/platform/linux-generic/odp_init.c
@@ -22,6 +22,23 @@  int odp_init_global(const odp_init_t *params,
 			odp_global_data.log_fn = params->log_fn;
 		if (params->abort_fn != NULL)
 			odp_global_data.abort_fn = params->abort_fn;
+		/*
+		 * Save the control and worker cpumask contents
+		 * in a globally accessible data structure
+		 * so odp_cpumask_default_control(),
+		 * odp_cpumask_default_worker(), and any
+		 * isolation support logic may reference them later.
+		 */
+		if (!odp_cpumask_count(&params->control_cpus))
+			odp_cpumask_zero(&odp_global_data.control_cpus);
+		else
+			odp_cpumask_copy(&odp_global_data.control_cpus,
+					 &params->control_cpus);
+		if (!odp_cpumask_count(&params->worker_cpus))
+			odp_cpumask_zero(&odp_global_data.worker_cpus);
+		else
+			odp_cpumask_copy(&odp_global_data.worker_cpus,
+					 &params->worker_cpus);
 	}
 
 	if (odp_time_global_init()) {