@@ -38,6 +38,8 @@ struct odp_global_data_s {
odp_log_func_t log_fn;
odp_abort_func_t abort_fn;
odp_system_info_t system_info;
+ odp_cpumask_t control_cpus;
+ odp_cpumask_t worker_cpus;
};
extern struct odp_global_data_s odp_global_data;
@@ -18,10 +18,28 @@ int odp_cpumask_default_worker(odp_cpumask_t *mask, int num)
int ret, cpu, i;
cpu_set_t cpuset;
- ret = pthread_getaffinity_np(pthread_self(),
- sizeof(cpu_set_t), &cpuset);
- if (ret != 0)
- ODP_ABORT("failed to read CPU affinity value\n");
+ CPU_ZERO(&cpuset);
+
+ /*
+ * If the available worker cpumask was specified prior to global init,
+ * then allocate worker CPUs from that cpumask.
+ */
+ if (odp_cpumask_count(&odp_global_data.worker_cpus)) {
+ for (i = 0; i < CPU_SETSIZE; i++)
+ if (odp_cpumask_isset(&odp_global_data.worker_cpus, i))
+ CPU_SET(i, &cpuset);
+ } else {
+ /*
+ * No initial worker cpumask was specified... implying that
+ * CPU isolation is not needed for this ODP instance.
+ * So allocate worker CPUs from the available CPUs defined
+ * by the default OS environment.
+ */
+ ret = pthread_getaffinity_np(pthread_self(),
+ sizeof(cpu_set_t), &cpuset);
+ if (ret != 0)
+ ODP_ABORT("failed to read CPU affinity value\n");
+ }
odp_cpumask_zero(mask);
@@ -29,7 +47,7 @@ int odp_cpumask_default_worker(odp_cpumask_t *mask, int num)
* If no user supplied number or it's too large, then attempt
* to use all CPUs
*/
- if (0 == num || CPU_SETSIZE < num)
+ if (0 == num || CPU_COUNT(&cpuset) < num)
num = CPU_COUNT(&cpuset);
/* build the mask, allocating down from highest numbered CPU */
@@ -49,10 +67,26 @@ int odp_cpumask_default_worker(odp_cpumask_t *mask, int num)
int odp_cpumask_default_control(odp_cpumask_t *mask, int num ODP_UNUSED)
{
+ int cpu_count = 0;
+ odp_cpumask_t all_cpus;
+
odp_cpumask_zero(mask);
- /* By default all control threads on CPU 0 */
- odp_cpumask_set(mask, 0);
- return 1;
+
+ /*
+ * If the available control cpumask was specified prior to global init,
+ * then allocate control CPUs from that cpumask.
+ */
+ if ((cpu_count = odp_cpumask_count(&odp_global_data.control_cpus))) {
+ odp_cpumask_copy(mask, &odp_global_data.control_cpus);
+ }
+
+ /* CPU 0 must always be usable for control threads */
+ if (!odp_cpumask_isset(mask, 0)) {
+ odp_cpumask_set(mask, 0);
+ cpu_count++;
+ }
+
+ return cpu_count;
}
int odp_cpumask_all_available(odp_cpumask_t *mask)
@@ -22,6 +22,23 @@ int odp_init_global(const odp_init_t *params,
odp_global_data.log_fn = params->log_fn;
if (params->abort_fn != NULL)
odp_global_data.abort_fn = params->abort_fn;
+ /*
+ * Save the control and worker cpumask contents
+ * in a globally accessible data structure
+ * so odp_cpumask_default_control(),
+ * odp_cpumask_default_worker(), and any
+ * isolation support logic may reference them later.
+ */
+ if (!odp_cpumask_count(&params->control_cpus))
+ odp_cpumask_zero(&odp_global_data.control_cpus);
+ else
+ odp_cpumask_copy(&odp_global_data.control_cpus,
+ &params->control_cpus);
+ if (!odp_cpumask_count(&params->worker_cpus))
+ odp_cpumask_zero(&odp_global_data.worker_cpus);
+ else
+ odp_cpumask_copy(&odp_global_data.worker_cpus,
+ &params->worker_cpus);
}
if (odp_time_global_init()) {