diff mbox

[V2,2/2] linux-generic: Make cpu detection work with NO_HZ_FULL

Message ID 1455634460-26806-3-git-send-email-gary.robertson@linaro.org
State New
Headers show

Commit Message

gary.robertson@linaro.org Feb. 16, 2016, 2:54 p.m. UTC
sched_getaffinity() and pthread_getaffinity_np() do not return
an accurate mask of all CPUs in the machine when the kernel
is compiled with NO_HZ_FULL support.

See Linaro BUG 2027 for details.

Additionally, performance of tasks on isolated CPU cores
may be compromised if other tasks are run on 'thread siblings'
of those cores which share portions of the CPU core hardware
(such as core-local cache memory).

This code replaces the 'getaffinity' based CPU discovery logic
and restricts worker CPU selection to use only one CPU per core
when there are 'thread siblings' -aka 'hyperthread CPUs'- present.
Thread siblings are not an issue for control CPUs and so are
included in the 'control' cpumask along with their 'primary'
counterpart.

The results of these changes which address BUG 2027 are:
(1) all CPUs known to the kernel at boot time are considered
    for use by ODP regardless of the default CPU affinity masks
    set by the kernel scheduler,
(2) hyperthreaded CPUs are not used for default worker cpumasks, -and-
(3) additional 'hyperthreaded' CPUs (thread siblings of CPU 0) may be
    included in the default control cpumask.

Also - this code:
(a) adds control and worker cpumasks to the linux-generic global data
(b) adds logic to odp_init_global() to initialize these masks, -and-
(c) reduces odp_cpumask_default_control() and
    odp_cpumask_default_worker() to use the content of these new
    cpumasks without modification.
These changes provide prerequisite infrastructure for pending changes
which will allow ODP to accept cpumasks passed in
from external entities such as a provisioning service.

Signed-off-by: Gary S. Robertson <gary.robertson@linaro.org>
---
 platform/linux-generic/include/odp_internal.h |  31 ++--
 platform/linux-generic/odp_cpumask_task.c     |  45 +++--
 platform/linux-generic/odp_init.c             | 230 +++++++++++++++++++++++++-
 platform/linux-generic/odp_system_info.c      |  14 +-
 4 files changed, 272 insertions(+), 48 deletions(-)
diff mbox

Patch

diff --git a/platform/linux-generic/include/odp_internal.h b/platform/linux-generic/include/odp_internal.h
index e75154a..77ba6b0 100644
--- a/platform/linux-generic/include/odp_internal.h
+++ b/platform/linux-generic/include/odp_internal.h
@@ -19,6 +19,7 @@  extern "C" {
 #endif
 
 #include <odp/init.h>
+#include <odp/cpumask.h>
 #include <odp/thread.h>
 #include <stdio.h>
 
@@ -40,26 +41,32 @@  struct odp_global_data_s {
 	odp_log_func_t log_fn;
 	odp_abort_func_t abort_fn;
 	odp_system_info_t system_info;
+	odp_cpumask_t control_cpus;
+	odp_cpumask_t worker_cpus;
 };
 
 enum init_stage {
 	NO_INIT = 0,    /* No init stages completed */
-	TIME_INIT = 1,
-	SYSINFO_INIT = 2,
-	SHM_INIT = 3,
-	THREAD_INIT = 4,
-	POOL_INIT = 5,
-	QUEUE_INIT = 6,
-	SCHED_INIT = 7,
-	PKTIO_INIT = 8,
-	TIMER_INIT = 9,
-	CRYPTO_INIT = 10,
-	CLASSIFICATION_INIT = 11,
-	ALL_INIT = 12   /* All init stages completed */
+	CPUMASK_INIT = 1,
+	TIME_INIT = 2,
+	SYSINFO_INIT = 3,
+	SHM_INIT = 4,
+	THREAD_INIT = 5,
+	POOL_INIT = 6,
+	QUEUE_INIT = 7,
+	SCHED_INIT = 8,
+	PKTIO_INIT = 9,
+	TIMER_INIT = 10,
+	CRYPTO_INIT = 11,
+	CLASSIFICATION_INIT = 12,
+	ALL_INIT = 13   /* All init stages completed */
 };
 
 extern struct odp_global_data_s odp_global_data;
 
+/* Number of logical CPUs detected at boot time */
+extern int numcpus;
+
 int _odp_term_global(enum init_stage stage);
 int _odp_term_local(enum init_stage stage);
 
diff --git a/platform/linux-generic/odp_cpumask_task.c b/platform/linux-generic/odp_cpumask_task.c
index c5093e0..36a158b 100644
--- a/platform/linux-generic/odp_cpumask_task.c
+++ b/platform/linux-generic/odp_cpumask_task.c
@@ -12,55 +12,52 @@ 
 #include <odp/cpumask.h>
 #include <odp_debug_internal.h>
 
+/*
+ * The following functions assume that odp_init_global() or some external
+ * logic has previously initialized the globally accessible cpumasks
+ * for ODP control and worker CPU selections.
+ */
 int odp_cpumask_default_worker(odp_cpumask_t *mask, int num)
 {
-	int ret, cpu, i;
-	cpu_set_t cpuset;
-
-	ret = pthread_getaffinity_np(pthread_self(),
-				     sizeof(cpu_set_t), &cpuset);
-	if (ret != 0)
-		ODP_ABORT("failed to read CPU affinity value\n");
-
-	odp_cpumask_zero(mask);
+	odp_cpumask_t overlap;
+	int cpu, i;
 
 	/*
 	 * If no user supplied number or it's too large, then attempt
 	 * to use all CPUs
 	 */
-	if (0 == num || CPU_SETSIZE < num)
-		num = CPU_COUNT(&cpuset);
+	cpu = odp_cpumask_count(&odp_global_data.worker_cpus);
+	if (0 == num || cpu < num)
+		num = cpu;
 
 	/* build the mask, allocating down from highest numbered CPU */
+	odp_cpumask_zero(mask);
 	for (cpu = 0, i = CPU_SETSIZE - 1; i >= 0 && cpu < num; --i) {
-		if (CPU_ISSET(i, &cpuset)) {
+		if (odp_cpumask_isset(&odp_global_data.worker_cpus, i)) {
 			odp_cpumask_set(mask, i);
 			cpu++;
 		}
 	}
 
-	if (odp_cpumask_isset(mask, 0))
-		ODP_DBG("\n\tCPU0 will be used for both control and worker threads,\n"
-			"\tthis will likely have a performance impact on the worker thread.\n");
+	odp_cpumask_and(&overlap, mask, &odp_global_data.control_cpus);
+	if (odp_cpumask_count(&overlap))
+		ODP_DBG("\n\tWorker and Control CPU selections overlap...\n"
+			"\tthis will likely have a performance impact on the worker threads.\n");
 
 	return cpu;
 }
 
 int odp_cpumask_default_control(odp_cpumask_t *mask, int num ODP_UNUSED)
 {
-	odp_cpumask_zero(mask);
-	/* By default all control threads on CPU 0 */
-	odp_cpumask_set(mask, 0);
-	return 1;
+	odp_cpumask_copy(mask, &odp_global_data.control_cpus);
+
+	return odp_cpumask_count(mask);
 }
 
 int odp_cpumask_all_available(odp_cpumask_t *mask)
 {
-	odp_cpumask_t mask_work, mask_ctrl;
-
-	odp_cpumask_default_worker(&mask_work, 0);
-	odp_cpumask_default_control(&mask_ctrl, 0);
-	odp_cpumask_or(mask, &mask_work, &mask_ctrl);
+	odp_cpumask_or(mask, &odp_global_data.worker_cpus,
+		       &odp_global_data.control_cpus);
 
 	return odp_cpumask_count(mask);
 }
diff --git a/platform/linux-generic/odp_init.c b/platform/linux-generic/odp_init.c
index 3a990d2..775be4b 100644
--- a/platform/linux-generic/odp_init.c
+++ b/platform/linux-generic/odp_init.c
@@ -4,13 +4,230 @@ 
  * SPDX-License-Identifier:     BSD-3-Clause
  */
 
-#include <odp/init.h>
-#include <odp_internal.h>
+#include <odp_posix_extensions.h>
+
+#include <dirent.h>
+#include <errno.h>
+#include <sched.h>
+#include <string.h>
+#include <sys/types.h>
+
+#include <odp/cpumask.h>
 #include <odp/debug.h>
 #include <odp_debug_internal.h>
+#include <odp/init.h>
+#include <odp_internal.h>
 
 struct odp_global_data_s odp_global_data;
 
+static char pathname_buf[257];
+static char cpuname[5];
+
+/* cpumask of logical CPUs representing physical CPU cores */
+static cpu_set_t primary_cpus;
+/* cpumask of thread siblings for CPU 0 */
+static cpu_set_t hyperthread_cpus;
+
+/*
+ * Populate a cpumask of 'primary' CPUs and a cpumask of
+ * hyperthread siblings for the current 'primary' CPU.
+ */
+static inline void process_sibling_list(void)
+{
+	char *endptr;
+	char *remaining;
+	char *cur_token;
+	long int cpu_long;
+	int cpu;
+
+	cur_token = strtok_r(pathname_buf, ",", &remaining);
+	if (cur_token) {
+		errno = 0;
+		cpu_long = strtol(cur_token, &endptr, 10);
+		if (!(errno || (endptr == cur_token))) {
+			/*
+			 * Mark the first sibling as a 'primary' CPU
+			 */
+			cpu = (int)cpu_long;
+			CPU_SET(cpu, &primary_cpus);
+
+			/* Check for hyperthread siblings */
+			cur_token = strtok_r((char *)NULL, ",", &remaining);
+			/*
+			 * For default CPU availability we only care about
+			 * siblings for CPU 0 - the only 'primary' CPU
+			 * used for 'control' tasks
+			 */
+			while (cur_token && !cpu) {
+				errno = 0;
+				cpu_long = strtol(cur_token, &endptr, 10);
+				if (!(errno || (endptr == cur_token))) {
+					/*
+					 * Mark any other siblings found
+					 * as 'hyperthread' CPUs
+					 */
+					CPU_SET(cpu_long, &hyperthread_cpus);
+				}
+				cur_token = strtok_r((char *)NULL, ",",
+						     &remaining);
+			}
+		}
+	}
+}
+
+/*
+ * Examine the topology information for the current configured 'logical CPU'
+ * and populate a cpumask of 'primary' CPUs and a cpumask of
+ * hyperthread siblings for the current 'primary' CPU.
+ */
+static int process_cpu_info_dir(long int cpu_idnum)
+{
+	FILE *cpulist_file;
+	char *unused;
+
+	/* Track number of logical CPUs discovered */
+	if (numcpus < (int)(cpu_idnum + 1))
+		numcpus = (int)(cpu_idnum + 1);
+
+	if (cpu_idnum < CPU_SETSIZE) {
+		/* Build a pathname to the CPU siblings list */
+		strcpy(pathname_buf, "/sys/devices/system/cpu/cpu");
+		sprintf(cpuname, "%ld", cpu_idnum);
+		strcat(pathname_buf, cpuname);
+		strcat(pathname_buf, "/topology/thread_siblings_list");
+
+		/* Open the siblings list file */
+		cpulist_file = fopen(pathname_buf, "r");
+		if (cpulist_file) {
+			/* Read and process the thread sibling list */
+			unused = fgets(pathname_buf,
+				       (int)(sizeof(pathname_buf) - 1),
+				       cpulist_file);
+			/* Make the C compiler happy - use 'unused' */
+			if (unused)
+				unused = (char *)NULL;
+			process_sibling_list();
+			fclose(cpulist_file);
+			return 0;
+		} else {
+			return -1;
+		}
+	} else {
+		return -1;
+	}
+}
+
+/*
+ * We need to know about all CPUs which were discovered at boot time.
+ * Furthermore, on platforms with 'hyperthreading' enabled, each physical
+ * CPU core shows up as two (or more) logical CPUs despite the fact that
+ * the 'logical CPUs' share some of the hardware in the physical CPU core.
+ * Consequently performance of an isolated CPU may be compromised if its
+ * 'hyperthread CPU' siblings are also running tasks.
+ * The code below populates a cpumask of available physical CPU cores
+ * as well as a cpumask of hyperthreaded siblings for 'control' CPU 0.
+ */
+static int get_cpu_topology(void)
+{
+	char *numptr;
+	char *endptr;
+	long int cpu_idnum;
+	DIR  *d;
+	struct dirent *dir;
+	int error = 0;
+
+	CPU_ZERO(&primary_cpus);
+	CPU_ZERO(&hyperthread_cpus);
+
+	/*
+	 * Scan the /sysfs pseudo-filesystem for CPU info directories.
+	 * There should be one subdirectory for each installed logical CPU
+	 */
+	d = opendir("/sys/devices/system/cpu");
+	if (d) {
+		while ((dir = readdir(d)) != NULL) {
+			cpu_idnum = CPU_SETSIZE;
+
+			/*
+			 * If the current directory entry doesn't represent
+			 * a CPU info subdirectory then skip to the next entry.
+			 */
+			if (dir->d_type == DT_DIR) {
+				if (!strncmp(dir->d_name, "cpu", 3)) {
+					/*
+					 * Directory name starts with "cpu"...
+					 * Try to extract a CPU ID number
+					 * from the remainder of the dirname.
+					 */
+					errno = 0;
+					numptr = dir->d_name;
+					numptr += 3;
+					cpu_idnum = strtol(numptr, &endptr,
+							   10);
+					if (errno || (endptr == numptr))
+						continue;
+				} else {
+					continue;
+				}
+			} else {
+				continue;
+			}
+			/*
+			 * If we get here the current directory entry specifies
+			 * a CPU info subdir for the CPU indexed by cpu_idnum.
+			 */
+			error = process_cpu_info_dir(cpu_idnum);
+			if (error)
+				break;
+		}
+		closedir(d);
+		return error;
+	} else {
+		return -1;
+	}
+}
+
+/*
+ * This function obtains system information specifying which cpus are
+ * available at boot time. These data are then used to produce cpumasks of
+ * configured CPUs which are appropriate for either isolated or 'non-isolated'
+ * task scheduling.
+ */
+static int get_available_cpus(void)
+{
+	int cpu;
+
+	/* Clear the global cpumasks for control and worker CPUs */
+	odp_cpumask_zero(&odp_global_data.control_cpus);
+	odp_cpumask_zero(&odp_global_data.worker_cpus);
+
+	/*
+	 * Derive cpumasks of configured 'primary' CPU cores and
+	 * a cpumask of thread siblings for each 'primary' CPU configured.
+	 */
+	if (get_cpu_topology())
+		return -1;
+
+	/*
+	 * First ensure that only 'primary' CPUs are considered from those
+	 * specified for the 'worker' scheduling cpumask.
+	 * Also ensure CPU 0 is not included in the worker mask.
+	 */
+	for (cpu = 1; cpu < CPU_SETSIZE; cpu++)
+		if (CPU_ISSET(cpu, &primary_cpus))
+			odp_cpumask_set(&odp_global_data.worker_cpus, cpu);
+
+	/*
+	 * Ensure CPU 0 is included in the control mask.
+	 * If CPU 0 has any hyperthread siblings, include them as well.
+	 */
+	odp_cpumask_set(&odp_global_data.control_cpus, 0);
+	for (cpu = 1; cpu < CPU_SETSIZE; cpu++)
+		if (CPU_ISSET(cpu, &hyperthread_cpus))
+			odp_cpumask_set(&odp_global_data.control_cpus, cpu);
+	return 0;
+}
+
 int odp_init_global(const odp_init_t *params,
 		    const odp_platform_init_t *platform_params ODP_UNUSED)
 {
@@ -25,6 +242,12 @@  int odp_init_global(const odp_init_t *params,
 			odp_global_data.abort_fn = params->abort_fn;
 	}
 
+	if (get_available_cpus()) {
+		ODP_ERR("ODP cpumask init failed.\n");
+		goto init_failed;
+	}
+	stage = CPUMASK_INIT;
+
 	if (odp_time_init_global()) {
 		ODP_ERR("ODP time init failed.\n");
 		goto init_failed;
@@ -187,6 +410,9 @@  int _odp_term_global(enum init_stage stage)
 		}
 		/* Fall through */
 
+	case CPUMASK_INIT:
+		/* Fall through */
+
 	case NO_INIT:
 		;
 	}
diff --git a/platform/linux-generic/odp_system_info.c b/platform/linux-generic/odp_system_info.c
index 42aef8a..11d44c8 100644
--- a/platform/linux-generic/odp_system_info.c
+++ b/platform/linux-generic/odp_system_info.c
@@ -30,21 +30,15 @@ 
 
 #define HUGE_PAGE_DIR "/sys/kernel/mm/hugepages"
 
+/* Number of logical CPUs detected at boot time */
+int numcpus;
 
 /*
- * Report the number of CPUs in the affinity mask of the main thread
+ * Report the number of logical CPUs detected at boot time
  */
 static int sysconf_cpu_count(void)
 {
-	cpu_set_t cpuset;
-	int ret;
-
-	ret = pthread_getaffinity_np(pthread_self(),
-				     sizeof(cpuset), &cpuset);
-	if (ret != 0)
-		return 0;
-
-	return CPU_COUNT(&cpuset);
+	return numcpus;
 }
 
 #if defined __x86_64__ || defined __i386__ || defined __OCTEON__ || \