@@ -65,6 +65,7 @@ void build_cpu_to_node_map(void);
| SD_BALANCE_EXEC \
| SD_BALANCE_FORK \
- | SD_WAKE_AFFINE, \
+ | SD_WAKE_AFFINE \
+ | arch_sd_local_flags(0), \
.last_balance = jiffies, \
.balance_interval = 1, \
.nr_balance_failed = 0, \
@@ -71,6 +71,7 @@ static inline const struct cpumask *cpumask_of_node(int node)
| 0*SD_WAKE_AFFINE \
| 0*SD_SHARE_CPUPOWER \
| 0*SD_SHARE_PKG_RESOURCES \
+ | arch_sd_local_flags(0) \
| 0*SD_SERIALIZE \
, \
.last_balance = jiffies, \
@@ -844,6 +844,7 @@ enum cpu_idle_type {
#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
#define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */
+#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
@@ -99,6 +99,8 @@ int arch_update_cpu_topology(void);
| 1*SD_WAKE_AFFINE \
| 1*SD_SHARE_CPUPOWER \
| 1*SD_SHARE_PKG_RESOURCES \
+ | arch_sd_local_flags(SD_SHARE_CPUPOWER|\
+ SD_SHARE_PKG_RESOURCES) \
| 0*SD_SERIALIZE \
| 0*SD_PREFER_SIBLING \
| arch_sd_sibling_asym_packing() \
@@ -131,6 +133,7 @@ int arch_update_cpu_topology(void);
| 1*SD_WAKE_AFFINE \
| 0*SD_SHARE_CPUPOWER \
| 1*SD_SHARE_PKG_RESOURCES \
+ | arch_sd_local_flags(SD_SHARE_PKG_RESOURCES)\
| 0*SD_SERIALIZE \
, \
.last_balance = jiffies, \
@@ -161,6 +164,7 @@ int arch_update_cpu_topology(void);
| 1*SD_WAKE_AFFINE \
| 0*SD_SHARE_CPUPOWER \
| 0*SD_SHARE_PKG_RESOURCES \
+ | arch_sd_local_flags(0) \
| 0*SD_SERIALIZE \
| 1*SD_PREFER_SIBLING \
, \
@@ -5969,6 +5969,11 @@ int __weak arch_sd_sibling_asym_packing(void)
return 0*SD_ASYM_PACKING;
}
+int __weak arch_sd_local_flags(int level)
+{
+ return 1*SD_SHARE_POWERDOMAIN;
+}
+
/*
* Initializers for schedule domains
* Non-inlined to reduce accumulated stack pressure in build_sched_domains()
@@ -6209,6 +6214,7 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
| 0*SD_WAKE_AFFINE
| 0*SD_SHARE_CPUPOWER
| 0*SD_SHARE_PKG_RESOURCES
+ | 1*SD_SHARE_POWERDOMAIN
| 1*SD_SERIALIZE
| 0*SD_PREFER_SIBLING
| 1*SD_NUMA
This new flag, SD_SHARE_POWERDOMAIN, reflects whether the groups of CPUs at a sched_domain level can reach a different power state independently of one another. If clusters can be power gated independently, for example, the flag should be cleared at the CPU level. This information is used to decide whether it is worth packing tasks into one group of CPUs so that the other groups can be power gated, instead of spreading the tasks. The default behavior of the scheduler is to spread tasks, so the flag is set at all sched_domain levels.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 arch/ia64/include/asm/topology.h | 3 ++-
 arch/tile/include/asm/topology.h | 1 +
 include/linux/sched.h            | 1 +
 include/linux/topology.h         | 4 ++++
 kernel/sched/core.c              | 6 ++++++
 5 files changed, 14 insertions(+), 1 deletion(-)
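For illustration only, not part of this patch: a minimal sketch of how an architecture whose clusters can be power gated independently might override the __weak default. The file placement (e.g. arch/arm/kernel/topology.c) and the heuristic of keying off the sharing flags passed in by the topology.h initializers are assumptions, not something this series defines.

#include <linux/sched.h>

/*
 * Illustrative override (assumption, not from this patch): keep
 * SD_SHARE_POWERDOMAIN only at levels whose members already share cpu
 * power or package resources (SMT/MC), and clear it at wider levels
 * (CPU and above), where each group sits in its own power domain and
 * can be power gated on its own.
 */
int arch_sd_local_flags(int level)
{
	if (level & (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES))
		return SD_SHARE_POWERDOMAIN;

	return 0;
}

With such an override, SD_SIBLING_INIT and SD_MC_INIT above (which pass their sharing flags) would keep SD_SHARE_POWERDOMAIN, while SD_CPU_INIT and the node-level initializers (which pass 0) would drop it, giving the "flag cleared at CPU level" behaviour described in the changelog.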