@@ -892,6 +892,7 @@ sd_init(struct sched_domain_topology_level *tl,
.last_balance = jiffies,
.balance_interval = sd_weight,
+ .span_weight = sd_weight,
.smt_gain = 0,
.max_newidle_lb_cost = 0,
.next_decay_max_lb_cost = jiffies,
@@ -1373,6 +1374,7 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve
cpumask_or(sched_domain_span(sd),
sched_domain_span(sd),
sched_domain_span(child));
+ sd->span_weight = cpumask_weight(sched_domain_span(sd));
}
}
@@ -1417,7 +1419,6 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
/* Build the groups for the domains */
for_each_cpu(i, cpu_map) {
for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
- sd->span_weight = cpumask_weight(sched_domain_span(sd));
if (sd->flags & SD_OVERLAP) {
if (build_overlap_sched_groups(sd, i))
goto error;
Most of the sched domain structure gets initialized from sd_init() and it looks reasonable to initialize span_weight from it too. Currently it is getting initialized from build_sched_domains(), which doesn't look like the ideal place for doing so. With this change we need to additionally reset span_weight for a special error case, but that looks reasonable as span_weight must be updated every time the domain span is updated. Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org> --- kernel/sched/topology.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) -- 2.12.0.432.g71c3a4f4ba37