@@ -6862,9 +6862,12 @@ error:
}
static cpumask_var_t *doms_cur; /* current sched domains */
-static int ndoms_cur; /* number of sched domains in 'doms_cur' */
+/* effective number of sched domains in 'doms_cur' */
+static int ndoms_cur;
+/* number of sched domains 'doms_cur' was originally allocated for */
+static int ndoms_orig;
+/* attributes of custom domains in 'doms_cur' */
static struct sched_domain_attr *dattr_cur;
- /* attribues of custom domains in 'doms_cur' */
/*
* Special case: If a kmalloc of a doms_cur partition (array of
@@ -6918,6 +6921,7 @@ static int init_sched_domains(const struct cpumask *cpu_map)
int err;
arch_update_cpu_topology();
+ ndoms_orig = 1;
ndoms_cur = 1;
doms_cur = alloc_sched_domains(ndoms_cur);
if (!doms_cur)
@@ -6990,6 +6994,7 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
{
int i, j, n;
int new_topology;
+ bool borrow_needed = false;
mutex_lock(&sched_domains_mutex);
@@ -6999,6 +7004,13 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
/* Let architecture update cpu core mappings. */
new_topology = arch_update_cpu_topology();
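+ /*
+ * A NULL doms_new requests the single default domain. Stage it
+ * in fallback_doms (active, non-isolated CPUs) unless doms_cur
+ * already points there, and note that fallback_doms is only
+ * borrowed as staging storage.
+ */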
+ if (doms_new == NULL && doms_cur != &fallback_doms) {
+ doms_new = &fallback_doms;
+ cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
+ borrow_needed = true;
+ WARN_ON_ONCE(dattr_new);
+ }
+
n = doms_new ? ndoms_new : 0;
/* Destroy deleted domains */
@@ -7036,10 +7048,22 @@ match2:
}
/* Remember the new sched domains */
- if (doms_cur != &fallback_doms)
- free_sched_domains(doms_cur, ndoms_cur);
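+ /*
+ * When borrowing, doms_cur is reused as the new storage below,
+ * so skip the free; otherwise free it with the size it was
+ * allocated for, which may exceed ndoms_cur after an earlier
+ * borrow.
+ */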
+ if (doms_cur != &fallback_doms && !borrow_needed)
+ free_sched_domains(doms_cur, ndoms_orig);
kfree(dattr_cur); /* kfree(NULL) is safe */
- doms_cur = doms_new;
+
+ if (borrow_needed) {
+ /*
+ * Borrow the previous doms_cur as the new storage, so that
+ * fallback_doms remains available as temporary storage for
+ * future rebuilds.
+ */
+ cpumask_copy(doms_cur[0], doms_new[0]);
+ } else {
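+ /* Adopt the caller's array and record its allocated size. */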
+ doms_cur = doms_new;
+ ndoms_orig = ndoms_new;
+ }
+
dattr_cur = dattr_new;
ndoms_cur = ndoms_new;