
[v5,5/5] cpuset: Rebuild root domain deadline accounting information

Message ID 20180903142801.20046-6-juri.lelli@redhat.com
State New
Series [v5,1/5] sched/topology: Adding function partition_sched_domains_locked()

Commit Message

Juri Lelli Sept. 3, 2018, 2:28 p.m. UTC
From: Mathieu Poirier <mathieu.poirier@linaro.org>


When the topology of root domains is modified by CPUset or CPUhotplug
operations, information about the current deadline bandwidth held in the
root domain is lost.

This patch addresses the issue by recalculating the lost deadline
bandwidth information: it iterates over the deadline tasks held in
CPUsets and adds their current load to the root domain they are
associated with.

Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>

---
 include/linux/sched.h          |  5 +++
 include/linux/sched/deadline.h |  8 +++++
 kernel/cgroup/cpuset.c         | 63 +++++++++++++++++++++++++++++++++-
 kernel/sched/deadline.c        | 31 +++++++++++++++++
 kernel/sched/sched.h           |  3 --
 kernel/sched/topology.c        | 13 ++++++-
 6 files changed, 118 insertions(+), 5 deletions(-)

-- 
2.17.1

Comments

Peter Zijlstra Sept. 25, 2018, 12:32 p.m. UTC | #1
On Mon, Sep 03, 2018 at 04:28:01PM +0200, Juri Lelli wrote:
> +/*
> + * Called with cpuset_mutex held (rebuild_sched_domains())
> + * Called with hotplug lock held (rebuild_sched_domains_locked())
> + * Called with sched_domains_mutex held (partition_and_rebuild_domains())

Isn't that what we have lockdep_assert_held() for?

> + */
> +static void rebuild_root_domains(void)
> +{
> +	struct cpuset *cs = NULL;
> +	struct cgroup_subsys_state *pos_css;
> +
> +	rcu_read_lock();
> +
> +	/*
> +	 * Clear default root domain DL accounting, it will be computed again
> +	 * if a task belongs to it.
> +	 */
> +	dl_clear_root_domain(&def_root_domain);
> +
> +	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
> +
> +		if (cpumask_empty(cs->effective_cpus)) {
> +			pos_css = css_rightmost_descendant(pos_css);
> +			continue;
> +		}
> +
> +		css_get(&cs->css);
> +
> +		rcu_read_unlock();

That looks really dodgy, but I suppose the comment near
css_next_descendant_pre() spells out that this is in fact OK.

> +		update_tasks_root_domain(cs);
> +
> +		rcu_read_lock();
> +		css_put(&cs->css);
> +	}
> +	rcu_read_unlock();
> +}
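
For context, a minimal sketch of what the assertion-based variant could
look like, assuming the usual lockdep helpers (lockdep_assert_held() for
the two mutexes, lockdep_assert_cpus_held() for the hotplug lock); the
lock names are taken from the comment under discussion:

/*
 * Sketch only: the locking-requirements comment turned into
 * assertions that lockdep can actually enforce at runtime.
 */
static void rebuild_root_domains(void)
{
	struct cpuset *cs = NULL;
	struct cgroup_subsys_state *pos_css;

	lockdep_assert_held(&cpuset_mutex);		/* rebuild_sched_domains() */
	lockdep_assert_cpus_held();			/* rebuild_sched_domains_locked() */
	lockdep_assert_held(&sched_domains_mutex);	/* partition_and_rebuild_domains() */

	rcu_read_lock();
	/* ... body as posted above ... */
	rcu_read_unlock();
}
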
Peter Zijlstra Sept. 25, 2018, 12:53 p.m. UTC | #2
On Mon, Sep 03, 2018 at 04:28:01PM +0200, Juri Lelli wrote:
> diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
> index fb7ae691cb82..08128bdf3944 100644
> --- a/kernel/sched/topology.c
> +++ b/kernel/sched/topology.c
> @@ -1883,8 +1883,19 @@ void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
>  	for (i = 0; i < ndoms_cur; i++) {
>  		for (j = 0; j < n && !new_topology; j++) {
>  			if (cpumask_equal(doms_cur[i], doms_new[j])
> -			    && dattrs_equal(dattr_cur, i, dattr_new, j))
> +			    && dattrs_equal(dattr_cur, i, dattr_new, j)) {

While there, please also fix that wrongly placed operator.

> +				struct root_domain *rd;
> +
> +				/*
> +				 * This domain won't be destroyed and as such
> +				 * its dl_bw->total_bw needs to be cleared.  It
> +				 * will be recomputed in function
> +				 * update_tasks_root_domain().
> +				 */
> +				rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;
> +				dl_clear_root_domain(rd);
>  				goto match1;
> +			}
>  		}
>  		/* No match - a current sched domain not in new doms_new[] */
>  		detach_destroy_domains(doms_cur[i]);
> -- 
> 2.17.1
>
Juri Lelli Sept. 25, 2018, 1:07 p.m. UTC | #3
On 25/09/18 14:32, Peter Zijlstra wrote:
> On Mon, Sep 03, 2018 at 04:28:01PM +0200, Juri Lelli wrote:
> > +/*
> > + * Called with cpuset_mutex held (rebuild_sched_domains())
> > + * Called with hotplug lock held (rebuild_sched_domains_locked())
> > + * Called with sched_domains_mutex held (partition_and_rebuild_domains())
> 
> Isn't that what we have lockdep_assert_held() for?

Indeed. I can put three of them inside the function, even though we have
a single path to here atm. Guess it makes sense to protect against any
future change.

> > + */
> > +static void rebuild_root_domains(void)
> > +{
> > +	struct cpuset *cs = NULL;
> > +	struct cgroup_subsys_state *pos_css;
> > +
> > +	rcu_read_lock();
> > +
> > +	/*
> > +	 * Clear default root domain DL accounting, it will be computed again
> > +	 * if a task belongs to it.
> > +	 */
> > +	dl_clear_root_domain(&def_root_domain);
> > +
> > +	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
> > +
> > +		if (cpumask_empty(cs->effective_cpus)) {
> > +			pos_css = css_rightmost_descendant(pos_css);
> > +			continue;
> > +		}
> > +
> > +		css_get(&cs->css);
> > +
> > +		rcu_read_unlock();
> 
> That looks really dodgy, but I suppose the comment near
> css_next_descendant_pre() spells out that this is in fact OK.

Plus update_cpumasks_hier() seems to do something similar. Maybe I
should switch to using css_tryget_online() as well?

Thanks,

- Juri
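
For reference, the update_cpumasks_hier() pattern Juri mentions would
look roughly as follows here (a sketch, not the posted code):
css_tryget_online() refuses csses that are already going offline, where
a bare css_get() pins them regardless.

	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
		if (cpumask_empty(cs->effective_cpus)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		/* Skip (and don't pin) csses already on their way offline. */
		if (!css_tryget_online(&cs->css))
			continue;

		rcu_read_unlock();
		update_tasks_root_domain(cs);
		rcu_read_lock();
		css_put(&cs->css);
	}
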
Juri Lelli Sept. 25, 2018, 1:08 p.m. UTC | #4
On 25/09/18 14:53, Peter Zijlstra wrote:
> On Mon, Sep 03, 2018 at 04:28:01PM +0200, Juri Lelli wrote:
> > diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
> > index fb7ae691cb82..08128bdf3944 100644
> > --- a/kernel/sched/topology.c
> > +++ b/kernel/sched/topology.c
> > @@ -1883,8 +1883,19 @@ void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
> >  	for (i = 0; i < ndoms_cur; i++) {
> >  		for (j = 0; j < n && !new_topology; j++) {
> >  			if (cpumask_equal(doms_cur[i], doms_new[j])
> > -			    && dattrs_equal(dattr_cur, i, dattr_new, j))
> > +			    && dattrs_equal(dattr_cur, i, dattr_new, j)) {
> 
> While there, please also fix that wrongly placed operator.

Sure.

Thanks,

- Juri
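
The operator fix is mechanical: kernel coding style wants the boolean
operator at the end of the broken line, so after the requested cleanup
the condition would read:

		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_cur[i], doms_new[j]) &&
			    dattrs_equal(dattr_cur, i, dattr_new, j)) {
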

Patch

diff --git a/include/linux/sched.h b/include/linux/sched.h
index e0f4f56c9310..2bf3edc8658d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -279,6 +279,11 @@ struct vtime {
 	u64			gtime;
 };
 
+#ifdef CONFIG_SMP
+extern struct root_domain def_root_domain;
+extern struct mutex sched_domains_mutex;
+#endif
+
 struct sched_info {
 #ifdef CONFIG_SCHED_INFO
 	/* Cumulative counters: */
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index 0cb034331cbb..1aff00b65f3c 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -24,3 +24,11 @@ static inline bool dl_time_before(u64 a, u64 b)
 {
 	return (s64)(a - b) < 0;
 }
+
+#ifdef CONFIG_SMP
+
+struct root_domain;
+extern void dl_add_task_root_domain(struct task_struct *p);
+extern void dl_clear_root_domain(struct root_domain *rd);
+
+#endif /* CONFIG_SMP */
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 8dc26005bb1e..e5d782c5b191 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -44,6 +44,7 @@ 
 #include <linux/proc_fs.h>
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
+#include <linux/sched/deadline.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/task.h>
 #include <linux/seq_file.h>
@@ -813,6 +814,66 @@ static int generate_sched_domains(cpumask_var_t **domains,
 	return ndoms;
 }
 
+static void update_tasks_root_domain(struct cpuset *cs)
+{
+	struct css_task_iter it;
+	struct task_struct *task;
+
+	css_task_iter_start(&cs->css, 0, &it);
+
+	while ((task = css_task_iter_next(&it)))
+		dl_add_task_root_domain(task);
+
+	css_task_iter_end(&it);
+}
+
+/*
+ * Called with cpuset_mutex held (rebuild_sched_domains())
+ * Called with hotplug lock held (rebuild_sched_domains_locked())
+ * Called with sched_domains_mutex held (partition_and_rebuild_domains())
+ */
+static void rebuild_root_domains(void)
+{
+	struct cpuset *cs = NULL;
+	struct cgroup_subsys_state *pos_css;
+
+	rcu_read_lock();
+
+	/*
+	 * Clear default root domain DL accounting, it will be computed again
+	 * if a task belongs to it.
+	 */
+	dl_clear_root_domain(&def_root_domain);
+
+	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
+
+		if (cpumask_empty(cs->effective_cpus)) {
+			pos_css = css_rightmost_descendant(pos_css);
+			continue;
+		}
+
+		css_get(&cs->css);
+
+		rcu_read_unlock();
+
+		update_tasks_root_domain(cs);
+
+		rcu_read_lock();
+		css_put(&cs->css);
+	}
+	rcu_read_unlock();
+}
+
+static void
+partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+				    struct sched_domain_attr *dattr_new)
+{
+	mutex_lock(&sched_domains_mutex);
+	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
+	rebuild_root_domains();
+	mutex_unlock(&sched_domains_mutex);
+}
+
 /*
  * Rebuild scheduler domains.
  *
@@ -845,7 +906,7 @@ static void rebuild_sched_domains_locked(void)
 	ndoms = generate_sched_domains(&doms, &attr);
 
 	/* Have scheduler rebuild the domains */
-	partition_sched_domains(ndoms, doms, attr);
+	partition_and_rebuild_sched_domains(ndoms, doms, attr);
 out:
 	put_online_cpus();
 }
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 997ea7b839fa..5c5938acf89a 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2285,6 +2285,37 @@ void __init init_sched_dl_class(void)
 					GFP_KERNEL, cpu_to_node(i));
 }
 
+void dl_add_task_root_domain(struct task_struct *p)
+{
+	unsigned long flags;
+	struct rq_flags rf;
+	struct rq *rq;
+	struct dl_bw *dl_b;
+
+	rq = task_rq_lock(p, &rf);
+	if (!dl_task(p))
+		goto unlock;
+
+	dl_b = &rq->rd->dl_bw;
+	raw_spin_lock_irqsave(&dl_b->lock, flags);
+
+	dl_b->total_bw += p->dl.dl_bw;
+
+	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+unlock:
+	task_rq_unlock(rq, p, &rf);
+}
+
+void dl_clear_root_domain(struct root_domain *rd)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
+	rd->dl_bw.total_bw = 0;
+	raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
+}
+
 #endif /* CONFIG_SMP */
 
 static void switched_from_dl(struct rq *rq, struct task_struct *p)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4a2e8cae63c4..84215d464dd1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -750,9 +750,6 @@ struct root_domain {
 	unsigned long		max_cpu_capacity;
 };
 
-extern struct root_domain def_root_domain;
-extern struct mutex sched_domains_mutex;
-
 extern void init_defrootdomain(void);
 extern int sched_init_domains(const struct cpumask *cpu_map);
 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index fb7ae691cb82..08128bdf3944 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1883,8 +1883,19 @@ void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
 	for (i = 0; i < ndoms_cur; i++) {
 		for (j = 0; j < n && !new_topology; j++) {
 			if (cpumask_equal(doms_cur[i], doms_new[j])
-			    && dattrs_equal(dattr_cur, i, dattr_new, j))
+			    && dattrs_equal(dattr_cur, i, dattr_new, j)) {
+				struct root_domain *rd;
+
+				/*
+				 * This domain won't be destroyed and as such
+				 * its dl_bw->total_bw needs to be cleared.  It
+				 * will be recomputed in function
+				 * update_tasks_root_domain().
+				 */
+				rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;
+				dl_clear_root_domain(rd);
 				goto match1;
+			}
 		}
 		/* No match - a current sched domain not in new doms_new[] */
 		detach_destroy_domains(doms_cur[i]);