sched/pelt: fix update of blocked pelt ordering

Message ID 1572434309-32512-1-git-send-email-vincent.guittot@linaro.org
State New
Series sched/pelt: fix update of blocked pelt ordering

Commit Message

Vincent Guittot Oct. 30, 2019, 11:18 a.m. UTC
update_cfs_rq_load_avg() can call cpufreq_update_util() to trigger an
update of the frequency. Make sure that the RT, DL and IRQ PELT signals
have been updated before calling cpufreq.

Fixes: 371bf4273269 ("sched/rt: Add rt_rq utilization tracking")
Fixes: 3727e0e16340 ("sched/dl: Add dl_rq utilization tracking")
Fixes: 91c27493e78d ("sched/irq: Add IRQ utilization tracking")
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>

---
 kernel/sched/fair.c | 29 ++++++++++++++++++++---------
 1 file changed, 20 insertions(+), 9 deletions(-)

-- 
2.7.4
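
For background, here is a small standalone sketch (not kernel code; the decay
model, names and numbers are all illustrative) of why the ordering matters: a
schedutil-style governor sums the PELT utilization of every class when it
picks a frequency, so if the blocked RT, DL and IRQ signals have not been
decayed by the time the CFS update triggers the cpufreq callback, the request
is computed from stale, inflated values.

/* Toy model of the ordering issue fixed by this patch.
 * Compile with: cc -o pelt-order pelt-order.c
 */
#include <stdio.h>

struct sig {
	double util;		/* blocked utilization of one class */
	long last_update;	/* last period this signal was decayed */
};

/* Stand-in for PELT decay: halve the signal every 32 idle periods. */
static void decay(struct sig *s, long now)
{
	for (; s->last_update < now; s->last_update++)
		s->util *= 0.97857;	/* ~2^(-1/32), PELT's decay factor */
}

/* Stand-in for the schedutil aggregation: the request follows total util. */
static double pick_freq(double cfs, double rt, double dl, double irq)
{
	return cfs + rt + dl + irq;
}

int main(void)
{
	struct sig cfs = { 300, 0 }, rt = { 200, 0 };
	struct sig dl  = { 100, 0 }, irq = { 50, 0 };
	long now = 64;	/* periods since this CPU last updated its signals */

	/* Old ordering: CFS is decayed, cpufreq reads stale RT/DL/IRQ. */
	decay(&cfs, now);
	double stale = pick_freq(cfs.util, rt.util, dl.util, irq.util);

	/* Fixed ordering: RT/DL/IRQ are decayed before the CFS update. */
	decay(&rt, now);
	decay(&dl, now);
	decay(&irq, now);
	double fresh = pick_freq(cfs.util, rt.util, dl.util, irq.util);

	printf("stale request: %.1f, fresh request: %.1f\n", stale, fresh);
	return 0;
}

With these toy numbers the stale path requests 425.0 while the freshly
decayed signals only justify 162.5, i.e. the old ordering can keep requesting
a higher frequency than the actual load warrants.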

Comments

Peter Zijlstra Oct. 30, 2019, 1:49 p.m. UTC | #1
On Wed, Oct 30, 2019 at 12:18:29PM +0100, Vincent Guittot wrote:
> update_cfs_rq_load_avg() can call cpufreq_update_util() to trigger an
> update of the frequency. Make sure that the RT, DL and IRQ PELT signals
> have been updated before calling cpufreq.
> 
> Fixes: 371bf4273269 ("sched/rt: Add rt_rq utilization tracking")
> Fixes: 3727e0e16340 ("sched/dl: Add dl_rq utilization tracking")
> Fixes: 91c27493e78d ("sched/irq: Add IRQ utilization tracking")
> Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>


Thanks!

Patch

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a144874..adc923f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7446,6 +7446,19 @@ static void update_blocked_averages(int cpu)
 	update_rq_clock(rq);
 
 	/*
+	 * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+	 * that RT, DL and IRQ signals have been updated before updating CFS.
+	 */
+	curr_class = rq->curr->sched_class;
+	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
+	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
+	update_irq_load_avg(rq, 0);
+
+	/* Don't need periodic decay once load/util_avg are null */
+	if (others_have_blocked(rq))
+		done = false;
+
+	/*
 	 * Iterates the task_group tree in a bottom up fashion, see
 	 * list_add_leaf_cfs_rq() for details.
 	 */
@@ -7472,14 +7485,6 @@ static void update_blocked_averages(int cpu)
 			done = false;
 	}
 
-	curr_class = rq->curr->sched_class;
-	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
-	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
-	update_irq_load_avg(rq, 0);
-	/* Don't need periodic decay once load/util_avg are null */
-	if (others_have_blocked(rq))
-		done = false;
-
 	update_blocked_load_status(rq, !done);
 	rq_unlock_irqrestore(rq, &rf);
 }
@@ -7540,12 +7545,18 @@ static inline void update_blocked_averages(int cpu)
 
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
-	update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
 
+	/*
+	 * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+	 * that RT, DL and IRQ signals have been updated before updating CFS.
+	 */
 	curr_class = rq->curr->sched_class;
 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
 	update_irq_load_avg(rq, 0);
+
+	update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
+
 	update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq));
 	rq_unlock_irqrestore(rq, &rf);
 }
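
Abridged, the resulting order in both update_blocked_averages() variants is
the same (in the fair-group variant above, update_cfs_rq_load_avg() is
reached from the leaf cfs_rq walk rather than called directly):

	update_rq_clock(rq);
	/* Decay the blocked RT, DL and IRQ signals first... */
	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
	update_irq_load_avg(rq, 0);
	/* ...so any cpufreq_update_util() reached from here sees fresh values. */
	update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);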