
sched/pelt: fix update_blocked_averages() for dl and rt

Message ID 1535723908-10779-1-git-send-email-vincent.guittot@linaro.org
State New
Series sched/pelt: fix update_blocked_averages() for dl and rt

Commit Message

Vincent Guittot Aug. 31, 2018, 1:58 p.m. UTC
update_blocked_averages() is called to periodically decay the stalled load
of idle CPUs and to sync all loads before running load balance.

When a cfs rq is idle, it triggers a load balance during pick_next_task_fair()
in order to potentially pull tasks and use this newly idle CPU. This load
balance happens while the prev task from another class has not been put yet
and its utilization has therefore not been updated. This can wrongly account
running time as idle time for the rt or dl classes.

Test that no rt or dl task is running when updating their utilization in
update_blocked_averages().

We still update rt and dl utilization instead of simply skipping them to
make sure that all metrics are synced when used during load balance.
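
For background, the running flag that this patch now sets propagates into
PELT roughly as sketched below (simplified from kernel/sched/pelt.c in this
kernel generation; exact internals may differ in the tree this applies to).
Passing running == 0 while an rt task is actually on the CPU makes that
window decay as if the rq had been idle, which is the mis-accounting
described above.

  int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
  {
  	/*
  	 * 'running' tells PELT whether an rt task is currently executing
  	 * on this rq; it decides whether util_sum accrues or only decays.
  	 */
  	if (___update_load_sum(now, rq->cpu, &rq->avg_rt,
  			       running,
  			       running,
  			       running)) {
  		___update_load_avg(&rq->avg_rt, 1, 1);
  		return 1;
  	}

  	return 0;
  }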

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>

---
 kernel/sched/fair.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

-- 
2.7.4

Comments

Peter Zijlstra Aug. 31, 2018, 2:41 p.m. UTC | #1
On Fri, Aug 31, 2018 at 03:58:28PM +0200, Vincent Guittot wrote:
> update_blocked_averages() is called to periodically decay the stalled load
> of idle CPUs and to sync all loads before running load balance.
> 
> When a cfs rq is idle, it triggers a load balance during pick_next_task_fair()
> in order to potentially pull tasks and use this newly idle CPU. This load
> balance happens while the prev task from another class has not been put yet
> and its utilization has therefore not been updated. This can wrongly account
> running time as idle time for the rt or dl classes.
> 
> Test that no rt or dl task is running when updating their utilization in
> update_blocked_averages().
> 
> We still update rt and dl utilization instead of simply skipping them to
> make sure that all metrics are synced when used during load balance.
> 

Fixes: 371bf4273269 ("sched/rt: Add rt_rq utilization tracking")
Fixes: 3727e0e16340 ("sched/dl: Add dl_rq utilization tracking")

Right?

> Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
> ---
>  kernel/sched/fair.c | 5 +++--
>  1 file changed, 3 insertions(+), 2 deletions(-)
> 
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 309c93f..a1babaf 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -7262,6 +7262,7 @@ static void update_blocked_averages(int cpu)
>  {
>  	struct rq *rq = cpu_rq(cpu);
>  	struct cfs_rq *cfs_rq, *pos;
> +	const struct sched_class *curr_class = rq->curr->sched_class;
>  	struct rq_flags rf;
>  	bool done = true;
>  
> @@ -7298,8 +7299,8 @@ static void update_blocked_averages(int cpu)
>  		if (cfs_rq_has_blocked(cfs_rq))
>  			done = false;
>  	}
> -	update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
> -	update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
> +	update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
> +	update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
>  	update_irq_load_avg(rq, 0);
>  	/* Don't need periodic decay once load/util_avg are null */
>  	if (others_have_blocked(rq))

Did you forget to update the second implementation of
update_blocked_averages() ?
Vincent Guittot Aug. 31, 2018, 2:45 p.m. UTC | #2
On Fri, 31 Aug 2018 at 16:41, Peter Zijlstra <peterz@infradead.org> wrote:
>
> On Fri, Aug 31, 2018 at 03:58:28PM +0200, Vincent Guittot wrote:
> > update_blocked_averages() is called to periodically decay the stalled load
> > of idle CPUs and to sync all loads before running load balance.
> >
> > When a cfs rq is idle, it triggers a load balance during pick_next_task_fair()
> > in order to potentially pull tasks and use this newly idle CPU. This load
> > balance happens while the prev task from another class has not been put yet
> > and its utilization has therefore not been updated. This can wrongly account
> > running time as idle time for the rt or dl classes.
> >
> > Test that no rt or dl task is running when updating their utilization in
> > update_blocked_averages().
> >
> > We still update rt and dl utilization instead of simply skipping them to
> > make sure that all metrics are synced when used during load balance.
> >
>
> Fixes: 371bf4273269 ("sched/rt: Add rt_rq utilization tracking")
> Fixes: 3727e0e16340 ("sched/dl: Add dl_rq utilization tracking")
>
> Right?

Yes

>
> > Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
> > ---
> >  kernel/sched/fair.c | 5 +++--
> >  1 file changed, 3 insertions(+), 2 deletions(-)
> >
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index 309c93f..a1babaf 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -7262,6 +7262,7 @@ static void update_blocked_averages(int cpu)
> >  {
> >       struct rq *rq = cpu_rq(cpu);
> >       struct cfs_rq *cfs_rq, *pos;
> > +     const struct sched_class *curr_class = rq->curr->sched_class;
> >       struct rq_flags rf;
> >       bool done = true;
> >
> > @@ -7298,8 +7299,8 @@ static void update_blocked_averages(int cpu)
> >               if (cfs_rq_has_blocked(cfs_rq))
> >                       done = false;
> >       }
> > -     update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
> > -     update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
> > +     update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
> > +     update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
> >       update_irq_load_avg(rq, 0);
> >       /* Don't need periodic decay once load/util_avg are null */
> >       if (others_have_blocked(rq))
>
> Did you forget to update the second implementation of
> update_blocked_averages() ?

Yes. I have sent this version too quickly.
I'm going to send an update.
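
For reference, the variant Peter points at is the !CONFIG_FAIR_GROUP_SCHED
definition of update_blocked_averages(); the follow-up would presumably apply
the same change there. A rough sketch of that version with the fix applied
(not the actual v2 posting):

  static inline void update_blocked_averages(int cpu)
  {
  	struct rq *rq = cpu_rq(cpu);
  	struct cfs_rq *cfs_rq = &rq->cfs;
  	const struct sched_class *curr_class;
  	struct rq_flags rf;

  	rq_lock_irqsave(rq, &rf);
  	update_rq_clock(rq);
  	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);

  	/* Same fix as the group case: report whether an rt/dl task is running */
  	curr_class = rq->curr->sched_class;
  	update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
  	update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
  	update_irq_load_avg(rq, 0);
  #ifdef CONFIG_NO_HZ_COMMON
  	rq->last_blocked_load_update_tick = jiffies;
  	if (!cfs_rq_has_blocked(cfs_rq) && !others_have_blocked(rq))
  		rq->has_blocked_load = 0;
  #endif
  	rq_unlock_irqrestore(rq, &rf);
  }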

Patch

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 309c93f..a1babaf 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7262,6 +7262,7 @@ static void update_blocked_averages(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct cfs_rq *cfs_rq, *pos;
+	const struct sched_class *curr_class = rq->curr->sched_class;
 	struct rq_flags rf;
 	bool done = true;
 
@@ -7298,8 +7299,8 @@ static void update_blocked_averages(int cpu)
 		if (cfs_rq_has_blocked(cfs_rq))
 			done = false;
 	}
-	update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
-	update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+	update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
+	update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
 	update_irq_load_avg(rq, 0);
 	/* Don't need periodic decay once load/util_avg are null */
 	if (others_have_blocked(rq))