diff mbox series

[v5,2/2] sched/fair: update scale invariance of PELT

Message ID 1540570303-6097-3-git-send-email-vincent.guittot@linaro.org
State New
Headers show
Series sched/fair: update scale invariance of PELT | expand

Commit Message

Vincent Guittot Oct. 26, 2018, 4:11 p.m. UTC
The current implementation of load tracking invariance scales the
contribution with current frequency and uarch performance (only for
utilization) of the CPU. One main result of this formula is that the
figures are capped by current capacity of CPU. Another one is that the
load_avg is not invariant because not scaled with uarch.

The util_avg of a periodic task that runs r time slots every p time slots
varies in the range :

    U * (1-y^r)/(1-y^p) * y^i < Utilization < U * (1-y^r)/(1-y^p)

with U is the max util_avg value = SCHED_CAPACITY_SCALE

At a lower capacity, the range becomes:

    U * C * (1-y^r')/(1-y^p) * y^i' < Utilization <  U * C * (1-y^r')/(1-y^p)

with C reflecting the compute capacity ratio between current capacity and
max capacity.

so C tries to compensate changes in (1-y^r') but it can't be accurate.

Instead of scaling the contribution value of PELT algo, we should scale the
running time. The PELT signal aims to track the amount of computation of
tasks and/or rq so it seems more correct to scale the running time to
reflect the effective amount of computation done since the last update.

In order to be fully invariant, we need to apply the same amount of
running time and idle time whatever the current capacity. Because running
at lower capacity implies that the task will run longer, we have to ensure
that the same amount of idle time will be applied when the system becomes idle
and no idle time has been "stolen". But reaching the maximum utilization
value (SCHED_CAPACITY_SCALE) means that the task is seen as an
always-running task whatever the capacity of the CPU (even at max compute
capacity). In this case, we can discard these "stolen" idle times which
become meaningless.

In order to achieve this time scaling, a new clock_pelt is created per rq.
The increase of this clock scales with current capacity when something
is running on rq and synchronizes with clock_task when rq is idle. With
this mechanism, we ensure the same running and idle time whatever the
current capacity. This also enables to simplify the pelt algorithm by
removing all references of uarch and frequency and applying the same
contribution to utilization and loads. Furthermore, the scaling is done
only once per update of clock (update_rq_clock_task()) instead of during
each update of sched_entities and cfs/rt/dl_rq of the rq like the current
implementation. This is interesting when cgroups are involved as shown in
the results below:

On a hikey (octo ARM platform).
Performance cpufreq governor and only shallowest c-state to remove variance
generated by those power features so we only track the impact of pelt algo.

each test runs 16 times

./perf bench sched pipe
(higher is better)
kernel	tip/sched/core     + patch
        ops/seconds        ops/seconds         diff
cgroup
root    59652(+/- 0.18%)   59876(+/- 0.24%)    +0.38%
level1  55608(+/- 0.27%)   55923(+/- 0.24%)    +0.57%
level2  52115(+/- 0.29%)   52564(+/- 0.22%)    +0.86%

hackbench -l 1000
(lower is better)
kernel	tip/sched/core     + patch
        duration(sec)      duration(sec)        diff
cgroup
root    4.453(+/- 2.37%)   4.383(+/- 2.88%)     -1.57%
level1  4.859(+/- 8.50%)   4.830(+/- 7.07%)     -0.60%
level2  5.063(+/- 9.83%)   4.928(+/- 9.66%)     -2.66%

The responsiveness of PELT is improved when CPU is not running at max
capacity with this new algorithm. I have put below some examples of
duration to reach some typical load values according to the capacity of the
CPU with current implementation and with this patch. These values have been
computed based on the geometric series and the half-period value:

Util (%)     max capacity  half capacity(mainline)  half capacity(w/ patch)
972 (95%)    138ms         not reachable            276ms
486 (47.5%)  30ms          138ms                     60ms
256 (25%)    13ms           32ms                     26ms

On my hikey (octo ARM platform) with schedutil governor, the time to reach
max OPP when starting from a null utilization, decreases from 223ms with
current scale invariance down to 121ms with the new algorithm.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>

---

 kernel/sched/core.c     |   1 +
 kernel/sched/deadline.c |   6 +--
 kernel/sched/fair.c     |  25 ++++++-----
 kernel/sched/pelt.c     |  23 +++++-----
 kernel/sched/pelt.h     | 109 ++++++++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/rt.c       |   6 +--
 kernel/sched/sched.h    |   5 ++-
 7 files changed, 148 insertions(+), 27 deletions(-)

-- 
2.7.4

Comments

Pavan Kondeti Oct. 30, 2018, 9:19 a.m. UTC | #1
Hi Vincent,

On Fri, Oct 26, 2018 at 06:11:43PM +0200, Vincent Guittot wrote:
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c

> index 6806c27..7a69673 100644

> --- a/kernel/sched/fair.c

> +++ b/kernel/sched/fair.c

> @@ -674,9 +674,8 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)

>  	return calc_delta_fair(sched_slice(cfs_rq, se), se);

>  }

>  

> -#ifdef CONFIG_SMP

>  #include "pelt.h"

> -#include "sched-pelt.h"

> +#ifdef CONFIG_SMP

>  

>  static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);

>  static unsigned long task_h_load(struct task_struct *p);

> @@ -764,7 +763,7 @@ void post_init_entity_util_avg(struct sched_entity *se)

>  			 * such that the next switched_to_fair() has the

>  			 * expected state.

>  			 */

> -			se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);

> +			se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);

>  			return;

>  		}

>  	}

> @@ -3466,7 +3465,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s

>  /* Update task and its cfs_rq load average */

>  static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)

>  {

> -	u64 now = cfs_rq_clock_task(cfs_rq);

> +	u64 now = cfs_rq_clock_pelt(cfs_rq);

>  	struct rq *rq = rq_of(cfs_rq);

>  	int cpu = cpu_of(rq);

>  	int decayed;

> @@ -6694,6 +6693,12 @@ done: __maybe_unused;

>  	if (new_tasks > 0)

>  		goto again;

>  

> +	/*

> +	 * rq is about to be idle, check if we need to update the

> +	 * lost_idle_time of clock_pelt

> +	 */

> +	update_idle_rq_clock_pelt(rq);

> +

>  	return NULL;

>  }


Do you think it is better to call this from pick_next_task_idle()? I don't see
any functional difference, but it may be easier to follow.

Thanks,
Pavan
-- 
Qualcomm India Private Limited, on behalf of Qualcomm Innovation Center, Inc.
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum, a Linux Foundation Collaborative Project.
Vincent Guittot Oct. 30, 2018, 10:50 a.m. UTC | #2
Hi Pavan,

On Tue, 30 Oct 2018 at 10:19, Pavan Kondeti <pkondeti@codeaurora.org> wrote:
>

> Hi Vincent,

>

> On Fri, Oct 26, 2018 at 06:11:43PM +0200, Vincent Guittot wrote:

> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c

> > index 6806c27..7a69673 100644

> > --- a/kernel/sched/fair.c

> > +++ b/kernel/sched/fair.c

> > @@ -674,9 +674,8 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)

> >       return calc_delta_fair(sched_slice(cfs_rq, se), se);

> >  }

> >

> > -#ifdef CONFIG_SMP

> >  #include "pelt.h"

> > -#include "sched-pelt.h"

> > +#ifdef CONFIG_SMP

> >

> >  static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);

> >  static unsigned long task_h_load(struct task_struct *p);

> > @@ -764,7 +763,7 @@ void post_init_entity_util_avg(struct sched_entity *se)

> >                        * such that the next switched_to_fair() has the

> >                        * expected state.

> >                        */

> > -                     se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);

> > +                     se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);

> >                       return;

> >               }

> >       }

> > @@ -3466,7 +3465,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s

> >  /* Update task and its cfs_rq load average */

> >  static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)

> >  {

> > -     u64 now = cfs_rq_clock_task(cfs_rq);

> > +     u64 now = cfs_rq_clock_pelt(cfs_rq);

> >       struct rq *rq = rq_of(cfs_rq);

> >       int cpu = cpu_of(rq);

> >       int decayed;

> > @@ -6694,6 +6693,12 @@ done: __maybe_unused;

> >       if (new_tasks > 0)

> >               goto again;

> >

> > +     /*

> > +      * rq is about to be idle, check if we need to update the

> > +      * lost_idle_time of clock_pelt

> > +      */

> > +     update_idle_rq_clock_pelt(rq);

> > +

> >       return NULL;

> >  }

>

> Do you think it is better to call this from pick_next_task_idle()? I don't see

> any functional difference, but it may be easier to follow.


Yes, there is no functional difference. I have put it there just for
simplicity, as there is no pelt-related code in idle.c, and to keep things
contained

>

> Thanks,

> Pavan

> --

> Qualcomm India Private Limited, on behalf of Qualcomm Innovation Center, Inc.

> Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum, a Linux Foundation Collaborative Project.

>
Dietmar Eggemann Oct. 31, 2018, 7:20 a.m. UTC | #3
On 10/26/18 6:11 PM, Vincent Guittot wrote:

[...]

>   static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);

>   static unsigned long task_h_load(struct task_struct *p);

> @@ -764,7 +763,7 @@ void post_init_entity_util_avg(struct sched_entity *se)

>   			 * such that the next switched_to_fair() has the

>   			 * expected state.

>   			 */

> -			se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);

> +			se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);

>   			return;

>   		}

>   	}


There is this 1/cpu scaling of se->avg.util_sum (running_sum) in 
update_tg_cfs_runnable() so it can be used to calculate 
se->avg.runnable_load_sum (runnable_sum). I guess with your approach 
this should be removed.

> @@ -3466,7 +3465,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s

>   /* Update task and its cfs_rq load average */

>   static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)

>   {

> -	u64 now = cfs_rq_clock_task(cfs_rq);

> +	u64 now = cfs_rq_clock_pelt(cfs_rq);

>   	struct rq *rq = rq_of(cfs_rq);

>   	int cpu = cpu_of(rq);

>   	int decayed;

> @@ -6694,6 +6693,12 @@ done: __maybe_unused;

>   	if (new_tasks > 0)

>   		goto again;

>   

> +	/*

> +	 * rq is about to be idle, check if we need to update the

> +	 * lost_idle_time of clock_pelt

> +	 */

> +	update_idle_rq_clock_pelt(rq);

> +

>   	return NULL;

>   }


[...]
Vincent Guittot Oct. 31, 2018, 9:18 a.m. UTC | #4
Hi Dietmar,

On Wed, 31 Oct 2018 at 08:20, Dietmar Eggemann <dietmar.eggemann@arm.com> wrote:
>

> On 10/26/18 6:11 PM, Vincent Guittot wrote:

>

> [...]

>

> >   static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);

> >   static unsigned long task_h_load(struct task_struct *p);

> > @@ -764,7 +763,7 @@ void post_init_entity_util_avg(struct sched_entity *se)

> >                        * such that the next switched_to_fair() has the

> >                        * expected state.

> >                        */

> > -                     se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);

> > +                     se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);

> >                       return;

> >               }

> >       }

>

> There is this 1/cpu scaling of se->avg.util_sum (running_sum) in

> update_tg_cfs_runnable() so it can be used to calculate

> se->avg.runnable_load_sum (runnable_sum). I guess with your approach

> this should be removed.


Yes good catch

>

> > @@ -3466,7 +3465,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s

> >   /* Update task and its cfs_rq load average */

> >   static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)

> >   {

> > -     u64 now = cfs_rq_clock_task(cfs_rq);

> > +     u64 now = cfs_rq_clock_pelt(cfs_rq);

> >       struct rq *rq = rq_of(cfs_rq);

> >       int cpu = cpu_of(rq);

> >       int decayed;

> > @@ -6694,6 +6693,12 @@ done: __maybe_unused;

> >       if (new_tasks > 0)

> >               goto again;

> >

> > +     /*

> > +      * rq is about to be idle, check if we need to update the

> > +      * lost_idle_time of clock_pelt

> > +      */

> > +     update_idle_rq_clock_pelt(rq);

> > +

> >       return NULL;

> >   }

>

> [...]
Dietmar Eggemann Nov. 1, 2018, 9:38 a.m. UTC | #5
On 10/31/18 10:18 AM, Vincent Guittot wrote:
> Hi Dietmar,

> 

> On Wed, 31 Oct 2018 at 08:20, Dietmar Eggemann <dietmar.eggemann@arm.com> wrote:

>>

>> On 10/26/18 6:11 PM, Vincent Guittot wrote:

>>

>> [...]

>>

>>>    static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);

>>>    static unsigned long task_h_load(struct task_struct *p);

>>> @@ -764,7 +763,7 @@ void post_init_entity_util_avg(struct sched_entity *se)

>>>                         * such that the next switched_to_fair() has the

>>>                         * expected state.

>>>                         */

>>> -                     se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);

>>> +                     se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);

>>>                        return;

>>>                }

>>>        }

>>

>> There is this 1/cpu scaling of se->avg.util_sum (running_sum) in

>> update_tg_cfs_runnable() so it can be used to calculate

>> se->avg.runnable_load_sum (runnable_sum). I guess with your approach

>> this should be removed.

> 

> Yes good catch


Another thing, since you do not need the cpu parameter in 
accumulate_sum() anymore, you could also get rid of it in 
___update_load_sum() and further in __update_load_avg_blocked_se(),
__update_load_avg_cfs_rq() and __update_load_avg_se().

Nitpick: The function header of update_cfs_rq_load_avg() mentions '@now: 
current time, as per cfs_rq_clock_task()' ... should mention 
cfs_rq_clock_pelt() instead.

[...]
Dietmar Eggemann Nov. 2, 2018, 3:36 p.m. UTC | #6
On 10/26/18 6:11 PM, Vincent Guittot wrote:
> The current implementation of load tracking invariance scales the

> contribution with current frequency and uarch performance (only for

> utilization) of the CPU. One main result of this formula is that the

> figures are capped by current capacity of CPU. Another one is that the

> load_avg is not invariant because not scaled with uarch.

> 

> The util_avg of a periodic task that runs r time slots every p time slots

> varies in the range :

> 

>      U * (1-y^r)/(1-y^p) * y^i < Utilization < U * (1-y^r)/(1-y^p)

> 

> with U is the max util_avg value = SCHED_CAPACITY_SCALE

> 

> At a lower capacity, the range becomes:

> 

>      U * C * (1-y^r')/(1-y^p) * y^i' < Utilization <  U * C * (1-y^r')/(1-y^p)

> 

> with C reflecting the compute capacity ratio between current capacity and

> max capacity.

> 

> so C tries to compensate changes in (1-y^r') but it can't be accurate.

> 

> Instead of scaling the contribution value of PELT algo, we should scale the

> running time. The PELT signal aims to track the amount of computation of

> tasks and/or rq so it seems more correct to scale the running time to

> reflect the effective amount of computation done since the last update.

> 

> In order to be fully invariant, we need to apply the same amount of

> running time and idle time whatever the current capacity. Because running

> at lower capacity implies that the task will run longer, we have to ensure

> that the same amount of idle time will be apply when system becomes idle

> and no idle time has been "stolen". But reaching the maximum utilization

> value (SCHED_CAPACITY_SCALE) means that the task is seen as an

> always-running task whatever the capacity of the CPU (even at max compute

> capacity). In this case, we can discard this "stolen" idle times which

> becomes meaningless.

> 

> In order to achieve this time scaling, a new clock_pelt is created per rq.

> The increase of this clock scales with current capacity when something

> is running on rq and synchronizes with clock_task when rq is idle. With

> this mecanism, we ensure the same running and idle time whatever the

> current capacity.


Thinking about this new approach on a big.LITTLE platform:

CPU Capacities big: 1024 LITTLE: 512, performance CPUfreq governor

A 50% (runtime/period) task on a big CPU will become an always running 
task on the little CPU. The utilization signal of the task and the 
cfs_rq of the little CPU converges to 1024.

With contrib scaling the utilization signal of the 50% task converges to 
512 on the little CPU, even it is always running on it, and so does the 
one of the cfs_rq.

Two 25% tasks on a big CPU will become two 50% tasks on a little CPU. 
The utilization signal of the tasks converges to 512 and the one of the 
cfs_rq of the little CPU converges to 1024.

With contrib scaling the utilization signal of the 25% tasks converges 
to 256 on the little CPU, even they run each 50% on it, and the one of 
the cfs_rq converges to 512.

So what do we consider system-wide invariance? I thought that e.g. a 25% 
task should have a utilization value of 256 no matter on which CPU it is 
running?

In both cases, the little CPU is not going idle whereas the big CPU does.
Vincent Guittot Nov. 5, 2018, 7:59 a.m. UTC | #7
On Thu, 1 Nov 2018 at 10:38, Dietmar Eggemann <dietmar.eggemann@arm.com> wrote:
>

> On 10/31/18 10:18 AM, Vincent Guittot wrote:

> > Hi Dietmar,

> >

> > On Wed, 31 Oct 2018 at 08:20, Dietmar Eggemann <dietmar.eggemann@arm.com> wrote:

> >>

> >> On 10/26/18 6:11 PM, Vincent Guittot wrote:

> >>

> >> [...]

> >>

> >>>    static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);

> >>>    static unsigned long task_h_load(struct task_struct *p);

> >>> @@ -764,7 +763,7 @@ void post_init_entity_util_avg(struct sched_entity *se)

> >>>                         * such that the next switched_to_fair() has the

> >>>                         * expected state.

> >>>                         */

> >>> -                     se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);

> >>> +                     se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);

> >>>                        return;

> >>>                }

> >>>        }

> >>

> >> There is this 1/cpu scaling of se->avg.util_sum (running_sum) in

> >> update_tg_cfs_runnable() so it can be used to calculate

> >> se->avg.runnable_load_sum (runnable_sum). I guess with your approach

> >> this should be removed.

> >

> > Yes good catch

>

> Another thing, since you do not need the cpu parameter in

> accumulate_sum() anymore, you could also get rid of it in

> ___update_load_sum() and further in __update_load_avg_blocked_se(),

> __update_load_avg_cfs_rq() and __update_load_avg_se().


yes, I have to clean interface from reference to cpu

>

> Nitpick: The function header of update_cfs_rq_load_avg() mentions '@now:

> current time, as per cfs_rq_clock_task()' ... should mention

> cfs_rq_clock_pelt() instead.


ok

>

> [...]
Vincent Guittot Nov. 5, 2018, 9:10 a.m. UTC | #8
On Fri, 2 Nov 2018 at 16:36, Dietmar Eggemann <dietmar.eggemann@arm.com> wrote:
>

> On 10/26/18 6:11 PM, Vincent Guittot wrote:

> > The current implementation of load tracking invariance scales the

> > contribution with current frequency and uarch performance (only for

> > utilization) of the CPU. One main result of this formula is that the

> > figures are capped by current capacity of CPU. Another one is that the

> > load_avg is not invariant because not scaled with uarch.

> >

> > The util_avg of a periodic task that runs r time slots every p time slots

> > varies in the range :

> >

> >      U * (1-y^r)/(1-y^p) * y^i < Utilization < U * (1-y^r)/(1-y^p)

> >

> > with U is the max util_avg value = SCHED_CAPACITY_SCALE

> >

> > At a lower capacity, the range becomes:

> >

> >      U * C * (1-y^r')/(1-y^p) * y^i' < Utilization <  U * C * (1-y^r')/(1-y^p)

> >

> > with C reflecting the compute capacity ratio between current capacity and

> > max capacity.

> >

> > so C tries to compensate changes in (1-y^r') but it can't be accurate.

> >

> > Instead of scaling the contribution value of PELT algo, we should scale the

> > running time. The PELT signal aims to track the amount of computation of

> > tasks and/or rq so it seems more correct to scale the running time to

> > reflect the effective amount of computation done since the last update.

> >

> > In order to be fully invariant, we need to apply the same amount of

> > running time and idle time whatever the current capacity. Because running

> > at lower capacity implies that the task will run longer, we have to ensure

> > that the same amount of idle time will be apply when system becomes idle

> > and no idle time has been "stolen". But reaching the maximum utilization

> > value (SCHED_CAPACITY_SCALE) means that the task is seen as an

> > always-running task whatever the capacity of the CPU (even at max compute

> > capacity). In this case, we can discard this "stolen" idle times which

> > becomes meaningless.

> >

> > In order to achieve this time scaling, a new clock_pelt is created per rq.

> > The increase of this clock scales with current capacity when something

> > is running on rq and synchronizes with clock_task when rq is idle. With

> > this mecanism, we ensure the same running and idle time whatever the

> > current capacity.

>

> Thinking about this new approach on a big.LITTLE platform:

>

> CPU Capacities big: 1024 LITTLE: 512, performance CPUfreq governor

>

> A 50% (runtime/period) task on a big CPU will become an always running

> task on the little CPU. The utilization signal of the task and the

> cfs_rq of the little CPU converges to 1024.

>

> With contrib scaling the utilization signal of the 50% task converges to

> 512 on the little CPU, even it is always running on it, and so does the

> one of the cfs_rq.

>

> Two 25% tasks on a big CPU will become two 50% tasks on a little CPU.

> The utilization signal of the tasks converges to 512 and the one of the

> cfs_rq of the little CPU converges to 1024.

>

> With contrib scaling the utilization signal of the 25% tasks converges

> to 256 on the little CPU, even they run each 50% on it, and the one of

> the cfs_rq converges to 512.

>

> So what do we consider system-wide invariance? I thought that e.g. a 25%

> task should have a utilization value of 256 no matter on which CPU it is

> running?

>

> In both cases, the little CPU is not going idle whereas the big CPU does.


IMO, the key point here is that there is no idle time. As soon as
there is no idle time, you don't know if a task has enough compute
capacity so you can't make difference between the 50% running task or
an always running task on the little core.
It's also interesting to notice that the task will reach the always
running state after more than 600ms on the little core with utilization
starting from 0.

Then considering the system-wide invariance, the task are not really
invariant. If we take a 50% running task that run 40ms in a period of
80ms, the max utilization of the task will be 721 on the big core and
512 on the little core.
Then, if you take a 39ms running task instead, the utilization on the
big core will reach 709 but it will be 507 on little core. So your
utilization depends on the current capacity
With the new proposal, the max utilization will be 709 on big and
little cores for the 39ms running task. For the 40ms running task, the
utilization will be 721 on big core. then if the task moves on the
little, it will reach the value 721 after 80ms,  then 900 after more
than 160ms and 1000 after 320ms
Morten Rasmussen Nov. 5, 2018, 2:58 p.m. UTC | #9
On Mon, Nov 05, 2018 at 10:10:34AM +0100, Vincent Guittot wrote:
> On Fri, 2 Nov 2018 at 16:36, Dietmar Eggemann <dietmar.eggemann@arm.com> wrote:

> >

> > On 10/26/18 6:11 PM, Vincent Guittot wrote:

> > > The current implementation of load tracking invariance scales the

> > > contribution with current frequency and uarch performance (only for

> > > utilization) of the CPU. One main result of this formula is that the

> > > figures are capped by current capacity of CPU. Another one is that the

> > > load_avg is not invariant because not scaled with uarch.

> > >

> > > The util_avg of a periodic task that runs r time slots every p time slots

> > > varies in the range :

> > >

> > >      U * (1-y^r)/(1-y^p) * y^i < Utilization < U * (1-y^r)/(1-y^p)

> > >

> > > with U is the max util_avg value = SCHED_CAPACITY_SCALE

> > >

> > > At a lower capacity, the range becomes:

> > >

> > >      U * C * (1-y^r')/(1-y^p) * y^i' < Utilization <  U * C * (1-y^r')/(1-y^p)

> > >

> > > with C reflecting the compute capacity ratio between current capacity and

> > > max capacity.

> > >

> > > so C tries to compensate changes in (1-y^r') but it can't be accurate.

> > >

> > > Instead of scaling the contribution value of PELT algo, we should scale the

> > > running time. The PELT signal aims to track the amount of computation of

> > > tasks and/or rq so it seems more correct to scale the running time to

> > > reflect the effective amount of computation done since the last update.

> > >

> > > In order to be fully invariant, we need to apply the same amount of

> > > running time and idle time whatever the current capacity. Because running

> > > at lower capacity implies that the task will run longer, we have to ensure

> > > that the same amount of idle time will be apply when system becomes idle

> > > and no idle time has been "stolen". But reaching the maximum utilization

> > > value (SCHED_CAPACITY_SCALE) means that the task is seen as an

> > > always-running task whatever the capacity of the CPU (even at max compute

> > > capacity). In this case, we can discard this "stolen" idle times which

> > > becomes meaningless.

> > >

> > > In order to achieve this time scaling, a new clock_pelt is created per rq.

> > > The increase of this clock scales with current capacity when something

> > > is running on rq and synchronizes with clock_task when rq is idle. With

> > > this mecanism, we ensure the same running and idle time whatever the

> > > current capacity.

> >

> > Thinking about this new approach on a big.LITTLE platform:

> >

> > CPU Capacities big: 1024 LITTLE: 512, performance CPUfreq governor

> >

> > A 50% (runtime/period) task on a big CPU will become an always running

> > task on the little CPU. The utilization signal of the task and the

> > cfs_rq of the little CPU converges to 1024.

> >

> > With contrib scaling the utilization signal of the 50% task converges to

> > 512 on the little CPU, even it is always running on it, and so does the

> > one of the cfs_rq.

> >

> > Two 25% tasks on a big CPU will become two 50% tasks on a little CPU.

> > The utilization signal of the tasks converges to 512 and the one of the

> > cfs_rq of the little CPU converges to 1024.

> >

> > With contrib scaling the utilization signal of the 25% tasks converges

> > to 256 on the little CPU, even they run each 50% on it, and the one of

> > the cfs_rq converges to 512.

> >

> > So what do we consider system-wide invariance? I thought that e.g. a 25%

> > task should have a utilization value of 256 no matter on which CPU it is

> > running?

> >

> > In both cases, the little CPU is not going idle whereas the big CPU does.

> 

> IMO, the key point here is that there is no idle time. As soon as

> there is no idle time, you don't know if a task has enough compute

> capacity so you can't make difference between the 50% running task or

> an always running task on the little core.

> That's also interesting to noticed that the task will reach the always

> running state after more than 600ms on little core with utilization

> starting from 0.

> 

> Then considering the system-wide invariance, the task are not really

> invariant. If we take a 50% running task that run 40ms in a period of

> 80ms, the max utilization of the task will be 721 on the big core and

> 512 on the little core.

> Then, if you take a 39ms running task instead, the utilization on the

> big core will reach 709 but it will be 507 on little core. So your

> utilization depends on the current capacity

> With the new proposal, the max utilization will be 709 on big and

> little cores for the 39ms running task. For the 40ms running task, the

> utilization will be 721 on big core. then if the task moves on the

> little, it will reach the value 721 after 80ms,  then 900 after more

> than 160ms and 1000 after 320ms


It has always been debatable what to do with utilization when there are
no spare cycles.

In Dietmar's example where two 25% tasks are put on a 512 (50%) capacity
CPU we add just enough utilization to have no spare cycles left. One
could argue that 25% is still the correct utilization for those tasks.
However, we only know their true utilization because they just ran
unconstrained on a higher capacity CPU. Once they are on the 512 capacity
CPU we wouldn't know if the tasks grew in utilization as there are no
spare cycles to use.

As I see it, the most fundamental difference between scaling
contribution and time for PELT is the characteristics when CPUs are
over-utilized.

With contribution scaling the PELT utilization of a task is a _minimum_
utilization. Regardless of where the task is currently/was running (and
provided that it doesn't change behaviour) its PELT utilization will
approximate its _minimum_ utilization on an idle 1024 capacity CPU.

With time scaling the PELT utilization doesn't really have a meaning on
its own. It has to be compared to the capacity of the CPU where it
is/was running to know what the its current PELT utilization means. When
the utilization over-shoots the capacity its value is no longer
represents utilization, it just means that it has a higher compute
demand than is offered on its current CPU and a high value means that it
has been suffering longer. It can't be used to predict the actual
utilization on an idle 1024 capacity any better than contribution scaled
PELT utilization.

This change might not be a showstopper, but it is something to be aware
off and take into account wherever PELT utilization is used.

Morten
Vincent Guittot Nov. 6, 2018, 2:27 p.m. UTC | #10
On Mon, 5 Nov 2018 at 15:59, Morten Rasmussen <morten.rasmussen@arm.com> wrote:
>

> On Mon, Nov 05, 2018 at 10:10:34AM +0100, Vincent Guittot wrote:

> > On Fri, 2 Nov 2018 at 16:36, Dietmar Eggemann <dietmar.eggemann@arm.com> wrote:

> > >

...
> > > >

> > > > In order to achieve this time scaling, a new clock_pelt is created per rq.

> > > > The increase of this clock scales with current capacity when something

> > > > is running on rq and synchronizes with clock_task when rq is idle. With

> > > > this mecanism, we ensure the same running and idle time whatever the

> > > > current capacity.

> > >

> > > Thinking about this new approach on a big.LITTLE platform:

> > >

> > > CPU Capacities big: 1024 LITTLE: 512, performance CPUfreq governor

> > >

> > > A 50% (runtime/period) task on a big CPU will become an always running

> > > task on the little CPU. The utilization signal of the task and the

> > > cfs_rq of the little CPU converges to 1024.

> > >

> > > With contrib scaling the utilization signal of the 50% task converges to

> > > 512 on the little CPU, even it is always running on it, and so does the

> > > one of the cfs_rq.

> > >

> > > Two 25% tasks on a big CPU will become two 50% tasks on a little CPU.

> > > The utilization signal of the tasks converges to 512 and the one of the

> > > cfs_rq of the little CPU converges to 1024.

> > >

> > > With contrib scaling the utilization signal of the 25% tasks converges

> > > to 256 on the little CPU, even they run each 50% on it, and the one of

> > > the cfs_rq converges to 512.

> > >

> > > So what do we consider system-wide invariance? I thought that e.g. a 25%

> > > task should have a utilization value of 256 no matter on which CPU it is

> > > running?

> > >

> > > In both cases, the little CPU is not going idle whereas the big CPU does.

> >

> > IMO, the key point here is that there is no idle time. As soon as

> > there is no idle time, you don't know if a task has enough compute

> > capacity so you can't make difference between the 50% running task or

> > an always running task on the little core.

> > That's also interesting to noticed that the task will reach the always

> > running state after more than 600ms on little core with utilization

> > starting from 0.

> >

> > Then considering the system-wide invariance, the task are not really

> > invariant. If we take a 50% running task that run 40ms in a period of

> > 80ms, the max utilization of the task will be 721 on the big core and

> > 512 on the little core.

> > Then, if you take a 39ms running task instead, the utilization on the

> > big core will reach 709 but it will be 507 on little core. So your

> > utilization depends on the current capacity

> > With the new proposal, the max utilization will be 709 on big and

> > little cores for the 39ms running task. For the 40ms running task, the

> > utilization will be 721 on big core. then if the task moves on the

> > little, it will reach the value 721 after 80ms,  then 900 after more

> > than 160ms and 1000 after 320ms

>

> It has always been debatable what to do with utilization when there are

> no spare cycles.

>

> In Dietmar's example where two 25% tasks are put on a 512 (50%) capacity

> CPU we add just enough utilization to have no spare cycles left. One

> could argue that 25% is still the correct utilization for those tasks.

> However, we only know their true utilization because they just ran

> unconstrained on a higher capacity CPU. Once they are on the 512 capacity

> CPU we wouldn't know if the tasks grew in utilization as there are no

> spare cycles to use.

>

> As I see it, the most fundamental difference between scaling

> contribution and time for PELT is the characteristics when CPUs are

> over-utilized.


I agree that there is a big difference in the way the over utilization
state is handled

>

> With contribution scaling the PELT utilization of a task is a _minimum_

> utilization. Regardless of where the task is currently/was running (and

> provided that it doesn't change behaviour) its PELT utilization will

> approximate its _minimum_ utilization on an idle 1024 capacity CPU.


The main drawback is that the _minimum_ utilization depends on the CPU
capacity on which the task runs. The two 25% tasks on a 256 capacity
CPU will have a utilization of 128 as an example

>

> With time scaling the PELT utilization doesn't really have a meaning on

> its own. It has to be compared to the capacity of the CPU where it

> is/was running to know what the its current PELT utilization means. When


I would have said the opposite. The utilization of the task will
always reflect the same amount of work that has been already done
whatever the CPU capacity.
In fact, the new scaling mechanism uses the real amount of work that
has been already done to compute the utilization signal which is not
the case currently. This gives more information about the real amount
of work that has been computed in the over utilization case.

> the utilization over-shoots the capacity its value is no longer

> represents utilization, it just means that it has a higher compute

> demand than is offered on its current CPU and a high value means that it

> has been suffering longer. It can't be used to predict the actual

> utilization on an idle 1024 capacity any better than contribution scaled

> PELT utilization.


I think that it provides earlier detection of over utilization and
more accurate signal for a longer time duration which can help the
load balance
Coming back to the 50% task example, I will use a 50ms running time
during a 100ms period for the example below to make it easier

Starting from 0, the evolution of the utilization is:

With contribution scaling:
         time  0ms  50ms  100ms  150ms  200ms
capacity
1024           0    666
512            0    333   453
When the CPU starts to be over utilized (@100ms), the utilization is
already too low (453 instead of 666) and the scheduler doesn't detect yet
that we are over utilized
256            0    169   226    246    252
That's even worse with this lower capacity

With time scaling,
         time  0ms  50ms  100ms  150ms  200ms
capacity
1024           0    666
512            0    428   677
We know that the current capacity is not enough and the utilization
reflects the correct utilization level compared to 1024 capacity (the
666 vs 677 difference comes from the 1024us window so the last window
is not full in the case of max capacity)
256            0    234   468    564    677
At 100ms, we know that there is not enough capacity. (In fact we know
that at 56ms). And even at time 200ms, the amount of work is exactly
what would have been executed on a CPU 4x faster

>

> This change might not be a showstopper, but it is something to be aware

> off and take into account wherever PELT utilization is used.


The point above is clearly a big difference between the 2 approaches
of the no spare cycle case but I think it will help by giving more
information in the over utilization case

Vincent
>

> Morten
Peter Zijlstra Nov. 6, 2018, 2:59 p.m. UTC | #11
On Mon, Nov 05, 2018 at 02:58:54PM +0000, Morten Rasmussen wrote:

> It has always been debatable what to do with utilization when there are

> no spare cycles.

> 

> In Dietmar's example where two 25% tasks are put on a 512 (50%) capacity

> CPU we add just enough utilization to have no spare cycles left. One

> could argue that 25% is still the correct utilization for those tasks.

> However, we only know their true utilization because they just ran

> unconstrained on a higher capacity CPU. Once they are on the 512 capacity

> CPU we wouldn't know if the tasks grew in utilization as there are no

> spare cycles to use.

> 

> As I see it, the most fundamental difference between scaling

> contribution and time for PELT is the characteristics when CPUs are

> over-utilized.

> 

> With contribution scaling the PELT utilization of a task is a _minimum_

> utilization. Regardless of where the task is currently/was running (and

> provided that it doesn't change behaviour) its PELT utilization will

> approximate its _minimum_ utilization on an idle 1024 capacity CPU.

> 

> With time scaling the PELT utilization doesn't really have a meaning on

> its own. It has to be compared to the capacity of the CPU where it

> is/was running to know what the its current PELT utilization means. When

> the utilization over-shoots the capacity its value is no longer

> represents utilization, it just means that it has a higher compute

> demand than is offered on its current CPU and a high value means that it

> has been suffering longer. It can't be used to predict the actual

> utilization on an idle 1024 capacity any better than contribution scaled

> PELT utilization.

> 

> This change might not be a showstopper, but it is something to be aware

> off and take into account wherever PELT utilization is used.


So for things like x86, where we don't have immediate control over the
OPPs nor actually know the current dynamic max OPP (or even can know), I
much prefer the model Vincent proposes.

The one thing we do know is the lack of idle time, and I feel equating
no idle with u=1 makes perfect sense.

Luckily x86 does provide means of querying the current effective OPP
and so a utilization value can be usefully compared to another.
Dietmar Eggemann Nov. 7, 2018, 10:47 a.m. UTC | #12
On 11/5/18 10:10 AM, Vincent Guittot wrote:
> On Fri, 2 Nov 2018 at 16:36, Dietmar Eggemann <dietmar.eggemann@arm.com> wrote:

>>

>> On 10/26/18 6:11 PM, Vincent Guittot wrote:


[...]

>> Thinking about this new approach on a big.LITTLE platform:

>>

>> CPU Capacities big: 1024 LITTLE: 512, performance CPUfreq governor

>>

>> A 50% (runtime/period) task on a big CPU will become an always running

>> task on the little CPU. The utilization signal of the task and the

>> cfs_rq of the little CPU converges to 1024.

>>

>> With contrib scaling the utilization signal of the 50% task converges to

>> 512 on the little CPU, even it is always running on it, and so does the

>> one of the cfs_rq.

>>

>> Two 25% tasks on a big CPU will become two 50% tasks on a little CPU.

>> The utilization signal of the tasks converges to 512 and the one of the

>> cfs_rq of the little CPU converges to 1024.

>>

>> With contrib scaling the utilization signal of the 25% tasks converges

>> to 256 on the little CPU, even they run each 50% on it, and the one of

>> the cfs_rq converges to 512.

>>

>> So what do we consider system-wide invariance? I thought that e.g. a 25%

>> task should have a utilization value of 256 no matter on which CPU it is

>> running?

>>

>> In both cases, the little CPU is not going idle whereas the big CPU does.

> 

> IMO, the key point here is that there is no idle time. As soon as

> there is no idle time, you don't know if a task has enough compute

> capacity so you can't make difference between the 50% running task or

> an always running task on the little core.


Agreed. My '2 25% tasks on a 512 cpu' was a special example in the sense 
that the tasks would stay invariant since they are not restricted by the 
cpu capacity yet. '2 35% tasks' would also have 256 utilization each 
with contrib scaling so that's not invariant either.

Could we say that in the overutilized case with contrib scaling each of 
the n tasks get cpu_cap/n utilization where with time scaling they get 
1024/n utilization? Even though there is no value in this information 
because of the over-utilized state.

> That's also interesting to noticed that the task will reach the always

> running state after more than 600ms on little core with utilization

> starting from 0.

> 

> Then considering the system-wide invariance, the task are not really

> invariant. If we take a 50% running task that run 40ms in a period of

> 80ms, the max utilization of the task will be 721 on the big core and

> 512 on the little core.


Agreed, the utilization of the task on the big CPU oscillates between 
721 and 321 so the average is still ~512.

> Then, if you take a 39ms running task instead, the utilization on the

> big core will reach 709 but it will be 507 on little core. So your

> utilization depends on the current capacity.


OK, but the average should be ~ 507 on big as well. There is idle time 
now even on the little CPU. But yeah, with longer period value, there 
are quite big amplitudes.

> With the new proposal, the max utilization will be 709 on big and

> little cores for the 39ms running task. For the 40ms running task, the

> utilization will be 721 on big core. then if the task moves on the

> little, it will reach the value 721 after 80ms,  then 900 after more

> than 160ms and 1000 after 320ms


We consider max values here? In this case, agreed. So this is a reminder 
that even if the average utilization of a task compared to the CPU 
capacity would mark the system as non-overutilized (39ms/80ms on a 512 
CPU), the utilization of that task looks different because of the 
oscillation which is pretty noticeable with long periods.

The important bit for EAS is that it only uses utilization in the 
non-overutilized case. Here, utilization signals should look the same 
between the two approaches, not considering tasks with long periods like 
the 39/80ms example above.
There are also some advantages for EAS with time scaling: (1) faster 
overutilization detection when a big task runs on a little CPU, (2) 
higher (initial) task utilization value when this task migrates from 
little to big CPU.

We should run our EAS task placement tests with your time scaling patches.
Vincent Guittot Nov. 7, 2018, 12:58 p.m. UTC | #13
On Wed, 7 Nov 2018 at 11:47, Dietmar Eggemann <dietmar.eggemann@arm.com> wrote:
>

> On 11/5/18 10:10 AM, Vincent Guittot wrote:

> > On Fri, 2 Nov 2018 at 16:36, Dietmar Eggemann <dietmar.eggemann@arm.com> wrote:

> >>

> >> On 10/26/18 6:11 PM, Vincent Guittot wrote:

>

> [...]

>

> >> Thinking about this new approach on a big.LITTLE platform:

> >>

> >> CPU Capacities big: 1024 LITTLE: 512, performance CPUfreq governor

> >>

> >> A 50% (runtime/period) task on a big CPU will become an always running

> >> task on the little CPU. The utilization signal of the task and the

> >> cfs_rq of the little CPU converges to 1024.

> >>

> >> With contrib scaling the utilization signal of the 50% task converges to

> >> 512 on the little CPU, even it is always running on it, and so does the

> >> one of the cfs_rq.

> >>

> >> Two 25% tasks on a big CPU will become two 50% tasks on a little CPU.

> >> The utilization signal of the tasks converges to 512 and the one of the

> >> cfs_rq of the little CPU converges to 1024.

> >>

> >> With contrib scaling the utilization signal of the 25% tasks converges

> >> to 256 on the little CPU, even they run each 50% on it, and the one of

> >> the cfs_rq converges to 512.

> >>

> >> So what do we consider system-wide invariance? I thought that e.g. a 25%

> >> task should have a utilization value of 256 no matter on which CPU it is

> >> running?

> >>

> >> In both cases, the little CPU is not going idle whereas the big CPU does.

> >

> > IMO, the key point here is that there is no idle time. As soon as

> > there is no idle time, you don't know if a task has enough compute

> > capacity so you can't make difference between the 50% running task or

> > an always running task on the little core.

>

> Agreed. My '2 25% tasks on a 512 cpu' was a special example in the sense

> that the tasks would stay invariant since they are not restricted by the

> cpu capacity yet. '2 35% tasks' would also have 256 utilization each

> with contrib scaling so that's not invariant either.

>

> Could we say that in the overutilized case with contrib scaling each of

> the n tasks get cpu_cap/n utilization where with time scaling they get

> 1024/n utilization? Even though there is no value in this information

> because of the over-utilized state.

>

> > That's also interesting to noticed that the task will reach the always

> > running state after more than 600ms on little core with utilization

> > starting from 0.

> >

> > Then considering the system-wide invariance, the task are not really

> > invariant. If we take a 50% running task that run 40ms in a period of

> > 80ms, the max utilization of the task will be 721 on the big core and

> > 512 on the little core.

>

> Agreed, the utilization of the task on the big CPU oscillates between

> 721 and 321 so the average is still ~512.

>

> > Then, if you take a 39ms running task instead, the utilization on the

> > big core will reach 709 but it will be 507 on little core. So your

> > utilization depends on the current capacity.

>

> OK, but the average should be ~ 507 on big as well. There is idle time


I don't know about the average, the utilization varies between 709-292
on big core and between 507-486 in little core

> now even on the little CPU. But yeah, with longer period value, there

> are quite big amplitudes.

>

> > With the new proposal, the max utilization will be 709 on big and

> > little cores for the 39ms running task. For the 40ms running task, the

> > utilization will be 721 on big core. then if the task moves on the

> > little, it will reach the value 721 after 80ms,  then 900 after more

> > than 160ms and 1000 after 320ms

>

> We consider max values here? In this case, agreed. So this is a reminder


Yes, we consider max value as it's what is mainly used especially with
util_est feature

> that even if the average utilization of a task compared to the CPU

> capacity would mark the system as non-overutilized (39ms/80ms on a 512

> CPU), the utilization of that task looks different because of the

> oscillation which is pretty noticeable with long periods.

>

> The important bit for EAS is that it only uses utilization in the

> non-overutilized case. Here, utilization signals should look the same

> between the two approaches, not considering tasks with long periods like

> the 39/80ms example above.

> There are also some advantages for EAS with time scaling: (1) faster

> overutilization detection when a big task runs on a little CPU, (2)

> higher (initial) task utilization value when this task migrates from

> little to big CPU.

>

> We should run our EAS task placement tests with your time scaling patches.
Quentin Perret Nov. 8, 2018, 11:35 a.m. UTC | #14
On Wednesday 07 Nov 2018 at 11:47:09 (+0100), Dietmar Eggemann wrote:
> The important bit for EAS is that it only uses utilization in the

> non-overutilized case. Here, utilization signals should look the same

> between the two approaches, not considering tasks with long periods like the

> 39/80ms example above.

> There are also some advantages for EAS with time scaling: (1) faster

> overutilization detection when a big task runs on a little CPU, (2) higher

> (initial) task utilization value when this task migrates from little to big

> CPU.


Agreed, these patches should help detecting the over-utilized scenarios
faster and more reliably, which is probably a good thing. I'll try to
have a look in more details soon.

> We should run our EAS task placement tests with your time scaling patches.


Right, I tried these patches with the synthetic tests we usually run
against our upstream EAS dev branch (see [1]), and I couldn't see any
regression, which is good sign :-)


<slightly off topic>
Since most people are probably not familiar with these tests, I'll try
to elaborate a little bit more. They are unit tests aimed to stress
particular behaviours of the scheduler on asymmetric platforms. More
precisely, they check that capacity-awareness/misfit and EAS are
actually able to up-migrate and down-migrate tasks between big and
little CPUs when necessary.

The tests are based on rt-app and ftrace. They basically run a whole lot
of scenarios with rt-app (small tasks, big tasks, a mix of both, tasks
changing behaviour, ramping up, ramping down, ...), pull a trace of the
execution and check that:

   1. the task(s) did not miss activations (which will basically be true
      only if the scheduler managed to provide each task with enough CPU
      capacity). We call that one 'test_slack';

   2. the task placement is close enough to the optimal placement
      energy-wise (which is computed off-line using the energy model
      and the rt-app conf). We call that one 'test_task_placement'.

For example, in order to pass the test, a periodic task that ramps up
from 10% to 70% over (say) 5s should probably start its execution on
little CPUs to not waste energy, and get up-migrated to big CPUs later
on to not miss activations. Otherwise one of the two checks will fail.

I'd like to emphasize that these test scenarios are *not* supposed to
look like real workloads at all. They've been designed with the sole purpose
of stressing specific code paths of the scheduler to spot any obvious
breakage. They've proven quite useful for us in the past.

All the tests are publicly available in the LISA repo [2].
</slightly off topic>


So, to come back to Vincent's patches, I managed to get 10/10 pass rate
to most of the tests referred to as 'generic' in [1] on my Juno r0. The
kernel I tested had Morten's misfit patches, the EAS patches v8, and
Vincent's patches on top.

Although I still need to really get my head around all the implications
of changing PELT like that, I cannot see any obvious red flags from the
testing perspective here.

Thanks,
Quentin

---
[1] https://developer.arm.com/open-source/energy-aware-scheduling/eas-mainline-development
[2] https://github.com/ARM-software/lisa
Vincent Guittot Nov. 8, 2018, 4:04 p.m. UTC | #15
On Thu, 8 Nov 2018 at 12:35, Quentin Perret <quentin.perret@arm.com> wrote:
>

> On Wednesday 07 Nov 2018 at 11:47:09 (+0100), Dietmar Eggemann wrote:

> > The important bit for EAS is that it only uses utilization in the

> > non-overutilized case. Here, utilization signals should look the same

> > between the two approaches, not considering tasks with long periods like the

> > 39/80ms example above.

> > There are also some advantages for EAS with time scaling: (1) faster

> > overutilization detection when a big task runs on a little CPU, (2) higher

> > (initial) task utilization value when this task migrates from little to big

> > CPU.

>

> Agreed, these patches should help detecting the over-utilized scenarios

> faster and more reliably, which is probably a good thing. I'll try to

> have a look in more details soon.

>

> > We should run our EAS task placement tests with your time scaling patches.

>

> Right, I tried these patches with the synthetic tests we usually run

> against our upstream EAS dev branch (see [1]), and I couldn't see any

> regression, which is good sign :-)


Thanks for testing

>

>

> <slightly off topic>

> Since most people are probably not familiar with these tests, I'll try

> to elaborate a little bit more. They are unit tests aimed to stress

> particular behaviours of the scheduler on asymmetric platforms. More

> precisely, they check that capacity-awareness/misfit and EAS are

> actually able to up-migrate and down-migrate tasks between big and

> little CPUs when necessary.

>

> The tests are based on rt-app and ftrace. They basically run a whole lot

> of scenarios with rt-app (small tasks, big tasks, a mix of both, tasks

> changing behaviour, ramping up, ramping down, ...), pull a trace of the

> execution and check that:

>

>    1. the task(s) did not miss activations (which will basically be true

>       only if the scheduler managed to provide each task with enough CPU

>       capacity). We call that one 'test_slack';

>

>    2. the task placement is close enough to the optimal placement

>       energy-wise (which is computed off-line using the energy model

>       and the rt-app conf). We call that one 'test_task_placement'.

>

> For example, in order to pass the test, a periodic task that ramps up

> from 10% to 70% over (say) 5s should probably start its execution on

> little CPUs to not waste energy, and get up-migrated to big CPUs later

> on to not miss activations. Otherwise one of the two checks will fail.

>

> I'd like to emphasize that these test scenarios are *not* supposed to

> look like real workloads at all. They've be design with the sole purpose

> of stressing specific code paths of the scheduler to spot any obvious

> breakage. They've proven quite useful for us in the past.

>

> All the tests are publicly available in the LISA repo [2].

> </slightly off topic>

>

>

> So, to come back to Vincent's patches, I managed to get 10/10 pass rate

> to most of the tests referred to as 'generic' in [1] on my Juno r0. The

> kernel I tested had Morten's misfit patches, the EAS patches v8, and

> Vincent's patches on top.

>

> Although I still need to really get my head around all the implications

> of changing PELT like that, I cannot see any obvious red flags from the

> testing perspective here.

>

> Thanks,

> Quentin

>

> ---

> [1] https://developer.arm.com/open-source/energy-aware-scheduling/eas-mainline-development

> [2] https://github.com/ARM-software/lisa
diff mbox series

Patch

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fe02231..2c87aaf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -180,6 +180,7 @@  static void update_rq_clock_task(struct rq *rq, s64 delta)
 	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
 		update_irq_load_avg(rq, irq_delta + steal);
 #endif
+	update_rq_clock_pelt(rq, delta);
 }
 
 void update_rq_clock(struct rq *rq)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 91e4202..f07bffe 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1761,7 +1761,7 @@  pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	deadline_queue_push_tasks(rq);
 
 	if (rq->curr->sched_class != &dl_sched_class)
-		update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+		update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
 
 	return p;
 }
@@ -1770,7 +1770,7 @@  static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
 {
 	update_curr_dl(rq);
 
-	update_dl_rq_load_avg(rq_clock_task(rq), rq, 1);
+	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
 	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_dl_task(rq, p);
 }
@@ -1787,7 +1787,7 @@  static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
 {
 	update_curr_dl(rq);
 
-	update_dl_rq_load_avg(rq_clock_task(rq), rq, 1);
+	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
 	/*
 	 * Even when we have runtime, update_curr_dl() might have resulted in us
 	 * not being the leftmost task anymore. In that case NEED_RESCHED will
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6806c27..7a69673 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -674,9 +674,8 @@  static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	return calc_delta_fair(sched_slice(cfs_rq, se), se);
 }
 
-#ifdef CONFIG_SMP
 #include "pelt.h"
-#include "sched-pelt.h"
+#ifdef CONFIG_SMP
 
 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
 static unsigned long task_h_load(struct task_struct *p);
@@ -764,7 +763,7 @@  void post_init_entity_util_avg(struct sched_entity *se)
 			 * such that the next switched_to_fair() has the
 			 * expected state.
 			 */
-			se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);
+			se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
 			return;
 		}
 	}
@@ -3466,7 +3465,7 @@  static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 /* Update task and its cfs_rq load average */
 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
-	u64 now = cfs_rq_clock_task(cfs_rq);
+	u64 now = cfs_rq_clock_pelt(cfs_rq);
 	struct rq *rq = rq_of(cfs_rq);
 	int cpu = cpu_of(rq);
 	int decayed;
@@ -6694,6 +6693,12 @@  done: __maybe_unused;
 	if (new_tasks > 0)
 		goto again;
 
+	/*
+	 * rq is about to be idle, check if we need to update the
+	 * lost_idle_time of clock_pelt
+	 */
+	update_idle_rq_clock_pelt(rq);
+
 	return NULL;
 }
 
@@ -7353,7 +7358,7 @@  static void update_blocked_averages(int cpu)
 		if (throttled_hierarchy(cfs_rq))
 			continue;
 
-		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
+		if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq))
 			update_tg_load_avg(cfs_rq, 0);
 
 		/* Propagate pending load changes to the parent, if any: */
@@ -7374,8 +7379,8 @@  static void update_blocked_averages(int cpu)
 	}
 
 	curr_class = rq->curr->sched_class;
-	update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
-	update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
+	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
+	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
 	update_irq_load_avg(rq, 0);
 	/* Don't need periodic decay once load/util_avg are null */
 	if (others_have_blocked(rq))
@@ -7445,11 +7450,11 @@  static inline void update_blocked_averages(int cpu)
 
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
-	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+	update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
 
 	curr_class = rq->curr->sched_class;
-	update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
-	update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
+	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
+	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
 	update_irq_load_avg(rq, 0);
 #ifdef CONFIG_NO_HZ_COMMON
 	rq->last_blocked_load_update_tick = jiffies;
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 90fb5bc..f5f78bf 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -26,7 +26,6 @@ 
 
 #include <linux/sched.h>
 #include "sched.h"
-#include "sched-pelt.h"
 #include "pelt.h"
 
 /*
@@ -106,16 +105,12 @@  static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
  *                     n=1
  */
 static __always_inline u32
-accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
+accumulate_sum(u64 delta, struct sched_avg *sa,
 	       unsigned long load, unsigned long runnable, int running)
 {
-	unsigned long scale_freq, scale_cpu;
 	u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
 	u64 periods;
 
-	scale_freq = arch_scale_freq_capacity(cpu);
-	scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
-
 	delta += sa->period_contrib;
 	periods = delta / 1024; /* A period is 1024us (~1ms) */
 
@@ -137,13 +132,12 @@  accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
 	}
 	sa->period_contrib = delta;
 
-	contrib = cap_scale(contrib, scale_freq);
 	if (load)
 		sa->load_sum += load * contrib;
 	if (runnable)
 		sa->runnable_load_sum += runnable * contrib;
 	if (running)
-		sa->util_sum += contrib * scale_cpu;
+		sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;
 
 	return periods;
 }
@@ -221,7 +215,7 @@  ___update_load_sum(u64 now, int cpu, struct sched_avg *sa,
 	 * Step 1: accumulate *_sum since last_update_time. If we haven't
 	 * crossed period boundaries, finish.
 	 */
-	if (!accumulate_sum(delta, cpu, sa, load, runnable, running))
+	if (!accumulate_sum(delta, sa, load, runnable, running))
 		return 0;
 
 	return 1;
@@ -365,12 +359,21 @@  int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
 int update_irq_load_avg(struct rq *rq, u64 running)
 {
 	int ret = 0;
+
+	/*
+	 * We can't use clock_pelt because irq time is not accounted in
+	 * clock_task. Instead we directly scale the running time to
+	 * reflect the real amount of computation
+	 */
+	running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
+	running = cap_scale(running, arch_scale_cpu_capacity(NULL, cpu_of(rq)));
+
 	/*
 	 * We know the time that has been used by interrupt since last update
 	 * but we don't when. Let be pessimistic and assume that interrupt has
 	 * happened just before the update. This is not so far from reality
 	 * because interrupt will most probably wake up task and trig an update
-	 * of rq clock during which the metric si updated.
+	 * of rq clock during which the metric is updated.
 	 * We start to decay with normal context time and then we add the
 	 * interrupt context time.
 	 * We can safely remove running from rq->clock because
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index 7e56b48..fb39446 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -1,4 +1,5 @@ 
 #ifdef CONFIG_SMP
+#include "sched-pelt.h"
 
 int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se);
 int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se);
@@ -42,6 +43,102 @@  static inline void cfs_se_util_change(struct sched_avg *avg)
 	WRITE_ONCE(avg->util_est.enqueued, enqueued);
 }
 
+/*
+ * The clock_pelt scales the time to reflect the effective amount of
+ * computation done during the running delta time but then sync back to
+ * clock_task when rq is idle.
+ *
+ *
+ * absolute time   | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
+ * @ max capacity  ------******---------------******---------------
+ * @ half capacity ------************---------************---------
+ * clock pelt      | 1| 2|    3|    4| 7| 8| 9|   10|   11|14|15|16
+ *
+ */
+static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
+{
+	if (unlikely(is_idle_task(rq->curr))) {
+		/* The rq is idle, we can sync to clock_task */
+		rq->clock_pelt  = rq_clock_task(rq);
+		return;
+	}
+
+	/*
+	 * When a rq runs at a lower compute capacity, it will need
+	 * more time to do the same amount of work than at max
+	 * capacity: either because it takes more time to compute the
+	 * same amount of work or because taking more time means
+	 * sharing more often the CPU between entities.
+	 * In order to be invariant, we scale the delta to reflect how
+	 * much work has been really done.
+	 * Running at lower capacity also means running longer to do
+	 * the same amount of work and this results in stealing some
+	 * idle time that will disturb the load signal compared to
+	 * max capacity; this stolen idle time will be automatically
+	 * reflected when the rq will be idle and the clock will be
+	 * synced with rq_clock_task.
+	 */
+
+	/*
+	 * scale the elapsed time to reflect the real amount of
+	 * computation
+	 */
+	delta = cap_scale(delta, arch_scale_cpu_capacity(NULL, cpu_of(rq)));
+	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));
+
+	rq->clock_pelt += delta;
+}
+
+/*
+ * When rq becomes idle, we have to check if it has lost some idle time
+ * because it was fully busy. A rq is fully used when the /Sum util_sum
+ * is greater or equal to:
+ * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT;
+ * For optimization and computing rounding purpose, we don't take into account
+ * the position in the current window (period_contrib) and we use the maximum
+ * util_avg value minus 1
+ */
+static inline void update_idle_rq_clock_pelt(struct rq *rq)
+{
+	u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
+	u32 overload = rq->cfs.avg.util_sum;
+	overload += rq->avg_rt.util_sum;
+	overload += rq->avg_dl.util_sum;
+
+	/*
+	 * Reflecting some stolen time makes sense only if the idle
+	 * phase would be present at max capacity. As soon as the
+	 * utilization of a rq has reached the maximum value, it is
+	 * considered as an always-running rq without idle time to
+	 * steal. This potential idle time is considered as lost in
+	 * this case. We keep track of this lost idle time compared to
+	 * rq's clock_task.
+	 */
+	if ((overload >= divider))
+		rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
+}
+
+static inline u64 rq_clock_pelt(struct rq *rq)
+{
+	return rq->clock_pelt - rq->lost_idle_time;
+}
+
+#ifdef CONFIG_CFS_BANDWIDTH
+/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
+static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
+{
+	if (unlikely(cfs_rq->throttle_count))
+		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
+
+	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
+}
+#else
+static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
+{
+	return rq_clock_pelt(rq_of(cfs_rq));
+}
+#endif
+
 #else
 
 static inline int
@@ -67,6 +164,18 @@  update_irq_load_avg(struct rq *rq, u64 running)
 {
 	return 0;
 }
+
+static inline u64 rq_clock_pelt(struct rq *rq)
+{
+	return rq_clock_task(rq);
+}
+
+static inline void
+update_rq_clock_pelt(struct rq *rq, s64 delta) { }
+
+static inline void
+update_idle_rq_clock_pelt(struct rq *rq) { }
+
 #endif
 
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 2e2955a..f62f2d5 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1584,7 +1584,7 @@  pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	 * rt task
 	 */
 	if (rq->curr->sched_class != &rt_sched_class)
-		update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
+		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
 
 	return p;
 }
@@ -1593,7 +1593,7 @@  static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 {
 	update_curr_rt(rq);
 
-	update_rt_rq_load_avg(rq_clock_task(rq), rq, 1);
+	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
 
 	/*
 	 * The previous task needs to be made eligible for pushing
@@ -2324,7 +2324,7 @@  static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	struct sched_rt_entity *rt_se = &p->rt;
 
 	update_curr_rt(rq);
-	update_rt_rq_load_avg(rq_clock_task(rq), rq, 1);
+	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
 
 	watchdog(rq, p);
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 618b578..1e162b7 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -832,7 +832,10 @@  struct rq {
 
 	unsigned int		clock_update_flags;
 	u64			clock;
-	u64			clock_task;
+	/* Ensure that all clocks are in the same cache line */
+	u64			clock_task ____cacheline_aligned;
+	u64			clock_pelt;
+	unsigned long		lost_idle_time;
 
 	atomic_t		nr_iowait;