diff mbox series

[3/3] sched/fair: fix unnecessary increase of balance interval

Message ID 1545292547-18770-4-git-send-email-vincent.guittot@linaro.org
State Superseded
Headers show
Series sched/fair: some fixes for asym_packing | expand

Commit Message

Vincent Guittot Dec. 20, 2018, 7:55 a.m. UTC
In case of active balance, we increase the balance interval to cover
pinned-task cases not covered by the all_pinned logic. Nevertheless, the
active migration triggered by asym packing should be treated as the normal
unbalanced case and reset the interval to its default value, otherwise active
migration for asym_packing can easily be delayed for hundreds of ms
because of this pinned-task detection mechanism.
The same applies to the other conditions tested in need_active_balance(),
such as a misfit task or a src_cpu whose capacity is reduced compared to
dst_cpu (see the comments in need_active_balance() for details).

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>

---
 kernel/sched/fair.c | 40 +++++++++++++++++++++++++++-------------
 1 file changed, 27 insertions(+), 13 deletions(-)

-- 
2.7.4
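
For context, the interval handling this commit message refers to sits at the
tail of load_balance(). Below is a paraphrased sketch of the pre-patch
behaviour, reconstructed from the hunk at the bottom of this page plus the
surrounding upstream code; the exact wording of the back-off branch and the
max_interval cap are recalled from the tree, not shown in this patch:

	if (likely(!active_balance)) {
		/* We were unbalanced, so reset the balancing interval */
		sd->balance_interval = sd->min_interval;
	} else {
		/*
		 * An active balance was used: back off, on the assumption
		 * that pinned tasks prevented a normal migration. This is
		 * the doubling that can delay asym_packing migrations for
		 * hundreds of ms.
		 */
		if (sd->balance_interval < sd->max_interval)
			sd->balance_interval *= 2;
	}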

Comments

Valentin Schneider Dec. 20, 2018, 11:22 a.m. UTC | #1
On 20/12/2018 07:55, Vincent Guittot wrote:
> In case of active balance, we increase the balance interval to cover

> pinned tasks cases not covered by all_pinned logic. Neverthless, the

> active migration triggered by asym packing should be treated as the normal

> unbalanced case and reset the interval to default value otherwise active

> migration for asym_packing can be easily delayed for hundreds of ms

> because of this pinned task detection mecanism.

> The same happen to other conditions tested in need_active_balance() like

> mistfit task and when the capacity of src_cpu is reduced compared to

> dst_cpu (see comments in need_active_balance() for details).

> 

> Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>

> ---

>  kernel/sched/fair.c | 40 +++++++++++++++++++++++++++-------------

>  1 file changed, 27 insertions(+), 13 deletions(-)

> 

> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c

> index 487c73e..9b1e701 100644

> --- a/kernel/sched/fair.c

> +++ b/kernel/sched/fair.c

> @@ -8849,21 +8849,25 @@ static struct rq *find_busiest_queue(struct lb_env *env,

>   */

>  #define MAX_PINNED_INTERVAL	512

>  

> -static int need_active_balance(struct lb_env *env)

> +static inline bool

> +asym_active_balance(struct lb_env *env)

>  {

> -	struct sched_domain *sd = env->sd;

> +	/*

> +	 * ASYM_PACKING needs to force migrate tasks from busy but

> +	 * lower priority CPUs in order to pack all tasks in the

> +	 * highest priority CPUs.

> +	 */

> +	return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&

> +	       sched_asym_prefer(env->dst_cpu, env->src_cpu);

> +}

>  

> -	if (env->idle != CPU_NOT_IDLE) {

> +static inline bool

> +voluntary_active_balance(struct lb_env *env)

> +{

> +	struct sched_domain *sd = env->sd;

>  

> -		/*

> -		 * ASYM_PACKING needs to force migrate tasks from busy but

> -		 * lower priority CPUs in order to pack all tasks in the

> -		 * highest priority CPUs.

> -		 */

> -		if ((sd->flags & SD_ASYM_PACKING) &&

> -		    sched_asym_prefer(env->dst_cpu, env->src_cpu))

> -			return 1;

> -	}

> +	if (asym_active_balance(env))

> +		return 1;

>  

>  	/*

>  	 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.

> @@ -8881,6 +8885,16 @@ static int need_active_balance(struct lb_env *env)

>  	if (env->src_grp_type == group_misfit_task)

>  		return 1;

>  

> +	return 0;

> +}

> +


Yeah so that's the active balance classification I was afraid of, and I
don't agree with it.

The way I see things, we happen to have some mechanisms that let us know
straight away if we need an active balance (asym packing, misfit, lowered
capacity), and we rely on the sd->nr_balance_failed threshold for the
scenarios where we don't have any more information.

We do happen to have a threshold because we don't want to go overboard with
it, but when it is reached it's a clear sign that we *do* want to active
balance because that's all we can do to try and solve the imbalance.

To me, those are all legitimate reasons to active balance. So they're all
"voluntary" really, we *do* want all of these.

> +static int need_active_balance(struct lb_env *env)

> +{

> +	struct sched_domain *sd = env->sd;

> +

> +	if (voluntary_active_balance(env))

> +		return 1;

> +

>  	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);

>  }

>  

> @@ -9142,7 +9156,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,

>  	} else

>  		sd->nr_balance_failed = 0;

>  

> -	if (likely(!active_balance)) {

> +	if (likely(!active_balance) || voluntary_active_balance(&env)) {


So now we reset the interval for all active balances (except the last active
balance case), even when it is done as a last resort because all other
tasks were pinned.

Arguably the current code isn't much better (always increasing the interval
when active balancing), but at least it covers this case. It would be a
waste not to take this into account when we can detect this scenario
(I'll reiterate my LBF_ALL_PINNED suggestion, sketched below).
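
Something along these lines, as a rough sketch only (it assumes LBF_ALL_PINNED
still reflects the pinning state in env.flags by the time we reach the
interval update, which would need checking):

	if (!(env.flags & LBF_ALL_PINNED)) {
		/* We were unbalanced, so reset the balancing interval */
		sd->balance_interval = sd->min_interval;
	} else {
		/* Pinned tasks got in the way: back off instead of resetting */
		if (sd->balance_interval < sd->max_interval)
			sd->balance_interval *= 2;
	}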

>  		/* We were unbalanced, so reset the balancing interval */

>  		sd->balance_interval = sd->min_interval;

>  	} else {

>
Vincent Guittot Dec. 20, 2018, 2:50 p.m. UTC | #2
On Thu, 20 Dec 2018 at 12:22, Valentin Schneider
<valentin.schneider@arm.com> wrote:
>

> On 20/12/2018 07:55, Vincent Guittot wrote:

> > In case of active balance, we increase the balance interval to cover

> > pinned tasks cases not covered by all_pinned logic. Neverthless, the

> > active migration triggered by asym packing should be treated as the normal

> > unbalanced case and reset the interval to default value otherwise active

> > migration for asym_packing can be easily delayed for hundreds of ms

> > because of this pinned task detection mecanism.

> > The same happen to other conditions tested in need_active_balance() like

> > mistfit task and when the capacity of src_cpu is reduced compared to

> > dst_cpu (see comments in need_active_balance() for details).

> >

> > Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>

> > ---

> >  kernel/sched/fair.c | 40 +++++++++++++++++++++++++++-------------

> >  1 file changed, 27 insertions(+), 13 deletions(-)

> >

> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c

> > index 487c73e..9b1e701 100644

> > --- a/kernel/sched/fair.c

> > +++ b/kernel/sched/fair.c

> > @@ -8849,21 +8849,25 @@ static struct rq *find_busiest_queue(struct lb_env *env,

> >   */

> >  #define MAX_PINNED_INTERVAL  512

> >

> > -static int need_active_balance(struct lb_env *env)

> > +static inline bool

> > +asym_active_balance(struct lb_env *env)

> >  {

> > -     struct sched_domain *sd = env->sd;

> > +     /*

> > +      * ASYM_PACKING needs to force migrate tasks from busy but

> > +      * lower priority CPUs in order to pack all tasks in the

> > +      * highest priority CPUs.

> > +      */

> > +     return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&

> > +            sched_asym_prefer(env->dst_cpu, env->src_cpu);

> > +}

> >

> > -     if (env->idle != CPU_NOT_IDLE) {

> > +static inline bool

> > +voluntary_active_balance(struct lb_env *env)

> > +{

> > +     struct sched_domain *sd = env->sd;

> >

> > -             /*

> > -              * ASYM_PACKING needs to force migrate tasks from busy but

> > -              * lower priority CPUs in order to pack all tasks in the

> > -              * highest priority CPUs.

> > -              */

> > -             if ((sd->flags & SD_ASYM_PACKING) &&

> > -                 sched_asym_prefer(env->dst_cpu, env->src_cpu))

> > -                     return 1;

> > -     }

> > +     if (asym_active_balance(env))

> > +             return 1;

> >

> >       /*

> >        * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.

> > @@ -8881,6 +8885,16 @@ static int need_active_balance(struct lb_env *env)

> >       if (env->src_grp_type == group_misfit_task)

> >               return 1;

> >

> > +     return 0;

> > +}

> > +

>

> Yeah so that's the active balance classification I was afraid of, and I

> don't agree with it.

>

> The way I see things, we happen to have some mechanisms that let us know

> straight away if we need an active balance (asym packing, misfit, lowered

> capacity), and we rely on the sd->nr_balance_failed threshold for the

> scenarios where we don't have any more information.

>

> We do happen to have a threshold because we don't want to go overboard with

> it, but when it is reached it's a clear sign that we *do* want to active

> balance because that's all we can do to try and solve the imbalance.

>

> To me, those are all legitimate reasons to. So they're all "voluntary"

> really, we *do* want all of these.

>

> > +static int need_active_balance(struct lb_env *env)

> > +{

> > +     struct sched_domain *sd = env->sd;

> > +

> > +     if (voluntary_active_balance(env))

> > +             return 1;

> > +

> >       return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);

> >  }

> >

> > @@ -9142,7 +9156,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,

> >       } else

> >               sd->nr_balance_failed = 0;

> >

> > -     if (likely(!active_balance)) {

> > +     if (likely(!active_balance) || voluntary_active_balance(&env)) {

>

> So now  we reset the interval for all active balances (expect last active

> balance case), even when it is done as a last resort because all other

> tasks were pinned.

>

> Arguably the current code isn't much better (always increase the interval

> when active balancing), but at least it covers this case. It would be a

> waste not to take this into account when we can detect this scenario


So I'd like to recall the $subject of this patchset: fixing some known
issues for asym_packing.
While looking at this, we added a few other voluntary active
balances because it was "obvious" that these active migrations were
voluntary ones. But in fact, we don't have any use case (UC) showing a
problem for those additional cases so far.

The default behavior for active migration is to increase the interval.
Now you want to extend the exception to other active migration UCs,
whereas it's not clear that those don't fall into the default active
migration UC, i.e. that the interval should not be increased for them.

What you want is to change the behavior of the scheduler for UCs that
haven't raised any problem, whereas asym_packing has a known problem.

Changing the default behavior for active migration is not the subject of
this patchset and should be treated in another one, like the one discussed
with Peter a few days ago.

> (I'll reiterate my LBF_ALL_PINNED suggestion).

>

> >               /* We were unbalanced, so reset the balancing interval */

> >               sd->balance_interval = sd->min_interval;

> >       } else {

> >
Valentin Schneider Dec. 20, 2018, 5:24 p.m. UTC | #3
On 20/12/2018 14:50, Vincent Guittot wrote:
[...]
>> So now  we reset the interval for all active balances (expect last active

>> balance case), even when it is done as a last resort because all other

>> tasks were pinned.

>>

>> Arguably the current code isn't much better (always increase the interval

>> when active balancing), but at least it covers this case. It would be a

>> waste not to take this into account when we can detect this scenario

> 

> So i'd like to remind the $subject of this patchset: fix some known

> issues for asym_packing.

> While looking at this, we have added few other voluntary active

> balances because it was "obvious" that this active migration were

> voluntary one. But in fact, we don't have any UC which show a problem

> for those additional UC so far.

> 

> The default behavior for active migration is to increase the interval

> Now you want to extend the exception to others active migration UC

> whereas it's not clear that we don't fall in the default active

> migration UC and we should not increase the interval.

 
Well if we stick to the rule of only increasing balance_interval when
pinned tasks get in the way, it's clear to me that this use case shouldn't
be segregated from the others.

I do agree however that it's not entirely clear if that balance_interval
increase was also intended to slow down the nr_balance_failed migrations.

I had a look at the history and stumbled on:

	8102679447da ("[PATCH] sched: improve load balancing pinned tasks")

Which explains the reasoning behind the active_balance balance_interval
increase:

	"""
	this one attempts to work out whether the balancing failure has
	been due to too many tasks pinned on the runqueue.  This allows it
	to be basically invisible to the regular blancing paths (ie. when
	there are no pinned tasks).
	"""

So AFAICT that is indeed the rule we should be following, and I would only
increase the balance_interval when there are pinned tasks, not because
of active_balance categories. So here it's a matter of fixing that
condition into what it was meant to be doing.

> What you want is changing the behavior of the scheduler for UC that

> haven't raised any problem where asym_packing has known problem/

> 

> Changing default behavior for active migration is not subject of this

> patchset and should be treated in another one like the one discussed

> with peter few days ago

> >> (I'll reiterate my LBF_ALL_PINNED suggestion).

>>

>>>               /* We were unbalanced, so reset the balancing interval */

>>>               sd->balance_interval = sd->min_interval;

>>>       } else {

>>>
Vincent Guittot Dec. 21, 2018, 2:49 p.m. UTC | #4
On Thu, 20 Dec 2018 at 18:24, Valentin Schneider
<valentin.schneider@arm.com> wrote:
>

> On 20/12/2018 14:50, Vincent Guittot wrote:

> [...]

> >> So now  we reset the interval for all active balances (expect last active

> >> balance case), even when it is done as a last resort because all other

> >> tasks were pinned.

> >>

> >> Arguably the current code isn't much better (always increase the interval

> >> when active balancing), but at least it covers this case. It would be a

> >> waste not to take this into account when we can detect this scenario

> >

> > So i'd like to remind the $subject of this patchset: fix some known

> > issues for asym_packing.

> > While looking at this, we have added few other voluntary active

> > balances because it was "obvious" that this active migration were

> > voluntary one. But in fact, we don't have any UC which show a problem

> > for those additional UC so far.

> >

> > The default behavior for active migration is to increase the interval

> > Now you want to extend the exception to others active migration UC

> > whereas it's not clear that we don't fall in the default active

> > migration UC and we should not increase the interval.

>

> Well if we stick to the rule of only increasing balance_interval when

> pinned tasks get in the way, it's clear to me that this use case shouldn't

> be segregated from the others.

>

> I do agree however that it's not entirely clear if that balance_interval

> increase was also intended to slow down the nr_balance_failed migrations.

>

> I had a look at the history and stumbled on:

>

>         8102679447da ("[PATCH] sched: improve load balancing pinned tasks")

>

> Which explains the reasoning behind the active_balance balance_interval

> increase:

>

>         """

>         this one attempts to work out whether the balancing failure has

>         been due to too many tasks pinned on the runqueue.  This allows it

>         to be basically invisible to the regular blancing paths (ie. when

>         there are no pinned tasks).

>         """

>

> So AFAICT that is indeed the rule we should be following, and I would only

> increase the balance_interval when there are pinned tasks, not because

> of active_balance categories. So here it's a matter of fixing that

> condition into what it was meant to be doing.


After looking at sched.c at this sha1, (sd->nr_balance_failed >
sd->cache_nice_tries+2) was the only condition for doing active
migration and, as a result, it was the only reason for doubling
sd->balance_interval.
My patch keeps exactly the same behavior for the
(sd->nr_balance_failed > sd->cache_nice_tries+2) condition. And I'm even
more convinced to exclude the (sd->nr_balance_failed > sd->cache_nice_tries+2)
condition, because it's the condition that introduced the doubling
of the interval.

As said previously, you can argue that this behavior is not optimal
and discuss its validity, but the sha1 that you mentioned above
introduced the current policy for the (sd->nr_balance_failed >
sd->cache_nice_tries+2) condition.
Reverting such behavior would need more study, testing and care, which
are out of the scope of this patchset and more related to a whole
refactoring of load_balance() and calculate_imbalance(); FYI, I have
submitted a topic on the subject for the next OSPM.
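
For reference, the relevant part of load_balance() around that sha1 looked
roughly like this (paraphrased from memory of that era's code, so treat the
exact shape as an assumption rather than a quote):

	/* around commit 8102679447da: the threshold is the only trigger */
	if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries + 2)) {
		/* ... kick the migration thread on the busiest CPU ... */
		active_balance = 1;
	}
	...
	if (likely(!active_balance))
		sd->balance_interval = sd->min_interval;
	else if (sd->balance_interval < sd->max_interval)
		sd->balance_interval *= 2;	/* only reached via the threshold */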

>

> > What you want is changing the behavior of the scheduler for UC that

> > haven't raised any problem where asym_packing has known problem/

> >

> > Changing default behavior for active migration is not subject of this

> > patchset and should be treated in another one like the one discussed

> > with peter few days ago

> > >> (I'll reiterate my LBF_ALL_PINNED suggestion).

> >>

> >>>               /* We were unbalanced, so reset the balancing interval */

> >>>               sd->balance_interval = sd->min_interval;

> >>>       } else {

> >>>
Valentin Schneider Dec. 21, 2018, 5:15 p.m. UTC | #5
On 21/12/2018 14:49, Vincent Guittot wrote:
[...]
> After looking at shed.c at this sha1,  (sd->nr_balance_failed >

> sd->cache_nice_tries+2) was the only condition for doing active

> migration and as a result it was the only reason for doubling

> sd->balance_interval.

> My patch keeps exactly the same behavior for this condition

> 'sd->nr_balance_failed > sd->cache_nice_tries+2). And, I'm even more

> convinced to exclude (sd->nr_balance_failed > sd->cache_nice_tries+2)

> condition because it's the condition that has introduced the doubling

> of the interval.

> 

> As said previously, you can argue that this behavior is not optimal

> and discuss its validity, but the sha1 that you mentioned above,

> introduced the current policy for (sd->nr_balance_failed >

> sd->cache_nice_tries+2) condition.

> Reverting such behavior would need more studies, tests and cares


I agree with you on that, those are valid concerns.

What I'm arguing for is that instead of doing this in two steps (reset the
interval only for some active balance types, then experiment with only
increasing it for "active balance as a last resort"), I'd prefer doing it
in one step.

Mostly because I think the intermediate step adds an active balance
categorization that can easily become confusing. Furthermore, that 2005
commit explicitly states it wants to cater to pinned tasks, but we didn't
have those LBF_* flags back then, so if we are to do something about it
we should be improving upon the original intent.

In the end it's not for me to decide, I just happen to find doing it that
way more elegant (from a functionality & code readability PoV).

> which

> are out of the scope of this patchset and more related to a whole

> refactoring of load_balance and calculte_imbalance; FYI, I have

> submitted a topic on the subject for the next OSPM
Vincent Guittot Dec. 21, 2018, 5:41 p.m. UTC | #6
On Fri, 21 Dec 2018 at 18:15, Valentin Schneider
<valentin.schneider@arm.com> wrote:
>

> On 21/12/2018 14:49, Vincent Guittot wrote:

> [...]

> > After looking at shed.c at this sha1,  (sd->nr_balance_failed >

> > sd->cache_nice_tries+2) was the only condition for doing active

> > migration and as a result it was the only reason for doubling

> > sd->balance_interval.

> > My patch keeps exactly the same behavior for this condition

> > 'sd->nr_balance_failed > sd->cache_nice_tries+2). And, I'm even more

> > convinced to exclude (sd->nr_balance_failed > sd->cache_nice_tries+2)

> > condition because it's the condition that has introduced the doubling

> > of the interval.

> >

> > As said previously, you can argue that this behavior is not optimal

> > and discuss its validity, but the sha1 that you mentioned above,

> > introduced the current policy for (sd->nr_balance_failed >

> > sd->cache_nice_tries+2) condition.

> > Reverting such behavior would need more studies, tests and cares

>

> I agree with you on that, those are valid concerns.

>

> What I'm arguing for is instead of doing this in two steps (reset interval

> only for some active balance types, then experiment only increasing it for

> "active balance as a last resort"), I'd prefer doing it in one step.


Doing it in 2 steps has the advantage of not delaying the current fix and
giving enough time for a complete study of the other step.

>

> Mostly because I think the intermediate step adds an active balance

> categorization that can easily become confusing. Furthermore, that 2005

> commit explicitly states it wants to cater to pinned tasks, but we didn't

> have those LBF_* flags back then, so if we are to do something about it

> we should be improving upon the original intent.

>

> In the end it's not for me to decide, I just happen to find doing it that

> way more elegant (from a functionality & code readability PoV).

>

> > which

> > are out of the scope of this patchset and more related to a whole

> > refactoring of load_balance and calculte_imbalance; FYI, I have

> > submitted a topic on the subject for the next OSPM

>
diff mbox series

Patch

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 487c73e..9b1e701 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8849,21 +8849,25 @@  static struct rq *find_busiest_queue(struct lb_env *env,
  */
 #define MAX_PINNED_INTERVAL	512
 
-static int need_active_balance(struct lb_env *env)
+static inline bool
+asym_active_balance(struct lb_env *env)
 {
-	struct sched_domain *sd = env->sd;
+	/*
+	 * ASYM_PACKING needs to force migrate tasks from busy but
+	 * lower priority CPUs in order to pack all tasks in the
+	 * highest priority CPUs.
+	 */
+	return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
+	       sched_asym_prefer(env->dst_cpu, env->src_cpu);
+}
 
-	if (env->idle != CPU_NOT_IDLE) {
+static inline bool
+voluntary_active_balance(struct lb_env *env)
+{
+	struct sched_domain *sd = env->sd;
 
-		/*
-		 * ASYM_PACKING needs to force migrate tasks from busy but
-		 * lower priority CPUs in order to pack all tasks in the
-		 * highest priority CPUs.
-		 */
-		if ((sd->flags & SD_ASYM_PACKING) &&
-		    sched_asym_prefer(env->dst_cpu, env->src_cpu))
-			return 1;
-	}
+	if (asym_active_balance(env))
+		return 1;
 
 	/*
 	 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
@@ -8881,6 +8885,16 @@  static int need_active_balance(struct lb_env *env)
 	if (env->src_grp_type == group_misfit_task)
 		return 1;
 
+	return 0;
+}
+
+static int need_active_balance(struct lb_env *env)
+{
+	struct sched_domain *sd = env->sd;
+
+	if (voluntary_active_balance(env))
+		return 1;
+
 	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
 }
 
@@ -9142,7 +9156,7 @@  static int load_balance(int this_cpu, struct rq *this_rq,
 	} else
 		sd->nr_balance_failed = 0;
 
-	if (likely(!active_balance)) {
+	if (likely(!active_balance) || voluntary_active_balance(&env)) {
 		/* We were unbalanced, so reset the balancing interval */
 		sd->balance_interval = sd->min_interval;
 	} else {