diff mbox series

[v2,1/4] sched/fair: always used unsigned long for utilization

Message ID 20171205171018.9203-2-patrick.bellasi@arm.com
State Accepted
Commit f01415fdbfe83380c2dfcf90b7b26042f88963aa
Headers show
Series Utilization estimation (util_est) for FAIR tasks | expand

Commit Message

Patrick Bellasi Dec. 5, 2017, 5:10 p.m. UTC
Utilization and capacity are tracked as unsigned long, however some
functions using them return an int which is ultimately assigned back to
unsigned long variables.

Since there is no reason to use a different, signed type, this patch
consolidates the signatures of functions returning utilization to always
use the native type.
As well as improving code consistency, this is also expected to benefit
code paths where utilization values should be clamped, by avoiding
further type conversions or ugly type casts.

Signed-off-by: Patrick Bellasi <patrick.bellasi@arm.com>

Reviewed-by: Chris Redpath <chris.redpath@arm.com>

Reviewed-by: Brendan Jackman <brendan.jackman@arm.com>

Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>

Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: linux-kernel@vger.kernel.org

---
Changes v1->v2:
 - rebase on top of v4.15-rc2
 - tested that overhauled PELT code does not affect the util_est
---
 kernel/sched/fair.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

-- 
2.14.1

Comments

Vincent Guittot Dec. 6, 2017, 8:56 a.m. UTC | #1
On 5 December 2017 at 18:10, Patrick Bellasi <patrick.bellasi@arm.com> wrote:
> Utilization and capacity are tracked as unsigned long, however some

> functions using them return an int which is ultimately assigned back to

> unsigned long variables.

>

> Since there is not scope on using a different and signed type, this

> consolidate the signature of functions returning utilization to always

> use the native type.

> As well as improving code consistency this is expected also benefits

> code paths where utilizations should be clamped by avoiding further type

> conversions or ugly type casts.

>

> Signed-off-by: Patrick Bellasi <patrick.bellasi@arm.com>

> Reviewed-by: Chris Redpath <chris.redpath@arm.com>

> Reviewed-by: Brendan Jackman <brendan.jackman@arm.com>

> Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>

> Cc: Ingo Molnar <mingo@redhat.com>

> Cc: Peter Zijlstra <peterz@infradead.org>

> Cc: Vincent Guittot <vincent.guittot@linaro.org>

> Cc: Morten Rasmussen <morten.rasmussen@arm.com>

> Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>

> Cc: linux-kernel@vger.kernel.org


Acked-by: Vincent Guittot <vincent.guittot@linaro.org>


>

> ---

> Changes v1->v2:

>  - rebase on top of v4.15-rc2

>  - tested that overhauled PELT code does not affect the util_est

> ---

>  kernel/sched/fair.c | 10 +++++-----

>  1 file changed, 5 insertions(+), 5 deletions(-)

>

> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c

> index 4037e19bbca2..ad21550d008c 100644

> --- a/kernel/sched/fair.c

> +++ b/kernel/sched/fair.c

> @@ -5721,8 +5721,8 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,

>         return affine;

>  }

>

> -static inline int task_util(struct task_struct *p);

> -static int cpu_util_wake(int cpu, struct task_struct *p);

> +static inline unsigned long task_util(struct task_struct *p);

> +static unsigned long cpu_util_wake(int cpu, struct task_struct *p);

>

>  static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)

>  {

> @@ -6203,7 +6203,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)

>   * capacity_orig) as it useful for predicting the capacity required after task

>   * migrations (scheduler-driven DVFS).

>   */

> -static int cpu_util(int cpu)

> +static unsigned long cpu_util(int cpu)

>  {

>         unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;

>         unsigned long capacity = capacity_orig_of(cpu);

> @@ -6211,7 +6211,7 @@ static int cpu_util(int cpu)

>         return (util >= capacity) ? capacity : util;

>  }

>

> -static inline int task_util(struct task_struct *p)

> +static inline unsigned long task_util(struct task_struct *p)

>  {

>         return p->se.avg.util_avg;

>  }

> @@ -6220,7 +6220,7 @@ static inline int task_util(struct task_struct *p)

>   * cpu_util_wake: Compute cpu utilization with any contributions from

>   * the waking task p removed.

>   */

> -static int cpu_util_wake(int cpu, struct task_struct *p)

> +static unsigned long cpu_util_wake(int cpu, struct task_struct *p)

>  {

>         unsigned long util, capacity;

>

> --

> 2.14.1

>
diff mbox series

Patch

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4037e19bbca2..ad21550d008c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5721,8 +5721,8 @@  static int wake_affine(struct sched_domain *sd, struct task_struct *p,
 	return affine;
 }
 
-static inline int task_util(struct task_struct *p);
-static int cpu_util_wake(int cpu, struct task_struct *p);
+static inline unsigned long task_util(struct task_struct *p);
+static unsigned long cpu_util_wake(int cpu, struct task_struct *p);
 
 static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
 {
@@ -6203,7 +6203,7 @@  static int select_idle_sibling(struct task_struct *p, int prev, int target)
  * capacity_orig) as it useful for predicting the capacity required after task
  * migrations (scheduler-driven DVFS).
  */
-static int cpu_util(int cpu)
+static unsigned long cpu_util(int cpu)
 {
 	unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
 	unsigned long capacity = capacity_orig_of(cpu);
@@ -6211,7 +6211,7 @@  static int cpu_util(int cpu)
 	return (util >= capacity) ? capacity : util;
 }
 
-static inline int task_util(struct task_struct *p)
+static inline unsigned long task_util(struct task_struct *p)
 {
 	return p->se.avg.util_avg;
 }
@@ -6220,7 +6220,7 @@  static inline int task_util(struct task_struct *p)
  * cpu_util_wake: Compute cpu utilization with any contributions from
  * the waking task p removed.
  */
-static int cpu_util_wake(int cpu, struct task_struct *p)
+static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
 {
 	unsigned long util, capacity;