
[v5,3/6] sched/fair: Enable periodic update of average thermal pressure

Message ID: 1572979786-20361-4-git-send-email-thara.gopinath@linaro.org
State: New
Series: Introduce Thermal Pressure

Commit Message

Thara Gopinath Nov. 5, 2019, 6:49 p.m. UTC
Introduce support in the CFS periodic tick and other bookkeeping APIs
to trigger the process of computing average thermal pressure for a
CPU. Also consider avg_thermal.load_avg in others_have_blocked() so
that the periodic update keeps running until the thermal PELT signal
has fully decayed.

Signed-off-by: Thara Gopinath <thara.gopinath@linaro.org>

---

v4->v5:
	- Updated both versions of update_blocked_averages to trigger the
	  process of computing average thermal pressure.
	- Updated others_have_blocked to consider avg_thermal.load_avg.

 kernel/sched/fair.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

-- 
2.1.4
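
Note that trigger_thermal_pressure_average() is only forward-declared in
this patch; its definition comes earlier in the series. A minimal sketch
of the shape such a helper takes, assuming the update_thermal_load_avg()
PELT hook and the per-CPU thermal_pressure variable introduced by the
preceding patches:

static void trigger_thermal_pressure_average(struct rq *rq)
{
	/*
	 * Fold the instantaneous per-CPU thermal pressure into the
	 * PELT-style running average tracked in rq->avg_thermal.
	 */
	update_thermal_load_avg(rq_clock_task(rq), rq,
				per_cpu(thermal_pressure, cpu_of(rq)));
}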

Comments

Vincent Guittot Nov. 6, 2019, 8:32 a.m. UTC | #1
On Tue, 5 Nov 2019 at 19:49, Thara Gopinath <thara.gopinath@linaro.org> wrote:
>
> Introduce support in the CFS periodic tick and other bookkeeping APIs
> to trigger the process of computing average thermal pressure for a
> CPU. Also consider avg_thermal.load_avg in others_have_blocked() so
> that the periodic update keeps running until the thermal PELT signal
> has fully decayed.
>
> Signed-off-by: Thara Gopinath <thara.gopinath@linaro.org>
> ---
>
> v4->v5:
>         - Updated both versions of update_blocked_averages to trigger the
>           process of computing average thermal pressure.
>         - Updated others_have_blocked to consider avg_thermal.load_avg.
>
>  kernel/sched/fair.c | 10 ++++++++++
>  1 file changed, 10 insertions(+)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 2e907cc..9fb0494 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -92,6 +92,8 @@ const_debug unsigned int sysctl_sched_migration_cost  = 500000UL;
>   */
>  static DEFINE_PER_CPU(unsigned long, thermal_pressure);
>
> +static void trigger_thermal_pressure_average(struct rq *rq);
> +
>  #ifdef CONFIG_SMP
>  /*
>   * For asym packing, by default the lower numbered CPU has higher priority.
> @@ -7493,6 +7495,9 @@ static inline bool others_have_blocked(struct rq *rq)
>         if (READ_ONCE(rq->avg_dl.util_avg))
>                 return true;
>
> +       if (READ_ONCE(rq->avg_thermal.load_avg))
> +               return true;
> +
>  #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
>         if (READ_ONCE(rq->avg_irq.util_avg))
>                 return true;
> @@ -7580,6 +7585,8 @@ static void update_blocked_averages(int cpu)
>                 done = false;
>
>         update_blocked_load_status(rq, !done);
> +
> +       trigger_thermal_pressure_average(rq);

This must be called before others_have_blocked() to take into account
the latest update
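
Concretely, that means updating the thermal average before the
others_have_blocked() check, so a freshly decayed rq->avg_thermal is
what gets sampled there. A sketch of the reordering against the v5 code
of the CONFIG_FAIR_GROUP_SCHED variant (exact placement is of course up
to the next revision):

	curr_class = rq->curr->sched_class;
	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
	update_irq_load_avg(rq, 0);

	/* Decay the thermal average before others_have_blocked() samples it. */
	trigger_thermal_pressure_average(rq);

	/* Don't need periodic decay once load/util_avg are null */
	if (others_have_blocked(rq))
		done = false;

	update_blocked_load_status(rq, !done);
	rq_unlock_irqrestore(rq, &rf);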

>         rq_unlock_irqrestore(rq, &rf);
>  }
>
> @@ -7646,6 +7653,7 @@ static inline void update_blocked_averages(int cpu)
>         update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
>         update_irq_load_avg(rq, 0);
>         update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq));
> +       trigger_thermal_pressure_average(rq);

idem
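
Likewise for the !CONFIG_FAIR_GROUP_SCHED variant, a sketch of the same
reordering:

	update_irq_load_avg(rq, 0);
	/* As above: refresh the thermal average before it is tested. */
	trigger_thermal_pressure_average(rq);
	update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq));
	rq_unlock_irqrestore(rq, &rf);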

>         rq_unlock_irqrestore(rq, &rf);
>  }
>
> @@ -9939,6 +9947,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
>
>         update_misfit_status(curr, rq);
>         update_overutilized_status(task_rq(curr));
> +

remove blank line

> +       trigger_thermal_pressure_average(rq);
>  }
>
>  /*
> --
> 2.1.4
>
Thara Gopinath Nov. 6, 2019, 5:01 p.m. UTC | #2
On 11/06/2019 03:32 AM, Vincent Guittot wrote:
> On Tue, 5 Nov 2019 at 19:49, Thara Gopinath <thara.gopinath@linaro.org> wrote:
>>
>> Introduce support in the CFS periodic tick and other bookkeeping APIs
>> to trigger the process of computing average thermal pressure for a
>> CPU. Also consider avg_thermal.load_avg in others_have_blocked() so
>> that the periodic update keeps running until the thermal PELT signal
>> has fully decayed.
>>
>> Signed-off-by: Thara Gopinath <thara.gopinath@linaro.org>
>> ---
>>
>> v4->v5:
>>         - Updated both versions of update_blocked_averages to trigger the
>>           process of computing average thermal pressure.
>>         - Updated others_have_blocked to consider avg_thermal.load_avg.
>>
>>  kernel/sched/fair.c | 10 ++++++++++
>>  1 file changed, 10 insertions(+)
>>
>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>> index 2e907cc..9fb0494 100644
>> --- a/kernel/sched/fair.c
>> +++ b/kernel/sched/fair.c
>> @@ -92,6 +92,8 @@ const_debug unsigned int sysctl_sched_migration_cost  = 500000UL;
>>   */
>>  static DEFINE_PER_CPU(unsigned long, thermal_pressure);
>>
>> +static void trigger_thermal_pressure_average(struct rq *rq);
>> +
>>  #ifdef CONFIG_SMP
>>  /*
>>   * For asym packing, by default the lower numbered CPU has higher priority.
>> @@ -7493,6 +7495,9 @@ static inline bool others_have_blocked(struct rq *rq)
>>         if (READ_ONCE(rq->avg_dl.util_avg))
>>                 return true;
>>
>> +       if (READ_ONCE(rq->avg_thermal.load_avg))
>> +               return true;
>> +
>>  #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
>>         if (READ_ONCE(rq->avg_irq.util_avg))
>>                 return true;
>> @@ -7580,6 +7585,8 @@ static void update_blocked_averages(int cpu)
>>                 done = false;
>>
>>         update_blocked_load_status(rq, !done);
>> +
>> +       trigger_thermal_pressure_average(rq);
>
> This must be called before others_have_blocked() to take into account
> the latest update

It is a bug. I agree. Will fix it.
>
>>         rq_unlock_irqrestore(rq, &rf);
>>  }
>>
>> @@ -7646,6 +7653,7 @@ static inline void update_blocked_averages(int cpu)
>>         update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
>>         update_irq_load_avg(rq, 0);
>>         update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq));
>> +       trigger_thermal_pressure_average(rq);
>
> idem
>
>>         rq_unlock_irqrestore(rq, &rf);
>>  }
>>
>> @@ -9939,6 +9947,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
>>
>>         update_misfit_status(curr, rq);
>>         update_overutilized_status(task_rq(curr));
>> +
>
> remove blank line
>
>> +       trigger_thermal_pressure_average(rq);
>>  }
>>
>>  /*
>> --
>> 2.1.4
>>


-- 
Warm Regards
Thara

Patch

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2e907cc..9fb0494 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -92,6 +92,8 @@ const_debug unsigned int sysctl_sched_migration_cost	= 500000UL;
  */
 static DEFINE_PER_CPU(unsigned long, thermal_pressure);
 
+static void trigger_thermal_pressure_average(struct rq *rq);
+
 #ifdef CONFIG_SMP
 /*
  * For asym packing, by default the lower numbered CPU has higher priority.
@@ -7493,6 +7495,9 @@ static inline bool others_have_blocked(struct rq *rq)
 	if (READ_ONCE(rq->avg_dl.util_avg))
 		return true;
 
+	if (READ_ONCE(rq->avg_thermal.load_avg))
+		return true;
+
 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 	if (READ_ONCE(rq->avg_irq.util_avg))
 		return true;
@@ -7580,6 +7585,8 @@ static void update_blocked_averages(int cpu)
 		done = false;
 
 	update_blocked_load_status(rq, !done);
+
+	trigger_thermal_pressure_average(rq);
 	rq_unlock_irqrestore(rq, &rf);
 }
 
@@ -7646,6 +7653,7 @@ static inline void update_blocked_averages(int cpu)
 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
 	update_irq_load_avg(rq, 0);
 	update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq));
+	trigger_thermal_pressure_average(rq);
 	rq_unlock_irqrestore(rq, &rf);
 }
 
@@ -9939,6 +9947,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 
 	update_misfit_status(curr, rq);
 	update_overutilized_status(task_rq(curr));
+
+	trigger_thermal_pressure_average(rq);
 }
 
 /*