@@ -940,7 +940,9 @@ static void update_curr_dl(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct sched_dl_entity *dl_se = &curr->dl;
- u64 delta_exec;
+ u64 delta_exec, scaled_delta_exec;
+ unsigned long scale_freq, scale_cpu;
+ int cpu = cpu_of(rq);
if (!dl_task(curr) || !on_dl_rq(dl_se))
return;
@@ -974,9 +976,26 @@ static void update_curr_dl(struct rq *rq)
if (unlikely(dl_entity_is_special(dl_se)))
return;
- if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM))
- delta_exec = grub_reclaim(delta_exec, rq, curr->dl.dl_bw);
- dl_se->runtime -= delta_exec;
+ /*
+ * XXX When clock frequency is controlled by the scheduler (via
+ * schedutil governor) we implement GRUB-PA: the spare reclaimed
+ * bandwidth is used to clock down frequency.
+ *
+ * However, the code below seems to assume that the scheduler is always
+ * in control of clock frequency; when running at a fixed frequency
+ * (e.g., performance or userspace governor), shouldn't we instead
+ * use the grub_reclaim mechanism below?
+ *
+ * if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM))
+ * delta_exec = grub_reclaim(delta_exec, rq, curr->dl.dl_bw);
+ * dl_se->runtime -= delta_exec;
+ */
+ scale_freq = arch_scale_freq_capacity(NULL, cpu);
+ scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+
+ scaled_delta_exec = cap_scale(delta_exec, scale_freq);
+ scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
+ dl_se->runtime -= scaled_delta_exec;
throttle:
if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
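To make the arithmetic in the hunk above concrete, here is a small standalone
sketch of what gets charged against the reservation; the scale_freq and
scale_cpu values are invented for illustration, while cap_scale() and
SCHED_CAPACITY_SHIFT (full capacity = 1024) come from the patch and the kernel
headers:

#include <stdint.h>
#include <stdio.h>

/* Same fixed-point scaling as the cap_scale() macro moved to sched.h. */
#define SCHED_CAPACITY_SHIFT	10
#define cap_scale(v, s)		((v)*(s) >> SCHED_CAPACITY_SHIFT)

int main(void)
{
	uint64_t delta_exec = 1000000;	/* 1 ms of observed runtime, in ns    */
	uint64_t scale_freq = 512;	/* hypothetical: running at half fmax  */
	uint64_t scale_cpu  = 614;	/* hypothetical: mid-size core, ~60%   */
	uint64_t scaled;

	/* Frequency scaling first, then CPU (microarchitectural) capacity,
	 * mirroring the two cap_scale() calls in update_curr_dl(). */
	scaled = cap_scale(delta_exec, scale_freq);
	scaled = cap_scale(scaled, scale_cpu);

	/* Roughly 0.3 ms is charged against dl_se->runtime instead of 1 ms,
	 * matching the slower progress the task actually made. */
	printf("charged %llu ns of the reservation runtime\n",
	       (unsigned long long)scaled);
	return 0;
}

In other words, at about 30% of peak capacity only ~0.3 ms of a budget profiled
at max frequency on the biggest core is consumed per 1 ms of wall-clock
execution.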
@@ -2818,8 +2818,6 @@ static u32 __compute_runnable_contrib(u64 n)
return contrib + runnable_avg_yN_sum[n];
}
-#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
-
/*
* We can represent the historical contribution to runnable average as the
* coefficients of a geometric series. To do this we sub-divide our runnable
@@ -155,6 +155,8 @@ static inline int task_has_dl_policy(struct task_struct *p)
return dl_policy(p->policy);
}
+#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
+
static inline int dl_entity_is_special(struct sched_dl_entity *dl_se)
{
return dl_se->flags & SCHED_FLAG_SPECIAL;
Apply the frequency and CPU scale-invariance correction factor to bandwidth
enforcement (similar to what we already do for fair utilization tracking).

Each delta_exec gets scaled considering current frequency and maximum CPU
capacity; this means that the reservation runtime parameter (which needs to be
specified by profiling the task execution at max frequency on the biggest
capacity core) gets scaled accordingly.

Signed-off-by: Juri Lelli <juri.lelli@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Luca Abeni <luca.abeni@santannapisa.it>
Cc: Claudio Scordino <claudio@evidence.eu.com>
---
 kernel/sched/deadline.c | 27 +++++++++++++++++++++++----
 kernel/sched/fair.c     |  2 --
 kernel/sched/sched.h    |  2 ++
 3 files changed, 25 insertions(+), 6 deletions(-)

--
2.10.0
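For context on the runtime parameter mentioned in the changelog, a minimal
userspace sketch of how such a reservation is requested: the 10 ms runtime per
100 ms period is a made-up profile (measured, per the changelog, at max
frequency on the biggest core), struct sched_attr is defined locally because
older libc headers lack it, and the raw syscall is used since glibc provides no
sched_setattr() wrapper:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/sched.h>	/* SCHED_DEADLINE */

/* Local copy of the sched_setattr() ABI (original 48-byte version). */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy = SCHED_DEADLINE;
	/* Hypothetical numbers: worst-case 10 ms of execution every 100 ms,
	 * profiled at max frequency on the biggest-capacity core. */
	attr.sched_runtime  = 10ULL * 1000 * 1000;
	attr.sched_deadline = 100ULL * 1000 * 1000;
	attr.sched_period   = 100ULL * 1000 * 1000;

	if (syscall(SYS_sched_setattr, 0, &attr, 0) < 0)
		return 1;	/* needs CAP_SYS_NICE / admission control */

	/* ... periodic real-time work here ... */
	return 0;
}

With this patch, running the same task at a lower operating point depletes that
10 ms budget proportionally more slowly, instead of throttling it before the
work profiled at max capacity has completed.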