@@ -466,4 +466,24 @@ int update_irq_load_avg(struct rq *rq, u64 running)
return ret;
}
-#endif
+#endif /* CONFIG_HAVE_SCHED_AVG_IRQ */
+
+/*
+ * Approximate the new util_avg value assuming an entity has continued to run
+ * for @delta us.
+ */
+unsigned long approximate_util_avg(unsigned long util, u64 delta)
+{
+ struct sched_avg sa = {
+ .util_sum = util * PELT_MIN_DIVIDER,
+ .util_avg = util,
+ };
+
+ if (unlikely(!delta))
+ return util;
+
+ accumulate_sum(delta, &sa, 1, 0, 1);
+ ___update_load_avg(&sa, 0);
+
+ return sa.util_avg;
+}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3064,6 +3064,7 @@ unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
unsigned long min,
unsigned long max);
+unsigned long approximate_util_avg(unsigned long util, u64 delta);
/*
* Verify the fitness of task @p to run on @cpu taking into account the

Given a util_avg value, the new function will return the future one given a
runtime delta.

This will be useful in later patches to help replace some magic margins with
more deterministic behavior.

Signed-off-by: Qais Yousef <qyousef@layalina.io>
---
 kernel/sched/pelt.c  | 22 +++++++++++++++++++++-
 kernel/sched/sched.h |  1 +
 2 files changed, 22 insertions(+), 1 deletion(-)
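
As a rough mental model (not part of the patch): with PELT's 1024 us periods
and 32 ms half-life, an entity that keeps running for p full periods moves its
util_avg from u towards roughly u * y^p + 1024 * (1 - y^p), where y^32 = 0.5.
The sketch below is a self-contained user-space model of that geometric series;
the function name model_util_after, the example values, and the omission of the
partial-period / PELT_MIN_DIVIDER bookkeeping are all illustrative assumptions,
not kernel code and not the exact arithmetic accumulate_sum() and
___update_load_avg() perform.

	/*
	 * User-space sketch of the decay/accumulation that
	 * approximate_util_avg() walks through numerically.  Illustrative
	 * only: it ignores period_contrib carry-over and the
	 * PELT_MIN_DIVIDER correction, so results differ slightly from the
	 * kernel helper.
	 */
	#include <math.h>
	#include <stdio.h>

	static unsigned long model_util_after(unsigned long util,
					      unsigned long delta_us)
	{
		/* decay factor per 1024 us period; y^32 == 0.5 */
		const double y = pow(0.5, 1.0 / 32.0);
		double periods = delta_us / 1024.0;	/* elapsed PELT periods */
		double decay = pow(y, periods);

		/* while running, util converges geometrically towards 1024 */
		return (unsigned long)(util * decay + 1024.0 * (1.0 - decay));
	}

	int main(void)
	{
		/* e.g. a task at util 300 that keeps running for another 4 ms */
		printf("util after 4ms: %lu\n", model_util_after(300, 4000));
		return 0;
	}

In this simplified model a task sitting at util 300 that runs for another 4 ms
climbs to roughly 358, which is the kind of "what will the util be if it keeps
running" question the new helper lets callers answer deterministically instead
of applying a fixed margin.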