@@ -24,6 +24,9 @@
* Author: Vincent Guittot <vincent.guittot@linaro.org>
*/
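+/*
+ * PELT halflife multiplier: 1 => 32ms halflife (default), 2 => 16ms,
+ * 4 => 8ms. sched_pelt_lshift caches log2(sched_pelt_multiplier) so the
+ * hot path can fold it into the existing ns -> ~1us right shift.
+ */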
+static __read_mostly unsigned int sched_pelt_lshift;
+static unsigned int sched_pelt_multiplier = 1;
+
/*
* Approximate:
* val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
@@ -180,6 +183,7 @@ static __always_inline int
___update_load_sum(u64 now, struct sched_avg *sa,
unsigned long load, unsigned long runnable, int running)
{
+ int time_shift;
u64 delta;
delta = now - sa->last_update_time;
@@ -195,12 +199,17 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
/*
* Use 1024ns as the unit of measurement since it's a reasonable
* approximation of 1us and fast to compute.
+	 * On top of this, we can shorten the halflife from the default
+	 * 32ms, which is equivalent to left-shifting time.
+	 * Merge both right and left shifts into a single right shift.
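+	 * E.g. a multiplier of 4 gives a left shift of 2: we shift right
+	 * by 8 instead of 10, PELT time advances 4x faster and the 32ms
+	 * halflife behaves like an 8ms one.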
*/
- delta >>= 10;
+	time_shift = 10 - READ_ONCE(sched_pelt_lshift);
+ delta >>= time_shift;
if (!delta)
return 0;
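+	/*
+	 * Advance last_update_time by the same shift: the sub-unit
+	 * remainder truncated above carries over into the next update.
+	 */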
- sa->last_update_time += delta << 10;
+ sa->last_update_time += delta << time_shift;
/*
* running is a subset of runnable (weight) so running can't be set if
@@ -468,6 +477,51 @@ int update_irq_load_avg(struct rq *rq, u64 running)
}
#endif /* CONFIG_HAVE_SCHED_AVG_IRQ */
+static int set_sched_pelt_multiplier(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_int(val, kp);
+ if (ret)
+ goto error;
+
+	switch (sched_pelt_multiplier) {
+	case 1:
+	case 2:
+	case 4:
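+		/*
+		 * For the accepted values {1, 2, 4}, multiplier >> 1
+		 * happens to equal log2(multiplier): 0, 1 and 2.
+		 */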
+ WRITE_ONCE(sched_pelt_lshift,
+ sched_pelt_multiplier >> 1);
+ break;
+ default:
+ ret = -EINVAL;
+ goto error;
+ }
+
+ return 0;
+
+error:
+	/*
+	 * Restore the consistent default so a rejected value can't leave
+	 * sched_pelt_lshift out of sync with the reported multiplier.
+	 */
+	sched_pelt_multiplier = 1;
+	WRITE_ONCE(sched_pelt_lshift, 0);
+	return ret;
+}
+
+static const struct kernel_param_ops sched_pelt_multiplier_ops = {
+ .set = set_sched_pelt_multiplier,
+ .get = param_get_int,
+};
+
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+/* XXX: should we use sched as prefix? */
+#define MODULE_PARAM_PREFIX "kernel."
+module_param_cb(sched_pelt_multiplier, &sched_pelt_multiplier_ops, &sched_pelt_multiplier, 0444);
+MODULE_PARM_DESC(sched_pelt_multiplier,
+		 "PELT halflife multiplier; controls the responsiveness of the system.\n"
+		 "Accepted values:\n"
+		 "  1: 32ms PELT halflife - roughly 200ms to go from 0 to max performance point (default).\n"
+		 "  2: 16ms PELT halflife - roughly 100ms to go from 0 to max performance point.\n"
+		 "  4:  8ms PELT halflife - roughly 50ms to go from 0 to max performance point.");
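+/*
+ * With 0444 permissions the multiplier is only settable at boot, e.g. by
+ * appending "kernel.sched_pelt_multiplier=2" to the kernel command line.
+ */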
+
/*
* Approximate the new util_avg value assuming an entity has continued to run
* for @delta us.
@@ -482,7 +536,7 @@ unsigned long approximate_util_avg(unsigned long util, u64 delta)
if (unlikely(!delta))
return util;
- accumulate_sum(delta, &sa, 1, 0, 1);
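+	/* @delta is in wall-clock us; scale it into the faster PELT clock. */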
+ accumulate_sum(delta << sched_pelt_lshift, &sa, 1, 0, 1);
___update_load_avg(&sa, 0);
return sa.util_avg;
@@ -494,7 +548,7 @@ unsigned long approximate_util_avg(unsigned long util, u64 delta)
u64 approximate_runtime(unsigned long util)
{
struct sched_avg sa = {};
- u64 delta = 1024; // period = 1024 = ~1ms
+	u64 delta = 1024 << sched_pelt_lshift; /* ~1ms of wall time in PELT units */
u64 runtime = 0;
if (unlikely(!util))