| Message ID | 20220416003314.59211-3-olvaffe@gmail.com |
|---|---|
| State | Accepted |
| Commit | 78f815c1cf8fc5f05dc5cec29eb1895cb53470e9 |
| Series | [1/3] drm/msm: remove explicit devfreq status reset |
simple_ondemand interacts poorly with clamp_to_idle.  It only looks at
the load since the last get_dev_status call, while it should really
look at the load over polling_ms.  When clamp_to_idle is true, it
almost always picks the lowest frequency on active because the gpu is
idle between msm_devfreq_idle/msm_devfreq_active.

This logic could potentially be moved into devfreq core.

Fixes: 7c0ffcd40b16 ("drm/msm/gpu: Respect PM QoS constraints")
Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Cc: Rob Clark <robdclark@chromium.org>
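To make the failure mode concrete before the diff, here is a minimal userspace model of the load calculation (illustrative only: `struct sample` and `load_pct` are invented names, not devfreq or msm code). At the idle-to-active transition the stats were just reset by the previous get_dev_status call, so the window the governor sees is almost entirely idle:

```c
#include <stdio.h>

/* invented stand-in for the busy/total pair in devfreq_dev_status */
struct sample {
	unsigned long busy_time;  /* us busy since the last query */
	unsigned long total_time; /* us elapsed since the last query */
};

static unsigned int load_pct(const struct sample *s)
{
	return s->total_time ? 100 * s->busy_time / s->total_time : 0;
}

int main(void)
{
	/*
	 * Window seen by simple_ondemand at the idle->active boundary:
	 * the gpu sat idle since the stats were reset, so almost no
	 * busy time accumulated.
	 */
	struct sample at_wakeup = { .busy_time = 50, .total_time = 4000 };

	printf("load at wakeup: %u%%\n", load_pct(&at_wakeup)); /* 1% */
	return 0;
}
```

With a load of ~1%, the governor picks the lowest frequency right when work is about to start, which is the behavior the patch below fixes.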
```diff
---
 drivers/gpu/drm/msm/msm_gpu.h         |  3 ++
 drivers/gpu/drm/msm/msm_gpu_devfreq.c | 60 ++++++++++++++++++++++++++-
 2 files changed, 62 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 389c6dab751b..143c56f5185b 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -9,6 +9,7 @@
 
 #include <linux/adreno-smmu-priv.h>
 #include <linux/clk.h>
+#include <linux/devfreq.h>
 #include <linux/interconnect.h>
 #include <linux/pm_opp.h>
 #include <linux/regulator/consumer.h>
@@ -117,6 +118,8 @@ struct msm_gpu_devfreq {
 	/** idle_time: Time of last transition to idle: */
 	ktime_t idle_time;
 
+	struct devfreq_dev_status average_status;
+
 	/**
 	 * idle_work:
 	 *
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index f531015107c3..d2539ca78c29 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -9,6 +9,7 @@
 
 #include <linux/devfreq.h>
 #include <linux/devfreq_cooling.h>
+#include <linux/math64.h>
 #include <linux/units.h>
 
 /*
@@ -75,12 +76,69 @@ static void get_raw_dev_status(struct msm_gpu *gpu,
 	status->busy_time = busy_time;
 }
 
+static void update_average_dev_status(struct msm_gpu *gpu,
+		const struct devfreq_dev_status *raw)
+{
+	struct msm_gpu_devfreq *df = &gpu->devfreq;
+	const u32 polling_ms = df->devfreq->profile->polling_ms;
+	const u32 max_history_ms = polling_ms * 11 / 10;
+	struct devfreq_dev_status *avg = &df->average_status;
+	u64 avg_freq;
+
+	/* simple_ondemand governor interacts poorly with gpu->clamp_to_idle.
+	 * When we enforce the constraint on idle, it calls get_dev_status
+	 * which would normally reset the stats.  When we remove the
+	 * constraint on active, it calls get_dev_status again where busy_time
+	 * would be 0.
+	 *
+	 * To remedy this, we always return the average load over the past
+	 * polling_ms.
+	 */
+
+	/* raw is longer than polling_ms or avg has no history */
+	if (div_u64(raw->total_time, USEC_PER_MSEC) >= polling_ms ||
+	    !avg->total_time) {
+		*avg = *raw;
+		return;
+	}
+
+	/* Truncate the oldest history first.
+	 *
+	 * Because we keep the history with a single devfreq_dev_status,
+	 * rather than a list of devfreq_dev_status, we have to assume freq
+	 * and load are the same over avg->total_time.  We can scale down
+	 * avg->busy_time and avg->total_time by the same factor to drop
+	 * history.
+	 */
+	if (div_u64(avg->total_time + raw->total_time, USEC_PER_MSEC) >=
+	    max_history_ms) {
+		const u32 new_total_time = polling_ms * USEC_PER_MSEC -
+			raw->total_time;
+		avg->busy_time = div_u64(
+				mul_u32_u32(avg->busy_time, new_total_time),
+				avg->total_time);
+		avg->total_time = new_total_time;
+	}
+
+	/* compute the average freq over avg->total_time + raw->total_time */
+	avg_freq = mul_u32_u32(avg->current_frequency, avg->total_time);
+	avg_freq += mul_u32_u32(raw->current_frequency, raw->total_time);
+	do_div(avg_freq, avg->total_time + raw->total_time);
+
+	avg->current_frequency = avg_freq;
+	avg->busy_time += raw->busy_time;
+	avg->total_time += raw->total_time;
+}
+
 static int msm_devfreq_get_dev_status(struct device *dev,
 		struct devfreq_dev_status *status)
 {
 	struct msm_gpu *gpu = dev_to_gpu(dev);
+	struct devfreq_dev_status raw;
 
-	get_raw_dev_status(gpu, status);
+	get_raw_dev_status(gpu, &raw);
+	update_average_dev_status(gpu, &raw);
+	*status = gpu->devfreq.average_status;
 
 	return 0;
 }
--
2.36.0.rc0.470.gd361397f0d-goog
```
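The arithmetic in update_average_dev_status() can be easier to follow outside the kernel. The sketch below mirrors its three steps (restart the window, truncate old history, merge the raw sample) using invented, simplified types, with plain 64-bit arithmetic standing in for the kernel's div_u64()/mul_u32_u32() helpers:

```c
#include <stdint.h>
#include <stdio.h>

/* invented stand-in for devfreq_dev_status */
struct status {
	uint64_t total_time;        /* us */
	uint64_t busy_time;         /* us */
	uint64_t current_frequency; /* Hz */
};

static void merge(struct status *avg, const struct status *raw,
		  uint64_t polling_us)
{
	uint64_t max_history_us = polling_us * 11 / 10;

	/* the raw sample alone spans polling_us, or there is no history */
	if (raw->total_time >= polling_us || !avg->total_time) {
		*avg = *raw;
		return;
	}

	/* drop the oldest history so avg + raw spans ~polling_us */
	if (avg->total_time + raw->total_time >= max_history_us) {
		uint64_t new_total = polling_us - raw->total_time;

		avg->busy_time = avg->busy_time * new_total / avg->total_time;
		avg->total_time = new_total;
	}

	/* time-weighted average of the two frequencies */
	avg->current_frequency =
		(avg->current_frequency * avg->total_time +
		 raw->current_frequency * raw->total_time) /
		(avg->total_time + raw->total_time);
	avg->busy_time += raw->busy_time;
	avg->total_time += raw->total_time;
}

int main(void)
{
	/* 50 ms polling window: 45 ms of fully-busy history at 300 MHz,
	 * then a 5 ms all-idle sample taken at the idle->active boundary */
	struct status avg = { 45000, 45000, 300000000 };
	struct status raw = {  5000,     0, 300000000 };

	merge(&avg, &raw, 50000);

	/* the raw sample alone reads 0% load; the merged window reads 90% */
	printf("averaged load: %llu%%\n",
	       (unsigned long long)(100 * avg.busy_time / avg.total_time));
	return 0;
}
```

Scaling busy_time and total_time by the same factor keeps the load ratio of the older history intact, which is what lets a single devfreq_dev_status stand in for a whole list of samples.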
On Fri, Apr 15, 2022 at 5:33 PM Chia-I Wu <olvaffe@gmail.com> wrote:
>
> simple_ondemand interacts poorly with clamp_to_idle.  It only looks at
> the load since the last get_dev_status call, while it should really
> look at the load over polling_ms.  When clamp_to_idle is true, it
> almost always picks the lowest frequency on active because the gpu is
> idle between msm_devfreq_idle/msm_devfreq_active.
>
> This logic could potentially be moved into devfreq core.

The idea is to extend devfreq_simple_ondemand_data to specify whether
devfreq_simple_ondemand_func should use "last status" or "average
status" to determine the target frequency.  devfreq core will need to
store "struct devfreq_dev_status average_status", which will be updated
when a device uses simple_ondemand and asks for average_status instead
of last_status.
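A rough sketch of what that devfreq core extension could look like. To be clear, `use_average_status` and a core-maintained `average_status` are hypothetical; only `devfreq_simple_ondemand_data`, its existing threshold fields, and `last_status` correspond to the real API, and the structs below are trimmed stand-ins, not the kernel definitions:

```c
#include <stdbool.h>

/* trimmed stand-ins for the real devfreq structs */
struct devfreq_dev_status {
	unsigned long total_time;
	unsigned long busy_time;
	unsigned long current_frequency;
};

struct devfreq_simple_ondemand_data {
	unsigned int upthreshold;
	unsigned int downdifferential;
	bool use_average_status;	/* hypothetical new knob */
};

struct devfreq {
	struct devfreq_dev_status last_status;
	struct devfreq_dev_status average_status; /* hypothetical */
	struct devfreq_simple_ondemand_data *od_data;
};

/* the governor would pick which view of the load to scale from */
static const struct devfreq_dev_status *
ondemand_status(const struct devfreq *df)
{
	if (df->od_data && df->od_data->use_average_status)
		return &df->average_status; /* load over ~polling_ms */
	return &df->last_status;            /* load since last query */
}

int main(void)
{
	struct devfreq_simple_ondemand_data od = { 90, 5, true };
	struct devfreq df = {
		.last_status    = { 1000, 10, 300000000 },     /* ~1% */
		.average_status = { 50000, 45000, 300000000 }, /* 90% */
		.od_data        = &od,
	};

	return ondemand_status(&df) == &df.average_status ? 0 : 1;
}
```

With such a flag, drivers like msm would not need to keep their own average_status bookkeeping, since the core would maintain it for any device that opts in.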