@@ -330,8 +330,7 @@ static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
for_each_cpu(cpu, policy->cpus) {
struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
- cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
- dbs_update_util_handler);
+ cpufreq_add_update_util_hook(cpu, &cdbs->update_util);
}
}
@@ -367,6 +366,7 @@ static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *poli
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
j_cdbs->policy_dbs = policy_dbs;
+ j_cdbs->update_util.func = dbs_update_util_handler;
}
return policy_dbs;
}
@@ -1696,8 +1696,8 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
/* Prevent intel_pstate_update_util() from using stale data. */
cpu->sample.time = 0;
- cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
- intel_pstate_update_util);
+ cpu->update_util.func = intel_pstate_update_util;
+ cpufreq_add_update_util_hook(cpu_num, &cpu->update_util);
cpu->update_util_set = true;
}
@@ -18,9 +18,7 @@ struct update_util_data {
void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
};
-void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
- void (*func)(struct update_util_data *data, u64 time,
- unsigned int flags));
+void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data);
void cpufreq_remove_update_util_hook(int cpu);
#endif /* CONFIG_CPU_FREQ */
@@ -17,31 +17,26 @@ DEFINE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
* cpufreq_add_update_util_hook - Populate the CPU's update_util_data pointer.
* @cpu: The CPU to set the pointer for.
* @data: New pointer value.
- * @func: Callback function to set for the CPU.
*
* Set and publish the update_util_data pointer for the given CPU.
*
- * The update_util_data pointer of @cpu is set to @data and the callback
- * function pointer in the target struct update_util_data is set to @func.
- * That function will be called by cpufreq_update_util() from RCU-sched
- * read-side critical sections, so it must not sleep. @data will always be
- * passed to it as the first argument which allows the function to get to the
- * target update_util_data structure and its container.
+ * The update_util_data pointer of @cpu is set to @data. The data->func
+ * function will be called by cpufreq_update_util() from RCU-sched read-side
+ * critical sections, so it must not sleep. @data will always be passed to it
+ * as the first argument which allows the function to get to the target
+ * update_util_data structure and its container.
*
* The update_util_data pointer of @cpu must be NULL when this function is
* called or it will WARN() and return with no effect.
*/
-void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
- void (*func)(struct update_util_data *data, u64 time,
- unsigned int flags))
+void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data)
{
- if (WARN_ON(!data || !func))
+ if (WARN_ON(!data || !data->func))
return;
if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu)))
return;
- data->func = func;
rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data);
}
EXPORT_SYMBOL_GPL(cpufreq_add_update_util_hook);
@@ -643,15 +643,17 @@ static int sugov_start(struct cpufreq_policy *policy)
sg_cpu->sg_policy = sg_policy;
sg_cpu->flags = SCHED_CPUFREQ_RT;
sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
+
+ if (policy_is_shared(policy))
+ sg_cpu->update_util.func = sugov_update_shared;
+ else
+ sg_cpu->update_util.func = sugov_update_single;
}
for_each_cpu(cpu, policy->cpus) {
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
- cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
- policy_is_shared(policy) ?
- sugov_update_shared :
- sugov_update_single);
+ cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util);
}
return 0;
}
The callers already have the structure (struct update_util_data) where the function pointer is saved by cpufreq_add_update_util_hook(). And it's better if the callers fill it themselves, as they can do it from the governor->init() callback then, which is called only once per policy lifetime rather than doing it from governor->start(), which can get called multiple times. Note that the schedutil governor isn't updated (for now) to fill update_util.func from the governor->init() callback as its governor->start() callback is doing memset(sg_cpu, 0, ...) which will overwrite the update_util.func. Tested on ARM Hikey board with Ondemand and Schedutil governors. Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org> --- drivers/cpufreq/cpufreq_governor.c | 4 ++-- drivers/cpufreq/intel_pstate.c | 4 ++-- include/linux/sched/cpufreq.h | 4 +--- kernel/sched/cpufreq.c | 19 +++++++------------ kernel/sched/cpufreq_schedutil.c | 10 ++++++---- 5 files changed, 18 insertions(+), 23 deletions(-) -- 2.14.1.202.g24db08a6e8fe