@@ -1195,6 +1195,93 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
 	return 0;
 }
 
+static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+{
+	struct cppc_perf_ctrls perf_ctrls;
+	u64 value, max_perf;
+	int ret;
+
+	ret = amd_pstate_enable(true);
+	if (ret)
+		pr_err("failed to enable amd pstate during resume, return %d\n", ret);
+
+	value = READ_ONCE(cpudata->cppc_req_cached);
+	max_perf = READ_ONCE(cpudata->highest_perf);
+
+	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+	} else {
+		perf_ctrls.max_perf = max_perf;
+		perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
+		cppc_set_perf(cpudata->cpu, &perf_ctrls);
+	}
+}
+
+static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
+{
+	struct amd_cpudata *cpudata = all_cpu_data[policy->cpu];
+
+	pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
+
+	if (cppc_active) {
+		amd_pstate_epp_reenable(cpudata);
+		cpudata->suspended = false;
+	}
+
+	return 0;
+}
+
+static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
+{
+	struct amd_cpudata *cpudata = all_cpu_data[policy->cpu];
+	struct cppc_perf_ctrls perf_ctrls;
+	int min_perf;
+	u64 value;
+
+	min_perf = READ_ONCE(cpudata->lowest_perf);
+	value = READ_ONCE(cpudata->cppc_req_cached);
+
+	mutex_lock(&amd_pstate_limits_lock);
+	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+		cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
+
+		/* Set max perf same as min perf */
+		value &= ~AMD_CPPC_MAX_PERF(~0L);
+		value |= AMD_CPPC_MAX_PERF(min_perf);
+		value &= ~AMD_CPPC_MIN_PERF(~0L);
+		value |= AMD_CPPC_MIN_PERF(min_perf);
+		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+	} else {
+		perf_ctrls.desired_perf = 0;
+		perf_ctrls.max_perf = min_perf;
+		perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(AMD_CPPC_EPP_POWERSAVE);
+		cppc_set_perf(cpudata->cpu, &perf_ctrls);
+	}
+	mutex_unlock(&amd_pstate_limits_lock);
+}
+
+static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
+{
+	struct amd_cpudata *cpudata = all_cpu_data[policy->cpu];
+
+	pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
+
+	if (cpudata->suspended)
+		return 0;
+
+	if (cppc_active)
+		amd_pstate_epp_offline(policy);
+
+	return 0;
+}
+
+static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
+{
+	amd_pstate_clear_update_util_hook(policy->cpu);
+
+	return amd_pstate_cpu_offline(policy);
+}
+
 static void amd_pstate_verify_cpu_policy(struct amd_cpudata *cpudata,
 					 struct cpufreq_policy_data *policy)
 {
@@ -1229,6 +1316,8 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
 	.init		= amd_pstate_epp_cpu_init,
 	.exit		= amd_pstate_epp_cpu_exit,
 	.update_limits	= amd_pstate_epp_update_limits,
+	.offline	= amd_pstate_epp_cpu_offline,
+	.online		= amd_pstate_epp_cpu_online,
 	.name		= "amd_pstate_epp",
 	.attr		= amd_pstate_epp_attr,
 };
--- a/include/linux/amd-pstate.h
+++ b/include/linux/amd-pstate.h
@@ -99,6 +99,7 @@ struct amd_cpudata {
 	u64 cppc_cap1_cached;
 	struct update_util_data update_util;
 	struct amd_aperf_mperf sample;
+	bool suspended;
 };
 
 /**
Adds online and offline driver callback support to allow CPU cores to go
offline, and restores the previous working state when a core goes back
online later in EPP driver mode.

Signed-off-by: Perry Yuan <Perry.Yuan@amd.com>
---
 drivers/cpufreq/amd-pstate.c | 89 ++++++++++++++++++++++++++++++++++++
 include/linux/amd-pstate.h   |  1 +
 2 files changed, 90 insertions(+)
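A note for reviewers, since the max == min clamp in amd_pstate_epp_offline()
is easy to misread: the offline path rewrites only the MIN_PERF and MAX_PERF
fields of a local copy of the cached MSR_AMD_CPPC_REQ value and never writes
the clamped value back into cppc_req_cached, which is what lets
amd_pstate_epp_reenable() restore the full pre-offline request on the online
path. Below is a minimal user-space sketch of that read-modify-write pattern;
the AMD_CPPC_* field macros are mirrored from the kernel's
arch/x86/include/asm/msr-index.h, and the sketch is an illustration only, not
part of this patch:

    /* Illustration only: models the offline-path clamp applied to the
     * cached MSR_AMD_CPPC_REQ value.  Field layout mirrors msr-index.h. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define AMD_CPPC_MIN_PERF(x)	(((x) & 0xffULL) << 0)
    #define AMD_CPPC_MAX_PERF(x)	(((x) & 0xffULL) << 8)

    int main(void)
    {
    	/* Pretend the cached request asks for max=0xff, min=0x10. */
    	uint64_t value = AMD_CPPC_MAX_PERF(0xff) | AMD_CPPC_MIN_PERF(0x10);
    	uint64_t min_perf = 0x10;	/* stand-in for cpudata->lowest_perf */

    	/* Clear both fields, then pin max and min to lowest_perf. */
    	value &= ~AMD_CPPC_MAX_PERF(~0ULL);
    	value |= AMD_CPPC_MAX_PERF(min_perf);
    	value &= ~AMD_CPPC_MIN_PERF(~0ULL);
    	value |= AMD_CPPC_MIN_PERF(min_perf);

    	/* Prints: cppc_req = 0x0000000000001010 */
    	printf("cppc_req = 0x%016" PRIx64 "\n", value);
    	return 0;
    }

Because only the local copy is clamped, the online path can simply write the
unmodified cppc_req_cached value back to the MSR, so the per-policy EPP and
min/max settings survive an offline/online cycle.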