@@ -268,8 +268,6 @@ static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
}

WRITE_ONCE(cpudata->cppc_req_cached, value);
- if (epp != cpudata->epp_cached)
- WRITE_ONCE(cpudata->epp_cached, epp);

return 0;
}
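Aside, a sketch rather than part of the patch: this removal works because the EPP byte already lives in bits 31:24 of the cached CPPC request word, so it can always be recovered with FIELD_GET(AMD_CPPC_EPP_PERF_MASK, ...) instead of keeping a second copy. A minimal userspace approximation of that extraction follows; EPP_PERF_MASK and epp_from_req() are illustrative stand-ins for the kernel's GENMASK_ULL() and FIELD_GET() from <linux/bitfield.h>.

	#include <stdint.h>
	#include <stdio.h>

	/* mirrors AMD_CPPC_EPP_PERF_MASK, i.e. GENMASK_ULL(31, 24) */
	#define EPP_PERF_MASK	(0xffULL << 24)

	/* open-coded FIELD_GET(): isolate the field, shift it down to bit 0 */
	static uint8_t epp_from_req(uint64_t cppc_req_cached)
	{
		return (cppc_req_cached & EPP_PERF_MASK) >> 24;
	}

	int main(void)
	{
		uint64_t req = (0x42ULL << 24) | 0xff;	/* EPP = 0x42, max_perf = 0xff */

		printf("epp = 0x%02x\n", (unsigned int)epp_from_req(req));	/* prints 0x42 */
		return 0;
	}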
@@ -320,7 +318,6 @@ static int msr_set_epp(struct cpufreq_policy *policy, u8 epp)
}

/* update both so that msr_update_perf() can effectively check */
- WRITE_ONCE(cpudata->epp_cached, epp);
WRITE_ONCE(cpudata->cppc_req_cached, value);

return ret;
@@ -337,11 +334,14 @@ static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
{
struct amd_cpudata *cpudata = policy->driver_data;
struct cppc_perf_ctrls perf_ctrls;
+ u8 epp_cached;
u64 value;
int ret;

lockdep_assert_held(&cpudata->lock);

+ epp_cached = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
+
if (trace_amd_pstate_epp_perf_enabled()) {
union perf_cached perf = cpudata->perf;

@@ -352,10 +352,10 @@ static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
cpudata->cppc_req_cached),
policy->boost_enabled,
- epp != cpudata->epp_cached);
+ epp != epp_cached);
}

- if (epp == cpudata->epp_cached)
+ if (epp == epp_cached)
return 0;

perf_ctrls.energy_perf = epp;
@@ -364,7 +364,6 @@ static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
pr_debug("failed to set energy perf value (%d)\n", ret);
return ret;
}
- WRITE_ONCE(cpudata->epp_cached, epp);

value = READ_ONCE(cpudata->cppc_req_cached);
value &= ~AMD_CPPC_EPP_PERF_MASK;
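The two context lines above start a read-modify-write of cppc_req_cached, now the only place the EPP value is stored; the field is presumably spliced back in with FIELD_PREP() and written out past the end of this hunk. A compact sketch of the clear-then-set pattern, with req_set_epp() and EPP_PERF_MASK as invented stand-ins:

	#include <stdint.h>

	#define EPP_PERF_MASK	(0xffULL << 24)	/* stand-in for AMD_CPPC_EPP_PERF_MASK */

	/* clear-then-set, i.e. an open-coded FIELD_PREP(): rewrite the EPP
	 * field inside the request word without disturbing the other fields */
	static uint64_t req_set_epp(uint64_t req, uint8_t epp)
	{
		req &= ~EPP_PERF_MASK;		/* drop the old EPP byte   */
		req |= (uint64_t)epp << 24;	/* splice in the new value */
		return req;
	}

Since readers such as show_energy_performance_preference() derive EPP from the same word, a successful write here is visible everywhere; there is no second cache left to fall out of sync.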
@@ -1218,9 +1217,11 @@ static ssize_t show_energy_performance_preference(
struct cpufreq_policy *policy, char *buf)
{
struct amd_cpudata *cpudata = policy->driver_data;
- u8 preference;
+ u8 preference, epp;
+
+ epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);

- switch (cpudata->epp_cached) {
+ switch (epp) {
case AMD_CPPC_EPP_PERFORMANCE:
preference = EPP_INDEX_PERFORMANCE;
break;
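For context on the switch statement: the raw byte recovered from the request word is matched against the driver's fixed EPP constants and mapped to a sysfs preference index. A condensed sketch of that mapping; epp_to_pref is an invented name and the hex values mirror the AMD_CPPC_EPP_* constants, so verify them against amd-pstate.h:

	#include <stdint.h>

	/* raw EPP byte -> sysfs preference string */
	static const struct {
		uint8_t epp;
		const char *name;
	} epp_to_pref[] = {
		{ 0x00, "performance" },		/* AMD_CPPC_EPP_PERFORMANCE */
		{ 0x80, "balance_performance" },	/* AMD_CPPC_EPP_BALANCE_PERFORMANCE */
		{ 0xbf, "balance_power" },		/* AMD_CPPC_EPP_BALANCE_POWERSAVE */
		{ 0xff, "power" },			/* AMD_CPPC_EPP_POWERSAVE */
	};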
@@ -1588,7 +1589,7 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
epp = 0;
else
- epp = READ_ONCE(cpudata->epp_cached);
+ epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);

perf = READ_ONCE(cpudata->perf);

@@ -1624,23 +1625,24 @@ static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
struct amd_cpudata *cpudata = policy->driver_data;
union perf_cached perf = cpudata->perf;
int ret;
+ u8 epp;
+
+ guard(mutex)(&cpudata->lock);
+
+ epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);

pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);

ret = amd_pstate_cppc_enable(policy);
if (ret)
return ret;
-
- guard(mutex)(&cpudata->lock);
-
- ret = amd_pstate_update_perf(policy, 0, 0, perf.highest_perf, cpudata->epp_cached, false);
+ ret = amd_pstate_update_perf(policy, 0, 0, perf.highest_perf, epp, false);
if (ret)
return ret;

cpudata->suspended = false;

return 0;
-
}

static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
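Note the reordering in this hunk: guard(mutex)(&cpudata->lock) is taken before the FIELD_GET() instead of after amd_pstate_cppc_enable(). With epp_cached gone, cppc_req_cached is the only EPP store, so it has to be read under the same lock its writers hold. A rough pthread analog of the protected pattern, with all names invented for illustration (the kernel's guard() unlocks automatically at scope exit):

	#include <pthread.h>
	#include <stdint.h>

	struct cpu_state {
		pthread_mutex_t lock;
		uint64_t cppc_req_cached;	/* shared with concurrent writers */
	};

	static uint8_t read_epp_locked(struct cpu_state *st)
	{
		uint8_t epp;

		pthread_mutex_lock(&st->lock);
		/* read and use the cached word while holding the lock, so the
		 * value programmed back into hardware cannot race an update */
		epp = (st->cppc_req_cached >> 24) & 0xff;
		pthread_mutex_unlock(&st->lock);

		return epp;
	}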
@@ -100,7 +100,6 @@ struct amd_cpudata {
struct cpufreq_policy *policy;

/* EPP feature related attributes*/
- u8 epp_cached;
bool suspended;
u8 epp_default;
};