@@ -196,7 +196,6 @@ static inline int get_mode_idx_from_str(const char *str, size_t size)
return -EINVAL;
}
-static DEFINE_MUTEX(amd_pstate_limits_lock);
static DEFINE_MUTEX(amd_pstate_driver_lock);
static u8 msr_get_epp(struct amd_cpudata *cpudata)
@@ -283,6 +282,8 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
u64 value, prev;
int ret;
+ lockdep_assert_held(&cpudata->lock);
+
value = prev = READ_ONCE(cpudata->cppc_req_cached);
value &= ~AMD_CPPC_EPP_PERF_MASK;
value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
@@ -315,6 +316,8 @@ static int shmem_set_epp(struct amd_cpudata *cpudata, u8 epp)
int ret;
struct cppc_perf_ctrls perf_ctrls;
+ lockdep_assert_held(&cpudata->lock);
+
if (epp == cpudata->epp_cached)
return 0;
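
For context on the two hunks above: lockdep_assert_held() turns the "callers must hold cpudata->lock" rule into a runtime check when CONFIG_PROVE_LOCKING is enabled and compiles away otherwise, so the contract is enforced at the point where it matters rather than by comment alone. A minimal sketch of that pattern, using made-up names rather than the driver's:

    #include <linux/lockdep.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    struct demo_state {
            struct mutex lock;
            u8 epp_cached;          /* protected by @lock */
    };

    /* Leaf writer: callers are expected to hold @s->lock already. */
    static int demo_write_epp(struct demo_state *s, u8 epp)
    {
            lockdep_assert_held(&s->lock);  /* warns under PROVE_LOCKING if not held */
            s->epp_cached = epp;
            return 0;
    }
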
@@ -335,6 +338,8 @@ static int amd_pstate_set_energy_pref_index(struct cpufreq_policy *policy,
struct amd_cpudata *cpudata = policy->driver_data;
u8 epp;
+ guard(mutex)(&cpudata->lock);
+
if (!pref_index)
epp = cpudata->epp_default;
else
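
The guard(mutex)() helper (the cleanup.h-based scope guard) acquires the mutex immediately and releases it automatically when the enclosing scope is left, so early returns need no explicit mutex_unlock(). A small illustrative sketch, with demo names only, not the driver's code:

    #include <linux/cleanup.h>
    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_lock);
    static int demo_pref;

    static int demo_set_pref(int pref_index)
    {
            guard(mutex)(&demo_lock);       /* mutex_lock() here, mutex_unlock() at scope exit */

            if (pref_index < 0)
                    return -EINVAL;         /* the guard unlocks on this early return, too */

            demo_pref = pref_index;
            return 0;
    }
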
@@ -750,7 +755,6 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
pr_err("Boost mode is not supported by this processor or SBIOS\n");
return -EOPNOTSUPP;
}
- guard(mutex)(&amd_pstate_driver_lock);
ret = amd_pstate_cpu_boost_update(policy, state);
refresh_frequency_limits(policy);
@@ -973,6 +977,9 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
cpudata->cpu = policy->cpu;
+ mutex_init(&cpudata->lock);
+ guard(mutex)(&cpudata->lock);
+
ret = amd_pstate_init_perf(cpudata);
if (ret)
goto free_cpudata1;
@@ -1179,8 +1186,6 @@ static ssize_t store_energy_performance_preference(
if (ret < 0)
return -EINVAL;
- guard(mutex)(&amd_pstate_limits_lock);
-
ret = amd_pstate_set_energy_pref_index(policy, ret);
return ret ? ret : count;
@@ -1353,8 +1358,10 @@ int amd_pstate_update_status(const char *buf, size_t size)
if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
return -EINVAL;
- if (mode_state_machine[cppc_state][mode_idx])
+ if (mode_state_machine[cppc_state][mode_idx]) {
+ guard(mutex)(&amd_pstate_driver_lock);
return mode_state_machine[cppc_state][mode_idx](mode_idx);
+ }
return 0;
}
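
Worth noting about the scoping above: because guard() ties the unlock to the closing brace of the block it lives in, amd_pstate_driver_lock is now taken only when a transition callback actually exists, is held across that callback (the unlock runs after the return value has been computed), and is never taken on the plain "return 0" path. The same shape in a standalone sketch with illustrative names:

    #include <linux/cleanup.h>
    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_driver_lock);

    static int demo_enable(int idx) { return 0; }   /* stand-in transition callback */

    static int (*demo_transitions[])(int) = { demo_enable, NULL };

    static int demo_update_status(int idx)
    {
            if (idx < 0 || idx >= ARRAY_SIZE(demo_transitions))
                    return -EINVAL;

            if (demo_transitions[idx]) {
                    guard(mutex)(&demo_driver_lock);        /* held across the callback... */
                    return demo_transitions[idx](idx);      /* ...and dropped after it returns */
            }

            return 0;       /* no transition: the lock was never taken */
    }
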
@@ -1375,7 +1382,6 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
char *p = memchr(buf, '\n', count);
int ret;
- guard(mutex)(&amd_pstate_driver_lock);
ret = amd_pstate_update_status(buf, p ? p - buf : count);
return ret < 0 ? ret : count;
@@ -1472,6 +1478,9 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
cpudata->cpu = policy->cpu;
+ mutex_init(&cpudata->lock);
+ guard(mutex)(&cpudata->lock);
+
ret = amd_pstate_init_perf(cpudata);
if (ret)
goto free_cpudata1;
@@ -1558,6 +1567,8 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
union perf_cached perf;
u8 epp;
+ guard(mutex)(&cpudata->lock);
+
amd_pstate_update_min_max_limit(policy);
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
@@ -1646,8 +1657,6 @@ static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
if (cpudata->suspended)
return 0;
- guard(mutex)(&amd_pstate_limits_lock);
-
if (trace_amd_pstate_epp_perf_enabled()) {
trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
AMD_CPPC_EPP_BALANCE_POWERSAVE,
@@ -1684,8 +1693,6 @@ static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata->suspended) {
- guard(mutex)(&amd_pstate_limits_lock);
-
/* enable amd pstate from suspend state*/
amd_pstate_epp_reenable(policy);
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -96,6 +96,8 @@ struct amd_cpudata {
bool boost_supported;
bool hw_prefcore;
+ struct mutex lock; /* serializes EPP and perf-limit updates for this CPU */
+
/* EPP feature related attributes*/
u8 epp_cached;
u32 policy;
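
Taken together, the direction of the patch is: embed the mutex in the per-CPU data, mutex_init() it in the ->init() callback before the first guard()/mutex_lock() on it, and let the update paths take it per CPU instead of funneling through the global amd_pstate_limits_lock. A condensed sketch of that lifecycle, again with illustrative names only:

    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_cpudata {
            struct mutex lock;      /* serializes EPP/limit updates for this CPU */
            u8 epp_cached;          /* protected by @lock */
    };

    static struct demo_cpudata *demo_cpu_init(void)
    {
            struct demo_cpudata *d = kzalloc(sizeof(*d), GFP_KERNEL);

            if (!d)
                    return NULL;

            /* Must run before any guard(mutex)(&d->lock) or mutex_lock(&d->lock). */
            mutex_init(&d->lock);
            return d;
    }

With per-policy locks, EPP and limit updates on one CPU no longer contend with unrelated CPUs, which appears to be the point of retiring the global amd_pstate_limits_lock.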