Message ID | 20250206215659.3350066-5-superm1@kernel.org
State      | New
Series     | amd-pstate cleanups
On 2/7/2025 3:26 AM, Mario Limonciello wrote:
> From: Mario Limonciello <mario.limonciello@amd.com>
>
> amd_pstate_cpu_boost_update() and refresh_frequency_limits() both
> update the policy state and have nothing to do with the amd-pstate
> driver itself.
>
> A global "limits" lock doesn't make sense because each CPU can have
> policies changed independently. Instead introduce locks into the
> cpudata structure and lock each CPU independently.
>
> The remaining "global" driver lock is used to ensure that only one
> entity can change driver modes at a given time.
>
> Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
> ---
>  drivers/cpufreq/amd-pstate.c | 27 +++++++++++++++++----------
>  drivers/cpufreq/amd-pstate.h |  2 ++
>  2 files changed, 19 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
> index 77bc6418731ee..dd230ed3b9579 100644
> --- a/drivers/cpufreq/amd-pstate.c
> +++ b/drivers/cpufreq/amd-pstate.c
> @@ -196,7 +196,6 @@ static inline int get_mode_idx_from_str(const char *str, size_t size)
>  	return -EINVAL;
>  }
>
> -static DEFINE_MUTEX(amd_pstate_limits_lock);
>  static DEFINE_MUTEX(amd_pstate_driver_lock);
>
>  static u8 msr_get_epp(struct amd_cpudata *cpudata)
> @@ -283,6 +282,8 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
>  	u64 value, prev;
>  	int ret;
>
> +	lockdep_assert_held(&cpudata->lock);

After making the perf_cached variable writes atomic, do we still need a
cpudata->lock?

Regards,
Dhananjay

> [..snip..]
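For context on the hunk Dhananjay is asking about: lockdep_assert_held() does
not acquire the lock itself. With CONFIG_PROVE_LOCKING enabled it warns at
runtime if the current task does not already hold the given lock, so it
encodes and enforces the "callers must hold cpudata->lock" contract. Below is
an annotated excerpt of that hunk (the code is from the patch; only the
comments are added here):

static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
{
	u64 value, prev;
	int ret;

	/* Does not take the lock: with CONFIG_PROVE_LOCKING this warns at
	 * runtime if the caller does not already hold cpudata->lock. */
	lockdep_assert_held(&cpudata->lock);

	value = prev = READ_ONCE(cpudata->cppc_req_cached);
	...
}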
On 2/10/2025 23:02, Dhananjay Ugwekar wrote:
> On 2/7/2025 3:26 AM, Mario Limonciello wrote:
>> From: Mario Limonciello <mario.limonciello@amd.com>
>>
>> [..snip..]
>>
>> @@ -283,6 +282,8 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
>>  	u64 value, prev;
>>  	int ret;
>>
>> +	lockdep_assert_held(&cpudata->lock);
>
> After making the perf_cached variable writes atomic, do we still need a
> cpudata->lock?

My concern was specifically that userspace could interact with multiple sysfs
files that influence the atomic perf variable (and the HW) at the same time.
So you would not have deterministic behavior if they raced. But if you take
the mutex on all the paths where this could happen, it becomes a FIFO.

> Regards,
> Dhananjay
>
>> [..snip..]
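A minimal userspace sketch of the race Mario describes, using POSIX threads as
a stand-in for two concurrent sysfs stores. The names and field layout are
hypothetical; only the read-modify-write pattern mirrors msr_set_epp(). Each
individual load and store is atomic, yet without the mutex one thread's field
update can be clobbered by the other's stale read-modify-write:

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t cppc_req_cached;	/* stand-in for cpudata->cppc_req_cached */
static pthread_mutex_t cpu_lock = PTHREAD_MUTEX_INITIALIZER;

#define EPP_MASK	0x00ffull		/* hypothetical field layout */
#define MIN_PERF_MASK	0xff00ull

static void update_field(uint64_t mask, uint64_t val)
{
	pthread_mutex_lock(&cpu_lock);		/* remove to observe lost updates */
	uint64_t v = atomic_load(&cppc_req_cached);	/* like READ_ONCE() */
	v &= ~mask;
	v |= val & mask;
	atomic_store(&cppc_req_cached, v);	/* like WRITE_ONCE() */
	pthread_mutex_unlock(&cpu_lock);
}

static void *store_epp(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		update_field(EPP_MASK, 0x33);
	return NULL;
}

static void *store_min_perf(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		update_field(MIN_PERF_MASK, 0x4400);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, store_epp, NULL);
	pthread_create(&b, NULL, store_min_perf, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* With the lock this always prints 0x4433; without it, a stale
	 * read-modify-write can wipe out the other thread's field. */
	printf("final cached request: %#llx\n",
	       (unsigned long long)atomic_load(&cppc_req_cached));
	return 0;
}

With the mutex taken on every such path, each complete read-modify-write
becomes indivisible, which is the FIFO behavior Mario refers to.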
On 2/12/2025 3:24 AM, Mario Limonciello wrote:
> On 2/10/2025 23:02, Dhananjay Ugwekar wrote:
>> On 2/7/2025 3:26 AM, Mario Limonciello wrote:
>>> From: Mario Limonciello <mario.limonciello@amd.com>
>>>
>>> [..snip..]
>>>
>>> @@ -283,6 +282,8 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
>>>  	u64 value, prev;
>>>  	int ret;
>>>
>>> +	lockdep_assert_held(&cpudata->lock);
>>
>> After making the perf_cached variable writes atomic, do we still need a
>> cpudata->lock?
>
> My concern was specifically that userspace could interact with multiple sysfs
> files that influence the atomic perf variable (and the HW) at the same time.
> So you would not have deterministic behavior if they raced. But if you take
> the mutex on all the paths where this could happen, it becomes a FIFO.

I guess the lock still won't guarantee the ordering, right? It will just
ensure that one thread executes that code path for a specific CPU at a time.
And do we even care about the ordering? I'm having a hard time thinking of a
scenario where we'll need the lock. Can you or Gautham think of any such
scenario?

>>> [..snip..]
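Dhananjay's distinction, as a sketch under the same hypothetical names: the
mutex guarantees each update completes without tearing, but not which of two
racing writers applies last:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t epp_cached;			/* stand-in for cpudata->epp_cached */
static pthread_mutex_t cpu_lock = PTHREAD_MUTEX_INITIALIZER;

static void *store_epp(void *arg)
{
	pthread_mutex_lock(&cpu_lock);
	epp_cached = (uint8_t)(uintptr_t)arg;	/* whole update done under the lock */
	pthread_mutex_unlock(&cpu_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, store_epp, (void *)0x20);	/* hypothetical EPP values */
	pthread_create(&b, NULL, store_epp, (void *)0xff);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Prints 0x20 or 0xff depending on which writer ran last: the mutex
	 * makes each update indivisible, it does not decide which one wins. */
	printf("final epp: %#x\n", epp_cached);
	return 0;
}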
On 2/11/2025 23:15, Dhananjay Ugwekar wrote:
> On 2/12/2025 3:24 AM, Mario Limonciello wrote:
>> On 2/10/2025 23:02, Dhananjay Ugwekar wrote:
>>> On 2/7/2025 3:26 AM, Mario Limonciello wrote:
>>>> From: Mario Limonciello <mario.limonciello@amd.com>
>>>>
>>>> [..snip..]
>>>>
>>>> @@ -283,6 +282,8 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
>>>>  	u64 value, prev;
>>>>  	int ret;
>>>>
>>>> +	lockdep_assert_held(&cpudata->lock);
>>>
>>> After making the perf_cached variable writes atomic, do we still need a
>>> cpudata->lock?
>>
>> My concern was specifically that userspace could interact with multiple sysfs
>> files that influence the atomic perf variable (and the HW) at the same time.
>> So you would not have deterministic behavior if they raced. But if you take
>> the mutex on all the paths where this could happen, it becomes a FIFO.
>
> I guess the lock still won't guarantee the ordering, right? It will just
> ensure that one thread executes that code path for a specific CPU at a time.
> And do we even care about the ordering? I'm having a hard time thinking of a
> scenario where we'll need the lock. Can you or Gautham think of any such
> scenario?

You're right; I can't really think of one either. Let me take out the private
lock for the per-CPU device and I'll just overhaul the global lock locations.

>>>> [..snip..]
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 77bc6418731ee..dd230ed3b9579 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -196,7 +196,6 @@ static inline int get_mode_idx_from_str(const char *str, size_t size)
 	return -EINVAL;
 }
 
-static DEFINE_MUTEX(amd_pstate_limits_lock);
 static DEFINE_MUTEX(amd_pstate_driver_lock);
 
 static u8 msr_get_epp(struct amd_cpudata *cpudata)
@@ -283,6 +282,8 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
 	u64 value, prev;
 	int ret;
 
+	lockdep_assert_held(&cpudata->lock);
+
 	value = prev = READ_ONCE(cpudata->cppc_req_cached);
 	value &= ~AMD_CPPC_EPP_PERF_MASK;
 	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
@@ -315,6 +316,8 @@ static int shmem_set_epp(struct amd_cpudata *cpudata, u8 epp)
 	int ret;
 	struct cppc_perf_ctrls perf_ctrls;
 
+	lockdep_assert_held(&cpudata->lock);
+
 	if (epp == cpudata->epp_cached)
 		return 0;
 
@@ -335,6 +338,8 @@ static int amd_pstate_set_energy_pref_index(struct cpufreq_policy *policy,
 	struct amd_cpudata *cpudata = policy->driver_data;
 	u8 epp;
 
+	guard(mutex)(&cpudata->lock);
+
 	if (!pref_index)
 		epp = cpudata->epp_default;
 	else
@@ -750,7 +755,6 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
 		pr_err("Boost mode is not supported by this processor or SBIOS\n");
 		return -EOPNOTSUPP;
 	}
-	guard(mutex)(&amd_pstate_driver_lock);
 
 	ret = amd_pstate_cpu_boost_update(policy, state);
 	refresh_frequency_limits(policy);
@@ -973,6 +977,9 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 
 	cpudata->cpu = policy->cpu;
 
+	mutex_init(&cpudata->lock);
+	guard(mutex)(&cpudata->lock);
+
 	ret = amd_pstate_init_perf(cpudata);
 	if (ret)
 		goto free_cpudata1;
@@ -1179,8 +1186,6 @@ static ssize_t store_energy_performance_preference(
 	if (ret < 0)
 		return -EINVAL;
 
-	guard(mutex)(&amd_pstate_limits_lock);
-
 	ret = amd_pstate_set_energy_pref_index(policy, ret);
 
 	return ret ? ret : count;
@@ -1353,8 +1358,10 @@ int amd_pstate_update_status(const char *buf, size_t size)
 	if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
 		return -EINVAL;
 
-	if (mode_state_machine[cppc_state][mode_idx])
+	if (mode_state_machine[cppc_state][mode_idx]) {
+		guard(mutex)(&amd_pstate_driver_lock);
 		return mode_state_machine[cppc_state][mode_idx](mode_idx);
+	}
 
 	return 0;
 }
@@ -1375,7 +1382,6 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
 	char *p = memchr(buf, '\n', count);
 	int ret;
 
-	guard(mutex)(&amd_pstate_driver_lock);
 	ret = amd_pstate_update_status(buf, p ? p - buf : count);
 
 	return ret < 0 ? ret : count;
@@ -1472,6 +1478,9 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 
 	cpudata->cpu = policy->cpu;
 
+	mutex_init(&cpudata->lock);
+	guard(mutex)(&cpudata->lock);
+
 	ret = amd_pstate_init_perf(cpudata);
 	if (ret)
 		goto free_cpudata1;
@@ -1558,6 +1567,8 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 	union perf_cached perf;
 	u8 epp;
 
+	guard(mutex)(&cpudata->lock);
+
 	amd_pstate_update_min_max_limit(policy);
 
 	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
@@ -1646,8 +1657,6 @@ static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
 	if (cpudata->suspended)
 		return 0;
 
-	guard(mutex)(&amd_pstate_limits_lock);
-
 	if (trace_amd_pstate_epp_perf_enabled()) {
 		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
 					  AMD_CPPC_EPP_BALANCE_POWERSAVE,
@@ -1684,8 +1693,6 @@ static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
 	struct amd_cpudata *cpudata = policy->driver_data;
 
 	if (cpudata->suspended) {
-		guard(mutex)(&amd_pstate_limits_lock);
-
 		/* enable amd pstate from suspend state*/
 		amd_pstate_epp_reenable(policy);
 
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index a140704b97430..6d776c3e5712a 100644
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -96,6 +96,8 @@ struct amd_cpudata {
 	bool	boost_supported;
 	bool	hw_prefcore;
 
+	struct mutex	lock;
+
 	/* EPP feature related attributes*/
 	u8	epp_cached;
 	u32	policy;
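One note on the status_store()/amd_pstate_update_status() hunks above:
guard(mutex)(...) from <linux/cleanup.h> is scope-based, so placing it inside
the new braces holds amd_pstate_driver_lock across the mode_state_machine call
and releases it automatically when the block is left. Annotated excerpt (the
code is from the patch; only the comments are added here):

int amd_pstate_update_status(const char *buf, size_t size)
{
	...
	if (mode_state_machine[cppc_state][mode_idx]) {
		/* Acquired here, held across the whole mode transition... */
		guard(mutex)(&amd_pstate_driver_lock);
		/* ...and dropped automatically when this scope is left,
		 * including via the return below. */
		return mode_state_machine[cppc_state][mode_idx](mode_idx);
	}

	return 0;
}

Moving the guard from status_store() into this non-static helper also means
any other caller of amd_pstate_update_status(), not just the sysfs path,
presumably takes the driver lock for mode changes.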