
[RFC,5/9] sched: cpufreq: remove smp_processor_id() in remote paths

Message ID 834d098efe029ee687bac7690bb482e9263a766b.1489058244.git.viresh.kumar@linaro.org
State New

Commit Message

Viresh Kumar March 9, 2017, 11:45 a.m. UTC
From: Steve Muckle <smuckle.linux@gmail.com>


Upcoming support for remote callbacks from the scheduler into schedutil
requires that the CPU identified in the hook structure be used to
indicate the CPU being operated on, rather than relying on
smp_processor_id().

Note that policy->cpu is passed to trace_cpu_frequency() in the fast
switch path, as it is safe to use any CPU managed by the current
policy.

Signed-off-by: Steve Muckle <smuckle.linux@gmail.com>

[ vk: updated commit log, minor code cleanups and use policy->cpu for
      traces ]
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>

---
 kernel/sched/cpufreq_schedutil.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

-- 
2.7.1.410.g6faf27b
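
For context, the hook->cpu references in the patch below assume that an
earlier patch in this series adds a CPU field to struct update_util_data.
A minimal sketch of that assumption (the cpu field and its placement are
illustrative, not taken from this patch):

struct update_util_data {
	void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
	int cpu;	/* assumed: CPU this hook was registered for, set at registration */
};

With such a field in place, a callback like sugov_update_shared() can tell
which CPU's runqueue it is being asked to evaluate, even when the callback
runs on a different CPU.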

Comments

Rafael J. Wysocki April 11, 2017, 2 p.m. UTC | #1
On Tue, Apr 11, 2017 at 12:35 PM, Viresh Kumar <viresh.kumar@linaro.org> wrote:
> On 29-03-17, 23:28, Rafael J. Wysocki wrote:
>> On Thursday, March 09, 2017 05:15:15 PM Viresh Kumar wrote:
>> > @@ -216,7 +216,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
>> >     if (flags & SCHED_CPUFREQ_RT_DL) {
>> >             next_f = policy->cpuinfo.max_freq;
>> >     } else {
>> > -           sugov_get_util(&util, &max);
>> > +           sugov_get_util(&util, &max, hook->cpu);
>>
>> Why is this not racy?
>
> Why would reading the utilization values be racy? The only dynamic value here is
> "util_avg" and I am not sure if reading it is racy.
>
> But, this whole routine has races which I ignored as we may end up updating
> frequency simultaneously from two threads.

Those races aren't there if we don't update cross-CPU, which is my point. :-)

>> >             sugov_iowait_boost(sg_cpu, &util, &max);
>> >             next_f = get_next_freq(sg_policy, util, max);
>> >     }
>> > @@ -272,7 +272,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
>> >     unsigned long util, max;
>> >     unsigned int next_f;
>> >
>> > -   sugov_get_util(&util, &max);
>> > +   sugov_get_util(&util, &max, hook->cpu);
>> >
>>
>> And here?
>>
>> >     raw_spin_lock(&sg_policy->update_lock);
>
> The lock prevents the same here though.
>
> So, if we are going to use this series, then we can use the same update-lock in
> case of single cpu per policies as well.

No, we can't.

The lock is unavoidable in the multi-CPU policies case, but there's no
way I will agree on using a lock in the single-CPU case.

Thanks,
Rafael
Viresh Kumar April 12, 2017, 2:26 p.m. UTC | #2
On 11-04-17, 16:00, Rafael J. Wysocki wrote:
> On Tue, Apr 11, 2017 at 12:35 PM, Viresh Kumar <viresh.kumar@linaro.org> wrote:
> > On 29-03-17, 23:28, Rafael J. Wysocki wrote:
> >> On Thursday, March 09, 2017 05:15:15 PM Viresh Kumar wrote:
> >> > @@ -216,7 +216,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
> >> >     if (flags & SCHED_CPUFREQ_RT_DL) {
> >> >             next_f = policy->cpuinfo.max_freq;
> >> >     } else {
> >> > -           sugov_get_util(&util, &max);
> >> > +           sugov_get_util(&util, &max, hook->cpu);
> >>
> >> Why is this not racy?
> >
> > Why would reading the utilization values be racy? The only dynamic value here is
> > "util_avg" and I am not sure if reading it is racy.
> >
> > But, this whole routine has races which I ignored as we may end up updating
> > frequency simultaneously from two threads.
>
> Those races aren't there if we don't update cross-CPU, which is my point. :-)

Of course. There are no races without this series.

> >> >             sugov_iowait_boost(sg_cpu, &util, &max);
> >> >             next_f = get_next_freq(sg_policy, util, max);
> >> >     }
> >> > @@ -272,7 +272,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
> >> >     unsigned long util, max;
> >> >     unsigned int next_f;
> >> >
> >> > -   sugov_get_util(&util, &max);
> >> > +   sugov_get_util(&util, &max, hook->cpu);
> >> >
> >>
> >> And here?
> >>
> >> >     raw_spin_lock(&sg_policy->update_lock);
> >
> > The lock prevents the same here though.
> >
> > So, if we are going to use this series, then we can use the same update-lock in
> > case of single cpu per policies as well.
>
> No, we can't.
>
> The lock is unavoidable in the multi-CPU policies case, but there's no
> way I will agree on using a lock in the single-CPU case.

How do you suggest avoiding the locking here then? Some atomic
variable read/write as done in cpufreq_governor.c?

-- 
viresh
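
A minimal sketch of the kind of lock-free guard being suggested here,
assuming a hypothetical atomic flag in the policy data (the names are
illustrative and this is not the actual cpufreq_governor.c code):

struct sg_policy_example {
	atomic_t update_in_progress;	/* hypothetical: 1 while an update is in flight */
	/* ... */
};

static bool example_try_begin_update(struct sg_policy_example *sg)
{
	/* Only the CPU that flips 0 -> 1 proceeds; everyone else backs off. */
	return atomic_cmpxchg(&sg->update_in_progress, 0, 1) == 0;
}

static void example_end_update(struct sg_policy_example *sg)
{
	atomic_set(&sg->update_in_progress, 0);	/* allow the next update */
}

The trade-off is that a concurrent update is dropped rather than waited
for, which is usually acceptable for frequency updates since another
update will follow shortly anyway.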
Rafael J. Wysocki April 12, 2017, 10:53 p.m. UTC | #3
On Wed, Apr 12, 2017 at 4:26 PM, Viresh Kumar <viresh.kumar@linaro.org> wrote:
> On 11-04-17, 16:00, Rafael J. Wysocki wrote:
>> On Tue, Apr 11, 2017 at 12:35 PM, Viresh Kumar <viresh.kumar@linaro.org> wrote:
>> > On 29-03-17, 23:28, Rafael J. Wysocki wrote:
>> >> On Thursday, March 09, 2017 05:15:15 PM Viresh Kumar wrote:
>> >> > @@ -216,7 +216,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
>> >> >     if (flags & SCHED_CPUFREQ_RT_DL) {
>> >> >             next_f = policy->cpuinfo.max_freq;
>> >> >     } else {
>> >> > -           sugov_get_util(&util, &max);
>> >> > +           sugov_get_util(&util, &max, hook->cpu);
>> >>
>> >> Why is this not racy?
>> >
>> > Why would reading the utilization values be racy? The only dynamic value here is
>> > "util_avg" and I am not sure if reading it is racy.
>> >
>> > But, this whole routine has races which I ignored as we may end up updating
>> > frequency simultaneously from two threads.
>>
>> Those races aren't there if we don't update cross-CPU, which is my point. :-)
>
> Of course. There are no races without this series.
>
>> >> >             sugov_iowait_boost(sg_cpu, &util, &max);
>> >> >             next_f = get_next_freq(sg_policy, util, max);
>> >> >     }
>> >> > @@ -272,7 +272,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
>> >> >     unsigned long util, max;
>> >> >     unsigned int next_f;
>> >> >
>> >> > -   sugov_get_util(&util, &max);
>> >> > +   sugov_get_util(&util, &max, hook->cpu);
>> >> >
>> >>
>> >> And here?
>> >>
>> >> >     raw_spin_lock(&sg_policy->update_lock);
>> >
>> > The lock prevents the same here though.
>> >
>> > So, if we are going to use this series, then we can use the same update-lock in
>> > case of single cpu per policies as well.
>>
>> No, we can't.
>>
>> The lock is unavoidable in the multi-CPU policies case, but there's no
>> way I will agree on using a lock in the single-CPU case.
>
> How do you suggest avoiding the locking here then? Some atomic
> variable read/write as done in cpufreq_governor.c?

That is a very good question. :-)

I need to look at the scheduler code that invokes those things and see
what happens in there.  Chances are there already is some sufficient
mutual exclusion in place.
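
For reference, the scheduler-side call site in question is, roughly, the
cpufreq_update_util() helper in kernel/sched/sched.h; the snippet below is
a paraphrase from the kernel of this era for illustration, not part of
this patch:

static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
	struct update_util_data *data;

	/* Look up the governor hook registered for this CPU, if any. */
	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
	if (data)
		data->func(data, rq_clock(rq), flags);
}

Its callers in the fair, rt and deadline classes invoke it while holding
the runqueue lock, which is the kind of existing mutual exclusion being
alluded to above.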

Patch

diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index a418544c51b1..b168c31f1c8f 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -96,7 +96,7 @@  static void sugov_fast_switch(struct cpufreq_policy *policy,
 		return;
 
 	policy->cur = next_freq;
-	trace_cpu_frequency(next_freq, smp_processor_id());
+	trace_cpu_frequency(next_freq, policy->cpu);
 }
 
 static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
@@ -106,7 +106,7 @@  static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 
 	if (policy->fast_switch_enabled) {
 		if (sg_policy->next_freq == next_freq) {
-			trace_cpu_frequency(policy->cur, smp_processor_id());
+			trace_cpu_frequency(policy->cur, policy->cpu);
 			return;
 		}
 		sg_policy->next_freq = next_freq;
@@ -157,12 +157,12 @@  static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 	return cpufreq_driver_resolve_freq(policy, freq);
 }
 
-static void sugov_get_util(unsigned long *util, unsigned long *max)
+static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
 {
-	struct rq *rq = this_rq();
+	struct rq *rq = cpu_rq(cpu);
 	unsigned long cfs_max;
 
-	cfs_max = arch_scale_cpu_capacity(NULL, smp_processor_id());
+	cfs_max = arch_scale_cpu_capacity(NULL, cpu);
 
 	*util = min(rq->cfs.avg.util_avg, cfs_max);
 	*max = cfs_max;
@@ -216,7 +216,7 @@  static void sugov_update_single(struct update_util_data *hook, u64 time,
 	if (flags & SCHED_CPUFREQ_RT_DL) {
 		next_f = policy->cpuinfo.max_freq;
 	} else {
-		sugov_get_util(&util, &max);
+		sugov_get_util(&util, &max, hook->cpu);
 		sugov_iowait_boost(sg_cpu, &util, &max);
 		next_f = get_next_freq(sg_policy, util, max);
 	}
@@ -272,7 +272,7 @@  static void sugov_update_shared(struct update_util_data *hook, u64 time,
 	unsigned long util, max;
 	unsigned int next_f;
 
-	sugov_get_util(&util, &max);
+	sugov_get_util(&util, &max, hook->cpu);
 
 	raw_spin_lock(&sg_policy->update_lock);