[2/2] cpufreq: ppc-corenet: remove per-cpu variable 'cpu_mask'

Message ID 3d467a39ab1b96dc24ed8a71139d5e030239477f.1409629117.git.viresh.kumar@linaro.org
State New
Headers show

Commit Message

Viresh Kumar Sept. 2, 2014, 3:41 a.m. UTC
We are copying cpu_core_mask(cpu) into a per-cpu variable, 'cpu_mask'. There
is no need for this; the copies can be replaced by direct calls to cpu_core_mask().

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
---
 drivers/cpufreq/ppc-corenet-cpufreq.c | 26 +++-----------------------
 1 file changed, 3 insertions(+), 23 deletions(-)

Comments

Tang Yuantian Sept. 2, 2014, 6:46 a.m. UTC | #1
Hello Viresh:

Probably it is not right, considering the CPU cluster and CPU hotplug cases.

Hongtao:
Please verify it on the t4240 platform, which has clusters.

Thanks,
Yuantian

> -----Original Message-----
> From: Viresh Kumar [mailto:viresh.kumar@linaro.org]
> Sent: Tuesday, September 02, 2014 11:41 AM
> To: Rafael Wysocki; Tang Yuantian-B29983
> Cc: linaro-kernel@lists.linaro.org; linux-pm@vger.kernel.org; Zhang
> Hongbo-B45939; Li Yang-Leo-R58472; Viresh Kumar
> Subject: [PATCH 2/2] cpufreq: ppc-corenet: remove per-cpu variable 'cpu_mask'
> 
> We are copying cpu_core_mask(cpu) into a per-cpu variable, 'cpu_mask'.
> There is no need for this; the copies can be replaced by direct calls to cpu_core_mask().
> 
> Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
> ---
>  drivers/cpufreq/ppc-corenet-cpufreq.c | 26 +++-----------------------
>  1 file changed, 3 insertions(+), 23 deletions(-)
> 
> diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c
> b/drivers/cpufreq/ppc-corenet-cpufreq.c
> index bee5df7..dbcac39 100644
> --- a/drivers/cpufreq/ppc-corenet-cpufreq.c
> +++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
> @@ -69,9 +69,6 @@ static const u32 *fmask;
> 
>  static DEFINE_PER_CPU(struct cpu_data *, cpu_data);
> 
> -/* cpumask in a cluster */
> -static DEFINE_PER_CPU(cpumask_var_t, cpu_mask);
> -
>  #ifndef CONFIG_SMP
>  static inline const struct cpumask *cpu_core_mask(int cpu)  { @@ -201,8
> +198,8 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
>  	data->table = table;
> 
>  	/* update ->cpus if we have cluster, no harm if not */
> -	cpumask_copy(policy->cpus, per_cpu(cpu_mask, cpu));
> -	for_each_cpu(i, per_cpu(cpu_mask, cpu))
> +	cpumask_copy(policy->cpus, cpu_core_mask(cpu));
> +	for_each_cpu(i, cpu_core_mask(cpu))
>  		per_cpu(cpu_data, i) = data;
> 
>  	/* Minimum transition latency is 12 platform clocks */ @@ -236,7 +233,7
> @@ static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy)
>  	kfree(data->table);
>  	kfree(data);
> 
> -	for_each_cpu(cpu, per_cpu(cpu_mask, policy->cpu))
> +	for_each_cpu(cpu, cpu_core_mask(policy->cpu))
>  		per_cpu(cpu_data, cpu) = NULL;
> 
>  	return 0;
> @@ -285,12 +282,6 @@ static int __init ppc_corenet_cpufreq_init(void)
>  	if (!np)
>  		return -ENODEV;
> 
> -	for_each_possible_cpu(cpu) {
> -		if (!alloc_cpumask_var(&per_cpu(cpu_mask, cpu), GFP_KERNEL))
> -			goto err_mask;
> -		cpumask_copy(per_cpu(cpu_mask, cpu), cpu_core_mask(cpu));
> -	}
> -
>  	match = of_match_node(node_matches, np);
>  	data = match->data;
>  	if (data) {
> @@ -308,22 +299,11 @@ static int __init ppc_corenet_cpufreq_init(void)
>  		pr_info("Freescale PowerPC corenet CPU frequency scaling driver\n");
> 
>  	return ret;
> -
> -err_mask:
> -	for_each_possible_cpu(cpu)
> -		free_cpumask_var(per_cpu(cpu_mask, cpu));
> -
> -	return -ENOMEM;
>  }
>  module_init(ppc_corenet_cpufreq_init);
> 
>  static void __exit ppc_corenet_cpufreq_exit(void)  {
> -	unsigned int cpu;
> -
> -	for_each_possible_cpu(cpu)
> -		free_cpumask_var(per_cpu(cpu_mask, cpu));
> -
>  	cpufreq_unregister_driver(&ppc_corenet_cpufreq_driver);
>  }
>  module_exit(ppc_corenet_cpufreq_exit);
> --
> 2.0.3.693.g996b0fd

Viresh Kumar Sept. 2, 2014, 6:49 a.m. UTC | #2
On 2 September 2014 12:16, Yuantian Tang <Yuantian.Tang@freescale.com> wrote:
> Hello Viresh:
>
> Probably it is not right considering the CPU cluster and CPU hotplug case.

So in the case of hotplug, the worst that can happen is that policy->cpu gets
updated. But because all we are concerned about here is cpu_data, which is
stored in per-cpu variables and is updated for all online CPUs, it should work.

I would like to see how that fails :)
Tang Yuantian Sept. 2, 2014, 7:02 a.m. UTC | #3
I am saying your patch 2 probably is wrong, not patch 1.
Consider the following case:
On the T4240 platform, we have 3 clusters with 8 CPUs in each cluster.
We offline CPUs 4 5 6 7 and then online them back.
CPU 4's policy->cpus is 0, 1, 2, 3, 4
CPU 5's policy->cpus is 0, 1, 2, 3, 4, 5.
....
So CPU 4's and CPU 5's policy->cpus are not right; they should be 0, 1, 2, 3, 4, 5, 6, 7.

Thanks,
Yuantian

> -----Original Message-----
> From: Viresh Kumar [mailto:viresh.kumar@linaro.org]
> Sent: Tuesday, September 02, 2014 2:49 PM
> To: Tang Yuantian-B29983
> Cc: Rafael Wysocki; linaro-kernel@lists.linaro.org; linux-pm@vger.kernel.org;
> Zhang Hongbo-B45939; Li Yang-Leo-R58472
> Subject: Re: [PATCH 2/2] cpufreq: ppc-corenet: remove per-cpu variable
> 'cpu_mask'
>
> On 2 September 2014 12:16, Yuantian Tang <Yuantian.Tang@freescale.com>
> wrote:
> > Hello Viresh:
> >
> > Probably it is not right, considering the CPU cluster and CPU hotplug cases.
>
> So in the case of hotplug, the worst that can happen is that policy->cpu gets
> updated. But because all we are concerned about here is cpu_data, which is
> stored in per-cpu variables and is updated for all online CPUs, it should work.
>
> I would like to see how that fails :)
Viresh Kumar Sept. 2, 2014, 7:09 a.m. UTC | #4
On 2 September 2014 12:32, Yuantian Tang <Yuantian.Tang@freescale.com> wrote:
> I am saying your patch 2 probably is wrong, not patch 1.

Okay, initially it looked like both were screwed up :)

> Consider the following case:
> On the T4240 platform, we have 3 clusters with 8 CPUs in each cluster.
> We offline CPUs 4 5 6 7 and then online them back.

i.e. the last four CPUs of the first cluster..

> CPU 4's policy->cpus is 0, 1, 2, 3, 4
> CPU 5's policy->cpus is 0, 1, 2, 3, 4, 5.

How? And how is this patch going to touch policy->cpus?
CPUs 0-7 should be sharing a single 'struct cpufreq_policy',
and so policy->cpus should be the same for all..
Tang Yuantian Sept. 2, 2014, 8:08 a.m. UTC | #5
If policy->cpus is the same for all CPUs in a cluster, that should not be a problem.
Probably there's something else that needs checking.
I have a feeling that your patch may not be correct. I hope it works, though.

Thanks,
Yuantian

> -----Original Message-----
> From: Viresh Kumar [mailto:viresh.kumar@linaro.org]
> Sent: Tuesday, September 02, 2014 3:10 PM
> To: Tang Yuantian-B29983
> Cc: Rafael Wysocki; linaro-kernel@lists.linaro.org; linux-pm@vger.kernel.org;
> Zhang Hongbo-B45939; Li Yang-Leo-R58472
> Subject: Re: [PATCH 2/2] cpufreq: ppc-corenet: remove per-cpu variable
> 'cpu_mask'
>
> On 2 September 2014 12:32, Yuantian Tang <Yuantian.Tang@freescale.com>
> wrote:
> > I am saying your patch 2 probably is wrong, not patch 1.
>
> Okay, initially it looked like both were screwed up :)
>
> > Consider the following case:
> > On the T4240 platform, we have 3 clusters with 8 CPUs in each cluster.
> > We offline CPUs 4 5 6 7 and then online them back.
>
> i.e. the last four CPUs of the first cluster..
>
> > CPU 4's policy->cpus is 0, 1, 2, 3, 4
> > CPU 5's policy->cpus is 0, 1, 2, 3, 4, 5.
>
> How? And how is this patch going to touch policy->cpus?
> CPUs 0-7 should be sharing a single 'struct cpufreq_policy',
> and so policy->cpus should be the same for all..
Tang Yuantian Sept. 3, 2014, 8:02 a.m. UTC | #6
Hello Viresh,

Testing confirmed my concern. Patch 1 works, but patch 2 does not work in the hotplug case
(tested on the t4240 platform with 3 clusters and 8 CPUs in each cluster; kernel version is 3.12).

Log is as follows:
root@t4240rdb:~# cd /sys/devices/system/
root@t4240rdb:/sys/devices/system# ls
clockevents  clocksource  cpu  edac
root@t4240rdb:/sys/devices/system# cd cpu/
root@t4240rdb:/sys/devices/system/cpu# ls
cpu0  cpu1  cpu10  cpu11  cpu12  cpu13  cpu14  cpu15  cpu16  cpu17  cpu18  cpu19  cpu2  cpu20  cpu21  cpu22  cpu23  cpu3  cpu4  cpu5  cpu6  cpu7  cpu8  cpu9  kernel_max  offline  online  possible  present  probe  release  uevent
root@t4240rdb:/sys/devices/system/cpu# cat cpu8/
altivec_idle            altivec_idle_wait_time  cache/                  cpufreq/                online                  physical_id             pir                     pw20_state              pw20_wait_time          smt_snooze_delay        subsystem/              topology/               uevent
root@t4240rdb:/sys/devices/system/cpu# cat cpu8/cpufreq/affected_cpus
8 9 10 11 12 13 14 15
root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu8/online
Cannot set affinity for irq 467
root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu9/online
root@t4240rdb:/sys/devices/system/cpu# cat cpu10/cpufreq/affected_cpus
10 11 12 13 14 15
root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu10/online
root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu11/online
root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu12/online
root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu13/online
root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu14/online
root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu15/online
root@t4240rdb:/sys/devices/system/cpu# echo 1 > cpu9/online
smp_85xx_kick_cpu: Can not start CPU #9. Start CPU #8 first.
smp: failed starting cpu 9 (rc -2)
-sh: echo: write error: No such file or directory
root@t4240rdb:/sys/devices/system/cpu# echo 1 > cpu10/online
root@t4240rdb:/sys/devices/system/cpu# cat cpu10/cpufreq/affected_cpus
10
root@t4240rdb:/sys/devices/system/cpu# echo 1 > cpu11/online
sysfs: cannot create duplicate filename '/devices/system/cpu/cpu10/cpufreq'
------------[ cut here ]------------
WARNING: at fs/sysfs/dir.c:526
Modules linked in:
CPU: 18 PID: 2658 Comm: sh Not tainted 3.12.19-rt30-00018-g7eac4c0-dirty #2
task: c00000015fa0b440 ti: c00000007a54c000 task.ti: c00000007a54c000
NIP: c00000000018ea34 LR: c00000000018ea30 CTR: c000000000304330
REGS: c00000007a54f340 TRAP: 0700   Not tainted  (3.12.19-rt30-00018-g7eac4c0-dirty)
MSR: 0000000080029000 <CE,EE,ME>  CR: 44222444  XER: 20000000
SOFTE: 1

GPR00: c00000000018ea30 c00000007a54f5c0 c000000000b4d1a0 000000000000004b
GPR04: 0000000044222444 c000000009620e70 0000000000000008 0000000000000008
GPR08: 0000000000000000 0000000000000001 80000800803b8500 0000000000000000
GPR12: 0000000024222442 c00000000fffc700 0000000000000000 00000000100e0000
GPR16: 00000000100daea4 00000000100e0000 c0000000009738b8 c000000179a29e58
GPR20: c000000000a42680 c000000000c0d1a0 c000000000b57c20 c000000000b57600
GPR24: 0000000000000000 c000000000a789e0 0000000000000001 0000000000000000
GPR28: c00000017545c000 c00000007a54f6c0 c000000179962278 ffffffffffffffef
NIP [c00000000018ea34] .sysfs_add_one+0xd0/0xec
LR [c00000000018ea30] .sysfs_add_one+0xcc/0xec
Call Trace:
[c00000007a54f5c0] [c00000000018ea30] .sysfs_add_one+0xcc/0xec (unreliable)
[c00000007a54f650] [c00000000018faf8] .sysfs_do_create_link_sd+0xfc/0x2bc
[c00000007a54f700] [c00000000056468c] .__cpufreq_add_dev.isra.24+0x728/0x768
[c00000007a54f7e0] [c000000000564758] .cpufreq_cpu_callback+0x84/0x108
[c00000007a54f860] [c000000000067218] .notifier_call_chain+0x80/0xe8
[c00000007a54f900] [c00000000003be20] .__cpu_notify+0x34/0x78
[c00000007a54f970] [c00000000003c5cc] .cpu_up+0x194/0x1e4
[c00000007a54fa20] [c0000000005f6318] .cpu_subsys_online+0x24/0x4c
[c00000007a54fab0] [c0000000003173dc] .device_online+0xa4/0x104
[c00000007a54fb40] [c0000000003174c4] .online_store+0x88/0x90
[c00000007a54fbd0] [c0000000003143d0] .dev_attr_store+0x30/0x4c
[c00000007a54fc40] [c00000000018c98c] .sysfs_write_file+0xe8/0x1ac
[c00000007a54fcf0] [c00000000011324c] .vfs_write+0xe8/0x21c
[c00000007a54fd90] [c000000000113858] .SyS_write+0x58/0xcc
[c00000007a54fe30] [c000000000000598] syscall_exit+0x0/0x8c
Instruction dump:
4810e775 60000000 e89e0010 7f83e378 38a01000 4810e761 60000000 7f84e378
3c62ffde 38630e60 485b3755 60000000 <0fe00000> 7f83e378 4bf7d43d 60000000
---[ end trace 275aa88102f60f48 ]---
root@t4240rdb:/sys/devices/system/cpu#

The original driver is OK; the log is:
t4240rdb login: root
root@t4240rdb:~#
root@t4240rdb:~#
root@t4240rdb:~#
root@t4240rdb:~# cd /sys/devices/system/cpu/
root@t4240rdb:/sys/devices/system/cpu# cat cpu8/cpufreq/affected_cpus
8 9 10 11 12 13 14 15
root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu8/online
Cannot set affinity for irq 467
root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu9/online
root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu10/online
root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu11/online
root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu12/online
root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu13/online
root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu14/online
root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu15/online
root@t4240rdb:/sys/devices/system/cpu# echo 1 > cpu14/online
root@t4240rdb:/sys/devices/system/cpu# echo 1 > cpu15/online
root@t4240rdb:/sys/devices/system/cpu# cat cpu14/cpufreq/affected_cpus
14 15
root@t4240rdb:/sys/devices/system/cpu# cat cpu15/cpufreq/affected_cpus
14 15
root@t4240rdb:/sys/devices/system/cpu# echo 1 > cpu12/online
root@t4240rdb:/sys/devices/system/cpu# echo 1 > cpu13/online
root@t4240rdb:/sys/devices/system/cpu# cat cpu15/cpufreq/affected_cpus
12 13 14 15
root@t4240rdb:/sys/devices/system/cpu# echo 1 > cpu10/online
root@t4240rdb:/sys/devices/system/cpu# echo 1 > cpu11/online
root@t4240rdb:/sys/devices/system/cpu# cat cpu15/cpufreq/affected_cpus
10 11 12 13 14 15
root@t4240rdb:/sys/devices/system/cpu# echo 1 > cpu8/online
root@t4240rdb:/sys/devices/system/cpu# echo 1 > cpu9/online
root@t4240rdb:/sys/devices/system/cpu# cat cpu15/cpufreq/affected_cpus
8 9 10 11 12 13 14 15
root@t4240rdb:/sys/devices/system/cpu# cat cpu9/cpufreq/affected_cpus
8 9 10 11 12 13 14 15
root@t4240rdb:/sys/devices/system/cpu#
root@t4240rdb:/sys/devices/system/cpu# uname -a
Linux t4240rdb 3.12.19-rt30-00018-g7eac4c0-dirty #4 SMP Wed Sep 3 14:56:49 CST 2014 ppc64 GNU/Linux
root@t4240rdb:/sys/devices/system/cpu#

Thanks,
Yuantian

> -----Original Message-----
> From: Viresh Kumar [mailto:viresh.kumar@linaro.org]
> Sent: Tuesday, September 02, 2014 3:10 PM
> To: Tang Yuantian-B29983
> Cc: Rafael Wysocki; linaro-kernel@lists.linaro.org; linux-pm@vger.kernel.org;
> Zhang Hongbo-B45939; Li Yang-Leo-R58472
> Subject: Re: [PATCH 2/2] cpufreq: ppc-corenet: remove per-cpu variable
> 'cpu_mask'
>
> On 2 September 2014 12:32, Yuantian Tang <Yuantian.Tang@freescale.com>
> wrote:
> > I am saying your patch 2 probably is wrong, not patch 1.
>
> Okay, initially it looked like both were screwed up :)
>
> > Consider the following case:
> > On the T4240 platform, we have 3 clusters with 8 CPUs in each cluster.
> > We offline CPUs 4 5 6 7 and then online them back.
>
> i.e. the last four CPUs of the first cluster..
>
> > CPU 4's policy->cpus is 0, 1, 2, 3, 4
> > CPU 5's policy->cpus is 0, 1, 2, 3, 4, 5.
>
> How? And how is this patch going to touch policy->cpus?
> CPUs 0-7 should be sharing a single 'struct cpufreq_policy',
> and so policy->cpus should be the same for all..
Viresh Kumar Sept. 3, 2014, 9:50 a.m. UTC | #7
On 3 September 2014 13:32, Yuantian Tang <Yuantian.Tang@freescale.com> wrote:
> Testing confirmed my concern. Patch 1 works, but patch 2 does not work in the hotplug case.
> (tested on the t4240 platform with 3 clusters and 8 CPUs in each cluster; kernel version is 3.12).

:(

> Log is as follows:
> root@t4240rdb:~# cd /sys/devices/system/
> root@t4240rdb:/sys/devices/system# ls
> clockevents  clocksource  cpu  edac
> root@t4240rdb:/sys/devices/system# cd cpu/
> root@t4240rdb:/sys/devices/system/cpu# ls
> cpu0  cpu1  cpu10  cpu11  cpu12  cpu13  cpu14  cpu15  cpu16  cpu17  cpu18  cpu19  cpu2  cpu20  cpu21  cpu22  cpu23  cpu3  cpu4  cpu5  cpu6  cpu7  cpu8  cpu9  kernel_max  offline  online  possible  present  probe  release  uevent
> root@t4240rdb:/sys/devices/system/cpu# cat cpu8/
> altivec_idle            altivec_idle_wait_time  cache/                  cpufreq/                online                  physical_id             pir                     pw20_state              pw20_wait_time          smt_snooze_delay        subsystem/              topology/               uevent
> root@t4240rdb:/sys/devices/system/cpu# cat cpu8/cpufreq/affected_cpus
> 8 9 10 11 12 13 14 15
> root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu8/online
> Cannot set affinity for irq 467
> root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu9/online
> root@t4240rdb:/sys/devices/system/cpu# cat cpu10/cpufreq/affected_cpus
> 10 11 12 13 14 15
> root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu10/online
> root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu11/online
> root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu12/online
> root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu13/online
> root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu14/online
> root@t4240rdb:/sys/devices/system/cpu# echo 0 > cpu15/online
> root@t4240rdb:/sys/devices/system/cpu# echo 1 > cpu9/online
> smp_85xx_kick_cpu: Can not start CPU #9. Start CPU #8 first.
> smp: failed starting cpu 9 (rc -2)
> -sh: echo: write error: No such file or directory

I don't understand how this is related to my patch. And the successful
sequence below has CPU 14 here instead of 8.. so this error might come
without my patch as well, if this sequence is followed..

> root@t4240rdb:/sys/devices/system/cpu# echo 1 > cpu10/online
> root@t4240rdb:/sys/devices/system/cpu# cat cpu10/cpufreq/affected_cpus
> 10
> root@t4240rdb:/sys/devices/system/cpu# echo 1 > cpu11/online
> sysfs: cannot create duplicate filename '/devices/system/cpu/cpu10/cpufreq'
> ------------[ cut here ]------------
> WARNING: at fs/sysfs/dir.c:526

I think I know why this happened.

Does cpu_core_mask() give a mask of ONLY online CPUs? I *strongly* believe
YES, looking at the above crash.

If we can replace it with some other routine that gives the mask of all
(online+offline) CPUs, then this crash might go away..
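
To put that in code, a rough sketch (the second helper is hypothetical;
nothing like it exists on this platform today, which is exactly the problem):

	/* Online-only: cpu_core_mask() covers only ONLINE siblings, so with
	 * CPUs 11-15 still offline, CPU 10's policy covers just {10}. When
	 * CPU 11 comes up, its new policy includes CPU 10 again, and the
	 * sysfs link creation collides: the "duplicate filename" warning.
	 */
	cpumask_copy(policy->cpus, cpu_core_mask(policy->cpu));

	/* What would avoid the crash: a mask that also includes offline
	 * siblings (hypothetical helper, not available here).
	 */
	cpumask_copy(policy->cpus, cpu_possible_core_mask(policy->cpu));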

Because of that, I believe there is another problem in your driver even
without my patch. Do this to get a similar crash:

- build driver as module and don't load it by default
- boot your system
- offline CPUs 8 to 15 as you did above.
- insert module
- try to online cores as you did above.

--
viresh
Tang Yuantian Sept. 4, 2014, 2:51 a.m. UTC | #8
Can't agree with you more.
As far as I know, there is no such function to get the mask of online and offline CPUs in a cluster.

Thanks,
Yuantian

> -----Original Message-----
> From: Viresh Kumar [mailto:viresh.kumar@linaro.org]
> Sent: Wednesday, September 03, 2014 5:50 PM
> To: Tang Yuantian-B29983
> Cc: Rafael Wysocki; linaro-kernel@lists.linaro.org; linux-pm@vger.kernel.org; Li
> Yang-Leo-R58472; Jia Hongtao-B38951
> Subject: Re: [PATCH 2/2] cpufreq: ppc-corenet: remove per-cpu variable
> 'cpu_mask'
>
> On 3 September 2014 13:32, Yuantian Tang <Yuantian.Tang@freescale.com>
> wrote:
> > Testing confirmed my concern. Patch 1 works, but patch 2 does not work in
> the hotplug case.
> > (tested on the t4240 platform with 3 clusters and 8 CPUs in each cluster;
> kernel version is 3.12).
>
> [...]
>
> > root@t4240rdb:/sys/devices/system/cpu# echo 1 > cpu11/online
> > sysfs: cannot create duplicate filename '/devices/system/cpu/cpu10/cpufreq'
> > ------------[ cut here ]------------
> > WARNING: at fs/sysfs/dir.c:526
>
> I think I know why this happened.
>
> Does cpu_core_mask() give a mask of ONLY online CPUs? I *strongly* believe
> YES, looking at the above crash.
>
> If we can replace it with some other routine that gives the mask of all
> (online+offline) CPUs, then this crash might go away..
>
> Because of that, I believe there is another problem in your driver even
> without my patch. Do this to get a similar crash:
>
> - build driver as module and don't load it by default
> - boot your system
> - offline CPUs 8 to 15 as you did above.
> - insert module
> - try to online cores as you did above.
>
> --
> viresh
Viresh Kumar Sept. 4, 2014, 3:38 a.m. UTC | #9
On 4 September 2014 08:21, Yuantian Tang <Yuantian.Tang@freescale.com> wrote:
> Can't agree with you more.

It's not about upstreaming my patch but doing the right thing.

> As far as I know, there is no such function to get the mask of online and offline CPUs in a cluster.

This is a primary requirement of the cpufreq subsystem: the ->init()
callback must initialize policy->cpus to the mask of all online+offline
CPUs that share a clock.
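
As a sketch of that contract (driver and helper names here are placeholders,
not real APIs):

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	/*
	 * Must cover every CPU sharing a clock with policy->cpu, whether
	 * it is online or offline right now; the cpufreq core ANDs this
	 * with cpu_online_mask itself afterwards.
	 */
	cpumask_copy(policy->cpus, foo_clock_siblings(policy->cpu));
	return 0;
}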

Okay, then what I can say is that your driver is already broken (without
my patch) when it is used as a module.

>> - build driver as module and don't load it by default
>> - boot your system
>> - offline CPUs 8 to 15 as you did above.
>> - insert module
>> - try to online cores as you did above.

The above sequence of events would reproduce a similar crash for you. So
either fix this problem or mark your Kconfig option as 'bool' instead of
'tristate' so that the driver can't be compiled as a module anymore.

--
viresh
Tang Yuantian Sept. 4, 2014, 4:33 a.m. UTC | #10
> -----Original Message-----
> From: Viresh Kumar [mailto:viresh.kumar@linaro.org]
> Sent: Thursday, September 04, 2014 11:38 AM
> To: Tang Yuantian-B29983
> Cc: Rafael Wysocki; linaro-kernel@lists.linaro.org; linux-pm@vger.kernel.org; Li
> Yang-Leo-R58472; Jia Hongtao-B38951
> Subject: Re: [PATCH 2/2] cpufreq: ppc-corenet: remove per-cpu variable
> 'cpu_mask'
>
> On 4 September 2014 08:21, Yuantian Tang <Yuantian.Tang@freescale.com>
> wrote:
> > Can't agree with you more.
>
> It's not about upstreaming my patch but doing the right thing.
>
> > As far as I know, there is no such function to get the mask of online and
> offline CPUs in a cluster.
>
> This is a primary requirement of the cpufreq subsystem: the ->init()
> callback must initialize policy->cpus to the mask of all online+offline
> CPUs that share a clock.
>
> Okay, then what I can say is that your driver is already broken (without
> my patch) when it is used as a module.
>
> >> - build driver as module and don't load it by default
> >> - boot your system
> >> - offline CPUs 8 to 15 as you did above.
> >> - insert module
> >> - try to online cores as you did above.
>
> The above sequence of events would reproduce a similar crash for you. So
> either fix this problem or mark your Kconfig option as 'bool' instead of
> 'tristate' so that the driver can't be compiled as a module anymore.

What about the other arches? Can they deal with this situation?

Thanks,
Yuantian

> --
> viresh
Viresh Kumar Sept. 4, 2014, 4:37 a.m. UTC | #11
On 4 September 2014 10:03, Yuantian Tang <Yuantian.Tang@freescale.com> wrote:
> What about the other arches? Can they deal with this situation?

Yes for most of them (especially ARM), because they fulfill the
requirement of the CPUFreq core, i.e. they set policy->cpus to all
online+offline CPUs.
Tang Yuantian Sept. 4, 2014, 7:43 a.m. UTC | #12
OK, I see. Seems like we don't have a choice but to make it bool.
I will try to make it better later.

Thanks,
Yuantian

> -----Original Message-----
> From: Viresh Kumar [mailto:viresh.kumar@linaro.org]
> Sent: Thursday, September 04, 2014 12:37 PM
> To: Tang Yuantian-B29983
> Cc: Rafael Wysocki; linaro-kernel@lists.linaro.org; linux-pm@vger.kernel.org; Li
> Yang-Leo-R58472; Jia Hongtao-B38951
> Subject: Re: [PATCH 2/2] cpufreq: ppc-corenet: remove per-cpu variable
> 'cpu_mask'
>
> On 4 September 2014 10:03, Yuantian Tang <Yuantian.Tang@freescale.com>
> wrote:
> > What about the other arches? Can they deal with this situation?
>
> Yes for most of them (especially ARM), because they fulfill the requirement of
> the CPUFreq core, i.e. they set policy->cpus to all online+offline CPUs.
Viresh Kumar Sept. 4, 2014, 7:52 a.m. UTC | #13
On 4 September 2014 13:13, Yuantian Tang <Yuantian.Tang@freescale.com> wrote:
> OK, I see. Seems like we don't have a choice but to make it bool.

I am quite surprised that the architecture doesn't provide such a
macro.

I have added a few powerpc guys (mostly maintainers) to check what it would
take to get such a macro..

What we want: a routine/macro that returns the mask of all CPUs/threads
sharing a clock line. cpu_core_mask() gives something similar, but only for
online CPUs, and we want that for online+offline CPUs.


Btw, another powerpc driver is doing this:
drivers/cpufreq/powernv-cpufreq.c

base = cpu_first_thread_sibling(policy->cpu);
for (i = 0; i < threads_per_core; i++)
        cpumask_set_cpu(base + i, policy->cpus);


Will this work for you as well ?
Benjamin Herrenschmidt Sept. 4, 2014, 7:59 a.m. UTC | #14
On Thu, 2014-09-04 at 13:22 +0530, Viresh Kumar wrote:
> On 4 September 2014 13:13, Yuantian Tang <Yuantian.Tang@freescale.com> wrote:
> > OK, I see. Seems like we don't have a choice but to make it bool.
> 
> I am quite surprised that the architecture doesn't provide such a
> macro.
>
> I have added a few powerpc guys (mostly maintainers) to check what it would
> take to get such a macro..
>
> What we want: a routine/macro that returns the mask of all CPUs/threads
> sharing a clock line.

What do you mean by "clock line" ? This is a very .... odd concept here.
On P8 we have a reference clock (well several) going into the package,
which can contain multiple dies, which contain multiple cores which can
operate at different frequencies but it's not really a "clock line".

And every implementation out there would do things differently. I don't
know what Freescale does but you can't compare.

The architecture doesn't provide such a macro because the link between CPUs
in a cpufreq entity is a cpufreq-specific concept. For example, we could have
a chip where cores have P-states in pairs but are otherwise completely
independent. So it's up to the cpufreq driver for the given platform to
handle that.

I don't see how we could provide a "macro" ... we might be able to
represent the pstate domains in some way and provide some kind of
architecture function for that but it wouldn't be a macro. It happens
that today on P8 we control the pstates per core but that might not be
always the case for example.

> cpu_core_mask() gives something similar, but only for online CPUs,
> and we want that for online+offline CPUs.

Correct.

> Btw, another powerpc driver is doing this:
> drivers/cpufreq/powernv-cpufreq.c
> 
> base = cpu_first_thread_sibling(policy->cpu);
> for (i = 0; i < threads_per_core; i++)
>         cpumask_set_cpu(base + i, policy->cpus);
> 
> 
> Will this work for you as well ?

That's P8 native. It currently uses the core as the entity, but that
probably needs to be improved based on some device-tree property for
long-term solidity.

Ben.



Viresh Kumar Sept. 4, 2014, 8:19 a.m. UTC | #15
On 4 September 2014 13:29, Benjamin Herrenschmidt
<benh@kernel.crashing.org> wrote:
> What do you mean by "clock line" ? This is a very .... odd concept here.

We want to know which CPUs change their P-state together, as cpufreq
treats them as a group (represented by a single struct cpufreq_policy).
While changing the frequency of a core, cpufreq governors look at the
load of all CPUs within that group and choose the frequency that
satisfies the CPU with the highest load.

> The architecture doesn't provide such a macro because the link between CPUs
> in a cpufreq entity is a cpufreq-specific concept. For example, we could have
> a chip where cores have P-states in pairs but are otherwise completely
> independent.

You mean the possible P-states for the CPUs are the same, but they change
them separately?

If yes, then this isn't what I meant by a single clock-line..

> I don't see how we could provide a "macro" ... we might be able to
> represent the pstate domains in some way and provide some kind of
> architecture function for that but it wouldn't be a macro. It happens
> that today on P8 we control the pstates per core but that might not be
> always the case for example.

I see. But a cpufreq driver like "ppc-corenet-cpufreq.c" should be able
to find out which CPUs change P-states together, just like powernv..

And that's what we wanted to know for this driver.

>> Btw, another powerpc driver is doing this:
>> drivers/cpufreq/powernv-cpufreq.c
>>
>> base = cpu_first_thread_sibling(policy->cpu);
>> for (i = 0; i < threads_per_core; i++)
>>         cpumask_set_cpu(base + i, policy->cpus);
>>
>>
>> Will this work for you as
>
> That's P8 native. It currently use the core as the entity but that
> probably needs to be improved based on some device-tree property for
> long term solidity.

I see..
Tang Yuantian Sept. 5, 2014, 9:35 a.m. UTC | #16
> -----Original Message-----
> From: Viresh Kumar [mailto:viresh.kumar@linaro.org]
> Sent: Thursday, September 04, 2014 12:37 PM
> To: Tang Yuantian-B29983
> Cc: Rafael Wysocki; linaro-kernel@lists.linaro.org; linux-pm@vger.kernel.org; Li
> Yang-Leo-R58472; Jia Hongtao-B38951
> Subject: Re: [PATCH 2/2] cpufreq: ppc-corenet: remove per-cpu variable
> 'cpu_mask'
>
> On 4 September 2014 10:03, Yuantian Tang <Yuantian.Tang@freescale.com>
> wrote:
> > What about the other arches? Can they deal with this situation?
>
> Yes for most of them (especially ARM), because they fulfill the requirement of
> the CPUFreq core, i.e. they set policy->cpus to all online+offline CPUs.

What's the name of the function on ARM that gets the mask of all online and
offline CPUs?

We want this driver to be used on ARM as well, so how should the Kconfig be
written? As you know, we already have a CONFIG_* item in Kconfig.powerpc.

Thanks,
Yuantian
Viresh Kumar Sept. 5, 2014, 9:40 a.m. UTC | #17
On 5 September 2014 15:05, Yuantian Tang <Yuantian.Tang@freescale.com> wrote:
> What's the name of the function on ARM that gets the mask of all online and offline CPUs?

You can have a look at arm_big_little.c for this; it's topology_core_cpumask().

Also, this might not work for everybody, so we are trying to get this
information from DT, but it is taking some time to get the bindings right.
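
For reference, a minimal sketch of what the arm_big_little.c approach amounts
to in ->init() (this assumes the architecture implements topology_core_cpumask()
over possible CPUs so that offline siblings are included, and that core siblings
really do share a clock; both need checking per platform):

	cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));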

> We want this driver to be used on ARM as well, so how should the Kconfig be
> written? As you know, we already have a CONFIG_* item in Kconfig.powerpc.

Just duplicate them.

Patch

diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index bee5df7..dbcac39 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -69,9 +69,6 @@  static const u32 *fmask;
 
 static DEFINE_PER_CPU(struct cpu_data *, cpu_data);
 
-/* cpumask in a cluster */
-static DEFINE_PER_CPU(cpumask_var_t, cpu_mask);
-
 #ifndef CONFIG_SMP
 static inline const struct cpumask *cpu_core_mask(int cpu)
 {
@@ -201,8 +198,8 @@  static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	data->table = table;
 
 	/* update ->cpus if we have cluster, no harm if not */
-	cpumask_copy(policy->cpus, per_cpu(cpu_mask, cpu));
-	for_each_cpu(i, per_cpu(cpu_mask, cpu))
+	cpumask_copy(policy->cpus, cpu_core_mask(cpu));
+	for_each_cpu(i, cpu_core_mask(cpu))
 		per_cpu(cpu_data, i) = data;
 
 	/* Minimum transition latency is 12 platform clocks */
@@ -236,7 +233,7 @@  static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 	kfree(data->table);
 	kfree(data);
 
-	for_each_cpu(cpu, per_cpu(cpu_mask, policy->cpu))
+	for_each_cpu(cpu, cpu_core_mask(policy->cpu))
 		per_cpu(cpu_data, cpu) = NULL;
 
 	return 0;
@@ -285,12 +282,6 @@  static int __init ppc_corenet_cpufreq_init(void)
 	if (!np)
 		return -ENODEV;
 
-	for_each_possible_cpu(cpu) {
-		if (!alloc_cpumask_var(&per_cpu(cpu_mask, cpu), GFP_KERNEL))
-			goto err_mask;
-		cpumask_copy(per_cpu(cpu_mask, cpu), cpu_core_mask(cpu));
-	}
-
 	match = of_match_node(node_matches, np);
 	data = match->data;
 	if (data) {
@@ -308,22 +299,11 @@  static int __init ppc_corenet_cpufreq_init(void)
 		pr_info("Freescale PowerPC corenet CPU frequency scaling driver\n");
 
 	return ret;
-
-err_mask:
-	for_each_possible_cpu(cpu)
-		free_cpumask_var(per_cpu(cpu_mask, cpu));
-
-	return -ENOMEM;
 }
 module_init(ppc_corenet_cpufreq_init);
 
 static void __exit ppc_corenet_cpufreq_exit(void)
 {
-	unsigned int cpu;
-
-	for_each_possible_cpu(cpu)
-		free_cpumask_var(per_cpu(cpu_mask, cpu));
-
 	cpufreq_unregister_driver(&ppc_corenet_cpufreq_driver);
 }
 module_exit(ppc_corenet_cpufreq_exit);