Message ID | 1413897084-19715-8-git-send-email-mark.rutland@arm.com
---|---
State | New
Headers | show
On 10/21/2014 06:11 AM, Mark Rutland wrote:
> @@ -162,16 +160,22 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
>  	return 0;
>  }
>  
> -static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
> +static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
>  {
>  	int cpu;
> +	struct pmu_hw_events __percpu *cpu_hw_events;
> +
> +	cpu_hw_events = alloc_percpu(struct pmu_hw_events);

Shouldn't we free this somewhere?

> +	if (!cpu_hw_events)
> +		return -ENOMEM;
> +
>  	for_each_possible_cpu(cpu) {
> -		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
> +		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
>  		raw_spin_lock_init(&events->pmu_lock);
>  		events->percpu_pmu = cpu_pmu;
>  	}
>  
> -	cpu_pmu->hw_events = &cpu_hw_events;
> +	cpu_pmu->hw_events = cpu_hw_events;
>  	cpu_pmu->request_irq = cpu_pmu_request_irq;
>  	cpu_pmu->free_irq = cpu_pmu_free_irq;
>  
> @@ -303,7 +309,10 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
>  		goto out_free;
>  	}
>  
> -	cpu_pmu_init(cpu_pmu);
> +	ret = cpu_pmu_init(cpu_pmu);
> +	if (ret)
> +		goto out_free;
> +
>  	ret = armpmu_register(cpu_pmu, -1);
>  
>  	if (!ret)

Especially if this fails?
On Tue, Oct 21, 2014 at 10:24:54PM +0100, Stephen Boyd wrote:
> On 10/21/2014 06:11 AM, Mark Rutland wrote:
> > @@ -162,16 +160,22 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
> >  	return 0;
> >  }
> >  
> > -static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
> > +static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
> >  {
> >  	int cpu;
> > +	struct pmu_hw_events __percpu *cpu_hw_events;
> > +
> > +	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
> 
> Shouldn't we free this somewhere?

My bad. We should clean this up if registration fails.

As we don't support unregistering PMUs, that should be the only case
where we'll need to free this.

I'll wrap that up in a new cpu_pmu_destroy function, which we can also
use to tear down the notifier introduced in the next patch.

Thanks,
Mark.

> > +	if (!cpu_hw_events)
> > +		return -ENOMEM;
> > +
> >  	for_each_possible_cpu(cpu) {
> > -		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
> > +		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
> >  		raw_spin_lock_init(&events->pmu_lock);
> >  		events->percpu_pmu = cpu_pmu;
> >  	}
> >  
> > -	cpu_pmu->hw_events = &cpu_hw_events;
> > +	cpu_pmu->hw_events = cpu_hw_events;
> >  	cpu_pmu->request_irq = cpu_pmu_request_irq;
> >  	cpu_pmu->free_irq = cpu_pmu_free_irq;
> >  
> > @@ -303,7 +309,10 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
> >  		goto out_free;
> >  	}
> >  
> > -	cpu_pmu_init(cpu_pmu);
> > +	ret = cpu_pmu_init(cpu_pmu);
> > +	if (ret)
> > +		goto out_free;
> > +
> >  	ret = armpmu_register(cpu_pmu, -1);
> >  
> >  	if (!ret)
> 
> Especially if this fails?
> 
> -- 
> Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
> a Linux Foundation Collaborative Project
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index cd95388..f8a237d 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -35,8 +35,6 @@
 /* Set at runtime when we know what CPU type we are. */
 static struct arm_pmu *cpu_pmu;
 
-static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
-
 /*
  * Despite the names, these two functions are CPU-specific and are used
  * by the OProfile/perf code.
@@ -162,16 +160,22 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 	return 0;
 }
 
-static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
+static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	int cpu;
+	struct pmu_hw_events __percpu *cpu_hw_events;
+
+	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
+	if (!cpu_hw_events)
+		return -ENOMEM;
+
 	for_each_possible_cpu(cpu) {
-		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
 		events->percpu_pmu = cpu_pmu;
 	}
 
-	cpu_pmu->hw_events = &cpu_hw_events;
+	cpu_pmu->hw_events = cpu_hw_events;
 	cpu_pmu->request_irq = cpu_pmu_request_irq;
 	cpu_pmu->free_irq = cpu_pmu_free_irq;
 
@@ -182,6 +186,8 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	/* If no interrupts available, set the corresponding capability flag */
 	if (!platform_get_irq(cpu_pmu->plat_device, 0))
 		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
+	return 0;
 }
 
 /*
@@ -303,7 +309,10 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
 		goto out_free;
 	}
 
-	cpu_pmu_init(cpu_pmu);
+	ret = cpu_pmu_init(cpu_pmu);
+	if (ret)
+		goto out_free;
+
 	ret = armpmu_register(cpu_pmu, -1);
 
 	if (!ret)