aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMark Rutland <mark.rutland@arm.com>2014-10-21 09:11:23 -0400
committerWill Deacon <will.deacon@arm.com>2014-10-30 08:17:00 -0400
commitabdf655a30b6464fe86c8369de60ccf92f73f589 (patch)
treec00b0659fe23d48e74934816e4a478acbc85cc97
parent5ebd92003494a19ac5246ae385c073be16de1144 (diff)
arm: perf: dynamically allocate cpu hardware data
To support multiple PMUs, each PMU will need its own accounting data. As we don't know how (in general) many PMUs we'll have to support at compile-time, we must allocate the data at runtime dynamically. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Reviewed-by: Stephen Boyd <sboyd@codeaurora.org> Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--arch/arm/kernel/perf_event_cpu.c33
1 files changed, 25 insertions, 8 deletions
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index b9391fa2368d..f0f6c5ef41b0 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -35,8 +35,6 @@
35/* Set at runtime when we know what CPU type we are. */ 35/* Set at runtime when we know what CPU type we are. */
36static struct arm_pmu *cpu_pmu; 36static struct arm_pmu *cpu_pmu;
37 37
38static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
39
40/* 38/*
41 * Despite the names, these two functions are CPU-specific and are used 39 * Despite the names, these two functions are CPU-specific and are used
42 * by the OProfile/perf code. 40 * by the OProfile/perf code.
@@ -162,16 +160,22 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
162 return 0; 160 return 0;
163} 161}
164 162
165static void cpu_pmu_init(struct arm_pmu *cpu_pmu) 163static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
166{ 164{
167 int cpu; 165 int cpu;
166 struct pmu_hw_events __percpu *cpu_hw_events;
167
168 cpu_hw_events = alloc_percpu(struct pmu_hw_events);
169 if (!cpu_hw_events)
170 return -ENOMEM;
171
168 for_each_possible_cpu(cpu) { 172 for_each_possible_cpu(cpu) {
169 struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu); 173 struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
170 raw_spin_lock_init(&events->pmu_lock); 174 raw_spin_lock_init(&events->pmu_lock);
171 events->percpu_pmu = cpu_pmu; 175 events->percpu_pmu = cpu_pmu;
172 } 176 }
173 177
174 cpu_pmu->hw_events = &cpu_hw_events; 178 cpu_pmu->hw_events = cpu_hw_events;
175 cpu_pmu->request_irq = cpu_pmu_request_irq; 179 cpu_pmu->request_irq = cpu_pmu_request_irq;
176 cpu_pmu->free_irq = cpu_pmu_free_irq; 180 cpu_pmu->free_irq = cpu_pmu_free_irq;
177 181
@@ -182,6 +186,13 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
182 /* If no interrupts available, set the corresponding capability flag */ 186 /* If no interrupts available, set the corresponding capability flag */
183 if (!platform_get_irq(cpu_pmu->plat_device, 0)) 187 if (!platform_get_irq(cpu_pmu->plat_device, 0))
184 cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; 188 cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
189
190 return 0;
191}
192
193static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
194{
195 free_percpu(cpu_pmu->hw_events);
185} 196}
186 197
187/* 198/*
@@ -303,12 +314,18 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
303 goto out_free; 314 goto out_free;
304 } 315 }
305 316
306 cpu_pmu_init(cpu_pmu); 317 ret = cpu_pmu_init(cpu_pmu);
318 if (ret)
319 goto out_free;
320
307 ret = armpmu_register(cpu_pmu, -1); 321 ret = armpmu_register(cpu_pmu, -1);
322 if (ret)
323 goto out_destroy;
308 324
309 if (!ret) 325 return 0;
310 return 0;
311 326
327out_destroy:
328 cpu_pmu_destroy(cpu_pmu);
312out_free: 329out_free:
313 pr_info("failed to register PMU devices!\n"); 330 pr_info("failed to register PMU devices!\n");
314 kfree(pmu); 331 kfree(pmu);