author     Sudeep KarkadaNagesha <Sudeep.KarkadaNagesha@arm.com>    2012-07-31 05:34:25 -0400
committer  Will Deacon <will.deacon@arm.com>                        2012-08-23 06:35:52 -0400
commit     051f1b13144dd8553d5a5104dde94c7263ae3ba7 (patch)
tree       b9f10b81ace1d986a01c28afddec1f2cc8028e66 /arch
parent     5505b206ca006d0506d1d3b3c494aa86234f66e2 (diff)
ARM: perf: move irq registration into pmu implementation
This patch moves the CPU-specific IRQ registration and parsing code into
the CPU PMU backend. This is required because a PMU may have more than
one interrupt, each of which can be either a PPI (private peripheral
interrupt, banked per CPU) or an SPI (shared peripheral interrupt,
requiring strict affinity setting at the interrupt distributor).
Signed-off-by: Sudeep KarkadaNagesha <Sudeep.KarkadaNagesha@arm.com>
[will: cosmetic edits and reworked interrupt dispatching]
Signed-off-by: Will Deacon <will.deacon@arm.com>
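
For illustration only (not part of this patch): with the two new hooks in
struct arm_pmu, a backend for a PMU whose single interrupt is a PPI might
look roughly like the sketch below. All ppi_pmu_* names are invented here.
A PPI is one line banked per CPU, so it is claimed once with
request_percpu_irq() and then enabled on each CPU; since the genirq core
hands a percpu handler a pointer to that CPU's dev_id slot, a small
trampoline recovers the struct arm_pmu * that the core's dispatcher
expects as its dev argument.

/*
 * Hypothetical sketch, assuming <linux/interrupt.h>, <linux/percpu.h>
 * and a ppi_pmu pointer filled in at probe time.
 */
static struct arm_pmu *ppi_pmu;			/* set at probe time */
static irq_handler_t ppi_pmu_dispatch;		/* handler passed by the core */
static DEFINE_PER_CPU(struct arm_pmu *, ppi_pmu_dev);

static irqreturn_t ppi_pmu_trampoline(int irq, void *dev)
{
	/* dev points at this CPU's slot; unwrap it for the core handler */
	return ppi_pmu_dispatch(irq, *(struct arm_pmu **)dev);
}

static void ppi_pmu_enable(void *info)
{
	enable_percpu_irq(*(int *)info, IRQ_TYPE_NONE);
}

static void ppi_pmu_disable(void *info)
{
	disable_percpu_irq(*(int *)info);
}

static int ppi_pmu_request_irq(irq_handler_t handler)
{
	int err, cpu;
	int irq = platform_get_irq(ppi_pmu->plat_device, 0);

	if (irq < 0)
		return irq;

	for_each_possible_cpu(cpu)
		per_cpu(ppi_pmu_dev, cpu) = ppi_pmu;

	ppi_pmu_dispatch = handler;
	err = request_percpu_irq(irq, ppi_pmu_trampoline, "arm-pmu",
				 &ppi_pmu_dev);
	if (!err)
		on_each_cpu(ppi_pmu_enable, &irq, 1);
	return err;
}

static void ppi_pmu_free_irq(void)
{
	int irq = platform_get_irq(ppi_pmu->plat_device, 0);

	if (irq < 0)
		return;
	on_each_cpu(ppi_pmu_disable, &irq, 1);
	free_percpu_irq(irq, &ppi_pmu_dev);
}

The SPI case, by contrast, needs one request_irq() per line plus an
irq_set_affinity() call, which is exactly what cpu_pmu_request_irq() in
the patch below does.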
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/pmu.h       |  2
-rw-r--r--  arch/arm/kernel/perf_event.c     | 72
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c | 66
3 files changed, 79 insertions, 61 deletions
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index a993ad676047..a26170dce02e 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -78,6 +78,8 @@ struct arm_pmu {
 	void		(*start)(void);
 	void		(*stop)(void);
 	void		(*reset)(void *);
+	int		(*request_irq)(irq_handler_t handler);
+	void		(*free_irq)(void);
 	int		(*map_event)(struct perf_event *event);
 	int		num_events;
 	atomic_t	active_events;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 86fd39937171..93971b1a4f0b 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -297,87 +297,39 @@ validate_group(struct perf_event *event)
 	return 0;
 }
 
-static irqreturn_t armpmu_platform_irq(int irq, void *dev)
+static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
 {
 	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
 	struct platform_device *plat_device = armpmu->plat_device;
 	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
 
-	return plat->handle_irq(irq, dev, armpmu->handle_irq);
+	if (plat && plat->handle_irq)
+		return plat->handle_irq(irq, dev, armpmu->handle_irq);
+	else
+		return armpmu->handle_irq(irq, dev);
 }
 
 static void
 armpmu_release_hardware(struct arm_pmu *armpmu)
 {
-	int i, irq, irqs;
-	struct platform_device *pmu_device = armpmu->plat_device;
-
-	irqs = min(pmu_device->num_resources, num_possible_cpus());
-
-	for (i = 0; i < irqs; ++i) {
-		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
-			continue;
-		irq = platform_get_irq(pmu_device, i);
-		if (irq >= 0)
-			free_irq(irq, armpmu);
-	}
-
-	pm_runtime_put_sync(&pmu_device->dev);
+	armpmu->free_irq();
+	pm_runtime_put_sync(&armpmu->plat_device->dev);
 }
 
 static int
 armpmu_reserve_hardware(struct arm_pmu *armpmu)
 {
-	struct arm_pmu_platdata *plat;
-	irq_handler_t handle_irq;
-	int i, err, irq, irqs;
+	int err;
 	struct platform_device *pmu_device = armpmu->plat_device;
 
 	if (!pmu_device)
 		return -ENODEV;
 
-	plat = dev_get_platdata(&pmu_device->dev);
-	if (plat && plat->handle_irq)
-		handle_irq = armpmu_platform_irq;
-	else
-		handle_irq = armpmu->handle_irq;
-
-	irqs = min(pmu_device->num_resources, num_possible_cpus());
-	if (irqs < 1) {
-		pr_err("no irqs for PMUs defined\n");
-		return -ENODEV;
-	}
-
 	pm_runtime_get_sync(&pmu_device->dev);
-
-	for (i = 0; i < irqs; ++i) {
-		err = 0;
-		irq = platform_get_irq(pmu_device, i);
-		if (irq < 0)
-			continue;
-
-		/*
-		 * If we have a single PMU interrupt that we can't shift,
-		 * assume that we're running on a uniprocessor machine and
-		 * continue. Otherwise, continue without this interrupt.
-		 */
-		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
-			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
-				   irq, i);
-			continue;
-		}
-
-		err = request_irq(irq, handle_irq,
-				  IRQF_DISABLED | IRQF_NOBALANCING,
-				  "arm-pmu", armpmu);
-		if (err) {
-			pr_err("unable to request IRQ%d for ARM PMU counters\n",
-			       irq);
-			armpmu_release_hardware(armpmu);
-			return err;
-		}
-
-		cpumask_set_cpu(i, &armpmu->active_irqs);
-	}
+	err = armpmu->request_irq(armpmu_dispatch_irq);
+	if (err) {
+		armpmu_release_hardware(armpmu);
+		return err;
+	}
 
 	return 0;
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 56ddc989c909..8d7d8d4de9d6 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -70,6 +70,67 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 	return &__get_cpu_var(cpu_hw_events);
 }
 
+static void cpu_pmu_free_irq(void)
+{
+	int i, irq, irqs;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+	for (i = 0; i < irqs; ++i) {
+		if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
+			continue;
+		irq = platform_get_irq(pmu_device, i);
+		if (irq >= 0)
+			free_irq(irq, cpu_pmu);
+	}
+}
+
+static int cpu_pmu_request_irq(irq_handler_t handler)
+{
+	int i, err, irq, irqs;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+
+	if (!pmu_device)
+		return -ENODEV;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+	if (irqs < 1) {
+		pr_err("no irqs for PMUs defined\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < irqs; ++i) {
+		err = 0;
+		irq = platform_get_irq(pmu_device, i);
+		if (irq < 0)
+			continue;
+
+		/*
+		 * If we have a single PMU interrupt that we can't shift,
+		 * assume that we're running on a uniprocessor machine and
+		 * continue. Otherwise, continue without this interrupt.
+		 */
+		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+				   irq, i);
+			continue;
+		}
+
+		err = request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu",
+				  cpu_pmu);
+		if (err) {
+			pr_err("unable to request IRQ%d for ARM PMU counters\n",
+			       irq);
+			return err;
+		}
+
+		cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+	}
+
+	return 0;
+}
+
 static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	int cpu;
@@ -79,7 +140,10 @@ static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu)
 		events->used_mask = per_cpu(used_mask, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
 	}
-	cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
+
+	cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
+	cpu_pmu->request_irq = cpu_pmu_request_irq;
+	cpu_pmu->free_irq = cpu_pmu_free_irq;
 
 	/* Ensure the PMU has sane values out of reset. */
 	if (cpu_pmu && cpu_pmu->reset)
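
One consequence of routing everything through the new hooks, shown here as
a hypothetical sketch rather than anything in this patch: the core no
longer knows or cares how a backend wires up its interrupt lines, only
whether request_irq() reports success. A backend for a PMU with no usable
interrupt line could therefore satisfy the contract with stubs (names
invented for illustration):

/* Hypothetical, for illustration only: no-op hooks for an interrupt-less PMU. */
static int null_pmu_request_irq(irq_handler_t handler)
{
	return 0;	/* no lines to claim; the core proceeds on success */
}

static void null_pmu_free_irq(void)
{
}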