author    Will Deacon <will.deacon@arm.com>  2011-07-27 10:18:59 -0400
committer Will Deacon <will.deacon@arm.com>  2011-08-31 05:17:59 -0400
commit 0b390e2126e03b6ec41f96fb0550b1526d00e203
tree   36520568e01918ab82900631c75960016ffe464c
parent b0e89590f4f27ea5ff30bdedb9a58ea904a6b353
ARM: perf: use cpumask_t to record active IRQs
Commit 5dfc54e0 ("ARM: GIC: avoid routing interrupts to offline CPUs")
prevents the GIC from setting the affinity of an IRQ to a CPU with
id >= nr_cpu_ids. This was previously abused by perf on some platforms
where more IRQs were registered than possible CPUs.

This patch fixes the problem by using a cpumask_t to keep track of the
active (requested) interrupts in perf. The same effect could be achieved
by limiting the number of IRQs to the number of CPUs, but using a mask
instead will be useful for adding extended CPU hotplug support in the
future.

Acked-by: Jamie Iles <jamie@jamieiles.com>
Reviewed-by: Jean Pihet <j-pihet@ti.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
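For illustration only, a minimal sketch of the tracking pattern this patch
adopts: set a bit in the mask for each CPU whose IRQ was successfully
requested, and on teardown free only the IRQs whose bits are set. The
"my_pmu" struct and "my_pmu_teardown" names below are hypothetical and not
part of the patch; cpumask_test_and_clear_cpu(), platform_get_irq() and
free_irq() are the same kernel APIs the patch itself uses.

	/* Illustrative sketch only; not part of this patch. */
	#include <linux/cpumask.h>
	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	struct my_pmu {
		/* bit i set => the IRQ for CPU i was requested */
		cpumask_t active_irqs;
	};

	static void my_pmu_teardown(struct my_pmu *pmu,
				    struct platform_device *pdev, int irqs)
	{
		int i, irq;

		for (i = 0; i < irqs; ++i) {
			/* Free only IRQs we requested; clear bits as we go. */
			if (!cpumask_test_and_clear_cpu(i, &pmu->active_irqs))
				continue;
			irq = platform_get_irq(pdev, i);
			if (irq >= 0)
				free_irq(irq, NULL);
		}
	}

Compare the old code below, which walked every platform resource and freed
its IRQ unconditionally, whether or not it had been requested.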
Diffstat (limited to 'arch/arm/kernel/perf_event.c')
 arch/arm/kernel/perf_event.c | 64 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+), 33 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 8514855ffc2..d507fe148e0 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -69,6 +69,7 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 
 struct arm_pmu {
 	enum arm_perf_pmu_ids id;
+	cpumask_t	active_irqs;
 	const char	*name;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
 	void		(*enable)(struct hw_perf_event *evt, int idx);
@@ -388,6 +389,25 @@ static irqreturn_t armpmu_platform_irq(int irq, void *dev)
 	return plat->handle_irq(irq, dev, armpmu->handle_irq);
 }
 
+static void
+armpmu_release_hardware(void)
+{
+	int i, irq, irqs;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+	for (i = 0; i < irqs; ++i) {
+		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
+			continue;
+		irq = platform_get_irq(pmu_device, i);
+		if (irq >= 0)
+			free_irq(irq, NULL);
+	}
+
+	armpmu->stop();
+	release_pmu(ARM_PMU_DEVICE_CPU);
+}
+
 static int
 armpmu_reserve_hardware(void)
 {
@@ -401,20 +421,20 @@ armpmu_reserve_hardware(void)
 		return err;
 	}
 
-	irqs = pmu_device->num_resources;
-
 	plat = dev_get_platdata(&pmu_device->dev);
 	if (plat && plat->handle_irq)
 		handle_irq = armpmu_platform_irq;
 	else
 		handle_irq = armpmu->handle_irq;
 
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
 	if (irqs < 1) {
 		pr_err("no irqs for PMUs defined\n");
 		return -ENODEV;
 	}
 
 	for (i = 0; i < irqs; ++i) {
+		err = 0;
 		irq = platform_get_irq(pmu_device, i);
 		if (irq < 0)
 			continue;
@@ -422,13 +442,12 @@ armpmu_reserve_hardware(void)
 		/*
 		 * If we have a single PMU interrupt that we can't shift,
 		 * assume that we're running on a uniprocessor machine and
-		 * continue.
+		 * continue. Otherwise, continue without this interrupt.
 		 */
-		err = irq_set_affinity(irq, cpumask_of(i));
-		if (err && irqs > 1) {
-			pr_err("unable to set irq affinity (irq=%d, cpu=%u)\n",
-			       irq, i);
-			break;
+		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+				   irq, i);
+			continue;
 		}
 
 		err = request_irq(irq, handle_irq,
@@ -437,35 +456,14 @@ armpmu_reserve_hardware(void)
 		if (err) {
 			pr_err("unable to request IRQ%d for ARM PMU counters\n",
 			       irq);
-			break;
+			armpmu_release_hardware();
+			return err;
 		}
-	}
 
-	if (err) {
-		for (i = i - 1; i >= 0; --i) {
-			irq = platform_get_irq(pmu_device, i);
-			if (irq >= 0)
-				free_irq(irq, NULL);
-		}
-		release_pmu(ARM_PMU_DEVICE_CPU);
+		cpumask_set_cpu(i, &armpmu->active_irqs);
 	}
 
-	return err;
-}
-
-static void
-armpmu_release_hardware(void)
-{
-	int i, irq;
-
-	for (i = pmu_device->num_resources - 1; i >= 0; --i) {
-		irq = platform_get_irq(pmu_device, i);
-		if (irq >= 0)
-			free_irq(irq, NULL);
-	}
-	armpmu->stop();
-
-	release_pmu(ARM_PMU_DEVICE_CPU);
-}
+	return 0;
+}
 
 static atomic_t active_events = ATOMIC_INIT(0);