author     Mark Rutland <mark.rutland@arm.com>    2017-04-11 04:39:51 -0400
committer  Will Deacon <will.deacon@arm.com>      2017-04-11 11:29:53 -0400
commit     0e2663d921ea1861540cd7f331d8e2c7668aa31f (patch)
tree       c3cd5bba53d72874786b7d7866a91ee6750c76bb
parent     3cf6111025cb3346be43856e4c5e9b795b447832 (diff)
drivers/perf: arm_pmu: split cpu-local irq request/free
Currently we have functions to request/free all IRQs for a given PMU.
While this works today, this won't work for ACPI, where we don't know
the full set of IRQs up front, and need to request them separately.

To enable supporting ACPI, this patch splits out the cpu-local
request/free into new functions, allowing us to request/free individual
IRQs.

As this makes it possible/necessary to request a PPI once per cpu, an
additional check is added to detect mismatched PPIs. This shouldn't
matter for the DT / platform case, as we check this when parsing.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Jeremy Linton <jeremy.linton@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--  drivers/perf/arm_pmu.c  88
1 file changed, 52 insertions(+), 36 deletions(-)
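Editor's note: the point of the split is that a caller which discovers a PMU IRQ for one CPU at a time (the ACPI case motivating this change) can request just that IRQ, rather than the whole set up front. The sketch below illustrates that usage pattern; it is a minimal sketch, not part of the patch. The probe/teardown function names and the way the IRQ number is obtained are hypothetical, and only armpmu_request_irq()/armpmu_free_irq() correspond to the helpers added in the diff below. Note they are static to drivers/perf/arm_pmu.c, so this sketch would have to live alongside them in that file.

/*
 * Hypothetical cpu-local caller, sketching how the split helpers are
 * meant to be used once IRQs are discovered per cpu (e.g. from ACPI).
 */
static int example_pmu_setup_cpu_irq(struct arm_pmu *armpmu, int cpu, int irq)
{
	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;

	/* Record the IRQ discovered for this cpu in the per-cpu state... */
	per_cpu(hw_events->irq, cpu) = irq;

	/*
	 * ...then request only this cpu's IRQ. Errors (including a
	 * mismatched PPI) are logged and returned by armpmu_request_irq().
	 */
	return armpmu_request_irq(armpmu, cpu);
}

static void example_pmu_teardown_cpu_irq(struct arm_pmu *armpmu, int cpu)
{
	/* Release only this cpu's IRQ; a no-op if it was never requested. */
	armpmu_free_irq(armpmu, cpu);
}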
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 3c4e97df8331..c09c379b038d 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -527,65 +527,81 @@ int perf_num_counters(void)
 }
 EXPORT_SYMBOL_GPL(perf_num_counters);
 
-static void armpmu_free_irqs(struct arm_pmu *armpmu)
+static void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
 {
-	int cpu;
 	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
+	int irq = per_cpu(hw_events->irq, cpu);
 
-	for_each_cpu(cpu, &armpmu->supported_cpus) {
-		int irq = per_cpu(hw_events->irq, cpu);
-		if (!irq)
-			continue;
+	if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
+		return;
 
-		if (irq_is_percpu(irq)) {
-			free_percpu_irq(irq, &hw_events->percpu_pmu);
-			break;
-		}
+	if (irq_is_percpu(irq)) {
+		free_percpu_irq(irq, &hw_events->percpu_pmu);
+		cpumask_clear(&armpmu->active_irqs);
+		return;
+	}
 
-		if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
-			continue;
+	free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+}
 
-		free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
-	}
+static void armpmu_free_irqs(struct arm_pmu *armpmu)
+{
+	int cpu;
+
+	for_each_cpu(cpu, &armpmu->supported_cpus)
+		armpmu_free_irq(armpmu, cpu);
 }
 
-static int armpmu_request_irqs(struct arm_pmu *armpmu)
+static int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
 {
-	int cpu, err;
+	int err = 0;
 	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
 	const irq_handler_t handler = armpmu_dispatch_irq;
+	int irq = per_cpu(hw_events->irq, cpu);
+	if (!irq)
+		return 0;
 
-	for_each_cpu(cpu, &armpmu->supported_cpus) {
-		int irq = per_cpu(hw_events->irq, cpu);
-		if (!irq)
-			continue;
+	if (irq_is_percpu(irq) && cpumask_empty(&armpmu->active_irqs)) {
+		err = request_percpu_irq(irq, handler, "arm-pmu",
+					 &hw_events->percpu_pmu);
+	} else if (irq_is_percpu(irq)) {
+		int other_cpu = cpumask_first(&armpmu->active_irqs);
+		int other_irq = per_cpu(hw_events->irq, other_cpu);
 
-		if (irq_is_percpu(irq)) {
-			err = request_percpu_irq(irq, handler, "arm-pmu",
-						 &hw_events->percpu_pmu);
-			if (err) {
-				pr_err("unable to request IRQ%d for ARM PMU counters\n",
-				       irq);
-			}
-
-			return err;
+		if (irq != other_irq) {
+			pr_warn("mismatched PPIs detected.\n");
+			err = -EINVAL;
 		}
-
+	} else {
 		err = request_irq(irq, handler,
 				  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
 				  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
-		if (err) {
-			pr_err("unable to request IRQ%d for ARM PMU counters\n",
-			       irq);
-			return err;
-		}
+	}
 
-		cpumask_set_cpu(cpu, &armpmu->active_irqs);
+	if (err) {
+		pr_err("unable to request IRQ%d for ARM PMU counters\n",
+		       irq);
+		return err;
 	}
 
+	cpumask_set_cpu(cpu, &armpmu->active_irqs);
+
 	return 0;
 }
 
+static int armpmu_request_irqs(struct arm_pmu *armpmu)
+{
+	int cpu, err;
+
+	for_each_cpu(cpu, &armpmu->supported_cpus) {
+		err = armpmu_request_irq(armpmu, cpu);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
 static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
 {
 	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;