author		Mark Rutland <mark.rutland@arm.com>	2018-02-05 11:42:00 -0500
committer	Will Deacon <will.deacon@arm.com>	2018-02-20 06:34:54 -0500
commit		6de3f79112cc26bf24edbb240248d21e1dd85dde
tree		7a8301e69ef2f8f7cae3de8316056b4679c28019
parent		43fc9a2febbd96dd39588d67ace456b7bbc73d9f
arm_pmu: explicitly enable/disable SPIs at hotplug
To support ACPI systems, we need to request IRQs before CPUs are hotplugged, and thus we need to request IRQs before we know their associated PMU.

This is problematic if a PMU IRQ is pending out of reset, as it may be taken before we know the PMU, and thus the IRQ handler won't be able to handle it, leaving it screaming.

To avoid such problems, let's request all IRQs in a disabled state, and explicitly enable/disable them at hotplug time, when we're sure the PMU has been probed.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
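In isolation, the pattern described above looks roughly like the sketch below. It is not part of the patch: a hypothetical driver marks its SPI with IRQ_NOAUTOEN before request_irq() so the line stays masked, then unmasks/masks it from its CPU hotplug callbacks, by which point probing is known to have finished. All my_* identifiers are invented for illustration.

/*
 * Minimal sketch only (hypothetical driver, invented my_* names): request
 * an SPI in a disabled state, enable/disable it at CPU hotplug time.
 */
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static int my_irq;		/* hypothetical SPI number */
static void *my_dev;		/* hypothetical per-device cookie */

static irqreturn_t my_handler(int irq, void *dev)
{
	/* Only runs once the IRQ has been explicitly enabled. */
	return IRQ_HANDLED;
}

static int my_request_irq(void)
{
	/* Keep the line masked across request_irq(); nothing can fire yet. */
	irq_set_status_flags(my_irq, IRQ_NOAUTOEN);
	return request_irq(my_irq, my_handler,
			   IRQF_NOBALANCING | IRQF_NO_THREAD, "my-dev", my_dev);
}

static int my_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	/* Probe has completed by the time this hotplug callback runs. */
	enable_irq(my_irq);
	return 0;
}

static int my_teardown_cpu(unsigned int cpu, struct hlist_node *node)
{
	disable_irq(my_irq);
	return 0;
}

The starting/teardown pair would be registered with cpuhp_setup_state_multi(), much as arm_pmu.c does for arm_perf_starting_cpu()/arm_perf_teardown_cpu().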
 drivers/perf/arm_pmu.c | 15 +++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index ddcabd6a5d52..72118e6f9122 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -558,6 +558,7 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
 			    IRQF_NOBALANCING |
 			    IRQF_NO_THREAD;
 
+		irq_set_status_flags(irq, IRQ_NOAUTOEN);
 		err = request_irq(irq, handler, irq_flags, "arm-pmu",
 				  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 	} else if (cpumask_empty(&armpmu->active_irqs)) {
@@ -600,10 +601,10 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
 
 	irq = armpmu_get_cpu_irq(pmu, cpu);
 	if (irq) {
-		if (irq_is_percpu_devid(irq)) {
+		if (irq_is_percpu_devid(irq))
 			enable_percpu_irq(irq, IRQ_TYPE_NONE);
-			return 0;
-		}
+		else
+			enable_irq(irq);
 	}
 
 	return 0;
@@ -618,8 +619,12 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
 		return 0;
 
 	irq = armpmu_get_cpu_irq(pmu, cpu);
-	if (irq && irq_is_percpu_devid(irq))
-		disable_percpu_irq(irq);
+	if (irq) {
+		if (irq_is_percpu_devid(irq))
+			disable_percpu_irq(irq);
+		else
+			disable_irq(irq);
+	}
 
 	return 0;
 }