author    Marc Zyngier <marc.zyngier@arm.com>        2016-07-08 10:56:04 -0400
committer Catalin Marinas <catalin.marinas@arm.com>  2016-07-08 12:39:55 -0400
commit    19a469a58720ea96b649b06fb09ddfd3e831aa69 (patch)
tree      aa50f6bce42d8e4ed1aaf29b6c0025192b71f695 /drivers/perf
parent    90f777beb788d08300f4a1482cb4fd37a401b472 (diff)
drivers/perf: arm-pmu: Handle per-interrupt affinity mask
On a big.LITTLE system, PMUs can be wired to CPUs using per-CPU interrupts (PPIs). In this case, it is important to make sure that the enable/disable operations happen on the right set of CPUs. So instead of relying on the interrupt-affinity property, we can use the actual percpu affinity that DT exposes as part of the interrupt specifier.

The DT binding is also updated to reflect the fact that the interrupt-affinity property shouldn't be used in that case.

Acked-by: Rob Herring <robh@kernel.org>
Tested-by: Caesar Wang <wxt@rock-chips.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
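In outline, the change is twofold: the broadcast on_each_cpu() calls become on_each_cpu_mask() over the PMU's supported_cpus mask, and for PPIs that mask is now derived from the interrupt's percpu partition via irq_get_percpu_devid_partition() rather than from the interrupt-affinity property. Below is a minimal sketch of that pattern, using hypothetical my_pmu_* names rather than the real arm_pmu internals; the actual change is in the diff further down.

/*
 * Sketch only: hypothetical my_pmu_* helpers illustrating the
 * affinity-mask pattern; the real code is drivers/perf/arm_pmu.c.
 * request_percpu_irq() is omitted for brevity.
 */
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/platform_device.h>
#include <linux/smp.h>

static void my_pmu_enable_percpu_irq(void *info)
{
        int irq = *(int *)info;

        /* Runs on each targeted CPU and enables its copy of the PPI. */
        enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static int my_pmu_setup_irq(struct platform_device *pdev,
                            struct cpumask *supported_cpus)
{
        int irq = platform_get_irq(pdev, 0);

        if (irq < 0)
                return irq;

        if (irq_is_percpu(irq)) {
                int ret;

                /*
                 * A PPI may be wired to only a subset of CPUs, e.g. one
                 * cluster of a big.LITTLE system. Ask the irqchip which
                 * CPUs the percpu interrupt partition actually covers.
                 */
                ret = irq_get_percpu_devid_partition(irq, supported_cpus);
                if (ret)
                        return ret;

                /* Cross-call only the CPUs this PMU instance covers. */
                on_each_cpu_mask(supported_cpus, my_pmu_enable_percpu_irq,
                                 &irq, 1);
        } else {
                /* SPIs carry no partition information: claim all CPUs. */
                cpumask_setall(supported_cpus);
        }

        return 0;
}

The mask matters because enable_percpu_irq()/disable_percpu_irq() act on the calling CPU's copy of a PPI, so broadcasting with plain on_each_cpu() would touch CPUs outside the partition, i.e. CPUs on which this PMU's interrupt was never requested.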
Diffstat (limited to 'drivers/perf')
-rw-r--r--  drivers/perf/arm_pmu.c  27
1 file changed, 22 insertions(+), 5 deletions(-)
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 140436a046c0..8e4d7f590b06 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -603,7 +603,8 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 
         irq = platform_get_irq(pmu_device, 0);
         if (irq >= 0 && irq_is_percpu(irq)) {
-                on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
+                on_each_cpu_mask(&cpu_pmu->supported_cpus,
+                                 cpu_pmu_disable_percpu_irq, &irq, 1);
                 free_percpu_irq(irq, &hw_events->percpu_pmu);
         } else {
                 for (i = 0; i < irqs; ++i) {
@@ -645,7 +646,9 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
                                 irq);
                         return err;
                 }
-                on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
+
+                on_each_cpu_mask(&cpu_pmu->supported_cpus,
+                                 cpu_pmu_enable_percpu_irq, &irq, 1);
         } else {
                 for (i = 0; i < irqs; ++i) {
                         int cpu = i;
@@ -961,9 +964,23 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
                 i++;
         } while (1);
 
-        /* If we didn't manage to parse anything, claim to support all CPUs */
-        if (cpumask_weight(&pmu->supported_cpus) == 0)
-                cpumask_setall(&pmu->supported_cpus);
+        /* If we didn't manage to parse anything, try the interrupt affinity */
+        if (cpumask_weight(&pmu->supported_cpus) == 0) {
+                if (!using_spi) {
+                        /* If using PPIs, check the affinity of the partition */
+                        int ret, irq;
+
+                        irq = platform_get_irq(pdev, 0);
+                        ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
+                        if (ret) {
+                                kfree(irqs);
+                                return ret;
+                        }
+                } else {
+                        /* Otherwise default to all CPUs */
+                        cpumask_setall(&pmu->supported_cpus);
+                }
+        }
 
         /* If we matched up the IRQ affinities, use them to route the SPIs */
         if (using_spi && i == pdev->num_resources)