author     Will Deacon <will.deacon@arm.com>    2015-03-06 06:54:09 -0500
committer  Will Deacon <will.deacon@arm.com>    2015-03-24 11:07:57 -0400
commit     9fd85eb502a78bd812db58bd1f668b2a06ee30a5 (patch)
tree       81e8e9ea897a7ab9aa32e7bbfc56ca62423b119e /arch
parent     e429817b401f095ac483fcb02524b01faf45dad6 (diff)
ARM: pmu: add support for interrupt-affinity property
Historically, the PMU devicetree bindings have expected SPIs to be
listed in order of *logical* CPU number. This is problematic for
bootloaders, especially when the boot CPU (logical ID 0) isn't listed
first in the devicetree.
This patch adds a new optional property, interrupt-affinity, to the
PMU node which allows the interrupt affinity to be described using
a list of phandles to CPU nodes, with each entry in the list
corresponding to the SPI at the same index in the interrupts property.
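For illustration only (the compatible string, SPI numbers and CPU
labels below are made up for this sketch and do not come from the
patch), a PMU node using the new property might look like:

	pmu {
		compatible = "arm,cortex-a15-pmu";
		interrupts = <0 68 4>, <0 69 4>;
		/* hypothetical wiring: first SPI targets cpu1, second cpu0 */
		interrupt-affinity = <&cpu1>, <&cpu0>;
	};

where &cpu0 and &cpu1 are phandles to the corresponding nodes under
/cpus, listed in the same order as the interrupts they describe.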
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/pmu.h        |  1
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c  | 69
2 files changed, 63 insertions(+), 7 deletions(-)
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index b1596bd59129..675e4ab79f68 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -92,6 +92,7 @@ struct pmu_hw_events {
 struct arm_pmu {
         struct pmu      pmu;
         cpumask_t       active_irqs;
+        int             *irq_affinity;
         char            *name;
         irqreturn_t     (*handle_irq)(int irq_num, void *dev);
         void            (*enable)(struct perf_event *event);
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 7eb86e294c68..91c7ba182dcd 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -92,11 +92,16 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
                 free_percpu_irq(irq, &hw_events->percpu_pmu);
         } else {
                 for (i = 0; i < irqs; ++i) {
-                        if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
+                        int cpu = i;
+
+                        if (cpu_pmu->irq_affinity)
+                                cpu = cpu_pmu->irq_affinity[i];
+
+                        if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
                                 continue;
                         irq = platform_get_irq(pmu_device, i);
                         if (irq >= 0)
-                                free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, i));
+                                free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
                 }
         }
 }
@@ -128,32 +133,37 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
                 on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
         } else {
                 for (i = 0; i < irqs; ++i) {
+                        int cpu = i;
+
                         err = 0;
                         irq = platform_get_irq(pmu_device, i);
                         if (irq < 0)
                                 continue;
 
+                        if (cpu_pmu->irq_affinity)
+                                cpu = cpu_pmu->irq_affinity[i];
+
                         /*
                          * If we have a single PMU interrupt that we can't shift,
                          * assume that we're running on a uniprocessor machine and
                          * continue. Otherwise, continue without this interrupt.
                          */
-                        if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+                        if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
                                 pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
-                                        irq, i);
+                                        irq, cpu);
                                 continue;
                         }
 
                         err = request_irq(irq, handler,
                                           IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
-                                          per_cpu_ptr(&hw_events->percpu_pmu, i));
+                                          per_cpu_ptr(&hw_events->percpu_pmu, cpu));
                         if (err) {
                                 pr_err("unable to request IRQ%d for ARM PMU counters\n",
                                        irq);
                                 return err;
                         }
 
-                        cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+                        cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
                 }
         }
 
@@ -291,6 +301,48 @@ static int probe_current_pmu(struct arm_pmu *pmu)
         return ret;
 }
 
+static int of_pmu_irq_cfg(struct platform_device *pdev)
+{
+        int i;
+        int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+
+        if (!irqs)
+                return -ENOMEM;
+
+        for (i = 0; i < pdev->num_resources; ++i) {
+                struct device_node *dn;
+                int cpu;
+
+                dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
+                                      i);
+                if (!dn) {
+                        pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
+                                of_node_full_name(dn), i);
+                        break;
+                }
+
+                for_each_possible_cpu(cpu)
+                        if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
+                                break;
+
+                of_node_put(dn);
+                if (cpu >= nr_cpu_ids) {
+                        pr_warn("Failed to find logical CPU for %s\n",
+                                dn->name);
+                        break;
+                }
+
+                irqs[i] = cpu;
+        }
+
+        if (i == pdev->num_resources)
+                cpu_pmu->irq_affinity = irqs;
+        else
+                kfree(irqs);
+
+        return 0;
+}
+
 static int cpu_pmu_device_probe(struct platform_device *pdev)
 {
         const struct of_device_id *of_id;
@@ -315,7 +367,10 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
 
         if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
                 init_fn = of_id->data;
-                ret = init_fn(pmu);
+
+                ret = of_pmu_irq_cfg(pdev);
+                if (!ret)
+                        ret = init_fn(pmu);
         } else {
                 ret = probe_current_pmu(pmu);
         }