author		Will Deacon <will.deacon@arm.com>	2015-03-06 06:54:10 -0500
committer	Will Deacon <will.deacon@arm.com>	2015-03-24 11:09:47 -0400
commit		d5efd9cc9cf2e422d064c912c7d5d985f52c1b2c (patch)
tree		ec4a29ce1df9016b48597888d55e925f3bdf4ac5
parent		71bbf038eaa44a80dd6df0da7c708d4618172fe0 (diff)
arm64: pmu: add support for interrupt-affinity property
Historically, the PMU devicetree bindings have expected SPIs to be listed
in order of *logical* CPU number. This is problematic for bootloaders,
especially when the boot CPU (logical ID 0) isn't listed first in the
devicetree.

This patch adds a new optional property, interrupt-affinity, to the PMU
node, which allows the interrupt affinity to be described using a list of
phandles to CPU nodes, with each entry in the list corresponding to the
SPI at the same index in the interrupts property.

Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
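For reference, a minimal sketch of how the new binding might look in a
devicetree source. The CPU node labels (&cpu0, &cpu1) and the SPI numbers
are illustrative assumptions, not taken from this patch; only the
interrupt-affinity property and its index-wise pairing with the interrupts
list are what the patch defines:

	pmu {
		compatible = "arm,armv8-pmuv3";
		/* One SPI per CPU; order is now arbitrary. */
		interrupts = <0 60 4>, <0 61 4>;
		/*
		 * Each phandle pairs with the SPI at the same index in
		 * "interrupts", so the SPI list no longer needs to follow
		 * logical CPU numbering.
		 */
		interrupt-affinity = <&cpu0>, <&cpu1>;
	};

Without the property, of_parse_phandle() returns NULL at index 0 in the
probe code below and the driver falls back to the historical logical-ID
mapping (cpu = i), so existing devicetrees keep working.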
-rw-r--r--	arch/arm64/include/asm/pmu.h	1
-rw-r--r--	arch/arm64/kernel/perf_event.c	57
2 files changed, 54 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/include/asm/pmu.h b/arch/arm64/include/asm/pmu.h
index e6f087806aaf..b7710a59672c 100644
--- a/arch/arm64/include/asm/pmu.h
+++ b/arch/arm64/include/asm/pmu.h
@@ -44,6 +44,7 @@ struct pmu_hw_events {
 struct arm_pmu {
 	struct pmu		pmu;
 	cpumask_t		active_irqs;
+	int			*irq_affinity;
 	const char		*name;
 	irqreturn_t		(*handle_irq)(int irq_num, void *dev);
 	void			(*enable)(struct hw_perf_event *evt, int idx);
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 68a74151fa6c..195991dadc37 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -25,8 +25,10 @@
 #include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
+#include <linux/of.h>
 #include <linux/perf_event.h>
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/uaccess.h>
 
@@ -405,7 +407,12 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
 		free_percpu_irq(irq, &cpu_hw_events);
 	} else {
 		for (i = 0; i < irqs; ++i) {
-			if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
+			int cpu = i;
+
+			if (armpmu->irq_affinity)
+				cpu = armpmu->irq_affinity[i];
+
+			if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
 				continue;
 			irq = platform_get_irq(pmu_device, i);
 			if (irq > 0)
@@ -459,19 +466,24 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
 		on_each_cpu(armpmu_enable_percpu_irq, &irq, 1);
 	} else {
 		for (i = 0; i < irqs; ++i) {
+			int cpu = i;
+
 			err = 0;
 			irq = platform_get_irq(pmu_device, i);
 			if (irq <= 0)
 				continue;
 
+			if (armpmu->irq_affinity)
+				cpu = armpmu->irq_affinity[i];
+
 			/*
 			 * If we have a single PMU interrupt that we can't shift,
 			 * assume that we're running on a uniprocessor machine and
 			 * continue. Otherwise, continue without this interrupt.
 			 */
-			if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
-				pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
-					    irq, i);
+			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
+				pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+					    irq, cpu);
 				continue;
 			}
 
@@ -485,7 +497,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
 				return err;
 			}
 
-			cpumask_set_cpu(i, &armpmu->active_irqs);
+			cpumask_set_cpu(cpu, &armpmu->active_irqs);
 		}
 	}
 
@@ -1298,9 +1310,46 @@ static const struct of_device_id armpmu_of_device_ids[] = {
 
 static int armpmu_device_probe(struct platform_device *pdev)
 {
+	int i, *irqs;
+
 	if (!cpu_pmu)
 		return -ENODEV;
 
+	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+	if (!irqs)
+		return -ENOMEM;
+
+	for (i = 0; i < pdev->num_resources; ++i) {
+		struct device_node *dn;
+		int cpu;
+
+		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
+				      i);
+		if (!dn) {
+			pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
+				of_node_full_name(dn), i);
+			break;
+		}
+
+		for_each_possible_cpu(cpu)
+			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
+				break;
+
+		of_node_put(dn);
+		if (cpu >= nr_cpu_ids) {
+			pr_warn("Failed to find logical CPU for %s\n",
+				dn->name);
+			break;
+		}
+
+		irqs[i] = cpu;
+	}
+
+	if (i == pdev->num_resources)
+		cpu_pmu->irq_affinity = irqs;
+	else
+		kfree(irqs);
+
 	cpu_pmu->plat_device = pdev;
 	return 0;
 }