 arch/arm/include/asm/pmu.h       |  1 +
 arch/arm/kernel/perf_event.c     | 25 +++++++++++++++++++++++++
 arch/arm/kernel/perf_event_cpu.c | 15 ++++++++++++---
 3 files changed, 38 insertions(+), 3 deletions(-)
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 675e4ab79f68..ecad26e7a68f 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -92,6 +92,7 @@ struct pmu_hw_events {
 struct arm_pmu {
 	struct pmu	pmu;
 	cpumask_t	active_irqs;
+	cpumask_t	supported_cpus;
 	int		*irq_affinity;
 	char		*name;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
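
The new supported_cpus field is a standard kernel cpumask, and the rest of the patch drives it with three calls: cpumask_set_cpu(), cpumask_setall(), and cpumask_test_cpu(). A minimal sketch of that API in kernel context (the mask and CPU numbers here are illustrative, not taken from the patch):

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Illustrative mask, standing in for arm_pmu::supported_cpus. */
static cpumask_t example_mask;

static void cpumask_sketch(void)
{
	/* Record that CPU 0 belongs to this PMU's class. */
	cpumask_set_cpu(0, &example_mask);

	/* Fallback: claim every possible CPU. */
	cpumask_setall(&example_mask);

	/*
	 * Membership test. smp_processor_id() requires preemption to be
	 * disabled, which holds in the perf pmu callback paths this patch
	 * touches.
	 */
	if (cpumask_test_cpu(smp_processor_id(), &example_mask))
		/* safe to touch this PMU's registers */;
}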
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 4a86a0133ac3..9b536be74f7b 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -11,6 +11,7 @@
  */
 #define pr_fmt(fmt) "hw perfevents: " fmt
 
+#include <linux/cpumask.h>
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
@@ -229,6 +230,10 @@ armpmu_add(struct perf_event *event, int flags)
 	int idx;
 	int err = 0;
 
+	/* An event following a process won't be stopped earlier */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return -ENOENT;
+
 	perf_pmu_disable(event->pmu);
 
 	/* If we don't have a space for the counter then finish early. */
@@ -454,6 +459,17 @@ static int armpmu_event_init(struct perf_event *event)
 	int err = 0;
 	atomic_t *active_events = &armpmu->active_events;
 
+	/*
+	 * Reject CPU-affine events for CPUs that are of a different class to
+	 * that which this PMU handles. Process-following events (where
+	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
+	 * reject them later (in armpmu_add) if they're scheduled on a
+	 * different class of CPU.
+	 */
+	if (event->cpu != -1 &&
+		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
+		return -ENOENT;
+
 	/* does not support taken branch sampling */
 	if (has_branch_stack(event))
 		return -EOPNOTSUPP;
@@ -489,6 +505,10 @@ static void armpmu_enable(struct pmu *pmu)
 	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
 	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
 
+	/* For task-bound events we may be called on other CPUs */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return;
+
 	if (enabled)
 		armpmu->start(armpmu);
 }
@@ -496,6 +516,11 @@ static void armpmu_enable(struct pmu *pmu)
 static void armpmu_disable(struct pmu *pmu)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+
+	/* For task-bound events we may be called on other CPUs */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return;
+
 	armpmu->stop(armpmu);
 }
 
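
The two new checks divide the filtering work by event type: armpmu_event_init() can reject a CPU-affine event up front, while a process-following event (event->cpu == -1) can only be caught at schedule-in time, in armpmu_add(). From userspace the distinction is the cpu argument to perf_event_open(2); a hedged sketch of the two flavours (illustrative config values, error handling omitted, and the per-CPU open typically needs elevated privileges):

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <string.h>
#include <unistd.h>

/* Thin wrapper: glibc provides no perf_event_open() symbol. */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* CPU-affine: count on CPU 0 only; rejected in armpmu_event_init()
	 * if CPU 0 is not in this PMU's supported_cpus. */
	int fd_cpu = perf_event_open(&attr, -1, 0, -1, 0);

	/* Process-following: cpu == -1, follows this task wherever it runs;
	 * filtered later, in armpmu_add(), on unsupported CPUs. */
	int fd_task = perf_event_open(&attr, 0, -1, -1, 0);

	close(fd_cpu);
	close(fd_task);
	return 0;
}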
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 2a9003ef6db3..9602d31aae03 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -179,11 +179,15 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
 			  void *hcpu)
 {
+	int cpu = (unsigned long)hcpu;
 	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
 
 	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
 		return NOTIFY_DONE;
 
+	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+		return NOTIFY_DONE;
+
 	if (pmu->reset)
 		pmu->reset(pmu);
 	else
@@ -219,7 +223,8 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 
 	/* Ensure the PMU has sane values out of reset. */
 	if (cpu_pmu->reset)
-		on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
+		on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
+				 cpu_pmu, 1);
 
 	/* If no interrupts available, set the corresponding capability flag */
 	if (!platform_get_irq(cpu_pmu->plat_device, 0))
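
on_each_cpu_mask() takes the same callback/info/wait arguments as the on_each_cpu() call it replaces, plus a leading cpumask that restricts the cross-call, so one PMU class's reset routine no longer runs on another class's CPUs. A minimal sketch of the pattern, with a hypothetical callback:

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Hypothetical reset callback; runs on each CPU in the mask. */
static void example_reset(void *info)
{
	/* per-CPU PMU register writes would go here */
}

static void reset_supported_cpus(cpumask_t *supported)
{
	/*
	 * The final argument (wait = 1) blocks until every targeted CPU
	 * has run the callback, matching the on_each_cpu() semantics of
	 * the call it replaces.
	 */
	on_each_cpu_mask(supported, example_reset, NULL, 1);
}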
@@ -334,12 +339,15 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
 		}
 
 		irqs[i] = cpu;
+		cpumask_set_cpu(cpu, &pmu->supported_cpus);
 	}
 
-	if (i == pdev->num_resources)
+	if (i == pdev->num_resources) {
 		pmu->irq_affinity = irqs;
-	else
+	} else {
 		kfree(irqs);
+		cpumask_setall(&pmu->supported_cpus);
+	}
 
 	return 0;
 }
@@ -374,6 +382,7 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
 		ret = init_fn(pmu);
 	} else {
 		ret = probe_current_pmu(pmu);
+		cpumask_setall(&pmu->supported_cpus);
 	}
 
 	if (ret) {
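
Taken together, supported_cpus is populated in one of two ways: per-CPU from the DT interrupt-affinity walk in of_pmu_irq_cfg(), or via cpumask_setall() on the fallback paths where no affinity information exists. Every path that touches PMU hardware then tests the mask first. A hypothetical one-line helper capturing that invariant (not part of the patch):

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <asm/pmu.h>

/* Hypothetical predicate: may the current CPU drive this PMU? */
static bool pmu_may_run_here(struct arm_pmu *armpmu)
{
	return cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus);
}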