aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVinayak Kale <vkale@apm.com>2013-12-04 05:09:51 -0500
committerCatalin Marinas <catalin.marinas@arm.com>2013-12-19 12:43:05 -0500
commit66aa8d6a145b6a66566b4fce219cc56c3d0e01c3 (patch)
tree1aa3806d9ac3fbd03b7d02326f3d33fc9ea880c8
parent7f4a8e7b1943c1fc7e4b08509e308197babdcd5b (diff)
arm64: perf: add support for percpu pmu interrupt
Add support for irq registration when pmu interrupt is percpu. Signed-off-by: Vinayak Kale <vkale@apm.com> Signed-off-by: Tuan Phan <tphan@apm.com> [will: tidied up cross-calling to pass &irq] Signed-off-by: Will Deacon <will.deacon@arm.com> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-rw-r--r--arch/arm64/kernel/perf_event.c108
1 file changed, 78 insertions(+), 30 deletions(-)
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 0e63c98d224c..5b1cd792274a 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -22,6 +22,7 @@
22 22
23#include <linux/bitmap.h> 23#include <linux/bitmap.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/irq.h>
25#include <linux/kernel.h> 26#include <linux/kernel.h>
26#include <linux/export.h> 27#include <linux/export.h>
27#include <linux/perf_event.h> 28#include <linux/perf_event.h>
@@ -363,26 +364,53 @@ validate_group(struct perf_event *event)
363} 364}
364 365
365static void 366static void
367armpmu_disable_percpu_irq(void *data)
368{
369 unsigned int irq = *(unsigned int *)data;
370 disable_percpu_irq(irq);
371}
372
373static void
366armpmu_release_hardware(struct arm_pmu *armpmu) 374armpmu_release_hardware(struct arm_pmu *armpmu)
367{ 375{
368 int i, irq, irqs; 376 int irq;
377 unsigned int i, irqs;
369 struct platform_device *pmu_device = armpmu->plat_device; 378 struct platform_device *pmu_device = armpmu->plat_device;
370 379
371 irqs = min(pmu_device->num_resources, num_possible_cpus()); 380 irqs = min(pmu_device->num_resources, num_possible_cpus());
381 if (!irqs)
382 return;
372 383
373 for (i = 0; i < irqs; ++i) { 384 irq = platform_get_irq(pmu_device, 0);
374 if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) 385 if (irq <= 0)
375 continue; 386 return;
376 irq = platform_get_irq(pmu_device, i); 387
377 if (irq >= 0) 388 if (irq_is_percpu(irq)) {
378 free_irq(irq, armpmu); 389 on_each_cpu(armpmu_disable_percpu_irq, &irq, 1);
390 free_percpu_irq(irq, &cpu_hw_events);
391 } else {
392 for (i = 0; i < irqs; ++i) {
393 if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
394 continue;
395 irq = platform_get_irq(pmu_device, i);
396 if (irq > 0)
397 free_irq(irq, armpmu);
398 }
379 } 399 }
380} 400}
381 401
402static void
403armpmu_enable_percpu_irq(void *data)
404{
405 unsigned int irq = *(unsigned int *)data;
406 enable_percpu_irq(irq, IRQ_TYPE_NONE);
407}
408
382static int 409static int
383armpmu_reserve_hardware(struct arm_pmu *armpmu) 410armpmu_reserve_hardware(struct arm_pmu *armpmu)
384{ 411{
385 int i, err, irq, irqs; 412 int err, irq;
413 unsigned int i, irqs;
386 struct platform_device *pmu_device = armpmu->plat_device; 414 struct platform_device *pmu_device = armpmu->plat_device;
387 415
388 if (!pmu_device) { 416 if (!pmu_device) {
@@ -391,39 +419,59 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
391 } 419 }
392 420
393 irqs = min(pmu_device->num_resources, num_possible_cpus()); 421 irqs = min(pmu_device->num_resources, num_possible_cpus());
394 if (irqs < 1) { 422 if (!irqs) {
395 pr_err("no irqs for PMUs defined\n"); 423 pr_err("no irqs for PMUs defined\n");
396 return -ENODEV; 424 return -ENODEV;
397 } 425 }
398 426
399 for (i = 0; i < irqs; ++i) { 427 irq = platform_get_irq(pmu_device, 0);
400 err = 0; 428 if (irq <= 0) {
401 irq = platform_get_irq(pmu_device, i); 429 pr_err("failed to get valid irq for PMU device\n");
402 if (irq < 0) 430 return -ENODEV;
403 continue; 431 }
404 432
405 /* 433 if (irq_is_percpu(irq)) {
406 * If we have a single PMU interrupt that we can't shift, 434 err = request_percpu_irq(irq, armpmu->handle_irq,
407 * assume that we're running on a uniprocessor machine and 435 "arm-pmu", &cpu_hw_events);
408 * continue. Otherwise, continue without this interrupt.
409 */
410 if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
411 pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
412 irq, i);
413 continue;
414 }
415 436
416 err = request_irq(irq, armpmu->handle_irq,
417 IRQF_NOBALANCING,
418 "arm-pmu", armpmu);
419 if (err) { 437 if (err) {
420 pr_err("unable to request IRQ%d for ARM PMU counters\n", 438 pr_err("unable to request percpu IRQ%d for ARM PMU counters\n",
421 irq); 439 irq);
422 armpmu_release_hardware(armpmu); 440 armpmu_release_hardware(armpmu);
423 return err; 441 return err;
424 } 442 }
425 443
426 cpumask_set_cpu(i, &armpmu->active_irqs); 444 on_each_cpu(armpmu_enable_percpu_irq, &irq, 1);
445 } else {
446 for (i = 0; i < irqs; ++i) {
447 err = 0;
448 irq = platform_get_irq(pmu_device, i);
449 if (irq <= 0)
450 continue;
451
452 /*
453 * If we have a single PMU interrupt that we can't shift,
454 * assume that we're running on a uniprocessor machine and
455 * continue. Otherwise, continue without this interrupt.
456 */
457 if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
458 pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
459 irq, i);
460 continue;
461 }
462
463 err = request_irq(irq, armpmu->handle_irq,
464 IRQF_NOBALANCING,
465 "arm-pmu", armpmu);
466 if (err) {
467 pr_err("unable to request IRQ%d for ARM PMU counters\n",
468 irq);
469 armpmu_release_hardware(armpmu);
470 return err;
471 }
472
473 cpumask_set_cpu(i, &armpmu->active_irqs);
474 }
427 } 475 }
428 476
429 return 0; 477 return 0;