Diffstat (limited to 'arch/arc/kernel/perf_event.c')
-rw-r--r--	arch/arc/kernel/perf_event.c	32
1 file changed, 9 insertions(+), 23 deletions(-)
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 0c08bb1ce15a..8b134cfe5e1f 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -428,12 +428,11 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 
 #endif /* CONFIG_ISA_ARCV2 */
 
-void arc_cpu_pmu_irq_init(void)
+static void arc_cpu_pmu_irq_init(void *data)
 {
-	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
+	int irq = *(int *)data;
 
-	arc_request_percpu_irq(arc_pmu->irq, smp_processor_id(), arc_pmu_intr,
-			       "ARC perf counters", pmu_cpu);
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 
 	/* Clear all pending interrupt flags */
 	write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
@@ -515,7 +514,6 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 
 	if (has_interrupts) {
 		int irq = platform_get_irq(pdev, 0);
-		unsigned long flags;
 
 		if (irq < 0) {
 			pr_err("Cannot get IRQ number for the platform\n");
@@ -524,24 +522,12 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 
 		arc_pmu->irq = irq;
 
-		/*
-		 * arc_cpu_pmu_irq_init() needs to be called on all cores for
-		 * their respective local PMU.
-		 * However we use opencoded on_each_cpu() to ensure it is called
-		 * on core0 first, so that arc_request_percpu_irq() sets up
-		 * AUTOEN etc. Otherwise enable_percpu_irq() fails to enable
-		 * perf IRQ on non master cores.
-		 * see arc_request_percpu_irq()
-		 */
-		preempt_disable();
-		local_irq_save(flags);
-		arc_cpu_pmu_irq_init();
-		local_irq_restore(flags);
-		smp_call_function((smp_call_func_t)arc_cpu_pmu_irq_init, 0, 1);
-		preempt_enable();
-
-		/* Clean all pending interrupt flags */
-		write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
+		/* intc map function ensures irq_set_percpu_devid() called */
+		request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
+				   this_cpu_ptr(&arc_pmu_cpu));
+
+		on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
+
 	} else
 		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 