author     Nicolas Pitre <nicolas.pitre@linaro.org>   2014-07-25 16:05:32 -0400
committer  Steven Rostedt <rostedt@goodmis.org>       2014-08-07 20:40:42 -0400
commit     45ed695ac10a23cb4e60a3e0b68b3f21a8670670
tree       34a810bee0cf351908449a9563ba1fc126f319cb /arch/arm64
parent     365ec7b17327329efc71276722ca8db3f21f2edd
ARM64: add IPI tracepoints
The strings used to list IPIs in /proc/interrupts are reused for tracing
purposes.

While at it, the code is slightly cleaned up so the ipi_types array
indices are no longer offset by IPI_RESCHEDULE whose value is 0 anyway.

Link: http://lkml.kernel.org/p/1406318733-26754-5-git-send-email-nicolas.pitre@linaro.org

Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
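For context, the events pulled in by the new include are ipi_raise, ipi_entry and ipi_exit. Below is a minimal sketch of the interface they expose to this file, with the prototypes inferred from the call sites in the diff; the authoritative TRACE_EVENT()/DEFINE_EVENT() definitions live in include/trace/events/ipi.h, added earlier in this series.

/*
 * Sketch only, not the real header: what including trace/events/ipi.h
 * provides to smp.c (the CREATE_TRACE_POINTS define in the diff makes
 * this file the translation unit that instantiates the events).
 * 'reason' is one of the ipi_types[] strings; tagging that array with
 * __tracepoint_string lets trace tooling resolve the recorded pointer
 * back into readable text.
 */
void trace_ipi_raise(const struct cpumask *target, const char *reason);
void trace_ipi_entry(const char *reason);
void trace_ipi_exit(const char *reason);

Once built, the events appear under events/ipi/ in the tracing directory and can be enabled like any other trace event.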
Diffstat (limited to 'arch/arm64')
-rw-r--r--   arch/arm64/kernel/smp.c   65
1 file changed, 39 insertions(+), 26 deletions(-)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 40f38f46c8e0..a89c66f3b4c5 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -50,6 +50,9 @@
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/ipi.h>
+
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
@@ -307,8 +310,6 @@ void __init smp_prepare_boot_cpu(void)
 	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 }
 
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
-
 /*
  * Enumerate the possible CPU set from the device tree and build the
  * cpu logical map array containing MPIDR values related to logical
@@ -463,32 +464,15 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	}
 }
 
+static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
 
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
-	smp_cross_call = fn;
+	__smp_cross_call = fn;
 }
 
-void arch_send_call_function_ipi_mask(const struct cpumask *mask)
-{
-	smp_cross_call(mask, IPI_CALL_FUNC);
-}
-
-void arch_send_call_function_single_ipi(int cpu)
-{
-	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
-}
-
-#ifdef CONFIG_IRQ_WORK
-void arch_irq_work_raise(void)
-{
-	if (smp_cross_call)
-		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
-}
-#endif
-
-static const char *ipi_types[NR_IPI] = {
-#define S(x,s)	[x - IPI_RESCHEDULE] = s
+static const char *ipi_types[NR_IPI] __tracepoint_string = {
+#define S(x,s)	[x] = s
 	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
 	S(IPI_CALL_FUNC, "Function call interrupts"),
 	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
@@ -497,12 +481,18 @@ static const char *ipi_types[NR_IPI] = {
 	S(IPI_IRQ_WORK, "IRQ work interrupts"),
 };
 
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise(target, ipi_types[ipinr]);
+	__smp_cross_call(target, ipinr);
+}
+
 void show_ipi_list(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
 
 	for (i = 0; i < NR_IPI; i++) {
-		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
+		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
 			   prec >= 4 ? " " : "");
 		for_each_online_cpu(cpu)
 			seq_printf(p, "%10u ",
@@ -522,6 +512,24 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
 	return sum;
 }
 
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+	smp_cross_call(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+	if (__smp_cross_call)
+		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+}
+#endif
+
 static DEFINE_RAW_SPINLOCK(stop_lock);
 
 /*
@@ -553,8 +561,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 	unsigned int cpu = smp_processor_id();
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
-	if (ipinr >= IPI_RESCHEDULE && ipinr < IPI_RESCHEDULE + NR_IPI)
-		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_RESCHEDULE]);
+	if ((unsigned)ipinr < NR_IPI) {
+		trace_ipi_entry(ipi_types[ipinr]);
+		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
+	}
 
 	switch (ipinr) {
 	case IPI_RESCHEDULE:
@@ -599,6 +609,9 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
 		break;
 	}
+
+	if ((unsigned)ipinr < NR_IPI)
+		trace_ipi_exit(ipi_types[ipinr]);
 	set_irq_regs(old_regs);
 }
 