about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorAKASHI Takahiro <takahiro.akashi@linaro.org>2014-04-30 05:54:33 -0400
committerWill Deacon <will.deacon@arm.com>2014-05-29 04:08:08 -0400
commit819e50e25d0ce8a75f5cba815416a6a8573655c4 (patch)
tree006f9d7dc3f97f47709430357651a83ffdc1f86d
parentaf64d2aa872a174772ffc00e2558083f70193acb (diff)
arm64: Add ftrace support
This patch implements the arm64-specific part to support function tracers, such as function (CONFIG_FUNCTION_TRACER), function_graph (CONFIG_FUNCTION_GRAPH_TRACER) and function profiler (CONFIG_FUNCTION_PROFILER). With 'function' tracer, all the functions in the kernel are traced with timestamps in ${sysfs}/tracing/trace. If function_graph tracer is specified, call graph is generated. The kernel must be compiled with -pg option so that _mcount() is inserted at the beginning of functions. This function is called on every function's entry as long as tracing is enabled. In addition, function_graph tracer also needs to be able to probe function's exit. ftrace_graph_caller() & return_to_handler do this by faking link register's value to intercept function's return path. More details on architecture specific requirements are described in Documentation/trace/ftrace-design.txt. Reviewed-by: Ganapatrao Kulkarni <ganapatrao.kulkarni@cavium.com> Acked-by: Will Deacon <will.deacon@arm.com> Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org> Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--arch/arm64/Kconfig2
-rw-r--r--arch/arm64/include/asm/ftrace.h23
-rw-r--r--arch/arm64/kernel/Makefile4
-rw-r--r--arch/arm64/kernel/arm64ksyms.c4
-rw-r--r--arch/arm64/kernel/entry-ftrace.S175
-rw-r--r--arch/arm64/kernel/ftrace.c64
6 files changed, 272 insertions, 0 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c7022a9d5eeb..8a25a4ad3159 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -38,6 +38,8 @@ config ARM64
38 select HAVE_DMA_CONTIGUOUS 38 select HAVE_DMA_CONTIGUOUS
39 select HAVE_EFFICIENT_UNALIGNED_ACCESS 39 select HAVE_EFFICIENT_UNALIGNED_ACCESS
40 select HAVE_FTRACE_MCOUNT_RECORD 40 select HAVE_FTRACE_MCOUNT_RECORD
41 select HAVE_FUNCTION_TRACER
42 select HAVE_FUNCTION_GRAPH_TRACER
41 select HAVE_GENERIC_DMA_COHERENT 43 select HAVE_GENERIC_DMA_COHERENT
42 select HAVE_HW_BREAKPOINT if PERF_EVENTS 44 select HAVE_HW_BREAKPOINT if PERF_EVENTS
43 select HAVE_MEMBLOCK 45 select HAVE_MEMBLOCK
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
new file mode 100644
index 000000000000..58ea5951b198
--- /dev/null
+++ b/arch/arm64/include/asm/ftrace.h
@@ -0,0 +1,23 @@
1/*
2 * arch/arm64/include/asm/ftrace.h
3 *
4 * Copyright (C) 2013 Linaro Limited
5 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#ifndef __ASM_FTRACE_H
12#define __ASM_FTRACE_H
13
14#include <asm/insn.h>
15
16#define MCOUNT_ADDR ((unsigned long)_mcount)
17#define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE
18
19#ifndef __ASSEMBLY__
20extern void _mcount(unsigned long);
21#endif /* __ASSEMBLY__ */
22
23#endif /* __ASM_FTRACE_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 7d811d9522bc..1ebcb4c6b5d8 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -5,6 +5,9 @@
5CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET) 5CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
6AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) 6AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
7 7
8CFLAGS_REMOVE_ftrace.o = -pg
9CFLAGS_REMOVE_insn.o = -pg
10
8# Object file lists. 11# Object file lists.
9arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ 12arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
10 entry-fpsimd.o process.o ptrace.o setup.o signal.o \ 13 entry-fpsimd.o process.o ptrace.o setup.o signal.o \
@@ -13,6 +16,7 @@ arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
13 16
14arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ 17arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
15 sys_compat.o 18 sys_compat.o
19arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
16arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o 20arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
17arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o 21arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o
18arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o 22arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 1edc792b4a1f..a85843ddbde8 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -61,3 +61,7 @@ EXPORT_SYMBOL(clear_bit);
61EXPORT_SYMBOL(test_and_clear_bit); 61EXPORT_SYMBOL(test_and_clear_bit);
62EXPORT_SYMBOL(change_bit); 62EXPORT_SYMBOL(change_bit);
63EXPORT_SYMBOL(test_and_change_bit); 63EXPORT_SYMBOL(test_and_change_bit);
64
65#ifdef CONFIG_FUNCTION_TRACER
66EXPORT_SYMBOL(_mcount);
67#endif
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
new file mode 100644
index 000000000000..b2d8c4559cd8
--- /dev/null
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -0,0 +1,175 @@
1/*
2 * arch/arm64/kernel/entry-ftrace.S
3 *
4 * Copyright (C) 2013 Linaro Limited
5 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/linkage.h>
13#include <asm/ftrace.h>
14#include <asm/insn.h>
15
16/*
17 * Gcc with -pg will put the following code in the beginning of each function:
18 * mov x0, x30
19 * bl _mcount
20 * [function's body ...]
21 * "bl _mcount" may be replaced with "bl ftrace_caller" or NOP if dynamic
22 * ftrace is enabled.
23 *
24 * Please note that x0 as an argument will not be used here because we can
25 * get lr(x30) of instrumented function at any time by winding up call stack
26 * as long as the kernel is compiled without -fomit-frame-pointer.
27 * (or CONFIG_FRAME_POINTER, this is forced on arm64)
28 *
29 * stack layout after mcount_enter in _mcount():
30 *
31 * current sp/fp => 0:+-----+
32 * in _mcount() | x29 | -> instrumented function's fp
33 * +-----+
34 * | x30 | -> _mcount()'s lr (= instrumented function's pc)
35 * old sp => +16:+-----+
36 * when instrumented | |
37 * function calls | ... |
38 * _mcount() | |
39 * | |
40 * instrumented => +xx:+-----+
41 * function's fp | x29 | -> parent's fp
42 * +-----+
43 * | x30 | -> instrumented function's lr (= parent's pc)
44 * +-----+
45 * | ... |
46 */
47
48 .macro mcount_enter
49 stp x29, x30, [sp, #-16]!
50 mov x29, sp
51 .endm
52
53 .macro mcount_exit
54 ldp x29, x30, [sp], #16
55 ret
56 .endm
57
58 .macro mcount_adjust_addr rd, rn
59 sub \rd, \rn, #AARCH64_INSN_SIZE
60 .endm
61
62 /* for instrumented function's parent */
63 .macro mcount_get_parent_fp reg
64 ldr \reg, [x29]
65 ldr \reg, [\reg]
66 .endm
67
68 /* for instrumented function */
69 .macro mcount_get_pc0 reg
70 mcount_adjust_addr \reg, x30
71 .endm
72
73 .macro mcount_get_pc reg
74 ldr \reg, [x29, #8]
75 mcount_adjust_addr \reg, \reg
76 .endm
77
78 .macro mcount_get_lr reg
79 ldr \reg, [x29]
80 ldr \reg, [\reg, #8]
81 mcount_adjust_addr \reg, \reg
82 .endm
83
84 .macro mcount_get_lr_addr reg
85 ldr \reg, [x29]
86 add \reg, \reg, #8
87 .endm
88
89/*
90 * void _mcount(unsigned long return_address)
91 * @return_address: return address to instrumented function
92 *
93 * This function makes calls, if enabled, to:
94 * - tracer function to probe instrumented function's entry,
95 * - ftrace_graph_caller to set up an exit hook
96 */
97ENTRY(_mcount)
98#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
99 ldr x0, =ftrace_trace_stop
100 ldr x0, [x0] // if ftrace_trace_stop
101 ret // return;
102#endif
103 mcount_enter
104
105 ldr x0, =ftrace_trace_function
106 ldr x2, [x0]
107 adr x0, ftrace_stub
108 cmp x0, x2 // if (ftrace_trace_function
109 b.eq skip_ftrace_call // != ftrace_stub) {
110
111 mcount_get_pc x0 // function's pc
112 mcount_get_lr x1 // function's lr (= parent's pc)
113 blr x2 // (*ftrace_trace_function)(pc, lr);
114
115#ifndef CONFIG_FUNCTION_GRAPH_TRACER
116skip_ftrace_call: // return;
117 mcount_exit // }
118#else
119 mcount_exit // return;
120 // }
121skip_ftrace_call:
122 ldr x1, =ftrace_graph_return
123 ldr x2, [x1] // if ((ftrace_graph_return
124 cmp x0, x2 // != ftrace_stub)
125 b.ne ftrace_graph_caller
126
127 ldr x1, =ftrace_graph_entry // || (ftrace_graph_entry
128 ldr x2, [x1] // != ftrace_graph_entry_stub))
129 ldr x0, =ftrace_graph_entry_stub
130 cmp x0, x2
131 b.ne ftrace_graph_caller // ftrace_graph_caller();
132
133 mcount_exit
134#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
135ENDPROC(_mcount)
136
137ENTRY(ftrace_stub)
138 ret
139ENDPROC(ftrace_stub)
140
141#ifdef CONFIG_FUNCTION_GRAPH_TRACER
142/*
143 * void ftrace_graph_caller(void)
144 *
145 * Called from _mcount() or ftrace_caller() when function_graph tracer is
146 * selected.
147 * This function w/ prepare_ftrace_return() fakes link register's value on
148 * the call stack in order to intercept instrumented function's return path
149 * and run return_to_handler() later on its exit.
150 */
151ENTRY(ftrace_graph_caller)
152 mcount_get_lr_addr x0 // pointer to function's saved lr
153 mcount_get_pc x1 // function's pc
154 mcount_get_parent_fp x2 // parent's fp
155 bl prepare_ftrace_return // prepare_ftrace_return(&lr, pc, fp)
156
157 mcount_exit
158ENDPROC(ftrace_graph_caller)
159
160/*
161 * void return_to_handler(void)
162 *
163 * Run ftrace_return_to_handler() before going back to parent.
164 * @fp is checked against the value passed by ftrace_graph_caller()
165 * only when CONFIG_FUNCTION_GRAPH_FP_TEST is enabled.
166 */
167ENTRY(return_to_handler)
168 str x0, [sp, #-16]!
169 mov x0, x29 // parent's fp
170	bl	ftrace_return_to_handler// addr = ftrace_return_to_handler(fp);
171 mov x30, x0 // restore the original return address
172 ldr x0, [sp], #16
173 ret
174END(return_to_handler)
175#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
new file mode 100644
index 000000000000..a559ab86999b
--- /dev/null
+++ b/arch/arm64/kernel/ftrace.c
@@ -0,0 +1,64 @@
1/*
2 * arch/arm64/kernel/ftrace.c
3 *
4 * Copyright (C) 2013 Linaro Limited
5 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/ftrace.h>
13#include <linux/swab.h>
14#include <linux/uaccess.h>
15
16#include <asm/cacheflush.h>
17#include <asm/ftrace.h>
18#include <asm/insn.h>
19
20#ifdef CONFIG_FUNCTION_GRAPH_TRACER
21/*
22 * function_graph tracer expects ftrace_return_to_handler() to be called
23 * on the way back to parent. For this purpose, this function is called
24 * in _mcount() or ftrace_caller() to replace return address (*parent) on
25 * the call stack to return_to_handler.
26 *
27 * Note that @frame_pointer is used only for sanity check later.
28 */
29void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
30 unsigned long frame_pointer)
31{
32 unsigned long return_hooker = (unsigned long)&return_to_handler;
33 unsigned long old;
34 struct ftrace_graph_ent trace;
35 int err;
36
37 if (unlikely(atomic_read(&current->tracing_graph_pause)))
38 return;
39
40 /*
41 * Note:
42 * No protection against faulting at *parent, which may be seen
43 * on other archs. It's unlikely on AArch64.
44 */
45 old = *parent;
46 *parent = return_hooker;
47
48 trace.func = self_addr;
49 trace.depth = current->curr_ret_stack + 1;
50
51 /* Only trace if the calling function expects to */
52 if (!ftrace_graph_entry(&trace)) {
53 *parent = old;
54 return;
55 }
56
57 err = ftrace_push_return_trace(old, self_addr, &trace.depth,
58 frame_pointer);
59 if (err == -EBUSY) {
60 *parent = old;
61 return;
62 }
63}
64#endif /* CONFIG_FUNCTION_GRAPH_TRACER */