| author | AKASHI Takahiro <takahiro.akashi@linaro.org> | 2014-04-30 05:54:34 -0400 |
|---|---|---|
| committer | Will Deacon <will.deacon@arm.com> | 2014-05-29 04:08:33 -0400 |
| commit | bd7d38dbdf356e75eb3b1699158c9b8021fd6784 | |
| tree | edad0afa91c1774cfaba6319b319d65db244123d | |
| parent | 819e50e25d0ce8a75f5cba815416a6a8573655c4 | |
arm64: ftrace: Add dynamic ftrace support
This patch allows "dynamic ftrace" if CONFIG_DYNAMIC_FTRACE is enabled.
With it, tracing can be turned on and off dynamically on a per-function
basis.

On arm64, this is done by patching the single branch instruction to
_mcount() that gcc's -pg option inserts at each function entry. The branch
is replaced with a NOP at kernel start-up; later, the NOP is patched into a
branch to ftrace_caller() when tracing is enabled, or back into a NOP when
it is disabled.

Please note that ftrace_caller() is the counterpart of _mcount() in the
case of 'static' ftrace.

More details on the architecture-specific requirements are described in
Documentation/trace/ftrace-design.txt.
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
```
 arch/arm64/Kconfig               |   1 +
 arch/arm64/include/asm/ftrace.h  |  15 +
 arch/arm64/kernel/entry-ftrace.S |  43 +
 arch/arm64/kernel/ftrace.c       | 112 +
 4 files changed, 171 insertions(+), 0 deletions(-)
```
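As background for the mechanism the commit message describes (not part of the patch): each patched call site holds either an AArch64 NOP or a BL to ftrace_caller(), and enabling or disabling tracing amounts to swapping one 32-bit word for the other. Below is a minimal sketch of the two encodings involved; the standalone helper and its name are illustrative only, since the kernel generates these instructions through aarch64_insn_gen_nop() and aarch64_insn_gen_branch_imm().

```c
#include <stdint.h>

#define AARCH64_NOP	0xd503201fu	/* the NOP left at each disabled call site */

/*
 * Illustrative only: encode "BL <target>" for a call site at 'pc'.
 * BL is opcode 0x94000000 with a signed 26-bit word offset, giving a
 * +/-128MB range, which is why the patch site and ftrace_caller()
 * must be within branch range of each other.
 */
static uint32_t encode_bl(uint64_t pc, uint64_t target)
{
	int64_t offset = ((int64_t)target - (int64_t)pc) >> 2;

	return 0x94000000u | ((uint32_t)offset & 0x03ffffffu);
}
```

In the patch below, ftrace_make_call() writes such a branch over the NOP at rec->ip, and ftrace_make_nop() writes the NOP back; only that one 32-bit word at the call site ever changes.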
```diff
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 8a25a4ad3159..4e689ad5f27c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -36,6 +36,7 @@ config ARM64
 	select HAVE_DMA_API_DEBUG
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_CONTIGUOUS
+	select HAVE_DYNAMIC_FTRACE
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_TRACER
```
```diff
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 58ea5951b198..ed5c448ece99 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -18,6 +18,21 @@
 
 #ifndef __ASSEMBLY__
 extern void _mcount(unsigned long);
+
+struct dyn_arch_ftrace {
+	/* No extra data needed for arm64 */
+};
+
+extern unsigned long ftrace_graph_call;
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	/*
+	 * addr is the address of the mcount call instruction.
+	 * recordmcount does the necessary offset calculation.
+	 */
+	return addr;
+}
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_FTRACE_H */
```
```diff
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index b2d8c4559cd8..b051871f2965 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -86,6 +86,7 @@
 	add	\reg, \reg, #8
 	.endm
 
+#ifndef CONFIG_DYNAMIC_FTRACE
 /*
  * void _mcount(unsigned long return_address)
  * @return_address: return address to instrumented function
@@ -134,6 +135,48 @@ skip_ftrace_call:
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 ENDPROC(_mcount)
 
+#else /* CONFIG_DYNAMIC_FTRACE */
+/*
+ * _mcount() is used to build the kernel with -pg option, but all the branch
+ * instructions to _mcount() are replaced to NOP initially at kernel start up,
+ * and later on, NOP to branch to ftrace_caller() when enabled or branch to
+ * NOP when disabled per-function base.
+ */
+ENTRY(_mcount)
+	ret
+ENDPROC(_mcount)
+
+/*
+ * void ftrace_caller(unsigned long return_address)
+ * @return_address: return address to instrumented function
+ *
+ * This function is a counterpart of _mcount() in 'static' ftrace, and
+ * makes calls to:
+ *     - tracer function to probe instrumented function's entry,
+ *     - ftrace_graph_caller to set up an exit hook
+ */
+ENTRY(ftrace_caller)
+	mcount_enter
+
+	mcount_get_pc0	x0		//     function's pc
+	mcount_get_lr	x1		//     function's lr
+
+	.global ftrace_call
+ftrace_call:				// tracer(pc, lr);
+	nop				// This will be replaced with "bl xxx"
+					// where xxx can be any kind of tracer.
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.global ftrace_graph_call
+ftrace_graph_call:			// ftrace_graph_caller();
+	nop				// If enabled, this will be replaced
+					// "b ftrace_graph_caller"
+#endif
+
+	mcount_exit
+ENDPROC(ftrace_caller)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
 ENTRY(ftrace_stub)
 	ret
 ENDPROC(ftrace_stub)
```
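Which functions actually reach the patchable ftrace_call and ftrace_graph_call slots above is chosen at run time through the tracing debugfs interface. A minimal userspace sketch follows; it assumes the conventional /sys/kernel/debug/tracing mount point of this era, must run as root, and uses "do_fork" purely as an example filter pattern.

```c
#include <stdio.h>
#include <stdlib.h>

/* Write a string to a tracing control file, aborting on failure. */
static void write_file(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	const char *dir = "/sys/kernel/debug/tracing";
	char path[128];

	/* Restrict tracing to a single function (example name only). */
	snprintf(path, sizeof(path), "%s/set_ftrace_filter", dir);
	write_file(path, "do_fork\n");

	/*
	 * Enable the function tracer: with dynamic ftrace this is roughly
	 * the point at which the filtered call sites' NOPs are patched
	 * into branches to ftrace_caller().
	 */
	snprintf(path, sizeof(path), "%s/current_tracer", dir);
	write_file(path, "function\n");

	return 0;
}
```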
```diff
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index a559ab86999b..7924d73b6476 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -17,6 +17,87 @@
 #include <asm/ftrace.h>
 #include <asm/insn.h>
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Replace a single instruction, which may be a branch or NOP.
+ * If @validate == true, a replaced instruction is checked against 'old'.
+ */
+static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
+			      bool validate)
+{
+	u32 replaced;
+
+	/*
+	 * Note:
+	 * Due to modules and __init, code can disappear and change,
+	 * we need to protect against faulting as well as code changing.
+	 * We do this by aarch64_insn_*() which use the probe_kernel_*().
+	 *
+	 * No lock is held here because all the modifications are run
+	 * through stop_machine().
+	 */
+	if (validate) {
+		if (aarch64_insn_read((void *)pc, &replaced))
+			return -EFAULT;
+
+		if (replaced != old)
+			return -EINVAL;
+	}
+	if (aarch64_insn_patch_text_nosync((void *)pc, new))
+		return -EPERM;
+
+	return 0;
+}
+
+/*
+ * Replace tracer function in ftrace_caller()
+ */
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	unsigned long pc;
+	u32 new;
+
+	pc = (unsigned long)&ftrace_call;
+	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, true);
+
+	return ftrace_modify_code(pc, 0, new, false);
+}
+
+/*
+ * Turn on the call to ftrace_caller() in instrumented function
+ */
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned long pc = rec->ip;
+	u32 old, new;
+
+	old = aarch64_insn_gen_nop();
+	new = aarch64_insn_gen_branch_imm(pc, addr, true);
+
+	return ftrace_modify_code(pc, old, new, true);
+}
+
+/*
+ * Turn off the call to ftrace_caller() in instrumented function
+ */
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+		    unsigned long addr)
+{
+	unsigned long pc = rec->ip;
+	u32 old, new;
+
+	old = aarch64_insn_gen_branch_imm(pc, addr, true);
+	new = aarch64_insn_gen_nop();
+
+	return ftrace_modify_code(pc, old, new, true);
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+	return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
  * function_graph tracer expects ftrace_return_to_handler() to be called
@@ -61,4 +142,35 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 		return;
 	}
 }
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
+ * depending on @enable.
+ */
+static int ftrace_modify_graph_caller(bool enable)
+{
+	unsigned long pc = (unsigned long)&ftrace_graph_call;
+	u32 branch, nop;
+
+	branch = aarch64_insn_gen_branch_imm(pc,
+			(unsigned long)ftrace_graph_caller, false);
+	nop = aarch64_insn_gen_nop();
+
+	if (enable)
+		return ftrace_modify_code(pc, nop, branch, true);
+	else
+		return ftrace_modify_code(pc, branch, nop, true);
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
```
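From inside the kernel, these arch hooks are driven by the core ftrace code whenever an ftrace_ops is registered or unregistered. The following is a rough sketch of a minimal in-kernel user, assuming the ftrace callback signature of this kernel era (struct pt_regs rather than the later ftrace_regs); the names my_trace_func and my_ops are placeholders, not part of the patch.

```c
#include <linux/ftrace.h>
#include <linux/module.h>

/*
 * Callback that the ftrace core dispatches to from ftrace_caller()
 * once the call-site NOPs have been patched into branches.
 */
static void my_trace_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *regs)
{
	/* Runs on entry to every traced function; keep it cheap. */
}

static struct ftrace_ops my_ops = {
	.func = my_trace_func,
};

static int __init my_tracer_init(void)
{
	/*
	 * Registering the ops is what exercises the arch hooks above:
	 * ftrace_make_call() flips each recorded NOP into a branch to
	 * ftrace_caller(), and ftrace_update_ftrace_func() points the
	 * ftrace_call slot at the core's dispatcher.
	 */
	return register_ftrace_function(&my_ops);
}

static void __exit my_tracer_exit(void)
{
	/* Unregistering patches the call sites back to NOPs. */
	unregister_ftrace_function(&my_ops);
}

module_init(my_tracer_init);
module_exit(my_tracer_exit);
MODULE_LICENSE("GPL");
```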