| author | Matt Fleming <matt@console-pimps.org> | 2009-07-10 20:29:03 -0400 |
|---|---|---|
| committer | Paul Mundt <lethal@linux-sh.org> | 2009-07-10 21:08:01 -0400 |
| commit | 327933f5d6cdf083284d3c06e0370d1de464aef4 (patch) | |
| tree | 38046aa3e6b605bf4e16c5d7ac3968f5fa656e8f /arch | |
| parent | b99610fb9cdf390965c62c22322596d961591160 (diff) | |
sh: Function graph tracer support
Add both dynamic and static function graph tracer support for sh.
Signed-off-by: Matt Fleming <matt@console-pimps.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | arch/sh/Kconfig | 1 |
| -rw-r--r-- | arch/sh/include/asm/ftrace.h | 3 |
| -rw-r--r-- | arch/sh/kernel/Makefile_32 | 1 |
| -rw-r--r-- | arch/sh/kernel/ftrace.c | 122 |
| -rw-r--r-- | arch/sh/kernel/vmlinux_64.lds.S | 0 |
| -rw-r--r-- | arch/sh/lib/Makefile | 1 |
| -rw-r--r-- | arch/sh/lib/mcount.S | 117 |
7 files changed, 244 insertions(+), 1 deletion(-)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 29e41ec6537d..6d110a4f7f65 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -33,6 +33,7 @@ config SUPERH32
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_FTRACE_SYSCALLS
+	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_ARCH_KGDB
 	select ARCH_HIBERNATION_POSSIBLE if MMU
 
diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h
index b09311ad1db3..7e0bcc4d4a96 100644
--- a/arch/sh/include/asm/ftrace.h
+++ b/arch/sh/include/asm/ftrace.h
@@ -13,8 +13,11 @@ extern void mcount(void);
 #ifdef CONFIG_DYNAMIC_FTRACE
 #define CALL_ADDR	((long)(ftrace_call))
 #define STUB_ADDR	((long)(ftrace_stub))
+#define GRAPH_ADDR	((long)(ftrace_graph_call))
+#define CALLER_ADDR	((long)(ftrace_caller))
 
 #define MCOUNT_INSN_OFFSET	((STUB_ADDR - CALL_ADDR) - 4)
+#define GRAPH_INSN_OFFSET	((CALLER_ADDR - GRAPH_ADDR) - 4)
 
 struct dyn_arch_ftrace {
 	/* No extra data needed on sh */
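GRAPH_INSN_OFFSET mirrors MCOUNT_INSN_OFFSET: it is the distance from the ftrace_graph_call label to the 4-byte literal holding its branch target, which mcount.S deliberately keeps as the last word before ftrace_caller (see the NOTE in the mcount.S hunk below). ftrace_enable_ftrace_graph_caller() and ftrace_disable_ftrace_graph_caller() rewrite that word. A minimal userspace sketch of the address arithmetic, not part of the patch and with made-up addresses:

#include <stdio.h>

/* Hypothetical link addresses, chosen only for illustration. */
#define GRAPH_ADDR	0x8c0011a0UL	/* ftrace_graph_call */
#define CALLER_ADDR	0x8c0011acUL	/* ftrace_caller     */

#define GRAPH_INSN_OFFSET	((CALLER_ADDR - GRAPH_ADDR) - 4)

int main(void)
{
	/*
	 * The word rewritten by ftrace_enable_ftrace_graph_caller() sits
	 * GRAPH_INSN_OFFSET bytes past ftrace_graph_call, i.e. the 4-byte
	 * literal immediately before ftrace_caller.
	 */
	printf("patched word at %#lx\n", GRAPH_ADDR + GRAPH_INSN_OFFSET);
	return 0;
}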
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
index fee924a9c46c..94ed99b68002 100644
--- a/arch/sh/kernel/Makefile_32
+++ b/arch/sh/kernel/Makefile_32
@@ -30,6 +30,7 @@ obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_GENERIC_GPIO)	+= gpio.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
 obj-$(CONFIG_DUMP_CODE)		+= disassemble.o
 obj-$(CONFIG_HIBERNATION)	+= swsusp.o
 
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 4f62eced0aec..6647dfcb781d 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -16,11 +16,13 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/kernel.h>
 #include <asm/ftrace.h>
 #include <asm/cacheflush.h>
 #include <asm/unistd.h>
 #include <trace/syscall.h>
 
+#ifdef CONFIG_DYNAMIC_FTRACE
 static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];
 
 static unsigned char ftrace_nop[4];
@@ -133,6 +135,126 @@ int __init ftrace_dyn_arch_init(void *data)
 
 	return 0;
 }
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+static int ftrace_mod(unsigned long ip, unsigned long old_addr,
+		      unsigned long new_addr)
+{
+	unsigned char code[MCOUNT_INSN_SIZE];
+
+	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	if (old_addr != __raw_readl((unsigned long *)code))
+		return -EINVAL;
+
+	__raw_writel(new_addr, ip);
+	return 0;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned long ip, old_addr, new_addr;
+
+	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+	old_addr = (unsigned long)(&skip_trace);
+	new_addr = (unsigned long)(&ftrace_graph_caller);
+
+	return ftrace_mod(ip, old_addr, new_addr);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned long ip, old_addr, new_addr;
+
+	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+	old_addr = (unsigned long)(&ftrace_graph_caller);
+	new_addr = (unsigned long)(&skip_trace);
+
+	return ftrace_mod(ip, old_addr, new_addr);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in the current thread info.
+ *
+ * This is the main routine for the function graph tracer. The function
+ * graph tracer essentially works like this:
+ *
+ * parent is the stack address containing self_addr's return address.
+ * We pull the real return address out of parent and store it in
+ * current's ret_stack. Then, we replace the return address on the stack
+ * with the address of return_to_handler. self_addr is the function that
+ * called mcount.
+ *
+ * When self_addr returns, it will jump to return_to_handler which calls
+ * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
+ * return address off of current's ret_stack and jump to it.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+	unsigned long old;
+	int faulted, err;
+	struct ftrace_graph_ent trace;
+	unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return;
+
+	/*
+	 * Protect against fault, even if it shouldn't
+	 * happen. This tool is too much intrusive to
+	 * ignore such a protection.
+	 */
+	__asm__ __volatile__(
+		"1:					\n\t"
+		"mov.l	@%2, %0				\n\t"
+		"2:					\n\t"
+		"mov.l	%3, @%2				\n\t"
+		"mov	#0, %1				\n\t"
+		"3:					\n\t"
+		".section .fixup, \"ax\"		\n\t"
+		"4:					\n\t"
+		"mov.l	5f, %0				\n\t"
+		"jmp	@%0				\n\t"
+		" mov	#1, %1				\n\t"
+		".balign 4				\n\t"
+		"5:	.long 3b			\n\t"
+		".previous				\n\t"
+		".section __ex_table,\"a\"		\n\t"
+		".long 1b, 4b				\n\t"
+		".long 2b, 4b				\n\t"
+		".previous				\n\t"
+		: "=&r" (old), "=r" (faulted)
+		: "r" (parent), "r" (return_hooker)
+	);
+
+	if (unlikely(faulted)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		return;
+	}
+
+	err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
+	if (err == -EBUSY) {
+		__raw_writel(old, parent);
+		return;
+	}
+
+	trace.func = self_addr;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		__raw_writel(old, parent);
+	}
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 #ifdef CONFIG_FTRACE_SYSCALLS
 
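For readers not fluent in SH inline assembly, the fault-protected load/store in prepare_ftrace_return() boils down to the sketch below. This is only illustrative: it omits the .fixup/__ex_table machinery that sets faulted to 1 if either memory access faults, and the names here are stand-ins rather than kernel code.

#include <stdio.h>

/*
 * Illustrative C equivalent of the inline assembly above: read the
 * caller's saved return address through 'parent', then overwrite it
 * with the address of return_to_handler.
 */
static int hook_return_address(unsigned long *parent,
			       unsigned long return_hooker,
			       unsigned long *old)
{
	*old = *parent;			/* 1: mov.l @%2, %0 */
	*parent = return_hooker;	/* 2: mov.l %3, @%2 */
	return 0;			/*    mov #0, %1 -> faulted = 0 */
}

int main(void)
{
	unsigned long fake_ret = 0x8c001234UL;	/* pretend saved return address */
	unsigned long old;

	/* 0x8c005678UL stands in for the address of return_to_handler. */
	hook_return_address(&fake_ret, 0x8c005678UL, &old);
	printf("old=%#lx new=%#lx\n", old, fake_ret);
	return 0;
}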
diff --git a/arch/sh/kernel/vmlinux_64.lds.S b/arch/sh/kernel/vmlinux_64.lds.S
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/arch/sh/kernel/vmlinux_64.lds.S
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
index aaea580b65bb..19328d90a2d1 100644
--- a/arch/sh/lib/Makefile
+++ b/arch/sh/lib/Makefile
@@ -25,6 +25,7 @@ memcpy-$(CONFIG_CPU_SH4)	:= memcpy-sh4.o
 
 lib-$(CONFIG_MMU)		+= copy_page.o clear_page.o
 lib-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
+lib-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= mcount.o
 lib-y				+= $(memcpy-y) $(udivsi3-y)
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S
index 8596483f7b41..bd3ec648becc 100644
--- a/arch/sh/lib/mcount.S
+++ b/arch/sh/lib/mcount.S
@@ -111,14 +111,62 @@ mcount_call:
 	jsr	@r6
 	nop
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	mov.l	.Lftrace_graph_return, r6
+	mov.l	.Lftrace_stub, r7
+	cmp/eq	r6, r7
+	bt	1f
+
+	mov.l	.Lftrace_graph_caller, r0
+	jmp	@r0
+	nop
+
+1:
+	mov.l	.Lftrace_graph_entry, r6
+	mov.l	.Lftrace_graph_entry_stub, r7
+	cmp/eq	r6, r7
+	bt	skip_trace
+
+	mov.l	.Lftrace_graph_caller, r0
+	jmp	@r0
+	nop
+
+	.align 2
+.Lftrace_graph_return:
+	.long	ftrace_graph_return
+.Lftrace_graph_entry:
+	.long	ftrace_graph_entry
+.Lftrace_graph_entry_stub:
+	.long	ftrace_graph_entry_stub
+.Lftrace_graph_caller:
+	.long	ftrace_graph_caller
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+	.globl skip_trace
 skip_trace:
 	MCOUNT_LEAVE()
 
 	.align 2
 .Lftrace_trace_function:
 	.long	ftrace_trace_function
 
 #ifdef CONFIG_DYNAMIC_FTRACE
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * NOTE: Do not move either ftrace_graph_call or ftrace_caller
+ * as this will affect the calculation of GRAPH_INSN_OFFSET.
+ */
+	.globl ftrace_graph_call
+ftrace_graph_call:
+	mov.l	.Lskip_trace, r0
+	jmp	@r0
+	nop
+
+	.align 2
+.Lskip_trace:
+	.long	skip_trace
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 	.globl ftrace_caller
 ftrace_caller:
 	mov.l	.Lfunction_trace_stop, r0
@@ -136,7 +184,12 @@ ftrace_call:
 	jsr	@r6
 	nop
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	bra	ftrace_graph_call
+	nop
+#else
 	MCOUNT_LEAVE()
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 /*
@@ -188,3 +241,65 @@ stack_panic:
 .Lpanic_str:
 	.string "Stack error"
 #endif /* CONFIG_STACK_DEBUG */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl	ftrace_graph_caller
+ftrace_graph_caller:
+	mov.l	2f, r0
+	mov.l	@r0, r0
+	tst	r0, r0
+	bt	1f
+
+	mov.l	3f, r1
+	jmp	@r1
+	nop
+1:
+	/*
+	 * MCOUNT_ENTER() pushed 5 registers onto the stack, so
+	 * the stack address containing our return address is
+	 * r15 + 20.
+	 */
+	mov	#20, r0
+	add	r15, r0
+	mov	r0, r4
+
+	mov.l	.Lprepare_ftrace_return, r0
+	jsr	@r0
+	nop
+
+	MCOUNT_LEAVE()
+
+	.align 2
+2:	.long	function_trace_stop
+3:	.long	skip_trace
+.Lprepare_ftrace_return:
+	.long	prepare_ftrace_return
+
+	.globl	return_to_handler
+return_to_handler:
+	/*
+	 * Save the return values.
+	 */
+	mov.l	r0, @-r15
+	mov.l	r1, @-r15
+
+	mov	#0, r4
+
+	mov.l	.Lftrace_return_to_handler, r0
+	jsr	@r0
+	nop
+
+	/*
+	 * The return value from ftrace_return_handler has the real
+	 * address that we should return to.
+	 */
+	lds	r0, pr
+	mov.l	@r15+, r1
+	rts
+	mov.l	@r15+, r0
+
+
+	.align 2
+.Lftrace_return_to_handler:
+	.long	ftrace_return_to_handler
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
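The static (non-CONFIG_DYNAMIC_FTRACE) path added to mcount above decides whether to jump to ftrace_graph_caller by comparing the generic ftrace_graph_return and ftrace_graph_entry hooks against their stub values, falling through to skip_trace when neither hook has been installed. A rough userspace sketch of that branch logic follows; the typedefs and stub bodies are stand-ins for illustration only, not the kernel definitions.

#include <stdio.h>

/* Stand-ins for the kernel symbols referenced by mcount.S. */
typedef void (*graph_ret_t)(void);
typedef int  (*graph_ent_t)(void);

static void ftrace_stub(void) { }
static int  ftrace_graph_entry_stub(void) { return 0; }

/* Default state: both hooks still point at their stubs. */
static graph_ret_t ftrace_graph_return = (graph_ret_t)ftrace_stub;
static graph_ent_t ftrace_graph_entry  = ftrace_graph_entry_stub;

int main(void)
{
	/*
	 * Equivalent of the two cmp/eq + bt pairs in mcount: take the
	 * graph-caller path only if either hook has been replaced.
	 */
	if (ftrace_graph_return != (graph_ret_t)ftrace_stub ||
	    ftrace_graph_entry  != ftrace_graph_entry_stub)
		puts("-> ftrace_graph_caller");
	else
		puts("-> skip_trace");
	return 0;
}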