Diffstat (limited to 'arch/sh/kernel/ftrace.c')
-rw-r--r--  arch/sh/kernel/ftrace.c | 188
1 file changed, 188 insertions, 0 deletions
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 066f37dc32a9..a3dcc6d5d253 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -16,9 +16,13 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/kernel.h>
 #include <asm/ftrace.h>
 #include <asm/cacheflush.h>
+#include <asm/unistd.h>
+#include <trace/syscall.h>
 
+#ifdef CONFIG_DYNAMIC_FTRACE
 static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];
 
 static unsigned char ftrace_nop[4];
@@ -131,3 +135,187 @@ int __init ftrace_dyn_arch_init(void *data)
 
 	return 0;
 }
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+static int ftrace_mod(unsigned long ip, unsigned long old_addr,
+		      unsigned long new_addr)
+{
+	unsigned char code[MCOUNT_INSN_SIZE];
+
+	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	if (old_addr != __raw_readl((unsigned long *)code))
+		return -EINVAL;
+
+	__raw_writel(new_addr, ip);
+	return 0;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned long ip, old_addr, new_addr;
+
+	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+	old_addr = (unsigned long)(&skip_trace);
+	new_addr = (unsigned long)(&ftrace_graph_caller);
+
+	return ftrace_mod(ip, old_addr, new_addr);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned long ip, old_addr, new_addr;
+
+	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+	old_addr = (unsigned long)(&ftrace_graph_caller);
+	new_addr = (unsigned long)(&skip_trace);
+
+	return ftrace_mod(ip, old_addr, new_addr);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
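On SH, the call through ftrace_graph_call loads its target address from a constant-pool word (GRAPH_INSN_OFFSET points at that literal), so switching the graph caller on or off is a single aligned 32-bit store once the old value has been verified. Below is a minimal user-space sketch of that check-then-patch pattern; the names are illustrative, not kernel API, and the real code reads through probe_kernel_read() so a bad address faults safely instead of crashing.

#include <errno.h>
#include <stdint.h>
#include <string.h>

/* Verify the patch site still holds what we expect, then overwrite it. */
static int patch_word(uint32_t *site, uint32_t expected, uint32_t replacement)
{
	uint32_t cur;

	memcpy(&cur, site, sizeof(cur));	/* safe read in the real code */
	if (cur != expected)
		return -EINVAL;			/* site changed under us: bail out */

	memcpy(site, &replacement, sizeof(replacement));
	return 0;
}

Checking the old value first is what lets enable/disable be symmetric: each direction names the address it expects to find, and a mismatch returns -EINVAL rather than clobbering unknown code.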
+
+/*
+ * Hook the return address and push it onto the stack of return
+ * addresses in the current thread info.
+ *
+ * This is the main routine for the function graph tracer. The function
+ * graph tracer essentially works like this:
+ *
+ * parent is the stack address containing self_addr's return address.
+ * We pull the real return address out of parent and store it in
+ * current's ret_stack. Then, we replace the return address on the stack
+ * with the address of return_to_handler. self_addr is the function that
+ * called mcount.
+ *
+ * When self_addr returns, it will jump to return_to_handler, which calls
+ * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
+ * return address off of current's ret_stack and jump to it.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+	unsigned long old;
+	int faulted, err;
+	struct ftrace_graph_ent trace;
+	unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return;
+
+	/*
+	 * Protect against a fault, even if it shouldn't
+	 * happen. This tool is too intrusive to ignore
+	 * such a protection.
+	 */
+	__asm__ __volatile__(
+		"1:				\n\t"
+		"mov.l	@%2, %0			\n\t"
+		"2:				\n\t"
+		"mov.l	%3, @%2			\n\t"
+		"mov	#0, %1			\n\t"
+		"3:				\n\t"
+		".section .fixup, \"ax\"	\n\t"
+		"4:				\n\t"
+		"mov.l	5f, %0			\n\t"
+		"jmp	@%0			\n\t"
+		" mov	#1, %1			\n\t"
+		".balign 4			\n\t"
+		"5:	.long 3b		\n\t"
+		".previous			\n\t"
+		".section __ex_table,\"a\"	\n\t"
+		".long 1b, 4b			\n\t"
+		".long 2b, 4b			\n\t"
+		".previous			\n\t"
+		: "=&r" (old), "=r" (faulted)
+		: "r" (parent), "r" (return_hooker)
+	);
+
+	if (unlikely(faulted)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		return;
+	}
+
+	err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
+	if (err == -EBUSY) {
+		__raw_writel(old, parent);
+		return;
+	}
+
+	trace.func = self_addr;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		__raw_writel(old, parent);
+	}
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
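Note that the load and store of *parent are done in inline assembly with __ex_table fixup entries, so a bogus parent pointer sets `faulted` instead of oopsing. The block comment above describes the full round trip; here is a simplified model of it, assuming a flat file-scope array in place of the per-task current->ret_stack, with illustrative names throughout.

/* Simplified model of the return-address hook; not kernel API. */
#define RET_STACK_DEPTH 50

struct ret_entry {
	unsigned long ret;	/* the caller's real return address */
	unsigned long func;	/* the traced function (self_addr) */
};

static struct ret_entry ret_stack[RET_STACK_DEPTH];
static int curr_ret_stack = -1;

/* prepare_ftrace_return(): stash the real address, divert to the hook */
static int hook_return(unsigned long *parent, unsigned long self_addr,
		       unsigned long return_hooker)
{
	if (curr_ret_stack >= RET_STACK_DEPTH - 1)
		return -1;		/* mirrors the -EBUSY bail-out above */

	curr_ret_stack++;
	ret_stack[curr_ret_stack].ret = *parent;
	ret_stack[curr_ret_stack].func = self_addr;
	*parent = return_hooker;	/* function now "returns" into the hook */
	return 0;
}

/* return_to_handler: pop the saved address and resume the real caller */
static unsigned long unhook_return(void)
{
	return ret_stack[curr_ret_stack--].ret;
}

Every bail-out path in the real function undoes the diversion by writing the saved address back through parent, which is why both the -EBUSY case and the !ftrace_graph_entry() case end with __raw_writel(old, parent).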
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+extern unsigned long __start_syscalls_metadata[];
+extern unsigned long __stop_syscalls_metadata[];
+extern unsigned long *sys_call_table;
+
+static struct syscall_metadata **syscalls_metadata;
+
+static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
+{
+	struct syscall_metadata *start;
+	struct syscall_metadata *stop;
+	char str[KSYM_SYMBOL_LEN];
+
+
+	start = (struct syscall_metadata *)__start_syscalls_metadata;
+	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
+	kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);
+
+	for ( ; start < stop; start++) {
+		if (start->name && !strcmp(start->name, str))
+			return start;
+	}
+
+	return NULL;
+}
+
+struct syscall_metadata *syscall_nr_to_meta(int nr)
+{
+	if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0)
+		return NULL;
+
+	return syscalls_metadata[nr];
+}
+
+void arch_init_ftrace_syscalls(void)
+{
+	int i;
+	struct syscall_metadata *meta;
+	unsigned long **psys_syscall_table = &sys_call_table;
+	static atomic_t refs;
+
+	if (atomic_inc_return(&refs) != 1)
+		goto end;
+
+	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
+					FTRACE_SYSCALL_MAX, GFP_KERNEL);
+	if (!syscalls_metadata) {
+		WARN_ON(1);
+		return;
+	}
+
+	for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
+		meta = find_syscall_meta(psys_syscall_table[i]);
+		syscalls_metadata[i] = meta;
+	}
+	return;
+
+	/* Paranoid: avoid overflow */
+end:
+	atomic_dec(&refs);
+}
+#endif /* CONFIG_FTRACE_SYSCALLS */
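find_syscall_meta() binds each sys_call_table entry to its metadata record by name: kallsyms_lookup() resolves the handler's address to a symbol string, which is then matched against the records between __start_syscalls_metadata and __stop_syscalls_metadata. arch_init_ftrace_syscalls() itself may be reached more than once, so the one-time allocation is guarded by a static atomic counter: only the caller that takes the count to 1 does the work, and every later caller backs its own increment out again (the "Paranoid: avoid overflow" path). A hedged, stand-alone sketch of that once-only guard, using C11 atomics and illustrative names:

#include <stdatomic.h>

static atomic_int refs;
static int metadata_ready;	/* stand-in for the syscalls_metadata setup */

static void init_once(void)
{
	/* fetch_add returns the previous value: nonzero means not first */
	if (atomic_fetch_add(&refs, 1) != 0) {
		atomic_fetch_sub(&refs, 1);	/* back out our increment */
		return;
	}

	metadata_ready = 1;	/* one-time initialization happens here */
}

Decrementing on the duplicate path is what keeps the counter pinned near 1 no matter how many times the function is called, so it can never wrap around and accidentally let a second caller redo the initialization.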