Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/ftrace.h | 25
-rw-r--r--  arch/x86/kernel/dumpstack.c   |  1
-rw-r--r--  arch/x86/kernel/ftrace.c      | 75
3 files changed, 2 insertions, 99 deletions
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index b55b4a7fbefd..db24c2278be0 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -55,29 +55,4 @@ struct dyn_arch_ftrace {
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-#ifndef __ASSEMBLY__
-
-/*
- * Stack of return addresses for functions
- * of a thread.
- * Used in struct thread_info
- */
-struct ftrace_ret_stack {
-	unsigned long ret;
-	unsigned long func;
-	unsigned long long calltime;
-};
-
-/*
- * Primary handler of a function return.
- * It relays on ftrace_return_to_handler.
- * Defined in entry_32/64.S
- */
-extern void return_to_handler(void);
-
-#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
 #endif /* _ASM_X86_FTRACE_H */
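For context: the block removed above is not dropped outright; it is presumably relocated to the generic ftrace header (likely include/linux/ftrace.h, which lies outside this arch/x86-limited diffstat). A minimal sketch of what the relocated declarations would look like, mirroring the removed lines; the destination file is an assumption, not something this diff shows:

/* Hypothetical sketch of the relocated declarations; assumed new home is
 * include/linux/ftrace.h, which is not part of this arch/x86 diff. */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifndef __ASSEMBLY__

/*
 * Stack of return addresses for the functions of a thread,
 * used in struct thread_info.
 */
struct ftrace_ret_stack {
	unsigned long		ret;		/* saved (real) return address */
	unsigned long		func;		/* entry address of the traced function */
	unsigned long long	calltime;	/* timestamp taken at function entry */
};

/*
 * Primary handler of a function return; it relies on
 * ftrace_return_to_handler(). Defined in the architecture's entry code.
 */
extern void return_to_handler(void);

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */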
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 077c9ea655fc..4325165753d8 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -10,6 +10,7 @@
 #include <linux/kdebug.h>
 #include <linux/module.h>
 #include <linux/ptrace.h>
+#include <linux/ftrace.h>
 #include <linux/kexec.h>
 #include <linux/bug.h>
 #include <linux/nmi.h>
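The only change to dumpstack.c is the new <linux/ftrace.h> include, presumably because the declarations it relies on (struct ftrace_ret_stack, return_to_handler) now come from the generic header rather than <asm/ftrace.h>. A hypothetical illustration of the kind of use a stack dumper has for them, built only from names that appear elsewhere in this diff; the helper itself is not part of this commit:

#include <linux/ftrace.h>
#include <linux/sched.h>

/*
 * Hypothetical helper (illustration only): while the function-graph tracer
 * is active, return addresses on the kernel stack point at the
 * return_to_handler trampoline; the real caller is saved in
 * task->ret_stack[], so a stack dumper has to translate the address back.
 */
static unsigned long graph_real_ret_addr(struct task_struct *task,
					 unsigned long addr, int *index)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (addr == (unsigned long)return_to_handler &&
	    task->ret_stack && *index >= 0) {
		addr = task->ret_stack[*index].ret;	/* saved return address */
		(*index)--;
	}
#endif
	return addr;
}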
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 2f9c0c8cb4c7..c2e057d9f88c 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -369,79 +369,6 @@ int ftrace_disable_ftrace_graph_caller(void)
 
 #endif /* !CONFIG_DYNAMIC_FTRACE */
 
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-				unsigned long func, int *depth)
-{
-	int index;
-
-	if (!current->ret_stack)
-		return -EBUSY;
-
-	/* The return trace stack is full */
-	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-		atomic_inc(&current->trace_overrun);
-		return -EBUSY;
-	}
-
-	index = ++current->curr_ret_stack;
-	barrier();
-	current->ret_stack[index].ret = ret;
-	current->ret_stack[index].func = func;
-	current->ret_stack[index].calltime = time;
-	*depth = index;
-
-	return 0;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
-{
-	int index;
-
-	index = current->curr_ret_stack;
-
-	if (unlikely(index < 0)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic, otherwise we have no where to go */
-		*ret = (unsigned long)panic;
-		return;
-	}
-
-	*ret = current->ret_stack[index].ret;
-	trace->func = current->ret_stack[index].func;
-	trace->calltime = current->ret_stack[index].calltime;
-	trace->overrun = atomic_read(&current->trace_overrun);
-	trace->depth = index;
-	barrier();
-	current->curr_ret_stack--;
-
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(void)
-{
-	struct ftrace_graph_ret trace;
-	unsigned long ret;
-
-	pop_return_trace(&trace, &ret);
-	trace.rettime = cpu_clock(raw_smp_processor_id());
-	ftrace_graph_return(&trace);
-
-	if (unlikely(!ret)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic. What else to do? */
-		ret = (unsigned long)panic;
-	}
-
-	return ret;
-}
-
 /*
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
@@ -494,7 +421,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 
 	calltime = cpu_clock(raw_smp_processor_id());
 
-	if (push_return_trace(old, calltime,
+	if (ftrace_push_return_trace(old, calltime,
 				self_addr, &trace.depth) == -EBUSY) {
 		*parent = old;
 		return;
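The call-site change in the last hunk is the visible half of the move: push_return_trace(), pop_return_trace() and ftrace_return_to_handler(), removed above, presumably reappear as generic code (ftrace_push_return_trace() and friends) outside arch/x86, which this arch/x86-limited diff does not show. A sketch of the expected generic entry point, assuming its body simply mirrors the removed x86 push_return_trace() and matches the new call site ftrace_push_return_trace(old, calltime, self_addr, &trace.depth):

/* Sketch only: the real location of the generic function is not shown in
 * this diff; the body below mirrors the removed x86 push_return_trace(). */
int ftrace_push_return_trace(unsigned long ret, unsigned long long time,
			     unsigned long func, int *depth)
{
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/* Claim the next slot and record where the function will return */
	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = time;
	*depth = index;

	return 0;
}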