 arch/x86/include/asm/ftrace.h        | 25 -------------------------
 arch/x86/kernel/dumpstack.c          |  1 +
 arch/x86/kernel/ftrace.c             | 75 +------------------------
 include/linux/ftrace.h               | 24 ++++++++++++++++++++
 kernel/trace/trace_functions_graph.c | 75 ++++++++++++++++++++++++++
 5 files changed, 101 insertions(+), 99 deletions(-)
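In short: this patch moves the function-graph return-stack machinery (push_return_trace(), pop_return_trace() and ftrace_return_to_handler()) out of arch/x86/kernel/ftrace.c into the generic kernel/trace/trace_functions_graph.c, renaming the push/pop helpers to ftrace_push_return_trace() and ftrace_pop_return_trace() and exporting their prototypes. struct ftrace_ret_stack and the return_to_handler() declaration move from the x86-private <asm/ftrace.h> into the generic <linux/ftrace.h> so other architectures can share the code, and arch/x86/kernel/dumpstack.c now includes <linux/ftrace.h> directly.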
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index b55b4a7fbefd..db24c2278be0 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -55,29 +55,4 @@ struct dyn_arch_ftrace {
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-#ifndef __ASSEMBLY__
-
-/*
- * Stack of return addresses for functions
- * of a thread.
- * Used in struct thread_info
- */
-struct ftrace_ret_stack {
-	unsigned long ret;
-	unsigned long func;
-	unsigned long long calltime;
-};
-
-/*
- * Primary handler of a function return.
- * It relays on ftrace_return_to_handler.
- * Defined in entry_32/64.S
- */
-extern void return_to_handler(void);
-
-#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
 #endif /* _ASM_X86_FTRACE_H */
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 077c9ea655fc..4325165753d8 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -10,6 +10,7 @@
 #include <linux/kdebug.h>
 #include <linux/module.h>
 #include <linux/ptrace.h>
+#include <linux/ftrace.h>
 #include <linux/kexec.h>
 #include <linux/bug.h>
 #include <linux/nmi.h>
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 2f9c0c8cb4c7..c2e057d9f88c 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -369,79 +369,6 @@ int ftrace_disable_ftrace_graph_caller(void)
 
 #endif /* !CONFIG_DYNAMIC_FTRACE */
 
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-				unsigned long func, int *depth)
-{
-	int index;
-
-	if (!current->ret_stack)
-		return -EBUSY;
-
-	/* The return trace stack is full */
-	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-		atomic_inc(&current->trace_overrun);
-		return -EBUSY;
-	}
-
-	index = ++current->curr_ret_stack;
-	barrier();
-	current->ret_stack[index].ret = ret;
-	current->ret_stack[index].func = func;
-	current->ret_stack[index].calltime = time;
-	*depth = index;
-
-	return 0;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
-{
-	int index;
-
-	index = current->curr_ret_stack;
-
-	if (unlikely(index < 0)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic, otherwise we have no where to go */
-		*ret = (unsigned long)panic;
-		return;
-	}
-
-	*ret = current->ret_stack[index].ret;
-	trace->func = current->ret_stack[index].func;
-	trace->calltime = current->ret_stack[index].calltime;
-	trace->overrun = atomic_read(&current->trace_overrun);
-	trace->depth = index;
-	barrier();
-	current->curr_ret_stack--;
-
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(void)
-{
-	struct ftrace_graph_ret trace;
-	unsigned long ret;
-
-	pop_return_trace(&trace, &ret);
-	trace.rettime = cpu_clock(raw_smp_processor_id());
-	ftrace_graph_return(&trace);
-
-	if (unlikely(!ret)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic. What else to do? */
-		ret = (unsigned long)panic;
-	}
-
-	return ret;
-}
-
 /*
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
@@ -494,7 +421,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 
 	calltime = cpu_clock(raw_smp_processor_id());
 
-	if (push_return_trace(old, calltime,
+	if (ftrace_push_return_trace(old, calltime,
 				self_addr, &trace.depth) == -EBUSY) {
 		*parent = old;
 		return;
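The hunk above leaves only the call-site change on the x86 side: the push helper is now the shared ftrace_push_return_trace(). To show the calling pattern this API supports, here is a hedged kernel-style sketch of an architecture return hook. It is modeled on the x86 prepare_ftrace_return() fragments visible in this patch but simplified (the real x86 code swaps the return address with cmpxchg before pushing), and my_arch_prepare_ftrace_return() is a hypothetical name, not part of the patch.

#include <linux/ftrace.h>

/*
 * Hypothetical arch hook (sketch only): save the real return address
 * on the per-task return stack, then divert the function's return
 * through the tracer's return_to_handler() trampoline.
 */
void my_arch_prepare_ftrace_return(unsigned long *parent,
				   unsigned long self_addr)
{
	unsigned long old = *parent;		/* real return address */
	struct ftrace_graph_ent trace;
	unsigned long long calltime;

	calltime = cpu_clock(raw_smp_processor_id());

	/* Stack full or unallocated: leave the return path untouched. */
	if (ftrace_push_return_trace(old, calltime,
				     self_addr, &trace.depth) == -EBUSY)
		return;

	trace.func = self_addr;
	ftrace_graph_entry(&trace);		/* report function entry */

	*parent = (unsigned long)&return_to_handler;
}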
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 9d224c43e634..915f4723fc8b 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -404,6 +404,30 @@ struct ftrace_graph_ret {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 /*
+ * Stack of return addresses for functions
+ * of a thread.
+ * Used in struct thread_info
+ */
+struct ftrace_ret_stack {
+	unsigned long ret;
+	unsigned long func;
+	unsigned long long calltime;
+};
+
+/*
+ * Primary handler of a function return.
+ * It relays on ftrace_return_to_handler.
+ * Defined in entry_32/64.S
+ */
+extern void return_to_handler(void);
+
+extern int
+ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+			 unsigned long func, int *depth);
+extern void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
+
+/*
  * Sometimes we don't want to trace a function with the function
  * graph tracer but we want them to keep traced by the usual function
  * tracer if the function graph tracer is not configured.
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 0ff5cb661900..6c7738e4f98b 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -50,6 +50,81 @@ static struct tracer_flags tracer_flags = {
 /* pid on the last trace processed */
 
 
+/* Add a function return address to the trace stack on thread info.*/
+int
+ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+			 unsigned long func, int *depth)
+{
+	int index;
+
+	if (!current->ret_stack)
+		return -EBUSY;
+
+	/* The return trace stack is full */
+	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+		atomic_inc(&current->trace_overrun);
+		return -EBUSY;
+	}
+
+	index = ++current->curr_ret_stack;
+	barrier();
+	current->ret_stack[index].ret = ret;
+	current->ret_stack[index].func = func;
+	current->ret_stack[index].calltime = time;
+	*depth = index;
+
+	return 0;
+}
+
+/* Retrieve a function return address to the trace stack on thread info.*/
+void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+{
+	int index;
+
+	index = current->curr_ret_stack;
+
+	if (unlikely(index < 0)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic, otherwise we have no where to go */
+		*ret = (unsigned long)panic;
+		return;
+	}
+
+	*ret = current->ret_stack[index].ret;
+	trace->func = current->ret_stack[index].func;
+	trace->calltime = current->ret_stack[index].calltime;
+	trace->overrun = atomic_read(&current->trace_overrun);
+	trace->depth = index;
+	barrier();
+	current->curr_ret_stack--;
+
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+	struct ftrace_graph_ret trace;
+	unsigned long ret;
+
+	ftrace_pop_return_trace(&trace, &ret);
+	trace.rettime = cpu_clock(raw_smp_processor_id());
+	ftrace_graph_return(&trace);
+
+	if (unlikely(!ret)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic. What else to do? */
+		ret = (unsigned long)panic;
+	}
+
+	return ret;
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
 	int ret = register_ftrace_graph(&trace_graph_return,
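With the implementation now in generic code, the mechanism is easy to model in isolation. The following is a minimal stand-alone C approximation of the return stack that ftrace_push_return_trace() and ftrace_pop_return_trace() maintain per task: a fixed-depth array whose index starts at -1, an overrun counter incremented when a push finds the stack full, and LIFO pops that hand back the saved return address. This is illustrative user-space code, not from the patch; the names and the depth constant only mirror the kernel's.

#include <stdio.h>

#define RETFUNC_DEPTH 50	/* stand-in for FTRACE_RETFUNC_DEPTH */

struct ret_stack_entry {
	unsigned long ret;		/* saved real return address */
	unsigned long func;		/* address of the traced function */
	unsigned long long calltime;	/* timestamp taken at entry */
};

static struct ret_stack_entry ret_stack[RETFUNC_DEPTH];
static int curr_ret_stack = -1;		/* -1 means the stack is empty */
static unsigned long trace_overrun;	/* pushes dropped because full */

/* Model of ftrace_push_return_trace(): refuse when the stack is full. */
static int push_return_trace(unsigned long ret, unsigned long long time,
			     unsigned long func, int *depth)
{
	if (curr_ret_stack == RETFUNC_DEPTH - 1) {
		trace_overrun++;
		return -1;
	}
	curr_ret_stack++;
	ret_stack[curr_ret_stack].ret = ret;
	ret_stack[curr_ret_stack].func = func;
	ret_stack[curr_ret_stack].calltime = time;
	*depth = curr_ret_stack;
	return 0;
}

/* Model of ftrace_pop_return_trace(): hand back the saved address. */
static unsigned long pop_return_trace(unsigned long long *calltime,
				      int *depth)
{
	unsigned long ret;

	if (curr_ret_stack < 0) {	/* the kernel WARNs and stops here */
		*calltime = 0;
		*depth = -1;
		return 0;
	}
	ret = ret_stack[curr_ret_stack].ret;
	*calltime = ret_stack[curr_ret_stack].calltime;
	*depth = curr_ret_stack;
	curr_ret_stack--;
	return ret;
}

int main(void)
{
	unsigned long long t;
	unsigned long ret;
	int depth;

	/* Two nested "calls"; addresses and times are made up. */
	push_return_trace(0x1000, 10, 0x400500, &depth);
	push_return_trace(0x1004, 12, 0x400800, &depth);

	/* Returns unwind in LIFO order, innermost first. */
	ret = pop_return_trace(&t, &depth);
	printf("ret=%#lx depth=%d calltime=%llu\n", ret, depth, t);
	ret = pop_return_trace(&t, &depth);
	printf("ret=%#lx depth=%d calltime=%llu\n", ret, depth, t);
	printf("overrun=%lu\n", trace_overrun);
	return 0;
}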