author    Steven Rostedt <srostedt@redhat.com>    2009-02-09 13:54:03 -0500
committer Steven Rostedt <srostedt@redhat.com>    2009-02-18 13:43:04 -0500
commit    712406a6bf59ebf4a00358bb59a4a2a1b2953d90
tree      5bea439ccacde69ba71c5da8e8e307c2d343aa93
parent    d2f8d7ee1a9b4650b4e43325b321801264f7c37a
tracing/function-graph-tracer: make arch generic push pop functions
There is nothing really arch-specific about the push and pop functions
used by the function graph tracer. This patch moves them to generic code.

Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
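For context, a simplified sketch of what the arch side looks like after the move: the generic helper now does all of the ret_stack bookkeeping, and the arch hook only diverts the return address. This is not the full x86 implementation (the fault-protected swap of the return address and the tracing_graph_pause check are omitted); see the arch/x86/kernel/ftrace.c hunk below for the real code.

#include <linux/ftrace.h>
#include <linux/sched.h>

/* Simplified sketch, not the full x86 implementation. */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old = *parent;	/* real code reads/writes *parent under a fault handler */
	unsigned long long calltime;
	struct ftrace_graph_ent trace;

	/* divert the function's return into the arch trampoline */
	*parent = (unsigned long)&return_to_handler;

	calltime = cpu_clock(raw_smp_processor_id());

	/* now generic: records ret/func/calltime on current->ret_stack */
	if (ftrace_push_return_trace(old, calltime,
				     self_addr, &trace.depth) == -EBUSY) {
		*parent = old;		/* stack full: undo the diversion */
		return;
	}

	trace.func = self_addr;

	/* only trace if the entry handler agrees */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}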
-rw-r--r--  arch/x86/include/asm/ftrace.h          25
-rw-r--r--  arch/x86/kernel/dumpstack.c             1
-rw-r--r--  arch/x86/kernel/ftrace.c               75
-rw-r--r--  include/linux/ftrace.h                 24
-rw-r--r--  kernel/trace/trace_functions_graph.c   75
5 files changed, 101 insertions(+), 99 deletions(-)
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index b55b4a7fbefd..db24c2278be0 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -55,29 +55,4 @@ struct dyn_arch_ftrace {
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-#ifndef __ASSEMBLY__
-
-/*
- * Stack of return addresses for functions
- * of a thread.
- * Used in struct thread_info
- */
-struct ftrace_ret_stack {
-	unsigned long ret;
-	unsigned long func;
-	unsigned long long calltime;
-};
-
-/*
- * Primary handler of a function return.
- * It relays on ftrace_return_to_handler.
- * Defined in entry_32/64.S
- */
-extern void return_to_handler(void);
-
-#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
 #endif /* _ASM_X86_FTRACE_H */
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 6b1f6f6f8661..c0852291b623 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -10,6 +10,7 @@
 #include <linux/kdebug.h>
 #include <linux/module.h>
 #include <linux/ptrace.h>
+#include <linux/ftrace.h>
 #include <linux/kexec.h>
 #include <linux/bug.h>
 #include <linux/nmi.h>
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 231bdd3c5b1c..76f7141e0f91 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -389,79 +389,6 @@ void ftrace_nmi_exit(void)
 
 #endif /* !CONFIG_DYNAMIC_FTRACE */
 
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-				unsigned long func, int *depth)
-{
-	int index;
-
-	if (!current->ret_stack)
-		return -EBUSY;
-
-	/* The return trace stack is full */
-	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-		atomic_inc(&current->trace_overrun);
-		return -EBUSY;
-	}
-
-	index = ++current->curr_ret_stack;
-	barrier();
-	current->ret_stack[index].ret = ret;
-	current->ret_stack[index].func = func;
-	current->ret_stack[index].calltime = time;
-	*depth = index;
-
-	return 0;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
-{
-	int index;
-
-	index = current->curr_ret_stack;
-
-	if (unlikely(index < 0)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic, otherwise we have no where to go */
-		*ret = (unsigned long)panic;
-		return;
-	}
-
-	*ret = current->ret_stack[index].ret;
-	trace->func = current->ret_stack[index].func;
-	trace->calltime = current->ret_stack[index].calltime;
-	trace->overrun = atomic_read(&current->trace_overrun);
-	trace->depth = index;
-	barrier();
-	current->curr_ret_stack--;
-
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(void)
-{
-	struct ftrace_graph_ret trace;
-	unsigned long ret;
-
-	pop_return_trace(&trace, &ret);
-	trace.rettime = cpu_clock(raw_smp_processor_id());
-	ftrace_graph_return(&trace);
-
-	if (unlikely(!ret)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic. What else to do? */
-		ret = (unsigned long)panic;
-	}
-
-	return ret;
-}
-
 /*
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
@@ -521,7 +448,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 
 	calltime = cpu_clock(raw_smp_processor_id());
 
-	if (push_return_trace(old, calltime,
+	if (ftrace_push_return_trace(old, calltime,
 				self_addr, &trace.depth) == -EBUSY) {
 		*parent = old;
 		return;
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 677432b9cb7e..a7f8134c594e 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -380,6 +380,30 @@ struct ftrace_graph_ret {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 /*
+ * Stack of return addresses for functions
+ * of a thread.
+ * Used in struct thread_info
+ */
+struct ftrace_ret_stack {
+	unsigned long ret;
+	unsigned long func;
+	unsigned long long calltime;
+};
+
+/*
+ * Primary handler of a function return.
+ * It relays on ftrace_return_to_handler.
+ * Defined in entry_32/64.S
+ */
+extern void return_to_handler(void);
+
+extern int
+ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+			 unsigned long func, int *depth);
+extern void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
+
+/*
  * Sometimes we don't want to trace a function with the function
  * graph tracer but we want them to keep traced by the usual function
  * tracer if the function graph tracer is not configured.
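These declarations are the whole cross-arch contract: an architecture pushes on function entry and lets the generic ftrace_return_to_handler() pop on exit. Below is a hedged sketch of the exit side, written in C for illustration only; arch_return_thunk_c is a hypothetical name, and on x86 the real caller is the return_to_handler trampoline in entry_32/64.S, which saves scratch registers, calls ftrace_return_to_handler(), and jumps to the address it returns.

#include <linux/ftrace.h>

/*
 * Hypothetical C-level stand-in for an arch's return trampoline,
 * for illustration only (the real x86 version is assembly).
 */
static unsigned long arch_return_thunk_c(void)	/* hypothetical name */
{
	/*
	 * Generic code pops current->ret_stack via
	 * ftrace_pop_return_trace(), timestamps the exit, reports it
	 * through ftrace_graph_return(), and hands back the original
	 * return address that the entry hook saved.
	 */
	return ftrace_return_to_handler();
}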
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 930c08e5b38e..dce71a5b51bc 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -42,6 +42,81 @@ static struct tracer_flags tracer_flags = {
 /* pid on the last trace processed */
 static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
 
+/* Add a function return address to the trace stack on thread info.*/
+int
+ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+			 unsigned long func, int *depth)
+{
+	int index;
+
+	if (!current->ret_stack)
+		return -EBUSY;
+
+	/* The return trace stack is full */
+	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+		atomic_inc(&current->trace_overrun);
+		return -EBUSY;
+	}
+
+	index = ++current->curr_ret_stack;
+	barrier();
+	current->ret_stack[index].ret = ret;
+	current->ret_stack[index].func = func;
+	current->ret_stack[index].calltime = time;
+	*depth = index;
+
+	return 0;
+}
+
+/* Retrieve a function return address to the trace stack on thread info.*/
+void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+{
+	int index;
+
+	index = current->curr_ret_stack;
+
+	if (unlikely(index < 0)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic, otherwise we have no where to go */
+		*ret = (unsigned long)panic;
+		return;
+	}
+
+	*ret = current->ret_stack[index].ret;
+	trace->func = current->ret_stack[index].func;
+	trace->calltime = current->ret_stack[index].calltime;
+	trace->overrun = atomic_read(&current->trace_overrun);
+	trace->depth = index;
+	barrier();
+	current->curr_ret_stack--;
+
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+	struct ftrace_graph_ret trace;
+	unsigned long ret;
+
+	ftrace_pop_return_trace(&trace, &ret);
+	trace.rettime = cpu_clock(raw_smp_processor_id());
+	ftrace_graph_return(&trace);
+
+	if (unlikely(!ret)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic. What else to do? */
+		ret = (unsigned long)panic;
+	}
+
+	return ret;
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
 	int cpu, ret;
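To make the stack discipline above concrete, here is a stand-alone user-space model of the same push/pop rules, for illustration only: the names and values are made up, and the kernel-only pieces (current, barrier(), the atomic overrun counter, the ftrace callbacks) are simplified away.

#include <stdio.h>

#define RETFUNC_DEPTH 50	/* kernel uses FTRACE_RETFUNC_DEPTH */

struct ret_stack_entry {
	unsigned long ret;		/* original return address */
	unsigned long func;		/* traced function */
	unsigned long long calltime;	/* entry timestamp */
};

static struct ret_stack_entry ret_stack[RETFUNC_DEPTH];
static int curr_ret_stack = -1;		/* -1 == empty, as in the kernel */
static unsigned long trace_overrun;

static int push_ret(unsigned long ret, unsigned long long t, unsigned long fn)
{
	if (curr_ret_stack == RETFUNC_DEPTH - 1) {
		trace_overrun++;	/* stack full: count it and refuse */
		return -1;
	}
	curr_ret_stack++;
	ret_stack[curr_ret_stack].ret = ret;
	ret_stack[curr_ret_stack].func = fn;
	ret_stack[curr_ret_stack].calltime = t;
	return 0;
}

static unsigned long pop_ret(void)
{
	unsigned long ret = ret_stack[curr_ret_stack].ret;
	curr_ret_stack--;		/* LIFO: deepest call pops first */
	return ret;
}

int main(void)
{
	push_ret(0x1000, 1, 0xa000);	/* enter foo(); caller resumes at 0x1000 */
	push_ret(0x2000, 2, 0xb000);	/* foo() calls bar() */
	printf("bar returns to %#lx\n", pop_ret());	/* 0x2000 */
	printf("foo returns to %#lx\n", pop_ret());	/* 0x1000 */
	return 0;
}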