author    Steven Rostedt <srostedt@redhat.com>    2009-02-09 13:54:03 -0500
committer Steven Rostedt <srostedt@redhat.com>    2009-02-18 13:43:04 -0500
commit    712406a6bf59ebf4a00358bb59a4a2a1b2953d90 (patch)
tree      5bea439ccacde69ba71c5da8e8e307c2d343aa93 /kernel/trace/trace_functions_graph.c
parent    d2f8d7ee1a9b4650b4e43325b321801264f7c37a (diff)
tracing/function-graph-tracer: make arch generic push pop functions
There is nothing really arch-specific about the push and pop functions
used by the function graph tracer. This patch moves them to generic
code.

Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
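For context: after this patch, an architecture's entry-side hook only has to
redirect the traced function's return address and call the now-generic
ftrace_push_return_trace(). The sketch below is loosely modeled on the x86
hook of this era and is not part of this patch; prepare_ftrace_return() and
the return_to_handler trampoline are arch-side conventions, and the fault
handling a real hook needs is omitted.

/* Hedged sketch of an arch-side entry hook (kernel context assumed).
 * Called from the mcount stub with a pointer to the traced function's
 * on-stack return address. Not part of this patch. */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	struct ftrace_graph_ent trace;
	unsigned long old = *parent;

	/* Divert the return through the arch trampoline, which will
	 * later call the generic ftrace_return_to_handler() added below. */
	*parent = (unsigned long)&return_to_handler;

	/* Save the real return address; on overflow, undo the diversion. */
	if (ftrace_push_return_trace(old, cpu_clock(raw_smp_processor_id()),
				     self_addr, &trace.depth) == -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;
	if (!ftrace_graph_entry(&trace)) {
		/* The entry handler declined this function: unwind the push. */
		current->curr_ret_stack--;
		*parent = old;
	}
}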
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--  kernel/trace/trace_functions_graph.c | 75
1 file changed, 75 insertions(+), 0 deletions(-)
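The ftrace_graph_return() call in the new ftrace_return_to_handler() below
dispatches to whatever consumer registered the graph hooks. As a usage
sketch, assuming the register_ftrace_graph() API of this era (the my_graph_*
names are hypothetical):

/* Hypothetical graph-tracer consumer (kernel context assumed). */
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* nonzero: trace this function; 0: skip it */
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->calltime and trace->rettime bracket the call;
	 * trace->depth mirrors the ret_stack index used below. */
}

static int __init my_tracer_init(void)
{
	/* Return-side callback first, entry-side second. */
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}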
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 930c08e5b38e..dce71a5b51bc 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -42,6 +42,81 @@ static struct tracer_flags tracer_flags = {
 /* pid on the last trace processed */
 static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
 
+/* Add a function return address to the trace stack on thread info.*/
+int
+ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+			 unsigned long func, int *depth)
+{
+	int index;
+
+	if (!current->ret_stack)
+		return -EBUSY;
+
+	/* The return trace stack is full */
+	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+		atomic_inc(&current->trace_overrun);
+		return -EBUSY;
+	}
+
+	index = ++current->curr_ret_stack;
+	barrier();
+	current->ret_stack[index].ret = ret;
+	current->ret_stack[index].func = func;
+	current->ret_stack[index].calltime = time;
+	*depth = index;
+
+	return 0;
+}
+
+/* Retrieve a function return address from the trace stack on thread info.*/
+void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+{
+	int index;
+
+	index = current->curr_ret_stack;
+
+	if (unlikely(index < 0)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic, otherwise we have nowhere to go */
+		*ret = (unsigned long)panic;
+		return;
+	}
+
+	*ret = current->ret_stack[index].ret;
+	trace->func = current->ret_stack[index].func;
+	trace->calltime = current->ret_stack[index].calltime;
+	trace->overrun = atomic_read(&current->trace_overrun);
+	trace->depth = index;
+	barrier();
+	current->curr_ret_stack--;
+
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+	struct ftrace_graph_ret trace;
+	unsigned long ret;
+
+	ftrace_pop_return_trace(&trace, &ret);
+	trace.rettime = cpu_clock(raw_smp_processor_id());
+	ftrace_graph_return(&trace);
+
+	if (unlikely(!ret)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic. What else to do? */
+		ret = (unsigned long)panic;
+	}
+
+	return ret;
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
 	int cpu, ret;