Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--  kernel/trace/trace_functions_graph.c  56
1 file changed, 53 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index b5c09242683d..e08c030b8f38 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -114,16 +114,37 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
 		return -EBUSY;
 	}
 
+	/*
+	 * The curr_ret_stack is an index to ftrace return stack of
+	 * current task. Its value should be in [0, FTRACE_RETFUNC_
+	 * DEPTH) when the function graph tracer is used. To support
+	 * filtering out specific functions, it makes the index
+	 * negative by subtracting huge value (FTRACE_NOTRACE_DEPTH)
+	 * so when it sees a negative index the ftrace will ignore
+	 * the record. And the index gets recovered when returning
+	 * from the filtered function by adding the FTRACE_NOTRACE_
+	 * DEPTH and then it'll continue to record functions normally.
+	 *
+	 * The curr_ret_stack is initialized to -1 and get increased
+	 * in this function. So it can be less than -1 only if it was
+	 * filtered out via ftrace_graph_notrace_addr() which can be
+	 * set from set_graph_notrace file in debugfs by user.
+	 */
+	if (current->curr_ret_stack < -1)
+		return -EBUSY;
+
 	calltime = trace_clock_local();
 
 	index = ++current->curr_ret_stack;
+	if (ftrace_graph_notrace_addr(func))
+		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
 	barrier();
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
 	current->ret_stack[index].calltime = calltime;
 	current->ret_stack[index].subtime = 0;
 	current->ret_stack[index].fp = frame_pointer;
-	*depth = index;
+	*depth = current->curr_ret_stack;
 
 	return 0;
 }
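
The comment above leans on one arithmetic property: FTRACE_NOTRACE_DEPTH is so much
larger than the stack can ever grow that any valid index, once offset, lands strictly
below -1, and so can never be confused with the empty-stack value -1. A minimal
standalone sketch of that property (not kernel code; it assumes the constants
FTRACE_RETFUNC_DEPTH = 50 and FTRACE_NOTRACE_DEPTH = 0x10000 that this series uses):

	/* Hypothetical userspace harness for the index-offset property. */
	#include <assert.h>
	#include <stdio.h>

	#define FTRACE_RETFUNC_DEPTH	50
	#define FTRACE_NOTRACE_DEPTH	0x10000

	int main(void)
	{
		int index;

		for (index = 0; index < FTRACE_RETFUNC_DEPTH; index++) {
			int marked = index - FTRACE_NOTRACE_DEPTH;

			/* Every valid index lands strictly below -1 once
			 * marked, so the "< -1" checks in the patch fire for
			 * anything nested inside a filtered function. */
			assert(marked < -1);

			/* Adding the offset back recovers the index exactly. */
			assert(marked + FTRACE_NOTRACE_DEPTH == index);
		}
		printf("offset round-trips for all %d slots\n", FTRACE_RETFUNC_DEPTH);
		return 0;
	}

Note also that *depth now reports current->curr_ret_stack rather than index, so a
filtered entry hands a negative depth to the entry callback; the last hunk below
relies on that.
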
@@ -137,7 +158,17 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
 
 	index = current->curr_ret_stack;
 
-	if (unlikely(index < 0)) {
+	/*
+	 * A negative index here means that it's just returned from a
+	 * notrace'd function. Recover index to get an original
+	 * return address. See ftrace_push_return_trace().
+	 *
+	 * TODO: Need to check whether the stack gets corrupted.
+	 */
+	if (index < 0)
+		index += FTRACE_NOTRACE_DEPTH;
+
+	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
 		ftrace_graph_stop();
 		WARN_ON(1);
 		/* Might as well panic, otherwise we have no where to go */
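
The pop side must undo the offset before sanity-checking: the old bare "index < 0"
test would now reject every legitimate filtered return, so the bounds check runs
after recovery and catches both directions of real corruption. A small hypothetical
harness for that predicate (same assumed constants as above):

	#include <assert.h>

	#define FTRACE_RETFUNC_DEPTH	50
	#define FTRACE_NOTRACE_DEPTH	0x10000

	/* Models the recovery-then-sanity-check sequence in the hunk above. */
	static int pop_index_ok(int index)
	{
		if (index < 0)
			index += FTRACE_NOTRACE_DEPTH;	/* undo the notrace offset */

		/* Out of range here means real corruption, not filtering. */
		return !(index < 0 || index >= FTRACE_RETFUNC_DEPTH);
	}

	int main(void)
	{
		/* A slot offset on the push side recovers into range. */
		assert(pop_index_ok(7 - FTRACE_NOTRACE_DEPTH));

		/* An overflowed index is still caught after recovery. */
		assert(!pop_index_ok(FTRACE_RETFUNC_DEPTH));

		/* So negative that recovery cannot rescue it: caught too. */
		assert(!pop_index_ok(-2 * FTRACE_NOTRACE_DEPTH));
		return 0;
	}
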
@@ -193,6 +224,15 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 	trace.rettime = trace_clock_local();
 	barrier();
 	current->curr_ret_stack--;
+	/*
+	 * The curr_ret_stack can be less than -1 only if it was
+	 * filtered out and it's about to return from the function.
+	 * Recover the index and continue to trace normal functions.
+	 */
+	if (current->curr_ret_stack < -1) {
+		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
+		return ret;
+	}
 
 	/*
 	 * The trace should run after decrementing the ret counter
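
Worked through with concrete numbers (again assuming FTRACE_NOTRACE_DEPTH = 0x10000):
a notrace'd function pushed at index 1 leaves curr_ret_stack at 1 - 0x10000 = -65535
while it runs; the decrement above makes that -65536, the "< -1" branch adds the
offset back to land on 0, and the handler returns the saved address early, emitting
no exit event. A toy model of that sequence (hypothetical userspace code):

	#include <assert.h>

	#define FTRACE_NOTRACE_DEPTH	0x10000

	int main(void)
	{
		int curr = -1;			/* empty ret stack */

		curr++;				/* push: traced caller at index 0 */
		curr++;				/* push: notrace'd callee at index 1 ... */
		curr -= FTRACE_NOTRACE_DEPTH;	/* ... marked filtered */
		assert(curr == 1 - FTRACE_NOTRACE_DEPTH);

		curr--;				/* return handler: decrement first */
		assert(curr < -1);		/* filtered: recover, return early */
		curr += FTRACE_NOTRACE_DEPTH;
		assert(curr == 0);		/* back at the traced caller's index */
		return 0;
	}
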
@@ -259,10 +299,20 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 
 	/* trace it when it is-nested-in or is a function enabled. */
 	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
-	     ftrace_graph_ignore_irqs()) ||
+	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
 	    (max_depth && trace->depth >= max_depth))
 		return 0;
 
+	/*
+	 * Do not trace a function if it's filtered by set_graph_notrace.
+	 * Make the index of ret stack negative to indicate that it should
+	 * ignore further functions. But it needs its own ret stack entry
+	 * to recover the original index in order to continue tracing after
+	 * returning from the function.
+	 */
+	if (ftrace_graph_notrace_addr(trace->func))
+		return 1;
+
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
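
Putting the pieces together: the notrace'd function itself returns 1 here so its
offset ret_stack slot survives and its return address stays hijacked, while anything
nested inside it is rejected by the new "trace->depth < 0" test (with the "< -1"
guard in ftrace_push_return_trace() as a backstop). An end-to-end toy model for a
call chain f() -> g() -> h() where only g() is listed in set_graph_notrace
(hypothetical userspace code; the real decision order lives partly in the arch entry
hook, e.g. prepare_ftrace_return() on x86, which is simplified away here):

	#include <stdio.h>

	#define FTRACE_NOTRACE_DEPTH	0x10000

	static int curr = -1;			/* current->curr_ret_stack */

	static void enter(const char *name, int notrace)
	{
		if (curr < -1) {		/* inside a notrace'd function */
			printf("enter %s: ignored, curr=%d\n", name, curr);
			return;			/* no slot, return not hijacked */
		}

		curr++;				/* ftrace_push_return_trace() */
		if (notrace) {			/* trace_graph_entry() returned 1 */
			curr -= FTRACE_NOTRACE_DEPTH;
			printf("enter %s: slot kept, no event, curr=%d\n", name, curr);
		} else {
			printf("enter %s: entry event, curr=%d\n", name, curr);
		}
	}

	static void leave(const char *name)
	{
		curr--;				/* ftrace_return_to_handler() */
		if (curr < -1) {
			curr += FTRACE_NOTRACE_DEPTH;
			printf("leave %s: recovered, no event, curr=%d\n", name, curr);
			return;
		}
		printf("leave %s: exit event, curr=%d\n", name, curr);
	}

	int main(void)
	{
		enter("f", 0);	/* traced normally */
		enter("g", 1);	/* in set_graph_notrace */
		enter("h", 0);	/* nested inside g: ignored entirely */
		leave("g");	/* h's return was never hijacked, so no leave("h") */
		leave("f");
		return 0;
	}
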