about summary refs log tree commit diff stats
path: root/kernel/trace/trace_functions_graph.c
diff options
context:
space:
mode:
authorSteven Rostedt (VMware) <rostedt@goodmis.org>2018-11-19 15:18:40 -0500
committerSteven Rostedt (VMware) <rostedt@goodmis.org>2018-11-27 20:31:54 -0500
commit552701dd0fa7c3d448142e87210590ba424694a0 (patch)
tree3090655780506b181aeef9125d6377d6166349a8 /kernel/trace/trace_functions_graph.c
parent39eb456dacb543de90d3bc6a8e0ac5cf51ac475e (diff)
function_graph: Move return callback before update of curr_ret_stack
In the past, curr_ret_stack had two functions. One was to denote the depth of the call graph, the other is to keep track of where on the ret_stack the data is used. Although they may be slightly related, there are two cases where they need to be used differently.

The one case is that it keeps the ret_stack data from being corrupted by an interrupt coming in and overwriting the data still in use. The other is just to know where the depth of the stack currently is.

The function profiler uses the ret_stack to save a "subtime" variable that is part of the data on the ret_stack. If curr_ret_stack is modified too early, then this variable can be corrupted.

The "max_depth" option, when set to 1, will record the first functions going into the kernel. To see all top functions (when dealing with timings), the depth variable needs to be lowered before calling the return hook. But by lowering the curr_ret_stack, it makes the data on the ret_stack still being used by the return hook susceptible to being overwritten.

Now that there's two variables to handle both cases (curr_ret_depth), we can move them to the locations where they can handle both cases.

Cc: stable@kernel.org
Fixes: 03274a3ffb449 ("tracing/fgraph: Adjust fgraph depth before calling trace return callback")
Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r-- kernel/trace/trace_functions_graph.c | 22
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 02d4081a7f5a..4f0d72ae6362 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -261,7 +261,13 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
261 trace->func = current->ret_stack[index].func; 261 trace->func = current->ret_stack[index].func;
262 trace->calltime = current->ret_stack[index].calltime; 262 trace->calltime = current->ret_stack[index].calltime;
263 trace->overrun = atomic_read(&current->trace_overrun); 263 trace->overrun = atomic_read(&current->trace_overrun);
264 trace->depth = current->curr_ret_depth; 264 trace->depth = current->curr_ret_depth--;
265 /*
266 * We still want to trace interrupts coming in if
267 * max_depth is set to 1. Make sure the decrement is
268 * seen before ftrace_graph_return.
269 */
270 barrier();
265} 271}
266 272
267/* 273/*
@@ -275,9 +281,14 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
275 281
276 ftrace_pop_return_trace(&trace, &ret, frame_pointer); 282 ftrace_pop_return_trace(&trace, &ret, frame_pointer);
277 trace.rettime = trace_clock_local(); 283 trace.rettime = trace_clock_local();
284 ftrace_graph_return(&trace);
285 /*
286 * The ftrace_graph_return() may still access the current
287 * ret_stack structure, we need to make sure the update of
288 * curr_ret_stack is after that.
289 */
278 barrier(); 290 barrier();
279 current->curr_ret_stack--; 291 current->curr_ret_stack--;
280 current->curr_ret_depth--;
281 /* 292 /*
282 * The curr_ret_stack can be less than -1 only if it was 293 * The curr_ret_stack can be less than -1 only if it was
283 * filtered out and it's about to return from the function. 294 * filtered out and it's about to return from the function.
@@ -288,13 +299,6 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
288 return ret; 299 return ret;
289 } 300 }
290 301
291 /*
292 * The trace should run after decrementing the ret counter
293 * in case an interrupt were to come in. We don't want to
294 * lose the interrupt if max_depth is set.
295 */
296 ftrace_graph_return(&trace);
297
298 if (unlikely(!ret)) { 302 if (unlikely(!ret)) {
299 ftrace_graph_stop(); 303 ftrace_graph_stop();
300 WARN_ON(1); 304 WARN_ON(1);