aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace_functions_graph.c
diff options
context:
space:
mode:
author: Steven Rostedt (VMware) <rostedt@goodmis.org> 2018-11-19 08:07:12 -0500
committer: Steven Rostedt (VMware) <rostedt@goodmis.org> 2018-11-27 20:31:54 -0500
commit: 39eb456dacb543de90d3bc6a8e0ac5cf51ac475e (patch)
tree: 790493fbeb31636acf0149ea92b146dc03f7c90d /kernel/trace/trace_functions_graph.c
parent: d125f3f866df88da5a85df00291f88f0baa89f7c (diff)
function_graph: Use new curr_ret_depth to manage depth instead of curr_ret_stack
Currently, the depth of the ret_stack is determined by curr_ret_stack index. The issue is that there's a race between setting of the curr_ret_stack and calling of the callback attached to the return of the function. Commit 03274a3ffb44 ("tracing/fgraph: Adjust fgraph depth before calling trace return callback") moved the calling of the callback to after the setting of the curr_ret_stack, even stating that it was safe to do so, when in fact, it was the reason there was a barrier() there (yes, I should have commented that barrier()). Not only does the curr_ret_stack keep track of the current call graph depth, it also keeps the ret_stack content from being overwritten by new data. The function profiler uses the "subtime" variable of ret_stack structure and by moving the curr_ret_stack, it allows for interrupts to use the same structure it was using, corrupting the data, and breaking the profiler. To fix this, there needs to be two variables to handle the call stack depth and the pointer to where the ret_stack is being used, as they need to change at two different locations. Cc: stable@kernel.org Fixes: 03274a3ffb449 ("tracing/fgraph: Adjust fgraph depth before calling trace return callback") Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org> Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--kernel/trace/trace_functions_graph.c21
1 file changed, 13 insertions, 8 deletions
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 88ca787a1cdc..02d4081a7f5a 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -119,7 +119,7 @@ print_graph_duration(struct trace_array *tr, unsigned long long duration,
119 119
120/* Add a function return address to the trace stack on thread info.*/ 120/* Add a function return address to the trace stack on thread info.*/
121static int 121static int
122ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, 122ftrace_push_return_trace(unsigned long ret, unsigned long func,
123 unsigned long frame_pointer, unsigned long *retp) 123 unsigned long frame_pointer, unsigned long *retp)
124{ 124{
125 unsigned long long calltime; 125 unsigned long long calltime;
@@ -177,8 +177,6 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
177#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR 177#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
178 current->ret_stack[index].retp = retp; 178 current->ret_stack[index].retp = retp;
179#endif 179#endif
180 *depth = current->curr_ret_stack;
181
182 return 0; 180 return 0;
183} 181}
184 182
@@ -188,14 +186,20 @@ int function_graph_enter(unsigned long ret, unsigned long func,
188 struct ftrace_graph_ent trace; 186 struct ftrace_graph_ent trace;
189 187
190 trace.func = func; 188 trace.func = func;
191 trace.depth = current->curr_ret_stack + 1; 189 trace.depth = ++current->curr_ret_depth;
192 190
193 /* Only trace if the calling function expects to */ 191 /* Only trace if the calling function expects to */
194 if (!ftrace_graph_entry(&trace)) 192 if (!ftrace_graph_entry(&trace))
195 return -EBUSY; 193 goto out;
196 194
197 return ftrace_push_return_trace(ret, func, &trace.depth, 195 if (ftrace_push_return_trace(ret, func,
198 frame_pointer, retp); 196 frame_pointer, retp))
197 goto out;
198
199 return 0;
200 out:
201 current->curr_ret_depth--;
202 return -EBUSY;
199} 203}
200 204
201/* Retrieve a function return address to the trace stack on thread info.*/ 205/* Retrieve a function return address to the trace stack on thread info.*/
@@ -257,7 +261,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
257 trace->func = current->ret_stack[index].func; 261 trace->func = current->ret_stack[index].func;
258 trace->calltime = current->ret_stack[index].calltime; 262 trace->calltime = current->ret_stack[index].calltime;
259 trace->overrun = atomic_read(&current->trace_overrun); 263 trace->overrun = atomic_read(&current->trace_overrun);
260 trace->depth = index; 264 trace->depth = current->curr_ret_depth;
261} 265}
262 266
263/* 267/*
@@ -273,6 +277,7 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
273 trace.rettime = trace_clock_local(); 277 trace.rettime = trace_clock_local();
274 barrier(); 278 barrier();
275 current->curr_ret_stack--; 279 current->curr_ret_stack--;
280 current->curr_ret_depth--;
276 /* 281 /*
277 * The curr_ret_stack can be less than -1 only if it was 282 * The curr_ret_stack can be less than -1 only if it was
278 * filtered out and it's about to return from the function. 283 * filtered out and it's about to return from the function.