author		Steven Rostedt (VMware) <rostedt@goodmis.org>	2018-11-14 13:14:58 -0500
committer	Steven Rostedt (VMware) <rostedt@goodmis.org>	2018-11-29 23:38:34 -0500
commit		9cd2992f2d6c8df54c5b937d5d1f8a23b684cc1d (patch)
tree		36c8b8ef56855bc2b623adea03e511bee3695023 /kernel/trace/trace_functions_graph.c
parent		d864a3ca883095aa12575b84841ebd52b3d808fa (diff)
fgraph: Have set_graph_notrace only affect function_graph tracer
In order to make the function graph infrastructure more generic, there cannot
be code specific to the function_graph tracer in the generic code. This
includes the set_graph_notrace logic, which stops all graph calls when a
function in the set_graph_notrace list is hit.
By using the trace_recursion mask, we can use a bit in the current
task_struct to implement the notrace code, and move the logic out of
fgraph.c and into trace_functions_graph.c. This keeps it affecting only the
tracer and not all function graph callbacks.
Acked-by: Namhyung Kim <namhyung@kernel.org>
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
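For context, trace_recursion_set/clear/test are lightweight bit operations on
a flags word in the current task_struct, which is what lets the NOTRACE state
stay per-task instead of living in the shared fgraph code. A paraphrased
sketch of the helpers (the real macros are in kernel/trace/trace.h; treat
this as illustrative, not verbatim kernel source):

/* Paraphrase of the trace_recursion helpers in kernel/trace/trace.h:
 * each one manipulates a bit in current->trace_recursion, so the
 * state is private to the task being traced. */
#define trace_recursion_set(bit)   do { (current)->trace_recursion |= (1UL << (bit)); } while (0)
#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1UL << (bit)); } while (0)
#define trace_recursion_test(bit)  ((current)->trace_recursion & (1UL << (bit)))

Because the bit is per-task, trace_graph_entry() can set it when a notrace
function is entered and the paired return handler can clear it on exit,
suppressing every event nested in between without touching other tasks.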
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--  kernel/trace/trace_functions_graph.c  22
1 file changed, 22 insertions, 0 deletions
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index b846d82c2f95..ecf543df943b 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -188,6 +188,18 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	int cpu;
 	int pc;
 
+	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
+		return 0;
+
+	if (ftrace_graph_notrace_addr(trace->func)) {
+		trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
+		/*
+		 * Need to return 1 to have the return called
+		 * that will clear the NOTRACE bit.
+		 */
+		return 1;
+	}
+
 	if (!ftrace_trace_task(tr))
 		return 0;
 
@@ -290,6 +302,11 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 
 	ftrace_graph_addr_finish(trace);
 
+	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
+		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
+		return;
+	}
+
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
@@ -315,6 +332,11 @@ static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
 {
 	ftrace_graph_addr_finish(trace);
 
+	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
+		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
+		return;
+	}
+
 	if (tracing_thresh &&
 	    (trace->rettime - trace->calltime < tracing_thresh))
 		return;
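The entry/return pairing above is the crux: returning 1 from
trace_graph_entry() is what guarantees the return handler runs and clears the
bit. A minimal userspace model of that flow (hypothetical demo code, not
kernel source; names chosen to mirror the patch):

#include <stdio.h>

/* Stand-in for current->trace_recursion. */
static unsigned long trace_recursion;
enum { TRACE_GRAPH_NOTRACE_BIT = 0 };

static int  rec_test(int bit)  { return trace_recursion & (1UL << bit); }
static void rec_set(int bit)   { trace_recursion |= (1UL << bit); }
static void rec_clear(int bit) { trace_recursion &= ~(1UL << bit); }

/* Models trace_graph_entry(): 0 = no trace and no return hook,
 * 1 = hook the return so graph_return() will be called. */
static int graph_entry(const char *func, int is_notrace)
{
	if (rec_test(TRACE_GRAPH_NOTRACE_BIT))
		return 0;			/* nested under a notrace function: suppressed */
	if (is_notrace) {
		rec_set(TRACE_GRAPH_NOTRACE_BIT);
		return 1;			/* need the return hook to clear the bit */
	}
	printf("enter %s\n", func);
	return 1;
}

/* Models trace_graph_return(). */
static void graph_return(const char *func)
{
	if (rec_test(TRACE_GRAPH_NOTRACE_BIT)) {
		rec_clear(TRACE_GRAPH_NOTRACE_BIT);
		return;				/* leaving the notrace function itself */
	}
	printf("exit  %s\n", func);
}

int main(void)
{
	graph_entry("traced_fn", 0);
	if (graph_entry("notrace_fn", 1)) {	/* sets the bit, no output */
		graph_entry("nested_fn", 0);	/* suppressed: returns 0, so no return hook */
		graph_return("notrace_fn");	/* clears the bit, no output */
	}
	graph_return("traced_fn");
	return 0;
}

Only traced_fn produces output; notrace_fn and everything nested inside it
stay silent, which is the behavior this patch restores for the
function_graph tracer alone rather than for all graph callbacks.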