author    | Joel Fernandes <agnel.joel@gmail.com> | 2016-06-18 01:44:54 -0400
committer | Steven Rostedt <rostedt@goodmis.org>  | 2016-06-27 13:29:24 -0400
commit    | 7fa8b7171a638ad896acabd9a17183b75b70aeb4 (patch)
tree      | 07b6570578e9f03a9397ef8a42753625fd000d9d /kernel/trace/trace_functions_graph.c
parent    | be54f69c26193de31053190761e521903b89d098 (diff)
tracing/function_graph: Fix filters for function_graph threshold
The function graph tracer currently ignores filters if tracing_thresh is set.
For example, even if set_ftrace_pid is set, it is ignored when tracing_thresh
is set, resulting in all processes being traced.

To fix this, we reuse the same entry function as when tracing_thresh is not
set and do everything as in the regular case, except for writing the function
entry to the ring buffer.
Link: http://lkml.kernel.org/r/1466228694-2677-1-git-send-email-agnel.joel@gmail.com
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Joel Fernandes <agnel.joel@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
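
For context, the scenario this patch fixes can be reproduced from user space by arming the function_graph tracer with both a PID filter and a duration threshold. The sketch below is illustrative only and is not part of the patch; it assumes tracefs is mounted at /sys/kernel/debug/tracing, root privileges, and the helper name write_tracefs() is invented for the example.

/*
 * Illustrative only (not from this patch): reproduce the reported bug by
 * enabling function_graph with both set_ftrace_pid and tracing_thresh set.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define TRACEFS "/sys/kernel/debug/tracing/"

/* Hypothetical helper: write a value into a tracefs control file. */
static int write_tracefs(const char *file, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), TRACEFS "%s", file);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	char pid[16];

	snprintf(pid, sizeof(pid), "%d", getpid());

	/* Filter tracing to this PID only ... */
	if (write_tracefs("set_ftrace_pid", pid))
		return EXIT_FAILURE;
	/* ... and only report functions that take longer than 100 usecs. */
	if (write_tracefs("tracing_thresh", "100"))
		return EXIT_FAILURE;
	if (write_tracefs("current_tracer", "function_graph"))
		return EXIT_FAILURE;

	/* Do some work, then read the "trace" file.  Before this fix, the
	 * output contained entries from every process, not just this PID. */
	usleep(200000);
	return EXIT_SUCCESS;
}

With the patch applied, trace_graph_entry() performs its usual filtering (including the PID check) and simply skips writing the entry event when tracing_thresh is set, so the resulting trace is limited to the filtered PID.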
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r-- | kernel/trace/trace_functions_graph.c | 17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 67cce7896aeb..7363ccf79512 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -338,6 +338,13 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	if (ftrace_graph_notrace_addr(trace->func))
 		return 1;
 
+	/*
+	 * Stop here if tracing_threshold is set. We only write function return
+	 * events to the ring buffer.
+	 */
+	if (tracing_thresh)
+		return 1;
+
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
@@ -355,14 +362,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 		return ret;
 	}
 
-static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
-{
-	if (tracing_thresh)
-		return 1;
-	else
-		return trace_graph_entry(trace);
-}
-
 static void
 __trace_graph_function(struct trace_array *tr,
 		       unsigned long ip, unsigned long flags, int pc)
@@ -457,7 +456,7 @@ static int graph_trace_init(struct trace_array *tr)
 	set_graph_array(tr);
 	if (tracing_thresh)
 		ret = register_ftrace_graph(&trace_graph_thresh_return,
-					    &trace_graph_thresh_entry);
+					    &trace_graph_entry);
 	else
 		ret = register_ftrace_graph(&trace_graph_return,
 					    &trace_graph_entry);