diff options
author | Lai Jiangshan <laijs@cn.fujitsu.com> | 2010-01-13 06:38:30 -0500 |
---|---|---|
committer | Frederic Weisbecker <fweisbec@gmail.com> | 2010-01-28 19:05:12 -0500 |
commit | ea2c68a08fedb5053ba312d661e47df9f4d72411 (patch) | |
tree | 76e5e61b8377aac6283670481171bf899905052b /kernel/trace/trace_functions_graph.c | |
parent | 24a53652e361321b09df5040711e69387344ce09 (diff) |
tracing: Simplify test for function_graph tracing start point
In the function graph tracer, a calling function is to be traced
only when it is enabled through the set_graph_function file,
or when it is nested in an enabled function.
Current code uses TSK_TRACE_FL_GRAPH to test whether it is nested
or not. Looking at the code, we can see the following equivalence:
(trace->depth > 0) <==> (TSK_TRACE_FL_GRAPH is set)
trace->depth states more explicitly that the function is nested.
So we use trace->depth directly and simplify the code.
No functionality is changed.
TSK_TRACE_FL_GRAPH is not removed yet, it is left for future usage.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <4B4DB0B6.7040607@cn.fujitsu.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r-- | kernel/trace/trace_functions_graph.c | 8 |
1 file changed, 2 insertions, 6 deletions
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index f2252296607c..616b135c9eb9 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -215,7 +215,8 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) | |||
215 | if (!ftrace_trace_task(current)) | 215 | if (!ftrace_trace_task(current)) |
216 | return 0; | 216 | return 0; |
217 | 217 | ||
218 | if (!ftrace_graph_addr(trace->func)) | 218 | /* trace it when it is-nested-in or is a function enabled. */ |
219 | if (!(trace->depth || ftrace_graph_addr(trace->func))) | ||
219 | return 0; | 220 | return 0; |
220 | 221 | ||
221 | local_irq_save(flags); | 222 | local_irq_save(flags); |
@@ -228,9 +229,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) | |||
228 | } else { | 229 | } else { |
229 | ret = 0; | 230 | ret = 0; |
230 | } | 231 | } |
231 | /* Only do the atomic if it is not already set */ | ||
232 | if (!test_tsk_trace_graph(current)) | ||
233 | set_tsk_trace_graph(current); | ||
234 | 232 | ||
235 | atomic_dec(&data->disabled); | 233 | atomic_dec(&data->disabled); |
236 | local_irq_restore(flags); | 234 | local_irq_restore(flags); |
@@ -278,8 +276,6 @@ void trace_graph_return(struct ftrace_graph_ret *trace) | |||
278 | pc = preempt_count(); | 276 | pc = preempt_count(); |
279 | __trace_graph_return(tr, trace, flags, pc); | 277 | __trace_graph_return(tr, trace, flags, pc); |
280 | } | 278 | } |
281 | if (!trace->depth) | ||
282 | clear_tsk_trace_graph(current); | ||
283 | atomic_dec(&data->disabled); | 279 | atomic_dec(&data->disabled); |
284 | local_irq_restore(flags); | 280 | local_irq_restore(flags); |
285 | } | 281 | } |