author	Lai Jiangshan <laijs@cn.fujitsu.com>	2010-01-13 06:38:30 -0500
committer	Frederic Weisbecker <fweisbec@gmail.com>	2010-01-28 19:05:12 -0500
commit	ea2c68a08fedb5053ba312d661e47df9f4d72411 (patch)
tree	76e5e61b8377aac6283670481171bf899905052b /kernel/trace
parent	24a53652e361321b09df5040711e69387344ce09 (diff)
tracing: Simplify test for function_graph tracing start point
In the function graph tracer, a function call is traced only when the
function is enabled through the set_graph_function file, or when it is
nested inside an enabled function.

The current code uses TSK_TRACE_FL_GRAPH to test whether the call is
nested. Looking at the code, we can see that:

  (trace->depth > 0)  <==>  (TSK_TRACE_FL_GRAPH is set)

trace->depth states more explicitly that the call is nested, so we use
trace->depth directly and simplify the code.

No functionality is changed.

TSK_TRACE_FL_GRAPH is not removed yet; it is left for future use.
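
For illustration only (not part of the patch), here is a minimal userspace
C sketch of the simplified start-point test: a call is traced when its
function is listed via set_graph_function, or when it is nested inside a
traced call (trace->depth > 0). The names graph_funcs[],
graph_addr_enabled() and should_trace() are made up here to model
ftrace_graph_addr() and the check in trace_graph_entry().

/* Userspace sketch of the start-point test after this patch. */
#include <stdio.h>
#include <string.h>

static const char *graph_funcs[] = { "do_sys_open" };	/* set_graph_function */
static const int graph_count = 1;

struct graph_ent { const char *func; int depth; };

/* Models ftrace_graph_addr(): is this function explicitly enabled? */
static int graph_addr_enabled(const char *func)
{
	int i;

	if (!graph_count)
		return 1;		/* no filter set: trace everything */
	for (i = 0; i < graph_count; i++)
		if (!strcmp(graph_funcs[i], func))
			return 1;
	return 0;
}

/* Models the simplified check in trace_graph_entry(). */
static int should_trace(const struct graph_ent *ent)
{
	/* trace it when it is nested in, or is, an enabled function */
	return ent->depth > 0 || graph_addr_enabled(ent->func);
}

int main(void)
{
	struct graph_ent top    = { "do_sys_open", 0 };	/* enabled root   */
	struct graph_ent nested = { "getname",     1 };	/* nested call    */
	struct graph_ent other  = { "schedule",    0 };	/* unrelated call */

	printf("%d %d %d\n", should_trace(&top),
	       should_trace(&nested), should_trace(&other));
	return 0;
}

Compiled and run, the sketch prints "1 1 0": the enabled root and its
nested callee are traced, the unrelated call is not.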
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <4B4DB0B6.7040607@cn.fujitsu.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/trace.h	2
-rw-r--r--	kernel/trace/trace_functions_graph.c	8
2 files changed, 3 insertions, 7 deletions
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 4df6a77eb196..ce077fbbf552 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -504,7 +504,7 @@ static inline int ftrace_graph_addr(unsigned long addr)
 {
 	int i;
 
-	if (!ftrace_graph_count || test_tsk_trace_graph(current))
+	if (!ftrace_graph_count)
 		return 1;
 
 	for (i = 0; i < ftrace_graph_count; i++) {
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index f2252296607c..616b135c9eb9 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -215,7 +215,8 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	if (!ftrace_trace_task(current))
 		return 0;
 
-	if (!ftrace_graph_addr(trace->func))
+	/* trace it when it is-nested-in or is a function enabled. */
+	if (!(trace->depth || ftrace_graph_addr(trace->func)))
 		return 0;
 
 	local_irq_save(flags);
@@ -228,9 +229,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	} else {
 		ret = 0;
 	}
-	/* Only do the atomic if it is not already set */
-	if (!test_tsk_trace_graph(current))
-		set_tsk_trace_graph(current);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
@@ -278,8 +276,6 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 		pc = preempt_count();
 		__trace_graph_return(tr, trace, flags, pc);
 	}
-	if (!trace->depth)
-		clear_tsk_trace_graph(current);
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }