diff options
author | Steven Rostedt <srostedt@redhat.com> | 2009-01-15 19:12:40 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-01-16 06:15:32 -0500 |
commit | 5361499101306cfb776c3cfa0f69d0479bc63868 (patch) | |
tree | 1acf51a942abe6582e08ed86b4bbb98f9c095c89 /kernel/trace/trace.c | |
parent | 6c1a99afbda99cd8d8c69d756387041567a13d87 (diff) |
ftrace: add stack trace to function tracer
Impact: new feature to stack trace any function
Chris Mason asked about being able to pick and choose a function
and get a stack trace from it. This feature enables his request.
# echo io_schedule > /debug/tracing/set_ftrace_filter
# echo function > /debug/tracing/current_tracer
# echo func_stack_trace > /debug/tracing/trace_options
Produces the following in /debug/tracing/trace:
kjournald-702 [001] 135.673060: io_schedule <-sync_buffer
kjournald-702 [002] 135.673671:
<= sync_buffer
<= __wait_on_bit
<= out_of_line_wait_on_bit
<= __wait_on_buffer
<= sync_dirty_buffer
<= journal_commit_transaction
<= kjournald
Note, be careful about turning this on without filtering the functions.
You may find that you have a 10 second lag between typing and seeing
what you typed. This is why the stack trace for the function tracer
does not use the same stack_trace flag as the other tracers use.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r-- | kernel/trace/trace.c | 26 |
1 files changed, 17 insertions, 9 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index dcb757f70d21..3c54cb125228 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -835,10 +835,10 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 	trace_function(tr, data, ip, parent_ip, flags, pc);
 }
 
-static void ftrace_trace_stack(struct trace_array *tr,
-			       struct trace_array_cpu *data,
-			       unsigned long flags,
-			       int skip, int pc)
+static void __ftrace_trace_stack(struct trace_array *tr,
+				 struct trace_array_cpu *data,
+				 unsigned long flags,
+				 int skip, int pc)
 {
 #ifdef CONFIG_STACKTRACE
 	struct ring_buffer_event *event;
@@ -846,9 +846,6 @@ static void ftrace_trace_stack(struct trace_array *tr,
 	struct stack_trace trace;
 	unsigned long irq_flags;
 
-	if (!(trace_flags & TRACE_ITER_STACKTRACE))
-		return;
-
 	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 					 &irq_flags);
 	if (!event)
@@ -869,12 +866,23 @@ static void ftrace_trace_stack(struct trace_array *tr,
 #endif
 }
 
+static void ftrace_trace_stack(struct trace_array *tr,
+			       struct trace_array_cpu *data,
+			       unsigned long flags,
+			       int skip, int pc)
+{
+	if (!(trace_flags & TRACE_ITER_STACKTRACE))
+		return;
+
+	__ftrace_trace_stack(tr, data, flags, skip, pc);
+}
+
 void __trace_stack(struct trace_array *tr,
 		   struct trace_array_cpu *data,
 		   unsigned long flags,
-		   int skip)
+		   int skip, int pc)
 {
-	ftrace_trace_stack(tr, data, flags, skip, preempt_count());
+	__ftrace_trace_stack(tr, data, flags, skip, pc);
 }
 
 static void ftrace_trace_userstack(struct trace_array *tr,