author     Steven Rostedt <srostedt@redhat.com>   2009-01-15 19:12:40 -0500
committer  Ingo Molnar <mingo@elte.hu>            2009-01-16 06:15:32 -0500
commit     5361499101306cfb776c3cfa0f69d0479bc63868
tree       1acf51a942abe6582e08ed86b4bbb98f9c095c89 /kernel/trace/trace_functions.c
parent     6c1a99afbda99cd8d8c69d756387041567a13d87
ftrace: add stack trace to function tracer
Impact: new feature to stack trace any function
Chris Mason asked about being able to pick and choose a function
and get a stack trace from it. This feature enables his request.
# echo io_schedule > /debug/tracing/set_ftrace_filter
# echo function > /debug/tracing/current_tracer
# echo func_stack_trace > /debug/tracing/trace_options
Produces the following in /debug/tracing/trace:
kjournald-702 [001] 135.673060: io_schedule <-sync_buffer
kjournald-702 [002] 135.673671:
<= sync_buffer
<= __wait_on_bit
<= out_of_line_wait_on_bit
<= __wait_on_buffer
<= sync_dirty_buffer
<= journal_commit_transaction
<= kjournald
Note, be careful about turning this on without filtering the functions.
You may find that you have a 10 second lag between typing and seeing
what you typed. This is why the stack trace for the function tracer
does not use the same stack_trace flag as the other tracers use.
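(A minimal sketch of undoing the setup above, using the same /debug/tracing files; the "no" prefix for clearing an option is the usual trace_options convention rather than anything introduced by this patch. Drop the option before clearing the filter, since an empty set_ftrace_filter re-enables tracing of every function:)

 # echo nofunc_stack_trace > /debug/tracing/trace_options
 # echo > /debug/tracing/set_ftrace_filter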
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace_functions.c')
-rw-r--r--   kernel/trace/trace_functions.c   84
1 files changed, 84 insertions, 0 deletions
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 9236d7e25a16..3a5fa08cedb0 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -16,6 +16,8 @@
 
 #include "trace.h"
 
+static struct trace_array	*func_trace;
+
 static void start_function_trace(struct trace_array *tr)
 {
 	tr->cpu = get_cpu();
@@ -34,6 +36,7 @@ static void stop_function_trace(struct trace_array *tr)
 
 static int function_trace_init(struct trace_array *tr)
 {
+	func_trace = tr;
 	start_function_trace(tr);
 	return 0;
 }
@@ -48,12 +51,93 @@ static void function_trace_start(struct trace_array *tr)
 	tracing_reset_online_cpus(tr);
 }
 
+static void
+function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = func_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	if (unlikely(!ftrace_function_enabled))
+		return;
+
+	/*
+	 * Need to use raw, since this must be called before the
+	 * recursive protection is performed.
+	 */
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		/*
+		 * skip over 5 funcs:
+		 *    __ftrace_trace_stack,
+		 *    __trace_stack,
+		 *    function_stack_trace_call
+		 *    ftrace_list_func
+		 *    ftrace_call
+		 */
+		__trace_stack(tr, data, flags, 5, pc);
+	}
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+}
+
+static struct ftrace_ops trace_stack_ops __read_mostly =
+{
+	.func = function_stack_trace_call,
+};
+
+/* Our two options */
+enum {
+	TRACE_FUNC_OPT_STACK = 0x1,
+};
+
+static struct tracer_opt func_opts[] = {
+#ifdef CONFIG_STACKTRACE
+	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
+#endif
+	{ } /* Always set a last empty entry */
+};
+
+static struct tracer_flags func_flags = {
+	.val = 0, /* By default: all flags disabled */
+	.opts = func_opts
+};
+
+static int func_set_flag(u32 old_flags, u32 bit, int set)
+{
+	if (bit == TRACE_FUNC_OPT_STACK) {
+		/* do nothing if already set */
+		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
+			return 0;
+
+		if (set)
+			register_ftrace_function(&trace_stack_ops);
+		else
+			unregister_ftrace_function(&trace_stack_ops);
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
 static struct tracer function_trace __read_mostly =
 {
 	.name		= "function",
 	.init		= function_trace_init,
 	.reset		= function_trace_reset,
 	.start		= function_trace_start,
+	.flags		= &func_flags,
+	.set_flag	= func_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_function,
 #endif
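The data->disabled counter in function_stack_trace_call() is the standard guard that keeps the callback from recursing into itself: only the outermost entry does any work. A minimal userspace analogue of that idiom is sketched below; the names (trace_disabled, record_event) are hypothetical, the kernel version keeps one counter per CPU with interrupts disabled rather than a single global, and this is not kernel API.

 #include <stdatomic.h>
 #include <stdio.h>

 /* Single global counter standing in for the kernel's per-CPU
  * data->disabled: the real work runs only for the outermost call,
  * so a re-entrant call is silently dropped instead of recursing. */
 static atomic_long trace_disabled;

 static void record_event(const char *what)
 {
 	long nested = atomic_fetch_add(&trace_disabled, 1) + 1;

 	if (nested == 1)		/* outermost caller only */
 		printf("event: %s\n", what);

 	atomic_fetch_sub(&trace_disabled, 1);
 }

 int main(void)
 {
 	record_event("io_schedule");
 	return 0;
 }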