author     Steven Rostedt <srostedt@redhat.com>    2009-01-15 19:12:40 -0500
committer  Ingo Molnar <mingo@elte.hu>             2009-01-16 06:15:32 -0500
commit     5361499101306cfb776c3cfa0f69d0479bc63868
tree       1acf51a942abe6582e08ed86b4bbb98f9c095c89 /kernel/trace
parent     6c1a99afbda99cd8d8c69d756387041567a13d87
ftrace: add stack trace to function tracer
Impact: new feature to stack trace any function
Chris Mason asked about being able to pick and choose a function
and get a stack trace from it. This feature implements that request.
# echo io_schedule > /debug/tracing/set_ftrace_filter
# echo function > /debug/tracing/current_tracer
# echo func_stack_trace > /debug/tracing/trace_options
Produces the following in /debug/tracing/trace:
kjournald-702 [001] 135.673060: io_schedule <-sync_buffer
kjournald-702 [002] 135.673671:
<= sync_buffer
<= __wait_on_bit
<= out_of_line_wait_on_bit
<= __wait_on_buffer
<= sync_dirty_buffer
<= journal_commit_transaction
<= kjournald
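The output above is read back from the same debugfs directory used to
configure the tracer (the /debug mount point may differ on other systems),
for example:
# cat /debug/tracing/trace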
Note: be careful about turning this on without filtering the functions.
You may find a 10 second lag between typing and seeing what you typed.
This is why the stack trace for the function tracer does not use the
same stack_trace flag as the other tracers.
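For example, set_ftrace_filter accepts glob patterns to limit tracing to a
small set of functions, and a trace option can be cleared again by echoing
its name prefixed with "no" (paths assume the same /debug mount as above):
# echo 'io_schedule*' > /debug/tracing/set_ftrace_filter
# echo nofunc_stack_trace > /debug/tracing/trace_options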
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace.c            | 26
-rw-r--r--  kernel/trace/trace.h            |  7
-rw-r--r--  kernel/trace/trace_functions.c  | 84
3 files changed, 108 insertions(+), 9 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index dcb757f70d21..3c54cb125228 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -835,10 +835,10 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
         trace_function(tr, data, ip, parent_ip, flags, pc);
 }
 
-static void ftrace_trace_stack(struct trace_array *tr,
+static void __ftrace_trace_stack(struct trace_array *tr,
                                struct trace_array_cpu *data,
                                unsigned long flags,
                                int skip, int pc)
 {
 #ifdef CONFIG_STACKTRACE
         struct ring_buffer_event *event;
@@ -846,9 +846,6 @@ static void ftrace_trace_stack(struct trace_array *tr,
         struct stack_trace trace;
         unsigned long irq_flags;
 
-        if (!(trace_flags & TRACE_ITER_STACKTRACE))
-                return;
-
         event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                          &irq_flags);
         if (!event)
@@ -869,12 +866,23 @@ static void ftrace_trace_stack(struct trace_array *tr,
 #endif
 }
 
+static void ftrace_trace_stack(struct trace_array *tr,
+                               struct trace_array_cpu *data,
+                               unsigned long flags,
+                               int skip, int pc)
+{
+        if (!(trace_flags & TRACE_ITER_STACKTRACE))
+                return;
+
+        __ftrace_trace_stack(tr, data, flags, skip, pc);
+}
+
 void __trace_stack(struct trace_array *tr,
                    struct trace_array_cpu *data,
                    unsigned long flags,
-                   int skip)
+                   int skip, int pc)
 {
-        ftrace_trace_stack(tr, data, flags, skip, preempt_count());
+        __ftrace_trace_stack(tr, data, flags, skip, pc);
 }
 
 static void ftrace_trace_userstack(struct trace_array *tr,
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 79c872100dd5..bf39a369e4b3 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -457,6 +457,11 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
 void update_max_tr_single(struct trace_array *tr,
                           struct task_struct *tsk, int cpu);
 
+void __trace_stack(struct trace_array *tr,
+                   struct trace_array_cpu *data,
+                   unsigned long flags,
+                   int skip, int pc);
+
 extern cycle_t ftrace_now(int cpu);
 
 #ifdef CONFIG_FUNCTION_TRACER
@@ -467,6 +472,8 @@ void tracing_stop_function_trace(void);
 # define tracing_stop_function_trace() do { } while (0)
 #endif
 
+extern int ftrace_function_enabled;
+
 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
 typedef void
 (*tracer_switch_func_t)(void *private,
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 9236d7e25a16..3a5fa08cedb0 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -16,6 +16,8 @@
 
 #include "trace.h"
 
+static struct trace_array *func_trace;
+
 static void start_function_trace(struct trace_array *tr)
 {
         tr->cpu = get_cpu();
@@ -34,6 +36,7 @@ static void stop_function_trace(struct trace_array *tr)
 
 static int function_trace_init(struct trace_array *tr)
 {
+        func_trace = tr;
         start_function_trace(tr);
         return 0;
 }
@@ -48,12 +51,93 @@ static void function_trace_start(struct trace_array *tr)
         tracing_reset_online_cpus(tr);
 }
 
+static void
+function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+        struct trace_array *tr = func_trace;
+        struct trace_array_cpu *data;
+        unsigned long flags;
+        long disabled;
+        int cpu;
+        int pc;
+
+        if (unlikely(!ftrace_function_enabled))
+                return;
+
+        /*
+         * Need to use raw, since this must be called before the
+         * recursive protection is performed.
+         */
+        local_irq_save(flags);
+        cpu = raw_smp_processor_id();
+        data = tr->data[cpu];
+        disabled = atomic_inc_return(&data->disabled);
+
+        if (likely(disabled == 1)) {
+                pc = preempt_count();
+                /*
+                 * skip over 5 funcs:
+                 *    __ftrace_trace_stack,
+                 *    __trace_stack,
+                 *    function_stack_trace_call
+                 *    ftrace_list_func
+                 *    ftrace_call
+                 */
+                __trace_stack(tr, data, flags, 5, pc);
+        }
+
+        atomic_dec(&data->disabled);
+        local_irq_restore(flags);
+}
+
+static struct ftrace_ops trace_stack_ops __read_mostly =
+{
+        .func = function_stack_trace_call,
+};
+
+/* Our two options */
+enum {
+        TRACE_FUNC_OPT_STACK = 0x1,
+};
+
+static struct tracer_opt func_opts[] = {
+#ifdef CONFIG_STACKTRACE
+        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
+#endif
+        { } /* Always set a last empty entry */
+};
+
+static struct tracer_flags func_flags = {
+        .val = 0, /* By default: all flags disabled */
+        .opts = func_opts
+};
+
+static int func_set_flag(u32 old_flags, u32 bit, int set)
+{
+        if (bit == TRACE_FUNC_OPT_STACK) {
+                /* do nothing if already set */
+                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
+                        return 0;
+
+                if (set)
+                        register_ftrace_function(&trace_stack_ops);
+                else
+                        unregister_ftrace_function(&trace_stack_ops);
+
+                return 0;
+        }
+
+        return -EINVAL;
+}
+
 static struct tracer function_trace __read_mostly =
 {
         .name = "function",
         .init = function_trace_init,
         .reset = function_trace_reset,
         .start = function_trace_start,
+        .flags = &func_flags,
+        .set_flag = func_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
         .selftest = trace_selftest_startup_function,
 #endif
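The following is an illustrative sketch, not part of the patch: it shows the
same register_ftrace_function()/unregister_ftrace_function() pattern used for
trace_stack_ops above, with the two-argument callback signature this kernel
version uses (later kernels changed the ftrace_ops callback signature). All
names prefixed with my_ are hypothetical.

#include <linux/ftrace.h>

/* Hypothetical callback: invoked on entry of every traced function. */
static void my_trace_call(unsigned long ip, unsigned long parent_ip)
{
        /* ip is the traced function's address, parent_ip is its caller's. */
}

static struct ftrace_ops my_trace_ops __read_mostly =
{
        .func = my_trace_call,
};

/* Start receiving callbacks for every function ftrace can trace. */
static void my_trace_start(void)
{
        register_ftrace_function(&my_trace_ops);
}

/* Stop receiving callbacks and remove the ops from the ftrace list. */
static void my_trace_stop(void)
{
        unregister_ftrace_function(&my_trace_ops);
}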