author		Jiri Olsa <jolsa@redhat.com>		2010-09-23 08:00:53 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2010-10-18 10:53:30 -0400
commit		7495a5beaa22f190f4888aa8cbe4827c16575d0a (patch)
tree		8e094689aba97c6739450f32ee2ca6fe0c58d319 /kernel/trace/trace_sched_wakeup.c
parent		0a772620a2e21fb55a02f70fe38d4b5c3a5fbbbf (diff)
tracing: Graph support for wakeup tracer
Add function graph support for the wakeup latency tracer.
The graph output is enabled by setting the 'display-graph'
trace option.
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
LKML-Reference: <1285243253-7372-4-git-send-email-jolsa@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/trace_sched_wakeup.c')
-rw-r--r--	kernel/trace/trace_sched_wakeup.c	231
1 file changed, 221 insertions(+), 10 deletions(-)
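
Before the diff itself, a quick illustration of the control flow this patch adds: flipping the 'display-graph' bit tears down whichever set of ftrace callbacks is currently registered and registers the other set. The sketch below is a standalone user-space model, not kernel code; model_set_flag(), model_flags and the printf stand-ins are invented for illustration, and the real logic lives in wakeup_set_flag(), start_func_tracer() and stop_func_tracer() in the diff that follows. (In the kernel, the tracing core updates tracer_flags.val after set_flag() returns success; the model folds that step into model_set_flag().)

/*
 * Standalone user-space model (not kernel code) of the mode switch that
 * wakeup_set_flag() performs when the 'display-graph' option changes.
 * All identifiers prefixed with model_ are invented for illustration.
 */
#include <stdio.h>

#define MODEL_DISPLAY_GRAPH	1	/* stands in for TRACE_DISPLAY_GRAPH */

static unsigned int model_flags;	/* stands in for tracer_flags.val */

#define model_is_graph()	(model_flags & MODEL_DISPLAY_GRAPH)

/* Stand-ins for register_ftrace_function()/register_ftrace_graph(). */
static int start_func_tracer(int graph)
{
	printf("register %s callbacks\n",
	       graph ? "graph entry/return" : "function");
	return 0;
}

static void stop_func_tracer(int graph)
{
	printf("unregister %s callbacks\n",
	       graph ? "graph entry/return" : "function");
}

/*
 * Same shape as wakeup_set_flag(): reject unknown bits, ignore no-op
 * requests, stop the mode being left, reset latency state, then start
 * the requested mode.
 */
static int model_set_flag(unsigned int bit, int set)
{
	if (!(bit & MODEL_DISPLAY_GRAPH))
		return -1;			/* -EINVAL in the kernel */

	if (!(model_is_graph() ^ set))
		return 0;			/* already in the requested mode */

	stop_func_tracer(!set);
	/* kernel: wakeup_reset(wakeup_trace); tracing_max_latency = 0; */

	if (set)
		model_flags |= bit;
	else
		model_flags &= ~bit;

	return start_func_tracer(set);
}

int main(void)
{
	model_set_flag(MODEL_DISPLAY_GRAPH, 1);	/* switch to graph output */
	model_set_flag(MODEL_DISPLAY_GRAPH, 1);	/* no-op, already set */
	model_set_flag(MODEL_DISPLAY_GRAPH, 0);	/* back to plain function trace */
	return 0;
}

At runtime the option is typically toggled by writing 1 or 0 to the display-graph entry under the tracing options directory while the wakeup tracer is the current tracer.
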
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 4086eae6e81b..033510dbb322 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -31,13 +31,33 @@ static int wakeup_rt;
 static arch_spinlock_t wakeup_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
+static void wakeup_reset(struct trace_array *tr);
 static void __wakeup_reset(struct trace_array *tr);
+static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
+static void wakeup_graph_return(struct ftrace_graph_ret *trace);
 
 static int save_lat_flag;
 
+#define TRACE_DISPLAY_GRAPH	1
+
+static struct tracer_opt trace_opts[] = {
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* display latency trace as call graph */
+	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
+#endif
+	{ } /* Empty entry */
+};
+
+static struct tracer_flags tracer_flags = {
+	.val  = 0,
+	.opts = trace_opts,
+};
+
+#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
+
 #ifdef CONFIG_FUNCTION_TRACER
 /*
- * irqsoff uses its own tracer function to keep the overhead down:
+ * wakeup uses its own tracer function to keep the overhead down:
  */
 static void
 wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
@@ -80,8 +100,191 @@ static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = wakeup_tracer_call,
 };
+
+static int start_func_tracer(int graph)
+{
+	int ret;
+
+	if (!graph)
+		ret = register_ftrace_function(&trace_ops);
+	else
+		ret = register_ftrace_graph(&wakeup_graph_return,
+					    &wakeup_graph_entry);
+
+	if (!ret && tracing_is_enabled())
+		tracer_enabled = 1;
+	else
+		tracer_enabled = 0;
+
+	return ret;
+}
+
+static void stop_func_tracer(int graph)
+{
+	tracer_enabled = 0;
+
+	if (!graph)
+		unregister_ftrace_function(&trace_ops);
+	else
+		unregister_ftrace_graph();
+}
+
 #endif /* CONFIG_FUNCTION_TRACER */
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
+{
+
+	if (!(bit & TRACE_DISPLAY_GRAPH))
+		return -EINVAL;
+
+	if (!(is_graph() ^ set))
+		return 0;
+
+	stop_func_tracer(!set);
+
+	wakeup_reset(wakeup_trace);
+	tracing_max_latency = 0;
+
+	return start_func_tracer(set);
+}
+
+static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
+{
+	struct trace_array *tr = wakeup_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu, pc, ret = 0;
+
+	if (likely(!wakeup_task))
+		return 0;
+
+	pc = preempt_count();
+	preempt_disable_notrace();
+
+	cpu = raw_smp_processor_id();
+	if (cpu != wakeup_current_cpu)
+		goto out_enable;
+
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (unlikely(disabled != 1))
+		goto out;
+
+	local_save_flags(flags);
+	ret = __trace_graph_entry(tr, trace, flags, pc);
+
+out:
+	atomic_dec(&data->disabled);
+
+out_enable:
+	preempt_enable_notrace();
+	return ret;
+}
+
+static void wakeup_graph_return(struct ftrace_graph_ret *trace)
+{
+	struct trace_array *tr = wakeup_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu, pc;
+
+	if (likely(!wakeup_task))
+		return;
+
+	pc = preempt_count();
+	preempt_disable_notrace();
+
+	cpu = raw_smp_processor_id();
+	if (cpu != wakeup_current_cpu)
+		goto out_enable;
+
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (unlikely(disabled != 1))
+		goto out;
+
+	local_save_flags(flags);
+	__trace_graph_return(tr, trace, flags, pc);
+
+out:
+	atomic_dec(&data->disabled);
+
+out_enable:
+	preempt_enable_notrace();
+	return;
+}
+
+static void wakeup_trace_open(struct trace_iterator *iter)
+{
+	if (is_graph())
+		graph_trace_open(iter);
+}
+
+static void wakeup_trace_close(struct trace_iterator *iter)
+{
+	if (iter->private)
+		graph_trace_close(iter);
+}
+
+#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC)
+
+static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
+{
+	/*
+	 * In graph mode call the graph tracer output function,
+	 * otherwise go with the TRACE_FN event handler
+	 */
+	if (is_graph())
+		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
+
+	return TRACE_TYPE_UNHANDLED;
+}
+
+static void wakeup_print_header(struct seq_file *s)
+{
+	if (is_graph())
+		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
+	else
+		trace_default_header(s);
+}
+
+static void
+__trace_function(struct trace_array *tr,
+		 unsigned long ip, unsigned long parent_ip,
+		 unsigned long flags, int pc)
+{
+	if (is_graph())
+		trace_graph_function(tr, ip, parent_ip, flags, pc);
+	else
+		trace_function(tr, ip, parent_ip, flags, pc);
+}
+#else
+#define __trace_function trace_function
+
+static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
+{
+	return -EINVAL;
+}
+
+static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
+{
+	return -1;
+}
+
+static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
+{
+	return TRACE_TYPE_UNHANDLED;
+}
+
+static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
+static void wakeup_print_header(struct seq_file *s) { }
+static void wakeup_trace_open(struct trace_iterator *iter) { }
+static void wakeup_trace_close(struct trace_iterator *iter) { }
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 /*
  * Should this new latency be reported/recorded?
  */
@@ -152,7 +355,7 @@ probe_wakeup_sched_switch(void *ignore,
 	/* The task we are waiting for is waking up */
 	data = wakeup_trace->data[wakeup_cpu];
 
-	trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
+	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
 	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
 
 	T0 = data->preempt_timestamp;
@@ -252,7 +455,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
 	 * is not called by an assembly function  (where as schedule is)
 	 * it should be safe to use it here.
 	 */
-	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
+	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
 
 out_locked:
 	arch_spin_unlock(&wakeup_lock);
@@ -303,12 +506,8 @@ static void start_wakeup_tracer(struct trace_array *tr)
 	 */
 	smp_wmb();
 
-	register_ftrace_function(&trace_ops);
-
-	if (tracing_is_enabled())
-		tracer_enabled = 1;
-	else
-		tracer_enabled = 0;
+	if (start_func_tracer(is_graph()))
+		printk(KERN_ERR "failed to start wakeup tracer\n");
 
 	return;
 fail_deprobe_wake_new:
@@ -320,7 +519,7 @@ fail_deprobe:
 static void stop_wakeup_tracer(struct trace_array *tr)
 {
 	tracer_enabled = 0;
-	unregister_ftrace_function(&trace_ops);
+	stop_func_tracer(is_graph());
 	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
 	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
 	unregister_trace_sched_wakeup(probe_wakeup, NULL);
@@ -379,9 +578,15 @@ static struct tracer wakeup_tracer __read_mostly =
 	.start = wakeup_tracer_start,
 	.stop = wakeup_tracer_stop,
 	.print_max = 1,
+	.print_header = wakeup_print_header,
+	.print_line = wakeup_print_line,
+	.flags = &tracer_flags,
+	.set_flag = wakeup_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_wakeup,
 #endif
+	.open = wakeup_trace_open,
+	.close = wakeup_trace_close,
 	.use_max_tr = 1,
 };
 
@@ -394,9 +599,15 @@ static struct tracer wakeup_rt_tracer __read_mostly =
 	.stop = wakeup_tracer_stop,
 	.wait_pipe = poll_wait_pipe,
 	.print_max = 1,
+	.print_header = wakeup_print_header,
+	.print_line = wakeup_print_line,
+	.flags = &tracer_flags,
+	.set_flag = wakeup_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_wakeup,
 #endif
+	.open = wakeup_trace_open,
+	.close = wakeup_trace_close,
 	.use_max_tr = 1,
 };
 
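
A closing note on the recording guard shared by wakeup_graph_entry() and wakeup_graph_return() above: each handler bumps the per-CPU data->disabled counter with atomic_inc_return() and only writes to the ring buffer when the result is exactly 1, so nested or re-entrant calls on the same CPU are dropped instead of recursing into the tracer. The sketch below is a user-space analogue of that pattern using C11 atomics; model_graph_entry(), data_disabled and record_graph_entry() are hypothetical names, not kernel APIs.

/*
 * Standalone user-space analogue (not kernel code) of the per-CPU
 * "disabled" guard used by wakeup_graph_entry()/wakeup_graph_return().
 */
#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the per-CPU trace_array_cpu->disabled counter. */
static atomic_long data_disabled;

/* Stand-in for __trace_graph_entry(): just report the address. */
static void record_graph_entry(unsigned long ip)
{
	printf("graph entry recorded for ip=0x%lx\n", ip);
}

/*
 * Mirrors wakeup_graph_entry(): atomic_inc_return() in the kernel yields
 * the post-increment value, so anything other than 1 means another path
 * on this CPU is already inside the tracer and the event is dropped
 * rather than recursing into the ring buffer.
 */
static int model_graph_entry(unsigned long ip)
{
	long disabled;
	int ret = 0;

	disabled = atomic_fetch_add(&data_disabled, 1) + 1;
	if (disabled != 1)
		goto out;

	record_graph_entry(ip);
	ret = 1;		/* kernel: ret = __trace_graph_entry(...); */

out:
	atomic_fetch_sub(&data_disabled, 1);
	return ret;
}

int main(void)
{
	model_graph_entry(0xc0ffeeUL);		/* outermost call: recorded */

	atomic_fetch_add(&data_disabled, 1);	/* pretend we are nested */
	model_graph_entry(0xbadcafeUL);		/* dropped: counter != 1 */
	atomic_fetch_sub(&data_disabled, 1);

	return 0;
}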