Diffstat (limited to 'kernel/trace/trace_functions_graph.c')

-rw-r--r--  kernel/trace/trace_functions_graph.c | 480 ++++++++++++++++++++-------
 1 file changed, 337 insertions(+), 143 deletions(-)
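
Note: this patch centralizes the return-address "shadow stack" helpers (ftrace_push_return_trace, ftrace_pop_return_trace, ftrace_return_to_handler) in trace_functions_graph.c, adds the funcgraph-duration and funcgraph-abstime output options, and replaces the static NR_CPUS last_pid array with a per-cpu allocation hung off iter->private. As a rough illustration of the shadow-stack mechanism, here is a minimal user-space sketch; all names (ret_frame, push_return_trace, RET_STACK_DEPTH, the addresses in main) are made up for the example, and the kernel version additionally lives on task_struct, uses barrier(), and checks that current->ret_stack exists:

#include <stdio.h>

/* Toy model of the per-task return stack: each traced call pushes the
 * real return address plus an entry timestamp; the return hook pops it
 * to compute the duration and to restore the original control flow. */
#define RET_STACK_DEPTH 50	/* stands in for FTRACE_RETFUNC_DEPTH */

struct ret_frame {
	unsigned long ret;		/* original return address */
	unsigned long func;		/* address of the traced function */
	unsigned long long calltime;	/* timestamp taken at entry */
};

static struct ret_frame ret_stack[RET_STACK_DEPTH];
static int curr_ret_stack = -1;
static unsigned long trace_overrun;

/* Mirrors ftrace_push_return_trace(): called at function entry. */
static int push_return_trace(unsigned long ret, unsigned long long time,
			     unsigned long func, int *depth)
{
	if (curr_ret_stack == RET_STACK_DEPTH - 1) {
		trace_overrun++;	/* stack full: count the lost event */
		return -1;
	}
	curr_ret_stack++;
	ret_stack[curr_ret_stack].ret = ret;
	ret_stack[curr_ret_stack].func = func;
	ret_stack[curr_ret_stack].calltime = time;
	*depth = curr_ret_stack;
	return 0;
}

/* Mirrors ftrace_pop_return_trace(): called from the return hook. */
static unsigned long pop_return_trace(unsigned long long now,
				      unsigned long long *duration)
{
	struct ret_frame *frame = &ret_stack[curr_ret_stack--];

	*duration = now - frame->calltime;	/* the DURATION column */
	return frame->ret;	/* where the traced function really returns */
}

int main(void)
{
	unsigned long long duration;
	unsigned long ret;
	int depth;

	push_return_trace(0x1000, 5, 0x400a00, &depth);	/* entry at t=5 */
	ret = pop_return_trace(17, &duration);		/* exit at t=17 */
	printf("depth %d, duration %llu, ret 0x%lx\n", depth, duration, ret);
	return 0;
}
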
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 930c08e5b38e..35257be6a9d6 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -1,7 +1,7 @@
 /*
  *
  * Function graph tracer.
- * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
  * Mostly borrowed from function tracer which
  * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
  *
@@ -12,6 +12,7 @@
 #include <linux/fs.h>
 
 #include "trace.h"
+#include "trace_output.h"
 
 #define TRACE_GRAPH_INDENT	2
 
@@ -20,9 +21,11 @@
 #define TRACE_GRAPH_PRINT_CPU		0x2
 #define TRACE_GRAPH_PRINT_OVERHEAD	0x4
 #define TRACE_GRAPH_PRINT_PROC		0x8
+#define TRACE_GRAPH_PRINT_DURATION	0x10
+#define TRACE_GRAPH_PRINT_ABS_TIME	0X20
 
 static struct tracer_opt trace_opts[] = {
-	/* Display overruns ? */
+	/* Display overruns? (for self-debug purpose) */
 	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
 	/* Display CPU ? */
 	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
@@ -30,26 +33,101 @@ static struct tracer_opt trace_opts[] = {
 	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
 	/* Display proc name/pid */
 	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
+	/* Display duration of execution */
+	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
+	/* Display absolute time of an entry */
+	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
 	{ } /* Empty entry */
 };
 
 static struct tracer_flags tracer_flags = {
 	/* Don't display overruns and proc by default */
-	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
+	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
+	       TRACE_GRAPH_PRINT_DURATION,
 	.opts = trace_opts
 };
 
 /* pid on the last trace processed */
-static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
 
-static int graph_trace_init(struct trace_array *tr)
+
+/* Add a function return address to the trace stack on thread info.*/
+int
+ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+			unsigned long func, int *depth)
+{
+	int index;
+
+	if (!current->ret_stack)
+		return -EBUSY;
+
+	/* The return trace stack is full */
+	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+		atomic_inc(&current->trace_overrun);
+		return -EBUSY;
+	}
+
+	index = ++current->curr_ret_stack;
+	barrier();
+	current->ret_stack[index].ret = ret;
+	current->ret_stack[index].func = func;
+	current->ret_stack[index].calltime = time;
+	*depth = index;
+
+	return 0;
+}
+
+/* Retrieve a function return address to the trace stack on thread info.*/
+void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+{
+	int index;
+
+	index = current->curr_ret_stack;
+
+	if (unlikely(index < 0)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic, otherwise we have no where to go */
+		*ret = (unsigned long)panic;
+		return;
+	}
+
+	*ret = current->ret_stack[index].ret;
+	trace->func = current->ret_stack[index].func;
+	trace->calltime = current->ret_stack[index].calltime;
+	trace->overrun = atomic_read(&current->trace_overrun);
+	trace->depth = index;
+	barrier();
+	current->curr_ret_stack--;
+
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
 {
-	int cpu, ret;
+	struct ftrace_graph_ret trace;
+	unsigned long ret;
+
+	ftrace_pop_return_trace(&trace, &ret);
+	trace.rettime = trace_clock_local();
+	ftrace_graph_return(&trace);
+
+	if (unlikely(!ret)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic. What else to do? */
+		ret = (unsigned long)panic;
+	}
 
-	for_each_online_cpu(cpu)
-		tracing_reset(tr, cpu);
+	return ret;
+}
 
-	ret = register_ftrace_graph(&trace_graph_return,
+static int graph_trace_init(struct trace_array *tr)
+{
+	int ret = register_ftrace_graph(&trace_graph_return,
 				&trace_graph_entry);
 	if (ret)
 		return ret;
@@ -153,17 +231,25 @@ print_graph_proc(struct trace_seq *s, pid_t pid)
 
 /* If the pid changed since the last trace, output this event */
 static enum print_line_t
-verif_pid(struct trace_seq *s, pid_t pid, int cpu)
+verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu)
 {
 	pid_t prev_pid;
+	pid_t *last_pid;
 	int ret;
 
-	if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
+	if (!last_pids_cpu)
+		return TRACE_TYPE_HANDLED;
+
+	last_pid = per_cpu_ptr(last_pids_cpu, cpu);
+
+	if (*last_pid == pid)
 		return TRACE_TYPE_HANDLED;
 
-	prev_pid = last_pid[cpu];
-	last_pid[cpu] = pid;
+	prev_pid = *last_pid;
+	*last_pid = pid;
 
+	if (prev_pid == -1)
+		return TRACE_TYPE_HANDLED;
 	/*
 	 * Context-switch trace line:
 
@@ -175,34 +261,34 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu)
 	ret = trace_seq_printf(s,
 		" ------------------------------------------\n");
 	if (!ret)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	ret = print_graph_cpu(s, cpu);
 	if (ret == TRACE_TYPE_PARTIAL_LINE)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	ret = print_graph_proc(s, prev_pid);
 	if (ret == TRACE_TYPE_PARTIAL_LINE)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	ret = trace_seq_printf(s, " => ");
 	if (!ret)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	ret = print_graph_proc(s, pid);
 	if (ret == TRACE_TYPE_PARTIAL_LINE)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	ret = trace_seq_printf(s,
 		"\n ------------------------------------------\n\n");
 	if (!ret)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;
 
-	return ret;
+	return TRACE_TYPE_HANDLED;
 }
 
-static bool
-trace_branch_is_leaf(struct trace_iterator *iter,
+static struct ftrace_graph_ret_entry *
+get_return_for_leaf(struct trace_iterator *iter,
 		struct ftrace_graph_ent_entry *curr)
 {
 	struct ring_buffer_iter *ring_iter;
@@ -211,65 +297,123 @@ trace_branch_is_leaf(struct trace_iterator *iter,
 
 	ring_iter = iter->buffer_iter[iter->cpu];
 
-	if (!ring_iter)
-		return false;
-
-	event = ring_buffer_iter_peek(ring_iter, NULL);
+	/* First peek to compare current entry and the next one */
+	if (ring_iter)
+		event = ring_buffer_iter_peek(ring_iter, NULL);
+	else {
+		/* We need to consume the current entry to see the next one */
+		ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
+		event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
+					NULL);
+	}
 
 	if (!event)
-		return false;
+		return NULL;
 
 	next = ring_buffer_event_data(event);
 
 	if (next->ent.type != TRACE_GRAPH_RET)
-		return false;
+		return NULL;
 
 	if (curr->ent.pid != next->ent.pid ||
 	    curr->graph_ent.func != next->ret.func)
-		return false;
+		return NULL;
+
+	/* this is a leaf, now advance the iterator */
+	if (ring_iter)
+		ring_buffer_read(ring_iter, NULL);
 
-	return true;
+	return next;
+}
+
+/* Signal a overhead of time execution to the output */
+static int
+print_graph_overhead(unsigned long long duration, struct trace_seq *s)
+{
+	/* If duration disappear, we don't need anything */
+	if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
+		return 1;
+
+	/* Non nested entry or return */
+	if (duration == -1)
+		return trace_seq_printf(s, "            ");
+
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+		/* Duration exceeded 100 msecs */
+		if (duration > 100000ULL)
+			return trace_seq_printf(s, "! ");
+
+		/* Duration exceeded 10 msecs */
+		if (duration > 10000ULL)
+			return trace_seq_printf(s, "+ ");
+	}
+
+	return trace_seq_printf(s, "  ");
+}
+
+static int print_graph_abs_time(u64 t, struct trace_seq *s)
+{
+	unsigned long usecs_rem;
+
+	usecs_rem = do_div(t, NSEC_PER_SEC);
+	usecs_rem /= 1000;
+
+	return trace_seq_printf(s, "%5lu.%06lu |  ",
+			(unsigned long)t, usecs_rem);
 }
 
 static enum print_line_t
-print_graph_irq(struct trace_seq *s, unsigned long addr,
+print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 		enum trace_type type, int cpu, pid_t pid)
 {
 	int ret;
+	struct trace_seq *s = &iter->seq;
 
 	if (addr < (unsigned long)__irqentry_text_start ||
 	    addr >= (unsigned long)__irqentry_text_end)
 		return TRACE_TYPE_UNHANDLED;
 
-	if (type == TRACE_GRAPH_ENT) {
-		ret = trace_seq_printf(s, "==========> | ");
-	} else {
-		/* Cpu */
-		if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
-			ret = print_graph_cpu(s, cpu);
-			if (ret == TRACE_TYPE_PARTIAL_LINE)
-				return TRACE_TYPE_PARTIAL_LINE;
-		}
-		/* Proc */
-		if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
-			ret = print_graph_proc(s, pid);
-			if (ret == TRACE_TYPE_PARTIAL_LINE)
-				return TRACE_TYPE_PARTIAL_LINE;
+	/* Absolute time */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+		ret = print_graph_abs_time(iter->ts, s);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
 
-			ret = trace_seq_printf(s, " | ");
-			if (!ret)
-				return TRACE_TYPE_PARTIAL_LINE;
-		}
+	/* Cpu */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+		ret = print_graph_cpu(s, cpu);
+		if (ret == TRACE_TYPE_PARTIAL_LINE)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+	/* Proc */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+		ret = print_graph_proc(s, pid);
+		if (ret == TRACE_TYPE_PARTIAL_LINE)
+			return TRACE_TYPE_PARTIAL_LINE;
+		ret = trace_seq_printf(s, " | ");
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
 
-		/* No overhead */
-		if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-			ret = trace_seq_printf(s, "  ");
-			if (!ret)
-				return TRACE_TYPE_PARTIAL_LINE;
-		}
+	/* No overhead */
+	ret = print_graph_overhead(-1, s);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	if (type == TRACE_GRAPH_ENT)
+		ret = trace_seq_printf(s, "==========>");
+	else
+		ret = trace_seq_printf(s, "<==========");
+
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Don't close the duration column if haven't one */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+		trace_seq_printf(s, " |");
+	ret = trace_seq_printf(s, "\n");
 
-	ret = trace_seq_printf(s, "<========== |\n");
-	}
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 	return TRACE_TYPE_HANDLED;
@@ -288,7 +432,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
 	sprintf(msecs_str, "%lu", (unsigned long) duration);
 
 	/* Print msecs */
-	ret = trace_seq_printf(s, msecs_str);
+	ret = trace_seq_printf(s, "%s", msecs_str);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
@@ -321,51 +465,33 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
 
 }
 
-/* Signal a overhead of time execution to the output */
-static int
-print_graph_overhead(unsigned long long duration, struct trace_seq *s)
-{
-	/* Duration exceeded 100 msecs */
-	if (duration > 100000ULL)
-		return trace_seq_printf(s, "! ");
-
-	/* Duration exceeded 10 msecs */
-	if (duration > 10000ULL)
-		return trace_seq_printf(s, "+ ");
-
-	return trace_seq_printf(s, "  ");
-}
-
 /* Case of a leaf function on its call entry */
 static enum print_line_t
 print_graph_entry_leaf(struct trace_iterator *iter,
-		struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
+		struct ftrace_graph_ent_entry *entry,
+		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
 {
-	struct ftrace_graph_ret_entry *ret_entry;
 	struct ftrace_graph_ret *graph_ret;
-	struct ring_buffer_event *event;
 	struct ftrace_graph_ent *call;
 	unsigned long long duration;
 	int ret;
 	int i;
 
-	event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
-	ret_entry = ring_buffer_event_data(event);
 	graph_ret = &ret_entry->ret;
 	call = &entry->graph_ent;
 	duration = graph_ret->rettime - graph_ret->calltime;
 
 	/* Overhead */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-		ret = print_graph_overhead(duration, s);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
+	ret = print_graph_overhead(duration, s);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* Duration */
-	ret = print_graph_duration(duration, s);
-	if (ret == TRACE_TYPE_PARTIAL_LINE)
-		return TRACE_TYPE_PARTIAL_LINE;
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+		ret = print_graph_duration(duration, s);
+		if (ret == TRACE_TYPE_PARTIAL_LINE)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
 
 	/* Function */
 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
@@ -394,25 +520,17 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
 	struct ftrace_graph_ent *call = &entry->graph_ent;
 
 	/* No overhead */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-		ret = trace_seq_printf(s, "  ");
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
+	ret = print_graph_overhead(-1, s);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
 
-	/* Interrupt */
-	ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid);
-	if (ret == TRACE_TYPE_UNHANDLED) {
-		/* No time */
+	/* No time */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
 		ret = trace_seq_printf(s, " | ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
-	} else {
-		if (ret == TRACE_TYPE_PARTIAL_LINE)
-			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-
 	/* Function */
 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
 		ret = trace_seq_printf(s, "  ");
@@ -428,20 +546,40 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
-	return TRACE_TYPE_HANDLED;
+	/*
+	 * we already consumed the current entry to check the next one
+	 * and see if this is a leaf.
+	 */
+	return TRACE_TYPE_NO_CONSUME;
 }
 
 static enum print_line_t
 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
-		struct trace_iterator *iter, int cpu)
+		struct trace_iterator *iter)
 {
 	int ret;
+	int cpu = iter->cpu;
+	pid_t *last_entry = iter->private;
 	struct trace_entry *ent = iter->ent;
+	struct ftrace_graph_ent *call = &field->graph_ent;
+	struct ftrace_graph_ret_entry *leaf_ret;
 
 	/* Pid */
-	if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
+	if (verif_pid(s, ent->pid, cpu, last_entry) == TRACE_TYPE_PARTIAL_LINE)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Interrupt */
+	ret = print_graph_irq(iter, call->func, TRACE_GRAPH_ENT, cpu, ent->pid);
+	if (ret == TRACE_TYPE_PARTIAL_LINE)
 		return TRACE_TYPE_PARTIAL_LINE;
 
+	/* Absolute time */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+		ret = print_graph_abs_time(iter->ts, s);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
 	/* Cpu */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
 		ret = print_graph_cpu(s, cpu);
@@ -460,8 +598,9 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 		return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	if (trace_branch_is_leaf(iter, field))
-		return print_graph_entry_leaf(iter, field, s);
+	leaf_ret = get_return_for_leaf(iter, field);
+	if (leaf_ret)
+		return print_graph_entry_leaf(iter, field, leaf_ret, s);
 	else
 		return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
 
@@ -469,16 +608,25 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 
 static enum print_line_t
 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
-		struct trace_entry *ent, int cpu)
+		struct trace_entry *ent, struct trace_iterator *iter)
 {
 	int i;
 	int ret;
+	int cpu = iter->cpu;
+	pid_t *last_pid = iter->private, pid = ent->pid;
 	unsigned long long duration = trace->rettime - trace->calltime;
 
 	/* Pid */
-	if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
+	if (verif_pid(s, pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE)
 		return TRACE_TYPE_PARTIAL_LINE;
 
+	/* Absolute time */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+		ret = print_graph_abs_time(iter->ts, s);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
 	/* Cpu */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
 		ret = print_graph_cpu(s, cpu);
@@ -498,16 +646,16 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 	}
 
 	/* Overhead */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-		ret = print_graph_overhead(duration, s);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
+	ret = print_graph_overhead(duration, s);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* Duration */
-	ret = print_graph_duration(duration, s);
-	if (ret == TRACE_TYPE_PARTIAL_LINE)
-		return TRACE_TYPE_PARTIAL_LINE;
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+		ret = print_graph_duration(duration, s);
+		if (ret == TRACE_TYPE_PARTIAL_LINE)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
 
 	/* Closing brace */
 	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
@@ -528,7 +676,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 		return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid);
+	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
 	if (ret == TRACE_TYPE_PARTIAL_LINE)
 		return TRACE_TYPE_PARTIAL_LINE;
 
@@ -541,14 +689,23 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s,
 {
 	int i;
 	int ret;
+	int cpu = iter->cpu;
+	pid_t *last_pid = iter->private;
 
 	/* Pid */
-	if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE)
+	if (verif_pid(s, ent->pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE)
 		return TRACE_TYPE_PARTIAL_LINE;
 
+	/* Absolute time */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+		ret = print_graph_abs_time(iter->ts, s);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
 	/* Cpu */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
-		ret = print_graph_cpu(s, iter->cpu);
+		ret = print_graph_cpu(s, cpu);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
@@ -565,17 +722,17 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s,
 	}
 
 	/* No overhead */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-		ret = trace_seq_printf(s, "  ");
+	ret = print_graph_overhead(-1, s);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* No time */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+		ret = trace_seq_printf(s, " | ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	/* No time */
-	ret = trace_seq_printf(s, " | ");
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
-
 	/* Indentation */
 	if (trace->depth > 0)
 		for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
@@ -585,12 +742,19 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s,
 	}
 
 	/* The comment */
-	ret = trace_seq_printf(s, "/* %s", trace->buf);
+	ret = trace_seq_printf(s, "/* ");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	ret = trace_seq_bprintf(s, trace->fmt, trace->buf);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
-	if (ent->flags & TRACE_FLAG_CONT)
-		trace_seq_print_cont(s, iter);
+	/* Strip ending newline */
+	if (s->buffer[s->len - 1] == '\n') {
+		s->buffer[s->len - 1] = '\0';
+		s->len--;
+	}
 
 	ret = trace_seq_printf(s, " */\n");
 	if (!ret)
@@ -610,13 +774,12 @@ print_graph_function(struct trace_iterator *iter)
 	case TRACE_GRAPH_ENT: {
 		struct ftrace_graph_ent_entry *field;
 		trace_assign_type(field, entry);
-		return print_graph_entry(field, s, iter,
-				iter->cpu);
+		return print_graph_entry(field, s, iter);
 	}
 	case TRACE_GRAPH_RET: {
 		struct ftrace_graph_ret_entry *field;
 		trace_assign_type(field, entry);
-		return print_graph_return(&field->ret, s, entry, iter->cpu);
+		return print_graph_return(&field->ret, s, entry, iter);
 	}
 	case TRACE_PRINT: {
 		struct print_entry *field;
@@ -632,33 +795,64 @@ static void print_graph_headers(struct seq_file *s)
 {
 	/* 1st line */
 	seq_printf(s, "# ");
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
+		seq_printf(s, "     TIME       ");
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
-		seq_printf(s, "CPU ");
+		seq_printf(s, "CPU");
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
-		seq_printf(s, "TASK/PID ");
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD)
-		seq_printf(s, "OVERHEAD/");
-	seq_printf(s, "DURATION            FUNCTION CALLS\n");
+		seq_printf(s, "  TASK/PID ");
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+		seq_printf(s, "  DURATION ");
+	seq_printf(s, "               FUNCTION CALLS\n");
 
 	/* 2nd line */
 	seq_printf(s, "# ");
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
+		seq_printf(s, "      |         ");
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
 		seq_printf(s, "|  ");
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
-		seq_printf(s, "|    |    ");
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-		seq_printf(s, "|   ");
-		seq_printf(s, "|      |   |   |   |\n");
-	} else
-		seq_printf(s, "  |    |   |   |   |\n");
+		seq_printf(s, "  |    |   ");
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+		seq_printf(s, "  |   |    ");
+	seq_printf(s, "              |   |   |   |\n");
 }
+
+static void graph_trace_open(struct trace_iterator *iter)
+{
+	/* pid on the last trace processed */
+	pid_t *last_pid = alloc_percpu(pid_t);
+	int cpu;
+
+	if (!last_pid)
+		pr_warning("function graph tracer: not enough memory\n");
+	else
+		for_each_possible_cpu(cpu) {
+			pid_t *pid = per_cpu_ptr(last_pid, cpu);
+			*pid = -1;
+		}
+
+	iter->private = last_pid;
+}
+
+static void graph_trace_close(struct trace_iterator *iter)
+{
+	free_percpu(iter->private);
+}
+
 static struct tracer graph_trace __read_mostly = {
 	.name		= "function_graph",
+	.open		= graph_trace_open,
+	.close		= graph_trace_close,
+	.wait_pipe	= poll_wait_pipe,
 	.init		= graph_trace_init,
 	.reset		= graph_trace_reset,
 	.print_line	= print_graph_function,
 	.print_header	= print_graph_headers,
 	.flags		= &tracer_flags,
+#ifdef CONFIG_FTRACE_SELFTEST
+	.selftest	= trace_selftest_startup_function_graph,
+#endif
 };
 
 static __init int init_graph_trace(void)