author     Ingo Molnar <mingo@elte.hu>   2009-01-26 08:01:52 -0500
committer  Ingo Molnar <mingo@elte.hu>   2009-01-26 08:01:52 -0500
commit     5ce1b1ed27d4ab1d81b8543a96f488bba2071576
tree       6124a034fce0277570e5f10699337e20b564cbd1 /kernel
parent     7e49fcce1bdadd723ae6a0b3b324c4daced61563
parent     9005f3ebebfcfe9ccd731d16c468907a35ac1f9a
Merge branches 'tracing/ftrace' and 'tracing/function-graph-tracer' into tracing/core
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/trace_functions_graph.c | 298
1 file changed, 195 insertions, 103 deletions
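The merged patch teaches the function graph tracer two new output options, funcgraph-abstime and funcgraph-duration, and moves the per-CPU "last pid" bookkeeping from a static NR_CPUS array into per-cpu data allocated in graph_trace_open() and hung off iter->private. As a rough illustration of how the new options are exercised from userspace, the C sketch below writes to the usual ftrace control files; the debugfs mount point /sys/kernel/debug/tracing and the write_str() helper are assumptions made for the example, not part of this commit.

/*
 * Illustration only: enable the funcgraph-abstime and funcgraph-duration
 * options added by this merge.  Assumes debugfs is mounted at
 * /sys/kernel/debug; adjust the path if your system mounts it elsewhere.
 */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        const char *dir = "/sys/kernel/debug/tracing";
        char path[256];

        snprintf(path, sizeof(path), "%s/current_tracer", dir);
        write_str(path, "function_graph");

        /* each write toggles one option by name; "no<option>" clears it */
        snprintf(path, sizeof(path), "%s/trace_options", dir);
        write_str(path, "funcgraph-abstime");
        write_str(path, "funcgraph-duration");

        return 0;
}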
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 3c545984816f..66fc7b806a38 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -1,7 +1,7 @@
 /*
  *
  * Function graph tracer.
- * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
  * Mostly borrowed from function tracer which
  * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
  *
@@ -21,9 +21,11 @@
 #define TRACE_GRAPH_PRINT_CPU       0x2
 #define TRACE_GRAPH_PRINT_OVERHEAD  0x4
 #define TRACE_GRAPH_PRINT_PROC      0x8
+#define TRACE_GRAPH_PRINT_DURATION  0x10
+#define TRACE_GRAPH_PRINT_ABS_TIME  0X20
 
 static struct tracer_opt trace_opts[] = {
-        /* Display overruns ? */
+        /* Display overruns? (for self-debug purpose) */
         { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
         /* Display CPU ? */
         { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
@@ -31,17 +33,22 @@ static struct tracer_opt trace_opts[] = {
         { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
         /* Display proc name/pid */
         { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
+        /* Display duration of execution */
+        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
+        /* Display absolute time of an entry */
+        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
         { } /* Empty entry */
 };
 
 static struct tracer_flags tracer_flags = {
         /* Don't display overruns and proc by default */
-        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
+        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
+               TRACE_GRAPH_PRINT_DURATION,
         .opts = trace_opts
 };
 
 /* pid on the last trace processed */
-static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
+
 
 static int graph_trace_init(struct trace_array *tr)
 {
@@ -154,17 +161,25 @@ print_graph_proc(struct trace_seq *s, pid_t pid)
 
 /* If the pid changed since the last trace, output this event */
 static enum print_line_t
-verif_pid(struct trace_seq *s, pid_t pid, int cpu)
+verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu)
 {
         pid_t prev_pid;
+        pid_t *last_pid;
         int ret;
 
-        if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
+        if (!last_pids_cpu)
+                return TRACE_TYPE_HANDLED;
+
+        last_pid = per_cpu_ptr(last_pids_cpu, cpu);
+
+        if (*last_pid == pid)
                 return TRACE_TYPE_HANDLED;
 
-        prev_pid = last_pid[cpu];
-        last_pid[cpu] = pid;
+        prev_pid = *last_pid;
+        *last_pid = pid;
 
+        if (prev_pid == -1)
+                return TRACE_TYPE_HANDLED;
         /*
          * Context-switch trace line:
 
@@ -232,9 +247,34 @@ trace_branch_is_leaf(struct trace_iterator *iter,
         return true;
 }
 
+/* Signal a overhead of time execution to the output */
+static int
+print_graph_overhead(unsigned long long duration, struct trace_seq *s)
+{
+        /* If duration disappear, we don't need anything */
+        if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
+                return 1;
+
+        /* Non nested entry or return */
+        if (duration == -1)
+                return trace_seq_printf(s, " ");
+
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+                /* Duration exceeded 100 msecs */
+                if (duration > 100000ULL)
+                        return trace_seq_printf(s, "! ");
+
+                /* Duration exceeded 10 msecs */
+                if (duration > 10000ULL)
+                        return trace_seq_printf(s, "+ ");
+        }
+
+        return trace_seq_printf(s, " ");
+}
+
 static enum print_line_t
 print_graph_irq(struct trace_seq *s, unsigned long addr,
                 enum trace_type type, int cpu, pid_t pid)
 {
         int ret;
 
@@ -242,35 +282,40 @@ print_graph_irq(struct trace_seq *s, unsigned long addr,
             addr >= (unsigned long)__irqentry_text_end)
                 return TRACE_TYPE_UNHANDLED;
 
-        if (type == TRACE_GRAPH_ENT) {
-                ret = trace_seq_printf(s, "==========> | ");
-        } else {
-                /* Cpu */
-                if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
-                        ret = print_graph_cpu(s, cpu);
-                        if (ret == TRACE_TYPE_PARTIAL_LINE)
-                                return TRACE_TYPE_PARTIAL_LINE;
-                }
-                /* Proc */
-                if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
-                        ret = print_graph_proc(s, pid);
-                        if (ret == TRACE_TYPE_PARTIAL_LINE)
-                                return TRACE_TYPE_PARTIAL_LINE;
+        /* Cpu */
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+                ret = print_graph_cpu(s, cpu);
+                if (ret == TRACE_TYPE_PARTIAL_LINE)
+                        return TRACE_TYPE_PARTIAL_LINE;
+        }
+        /* Proc */
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+                ret = print_graph_proc(s, pid);
+                if (ret == TRACE_TYPE_PARTIAL_LINE)
+                        return TRACE_TYPE_PARTIAL_LINE;
+                ret = trace_seq_printf(s, " | ");
+                if (!ret)
+                        return TRACE_TYPE_PARTIAL_LINE;
+        }
 
-                        ret = trace_seq_printf(s, " | ");
-                        if (!ret)
-                                return TRACE_TYPE_PARTIAL_LINE;
-                }
+        /* No overhead */
+        ret = print_graph_overhead(-1, s);
+        if (!ret)
+                return TRACE_TYPE_PARTIAL_LINE;
 
-                /* No overhead */
-                if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-                        ret = trace_seq_printf(s, " ");
-                        if (!ret)
-                                return TRACE_TYPE_PARTIAL_LINE;
-                }
+        if (type == TRACE_GRAPH_ENT)
+                ret = trace_seq_printf(s, "==========>");
+        else
+                ret = trace_seq_printf(s, "<==========");
+
+        if (!ret)
+                return TRACE_TYPE_PARTIAL_LINE;
+
+        /* Don't close the duration column if haven't one */
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+                trace_seq_printf(s, " |");
+        ret = trace_seq_printf(s, "\n");
 
-                ret = trace_seq_printf(s, "<========== |\n");
-        }
         if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
         return TRACE_TYPE_HANDLED;
@@ -289,7 +334,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
         sprintf(msecs_str, "%lu", (unsigned long) duration);
 
         /* Print msecs */
-        ret = trace_seq_printf(s, msecs_str);
+        ret = trace_seq_printf(s, "%s", msecs_str);
         if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
 
@@ -322,19 +367,15 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
 
 }
 
-/* Signal a overhead of time execution to the output */
-static int
-print_graph_overhead(unsigned long long duration, struct trace_seq *s)
+static int print_graph_abs_time(u64 t, struct trace_seq *s)
 {
-        /* Duration exceeded 100 msecs */
-        if (duration > 100000ULL)
-                return trace_seq_printf(s, "! ");
+        unsigned long usecs_rem;
 
-        /* Duration exceeded 10 msecs */
-        if (duration > 10000ULL)
-                return trace_seq_printf(s, "+ ");
+        usecs_rem = do_div(t, 1000000000);
+        usecs_rem /= 1000;
 
-        return trace_seq_printf(s, " ");
+        return trace_seq_printf(s, "%5lu.%06lu | ",
+                        (unsigned long)t, usecs_rem);
 }
 
 /* Case of a leaf function on its call entry */
@@ -357,16 +398,16 @@ print_graph_entry_leaf(struct trace_iterator *iter,
         duration = graph_ret->rettime - graph_ret->calltime;
 
         /* Overhead */
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-                ret = print_graph_overhead(duration, s);
-                if (!ret)
-                        return TRACE_TYPE_PARTIAL_LINE;
-        }
+        ret = print_graph_overhead(duration, s);
+        if (!ret)
+                return TRACE_TYPE_PARTIAL_LINE;
 
         /* Duration */
-        ret = print_graph_duration(duration, s);
-        if (ret == TRACE_TYPE_PARTIAL_LINE)
-                return TRACE_TYPE_PARTIAL_LINE;
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+                ret = print_graph_duration(duration, s);
+                if (ret == TRACE_TYPE_PARTIAL_LINE)
+                        return TRACE_TYPE_PARTIAL_LINE;
+        }
 
         /* Function */
         for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
@@ -395,25 +436,17 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
         struct ftrace_graph_ent *call = &entry->graph_ent;
 
         /* No overhead */
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-                ret = trace_seq_printf(s, " ");
-                if (!ret)
-                        return TRACE_TYPE_PARTIAL_LINE;
-        }
+        ret = print_graph_overhead(-1, s);
+        if (!ret)
+                return TRACE_TYPE_PARTIAL_LINE;
 
-        /* Interrupt */
-        ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid);
-        if (ret == TRACE_TYPE_UNHANDLED) {
-                /* No time */
+        /* No time */
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                 ret = trace_seq_printf(s, " | ");
                 if (!ret)
                         return TRACE_TYPE_PARTIAL_LINE;
-        } else {
-                if (ret == TRACE_TYPE_PARTIAL_LINE)
-                        return TRACE_TYPE_PARTIAL_LINE;
         }
 
-
         /* Function */
         for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                 ret = trace_seq_printf(s, " ");
@@ -434,15 +467,30 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
 
 static enum print_line_t
 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
-                        struct trace_iterator *iter, int cpu)
+                        struct trace_iterator *iter)
 {
         int ret;
+        int cpu = iter->cpu;
+        pid_t *last_entry = iter->private;
         struct trace_entry *ent = iter->ent;
+        struct ftrace_graph_ent *call = &field->graph_ent;
 
         /* Pid */
-        if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
+        if (verif_pid(s, ent->pid, cpu, last_entry) == TRACE_TYPE_PARTIAL_LINE)
+                return TRACE_TYPE_PARTIAL_LINE;
+
+        /* Interrupt */
+        ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, ent->pid);
+        if (ret == TRACE_TYPE_PARTIAL_LINE)
                 return TRACE_TYPE_PARTIAL_LINE;
 
+        /* Absolute time */
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+                ret = print_graph_abs_time(iter->ts, s);
+                if (!ret)
+                        return TRACE_TYPE_PARTIAL_LINE;
+        }
+
         /* Cpu */
         if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                 ret = print_graph_cpu(s, cpu);
@@ -470,16 +518,25 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 
 static enum print_line_t
 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
-                   struct trace_entry *ent, int cpu)
+                   struct trace_entry *ent, struct trace_iterator *iter)
 {
         int i;
         int ret;
+        int cpu = iter->cpu;
+        pid_t *last_pid = iter->private;
         unsigned long long duration = trace->rettime - trace->calltime;
 
         /* Pid */
-        if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
+        if (verif_pid(s, ent->pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE)
                 return TRACE_TYPE_PARTIAL_LINE;
 
+        /* Absolute time */
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+                ret = print_graph_abs_time(iter->ts, s);
+                if (!ret)
+                        return TRACE_TYPE_PARTIAL_LINE;
+        }
+
         /* Cpu */
         if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                 ret = print_graph_cpu(s, cpu);
@@ -499,16 +556,16 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
         }
 
         /* Overhead */
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-                ret = print_graph_overhead(duration, s);
-                if (!ret)
-                        return TRACE_TYPE_PARTIAL_LINE;
-        }
+        ret = print_graph_overhead(duration, s);
+        if (!ret)
+                return TRACE_TYPE_PARTIAL_LINE;
 
         /* Duration */
-        ret = print_graph_duration(duration, s);
-        if (ret == TRACE_TYPE_PARTIAL_LINE)
-                return TRACE_TYPE_PARTIAL_LINE;
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+                ret = print_graph_duration(duration, s);
+                if (ret == TRACE_TYPE_PARTIAL_LINE)
+                        return TRACE_TYPE_PARTIAL_LINE;
+        }
 
         /* Closing brace */
         for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
@@ -542,14 +599,23 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s,
 {
         int i;
         int ret;
+        int cpu = iter->cpu;
+        pid_t *last_pid = iter->private;
+
+        /* Absolute time */
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+                ret = print_graph_abs_time(iter->ts, s);
+                if (!ret)
+                        return TRACE_TYPE_PARTIAL_LINE;
+        }
 
         /* Pid */
-        if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE)
+        if (verif_pid(s, ent->pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE)
                 return TRACE_TYPE_PARTIAL_LINE;
 
         /* Cpu */
         if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
-                ret = print_graph_cpu(s, iter->cpu);
+                ret = print_graph_cpu(s, cpu);
                 if (ret == TRACE_TYPE_PARTIAL_LINE)
                         return TRACE_TYPE_PARTIAL_LINE;
         }
@@ -566,17 +632,17 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s,
         }
 
         /* No overhead */
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-                ret = trace_seq_printf(s, " ");
+        ret = print_graph_overhead(-1, s);
+        if (!ret)
+                return TRACE_TYPE_PARTIAL_LINE;
+
+        /* No time */
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+                ret = trace_seq_printf(s, " | ");
                 if (!ret)
                         return TRACE_TYPE_PARTIAL_LINE;
         }
 
-        /* No time */
-        ret = trace_seq_printf(s, " | ");
-        if (!ret)
-                return TRACE_TYPE_PARTIAL_LINE;
-
         /* Indentation */
         if (trace->depth > 0)
                 for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
@@ -614,13 +680,12 @@ print_graph_function(struct trace_iterator *iter)
         case TRACE_GRAPH_ENT: {
                 struct ftrace_graph_ent_entry *field;
                 trace_assign_type(field, entry);
-                return print_graph_entry(field, s, iter,
-                                         iter->cpu);
+                return print_graph_entry(field, s, iter);
         }
         case TRACE_GRAPH_RET: {
                 struct ftrace_graph_ret_entry *field;
                 trace_assign_type(field, entry);
-                return print_graph_return(&field->ret, s, entry, iter->cpu);
+                return print_graph_return(&field->ret, s, entry, iter);
         }
         case TRACE_PRINT: {
                 struct print_entry *field;
@@ -636,28 +701,55 @@ static void print_graph_headers(struct seq_file *s)
 {
         /* 1st line */
         seq_printf(s, "# ");
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
+                seq_printf(s, " TIME ");
         if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
-                seq_printf(s, "CPU ");
+                seq_printf(s, "CPU");
         if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
-                seq_printf(s, "TASK/PID ");
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD)
-                seq_printf(s, "OVERHEAD/");
-        seq_printf(s, "DURATION FUNCTION CALLS\n");
+                seq_printf(s, " TASK/PID ");
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+                seq_printf(s, " DURATION ");
+        seq_printf(s, " FUNCTION CALLS\n");
 
         /* 2nd line */
         seq_printf(s, "# ");
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
+                seq_printf(s, " | ");
         if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
                 seq_printf(s, "| ");
         if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
-                seq_printf(s, "| | ");
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-                seq_printf(s, "| ");
-                seq_printf(s, "| | | | |\n");
-        } else
-                seq_printf(s, " | | | | |\n");
+                seq_printf(s, " | | ");
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+                seq_printf(s, " | | ");
+        seq_printf(s, " | | | |\n");
 }
+
+static void graph_trace_open(struct trace_iterator *iter)
+{
+        /* pid on the last trace processed */
+        pid_t *last_pid = alloc_percpu(pid_t);
+        int cpu;
+
+        if (!last_pid)
+                pr_warning("function graph tracer: not enough memory\n");
+        else
+                for_each_possible_cpu(cpu) {
+                        pid_t *pid = per_cpu_ptr(last_pid, cpu);
+                        *pid = -1;
+                }
+
+        iter->private = last_pid;
+}
+
+static void graph_trace_close(struct trace_iterator *iter)
+{
+        percpu_free(iter->private);
+}
+
 static struct tracer graph_trace __read_mostly = {
         .name = "function_graph",
+        .open = graph_trace_open,
+        .close = graph_trace_close,
         .init = graph_trace_init,
         .reset = graph_trace_reset,
         .print_line = print_graph_function,