Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
 -rw-r--r--  kernel/trace/trace_functions_graph.c | 219
 1 file changed, 121 insertions(+), 98 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 6004ccac2dd7..e876816fa8e7 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -14,6 +14,11 @@
 #include "trace.h"
 #include "trace_output.h"
 
+struct fgraph_data {
+	pid_t last_pid;
+	int depth;
+};
+
 #define TRACE_GRAPH_INDENT 2
 
 /* Flag options */
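The per-cpu fgraph_data introduced above bundles the bookkeeping that used to be a bare per-cpu pid_t: the pid seen on the last output line plus the current call depth, kept per CPU so state from different CPUs cannot corrupt each other. As a minimal, self-contained sketch of the percpu pattern the patch leans on (the example_* names are illustrative and not in the kernel tree; graph_trace_open() further down does the real equivalent):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/types.h>

struct example_fgraph_data {
	pid_t last_pid;
	int depth;
};

/* Illustrative only: one slot per possible CPU, seeded like graph_trace_open(). */
static struct example_fgraph_data *example_open(void)
{
	struct example_fgraph_data *data = alloc_percpu(struct example_fgraph_data);
	int cpu;

	if (!data)
		return NULL;

	for_each_possible_cpu(cpu) {
		per_cpu_ptr(data, cpu)->last_pid = -1;
		per_cpu_ptr(data, cpu)->depth = 0;
	}
	return data;
}

/* Illustrative only: later updates address a single CPU's slot. */
static void example_record(struct example_fgraph_data *data, int cpu, pid_t pid, int depth)
{
	per_cpu_ptr(data, cpu)->last_pid = pid;
	per_cpu_ptr(data, cpu)->depth = depth;
}

The matching teardown for such an allocation would be free_percpu().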
@@ -231,16 +236,16 @@ print_graph_proc(struct trace_seq *s, pid_t pid)
 
 /* If the pid changed since the last trace, output this event */
 static enum print_line_t
-verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu)
+verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
 {
 	pid_t prev_pid;
 	pid_t *last_pid;
 	int ret;
 
-	if (!last_pids_cpu)
+	if (!data)
 		return TRACE_TYPE_HANDLED;
 
-	last_pid = per_cpu_ptr(last_pids_cpu, cpu);
+	last_pid = &(per_cpu_ptr(data, cpu)->last_pid);
 
 	if (*last_pid == pid)
 		return TRACE_TYPE_HANDLED;
@@ -471,6 +476,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 		struct ftrace_graph_ent_entry *entry,
 		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
 {
+	struct fgraph_data *data = iter->private;
 	struct ftrace_graph_ret *graph_ret;
 	struct ftrace_graph_ent *call;
 	unsigned long long duration;
@@ -481,6 +487,18 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 	call = &entry->graph_ent;
 	duration = graph_ret->rettime - graph_ret->calltime;
 
+	if (data) {
+		int cpu = iter->cpu;
+		int *depth = &(per_cpu_ptr(data, cpu)->depth);
+
+		/*
+		 * Comments display at + 1 to depth. Since
+		 * this is a leaf function, keep the comments
+		 * equal to this depth.
+		 */
+		*depth = call->depth - 1;
+	}
+
 	/* Overhead */
 	ret = print_graph_overhead(duration, s);
 	if (!ret)
@@ -512,12 +530,21 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 }
 
 static enum print_line_t
-print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
-			 struct trace_seq *s, pid_t pid, int cpu)
+print_graph_entry_nested(struct trace_iterator *iter,
+			 struct ftrace_graph_ent_entry *entry,
+			 struct trace_seq *s, int cpu)
 {
-	int i;
-	int ret;
 	struct ftrace_graph_ent *call = &entry->graph_ent;
+	struct fgraph_data *data = iter->private;
+	int ret;
+	int i;
+
+	if (data) {
+		int cpu = iter->cpu;
+		int *depth = &(per_cpu_ptr(data, cpu)->depth);
+
+		*depth = call->depth;
+	}
 
 	/* No overhead */
 	ret = print_graph_overhead(-1, s);
@@ -554,24 +581,24 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
 }
 
 static enum print_line_t
-print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
-		  struct trace_iterator *iter)
+print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
+		     int type, unsigned long addr)
 {
-	int ret;
-	int cpu = iter->cpu;
-	pid_t *last_entry = iter->private;
+	struct fgraph_data *data = iter->private;
 	struct trace_entry *ent = iter->ent;
-	struct ftrace_graph_ent *call = &field->graph_ent;
-	struct ftrace_graph_ret_entry *leaf_ret;
+	int cpu = iter->cpu;
+	int ret;
 
 	/* Pid */
-	if (verif_pid(s, ent->pid, cpu, last_entry) == TRACE_TYPE_PARTIAL_LINE)
+	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
 		return TRACE_TYPE_PARTIAL_LINE;
 
-	/* Interrupt */
-	ret = print_graph_irq(iter, call->func, TRACE_GRAPH_ENT, cpu, ent->pid);
-	if (ret == TRACE_TYPE_PARTIAL_LINE)
-		return TRACE_TYPE_PARTIAL_LINE;
+	if (type) {
+		/* Interrupt */
+		ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
+		if (ret == TRACE_TYPE_PARTIAL_LINE)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
 
 	/* Absolute time */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
@@ -598,11 +625,25 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
+	return 0;
+}
+
+static enum print_line_t
+print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
+			struct trace_iterator *iter)
+{
+	int cpu = iter->cpu;
+	struct ftrace_graph_ent *call = &field->graph_ent;
+	struct ftrace_graph_ret_entry *leaf_ret;
+
+	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
+		return TRACE_TYPE_PARTIAL_LINE;
+
 	leaf_ret = get_return_for_leaf(iter, field);
 	if (leaf_ret)
 		return print_graph_entry_leaf(iter, field, leaf_ret, s);
 	else
-		return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
+		return print_graph_entry_nested(iter, field, s, cpu);
 
 }
 
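Taken together, the two hunks above pull the shared line prefix (pid-change check, optional IRQ marker, absolute time, CPU and task columns) out of print_graph_entry() into print_graph_prologue(). Entry records pass the event type and function address so the IRQ marker can be emitted; return and comment records call it with type == 0 and skip that step. A condensed, illustrative-only model of that structure (the example_* names are not from the kernel):

enum example_ret { EXAMPLE_OK, EXAMPLE_PARTIAL };

/* Stands in for print_graph_prologue(): emits the shared prefix. */
static enum example_ret example_prologue(int type, unsigned long addr)
{
	/* pid-change check first */
	if (type) {
		/* only entry records ask for the IRQ marker at addr */
	}
	/* then the absolute time, CPU and task columns */
	return EXAMPLE_OK;
}

/* Entries want the IRQ marker, so they pass a type and an address. */
static enum example_ret example_print_entry(unsigned long func)
{
	return example_prologue(1 /* e.g. TRACE_GRAPH_ENT */, func);
}

/* Returns and comments share the prefix but never print the marker. */
static enum example_ret example_print_return(void)
{
	return example_prologue(0, 0);
}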
@@ -610,40 +651,27 @@ static enum print_line_t
 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 		   struct trace_entry *ent, struct trace_iterator *iter)
 {
-	int i;
-	int ret;
-	int cpu = iter->cpu;
-	pid_t *last_pid = iter->private, pid = ent->pid;
 	unsigned long long duration = trace->rettime - trace->calltime;
+	struct fgraph_data *data = iter->private;
+	pid_t pid = ent->pid;
+	int cpu = iter->cpu;
+	int ret;
+	int i;
 
-	/* Pid */
-	if (verif_pid(s, pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE)
-		return TRACE_TYPE_PARTIAL_LINE;
-
-	/* Absolute time */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
-		ret = print_graph_abs_time(iter->ts, s);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
+	if (data) {
+		int cpu = iter->cpu;
+		int *depth = &(per_cpu_ptr(data, cpu)->depth);
 
-	/* Cpu */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
-		ret = print_graph_cpu(s, cpu);
-		if (ret == TRACE_TYPE_PARTIAL_LINE)
-			return TRACE_TYPE_PARTIAL_LINE;
+		/*
+		 * Comments display at + 1 to depth. This is the
+		 * return from a function, we now want the comments
+		 * to display at the same level of the bracket.
+		 */
+		*depth = trace->depth - 1;
 	}
 
-	/* Proc */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
-		ret = print_graph_proc(s, ent->pid);
-		if (ret == TRACE_TYPE_PARTIAL_LINE)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		ret = trace_seq_printf(s, " | ");
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
+	if (print_graph_prologue(iter, s, 0, 0))
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* Overhead */
 	ret = print_graph_overhead(duration, s);
@@ -684,42 +712,21 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 }
 
 static enum print_line_t
-print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
-		    struct trace_entry *ent, struct trace_iterator *iter)
+print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
+		    struct trace_iterator *iter)
 {
-	int i;
+	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
+	struct fgraph_data *data = iter->private;
+	struct trace_event *event;
+	int depth = 0;
 	int ret;
-	int cpu = iter->cpu;
-	pid_t *last_pid = iter->private;
-
-	/* Pid */
-	if (verif_pid(s, ent->pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE)
-		return TRACE_TYPE_PARTIAL_LINE;
-
-	/* Absolute time */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
-		ret = print_graph_abs_time(iter->ts, s);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
-
-	/* Cpu */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
-		ret = print_graph_cpu(s, cpu);
-		if (ret == TRACE_TYPE_PARTIAL_LINE)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
+	int i;
 
-	/* Proc */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
-		ret = print_graph_proc(s, ent->pid);
-		if (ret == TRACE_TYPE_PARTIAL_LINE)
-			return TRACE_TYPE_PARTIAL_LINE;
+	if (data)
+		depth = per_cpu_ptr(data, iter->cpu)->depth;
 
-		ret = trace_seq_printf(s, " | ");
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
+	if (print_graph_prologue(iter, s, 0, 0))
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* No overhead */
 	ret = print_graph_overhead(-1, s);
@@ -734,8 +741,8 @@ print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
 	}
 
 	/* Indentation */
-	if (trace->depth > 0)
-		for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
+	if (depth > 0)
+		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
 			ret = trace_seq_printf(s, " ");
 			if (!ret)
 				return TRACE_TYPE_PARTIAL_LINE;
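This is where the saved depth pays off: a comment line (a trace_printk() or any other event routed through print_graph_comment()) is indented (depth + 1) levels, each TRACE_GRAPH_INDENT columns wide. That is why a leaf entry and a function return store call->depth - 1 and trace->depth - 1 above, while a nested entry stores call->depth: the comment then lines up with the body of the function that emitted it, or with the closing bracket after a return. A small sketch of the arithmetic (illustrative helper, not in the file; TRACE_GRAPH_INDENT is the define from the top of this file):

/* Illustrative only: columns of indentation a comment gets. */
static int example_comment_columns(int depth)
{
	return (depth + 1) * TRACE_GRAPH_INDENT;
}

/*
 * With TRACE_GRAPH_INDENT == 2:
 *   nested entry at call->depth == 2 stores 2 -> comment at 6 columns,
 *   leaf entry   at call->depth == 2 stores 1 -> comment at 4 columns,
 *   return       at trace->depth == 2 stores 1 -> comment at 4 columns.
 */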
@@ -746,9 +753,26 @@ print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
-	ret = trace_seq_bprintf(s, trace->fmt, trace->buf);
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
+	switch (iter->ent->type) {
+	case TRACE_BPRINT:
+		ret = trace_print_bprintk_msg_only(iter);
+		if (ret != TRACE_TYPE_HANDLED)
+			return ret;
+		break;
+	case TRACE_PRINT:
+		ret = trace_print_printk_msg_only(iter);
+		if (ret != TRACE_TYPE_HANDLED)
+			return ret;
+		break;
+	default:
+		event = ftrace_find_event(ent->type);
+		if (!event)
+			return TRACE_TYPE_UNHANDLED;
+
+		ret = event->trace(iter, sym_flags);
+		if (ret != TRACE_TYPE_HANDLED)
+			return ret;
+	}
 
 	/* Strip ending newline */
 	if (s->buffer[s->len - 1] == '\n') {
@@ -767,8 +791,8 @@ print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
 enum print_line_t
 print_graph_function(struct trace_iterator *iter)
 {
-	struct trace_seq *s = &iter->seq;
 	struct trace_entry *entry = iter->ent;
+	struct trace_seq *s = &iter->seq;
 
 	switch (entry->type) {
 	case TRACE_GRAPH_ENT: {
@@ -781,14 +805,11 @@ print_graph_function(struct trace_iterator *iter)
 		trace_assign_type(field, entry);
 		return print_graph_return(&field->ret, s, entry, iter);
 	}
-	case TRACE_BPRINT: {
-		struct bprint_entry *field;
-		trace_assign_type(field, entry);
-		return print_graph_comment(field, s, entry, iter);
-	}
 	default:
-		return TRACE_TYPE_UNHANDLED;
+		return print_graph_comment(s, entry, iter);
 	}
+
+	return TRACE_TYPE_HANDLED;
 }
 
 static void print_graph_headers(struct seq_file *s)
@@ -820,19 +841,21 @@ static void print_graph_headers(struct seq_file *s)
 
 static void graph_trace_open(struct trace_iterator *iter)
 {
-	/* pid on the last trace processed */
-	pid_t *last_pid = alloc_percpu(pid_t);
+	/* pid and depth on the last trace processed */
+	struct fgraph_data *data = alloc_percpu(struct fgraph_data);
 	int cpu;
 
-	if (!last_pid)
+	if (!data)
 		pr_warning("function graph tracer: not enough memory\n");
 	else
 		for_each_possible_cpu(cpu) {
-			pid_t *pid = per_cpu_ptr(last_pid, cpu);
+			pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
+			int *depth = &(per_cpu_ptr(data, cpu)->depth);
 			*pid = -1;
+			*depth = 0;
 		}
 
-	iter->private = last_pid;
+	iter->private = data;
 }
 
 static void graph_trace_close(struct trace_iterator *iter)
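graph_trace_open() now allocates the combined per-cpu fgraph_data rather than a bare pid_t and seeds every CPU's slot (last_pid = -1, depth = 0). The listing is cut off at graph_trace_close(), but since iter->private now points at a percpu allocation, the matching teardown would be expected to hand it to free_percpu(), along these lines (a sketch, not the actual body shown here):

static void example_graph_trace_close(struct trace_iterator *iter)
{
	free_percpu(iter->private);
}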
