Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--	kernel/trace/trace_functions_graph.c	227
1 file changed, 126 insertions(+), 101 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 6004ccac2dd7..d28687e7b3a7 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -14,6 +14,11 @@
 #include "trace.h"
 #include "trace_output.h"
 
+struct fgraph_data {
+	pid_t		last_pid;
+	int		depth;
+};
+
 #define TRACE_GRAPH_INDENT	2
 
 /* Flag options */
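The hunk above introduces struct fgraph_data, bundling the two pieces of per-CPU formatter state: the pid seen on the previous record and the depth of the last function entry. A minimal userspace model of that layout, with a plain array standing in for the kernel's per-cpu allocation, NR_CPUS chosen arbitrarily, and the fields seeded the same way graph_trace_open() does further down:

#include <stdio.h>

#define NR_CPUS 4

/* Userspace stand-in for the kernel's per-CPU fgraph_data. */
struct fgraph_data {
	int last_pid;	/* pid of the previous record on this CPU */
	int depth;	/* depth of the last function entry seen */
};

static struct fgraph_data cpu_data[NR_CPUS];

int main(void)
{
	int cpu;

	/* Same initial values graph_trace_open() uses: pid -1, depth 0. */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		cpu_data[cpu].last_pid = -1;
		cpu_data[cpu].depth = 0;
	}

	printf("cpu0: last_pid=%d depth=%d\n",
	       cpu_data[0].last_pid, cpu_data[0].depth);
	return 0;
}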
@@ -52,9 +57,9 @@ static struct tracer_flags tracer_flags = {
 
 /* Add a function return address to the trace stack on thread info.*/
 int
-ftrace_push_return_trace(unsigned long ret, unsigned long long time,
-			 unsigned long func, int *depth)
+ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
 {
+	unsigned long long calltime;
 	int index;
 
 	if (!current->ret_stack)
@@ -66,11 +71,13 @@ ftrace_push_return_trace(unsigned long ret, unsigned long long time,
 		return -EBUSY;
 	}
 
+	calltime = trace_clock_local();
+
 	index = ++current->curr_ret_stack;
 	barrier();
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
-	current->ret_stack[index].calltime = time;
+	current->ret_stack[index].calltime = calltime;
 	*depth = index;
 
 	return 0;
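With these two hunks ftrace_push_return_trace() no longer takes the call time from its caller; it stamps each frame itself with trace_clock_local() at push time. A standalone sketch of that pattern, with clock_gettime() standing in for trace_clock_local() and the stack size and error code simplified:

#include <stdio.h>
#include <time.h>

/* Userspace model of one ret_stack frame; field names mirror the kernel's. */
struct ftrace_ret_stack {
	unsigned long ret;		/* original return address */
	unsigned long func;		/* traced function */
	unsigned long long calltime;	/* stamped at push time */
};

#define FTRACE_RETFUNC_DEPTH 50

static struct ftrace_ret_stack ret_stack[FTRACE_RETFUNC_DEPTH];
static int curr_ret_stack = -1;

/* clock_gettime() stands in for the kernel's trace_clock_local(). */
static unsigned long long local_clock_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* After the patch the push helper records calltime itself. */
static int push_return_trace(unsigned long ret, unsigned long func, int *depth)
{
	int index;

	if (curr_ret_stack >= FTRACE_RETFUNC_DEPTH - 1)
		return -1;	/* -EBUSY in the kernel */

	index = ++curr_ret_stack;
	ret_stack[index].ret = ret;
	ret_stack[index].func = func;
	ret_stack[index].calltime = local_clock_ns();
	*depth = index;
	return 0;
}

int main(void)
{
	int depth;

	if (push_return_trace(0x1000, 0x2000, &depth) == 0)
		printf("depth=%d calltime=%llu\n",
		       depth, ret_stack[depth].calltime);
	return 0;
}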
@@ -231,16 +238,16 @@ print_graph_proc(struct trace_seq *s, pid_t pid)
 
 /* If the pid changed since the last trace, output this event */
 static enum print_line_t
-verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu)
+verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
 {
 	pid_t prev_pid;
 	pid_t *last_pid;
 	int ret;
 
-	if (!last_pids_cpu)
+	if (!data)
 		return TRACE_TYPE_HANDLED;
 
-	last_pid = per_cpu_ptr(last_pids_cpu, cpu);
+	last_pid = &(per_cpu_ptr(data, cpu)->last_pid);
 
 	if (*last_pid == pid)
 		return TRACE_TYPE_HANDLED;
@@ -471,6 +478,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 		       struct ftrace_graph_ent_entry *entry,
 		       struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
 {
+	struct fgraph_data *data = iter->private;
 	struct ftrace_graph_ret *graph_ret;
 	struct ftrace_graph_ent *call;
 	unsigned long long duration;
@@ -481,6 +489,18 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 	call = &entry->graph_ent;
 	duration = graph_ret->rettime - graph_ret->calltime;
 
+	if (data) {
+		int cpu = iter->cpu;
+		int *depth = &(per_cpu_ptr(data, cpu)->depth);
+
+		/*
+		 * Comments display at + 1 to depth. Since
+		 * this is a leaf function, keep the comments
+		 * equal to this depth.
+		 */
+		*depth = call->depth - 1;
+	}
+
 	/* Overhead */
 	ret = print_graph_overhead(duration, s);
 	if (!ret)
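The leaf path stores call->depth - 1 because comments are later indented at (depth + 1) * TRACE_GRAPH_INDENT (see print_graph_comment() further down), so a comment emitted around a leaf call lines up with the leaf itself. A small sketch of that indent arithmetic, with made-up function and message names:

#include <stdio.h>

#define TRACE_GRAPH_INDENT 2

/*
 * Comments are printed at (depth + 1) * TRACE_GRAPH_INDENT, matching
 * print_graph_comment(); a leaf entry therefore records depth - 1 so the
 * comment lands at the same column as the leaf call.
 */
static void print_comment_at(int depth, const char *msg)
{
	int i;

	for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
		putchar(' ');
	printf("/* %s */\n", msg);
}

int main(void)
{
	int call_depth = 3;

	printf("%*sdo_leaf_thing();\n", call_depth * TRACE_GRAPH_INDENT, "");
	print_comment_at(call_depth - 1, "comment aligned with the leaf");
	return 0;
}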
@@ -512,12 +532,21 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 }
 
 static enum print_line_t
-print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
-			 struct trace_seq *s, pid_t pid, int cpu)
+print_graph_entry_nested(struct trace_iterator *iter,
+			 struct ftrace_graph_ent_entry *entry,
+			 struct trace_seq *s, int cpu)
 {
-	int i;
-	int ret;
 	struct ftrace_graph_ent *call = &entry->graph_ent;
+	struct fgraph_data *data = iter->private;
+	int ret;
+	int i;
+
+	if (data) {
+		int cpu = iter->cpu;
+		int *depth = &(per_cpu_ptr(data, cpu)->depth);
+
+		*depth = call->depth;
+	}
 
 	/* No overhead */
 	ret = print_graph_overhead(-1, s);
@@ -554,24 +583,24 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
 }
 
 static enum print_line_t
-print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
-			struct trace_iterator *iter)
+print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
+		     int type, unsigned long addr)
 {
-	int ret;
-	int cpu = iter->cpu;
-	pid_t *last_entry = iter->private;
+	struct fgraph_data *data = iter->private;
 	struct trace_entry *ent = iter->ent;
-	struct ftrace_graph_ent *call = &field->graph_ent;
-	struct ftrace_graph_ret_entry *leaf_ret;
+	int cpu = iter->cpu;
+	int ret;
 
 	/* Pid */
-	if (verif_pid(s, ent->pid, cpu, last_entry) == TRACE_TYPE_PARTIAL_LINE)
+	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
 		return TRACE_TYPE_PARTIAL_LINE;
 
-	/* Interrupt */
-	ret = print_graph_irq(iter, call->func, TRACE_GRAPH_ENT, cpu, ent->pid);
-	if (ret == TRACE_TYPE_PARTIAL_LINE)
-		return TRACE_TYPE_PARTIAL_LINE;
+	if (type) {
+		/* Interrupt */
+		ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
+		if (ret == TRACE_TYPE_PARTIAL_LINE)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
 
 	/* Absolute time */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
@@ -598,11 +627,25 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 		return TRACE_TYPE_PARTIAL_LINE;
 	}
 
+	return 0;
+}
+
+static enum print_line_t
+print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
+			struct trace_iterator *iter)
+{
+	int cpu = iter->cpu;
+	struct ftrace_graph_ent *call = &field->graph_ent;
+	struct ftrace_graph_ret_entry *leaf_ret;
+
+	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
+		return TRACE_TYPE_PARTIAL_LINE;
+
 	leaf_ret = get_return_for_leaf(iter, field);
 	if (leaf_ret)
 		return print_graph_entry_leaf(iter, field, leaf_ret, s);
 	else
-		return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
+		return print_graph_entry_nested(iter, field, s, cpu);
 
 }
 
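The columns shared by entry, return, and comment records (pid check, optional IRQ marker, absolute time, CPU, proc) are now factored into print_graph_prologue(); the entry path passes TRACE_GRAPH_ENT and the function address, while return and comment pass (0, 0) to skip the IRQ check. A miniature of that factoring in plain C, with invented column formats and record types:

#include <stdio.h>

/* Miniature of the factored-out prologue: shared columns in one place,
 * the irq marker only when the caller supplies a record type. */
enum rec_type { REC_NONE = 0, REC_ENT, REC_RET };

static void print_prologue(int cpu, int pid, enum rec_type type,
			   unsigned long addr)
{
	printf("%d) ", cpu);		/* cpu column */
	printf("%5d | ", pid);		/* proc/pid column */
	if (type)			/* entry/return may flag an irq */
		printf("[irq? addr=0x%lx] ", addr);
}

static void print_entry(int cpu, int pid, const char *name, unsigned long addr)
{
	print_prologue(cpu, pid, REC_ENT, addr);
	printf("%s() {\n", name);
}

static void print_return(int cpu, int pid)
{
	print_prologue(cpu, pid, REC_NONE, 0);	/* (0, 0): skip irq check */
	printf("}\n");
}

int main(void)
{
	print_entry(0, 42, "do_work", 0xc0ffee);
	print_return(0, 42);
	return 0;
}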
@@ -610,40 +653,27 @@ static enum print_line_t
 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 		   struct trace_entry *ent, struct trace_iterator *iter)
 {
-	int i;
-	int ret;
-	int cpu = iter->cpu;
-	pid_t *last_pid = iter->private, pid = ent->pid;
 	unsigned long long duration = trace->rettime - trace->calltime;
+	struct fgraph_data *data = iter->private;
+	pid_t pid = ent->pid;
+	int cpu = iter->cpu;
+	int ret;
+	int i;
 
-	/* Pid */
-	if (verif_pid(s, pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE)
-		return TRACE_TYPE_PARTIAL_LINE;
+	if (data) {
+		int cpu = iter->cpu;
+		int *depth = &(per_cpu_ptr(data, cpu)->depth);
 
-	/* Absolute time */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
-		ret = print_graph_abs_time(iter->ts, s);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
+		/*
+		 * Comments display at + 1 to depth. This is the
+		 * return from a function, we now want the comments
+		 * to display at the same level of the bracket.
+		 */
+		*depth = trace->depth - 1;
 	}
 
-	/* Cpu */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
-		ret = print_graph_cpu(s, cpu);
-		if (ret == TRACE_TYPE_PARTIAL_LINE)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
-
-	/* Proc */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
-		ret = print_graph_proc(s, ent->pid);
-		if (ret == TRACE_TYPE_PARTIAL_LINE)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		ret = trace_seq_printf(s, " | ");
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
+	if (print_graph_prologue(iter, s, 0, 0))
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* Overhead */
 	ret = print_graph_overhead(duration, s);
@@ -684,42 +714,21 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 }
 
 static enum print_line_t
-print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
-		    struct trace_entry *ent, struct trace_iterator *iter)
+print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
+		    struct trace_iterator *iter)
 {
-	int i;
+	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
+	struct fgraph_data *data = iter->private;
+	struct trace_event *event;
+	int depth = 0;
 	int ret;
-	int cpu = iter->cpu;
-	pid_t *last_pid = iter->private;
-
-	/* Pid */
-	if (verif_pid(s, ent->pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE)
-		return TRACE_TYPE_PARTIAL_LINE;
-
-	/* Absolute time */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
-		ret = print_graph_abs_time(iter->ts, s);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
-
-	/* Cpu */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
-		ret = print_graph_cpu(s, cpu);
-		if (ret == TRACE_TYPE_PARTIAL_LINE)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
+	int i;
 
-	/* Proc */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
-		ret = print_graph_proc(s, ent->pid);
-		if (ret == TRACE_TYPE_PARTIAL_LINE)
-			return TRACE_TYPE_PARTIAL_LINE;
+	if (data)
+		depth = per_cpu_ptr(data, iter->cpu)->depth;
 
-		ret = trace_seq_printf(s, " | ");
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
+	if (print_graph_prologue(iter, s, 0, 0))
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* No overhead */
 	ret = print_graph_overhead(-1, s);
@@ -734,8 +743,8 @@ print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
 	}
 
 	/* Indentation */
-	if (trace->depth > 0)
-		for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
+	if (depth > 0)
+		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
 			ret = trace_seq_printf(s, " ");
 			if (!ret)
 				return TRACE_TYPE_PARTIAL_LINE;
@@ -746,9 +755,26 @@ print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
-	ret = trace_seq_bprintf(s, trace->fmt, trace->buf);
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
+	switch (iter->ent->type) {
+	case TRACE_BPRINT:
+		ret = trace_print_bprintk_msg_only(iter);
+		if (ret != TRACE_TYPE_HANDLED)
+			return ret;
+		break;
+	case TRACE_PRINT:
+		ret = trace_print_printk_msg_only(iter);
+		if (ret != TRACE_TYPE_HANDLED)
+			return ret;
+		break;
+	default:
+		event = ftrace_find_event(ent->type);
+		if (!event)
+			return TRACE_TYPE_UNHANDLED;
+
+		ret = event->trace(iter, sym_flags);
+		if (ret != TRACE_TYPE_HANDLED)
+			return ret;
+	}
 
 	/* Strip ending newline */
 	if (s->buffer[s->len - 1] == '\n') {
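print_graph_comment() is no longer tied to bprint records: TRACE_BPRINT and TRACE_PRINT use the message-only printers, and anything else falls back to the handler found by ftrace_find_event(). A small standalone sketch of that dispatch-with-fallback shape, using invented type constants and a lookup table in place of the event registry:

#include <stdio.h>

enum { TRACE_BPRINT = 1, TRACE_PRINT = 2, TRACE_OTHER = 3 };

typedef int (*trace_handler)(const char *payload);

static int print_other(const char *payload)
{
	printf("other: %s\n", payload);
	return 0;
}

/* Stand-in for ftrace_find_event(): returns NULL for unknown types. */
static trace_handler find_event(int type)
{
	return type == TRACE_OTHER ? print_other : NULL;
}

static int print_comment(int type, const char *payload)
{
	switch (type) {
	case TRACE_BPRINT:
	case TRACE_PRINT:
		/* known types get the simple message-only path */
		printf("/* %s */\n", payload);
		return 0;
	default: {
		trace_handler handler = find_event(type);

		if (!handler)
			return -1;	/* TRACE_TYPE_UNHANDLED */
		return handler(payload);
	}
	}
}

int main(void)
{
	print_comment(TRACE_PRINT, "hello from trace_printk");
	print_comment(TRACE_OTHER, "some other event");
	return 0;
}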
@@ -767,8 +793,8 @@ print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
 enum print_line_t
 print_graph_function(struct trace_iterator *iter)
 {
-	struct trace_seq *s = &iter->seq;
 	struct trace_entry *entry = iter->ent;
+	struct trace_seq *s = &iter->seq;
 
 	switch (entry->type) {
 	case TRACE_GRAPH_ENT: {
@@ -781,14 +807,11 @@ print_graph_function(struct trace_iterator *iter)
 		trace_assign_type(field, entry);
 		return print_graph_return(&field->ret, s, entry, iter);
 	}
-	case TRACE_BPRINT: {
-		struct bprint_entry *field;
-		trace_assign_type(field, entry);
-		return print_graph_comment(field, s, entry, iter);
-	}
 	default:
-		return TRACE_TYPE_UNHANDLED;
+		return print_graph_comment(s, entry, iter);
 	}
+
+	return TRACE_TYPE_HANDLED;
 }
 
 static void print_graph_headers(struct seq_file *s)
@@ -820,19 +843,21 @@ static void print_graph_headers(struct seq_file *s)
 
 static void graph_trace_open(struct trace_iterator *iter)
 {
-	/* pid on the last trace processed */
-	pid_t *last_pid = alloc_percpu(pid_t);
+	/* pid and depth on the last trace processed */
+	struct fgraph_data *data = alloc_percpu(struct fgraph_data);
 	int cpu;
 
-	if (!last_pid)
+	if (!data)
 		pr_warning("function graph tracer: not enough memory\n");
 	else
 		for_each_possible_cpu(cpu) {
-			pid_t *pid = per_cpu_ptr(last_pid, cpu);
+			pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
+			int *depth = &(per_cpu_ptr(data, cpu)->depth);
 			*pid = -1;
+			*depth = 0;
 		}
 
-	iter->private = last_pid;
+	iter->private = data;
 }
 
 static void graph_trace_close(struct trace_iterator *iter)
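graph_trace_open() now allocates one struct fgraph_data per CPU and seeds each slot with last_pid = -1 and depth = 0; graph_trace_close() (the next hunk, not shown here) releases the buffer, presumably via free_percpu(). A userspace model of that open/close pairing, with calloc()/free() standing in for the per-cpu allocator and NR_CPUS chosen arbitrarily:

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct fgraph_data {
	int last_pid;
	int depth;
};

/* calloc()/free() stand in for alloc_percpu()/free_percpu(). */
static struct fgraph_data *trace_open(void)
{
	struct fgraph_data *data = calloc(NR_CPUS, sizeof(*data));
	int cpu;

	if (!data) {
		fprintf(stderr, "function graph tracer: not enough memory\n");
		return NULL;
	}

	/* same per-CPU initialization graph_trace_open() performs */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		data[cpu].last_pid = -1;
		data[cpu].depth = 0;
	}
	return data;
}

static void trace_close(struct fgraph_data *data)
{
	free(data);
}

int main(void)
{
	struct fgraph_data *data = trace_open();

	if (data) {
		printf("cpu1: last_pid=%d depth=%d\n",
		       data[1].last_pid, data[1].depth);
		trace_close(data);
	}
	return 0;
}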