path: root/kernel/trace/trace_functions_graph.c
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
 -rw-r--r--  kernel/trace/trace_functions_graph.c | 284
 1 file changed, 194 insertions(+), 90 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index b1342c5d37cf..79f4bac99a94 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -9,6 +9,7 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/slab.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -18,6 +19,7 @@ struct fgraph_cpu_data {
 	pid_t		last_pid;
 	int		depth;
 	int		ignore;
+	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
 };
 
 struct fgraph_data {
@@ -38,7 +40,7 @@ struct fgraph_data {
 #define TRACE_GRAPH_PRINT_OVERHEAD	0x4
 #define TRACE_GRAPH_PRINT_PROC		0x8
 #define TRACE_GRAPH_PRINT_DURATION	0x10
-#define TRACE_GRAPH_PRINT_ABS_TIME	0X20
+#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
 
 static struct tracer_opt trace_opts[] = {
 	/* Display overruns? (for self-debug purpose) */
@@ -177,7 +179,7 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 	return ret;
 }
 
-static int __trace_graph_entry(struct trace_array *tr,
+int __trace_graph_entry(struct trace_array *tr,
 				struct ftrace_graph_ent *trace,
 				unsigned long flags,
 				int pc)
@@ -187,7 +189,7 @@ static int __trace_graph_entry(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ent_entry *entry;
 
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return 0;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
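The hunk above reflects the percpu accessor cleanup: the per_cpu_var() name-mangling wrapper went away, so __this_cpu_read() is now passed the per-CPU variable directly. A minimal sketch of the newer accessor pattern, assuming a kernel with the unprefixed API; the helper name graph_event_allowed() is illustrative only:

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, ftrace_cpu_disabled);

/* Return nonzero when this CPU is allowed to record graph events. */
static int graph_event_allowed(void)
{
	/* Reads this CPU's copy; no per_cpu_var() wrapper needed anymore. */
	return !__this_cpu_read(ftrace_cpu_disabled);
}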
@@ -212,13 +214,11 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	int cpu;
 	int pc;
 
-	if (unlikely(!tr))
-		return 0;
-
 	if (!ftrace_trace_task(current))
 		return 0;
 
-	if (!ftrace_graph_addr(trace->func))
+	/* trace it when it is-nested-in or is a function enabled. */
+	if (!(trace->depth || ftrace_graph_addr(trace->func)))
 		return 0;
 
 	local_irq_save(flags);
@@ -231,9 +231,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	} else {
 		ret = 0;
 	}
-	/* Only do the atomic if it is not already set */
-	if (!test_tsk_trace_graph(current))
-		set_tsk_trace_graph(current);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
@@ -241,7 +238,15 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	return ret;
 }
 
-static void __trace_graph_return(struct trace_array *tr,
+int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
+{
+	if (tracing_thresh)
+		return 1;
+	else
+		return trace_graph_entry(trace);
+}
+
+void __trace_graph_return(struct trace_array *tr,
 				struct ftrace_graph_ret *trace,
 				unsigned long flags,
 				int pc)
@@ -251,7 +256,7 @@ static void __trace_graph_return(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ret_entry *entry;
 
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
@@ -281,19 +286,39 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 		pc = preempt_count();
 		__trace_graph_return(tr, trace, flags, pc);
 	}
-	if (!trace->depth)
-		clear_tsk_trace_graph(current);
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
+void set_graph_array(struct trace_array *tr)
+{
+	graph_array = tr;
+
+	/* Make graph_array visible before we start tracing */
+
+	smp_mb();
+}
+
+void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
+{
+	if (tracing_thresh &&
+	    (trace->rettime - trace->calltime < tracing_thresh))
+		return;
+	else
+		trace_graph_return(trace);
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
 	int ret;
 
-	graph_array = tr;
-	ret = register_ftrace_graph(&trace_graph_return,
-				    &trace_graph_entry);
+	set_graph_array(tr);
+	if (tracing_thresh)
+		ret = register_ftrace_graph(&trace_graph_thresh_return,
+					    &trace_graph_thresh_entry);
+	else
+		ret = register_ftrace_graph(&trace_graph_return,
+					    &trace_graph_entry);
 	if (ret)
 		return ret;
 	tracing_start_cmdline_record();
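With the thresh variants in place, graph_trace_init() picks the callback pair based on tracing_thresh, so only calls that ran longer than the threshold end up in the buffer. Since the check happens at tracer init, the threshold has to be written before the tracer is switched on. A rough user-space sketch of driving this through the tracing debugfs files; the mount point and the 500 us value are illustrative only:

#include <stdio.h>

/* Sketch: set a duration threshold (microseconds), then enable the
 * function graph tracer. Assumes debugfs mounted at /sys/kernel/debug. */
int main(void)
{
	FILE *f;

	f = fopen("/sys/kernel/debug/tracing/tracing_thresh", "w");
	if (!f)
		return 1;
	fprintf(f, "500\n");
	fclose(f);

	f = fopen("/sys/kernel/debug/tracing/current_tracer", "w");
	if (!f)
		return 1;
	fprintf(f, "function_graph\n");
	fclose(f);
	return 0;
}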
@@ -301,11 +326,6 @@ static int graph_trace_init(struct trace_array *tr)
 	return 0;
 }
 
-void set_graph_array(struct trace_array *tr)
-{
-	graph_array = tr;
-}
-
 static void graph_trace_reset(struct trace_array *tr)
 {
 	tracing_stop_cmdline_record();
@@ -470,9 +490,10 @@ get_return_for_leaf(struct trace_iterator *iter,
 		 * We need to consume the current entry to see
 		 * the next one.
 		 */
-		ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
+		ring_buffer_consume(iter->tr->buffer, iter->cpu,
+				    NULL, NULL);
 		event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
-					 NULL);
+					 NULL, NULL);
 	}
 
 	if (!event)
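The extra NULL argument here tracks the ring buffer API change: ring_buffer_consume() and ring_buffer_peek() gained an output parameter reporting how many events were lost at that point, and callers that don't care simply pass NULL, as get_return_for_leaf() does above. A hedged sketch of the newer calling convention; the drain_one() helper is made up for illustration:

/* Sketch only: consume the next event on one CPU and report drops.
 * Assumes a struct ring_buffer *buffer set up elsewhere. */
static void drain_one(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost_events;
	u64 ts;

	event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
	if (!event)
		return;		/* nothing pending on this CPU */

	if (lost_events)
		pr_info("dropped %lu events on cpu %d\n", lost_events, cpu);
}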
@@ -506,17 +527,18 @@ get_return_for_leaf(struct trace_iterator *iter,
 
 /* Signal a overhead of time execution to the output */
 static int
-print_graph_overhead(unsigned long long duration, struct trace_seq *s)
+print_graph_overhead(unsigned long long duration, struct trace_seq *s,
+		     u32 flags)
 {
 	/* If duration disappear, we don't need anything */
-	if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
+	if (!(flags & TRACE_GRAPH_PRINT_DURATION))
 		return 1;
 
 	/* Non nested entry or return */
 	if (duration == -1)
 		return trace_seq_printf(s, " ");
 
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
 		/* Duration exceeded 100 msecs */
 		if (duration > 100000ULL)
 			return trace_seq_printf(s, "! ");
@@ -542,7 +564,7 @@ static int print_graph_abs_time(u64 t, struct trace_seq *s)
 
 static enum print_line_t
 print_graph_irq(struct trace_iterator *iter, unsigned long addr,
-		enum trace_type type, int cpu, pid_t pid)
+		enum trace_type type, int cpu, pid_t pid, u32 flags)
 {
 	int ret;
 	struct trace_seq *s = &iter->seq;
@@ -552,21 +574,21 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 		return TRACE_TYPE_UNHANDLED;
 
 	/* Absolute time */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
 		ret = print_graph_abs_time(iter->ts, s);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
 	/* Cpu */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+	if (flags & TRACE_GRAPH_PRINT_CPU) {
 		ret = print_graph_cpu(s, cpu);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
 	/* Proc */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+	if (flags & TRACE_GRAPH_PRINT_PROC) {
 		ret = print_graph_proc(s, pid);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
@@ -576,7 +598,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 	}
 
 	/* No overhead */
-	ret = print_graph_overhead(-1, s);
+	ret = print_graph_overhead(-1, s, flags);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
@@ -589,7 +611,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* Don't close the duration column if haven't one */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+	if (flags & TRACE_GRAPH_PRINT_DURATION)
 		trace_seq_printf(s, " |");
 	ret = trace_seq_printf(s, "\n");
 
@@ -659,7 +681,8 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
 static enum print_line_t
 print_graph_entry_leaf(struct trace_iterator *iter,
 		struct ftrace_graph_ent_entry *entry,
-		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
+		struct ftrace_graph_ret_entry *ret_entry,
+		struct trace_seq *s, u32 flags)
 {
 	struct fgraph_data *data = iter->private;
 	struct ftrace_graph_ret *graph_ret;
@@ -673,24 +696,30 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 	duration = graph_ret->rettime - graph_ret->calltime;
 
 	if (data) {
+		struct fgraph_cpu_data *cpu_data;
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
+
+		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 
 		/*
 		 * Comments display at + 1 to depth. Since
 		 * this is a leaf function, keep the comments
 		 * equal to this depth.
 		 */
-		*depth = call->depth - 1;
+		cpu_data->depth = call->depth - 1;
+
+		/* No need to keep this function around for this depth */
+		if (call->depth < FTRACE_RETFUNC_DEPTH)
+			cpu_data->enter_funcs[call->depth] = 0;
 	}
 
 	/* Overhead */
-	ret = print_graph_overhead(duration, s);
+	ret = print_graph_overhead(duration, s, flags);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* Duration */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+	if (flags & TRACE_GRAPH_PRINT_DURATION) {
 		ret = print_graph_duration(duration, s);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
@@ -713,7 +742,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 static enum print_line_t
 print_graph_entry_nested(struct trace_iterator *iter,
 			 struct ftrace_graph_ent_entry *entry,
-			 struct trace_seq *s, int cpu)
+			 struct trace_seq *s, int cpu, u32 flags)
 {
 	struct ftrace_graph_ent *call = &entry->graph_ent;
 	struct fgraph_data *data = iter->private;
@@ -721,19 +750,24 @@ print_graph_entry_nested(struct trace_iterator *iter,
 	int i;
 
 	if (data) {
+		struct fgraph_cpu_data *cpu_data;
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
 
-		*depth = call->depth;
+		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
+		cpu_data->depth = call->depth;
+
+		/* Save this function pointer to see if the exit matches */
+		if (call->depth < FTRACE_RETFUNC_DEPTH)
+			cpu_data->enter_funcs[call->depth] = call->func;
 	}
 
 	/* No overhead */
-	ret = print_graph_overhead(-1, s);
+	ret = print_graph_overhead(-1, s, flags);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* No time */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+	if (flags & TRACE_GRAPH_PRINT_DURATION) {
 		ret = trace_seq_printf(s, " | ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
@@ -759,7 +793,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
 
 static enum print_line_t
 print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
-		     int type, unsigned long addr)
+		     int type, unsigned long addr, u32 flags)
 {
 	struct fgraph_data *data = iter->private;
 	struct trace_entry *ent = iter->ent;
@@ -772,27 +806,27 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
 
 	if (type) {
 		/* Interrupt */
-		ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
+		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
 	/* Absolute time */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
 		ret = print_graph_abs_time(iter->ts, s);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
 	/* Cpu */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+	if (flags & TRACE_GRAPH_PRINT_CPU) {
 		ret = print_graph_cpu(s, cpu);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
 	/* Proc */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+	if (flags & TRACE_GRAPH_PRINT_PROC) {
 		ret = print_graph_proc(s, ent->pid);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
@@ -814,7 +848,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
 
 static enum print_line_t
 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
-			struct trace_iterator *iter)
+			struct trace_iterator *iter, u32 flags)
 {
 	struct fgraph_data *data = iter->private;
 	struct ftrace_graph_ent *call = &field->graph_ent;
@@ -822,14 +856,14 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 	static enum print_line_t ret;
 	int cpu = iter->cpu;
 
-	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
+	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	leaf_ret = get_return_for_leaf(iter, field);
 	if (leaf_ret)
-		ret = print_graph_entry_leaf(iter, field, leaf_ret, s);
+		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
 	else
-		ret = print_graph_entry_nested(iter, field, s, cpu);
+		ret = print_graph_entry_nested(iter, field, s, cpu, flags);
 
 	if (data) {
 		/*
@@ -848,37 +882,47 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 
 static enum print_line_t
 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
-		   struct trace_entry *ent, struct trace_iterator *iter)
+		   struct trace_entry *ent, struct trace_iterator *iter,
+		   u32 flags)
 {
 	unsigned long long duration = trace->rettime - trace->calltime;
 	struct fgraph_data *data = iter->private;
 	pid_t pid = ent->pid;
 	int cpu = iter->cpu;
+	int func_match = 1;
 	int ret;
 	int i;
 
 	if (data) {
+		struct fgraph_cpu_data *cpu_data;
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
+
+		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 
 		/*
 		 * Comments display at + 1 to depth. This is the
 		 * return from a function, we now want the comments
 		 * to display at the same level of the bracket.
 		 */
-		*depth = trace->depth - 1;
+		cpu_data->depth = trace->depth - 1;
+
+		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
+			if (cpu_data->enter_funcs[trace->depth] != trace->func)
+				func_match = 0;
+			cpu_data->enter_funcs[trace->depth] = 0;
+		}
 	}
 
-	if (print_graph_prologue(iter, s, 0, 0))
+	if (print_graph_prologue(iter, s, 0, 0, flags))
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* Overhead */
-	ret = print_graph_overhead(duration, s);
+	ret = print_graph_overhead(duration, s, flags);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* Duration */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+	if (flags & TRACE_GRAPH_PRINT_DURATION) {
 		ret = print_graph_duration(duration, s);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
@@ -891,19 +935,32 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	ret = trace_seq_printf(s, "}\n");
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
+	/*
+	 * If the return function does not have a matching entry,
+	 * then the entry was lost. Instead of just printing
+	 * the '}' and letting the user guess what function this
+	 * belongs to, write out the function name.
+	 */
+	if (func_match) {
+		ret = trace_seq_printf(s, "}\n");
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	} else {
+		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
 
 	/* Overrun */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
+	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
 		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
 					trace->overrun);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
+	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
+			      cpu, pid, flags);
 	if (ret == TRACE_TYPE_PARTIAL_LINE)
 		return TRACE_TYPE_PARTIAL_LINE;
 
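The enter_funcs[] slots filled in at entry time are what make func_match possible here: each depth remembers which function opened the bracket, so if the matching entry event was lost the closing brace gets annotated with the function name instead of a bare '}'. A stripped-down model of that bookkeeping, outside any kernel structure; the names and DEPTH_MAX are illustrative only:

#define DEPTH_MAX 50	/* stands in for FTRACE_RETFUNC_DEPTH */

static unsigned long enter_funcs[DEPTH_MAX];

/* Record which function opened the bracket at this depth. */
static void on_entry(int depth, unsigned long func)
{
	if (depth < DEPTH_MAX)
		enter_funcs[depth] = func;
}

/* Returns 1 when the return matches the recorded entry, 0 when the
 * entry was lost and the caller should print the function name. */
static int on_return(int depth, unsigned long func)
{
	int match = 1;

	if (depth < DEPTH_MAX) {
		if (enter_funcs[depth] != func)
			match = 0;
		enter_funcs[depth] = 0;
	}
	return match;
}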
@@ -911,8 +968,8 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 }
 
 static enum print_line_t
 print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
-		    struct trace_iterator *iter)
+		    struct trace_iterator *iter, u32 flags)
 {
 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
 	struct fgraph_data *data = iter->private;
@@ -924,16 +981,16 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
 	if (data)
 		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
 
-	if (print_graph_prologue(iter, s, 0, 0))
+	if (print_graph_prologue(iter, s, 0, 0, flags))
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* No overhead */
-	ret = print_graph_overhead(-1, s);
+	ret = print_graph_overhead(-1, s, flags);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* No time */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+	if (flags & TRACE_GRAPH_PRINT_DURATION) {
 		ret = trace_seq_printf(s, " | ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
@@ -968,7 +1025,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
 		if (!event)
 			return TRACE_TYPE_UNHANDLED;
 
-		ret = event->trace(iter, sym_flags);
+		ret = event->funcs->trace(iter, sym_flags, event);
 		if (ret != TRACE_TYPE_HANDLED)
 			return ret;
 	}
@@ -988,7 +1045,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
 
 
 enum print_line_t
-print_graph_function(struct trace_iterator *iter)
+print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 {
 	struct ftrace_graph_ent_entry *field;
 	struct fgraph_data *data = iter->private;
@@ -1009,7 +1066,7 @@ print_graph_function(struct trace_iterator *iter)
 	if (data && data->failed) {
 		field = &data->ent;
 		iter->cpu = data->cpu;
-		ret = print_graph_entry(field, s, iter);
+		ret = print_graph_entry(field, s, iter, flags);
 		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
 			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
 			ret = TRACE_TYPE_NO_CONSUME;
@@ -1029,32 +1086,50 @@ print_graph_function(struct trace_iterator *iter)
 		struct ftrace_graph_ent_entry saved;
 		trace_assign_type(field, entry);
 		saved = *field;
-		return print_graph_entry(&saved, s, iter);
+		return print_graph_entry(&saved, s, iter, flags);
 	}
 	case TRACE_GRAPH_RET: {
 		struct ftrace_graph_ret_entry *field;
 		trace_assign_type(field, entry);
-		return print_graph_return(&field->ret, s, entry, iter);
+		return print_graph_return(&field->ret, s, entry, iter, flags);
 	}
+	case TRACE_STACK:
+	case TRACE_FN:
+		/* dont trace stack and functions as comments */
+		return TRACE_TYPE_UNHANDLED;
+
 	default:
-		return print_graph_comment(s, entry, iter);
+		return print_graph_comment(s, entry, iter, flags);
 	}
 
 	return TRACE_TYPE_HANDLED;
 }
 
-static void print_lat_header(struct seq_file *s)
+static enum print_line_t
+print_graph_function(struct trace_iterator *iter)
+{
+	return print_graph_function_flags(iter, tracer_flags.val);
+}
+
+static enum print_line_t
+print_graph_function_event(struct trace_iterator *iter, int flags,
+			   struct trace_event *event)
+{
+	return print_graph_function(iter);
+}
+
+static void print_lat_header(struct seq_file *s, u32 flags)
 {
 	static const char spaces[] = " " /* 16 spaces */
 		" " /* 4 spaces */
 		" "; /* 17 spaces */
 	int size = 0;
 
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
+	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
 		size += 16;
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
+	if (flags & TRACE_GRAPH_PRINT_CPU)
 		size += 4;
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
+	if (flags & TRACE_GRAPH_PRINT_PROC)
 		size += 17;
 
 	seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces);
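Splitting print_graph_function_flags() out of print_graph_function() is what lets other tracers reuse the graph output path with their own option bits instead of the function_graph tracer's tracer_flags.val. A hedged sketch of what such a caller could look like; the wrapper name and the flag combination are made up for illustration:

/* Illustrative caller: render graph-style output with a fixed flag set
 * rather than the function_graph tracer's own options. */
#define MY_GRAPH_FLAGS	(TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t my_tracer_print_line(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, MY_GRAPH_FLAGS);
}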
@@ -1065,43 +1140,48 @@ static void print_lat_header(struct seq_file *s)
 	seq_printf(s, "#%.*s|||| / \n", size, spaces);
 }
 
-static void print_graph_headers(struct seq_file *s)
+void print_graph_headers_flags(struct seq_file *s, u32 flags)
 {
 	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
 
 	if (lat)
-		print_lat_header(s);
+		print_lat_header(s, flags);
 
 	/* 1st line */
 	seq_printf(s, "#");
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
+	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
 		seq_printf(s, " TIME ");
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
+	if (flags & TRACE_GRAPH_PRINT_CPU)
 		seq_printf(s, " CPU");
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
+	if (flags & TRACE_GRAPH_PRINT_PROC)
 		seq_printf(s, " TASK/PID ");
 	if (lat)
 		seq_printf(s, "|||||");
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+	if (flags & TRACE_GRAPH_PRINT_DURATION)
 		seq_printf(s, " DURATION ");
 	seq_printf(s, " FUNCTION CALLS\n");
 
 	/* 2nd line */
 	seq_printf(s, "#");
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
+	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
 		seq_printf(s, " | ");
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
+	if (flags & TRACE_GRAPH_PRINT_CPU)
 		seq_printf(s, " | ");
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
+	if (flags & TRACE_GRAPH_PRINT_PROC)
 		seq_printf(s, " | | ");
 	if (lat)
 		seq_printf(s, "|||||");
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+	if (flags & TRACE_GRAPH_PRINT_DURATION)
 		seq_printf(s, " | | ");
 	seq_printf(s, " | | | |\n");
 }
 
-static void graph_trace_open(struct trace_iterator *iter)
+void print_graph_headers(struct seq_file *s)
+{
+	print_graph_headers_flags(s, tracer_flags.val);
+}
+
+void graph_trace_open(struct trace_iterator *iter)
 {
 	/* pid and depth on the last trace processed */
 	struct fgraph_data *data;
@@ -1136,7 +1216,7 @@ static void graph_trace_open(struct trace_iterator *iter)
 	pr_warning("function graph tracer: not enough memory\n");
 }
 
-static void graph_trace_close(struct trace_iterator *iter)
+void graph_trace_close(struct trace_iterator *iter)
 {
 	struct fgraph_data *data = iter->private;
 
@@ -1146,6 +1226,20 @@ static void graph_trace_close(struct trace_iterator *iter)
 	}
 }
 
+static struct trace_event_functions graph_functions = {
+	.trace		= print_graph_function_event,
+};
+
+static struct trace_event graph_trace_entry_event = {
+	.type		= TRACE_GRAPH_ENT,
+	.funcs		= &graph_functions,
+};
+
+static struct trace_event graph_trace_ret_event = {
+	.type		= TRACE_GRAPH_RET,
+	.funcs		= &graph_functions
+};
+
 static struct tracer graph_trace __read_mostly = {
 	.name		= "function_graph",
 	.open		= graph_trace_open,
@@ -1167,6 +1261,16 @@ static __init int init_graph_trace(void)
 {
 	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
 
+	if (!register_ftrace_event(&graph_trace_entry_event)) {
+		pr_warning("Warning: could not register graph trace events\n");
+		return 1;
+	}
+
+	if (!register_ftrace_event(&graph_trace_ret_event)) {
+		pr_warning("Warning: could not register graph trace events\n");
+		return 1;
+	}
+
 	return register_tracer(&graph_trace);
 }
 