Diffstat (limited to 'kernel/trace/trace_functions_graph.c')

-rw-r--r--  kernel/trace/trace_functions_graph.c | 267
1 file changed, 208 insertions(+), 59 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 45e6c01b2e4..9aed1a5cf55 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -9,14 +9,27 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/slab.h>
 #include <linux/fs.h>
 
 #include "trace.h"
 #include "trace_output.h"
 
-struct fgraph_data {
+struct fgraph_cpu_data {
 	pid_t		last_pid;
 	int		depth;
+	int		ignore;
+	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
+};
+
+struct fgraph_data {
+	struct fgraph_cpu_data	*cpu_data;
+
+	/* Place to preserve last processed entry. */
+	struct ftrace_graph_ent_entry	ent;
+	struct ftrace_graph_ret_entry	ret;
+	int				failed;
+	int				cpu;
 };
 
 #define TRACE_GRAPH_INDENT	2
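
Note: the old per-cpu fgraph_data is split in two here. fgraph_cpu_data keeps the per-CPU output state (last pid, current depth, an ignore flag, and an enter_funcs[] stack used later to match a return to its entry), while the new fgraph_data is a single per-iterator wrapper that also preserves the last entry/return pair in case a write to the seq buffer fails. A minimal sketch of how the later hunks reach both levels (access pattern inferred from the hunks below):

	struct fgraph_data *data = iter->private;	/* one per trace iterator */
	struct fgraph_cpu_data *cpu_data;

	/* per-CPU slot for the CPU whose event is being printed */
	cpu_data = per_cpu_ptr(data->cpu_data, iter->cpu);
	cpu_data->depth = 0;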
@@ -176,7 +189,7 @@ static int __trace_graph_entry(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ent_entry *entry;
 
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return 0;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -201,13 +214,11 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	int cpu;
 	int pc;
 
-	if (unlikely(!tr))
-		return 0;
-
 	if (!ftrace_trace_task(current))
 		return 0;
 
-	if (!ftrace_graph_addr(trace->func))
+	/* trace it when it is-nested-in or is a function enabled. */
+	if (!(trace->depth || ftrace_graph_addr(trace->func)))
 		return 0;
 
 	local_irq_save(flags);
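
Note: the rewritten test makes the function filter apply only at the top of a call chain: ftrace_graph_addr() is consulted at depth 0, and anything deeper is traced because it is nested inside an already-accepted function. An illustration (hypothetical filter setting):

	/*
	 * # echo foo > set_graph_function
	 *
	 *   foo() {	depth 0: ftrace_graph_addr(foo) matches -> traced
	 *     bar();	depth 1: trace->depth != 0 -> traced as nested
	 *   }
	 *   bar();	depth 0: no match, not nested -> not traced
	 */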
@@ -220,9 +231,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	} else {
 		ret = 0;
 	}
-	/* Only do the atomic if it is not already set */
-	if (!test_tsk_trace_graph(current))
-		set_tsk_trace_graph(current);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
@@ -230,6 +238,14 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	return ret;
 }
 
+int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
+{
+	if (tracing_thresh)
+		return 1;
+	else
+		return trace_graph_entry(trace);
+}
+
 static void __trace_graph_return(struct trace_array *tr,
 				struct ftrace_graph_ret *trace,
 				unsigned long flags,
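
Note: with tracing_thresh set, this entry handler accepts every call but never logs an entry event; the return-side counterpart added further down then records only calls that ran longer than the threshold. The net behavior, sketched:

	/*
	 * tracing_thresh == 0: entry + return events for every call.
	 * tracing_thresh  > 0: no entry events; a return event only when
	 *                      rettime - calltime >= tracing_thresh.
	 */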
@@ -240,7 +256,7 @@ static void __trace_graph_return(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ret_entry *entry;
 
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
@@ -270,19 +286,39 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 		pc = preempt_count();
 		__trace_graph_return(tr, trace, flags, pc);
 	}
-	if (!trace->depth)
-		clear_tsk_trace_graph(current);
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
+void set_graph_array(struct trace_array *tr)
+{
+	graph_array = tr;
+
+	/* Make graph_array visible before we start tracing */
+
+	smp_mb();
+}
+
+void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
+{
+	if (tracing_thresh &&
+	    (trace->rettime - trace->calltime < tracing_thresh))
+		return;
+	else
+		trace_graph_return(trace);
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
 	int ret;
 
-	graph_array = tr;
-	ret = register_ftrace_graph(&trace_graph_return,
-				    &trace_graph_entry);
+	set_graph_array(tr);
+	if (tracing_thresh)
+		ret = register_ftrace_graph(&trace_graph_thresh_return,
+					    &trace_graph_thresh_entry);
+	else
+		ret = register_ftrace_graph(&trace_graph_return,
+					    &trace_graph_entry);
 	if (ret)
 		return ret;
 	tracing_start_cmdline_record();
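
Note: set_graph_array() gains a full barrier so that graph_array is visible to all CPUs before register_ftrace_graph() lets the callbacks run and dereference it. The ordering being protected, distilled from this hunk:

	set_graph_array(tr);			/* publish tr, then smp_mb() */
	ret = register_ftrace_graph(&trace_graph_return,	/* callbacks may fire */
				    &trace_graph_entry);	/* and read graph_array */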
@@ -290,11 +326,6 @@ static int graph_trace_init(struct trace_array *tr)
 	return 0;
 }
 
-void set_graph_array(struct trace_array *tr)
-{
-	graph_array = tr;
-}
-
 static void graph_trace_reset(struct trace_array *tr)
 {
 	tracing_stop_cmdline_record();
@@ -384,7 +415,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
 	if (!data)
 		return TRACE_TYPE_HANDLED;
 
-	last_pid = &(per_cpu_ptr(data, cpu)->last_pid);
+	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
 
 	if (*last_pid == pid)
 		return TRACE_TYPE_HANDLED;
@@ -435,26 +466,49 @@ static struct ftrace_graph_ret_entry *
 get_return_for_leaf(struct trace_iterator *iter,
 		struct ftrace_graph_ent_entry *curr)
 {
-	struct ring_buffer_iter *ring_iter;
+	struct fgraph_data *data = iter->private;
+	struct ring_buffer_iter *ring_iter = NULL;
 	struct ring_buffer_event *event;
 	struct ftrace_graph_ret_entry *next;
 
-	ring_iter = iter->buffer_iter[iter->cpu];
+	/*
+	 * If the previous output failed to write to the seq buffer,
+	 * then we just reuse the data from before.
+	 */
+	if (data && data->failed) {
+		curr = &data->ent;
+		next = &data->ret;
+	} else {
 
-	/* First peek to compare current entry and the next one */
-	if (ring_iter)
-		event = ring_buffer_iter_peek(ring_iter, NULL);
-	else {
-		/* We need to consume the current entry to see the next one */
-		ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
-		event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
-					 NULL);
-	}
+		ring_iter = iter->buffer_iter[iter->cpu];
+
+		/* First peek to compare current entry and the next one */
+		if (ring_iter)
+			event = ring_buffer_iter_peek(ring_iter, NULL);
+		else {
+			/*
+			 * We need to consume the current entry to see
+			 * the next one.
+			 */
+			ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
+			event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
+						 NULL);
+		}
 
-	if (!event)
-		return NULL;
+		if (!event)
+			return NULL;
 
-	next = ring_buffer_event_data(event);
+		next = ring_buffer_event_data(event);
+
+		if (data) {
+			/*
+			 * Save current and next entries for later reference
+			 * if the output fails.
+			 */
+			data->ent = *curr;
+			data->ret = *next;
+		}
+	}
 
 	if (next->ent.type != TRACE_GRAPH_RET)
 		return NULL;
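
Note: get_return_for_leaf() pairs an entry event with an immediately following return event so a childless call can be printed on a single line; the new branch replays the saved pair whenever the previous line failed to reach the seq buffer. Illustrative output (timings invented):

	 0)               |  sys_read() {        <- nested: entry printed alone
	 0)   0.421 us    |    fget_light();     <- leaf: entry + return merged
	 0)   1.084 us    |  }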
@@ -639,15 +693,21 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 	duration = graph_ret->rettime - graph_ret->calltime;
 
 	if (data) {
+		struct fgraph_cpu_data *cpu_data;
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data, cpu)->depth);
+
+		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 
 		/*
 		 * Comments display at + 1 to depth. Since
 		 * this is a leaf function, keep the comments
 		 * equal to this depth.
 		 */
-		*depth = call->depth - 1;
+		cpu_data->depth = call->depth - 1;
+
+		/* No need to keep this function around for this depth */
+		if (call->depth < FTRACE_RETFUNC_DEPTH)
+			cpu_data->enter_funcs[call->depth] = 0;
 	}
 
 	/* Overhead */
@@ -687,10 +747,15 @@ print_graph_entry_nested(struct trace_iterator *iter,
 	int i;
 
 	if (data) {
+		struct fgraph_cpu_data *cpu_data;
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data, cpu)->depth);
 
-		*depth = call->depth;
+		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
+		cpu_data->depth = call->depth;
+
+		/* Save this function pointer to see if the exit matches */
+		if (call->depth < FTRACE_RETFUNC_DEPTH)
+			cpu_data->enter_funcs[call->depth] = call->func;
 	}
 
 	/* No overhead */
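
Note: the two print paths maintain enter_funcs[] as a small per-depth stack. The nested path records call->func at its depth; the leaf path clears the slot, since its entry and return were consumed together. print_graph_return() below compares the slot at trace->depth against trace->func, giving roughly this invariant:

	/*
	 * nested entry at depth d:  enter_funcs[d] = func
	 * leaf at depth d:          enter_funcs[d] = 0  (nothing left to match)
	 * return at depth d:        slot matches  -> plain closing brace
	 *                           no match      -> brace annotated with name
	 */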
@@ -782,19 +847,34 @@ static enum print_line_t
 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 		struct trace_iterator *iter)
 {
-	int cpu = iter->cpu;
+	struct fgraph_data *data = iter->private;
 	struct ftrace_graph_ent *call = &field->graph_ent;
 	struct ftrace_graph_ret_entry *leaf_ret;
+	static enum print_line_t ret;
+	int cpu = iter->cpu;
 
 	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	leaf_ret = get_return_for_leaf(iter, field);
 	if (leaf_ret)
-		return print_graph_entry_leaf(iter, field, leaf_ret, s);
+		ret = print_graph_entry_leaf(iter, field, leaf_ret, s);
 	else
-		return print_graph_entry_nested(iter, field, s, cpu);
+		ret = print_graph_entry_nested(iter, field, s, cpu);
+
+	if (data) {
+		/*
+		 * If we failed to write our output, then we need to make
+		 * note of it. Because we already consumed our entry.
+		 */
+		if (s->full) {
+			data->failed = 1;
+			data->cpu = cpu;
+		} else
+			data->failed = 0;
+	}
 
+	return ret;
 }
 
 static enum print_line_t
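
Note: print_graph_entry() now latches failure state. s->full is assumed here to be the trace_seq flag set once a record no longer fits in the output buffer; when that happens the entry has already been consumed from the ring buffer, so it must be remembered rather than dropped:

	/*
	 * s->full while printing  =>  data->failed = 1, data->cpu = cpu;
	 * the next print_graph_function() call replays data->ent.
	 */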
@@ -805,19 +885,28 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 	struct fgraph_data *data = iter->private;
 	pid_t pid = ent->pid;
 	int cpu = iter->cpu;
+	int func_match = 1;
 	int ret;
 	int i;
 
 	if (data) {
+		struct fgraph_cpu_data *cpu_data;
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data, cpu)->depth);
+
+		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 
 		/*
 		 * Comments display at + 1 to depth. This is the
 		 * return from a function, we now want the comments
 		 * to display at the same level of the bracket.
 		 */
-		*depth = trace->depth - 1;
+		cpu_data->depth = trace->depth - 1;
+
+		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
+			if (cpu_data->enter_funcs[trace->depth] != trace->func)
+				func_match = 0;
+			cpu_data->enter_funcs[trace->depth] = 0;
+		}
 	}
 
 	if (print_graph_prologue(iter, s, 0, 0))
@@ -842,9 +931,21 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 		return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	ret = trace_seq_printf(s, "}\n");
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
+	/*
+	 * If the return function does not have a matching entry,
+	 * then the entry was lost. Instead of just printing
+	 * the '}' and letting the user guess what function this
+	 * belongs to, write out the function name.
+	 */
+	if (func_match) {
+		ret = trace_seq_printf(s, "}\n");
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	} else {
+		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
 
 	/* Overrun */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
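
Note: a return with no matching entry usually means the entry event was lost (ring-buffer overrun) or tracing attached mid-call; a bare closing brace would then be ambiguous, so %ps is used to append the symbol name. Sample of the annotated form (values invented):

	 2)   0.321 us    |    } /* lock_acquire */	<- entry event was lost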
@@ -873,7 +974,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
 	int i;
 
 	if (data)
-		depth = per_cpu_ptr(data, iter->cpu)->depth;
+		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
 
 	if (print_graph_prologue(iter, s, 0, 0))
 		return TRACE_TYPE_PARTIAL_LINE;
@@ -941,8 +1042,33 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
 enum print_line_t
 print_graph_function(struct trace_iterator *iter)
 {
+	struct ftrace_graph_ent_entry *field;
+	struct fgraph_data *data = iter->private;
 	struct trace_entry *entry = iter->ent;
 	struct trace_seq *s = &iter->seq;
+	int cpu = iter->cpu;
+	int ret;
+
+	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
+		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
+		return TRACE_TYPE_HANDLED;
+	}
+
+	/*
+	 * If the last output failed, there's a possibility we need
+	 * to print out the missing entry which would never go out.
+	 */
+	if (data && data->failed) {
+		field = &data->ent;
+		iter->cpu = data->cpu;
+		ret = print_graph_entry(field, s, iter);
+		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
+			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
+			ret = TRACE_TYPE_NO_CONSUME;
+		}
+		iter->cpu = cpu;
+		return ret;
+	}
 
 	switch (entry->type) {
 	case TRACE_GRAPH_ENT: {
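
Note: this is the replay half of the failure handling: the saved entry is printed first, attributed to the CPU it originally came from. If the replay succeeds while the iterator has meanwhile moved on to a different CPU's event, that CPU's freshly read record must not be consumed. The three outcomes, as the code above reads:

	/*
	 * replay succeeded, same cpu     -> return the result as usual
	 * replay succeeded, another cpu  -> ignore[iter->cpu] = 1 and
	 *                                   TRACE_TYPE_NO_CONSUME keeps the
	 *                                   current event in the buffer
	 * replay failed again            -> data->failed stays set; the
	 *                                   entry is retried on the next read
	 */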
@@ -952,7 +1078,7 @@ print_graph_function(struct trace_iterator *iter)
 		 * sizeof(struct ftrace_graph_ent_entry) is very small,
 		 * it can be safely saved at the stack.
 		 */
-		struct ftrace_graph_ent_entry *field, saved;
+		struct ftrace_graph_ent_entry saved;
 		trace_assign_type(field, entry);
 		saved = *field;
 		return print_graph_entry(&saved, s, iter);
@@ -1030,31 +1156,54 @@ static void print_graph_headers(struct seq_file *s)
 static void graph_trace_open(struct trace_iterator *iter)
 {
 	/* pid and depth on the last trace processed */
-	struct fgraph_data *data = alloc_percpu(struct fgraph_data);
+	struct fgraph_data *data;
 	int cpu;
 
+	iter->private = NULL;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
-		pr_warning("function graph tracer: not enough memory\n");
-	else
-		for_each_possible_cpu(cpu) {
-			pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
-			int *depth = &(per_cpu_ptr(data, cpu)->depth);
-			*pid = -1;
-			*depth = 0;
-		}
+		goto out_err;
+
+	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
+	if (!data->cpu_data)
+		goto out_err_free;
+
+	for_each_possible_cpu(cpu) {
+		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
+		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
+		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
+		*pid = -1;
+		*depth = 0;
+		*ignore = 0;
+	}
 
 	iter->private = data;
+
+	return;
+
+ out_err_free:
+	kfree(data);
+ out_err:
+	pr_warning("function graph tracer: not enough memory\n");
 }
 
 static void graph_trace_close(struct trace_iterator *iter)
 {
-	free_percpu(iter->private);
+	struct fgraph_data *data = iter->private;
+
+	if (data) {
+		free_percpu(data->cpu_data);
+		kfree(data);
+	}
 }
 
 static struct tracer graph_trace __read_mostly = {
 	.name		= "function_graph",
 	.open		= graph_trace_open,
+	.pipe_open	= graph_trace_open,
 	.close		= graph_trace_close,
+	.pipe_close	= graph_trace_close,
 	.wait_pipe	= poll_wait_pipe,
 	.init		= graph_trace_init,
 	.reset		= graph_trace_reset,
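
Note: wiring the same open/close handlers into pipe_open/pipe_close is what extends the per-CPU state and failed-entry replay to trace_pipe: streaming reads previously had no fgraph_data at all (iter->private stayed NULL), so every `if (data)` guard above was skipped. graph_trace_close() tolerates a partially failed open for the same reason:

	/* close must handle open having failed part-way (private == NULL) */
	if (data) {
		free_percpu(data->cpu_data);	/* per-CPU state */
		kfree(data);			/* iterator wrapper */
	}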