Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--	kernel/trace/trace_functions_graph.c	241
1 file changed, 184 insertions, 57 deletions
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 45e6c01b2e4..e998a824e9d 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -14,9 +14,21 @@
 #include "trace.h"
 #include "trace_output.h"
 
-struct fgraph_data {
+struct fgraph_cpu_data {
 	pid_t		last_pid;
 	int		depth;
+	int		ignore;
+	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
+};
+
+struct fgraph_data {
+	struct fgraph_cpu_data	*cpu_data;
+
+	/* Place to preserve last processed entry. */
+	struct ftrace_graph_ent_entry	ent;
+	struct ftrace_graph_ret_entry	ret;
+	int				failed;
+	int				cpu;
 };
 
 #define TRACE_GRAPH_INDENT 2
@@ -176,7 +188,7 @@ static int __trace_graph_entry(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ent_entry *entry;
 
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return 0;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -201,13 +213,11 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	int cpu;
 	int pc;
 
-	if (unlikely(!tr))
-		return 0;
-
 	if (!ftrace_trace_task(current))
 		return 0;
 
-	if (!ftrace_graph_addr(trace->func))
+	/* trace it when it is-nested-in or is a function enabled. */
+	if (!(trace->depth || ftrace_graph_addr(trace->func)))
 		return 0;
 
 	local_irq_save(flags);
@@ -220,9 +230,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	} else {
 		ret = 0;
 	}
-	/* Only do the atomic if it is not already set */
-	if (!test_tsk_trace_graph(current))
-		set_tsk_trace_graph(current);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
@@ -240,7 +247,7 @@ static void __trace_graph_return(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ret_entry *entry;
 
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
@@ -270,17 +277,24 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 		pc = preempt_count();
 		__trace_graph_return(tr, trace, flags, pc);
 	}
-	if (!trace->depth)
-		clear_tsk_trace_graph(current);
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
+void set_graph_array(struct trace_array *tr)
+{
+	graph_array = tr;
+
+	/* Make graph_array visible before we start tracing */
+
+	smp_mb();
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
 	int ret;
 
-	graph_array = tr;
+	set_graph_array(tr);
 	ret = register_ftrace_graph(&trace_graph_return,
 				    &trace_graph_entry);
 	if (ret)
@@ -290,11 +304,6 @@ static int graph_trace_init(struct trace_array *tr)
 	return 0;
 }
 
-void set_graph_array(struct trace_array *tr)
-{
-	graph_array = tr;
-}
-
 static void graph_trace_reset(struct trace_array *tr)
 {
 	tracing_stop_cmdline_record();
@@ -384,7 +393,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
 	if (!data)
 		return TRACE_TYPE_HANDLED;
 
-	last_pid = &(per_cpu_ptr(data, cpu)->last_pid);
+	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
 
 	if (*last_pid == pid)
 		return TRACE_TYPE_HANDLED;
@@ -435,26 +444,49 @@ static struct ftrace_graph_ret_entry *
 get_return_for_leaf(struct trace_iterator *iter,
 		    struct ftrace_graph_ent_entry *curr)
 {
-	struct ring_buffer_iter *ring_iter;
+	struct fgraph_data *data = iter->private;
+	struct ring_buffer_iter *ring_iter = NULL;
 	struct ring_buffer_event *event;
 	struct ftrace_graph_ret_entry *next;
 
-	ring_iter = iter->buffer_iter[iter->cpu];
+	/*
+	 * If the previous output failed to write to the seq buffer,
+	 * then we just reuse the data from before.
+	 */
+	if (data && data->failed) {
+		curr = &data->ent;
+		next = &data->ret;
+	} else {
 
-	/* First peek to compare current entry and the next one */
-	if (ring_iter)
-		event = ring_buffer_iter_peek(ring_iter, NULL);
-	else {
-		/* We need to consume the current entry to see the next one */
-		ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
-		event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
-					 NULL);
-	}
+		ring_iter = iter->buffer_iter[iter->cpu];
+
+		/* First peek to compare current entry and the next one */
+		if (ring_iter)
+			event = ring_buffer_iter_peek(ring_iter, NULL);
+		else {
+			/*
+			 * We need to consume the current entry to see
+			 * the next one.
+			 */
+			ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
+			event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
+						 NULL);
+		}
 
-	if (!event)
-		return NULL;
+		if (!event)
+			return NULL;
 
-	next = ring_buffer_event_data(event);
+		next = ring_buffer_event_data(event);
+
+		if (data) {
+			/*
+			 * Save current and next entries for later reference
+			 * if the output fails.
+			 */
+			data->ent = *curr;
+			data->ret = *next;
+		}
+	}
 
 	if (next->ent.type != TRACE_GRAPH_RET)
 		return NULL;
@@ -639,15 +671,21 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 	duration = graph_ret->rettime - graph_ret->calltime;
 
 	if (data) {
+		struct fgraph_cpu_data *cpu_data;
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data, cpu)->depth);
+
+		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 
 		/*
 		 * Comments display at + 1 to depth. Since
 		 * this is a leaf function, keep the comments
 		 * equal to this depth.
 		 */
-		*depth = call->depth - 1;
+		cpu_data->depth = call->depth - 1;
+
+		/* No need to keep this function around for this depth */
+		if (call->depth < FTRACE_RETFUNC_DEPTH)
+			cpu_data->enter_funcs[call->depth] = 0;
 	}
 
 	/* Overhead */
@@ -687,10 +725,15 @@ print_graph_entry_nested(struct trace_iterator *iter,
 	int i;
 
 	if (data) {
+		struct fgraph_cpu_data *cpu_data;
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data, cpu)->depth);
 
-		*depth = call->depth;
+		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
+		cpu_data->depth = call->depth;
+
+		/* Save this function pointer to see if the exit matches */
+		if (call->depth < FTRACE_RETFUNC_DEPTH)
+			cpu_data->enter_funcs[call->depth] = call->func;
 	}
 
 	/* No overhead */
@@ -782,19 +825,34 @@ static enum print_line_t
 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 		  struct trace_iterator *iter)
 {
-	int cpu = iter->cpu;
+	struct fgraph_data *data = iter->private;
 	struct ftrace_graph_ent *call = &field->graph_ent;
 	struct ftrace_graph_ret_entry *leaf_ret;
+	static enum print_line_t ret;
+	int cpu = iter->cpu;
 
 	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	leaf_ret = get_return_for_leaf(iter, field);
 	if (leaf_ret)
-		return print_graph_entry_leaf(iter, field, leaf_ret, s);
+		ret = print_graph_entry_leaf(iter, field, leaf_ret, s);
 	else
-		return print_graph_entry_nested(iter, field, s, cpu);
+		ret = print_graph_entry_nested(iter, field, s, cpu);
+
+	if (data) {
+		/*
+		 * If we failed to write our output, then we need to make
+		 * note of it. Because we already consumed our entry.
+		 */
+		if (s->full) {
+			data->failed = 1;
+			data->cpu = cpu;
+		} else
+			data->failed = 0;
+	}
 
+	return ret;
 }
 
 static enum print_line_t
@@ -805,19 +863,28 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 	struct fgraph_data *data = iter->private;
 	pid_t pid = ent->pid;
 	int cpu = iter->cpu;
+	int func_match = 1;
 	int ret;
 	int i;
 
 	if (data) {
+		struct fgraph_cpu_data *cpu_data;
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data, cpu)->depth);
+
+		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 
 		/*
 		 * Comments display at + 1 to depth. This is the
 		 * return from a function, we now want the comments
 		 * to display at the same level of the bracket.
 		 */
-		*depth = trace->depth - 1;
+		cpu_data->depth = trace->depth - 1;
+
+		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
+			if (cpu_data->enter_funcs[trace->depth] != trace->func)
+				func_match = 0;
+			cpu_data->enter_funcs[trace->depth] = 0;
+		}
 	}
 
 	if (print_graph_prologue(iter, s, 0, 0))
@@ -842,9 +909,21 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	ret = trace_seq_printf(s, "}\n");
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
+	/*
+	 * If the return function does not have a matching entry,
+	 * then the entry was lost. Instead of just printing
+	 * the '}' and letting the user guess what function this
+	 * belongs to, write out the function name.
+	 */
+	if (func_match) {
+		ret = trace_seq_printf(s, "}\n");
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	} else {
+		ret = trace_seq_printf(s, "} (%ps)\n", (void *)trace->func);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
 
 	/* Overrun */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
@@ -873,7 +952,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
 	int i;
 
 	if (data)
-		depth = per_cpu_ptr(data, iter->cpu)->depth;
+		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
 
 	if (print_graph_prologue(iter, s, 0, 0))
 		return TRACE_TYPE_PARTIAL_LINE;
@@ -941,8 +1020,33 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
 enum print_line_t
 print_graph_function(struct trace_iterator *iter)
 {
+	struct ftrace_graph_ent_entry *field;
+	struct fgraph_data *data = iter->private;
 	struct trace_entry *entry = iter->ent;
 	struct trace_seq *s = &iter->seq;
+	int cpu = iter->cpu;
+	int ret;
+
+	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
+		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
+		return TRACE_TYPE_HANDLED;
+	}
+
+	/*
+	 * If the last output failed, there's a possibility we need
+	 * to print out the missing entry which would never go out.
+	 */
+	if (data && data->failed) {
+		field = &data->ent;
+		iter->cpu = data->cpu;
+		ret = print_graph_entry(field, s, iter);
+		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
+			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
+			ret = TRACE_TYPE_NO_CONSUME;
+		}
+		iter->cpu = cpu;
+		return ret;
+	}
 
 	switch (entry->type) {
 	case TRACE_GRAPH_ENT: {
@@ -952,7 +1056,7 @@ print_graph_function(struct trace_iterator *iter)
 		 * sizeof(struct ftrace_graph_ent_entry) is very small,
 		 * it can be safely saved at the stack.
 		 */
-		struct ftrace_graph_ent_entry *field, saved;
+		struct ftrace_graph_ent_entry saved;
 		trace_assign_type(field, entry);
 		saved = *field;
 		return print_graph_entry(&saved, s, iter);
@@ -1030,31 +1134,54 @@ static void print_graph_headers(struct seq_file *s)
 static void graph_trace_open(struct trace_iterator *iter)
 {
 	/* pid and depth on the last trace processed */
-	struct fgraph_data *data = alloc_percpu(struct fgraph_data);
+	struct fgraph_data *data;
 	int cpu;
 
+	iter->private = NULL;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
-		pr_warning("function graph tracer: not enough memory\n");
-	else
-		for_each_possible_cpu(cpu) {
-			pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
-			int *depth = &(per_cpu_ptr(data, cpu)->depth);
-			*pid = -1;
-			*depth = 0;
-		}
+		goto out_err;
+
+	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
+	if (!data->cpu_data)
+		goto out_err_free;
+
+	for_each_possible_cpu(cpu) {
+		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
+		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
+		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
+		*pid = -1;
+		*depth = 0;
+		*ignore = 0;
+	}
 
 	iter->private = data;
+
+	return;
+
+ out_err_free:
+	kfree(data);
+ out_err:
+	pr_warning("function graph tracer: not enough memory\n");
 }
 
 static void graph_trace_close(struct trace_iterator *iter)
 {
-	free_percpu(iter->private);
+	struct fgraph_data *data = iter->private;
+
+	if (data) {
+		free_percpu(data->cpu_data);
+		kfree(data);
+	}
 }
 
 static struct tracer graph_trace __read_mostly = {
 	.name		= "function_graph",
 	.open		= graph_trace_open,
+	.pipe_open	= graph_trace_open,
 	.close		= graph_trace_close,
+	.pipe_close	= graph_trace_close,
 	.wait_pipe	= poll_wait_pipe,
 	.init		= graph_trace_init,
 	.reset		= graph_trace_reset,