Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
 -rw-r--r--   kernel/trace/trace_functions_graph.c | 107
 1 file changed, 79 insertions(+), 28 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index b1342c5d37c..e6989d9b44d 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -18,6 +18,7 @@ struct fgraph_cpu_data {
 	pid_t last_pid;
 	int depth;
 	int ignore;
+	unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
 };
 
 struct fgraph_data {
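
The hunk above adds one field to the per-cpu bookkeeping. For orientation, a sketch of the resulting structure (field layout taken from the context lines; FTRACE_RETFUNC_DEPTH is the graph tracer's existing nesting limit defined elsewhere in the ftrace headers):

	struct fgraph_cpu_data {
		pid_t		last_pid;
		int		depth;
		int		ignore;
		/* entry address remembered per nesting level, so the matching
		 * '}' can be checked (and named) when the function returns */
		unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
	};
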
@@ -187,7 +188,7 @@ static int __trace_graph_entry(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ent_entry *entry;
 
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return 0;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -212,13 +213,11 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	int cpu;
 	int pc;
 
-	if (unlikely(!tr))
-		return 0;
-
 	if (!ftrace_trace_task(current))
 		return 0;
 
-	if (!ftrace_graph_addr(trace->func))
+	/* trace it when it is-nested-in or is a function enabled. */
+	if (!(trace->depth || ftrace_graph_addr(trace->func)))
 		return 0;
 
 	local_irq_save(flags);
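
The rewritten check keeps tracing functions nested inside a function selected with set_graph_function: within such a call trace->depth is non-zero, so the condition passes even though the nested function itself is not in the graph filter. A hypothetical call chain (names made up) showing the two ways the condition succeeds:

	/* assume: echo foo > set_graph_function */
	foo() {			/* depth 0: ftrace_graph_addr(foo) is true   */
		bar();		/* depth 1: not in the filter, but depth != 0 */
	}			/* so bar() is still traced as nested         */

Because nesting is now derived from trace->depth directly, the per-task test_tsk_trace_graph/set_tsk_trace_graph bookkeeping is no longer needed and is removed in the following hunks.
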
@@ -231,9 +230,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	} else {
 		ret = 0;
 	}
-	/* Only do the atomic if it is not already set */
-	if (!test_tsk_trace_graph(current))
-		set_tsk_trace_graph(current);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
@@ -241,6 +237,14 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	return ret;
 }
 
+int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
+{
+	if (tracing_thresh)
+		return 1;
+	else
+		return trace_graph_entry(trace);
+}
+
 static void __trace_graph_return(struct trace_array *tr,
 				struct ftrace_graph_ret *trace,
 				unsigned long flags,
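
With a latency threshold active there is nothing to decide at function entry, so trace_graph_thresh_entry() writes no entry event and simply returns 1 to keep the return hook armed; the filtering is deferred to the return callback added further down, which knows the call's duration. A hedged sketch of that keep-or-drop decision (the helper name and the threshold value of 100 are made up for illustration):

	/* keep the event only if no threshold is set, or the call lasted
	 * at least as long as the threshold (times in the tracer's
	 * internal unit) */
	static int keep_return_event(unsigned long long calltime,
				     unsigned long long rettime,
				     unsigned long long thresh)
	{
		return !thresh || (rettime - calltime >= thresh);
	}

	/* keep_return_event(0, 30, 100)  -> 0, dropped
	 * keep_return_event(0, 250, 100) -> 1, recorded */
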
@@ -251,7 +255,7 @@ static void __trace_graph_return(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ret_entry *entry;
 
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
@@ -281,19 +285,39 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 		pc = preempt_count();
 		__trace_graph_return(tr, trace, flags, pc);
 	}
-	if (!trace->depth)
-		clear_tsk_trace_graph(current);
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
+void set_graph_array(struct trace_array *tr)
+{
+	graph_array = tr;
+
+	/* Make graph_array visible before we start tracing */
+
+	smp_mb();
+}
+
+void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
+{
+	if (tracing_thresh &&
+	    (trace->rettime - trace->calltime < tracing_thresh))
+		return;
+	else
+		trace_graph_return(trace);
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
 	int ret;
 
-	graph_array = tr;
-	ret = register_ftrace_graph(&trace_graph_return,
-				    &trace_graph_entry);
+	set_graph_array(tr);
+	if (tracing_thresh)
+		ret = register_ftrace_graph(&trace_graph_thresh_return,
+					    &trace_graph_thresh_entry);
+	else
+		ret = register_ftrace_graph(&trace_graph_return,
+					    &trace_graph_entry);
 	if (ret)
 		return ret;
 	tracing_start_cmdline_record();
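
Two related changes land in the hunk above: set_graph_array() now follows the graph_array assignment with an smp_mb() so the pointer is published before any callback can run, and graph_trace_init() registers the threshold-aware callbacks whenever tracing_thresh is non-zero. A rough sketch of the ordering the init path relies on (the name init_sketch is made up; everything else appears in the hunk):

	static int init_sketch(struct trace_array *tr)
	{
		set_graph_array(tr);	/* graph_array = tr; then smp_mb() */

		/* only after registration can trace_graph_entry() run on
		 * any CPU and dereference graph_array, so the store above
		 * must already be visible */
		if (tracing_thresh)
			return register_ftrace_graph(&trace_graph_thresh_return,
						     &trace_graph_thresh_entry);
		return register_ftrace_graph(&trace_graph_return,
					     &trace_graph_entry);
	}
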
@@ -301,11 +325,6 @@ static int graph_trace_init(struct trace_array *tr)
 	return 0;
 }
 
-void set_graph_array(struct trace_array *tr)
-{
-	graph_array = tr;
-}
-
 static void graph_trace_reset(struct trace_array *tr)
 {
 	tracing_stop_cmdline_record();
@@ -673,15 +692,21 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 	duration = graph_ret->rettime - graph_ret->calltime;
 
 	if (data) {
+		struct fgraph_cpu_data *cpu_data;
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
+
+		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 
 		/*
 		 * Comments display at + 1 to depth. Since
 		 * this is a leaf function, keep the comments
 		 * equal to this depth.
 		 */
-		*depth = call->depth - 1;
+		cpu_data->depth = call->depth - 1;
+
+		/* No need to keep this function around for this depth */
+		if (call->depth < FTRACE_RETFUNC_DEPTH)
+			cpu_data->enter_funcs[call->depth] = 0;
 	}
 
 	/* Overhead */
@@ -721,10 +746,15 @@ print_graph_entry_nested(struct trace_iterator *iter,
 	int i;
 
 	if (data) {
+		struct fgraph_cpu_data *cpu_data;
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
 
-		*depth = call->depth;
+		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
+		cpu_data->depth = call->depth;
+
+		/* Save this function pointer to see if the exit matches */
+		if (call->depth < FTRACE_RETFUNC_DEPTH)
+			cpu_data->enter_funcs[call->depth] = call->func;
 	}
 
 	/* No overhead */
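
Together with the leaf case in the previous hunk, each enter_funcs[] slot follows a short lifecycle; a condensed view (cpu_data stands for the current CPU's fgraph_cpu_data, exactly as in the hunks):

	/* nested entry printed: remember which function opened this depth */
	cpu_data->enter_funcs[call->depth] = call->func;

	/* leaf entry printed: entry and return were folded into one line,
	 * nothing will return at this depth, so clear the slot */
	cpu_data->enter_funcs[call->depth] = 0;

	/* return printed (next hunk): compare against the remembered entry,
	 * then clear the slot either way */

All three sites first check call->depth (or trace->depth) against FTRACE_RETFUNC_DEPTH, so deep recursion cannot index past the array.
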
@@ -854,19 +884,28 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 	struct fgraph_data *data = iter->private;
 	pid_t pid = ent->pid;
 	int cpu = iter->cpu;
+	int func_match = 1;
 	int ret;
 	int i;
 
 	if (data) {
+		struct fgraph_cpu_data *cpu_data;
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
+
+		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 
 		/*
 		 * Comments display at + 1 to depth. This is the
 		 * return from a function, we now want the comments
 		 * to display at the same level of the bracket.
 		 */
-		*depth = trace->depth - 1;
+		cpu_data->depth = trace->depth - 1;
+
+		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
+			if (cpu_data->enter_funcs[trace->depth] != trace->func)
+				func_match = 0;
+			cpu_data->enter_funcs[trace->depth] = 0;
+		}
 	}
 
 	if (print_graph_prologue(iter, s, 0, 0))
@@ -891,9 +930,21 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	ret = trace_seq_printf(s, "}\n");
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
+	/*
+	 * If the return function does not have a matching entry,
+	 * then the entry was lost. Instead of just printing
+	 * the '}' and letting the user guess what function this
+	 * belongs to, write out the function name.
+	 */
+	if (func_match) {
+		ret = trace_seq_printf(s, "}\n");
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	} else {
+		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
 
 	/* Overrun */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
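
The user-visible effect is limited to how an unmatched closing brace is rendered: %ps prints the symbol name of trace->func next to the '}'. A hypothetical before/after (function name and timing are made up for illustration):

	1)   3.141 us    |  }                        (old: reader must guess the caller)
	1)   3.141 us    |  } /* do_sample_work */   (new: named when unmatched)

The annotation is emitted only when func_match is 0, i.e. when the entry recorded for that depth does not match the returning function, which typically means the corresponding entry event was lost from the ring buffer.
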