Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--  kernel/trace/trace_functions_graph.c | 78
1 file changed, 54 insertions(+), 24 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index b1342c5d37c..e998a824e9d 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -18,6 +18,7 @@ struct fgraph_cpu_data {
 	pid_t last_pid;
 	int depth;
 	int ignore;
+	unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
 };
 
 struct fgraph_data {
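The hunk above adds a per-CPU scratch array indexed by call depth: the entry handler will record which function was entered at each depth, and the return handler will check the recorded address against the function actually returning. Assuming FTRACE_RETFUNC_DEPTH is 50 (its value in kernels of this vintage), the array costs about 400 bytes per CPU on a 64-bit build; the hypothetical userspace snippet below re-declares the struct only to show the footprint.

#include <stdio.h>

#define FTRACE_RETFUNC_DEPTH 50	/* assumed; matches ftrace.h of this era */

struct fgraph_cpu_data {
	int last_pid;	/* stand-in for pid_t */
	int depth;
	int ignore;
	unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
};

int main(void)
{
	/* ~400 extra bytes per CPU for the enter_funcs array on LP64 */
	printf("sizeof(struct fgraph_cpu_data) = %zu\n",
	       sizeof(struct fgraph_cpu_data));
	return 0;
}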
@@ -212,13 +213,11 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	int cpu;
 	int pc;
 
-	if (unlikely(!tr))
-		return 0;
-
 	if (!ftrace_trace_task(current))
 		return 0;
 
-	if (!ftrace_graph_addr(trace->func))
+	/* trace it when it is-nested-in or is a function enabled. */
+	if (!(trace->depth || ftrace_graph_addr(trace->func)))
 		return 0;
 
 	local_irq_save(flags);
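This is the behavioral heart of the trace_graph_entry() change: a function is now traced either because it is itself in the graph filter, or because it is nested inside one that is (trace->depth is nonzero). Tracking nesting through the depth carried in the trace entry makes the per-task flag removed in the next hunk redundant. A minimal sketch of the predicate, with ftrace_graph_addr() replaced by a stub address check:

#include <stdio.h>

/* Stub: the real ftrace_graph_addr() consults the graph filter table */
static int ftrace_graph_addr_stub(unsigned long func)
{
	return func == 0x1000;	/* pretend only this address is enabled */
}

/* trace it when it is nested in, or is, an enabled function */
static int should_trace(int depth, unsigned long func)
{
	return depth || ftrace_graph_addr_stub(func);
}

int main(void)
{
	printf("%d\n", should_trace(0, 0x2000));	/* 0: filtered out */
	printf("%d\n", should_trace(0, 0x1000));	/* 1: enabled root */
	printf("%d\n", should_trace(3, 0x2000));	/* 1: nested call  */
	return 0;
}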
@@ -231,9 +230,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	} else {
 		ret = 0;
 	}
-	/* Only do the atomic if it is not already set */
-	if (!test_tsk_trace_graph(current))
-		set_tsk_trace_graph(current);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
@@ -281,17 +277,24 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 		pc = preempt_count();
 		__trace_graph_return(tr, trace, flags, pc);
 	}
-	if (!trace->depth)
-		clear_tsk_trace_graph(current);
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
+void set_graph_array(struct trace_array *tr)
+{
+	graph_array = tr;
+
+	/* Make graph_array visible before we start tracing */
+
+	smp_mb();
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
 	int ret;
 
-	graph_array = tr;
+	set_graph_array(tr);
 	ret = register_ftrace_graph(&trace_graph_return,
 				    &trace_graph_entry);
 	if (ret)
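set_graph_array() now executes an smp_mb() so that the graph_array assignment is globally visible before register_ftrace_graph() arms the callbacks, which may begin running on other CPUs immediately. Below is a rough userspace analogue using C11 atomics; it is only a sketch, it folds the enable step into the helper for brevity, and atomic_thread_fence() merely approximates the kernel's smp_mb() semantics.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static int trace_array_storage;		/* stands in for struct trace_array */
static int *graph_array;		/* pointer published to the callbacks */
static atomic_bool callbacks_armed;	/* stands in for graph registration */

static void set_graph_array_sketch(int *tr)
{
	graph_array = tr;
	/* Make graph_array visible before we start tracing */
	atomic_thread_fence(memory_order_seq_cst);
	atomic_store(&callbacks_armed, true);
}

static void trace_graph_entry_sketch(void)
{
	/* A callback that observes the armed flag must also see the pointer */
	if (atomic_load(&callbacks_armed) && graph_array)
		printf("tracing with array at %p\n", (void *)graph_array);
}

int main(void)
{
	set_graph_array_sketch(&trace_array_storage);
	trace_graph_entry_sketch();
	return 0;
}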
@@ -301,11 +304,6 @@ static int graph_trace_init(struct trace_array *tr)
 	return 0;
 }
 
-void set_graph_array(struct trace_array *tr)
-{
-	graph_array = tr;
-}
-
 static void graph_trace_reset(struct trace_array *tr)
 {
 	tracing_stop_cmdline_record();
@@ -673,15 +671,21 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 	duration = graph_ret->rettime - graph_ret->calltime;
 
 	if (data) {
+		struct fgraph_cpu_data *cpu_data;
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
+
+		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 
 		/*
 		 * Comments display at + 1 to depth. Since
 		 * this is a leaf function, keep the comments
 		 * equal to this depth.
 		 */
-		*depth = call->depth - 1;
+		cpu_data->depth = call->depth - 1;
+
+		/* No need to keep this function around for this depth */
+		if (call->depth < FTRACE_RETFUNC_DEPTH)
+			cpu_data->enter_funcs[call->depth] = 0;
 	}
 
 	/* Overhead */
@@ -721,10 +725,15 @@ print_graph_entry_nested(struct trace_iterator *iter,
 	int i;
 
 	if (data) {
+		struct fgraph_cpu_data *cpu_data;
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
 
-		*depth = call->depth;
+		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
+		cpu_data->depth = call->depth;
+
+		/* Save this function pointer to see if the exit matches */
+		if (call->depth < FTRACE_RETFUNC_DEPTH)
+			cpu_data->enter_funcs[call->depth] = call->func;
 	}
 
 	/* No overhead */
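Taken together, the two entry paths above form one half of a matching protocol. A nested entry saves its function address at the current depth; a leaf entry clears that slot, since entry and return are folded into a single printed line and no separate return event will consume it. The return handler in the next hunk reads and clears the slot to detect entries that never reached the output (for example, overwritten in the ring buffer). A minimal single-CPU simulation, again assuming FTRACE_RETFUNC_DEPTH is 50:

#include <stdio.h>

#define FTRACE_RETFUNC_DEPTH 50	/* assumed value */

static unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];

/* Save this function pointer to see if the exit matches */
static void entry_nested(int depth, unsigned long func)
{
	if (depth < FTRACE_RETFUNC_DEPTH)
		enter_funcs[depth] = func;
}

/* Return 1 if the recorded entry matches the returning function */
static int ret_matches(int depth, unsigned long func)
{
	int func_match = 1;

	if (depth < FTRACE_RETFUNC_DEPTH) {
		if (enter_funcs[depth] != func)
			func_match = 0;
		enter_funcs[depth] = 0;	/* slot is reusable either way */
	}
	return func_match;
}

int main(void)
{
	entry_nested(0, 0x1000);
	printf("%d\n", ret_matches(0, 0x1000));	/* 1: entry was seen */
	printf("%d\n", ret_matches(0, 0x2000));	/* 0: entry was lost */
	return 0;
}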
@@ -854,19 +863,28 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 	struct fgraph_data *data = iter->private;
 	pid_t pid = ent->pid;
 	int cpu = iter->cpu;
+	int func_match = 1;
 	int ret;
 	int i;
 
 	if (data) {
+		struct fgraph_cpu_data *cpu_data;
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
+
+		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 
 		/*
 		 * Comments display at + 1 to depth. This is the
 		 * return from a function, we now want the comments
 		 * to display at the same level of the bracket.
 		 */
-		*depth = trace->depth - 1;
+		cpu_data->depth = trace->depth - 1;
+
+		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
+			if (cpu_data->enter_funcs[trace->depth] != trace->func)
+				func_match = 0;
+			cpu_data->enter_funcs[trace->depth] = 0;
+		}
 	}
 
 	if (print_graph_prologue(iter, s, 0, 0))
@@ -891,9 +909,21 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 		return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	ret = trace_seq_printf(s, "}\n");
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
+	/*
+	 * If the return function does not have a matching entry,
+	 * then the entry was lost. Instead of just printing
+	 * the '}' and letting the user guess what function this
+	 * belongs to, write out the function name.
+	 */
+	if (func_match) {
+		ret = trace_seq_printf(s, "}\n");
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	} else {
+		ret = trace_seq_printf(s, "} (%ps)\n", (void *)trace->func);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
 
 	/* Overrun */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
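The net effect on the output: when func_match is zero the matching entry never made it into the trace, so instead of a bare closing brace the reader gets the function name, printed with the kernel's %ps specifier (which resolves a text address to a symbol). A userspace approximation, faking the symbol lookup with a string:

#include <stdio.h>

static void print_close_brace(int func_match, const char *funcname)
{
	if (func_match)
		printf("}\n");
	else	/* kernel: trace_seq_printf(s, "} (%ps)\n", trace->func) */
		printf("} (%s)\n", funcname);
}

int main(void)
{
	print_close_brace(1, "schedule");	/* normal:     "}"            */
	print_close_brace(0, "schedule");	/* lost entry: "} (schedule)" */
	return 0;
}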
