diff options
| author | Robert Richter <robert.richter@amd.com> | 2010-10-15 06:45:00 -0400 |
|---|---|---|
| committer | Robert Richter <robert.richter@amd.com> | 2010-10-15 06:45:00 -0400 |
| commit | 6268464b370e234e0255330190f9bd5d19386ad7 (patch) | |
| tree | 5742641092ce64227dd2086d78baaede57da1f80 /kernel/trace/trace_functions_graph.c | |
| parent | 7df01d96b295e400167e78061b81d4c91630b12d (diff) | |
| parent | 0fdf13606b67f830559abdaad15980c7f4f05ec4 (diff) | |
Merge remote branch 'tip/perf/core' into oprofile/core
Conflicts:
arch/arm/oprofile/common.c
kernel/perf_event.c
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
| -rw-r--r-- | kernel/trace/trace_functions_graph.c | 131 |
1 file changed, 126 insertions, 5 deletions
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 6f233698518e..ef49e9370b25 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
| @@ -15,15 +15,19 @@ | |||
| 15 | #include "trace.h" | 15 | #include "trace.h" |
| 16 | #include "trace_output.h" | 16 | #include "trace_output.h" |
| 17 | 17 | ||
| 18 | /* When set, irq functions will be ignored */ | ||
| 19 | static int ftrace_graph_skip_irqs; | ||
| 20 | |||
| 18 | struct fgraph_cpu_data { | 21 | struct fgraph_cpu_data { |
| 19 | pid_t last_pid; | 22 | pid_t last_pid; |
| 20 | int depth; | 23 | int depth; |
| 24 | int depth_irq; | ||
| 21 | int ignore; | 25 | int ignore; |
| 22 | unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH]; | 26 | unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH]; |
| 23 | }; | 27 | }; |
| 24 | 28 | ||
| 25 | struct fgraph_data { | 29 | struct fgraph_data { |
| 26 | struct fgraph_cpu_data *cpu_data; | 30 | struct fgraph_cpu_data __percpu *cpu_data; |
| 27 | 31 | ||
| 28 | /* Place to preserve last processed entry. */ | 32 | /* Place to preserve last processed entry. */ |
| 29 | struct ftrace_graph_ent_entry ent; | 33 | struct ftrace_graph_ent_entry ent; |
| @@ -41,6 +45,7 @@ struct fgraph_data { | |||
| 41 | #define TRACE_GRAPH_PRINT_PROC 0x8 | 45 | #define TRACE_GRAPH_PRINT_PROC 0x8 |
| 42 | #define TRACE_GRAPH_PRINT_DURATION 0x10 | 46 | #define TRACE_GRAPH_PRINT_DURATION 0x10 |
| 43 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 | 47 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 |
| 48 | #define TRACE_GRAPH_PRINT_IRQS 0x40 | ||
| 44 | 49 | ||
| 45 | static struct tracer_opt trace_opts[] = { | 50 | static struct tracer_opt trace_opts[] = { |
| 46 | /* Display overruns? (for self-debug purpose) */ | 51 | /* Display overruns? (for self-debug purpose) */ |
| @@ -55,13 +60,15 @@ static struct tracer_opt trace_opts[] = { | |||
| 55 | { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, | 60 | { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, |
| 56 | /* Display absolute time of an entry */ | 61 | /* Display absolute time of an entry */ |
| 57 | { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, | 62 | { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, |
| 63 | /* Display interrupts */ | ||
| 64 | { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) }, | ||
| 58 | { } /* Empty entry */ | 65 | { } /* Empty entry */ |
| 59 | }; | 66 | }; |
| 60 | 67 | ||
| 61 | static struct tracer_flags tracer_flags = { | 68 | static struct tracer_flags tracer_flags = { |
| 62 | /* Don't display overruns and proc by default */ | 69 | /* Don't display overruns and proc by default */ |
| 63 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | | 70 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | |
| 64 | TRACE_GRAPH_PRINT_DURATION, | 71 | TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS, |
| 65 | .opts = trace_opts | 72 | .opts = trace_opts |
| 66 | }; | 73 | }; |
| 67 | 74 | ||
| @@ -204,6 +211,14 @@ int __trace_graph_entry(struct trace_array *tr, | |||
| 204 | return 1; | 211 | return 1; |
| 205 | } | 212 | } |
| 206 | 213 | ||
| 214 | static inline int ftrace_graph_ignore_irqs(void) | ||
| 215 | { | ||
| 216 | if (!ftrace_graph_skip_irqs) | ||
| 217 | return 0; | ||
| 218 | |||
| 219 | return in_irq(); | ||
| 220 | } | ||
| 221 | |||
| 207 | int trace_graph_entry(struct ftrace_graph_ent *trace) | 222 | int trace_graph_entry(struct ftrace_graph_ent *trace) |
| 208 | { | 223 | { |
| 209 | struct trace_array *tr = graph_array; | 224 | struct trace_array *tr = graph_array; |
| @@ -218,7 +233,8 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) | |||
| 218 | return 0; | 233 | return 0; |
| 219 | 234 | ||
| 220 | /* trace it when it is-nested-in or is a function enabled. */ | 235 | /* trace it when it is-nested-in or is a function enabled. */ |
| 221 | if (!(trace->depth || ftrace_graph_addr(trace->func))) | 236 | if (!(trace->depth || ftrace_graph_addr(trace->func)) || |
| 237 | ftrace_graph_ignore_irqs()) | ||
| 222 | return 0; | 238 | return 0; |
| 223 | 239 | ||
| 224 | local_irq_save(flags); | 240 | local_irq_save(flags); |
| @@ -649,8 +665,9 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) | |||
| 649 | 665 | ||
| 650 | /* Print nsecs (we don't want to exceed 7 numbers) */ | 666 | /* Print nsecs (we don't want to exceed 7 numbers) */ |
| 651 | if (len < 7) { | 667 | if (len < 7) { |
| 652 | snprintf(nsecs_str, min(sizeof(nsecs_str), 8UL - len), "%03lu", | 668 | size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len); |
| 653 | nsecs_rem); | 669 | |
| 670 | snprintf(nsecs_str, slen, "%03lu", nsecs_rem); | ||
| 654 | ret = trace_seq_printf(s, ".%s", nsecs_str); | 671 | ret = trace_seq_printf(s, ".%s", nsecs_str); |
| 655 | if (!ret) | 672 | if (!ret) |
| 656 | return TRACE_TYPE_PARTIAL_LINE; | 673 | return TRACE_TYPE_PARTIAL_LINE; |
| @@ -855,6 +872,92 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, | |||
| 855 | return 0; | 872 | return 0; |
| 856 | } | 873 | } |
| 857 | 874 | ||
| 875 | /* | ||
| 876 | * Entry check for irq code | ||
| 877 | * | ||
| 878 | * returns 1 if | ||
| 879 | * - we are inside irq code | ||
| 880 | * - we just entered irq code | ||
| 881 | * | ||
| 882 | * returns 0 if | ||
| 883 | * - funcgraph-interrupts option is set | ||
| 884 | * - we are not inside irq code | ||
| 885 | */ | ||
| 886 | static int | ||
| 887 | check_irq_entry(struct trace_iterator *iter, u32 flags, | ||
| 888 | unsigned long addr, int depth) | ||
| 889 | { | ||
| 890 | int cpu = iter->cpu; | ||
| 891 | struct fgraph_data *data = iter->private; | ||
| 892 | int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); | ||
| 893 | |||
| 894 | if (flags & TRACE_GRAPH_PRINT_IRQS) | ||
| 895 | return 0; | ||
| 896 | |||
| 897 | /* | ||
| 898 | * We are inside the irq code | ||
| 899 | */ | ||
| 900 | if (*depth_irq >= 0) | ||
| 901 | return 1; | ||
| 902 | |||
| 903 | if ((addr < (unsigned long)__irqentry_text_start) || | ||
| 904 | (addr >= (unsigned long)__irqentry_text_end)) | ||
| 905 | return 0; | ||
| 906 | |||
| 907 | /* | ||
| 908 | * We are entering irq code. | ||
| 909 | */ | ||
| 910 | *depth_irq = depth; | ||
| 911 | return 1; | ||
| 912 | } | ||
| 913 | |||
| 914 | /* | ||
| 915 | * Return check for irq code | ||
| 916 | * | ||
| 917 | * returns 1 if | ||
| 918 | * - we are inside irq code | ||
| 919 | * - we just left irq code | ||
| 920 | * | ||
| 921 | * returns 0 if | ||
| 922 | * - funcgraph-interrupts option is set | ||
| 923 | * - we are not inside irq code | ||
| 924 | */ | ||
| 925 | static int | ||
| 926 | check_irq_return(struct trace_iterator *iter, u32 flags, int depth) | ||
| 927 | { | ||
| 928 | int cpu = iter->cpu; | ||
| 929 | struct fgraph_data *data = iter->private; | ||
| 930 | int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); | ||
| 931 | |||
| 932 | if (flags & TRACE_GRAPH_PRINT_IRQS) | ||
| 933 | return 0; | ||
| 934 | |||
| 935 | /* | ||
| 936 | * We are not inside the irq code. | ||
| 937 | */ | ||
| 938 | if (*depth_irq == -1) | ||
| 939 | return 0; | ||
| 940 | |||
| 941 | /* | ||
| 942 | * We are inside the irq code, and this is returning entry. | ||
| 943 | * Let's not trace it and clear the entry depth, since | ||
| 944 | * we are out of irq code. | ||
| 945 | * | ||
| 946 | * This condition ensures that we 'leave the irq code' once | ||
| 947 | * we are out of the entry depth. Thus protecting us from | ||
| 948 | * the RETURN entry loss. | ||
| 949 | */ | ||
| 950 | if (*depth_irq >= depth) { | ||
| 951 | *depth_irq = -1; | ||
| 952 | return 1; | ||
| 953 | } | ||
| 954 | |||
| 955 | /* | ||
| 956 | * We are inside the irq code, and this is not the entry. | ||
| 957 | */ | ||
| 958 | return 1; | ||
| 959 | } | ||
| 960 | |||
| 858 | static enum print_line_t | 961 | static enum print_line_t |
| 859 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | 962 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, |
| 860 | struct trace_iterator *iter, u32 flags) | 963 | struct trace_iterator *iter, u32 flags) |
| @@ -865,6 +968,9 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | |||
| 865 | static enum print_line_t ret; | 968 | static enum print_line_t ret; |
| 866 | int cpu = iter->cpu; | 969 | int cpu = iter->cpu; |
| 867 | 970 | ||
| 971 | if (check_irq_entry(iter, flags, call->func, call->depth)) | ||
| 972 | return TRACE_TYPE_HANDLED; | ||
| 973 | |||
| 868 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags)) | 974 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags)) |
| 869 | return TRACE_TYPE_PARTIAL_LINE; | 975 | return TRACE_TYPE_PARTIAL_LINE; |
| 870 | 976 | ||
| @@ -902,6 +1008,9 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
| 902 | int ret; | 1008 | int ret; |
| 903 | int i; | 1009 | int i; |
| 904 | 1010 | ||
| 1011 | if (check_irq_return(iter, flags, trace->depth)) | ||
| 1012 | return TRACE_TYPE_HANDLED; | ||
| 1013 | |||
| 905 | if (data) { | 1014 | if (data) { |
| 906 | struct fgraph_cpu_data *cpu_data; | 1015 | struct fgraph_cpu_data *cpu_data; |
| 907 | int cpu = iter->cpu; | 1016 | int cpu = iter->cpu; |
| @@ -1210,9 +1319,12 @@ void graph_trace_open(struct trace_iterator *iter) | |||
| 1210 | pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); | 1319 | pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); |
| 1211 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); | 1320 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); |
| 1212 | int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore); | 1321 | int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore); |
| 1322 | int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); | ||
| 1323 | |||
| 1213 | *pid = -1; | 1324 | *pid = -1; |
| 1214 | *depth = 0; | 1325 | *depth = 0; |
| 1215 | *ignore = 0; | 1326 | *ignore = 0; |
| 1327 | *depth_irq = -1; | ||
| 1216 | } | 1328 | } |
| 1217 | 1329 | ||
| 1218 | iter->private = data; | 1330 | iter->private = data; |
| @@ -1235,6 +1347,14 @@ void graph_trace_close(struct trace_iterator *iter) | |||
| 1235 | } | 1347 | } |
| 1236 | } | 1348 | } |
| 1237 | 1349 | ||
| 1350 | static int func_graph_set_flag(u32 old_flags, u32 bit, int set) | ||
| 1351 | { | ||
| 1352 | if (bit == TRACE_GRAPH_PRINT_IRQS) | ||
| 1353 | ftrace_graph_skip_irqs = !set; | ||
| 1354 | |||
| 1355 | return 0; | ||
| 1356 | } | ||
| 1357 | |||
| 1238 | static struct trace_event_functions graph_functions = { | 1358 | static struct trace_event_functions graph_functions = { |
| 1239 | .trace = print_graph_function_event, | 1359 | .trace = print_graph_function_event, |
| 1240 | }; | 1360 | }; |
| @@ -1261,6 +1381,7 @@ static struct tracer graph_trace __read_mostly = { | |||
| 1261 | .print_line = print_graph_function, | 1381 | .print_line = print_graph_function, |
| 1262 | .print_header = print_graph_headers, | 1382 | .print_header = print_graph_headers, |
| 1263 | .flags = &tracer_flags, | 1383 | .flags = &tracer_flags, |
| 1384 | .set_flag = func_graph_set_flag, | ||
| 1264 | #ifdef CONFIG_FTRACE_SELFTEST | 1385 | #ifdef CONFIG_FTRACE_SELFTEST |
| 1265 | .selftest = trace_selftest_startup_function_graph, | 1386 | .selftest = trace_selftest_startup_function_graph, |
| 1266 | #endif | 1387 | #endif |
