Diffstat (limited to 'kernel/trace')

 -rw-r--r--   kernel/trace/trace_functions_graph.c   101
 1 files changed, 100 insertions, 1 deletions
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index c93bcb248638..8674750a5ece 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -18,6 +18,7 @@
 struct fgraph_cpu_data {
 	pid_t last_pid;
 	int depth;
+	int depth_irq;
 	int ignore;
 	unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
 };
@@ -41,6 +42,7 @@ struct fgraph_data { | |||
41 | #define TRACE_GRAPH_PRINT_PROC 0x8 | 42 | #define TRACE_GRAPH_PRINT_PROC 0x8 |
42 | #define TRACE_GRAPH_PRINT_DURATION 0x10 | 43 | #define TRACE_GRAPH_PRINT_DURATION 0x10 |
43 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 | 44 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 |
45 | #define TRACE_GRAPH_PRINT_IRQS 0x40 | ||
44 | 46 | ||
45 | static struct tracer_opt trace_opts[] = { | 47 | static struct tracer_opt trace_opts[] = { |
46 | /* Display overruns? (for self-debug purpose) */ | 48 | /* Display overruns? (for self-debug purpose) */ |
@@ -55,13 +57,15 @@ static struct tracer_opt trace_opts[] = {
 	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
 	/* Display absolute time of an entry */
 	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
+	/* Display interrupts */
+	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
 	{ } /* Empty entry */
 };
 
 static struct tracer_flags tracer_flags = {
 	/* Don't display overruns and proc by default */
 	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
-	       TRACE_GRAPH_PRINT_DURATION,
+	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
 	.opts = trace_opts
 };
 
@@ -855,6 +859,92 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
 	return 0;
 }
 
+/*
+ * Entry check for irq code
+ *
+ * returns 1 if
+ *  - we are inside irq code
+ *  - we just entered irq code
+ *
+ * returns 0 if
+ *  - funcgraph-irqs option is set
+ *  - we are not inside irq code
+ */
+static int
+check_irq_entry(struct trace_iterator *iter, u32 flags,
+		unsigned long addr, int depth)
+{
+	int cpu = iter->cpu;
+	struct fgraph_data *data = iter->private;
+	int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
+
+	if (flags & TRACE_GRAPH_PRINT_IRQS)
+		return 0;
+
+	/*
+	 * We are inside the irq code
+	 */
+	if (*depth_irq >= 0)
+		return 1;
+
+	if ((addr < (unsigned long)__irqentry_text_start) ||
+	    (addr >= (unsigned long)__irqentry_text_end))
+		return 0;
+
+	/*
+	 * We are entering irq code.
+	 */
+	*depth_irq = depth;
+	return 1;
+}
+
+/*
+ * Return check for irq code
+ *
+ * returns 1 if
+ *  - we are inside irq code
+ *  - we just left irq code
+ *
+ * returns 0 if
+ *  - funcgraph-irqs option is set
+ *  - we are not inside irq code
+ */
+static int
+check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
+{
+	int cpu = iter->cpu;
+	struct fgraph_data *data = iter->private;
+	int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
+
+	if (flags & TRACE_GRAPH_PRINT_IRQS)
+		return 0;
+
+	/*
+	 * We are not inside the irq code.
+	 */
+	if (*depth_irq == -1)
+		return 0;
+
+	/*
+	 * We are inside the irq code, and this return is at or above
+	 * the recorded entry depth. Don't trace it and clear the entry
+	 * depth, since we are now out of irq code.
+	 *
+	 * Using >= (rather than ==) ensures that we 'leave the irq code'
+	 * once we are out of the entry depth, protecting us against a
+	 * lost RETURN entry.
+	 */
+	if (*depth_irq >= depth) {
+		*depth_irq = -1;
+		return 1;
+	}
+
+	/*
+	 * We are inside the irq code, and this is not the entry.
+	 */
+	return 1;
+}
+
 static enum print_line_t
 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 		  struct trace_iterator *iter, u32 flags)
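Taken together, check_irq_entry() and check_irq_return() implement a small per-CPU state machine: depth_irq records the call depth at which irq code was entered (-1 while outside irq code), every entry seen while that state is active is hidden, and the state clears once a return at or above the recorded depth is processed. Below is a minimal stand-alone sketch of that suppression logic (not kernel code: the address range, function names and sample trace are invented for illustration; only the rules mirror the patch):

/*
 * User-space sketch of the depth_irq suppression state machine.
 * IRQ_TEXT_START/END stand in for __irqentry_text_start/end.
 */
#include <stdio.h>
#include <stdbool.h>

#define IRQ_TEXT_START	0x1000UL	/* hypothetical irq text range */
#define IRQ_TEXT_END	0x2000UL

static int depth_irq = -1;		/* -1 means "not inside irq code" */

/* Mirrors check_irq_entry(): true means "hide this entry". */
static bool hide_entry(unsigned long addr, int depth, bool show_irqs)
{
	if (show_irqs)			/* funcgraph-irqs set: hide nothing */
		return false;
	if (depth_irq >= 0)		/* already inside irq code */
		return true;
	if (addr < IRQ_TEXT_START || addr >= IRQ_TEXT_END)
		return false;		/* ordinary function, keep it */
	depth_irq = depth;		/* entering irq code: remember depth */
	return true;
}

/* Mirrors check_irq_return(): true means "hide this return". */
static bool hide_return(int depth, bool show_irqs)
{
	if (show_irqs)
		return false;
	if (depth_irq == -1)		/* not inside irq code */
		return false;
	if (depth_irq >= depth)		/* back at (or above) the entry depth */
		depth_irq = -1;
	return true;
}

int main(void)
{
	/* Hypothetical trace: an irq handler interrupts kmalloc(). */
	printf("entry  kmalloc  hidden=%d\n", hide_entry(0x4000, 0, false));
	printf("entry  do_IRQ   hidden=%d\n", hide_entry(0x1100, 1, false));
	printf("entry  handler  hidden=%d\n", hide_entry(0x4200, 2, false));
	printf("return handler  hidden=%d\n", hide_return(2, false));
	printf("return do_IRQ   hidden=%d\n", hide_return(1, false));
	printf("return kmalloc  hidden=%d\n", hide_return(0, false));
	return 0;
}

Running the sketch prints hidden=0 only for the kmalloc entry and return, matching what the tracer would now emit with the funcgraph-irqs option turned off.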
@@ -865,6 +955,9 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 	static enum print_line_t ret;
 	int cpu = iter->cpu;
 
+	if (check_irq_entry(iter, flags, call->func, call->depth))
+		return TRACE_TYPE_HANDLED;
+
 	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
 		return TRACE_TYPE_PARTIAL_LINE;
 
@@ -902,6 +995,9 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 	int ret;
 	int i;
 
+	if (check_irq_return(iter, flags, trace->depth))
+		return TRACE_TYPE_HANDLED;
+
 	if (data) {
 		struct fgraph_cpu_data *cpu_data;
 		int cpu = iter->cpu;
@@ -1210,9 +1306,12 @@ void graph_trace_open(struct trace_iterator *iter)
 		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
 		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
 		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
+		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
+
 		*pid = -1;
 		*depth = 0;
 		*ignore = 0;
+		*depth_irq = -1;
 	}
 
 	iter->private = data;
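Note that TRACE_GRAPH_PRINT_IRQS is also added to the default tracer_flags.val above, so irq entries keep being displayed unless the funcgraph-irqs option is cleared at run time, and graph_trace_open() resets each CPU's depth_irq to the -1 "not in irq code" sentinel so a freshly opened trace never starts out suppressing entries.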