Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
 -rw-r--r--   kernel/trace/trace_functions_graph.c   267
 1 files changed, 246 insertions, 21 deletions
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d31d695174aa..894b50bca313 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -15,15 +15,24 @@
 
 #define TRACE_GRAPH_INDENT 2
 
+/* Flag options */
 #define TRACE_GRAPH_PRINT_OVERRUN 0x1
+#define TRACE_GRAPH_PRINT_CPU 0x2
+#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
+
 static struct tracer_opt trace_opts[] = {
-        /* Display overruns or not */
-        { TRACER_OPT(overrun, TRACE_GRAPH_PRINT_OVERRUN) },
+        /* Display overruns ? */
+        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
+        /* Display CPU ? */
+        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
+        /* Display Overhead ? */
+        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
         { } /* Empty entry */
 };
 
 static struct tracer_flags tracer_flags = {
-        .val = 0, /* Don't display overruns by default */
+        /* Don't display overruns by default */
+        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
         .opts = trace_opts
 };
 
@@ -52,37 +61,208 @@ static void graph_trace_reset(struct trace_array *tr)
         unregister_ftrace_graph();
 }
 
+static inline int log10_cpu(int nb)
+{
+        if (nb / 100)
+                return 3;
+        if (nb / 10)
+                return 2;
+        return 1;
+}
+
+static enum print_line_t
+print_graph_cpu(struct trace_seq *s, int cpu)
+{
+        int i;
+        int ret;
+        int log10_this = log10_cpu(cpu);
+        int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map));
+
+
+        /*
+         * Start with a space character - to make it stand out
+         * to the right a bit when trace output is pasted into
+         * email:
+         */
+        ret = trace_seq_printf(s, " ");
+
+        /*
+         * Tricky - we space the CPU field according to the max
+         * number of online CPUs. On a 2-cpu system it would take
+         * a maximum of 1 digit - on a 128 cpu system it would
+         * take up to 3 digits:
+         */
+        for (i = 0; i < log10_all - log10_this; i++) {
+                ret = trace_seq_printf(s, " ");
+                if (!ret)
+                        return TRACE_TYPE_PARTIAL_LINE;
+        }
+        ret = trace_seq_printf(s, "%d) ", cpu);
+        if (!ret)
+                return TRACE_TYPE_PARTIAL_LINE;
+
+        return TRACE_TYPE_HANDLED;
+}
+
+
 /* If the pid changed since the last trace, output this event */
 static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
 {
-        char *comm;
+        char *comm, *prev_comm;
+        pid_t prev_pid;
+        int ret;
 
         if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
                 return 1;
 
+        prev_pid = last_pid[cpu];
         last_pid[cpu] = pid;
+
         comm = trace_find_cmdline(pid);
+        prev_comm = trace_find_cmdline(prev_pid);
 
-        return trace_seq_printf(s, "\nCPU[%03d]"
-                        " ------------8<---------- thread %s-%d"
-                        " ------------8<----------\n\n",
-                        cpu, comm, pid);
+        /*
+         * Context-switch trace line:
+
+ ------------------------------------------
+ | 1) migration/0--1 => sshd-1755
+ ------------------------------------------
+
+         */
+        ret = trace_seq_printf(s,
+                " ------------------------------------------\n");
+        ret += trace_seq_printf(s, " | %d) %s-%d => %s-%d\n",
+                                cpu, prev_comm, prev_pid, comm, pid);
+        ret += trace_seq_printf(s,
+                " ------------------------------------------\n\n");
+        return ret;
 }
 
+static bool
+trace_branch_is_leaf(struct trace_iterator *iter,
+                struct ftrace_graph_ent_entry *curr)
+{
+        struct ring_buffer_iter *ring_iter;
+        struct ring_buffer_event *event;
+        struct ftrace_graph_ret_entry *next;
+
+        ring_iter = iter->buffer_iter[iter->cpu];
+
+        if (!ring_iter)
+                return false;
+
+        event = ring_buffer_iter_peek(ring_iter, NULL);
+
+        if (!event)
+                return false;
+
+        next = ring_buffer_event_data(event);
+
+        if (next->ent.type != TRACE_GRAPH_RET)
+                return false;
+
+        if (curr->ent.pid != next->ent.pid ||
+                        curr->graph_ent.func != next->ret.func)
+                return false;
+
+        return true;
+}
+
+
+static inline int
+print_graph_duration(unsigned long long duration, struct trace_seq *s)
+{
+        unsigned long nsecs_rem = do_div(duration, 1000);
+        return trace_seq_printf(s, "%4llu.%3lu us | ", duration, nsecs_rem);
+}
+
+/* Signal a overhead of time execution to the output */
+static int
+print_graph_overhead(unsigned long long duration, struct trace_seq *s)
+{
+        /* Duration exceeded 100 msecs */
+        if (duration > 100000ULL)
+                return trace_seq_printf(s, "! ");
+
+        /* Duration exceeded 10 msecs */
+        if (duration > 10000ULL)
+                return trace_seq_printf(s, "+ ");
+
+        return trace_seq_printf(s, " ");
+}
+
+/* Case of a leaf function on its call entry */
 static enum print_line_t
-print_graph_entry(struct ftrace_graph_ent *call, struct trace_seq *s,
-                struct trace_entry *ent, int cpu)
+print_graph_entry_leaf(struct trace_iterator *iter,
+                struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
 {
-        int i;
+        struct ftrace_graph_ret_entry *ret_entry;
+        struct ftrace_graph_ret *graph_ret;
+        struct ring_buffer_event *event;
+        struct ftrace_graph_ent *call;
+        unsigned long long duration;
         int ret;
+        int i;
 
-        if (!verif_pid(s, ent->pid, cpu))
+        event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
+        ret_entry = ring_buffer_event_data(event);
+        graph_ret = &ret_entry->ret;
+        call = &entry->graph_ent;
+        duration = graph_ret->rettime - graph_ret->calltime;
+
+        /* Must not exceed 8 characters: 9999.999 us */
+        if (duration > 10000000ULL)
+                duration = 9999999ULL;
+
+        /* Overhead */
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+                ret = print_graph_overhead(duration, s);
+                if (!ret)
+                        return TRACE_TYPE_PARTIAL_LINE;
+        }
+
+        /* Duration */
+        ret = print_graph_duration(duration, s);
+        if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
 
-        ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
+        /* Function */
+        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
+                ret = trace_seq_printf(s, " ");
+                if (!ret)
+                        return TRACE_TYPE_PARTIAL_LINE;
+        }
+
+        ret = seq_print_ip_sym(s, call->func, 0);
+        if (!ret)
+                return TRACE_TYPE_PARTIAL_LINE;
+
+        ret = trace_seq_printf(s, "();\n");
         if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
 
+        return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
+                struct trace_seq *s)
+{
+        int i;
+        int ret;
+        struct ftrace_graph_ent *call = &entry->graph_ent;
+
+        /* No overhead */
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+                ret = trace_seq_printf(s, " ");
+                if (!ret)
+                        return TRACE_TYPE_PARTIAL_LINE;
+        }
+
+        /* No time */
+        ret = trace_seq_printf(s, " | ");
+
+        /* Function */
         for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                 ret = trace_seq_printf(s, " ");
                 if (!ret)
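[Editorial aside, not part of the commit: the hunk above introduces print_graph_duration() and print_graph_overhead(), which produce the new duration column and the "+"/"!" markers. The following is a minimal standalone userspace sketch of that formatting, assuming the rettime/calltime deltas are nanoseconds (print_graph_duration divides by 1000 and prints "us", so the 10000 and 100000 thresholds then correspond to 10 us and 100 us); the sample durations and the function name are made up.]

/*
 * Standalone illustration, not kernel code: mimics the overhead marker and
 * duration column added by this patch. Sample values are invented and
 * assumed to be nanosecond deltas.
 */
#include <stdio.h>

static void show_overhead(unsigned long long duration)
{
        if (duration > 100000ULL)       /* same threshold as print_graph_overhead() */
                printf("! ");
        else if (duration > 10000ULL)
                printf("+ ");
        else
                printf("  ");
}

static void show_duration(unsigned long long duration)
{
        /* userspace stand-in for do_div(duration, 1000) */
        unsigned long nsecs_rem = (unsigned long)(duration % 1000);

        printf("%4llu.%3lu us | ", duration / 1000, nsecs_rem);
}

int main(void)
{
        unsigned long long samples[] = { 712ULL, 9045ULL, 25380ULL, 1200345ULL };
        int i;

        for (i = 0; i < 4; i++) {
                show_overhead(samples[i]);
                show_duration(samples[i]);
                printf("some_function();\n");
        }
        return 0;
}

With those samples the output looks like "+   25.380 us | some_function();", i.e. the marker and duration columns that the leaf and return printers above emit in front of the function name.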
@@ -96,37 +276,82 @@ print_graph_entry(struct ftrace_graph_ent *call, struct trace_seq *s,
         ret = trace_seq_printf(s, "() {\n");
         if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
+
         return TRACE_TYPE_HANDLED;
 }
 
 static enum print_line_t
+print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
+                struct trace_iterator *iter, int cpu)
+{
+        int ret;
+        struct trace_entry *ent = iter->ent;
+
+        /* Pid */
+        if (!verif_pid(s, ent->pid, cpu))
+                return TRACE_TYPE_PARTIAL_LINE;
+
+        /* Cpu */
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+                ret = print_graph_cpu(s, cpu);
+                if (!ret)
+                        return TRACE_TYPE_PARTIAL_LINE;
+        }
+
+        if (trace_branch_is_leaf(iter, field))
+                return print_graph_entry_leaf(iter, field, s);
+        else
+                return print_graph_entry_nested(field, s);
+
+}
+
+static enum print_line_t
 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                 struct trace_entry *ent, int cpu)
 {
         int i;
         int ret;
+        unsigned long long duration = trace->rettime - trace->calltime;
+
+        /* Must not exceed 8 characters: xxxx.yyy us */
+        if (duration > 10000000ULL)
+                duration = 9999999ULL;
 
+        /* Pid */
         if (!verif_pid(s, ent->pid, cpu))
                 return TRACE_TYPE_PARTIAL_LINE;
 
-        ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
+        /* Cpu */
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+                ret = print_graph_cpu(s, cpu);
+                if (!ret)
+                        return TRACE_TYPE_PARTIAL_LINE;
+        }
+
+        /* Overhead */
+        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+                ret = print_graph_overhead(duration, s);
+                if (!ret)
+                        return TRACE_TYPE_PARTIAL_LINE;
+        }
+
+        /* Duration */
+        ret = print_graph_duration(duration, s);
         if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
 
+        /* Closing brace */
         for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
                 ret = trace_seq_printf(s, " ");
                 if (!ret)
                         return TRACE_TYPE_PARTIAL_LINE;
         }
 
-        ret = trace_seq_printf(s, "} ");
-        if (!ret)
-                return TRACE_TYPE_PARTIAL_LINE;
-
-        ret = trace_seq_printf(s, "%llu\n", trace->rettime - trace->calltime);
+        ret = trace_seq_printf(s, "}\n");
         if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
 
+        /* Overrun */
         if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
                 ret = trace_seq_printf(s, " (Overruns: %lu)\n",
                                 trace->overrun);
@@ -146,7 +371,7 @@ print_graph_function(struct trace_iterator *iter)
         case TRACE_GRAPH_ENT: {
                 struct ftrace_graph_ent_entry *field;
                 trace_assign_type(field, entry);
-                return print_graph_entry(&field->graph_ent, s, entry,
+                return print_graph_entry(field, s, iter,
                                          iter->cpu);
         }
         case TRACE_GRAPH_RET: {
@@ -160,7 +385,7 @@ print_graph_function(struct trace_iterator *iter)
 }
 
 static struct tracer graph_trace __read_mostly = {
-        .name        = "function-graph",
+        .name        = "function_graph",
         .init        = graph_trace_init,
         .reset       = graph_trace_reset,
         .print_line  = print_graph_function,
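[Editorial aside, not part of the commit: with this patch applied the tracer is selected by its new name "function_graph", and the flags registered above are named funcgraph-overrun, funcgraph-cpu and funcgraph-overhead. Below is a rough userspace sketch of driving it through the debugfs tracing files; the mount point /sys/kernel/debug and the convention that tracer flags are toggled through trace_options (with a "no" prefix to clear them) are assumptions based on the usual ftrace interface, not something shown in this diff.]

/*
 * Hypothetical helper, not part of the patch: selects the function_graph
 * tracer and toggles the new funcgraph-* flags. Assumes debugfs is mounted
 * at /sys/kernel/debug and that tracer flags are written to trace_options.
 */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return -1;
        }
        fputs(val, f);
        fclose(f);
        return 0;
}

int main(void)
{
        /* The tracer was renamed from "function-graph" to "function_graph". */
        write_str("/sys/kernel/debug/tracing/current_tracer", "function_graph");

        /* funcgraph-cpu and funcgraph-overhead default to on; overrun stays
         * off unless enabled. The "no" prefix clears a flag. */
        write_str("/sys/kernel/debug/tracing/trace_options", "funcgraph-overrun");
        write_str("/sys/kernel/debug/tracing/trace_options", "nofuncgraph-cpu");
        return 0;
}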
