Diffstat (limited to 'kernel/trace/trace_functions_graph.c')

-rw-r--r--  kernel/trace/trace_functions_graph.c | 635
1 file changed, 427 insertions(+), 208 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 930c08e5b38e..d28687e7b3a7 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -1,7 +1,7 @@
 /*
  *
  * Function graph tracer.
- * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
  * Mostly borrowed from function tracer which
  * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
  *
@@ -12,6 +12,12 @@
 #include <linux/fs.h>
 
 #include "trace.h"
+#include "trace_output.h"
+
+struct fgraph_data {
+	pid_t		last_pid;
+	int		depth;
+};
 
 #define TRACE_GRAPH_INDENT	2
 
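The new fgraph_data struct replaces the old static last_pid[NR_CPUS] array with per-CPU state allocated per trace session. For orientation, here is a hedged sketch of the alloc_percpu()/per_cpu_ptr() pattern it is used with; the real work is done by graph_trace_open() later in this diff, and the function name below is hypothetical:

	/* Sketch only, not part of the patch: how fgraph_data is
	 * allocated and initialized per CPU. */
	static void example_init_fgraph_data(struct trace_iterator *iter)
	{
		struct fgraph_data *data = alloc_percpu(struct fgraph_data);
		int cpu;

		if (!data)
			return;	/* readers must tolerate a NULL iter->private */

		for_each_possible_cpu(cpu) {
			per_cpu_ptr(data, cpu)->last_pid = -1;	/* nothing traced yet */
			per_cpu_ptr(data, cpu)->depth = 0;
		}
		iter->private = data;
	}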
@@ -20,9 +26,11 @@
 #define TRACE_GRAPH_PRINT_CPU		0x2
 #define TRACE_GRAPH_PRINT_OVERHEAD	0x4
 #define TRACE_GRAPH_PRINT_PROC		0x8
+#define TRACE_GRAPH_PRINT_DURATION	0x10
+#define TRACE_GRAPH_PRINT_ABS_TIME	0X20
 
 static struct tracer_opt trace_opts[] = {
-	/* Display overruns ? */
+	/* Display overruns? (for self-debug purpose) */
 	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
 	/* Display CPU ? */
 	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
@@ -30,26 +38,103 @@ static struct tracer_opt trace_opts[] = {
 	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
 	/* Display proc name/pid */
 	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
+	/* Display duration of execution */
+	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
+	/* Display absolute time of an entry */
+	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
 	{ } /* Empty entry */
 };
 
 static struct tracer_flags tracer_flags = {
 	/* Don't display overruns and proc by default */
-	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
+	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
+	       TRACE_GRAPH_PRINT_DURATION,
 	.opts = trace_opts
 };
 
 /* pid on the last trace processed */
-static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
 
-static int graph_trace_init(struct trace_array *tr)
+
+/* Add a function return address to the trace stack on thread info.*/
+int
+ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
 {
-	int cpu, ret;
+	unsigned long long calltime;
+	int index;
+
+	if (!current->ret_stack)
+		return -EBUSY;
+
+	/* The return trace stack is full */
+	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+		atomic_inc(&current->trace_overrun);
+		return -EBUSY;
+	}
+
+	calltime = trace_clock_local();
+
+	index = ++current->curr_ret_stack;
+	barrier();
+	current->ret_stack[index].ret = ret;
+	current->ret_stack[index].func = func;
+	current->ret_stack[index].calltime = calltime;
+	*depth = index;
+
+	return 0;
+}
+
+/* Retrieve a function return address to the trace stack on thread info.*/
+void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+{
+	int index;
+
+	index = current->curr_ret_stack;
+
+	if (unlikely(index < 0)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic, otherwise we have no where to go */
+		*ret = (unsigned long)panic;
+		return;
+	}
 
-	for_each_online_cpu(cpu)
-		tracing_reset(tr, cpu);
+	*ret = current->ret_stack[index].ret;
+	trace->func = current->ret_stack[index].func;
+	trace->calltime = current->ret_stack[index].calltime;
+	trace->overrun = atomic_read(&current->trace_overrun);
+	trace->depth = index;
+	barrier();
+	current->curr_ret_stack--;
 
-	ret = register_ftrace_graph(&trace_graph_return,
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+	struct ftrace_graph_ret trace;
+	unsigned long ret;
+
+	ftrace_pop_return_trace(&trace, &ret);
+	trace.rettime = trace_clock_local();
+	ftrace_graph_return(&trace);
+
+	if (unlikely(!ret)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic. What else to do? */
+		ret = (unsigned long)panic;
+	}
+
+	return ret;
+}
+
+static int graph_trace_init(struct trace_array *tr)
+{
+	int ret = register_ftrace_graph(&trace_graph_return,
 					&trace_graph_entry);
 	if (ret)
 		return ret;
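For orientation, here is a hedged sketch of how the push/return helpers above pair up at runtime. The real callers are the architecture's mcount entry stub and return trampoline, which are assembly; the function names below are hypothetical C stand-ins:

	/* Hypothetical illustration, not from this patch. */
	static void example_trace_entry(unsigned long func, unsigned long ret_addr)
	{
		int depth;

		/* Save the real return address; -EBUSY means the per-task
		 * ret_stack is missing or full and the call stays untraced. */
		if (ftrace_push_return_trace(ret_addr, func, &depth) == -EBUSY)
			return;

		/* The arch code would now patch the on-stack return address
		 * to point at its return trampoline. */
	}

	static unsigned long example_return_trampoline(void)
	{
		/* Emits the return event and yields the saved address the
		 * trampoline must jump back to. */
		return ftrace_return_to_handler();
	}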
@@ -112,15 +197,15 @@ print_graph_cpu(struct trace_seq *s, int cpu)
 static enum print_line_t
 print_graph_proc(struct trace_seq *s, pid_t pid)
 {
-	int i;
-	int ret;
-	int len;
-	char comm[8];
-	int spaces = 0;
+	char comm[TASK_COMM_LEN];
 	/* sign + log10(MAX_INT) + '\0' */
 	char pid_str[11];
+	int spaces = 0;
+	int ret;
+	int len;
+	int i;
 
-	strncpy(comm, trace_find_cmdline(pid), 7);
+	trace_find_cmdline(pid, comm);
 	comm[7] = '\0';
 	sprintf(pid_str, "%d", pid);
 
@@ -153,17 +238,25 @@ print_graph_proc(struct trace_seq *s, pid_t pid)
 
 /* If the pid changed since the last trace, output this event */
 static enum print_line_t
-verif_pid(struct trace_seq *s, pid_t pid, int cpu)
+verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
 {
 	pid_t prev_pid;
+	pid_t *last_pid;
 	int ret;
 
-	if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
+	if (!data)
 		return TRACE_TYPE_HANDLED;
 
-	prev_pid = last_pid[cpu];
-	last_pid[cpu] = pid;
+	last_pid = &(per_cpu_ptr(data, cpu)->last_pid);
 
+	if (*last_pid == pid)
+		return TRACE_TYPE_HANDLED;
+
+	prev_pid = *last_pid;
+	*last_pid = pid;
+
+	if (prev_pid == -1)
+		return TRACE_TYPE_HANDLED;
 	/*
 	 * Context-switch trace line:
 
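The unchanged comment that this hunk trails into illustrates the annotation verif_pid() emits. With the CPU column enabled it comes out roughly like this (a sample, not taken from a real trace):

	 ------------------------------------------
	 | 1)  migration/0--1  =>  sleep-2794
	 ------------------------------------------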
@@ -175,34 +268,34 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu)
 	ret = trace_seq_printf(s,
 		" ------------------------------------------\n");
 	if (!ret)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	ret = print_graph_cpu(s, cpu);
 	if (ret == TRACE_TYPE_PARTIAL_LINE)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	ret = print_graph_proc(s, prev_pid);
 	if (ret == TRACE_TYPE_PARTIAL_LINE)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	ret = trace_seq_printf(s, " => ");
 	if (!ret)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	ret = print_graph_proc(s, pid);
 	if (ret == TRACE_TYPE_PARTIAL_LINE)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	ret = trace_seq_printf(s,
 		"\n ------------------------------------------\n\n");
 	if (!ret)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;
 
-	return ret;
+	return TRACE_TYPE_HANDLED;
 }
 
-static bool
-trace_branch_is_leaf(struct trace_iterator *iter,
+static struct ftrace_graph_ret_entry *
+get_return_for_leaf(struct trace_iterator *iter,
 		struct ftrace_graph_ent_entry *curr)
 {
 	struct ring_buffer_iter *ring_iter;
@@ -211,65 +304,123 @@ trace_branch_is_leaf(struct trace_iterator *iter,
 
 	ring_iter = iter->buffer_iter[iter->cpu];
 
-	if (!ring_iter)
-		return false;
-
-	event = ring_buffer_iter_peek(ring_iter, NULL);
+	/* First peek to compare current entry and the next one */
+	if (ring_iter)
+		event = ring_buffer_iter_peek(ring_iter, NULL);
+	else {
+		/* We need to consume the current entry to see the next one */
+		ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
+		event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
+					NULL);
+	}
 
 	if (!event)
-		return false;
+		return NULL;
 
 	next = ring_buffer_event_data(event);
 
 	if (next->ent.type != TRACE_GRAPH_RET)
-		return false;
+		return NULL;
 
 	if (curr->ent.pid != next->ent.pid ||
 	    curr->graph_ent.func != next->ret.func)
-		return false;
+		return NULL;
+
+	/* this is a leaf, now advance the iterator */
+	if (ring_iter)
+		ring_buffer_read(ring_iter, NULL);
+
+	return next;
+}
+
+/* Signal a overhead of time execution to the output */
+static int
+print_graph_overhead(unsigned long long duration, struct trace_seq *s)
+{
+	/* If duration disappear, we don't need anything */
+	if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
+		return 1;
+
+	/* Non nested entry or return */
+	if (duration == -1)
+		return trace_seq_printf(s, "            ");
+
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+		/* Duration exceeded 100 msecs */
+		if (duration > 100000ULL)
+			return trace_seq_printf(s, "! ");
 
-	return true;
+		/* Duration exceeded 10 msecs */
+		if (duration > 10000ULL)
+			return trace_seq_printf(s, "+ ");
+	}
+
+	return trace_seq_printf(s, "  ");
+}
+
+static int print_graph_abs_time(u64 t, struct trace_seq *s)
+{
+	unsigned long usecs_rem;
+
+	usecs_rem = do_div(t, NSEC_PER_SEC);
+	usecs_rem /= 1000;
+
+	return trace_seq_printf(s, "%5lu.%06lu |  ",
+			(unsigned long)t, usecs_rem);
 }
 
 static enum print_line_t
-print_graph_irq(struct trace_seq *s, unsigned long addr,
+print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 		enum trace_type type, int cpu, pid_t pid)
 {
 	int ret;
+	struct trace_seq *s = &iter->seq;
 
 	if (addr < (unsigned long)__irqentry_text_start ||
 		addr >= (unsigned long)__irqentry_text_end)
 		return TRACE_TYPE_UNHANDLED;
 
-	if (type == TRACE_GRAPH_ENT) {
-		ret = trace_seq_printf(s, "==========> |  ");
-	} else {
-		/* Cpu */
-		if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
-			ret = print_graph_cpu(s, cpu);
-			if (ret == TRACE_TYPE_PARTIAL_LINE)
-				return TRACE_TYPE_PARTIAL_LINE;
-		}
-		/* Proc */
-		if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
-			ret = print_graph_proc(s, pid);
-			if (ret == TRACE_TYPE_PARTIAL_LINE)
-				return TRACE_TYPE_PARTIAL_LINE;
+	/* Absolute time */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+		ret = print_graph_abs_time(iter->ts, s);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
 
-			ret = trace_seq_printf(s, " | ");
-			if (!ret)
-				return TRACE_TYPE_PARTIAL_LINE;
-		}
+	/* Cpu */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+		ret = print_graph_cpu(s, cpu);
+		if (ret == TRACE_TYPE_PARTIAL_LINE)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+	/* Proc */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+		ret = print_graph_proc(s, pid);
+		if (ret == TRACE_TYPE_PARTIAL_LINE)
+			return TRACE_TYPE_PARTIAL_LINE;
+		ret = trace_seq_printf(s, " | ");
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
 
 	/* No overhead */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-		ret = trace_seq_printf(s, "  ");
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
+	ret = print_graph_overhead(-1, s);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	if (type == TRACE_GRAPH_ENT)
+		ret = trace_seq_printf(s, "==========>");
+	else
+		ret = trace_seq_printf(s, "<==========");
+
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Don't close the duration column if haven't one */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+		trace_seq_printf(s, " |");
+	ret = trace_seq_printf(s, "\n");
 
-	ret = trace_seq_printf(s, "<========== |\n");
-	}
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 	return TRACE_TYPE_HANDLED;
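An aside on print_graph_abs_time() above: do_div() divides its u64 argument in place and returns the remainder, which is how a nanosecond timestamp splits into the seconds.microseconds column. A minimal sketch with hypothetical values (kernel context assumed for u64, do_div, NSEC_PER_SEC):

	/* Sketch: splitting a ns timestamp as print_graph_abs_time() does. */
	static void example_split_timestamp(void)
	{
		u64 t = 5123456789ULL;			/* 5.123456789 seconds */
		unsigned long usecs_rem;

		usecs_rem = do_div(t, NSEC_PER_SEC);	/* t = 5, returns 123456789 */
		usecs_rem /= 1000;			/* remainder ns -> 123456 us */
		printk(KERN_INFO "%5lu.%06lu\n", (unsigned long)t, usecs_rem);
	}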
@@ -288,7 +439,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
 	sprintf(msecs_str, "%lu", (unsigned long) duration);
 
 	/* Print msecs */
-	ret = trace_seq_printf(s, msecs_str);
+	ret = trace_seq_printf(s, "%s", msecs_str);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
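The one-line change above is a classic format-string fix: msecs_str only ever holds digits here, but passing a buffer as the format argument would misparse any '%' it contained. An illustration with hypothetical buffer contents:

	static void example_format_string(struct trace_seq *s)
	{
		char buf[16] = "50%";		/* imagine a stray '%' */

		trace_seq_printf(s, buf);	/* wrong: buf parsed as a format */
		trace_seq_printf(s, "%s", buf);	/* right: buf printed verbatim */
	}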
@@ -321,52 +472,47 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
 
 }
 
-/* Signal a overhead of time execution to the output */
-static int
-print_graph_overhead(unsigned long long duration, struct trace_seq *s)
-{
-	/* Duration exceeded 100 msecs */
-	if (duration > 100000ULL)
-		return trace_seq_printf(s, "! ");
-
-	/* Duration exceeded 10 msecs */
-	if (duration > 10000ULL)
-		return trace_seq_printf(s, "+ ");
-
-	return trace_seq_printf(s, "  ");
-}
-
 /* Case of a leaf function on its call entry */
 static enum print_line_t
 print_graph_entry_leaf(struct trace_iterator *iter,
-		struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
+		struct ftrace_graph_ent_entry *entry,
+		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
 {
-	struct ftrace_graph_ret_entry *ret_entry;
+	struct fgraph_data *data = iter->private;
 	struct ftrace_graph_ret *graph_ret;
-	struct ring_buffer_event *event;
 	struct ftrace_graph_ent *call;
 	unsigned long long duration;
 	int ret;
 	int i;
 
-	event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
-	ret_entry = ring_buffer_event_data(event);
 	graph_ret = &ret_entry->ret;
 	call = &entry->graph_ent;
 	duration = graph_ret->rettime - graph_ret->calltime;
 
-	/* Overhead */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-		ret = print_graph_overhead(duration, s);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
+	if (data) {
+		int cpu = iter->cpu;
+		int *depth = &(per_cpu_ptr(data, cpu)->depth);
+
+		/*
+		 * Comments display at + 1 to depth. Since
+		 * this is a leaf function, keep the comments
+		 * equal to this depth.
+		 */
+		*depth = call->depth - 1;
 	}
 
-	/* Duration */
-	ret = print_graph_duration(duration, s);
-	if (ret == TRACE_TYPE_PARTIAL_LINE)
-		return TRACE_TYPE_PARTIAL_LINE;
+	/* Overhead */
+	ret = print_graph_overhead(duration, s);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
 
+	/* Duration */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+		ret = print_graph_duration(duration, s);
+		if (ret == TRACE_TYPE_PARTIAL_LINE)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
 	/* Function */
 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
 		ret = trace_seq_printf(s, " ");
@@ -386,33 +532,34 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 }
 
 static enum print_line_t
-print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
-		struct trace_seq *s, pid_t pid, int cpu)
+print_graph_entry_nested(struct trace_iterator *iter,
+		struct ftrace_graph_ent_entry *entry,
+		struct trace_seq *s, int cpu)
 {
-	int i;
-	int ret;
 	struct ftrace_graph_ent *call = &entry->graph_ent;
+	struct fgraph_data *data = iter->private;
+	int ret;
+	int i;
 
-	/* No overhead */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-		ret = trace_seq_printf(s, "  ");
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
+	if (data) {
+		int cpu = iter->cpu;
+		int *depth = &(per_cpu_ptr(data, cpu)->depth);
+
+		*depth = call->depth;
 	}
 
-	/* Interrupt */
-	ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid);
-	if (ret == TRACE_TYPE_UNHANDLED) {
-		/* No time */
+	/* No overhead */
+	ret = print_graph_overhead(-1, s);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* No time */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
 		ret = trace_seq_printf(s, "            |  ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
-	} else {
-		if (ret == TRACE_TYPE_PARTIAL_LINE)
-			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-
 	/* Function */
 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
 		ret = trace_seq_printf(s, " ");
@@ -428,20 +575,40 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
-	return TRACE_TYPE_HANDLED;
+	/*
+	 * we already consumed the current entry to check the next one
+	 * and see if this is a leaf.
+	 */
+	return TRACE_TYPE_NO_CONSUME;
 }
 
 static enum print_line_t
-print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
-		struct trace_iterator *iter, int cpu)
+print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
+		     int type, unsigned long addr)
 {
-	int ret;
+	struct fgraph_data *data = iter->private;
 	struct trace_entry *ent = iter->ent;
+	int cpu = iter->cpu;
+	int ret;
 
 	/* Pid */
-	if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
+	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
 		return TRACE_TYPE_PARTIAL_LINE;
 
+	if (type) {
+		/* Interrupt */
+		ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
+		if (ret == TRACE_TYPE_PARTIAL_LINE)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	/* Absolute time */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+		ret = print_graph_abs_time(iter->ts, s);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
 	/* Cpu */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
 		ret = print_graph_cpu(s, cpu);
@@ -460,54 +627,65 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 		return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	if (trace_branch_is_leaf(iter, field))
-		return print_graph_entry_leaf(iter, field, s);
+	return 0;
+}
+
+static enum print_line_t
+print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
+			struct trace_iterator *iter)
+{
+	int cpu = iter->cpu;
+	struct ftrace_graph_ent *call = &field->graph_ent;
+	struct ftrace_graph_ret_entry *leaf_ret;
+
+	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	leaf_ret = get_return_for_leaf(iter, field);
+	if (leaf_ret)
+		return print_graph_entry_leaf(iter, field, leaf_ret, s);
 	else
-		return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
+		return print_graph_entry_nested(iter, field, s, cpu);
 
 }
 
 static enum print_line_t
 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
-		struct trace_entry *ent, int cpu)
+		struct trace_entry *ent, struct trace_iterator *iter)
 {
-	int i;
-	int ret;
 	unsigned long long duration = trace->rettime - trace->calltime;
+	struct fgraph_data *data = iter->private;
+	pid_t pid = ent->pid;
+	int cpu = iter->cpu;
+	int ret;
+	int i;
 
-	/* Pid */
-	if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
-		return TRACE_TYPE_PARTIAL_LINE;
+	if (data) {
+		int cpu = iter->cpu;
+		int *depth = &(per_cpu_ptr(data, cpu)->depth);
 
-	/* Cpu */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
-		ret = print_graph_cpu(s, cpu);
-		if (ret == TRACE_TYPE_PARTIAL_LINE)
-			return TRACE_TYPE_PARTIAL_LINE;
+		/*
+		 * Comments display at + 1 to depth. This is the
+		 * return from a function, we now want the comments
+		 * to display at the same level of the bracket.
+		 */
+		*depth = trace->depth - 1;
 	}
 
-	/* Proc */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
-		ret = print_graph_proc(s, ent->pid);
-		if (ret == TRACE_TYPE_PARTIAL_LINE)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		ret = trace_seq_printf(s, " | ");
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
+	if (print_graph_prologue(iter, s, 0, 0))
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* Overhead */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-		ret = print_graph_overhead(duration, s);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
+	ret = print_graph_overhead(duration, s);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* Duration */
-	ret = print_graph_duration(duration, s);
-	if (ret == TRACE_TYPE_PARTIAL_LINE)
-		return TRACE_TYPE_PARTIAL_LINE;
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+		ret = print_graph_duration(duration, s);
+		if (ret == TRACE_TYPE_PARTIAL_LINE)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
 
 	/* Closing brace */
 	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
@@ -528,7 +706,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 		return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid);
+	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
 	if (ret == TRACE_TYPE_PARTIAL_LINE)
 		return TRACE_TYPE_PARTIAL_LINE;
 
@@ -536,61 +714,73 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 }
 
 static enum print_line_t
-print_graph_comment(struct print_entry *trace, struct trace_seq *s,
-		struct trace_entry *ent, struct trace_iterator *iter)
+print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
+		struct trace_iterator *iter)
 {
-	int i;
+	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
+	struct fgraph_data *data = iter->private;
+	struct trace_event *event;
+	int depth = 0;
 	int ret;
+	int i;
 
-	/* Pid */
-	if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE)
-		return TRACE_TYPE_PARTIAL_LINE;
-
-	/* Cpu */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
-		ret = print_graph_cpu(s, iter->cpu);
-		if (ret == TRACE_TYPE_PARTIAL_LINE)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
-
-	/* Proc */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
-		ret = print_graph_proc(s, ent->pid);
-		if (ret == TRACE_TYPE_PARTIAL_LINE)
-			return TRACE_TYPE_PARTIAL_LINE;
+	if (data)
+		depth = per_cpu_ptr(data, iter->cpu)->depth;
 
-		ret = trace_seq_printf(s, " | ");
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
+	if (print_graph_prologue(iter, s, 0, 0))
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* No overhead */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-		ret = trace_seq_printf(s, "  ");
+	ret = print_graph_overhead(-1, s);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* No time */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+		ret = trace_seq_printf(s, "            |  ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	/* No time */
-	ret = trace_seq_printf(s, "            |  ");
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
-
 	/* Indentation */
-	if (trace->depth > 0)
-		for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
+	if (depth > 0)
+		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
 			ret = trace_seq_printf(s, " ");
 			if (!ret)
 				return TRACE_TYPE_PARTIAL_LINE;
 		}
 
 	/* The comment */
-	ret = trace_seq_printf(s, "/* %s", trace->buf);
+	ret = trace_seq_printf(s, "/* ");
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
-	if (ent->flags & TRACE_FLAG_CONT)
-		trace_seq_print_cont(s, iter);
+	switch (iter->ent->type) {
+	case TRACE_BPRINT:
+		ret = trace_print_bprintk_msg_only(iter);
+		if (ret != TRACE_TYPE_HANDLED)
+			return ret;
+		break;
+	case TRACE_PRINT:
+		ret = trace_print_printk_msg_only(iter);
+		if (ret != TRACE_TYPE_HANDLED)
+			return ret;
+		break;
+	default:
+		event = ftrace_find_event(ent->type);
+		if (!event)
+			return TRACE_TYPE_UNHANDLED;
+
+		ret = event->trace(iter, sym_flags);
+		if (ret != TRACE_TYPE_HANDLED)
+			return ret;
+	}
+
+	/* Strip ending newline */
+	if (s->buffer[s->len - 1] == '\n') {
+		s->buffer[s->len - 1] = '\0';
+		s->len--;
+	}
 
 	ret = trace_seq_printf(s, " */\n");
 	if (!ret)
@@ -603,62 +793,91 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s,
 enum print_line_t
 print_graph_function(struct trace_iterator *iter)
 {
-	struct trace_seq *s = &iter->seq;
 	struct trace_entry *entry = iter->ent;
+	struct trace_seq *s = &iter->seq;
 
 	switch (entry->type) {
 	case TRACE_GRAPH_ENT: {
 		struct ftrace_graph_ent_entry *field;
 		trace_assign_type(field, entry);
-		return print_graph_entry(field, s, iter,
-					 iter->cpu);
+		return print_graph_entry(field, s, iter);
 	}
 	case TRACE_GRAPH_RET: {
 		struct ftrace_graph_ret_entry *field;
 		trace_assign_type(field, entry);
-		return print_graph_return(&field->ret, s, entry, iter->cpu);
-	}
-	case TRACE_PRINT: {
-		struct print_entry *field;
-		trace_assign_type(field, entry);
-		return print_graph_comment(field, s, entry, iter);
+		return print_graph_return(&field->ret, s, entry, iter);
 	}
 	default:
-		return TRACE_TYPE_UNHANDLED;
+		return print_graph_comment(s, entry, iter);
 	}
+
+	return TRACE_TYPE_HANDLED;
 }
 
 static void print_graph_headers(struct seq_file *s)
 {
 	/* 1st line */
 	seq_printf(s, "# ");
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
+		seq_printf(s, "     TIME       ");
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
-		seq_printf(s, "CPU ");
+		seq_printf(s, "CPU");
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
-		seq_printf(s, "TASK/PID     ");
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD)
-		seq_printf(s, "OVERHEAD/");
-	seq_printf(s, "DURATION            FUNCTION CALLS\n");
+		seq_printf(s, "  TASK/PID      ");
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+		seq_printf(s, "  DURATION   ");
+	seq_printf(s, "               FUNCTION CALLS\n");
 
 	/* 2nd line */
 	seq_printf(s, "# ");
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
+		seq_printf(s, "      |         ");
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
 		seq_printf(s, "|  ");
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
-		seq_printf(s, "|      |    ");
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-		seq_printf(s, "|        ");
-		seq_printf(s, "|                   |   |   |   |\n");
-	} else
-		seq_printf(s, "    |               |   |   |   |\n");
+		seq_printf(s, "  |    |        ");
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+		seq_printf(s, "   |   |      ");
+	seq_printf(s, "               |   |   |   |\n");
+}
+
+static void graph_trace_open(struct trace_iterator *iter)
+{
+	/* pid and depth on the last trace processed */
+	struct fgraph_data *data = alloc_percpu(struct fgraph_data);
+	int cpu;
+
+	if (!data)
+		pr_warning("function graph tracer: not enough memory\n");
+	else
+		for_each_possible_cpu(cpu) {
+			pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
+			int *depth = &(per_cpu_ptr(data, cpu)->depth);
+			*pid = -1;
+			*depth = 0;
+		}
+
+	iter->private = data;
 }
+
+static void graph_trace_close(struct trace_iterator *iter)
+{
+	free_percpu(iter->private);
+}
+
 static struct tracer graph_trace __read_mostly = {
 	.name		= "function_graph",
-	.init		= graph_trace_init,
-	.reset		= graph_trace_reset,
+	.open		= graph_trace_open,
+	.close		= graph_trace_close,
+	.wait_pipe	= poll_wait_pipe,
+	.init		= graph_trace_init,
+	.reset		= graph_trace_reset,
 	.print_line	= print_graph_function,
 	.print_header	= print_graph_headers,
 	.flags		= &tracer_flags,
+#ifdef CONFIG_FTRACE_SELFTEST
+	.selftest	= trace_selftest_startup_function_graph,
+#endif
 };
 
 static __init int init_graph_trace(void)