---
 kernel/trace/trace.c                 |  4 ++--
 kernel/trace/trace.h                 |  7 ++++---
 kernel/trace/trace_functions_graph.c | 48 ++++++++++++++++++-----------------
 3 files changed, 36 insertions(+), 23 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 93040f1bef13..5b1e9a9e9906 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2468,8 +2468,8 @@ waitagain:
 			iter->seq.len = len;
 			break;
 		}
-
-		trace_consume(iter);
+		if (ret != TRACE_TYPE_NO_CONSUME)
+			trace_consume(iter);
 
 		if (iter->seq.len >= cnt)
 			break;
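[Note] The trace.c hunk above is the consumer side of the new TRACE_TYPE_NO_CONSUME value: when a print handler has already consumed the current entry (in order to peek at its successor), the read loop must not call trace_consume() a second time. Below is a minimal, self-contained user-space sketch of that pattern; the names (cursor, consume, print_event) are invented for illustration and are not kernel APIs.

	#include <stdio.h>

	enum print_line_t { PARTIAL = 0, HANDLED = 1, UNHANDLED = 2, NO_CONSUME = 3 };

	struct cursor {
		const int *ev;
		int len;
		int pos;
	};

	/* Stand-in for trace_consume(): advance past the current entry. */
	static void consume(struct cursor *c)
	{
		if (c->pos < c->len)
			c->pos++;
	}

	/* A handler that, for even events, consumes the entry itself (say, to
	 * peek at the next one) and reports that with NO_CONSUME. */
	static enum print_line_t print_event(struct cursor *c)
	{
		int ev = c->ev[c->pos];

		if (ev % 2 == 0) {
			consume(c);		/* the handler already moved past it */
			printf("event %d (consumed by handler)\n", ev);
			return NO_CONSUME;
		}
		printf("event %d\n", ev);
		return HANDLED;
	}

	int main(void)
	{
		const int events[] = { 1, 2, 3, 4, 5 };
		struct cursor c = { events, 5, 0 };

		while (c.pos < c.len) {
			enum print_line_t ret = print_event(&c);

			/* Mirrors the trace.c hunk: consume only if the
			 * handler did not already do it. */
			if (ret != NO_CONSUME)
				consume(&c);
		}
		return 0;
	}

Each event is printed and consumed exactly once, whichever side does the consuming.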
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 1ecfb9d2b365..7b0518adf6d7 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -63,13 +63,13 @@ struct ftrace_entry {
 
 /* Function call entry */
 struct ftrace_graph_ent_entry {
-	struct trace_entry	ent;
+	struct trace_entry		ent;
 	struct ftrace_graph_ent		graph_ent;
 };
 
 /* Function return entry */
 struct ftrace_graph_ret_entry {
-	struct trace_entry	ent;
+	struct trace_entry		ent;
 	struct ftrace_graph_ret		ret;
 };
 extern struct tracer		boot_tracer;
@@ -309,7 +309,8 @@ extern void __ftrace_bad_type(void);
 enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
 	TRACE_TYPE_HANDLED	= 1,
-	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
+	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
+	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
 };
 
 
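[Note] For reference, a hedged sketch of what each print_line_t value asks the caller to do, following the comments in the hunk above; the describe() helper and the driver loop are illustrative only, not kernel code.

	#include <stdio.h>

	enum print_line_t {
		TRACE_TYPE_PARTIAL_LINE	= 0,	/* retry after flushing the seq */
		TRACE_TYPE_HANDLED	= 1,
		TRACE_TYPE_UNHANDLED	= 2,	/* relay to other output functions */
		TRACE_TYPE_NO_CONSUME	= 3	/* handled, but do not consume */
	};

	static const char *describe(enum print_line_t ret)
	{
		switch (ret) {
		case TRACE_TYPE_PARTIAL_LINE:
			return "seq buffer full: flush it and retry this entry";
		case TRACE_TYPE_HANDLED:
			return "printed: the caller consumes the entry";
		case TRACE_TYPE_UNHANDLED:
			return "not ours: relay to other output functions";
		case TRACE_TYPE_NO_CONSUME:
			return "printed and already consumed: skip trace_consume()";
		}
		return "unknown";
	}

	int main(void)
	{
		int r;

		for (r = TRACE_TYPE_PARTIAL_LINE; r <= TRACE_TYPE_NO_CONSUME; r++)
			printf("%d: %s\n", r, describe((enum print_line_t)r));
		return 0;
	}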
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 88f8d9d80a93..782ec0fdf453 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -212,8 +212,8 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu)
 	return ret;
 }
 
-static bool
-trace_branch_is_leaf(struct trace_iterator *iter,
+static struct ftrace_graph_ret_entry *
+get_return_for_leaf(struct trace_iterator *iter,
 		struct ftrace_graph_ent_entry *curr)
 {
 	struct ring_buffer_iter *ring_iter;
@@ -222,24 +222,33 @@ trace_branch_is_leaf(struct trace_iterator *iter,
 
 	ring_iter = iter->buffer_iter[iter->cpu];
 
-	if (!ring_iter)
-		return false;
-
-	event = ring_buffer_iter_peek(ring_iter, NULL);
+	/* First peek to compare current entry and the next one */
+	if (ring_iter)
+		event = ring_buffer_iter_peek(ring_iter, NULL);
+	else {
+		/* We need to consume the current entry to see the next one */
+		ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
+		event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
+					NULL);
+	}
 
 	if (!event)
-		return false;
+		return NULL;
 
 	next = ring_buffer_event_data(event);
 
 	if (next->ent.type != TRACE_GRAPH_RET)
-		return false;
+		return NULL;
 
 	if (curr->ent.pid != next->ent.pid ||
 	    curr->graph_ent.func != next->ret.func)
-		return false;
+		return NULL;
 
-	return true;
+	/* this is a leaf, now advance the iterator */
+	if (ring_iter)
+		ring_buffer_read(ring_iter, NULL);
+
+	return next;
 }
 
 /* Signal a overhead of time execution to the output */
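[Note] The rewritten lookahead above is the heart of the change: with a per-cpu ring_buffer_iter present we can peek at the next event non-destructively, but on the trace_pipe path there is no iterator, so the current entry must be consumed before the next one becomes visible. Below is a compilable user-space sketch of that logic; the flat array and the names (leaf_return, pipe_peek, iter_peek) are invented stand-ins for the ring-buffer API, not the real thing.

	#include <stdio.h>
	#include <string.h>

	enum ev_type { ENT, RET };

	struct event { enum ev_type type; int pid; const char *func; };

	static struct event evs[] = {
		{ ENT, 1, "foo" }, { RET, 1, "foo" },	/* a leaf pair */
		{ ENT, 1, "bar" }, { ENT, 1, "baz" },	/* not a leaf */
	};
	static const int n_evs = sizeof(evs) / sizeof(evs[0]);
	static int head;	/* pipe read position */

	/* Pipe view: look at, or drop, the oldest unconsumed event. */
	static struct event *pipe_peek(void) { return head < n_evs ? &evs[head] : NULL; }
	static void pipe_consume(void) { if (head < n_evs) head++; }

	/* Iterator view: *pos indexes the array independently of head. */
	static struct event *iter_peek(int *pos)
	{
		return *pos + 1 < n_evs ? &evs[*pos + 1] : NULL;
	}

	/* Mirror of get_return_for_leaf(): NULL unless curr is immediately
	 * followed by its own return event; pos is NULL on the pipe path. */
	static struct event *leaf_return(struct event *curr, int *pos)
	{
		struct event *next;

		if (pos)
			next = iter_peek(pos);	/* non-destructive lookahead */
		else {
			pipe_consume();		/* curr must go before next shows */
			next = pipe_peek();
		}

		if (!next || next->type != RET ||
		    next->pid != curr->pid || strcmp(next->func, curr->func) != 0)
			return NULL;

		if (pos)
			(*pos)++;		/* advance past the matched return */
		return next;
	}

	int main(void)
	{
		struct event *ret = leaf_return(&evs[0], NULL);	/* pipe path */

		printf("foo is %s\n", ret ? "a leaf" : "not a leaf");
		printf("pipe head now at %d (ent:foo consumed)\n", head);
		return 0;
	}

Note that on the pipe path the call entry is consumed even when the lookahead fails, which is exactly why print_graph_entry_nested() now returns TRACE_TYPE_NO_CONSUME.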
@@ -376,18 +385,15 @@ static int print_graph_abs_time(u64 t, struct trace_seq *s)
 /* Case of a leaf function on its call entry */
 static enum print_line_t
 print_graph_entry_leaf(struct trace_iterator *iter,
-		struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
+		struct ftrace_graph_ent_entry *entry,
+		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
 {
-	struct ftrace_graph_ret_entry *ret_entry;
 	struct ftrace_graph_ret *graph_ret;
-	struct ring_buffer_event *event;
 	struct ftrace_graph_ent *call;
 	unsigned long long duration;
 	int ret;
 	int i;
 
-	event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
-	ret_entry = ring_buffer_event_data(event);
 	graph_ret = &ret_entry->ret;
 	call = &entry->graph_ent;
 	duration = graph_ret->rettime - graph_ret->calltime;
@@ -457,7 +463,11 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
-	return TRACE_TYPE_HANDLED;
+	/*
+	 * we already consumed the current entry to check the next one
+	 * and see if this is a leaf.
+	 */
+	return TRACE_TYPE_NO_CONSUME;
 }
 
 static enum print_line_t
@@ -469,6 +479,7 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 	pid_t *last_entry = iter->private;
 	struct trace_entry *ent = iter->ent;
 	struct ftrace_graph_ent *call = &field->graph_ent;
+	struct ftrace_graph_ret_entry *leaf_ret;
 
 	/* Pid */
 	if (verif_pid(s, ent->pid, cpu, last_entry) == TRACE_TYPE_PARTIAL_LINE)
@@ -504,8 +515,9 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 		return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	if (trace_branch_is_leaf(iter, field))
-		return print_graph_entry_leaf(iter, field, s);
+	leaf_ret = get_return_for_leaf(iter, field);
+	if (leaf_ret)
+		return print_graph_entry_leaf(iter, field, leaf_ret, s);
 	else
 		return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
 
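[Note] The final hunk swaps a bool predicate for a pointer-or-NULL return, so the matching return event is looked up once and handed straight to print_graph_entry_leaf() instead of being fetched a second time. A tiny sketch of that idiom, with invented names; the duration arithmetic mirrors the rettime - calltime computation in the leaf printer above.

	#include <stdio.h>

	struct ret_ev { long rettime, calltime; };

	/* Stand-in for get_return_for_leaf(): NULL means "not a leaf". */
	static struct ret_ev *lookup_return(int is_leaf)
	{
		static struct ret_ev r = { .rettime = 1200, .calltime = 200 };

		return is_leaf ? &r : NULL;
	}

	int main(void)
	{
		struct ret_ev *leaf_ret = lookup_return(1);

		if (leaf_ret)	/* one call yields both the test and the data */
			printf("leaf, duration %ld\n",
			       leaf_ret->rettime - leaf_ret->calltime);
		else
			printf("nested entry\n");
		return 0;
	}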