Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
 kernel/trace/trace_functions_graph.c | 48 ++++++++++++++++++++++++++++++------------------
 1 file changed, 30 insertions(+), 18 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 88f8d9d80a93..782ec0fdf453 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -212,8 +212,8 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu)
 	return ret;
 }
 
-static bool
-trace_branch_is_leaf(struct trace_iterator *iter,
+static struct ftrace_graph_ret_entry *
+get_return_for_leaf(struct trace_iterator *iter,
 		struct ftrace_graph_ent_entry *curr)
 {
 	struct ring_buffer_iter *ring_iter;
@@ -222,24 +222,33 @@ trace_branch_is_leaf(struct trace_iterator *iter,
 
 	ring_iter = iter->buffer_iter[iter->cpu];
 
-	if (!ring_iter)
-		return false;
-
-	event = ring_buffer_iter_peek(ring_iter, NULL);
+	/* First peek to compare current entry and the next one */
+	if (ring_iter)
+		event = ring_buffer_iter_peek(ring_iter, NULL);
+	else {
+		/* We need to consume the current entry to see the next one */
+		ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
+		event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
+					NULL);
+	}
 
 	if (!event)
-		return false;
+		return NULL;
 
 	next = ring_buffer_event_data(event);
 
 	if (next->ent.type != TRACE_GRAPH_RET)
-		return false;
+		return NULL;
 
 	if (curr->ent.pid != next->ent.pid ||
 	    curr->graph_ent.func != next->ret.func)
-		return false;
+		return NULL;
 
-	return true;
+	/* this is a leaf, now advance the iterator */
+	if (ring_iter)
+		ring_buffer_read(ring_iter, NULL);
+
+	return next;
 }
 
 /* Signal a overhead of time execution to the output */
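
The hunk above turns the leaf check into a lookup: instead of answering yes/no, get_return_for_leaf() hands back the matching TRACE_GRAPH_RET event, and it now copes with trace_pipe, where no static iterator exists and the only way to see the next event is to consume the current one. Below is a minimal user-space sketch of that control flow; the event stream, the peek()/consume() helpers and return_for_leaf() are hypothetical stand-ins for the kernel ring buffer API, not the real implementation.

/*
 * Hypothetical stand-ins for the ring buffer: a fixed event stream
 * with a non-destructive peek() (the static-iterator case, i.e. the
 * "trace" file) and a destructive consume() (the live "trace_pipe"
 * case).
 */
#include <stdio.h>
#include <stddef.h>

enum ev_type { EV_GRAPH_ENT, EV_GRAPH_RET };

struct ev {
	enum ev_type type;
	int pid;
	int func;			/* function identifier */
};

static struct ev stream[] = {
	{ EV_GRAPH_ENT, 1, 42 },	/* call entry */
	{ EV_GRAPH_RET, 1, 42 },	/* immediate matching return: leaf */
};
static size_t pos;
static const size_t len = sizeof(stream) / sizeof(stream[0]);

static struct ev *peek(void)    { return pos < len ? &stream[pos] : NULL; }
static struct ev *consume(void) { return pos < len ? &stream[pos++] : NULL; }

/*
 * Mirror of the patched control flow: a call entry is a leaf when the
 * very next event is the matching return for the same pid and
 * function. With an iterator we can just peek; on a pipe the current
 * entry must be consumed first to expose the next one.
 */
static struct ev *return_for_leaf(struct ev *curr, int have_iter)
{
	struct ev *next;

	if (have_iter)
		next = peek();
	else {
		consume();		/* drop curr, like ring_buffer_consume() */
		next = peek();
	}

	if (!next || next->type != EV_GRAPH_RET)
		return NULL;
	if (next->pid != curr->pid || next->func != curr->func)
		return NULL;

	/* this is a leaf: in the iterator case, advance past curr too */
	if (have_iter)
		consume();

	return next;			/* caller gets the return entry itself */
}

int main(void)
{
	printf("%s\n", return_for_leaf(&stream[0], 0) ? "leaf" : "nested");
	return 0;
}
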
@@ -376,18 +385,15 @@ static int print_graph_abs_time(u64 t, struct trace_seq *s)
 /* Case of a leaf function on its call entry */
 static enum print_line_t
 print_graph_entry_leaf(struct trace_iterator *iter,
-		struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
+		struct ftrace_graph_ent_entry *entry,
+		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
 {
-	struct ftrace_graph_ret_entry *ret_entry;
 	struct ftrace_graph_ret *graph_ret;
-	struct ring_buffer_event *event;
 	struct ftrace_graph_ent *call;
 	unsigned long long duration;
 	int ret;
 	int i;
 
-	event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
-	ret_entry = ring_buffer_event_data(event);
 	graph_ret = &ret_entry->ret;
 	call = &entry->graph_ent;
 	duration = graph_ret->rettime - graph_ret->calltime;
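
With the return entry now found by the caller, print_graph_entry_leaf() no longer touches the ring buffer itself: it receives both halves of the call and derives the duration as rettime - calltime. The snippet below is a simplified, self-contained stand-in for what the leaf printer emits; print_leaf(), the struct and the format string are illustrative, not the real trace_seq code.

#include <stdio.h>

struct graph_ret { unsigned long long calltime, rettime; };

static void print_leaf(const char *func, const struct graph_ret *ret)
{
	/* times in ns; a leaf collapses to one line: "  2.345 us |  func();" */
	unsigned long long duration = ret->rettime - ret->calltime;

	printf(" %llu.%03llu us |  %s();\n",
	       duration / 1000, duration % 1000, func);
}

int main(void)
{
	struct graph_ret r = { .calltime = 1000, .rettime = 3345 };

	print_leaf("do_IRQ", &r);	/* -> " 2.345 us |  do_IRQ();" */
	return 0;
}
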
@@ -457,7 +463,11 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
-	return TRACE_TYPE_HANDLED;
+	/*
+	 * we already consumed the current entry to check the next one
+	 * and see if this is a leaf.
+	 */
+	return TRACE_TYPE_NO_CONSUME;
 }
 
 static enum print_line_t
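
TRACE_TYPE_NO_CONSUME is the other half of the trace_pipe fix: in the pipe path, get_return_for_leaf() already consumed the current entry while peeking at the next one, so the nested printer must tell the core read loop not to consume a second event for the same line. A self-contained toy of that accounting follows; the loop, enum and helpers are hypothetical, not the trace core.

#include <stdio.h>

enum print_line_t { HANDLED, NO_CONSUME };

static int ev[] = { 10, 20, 30 };
static int pos, len = 3;

static int *consume(void) { return pos < len ? &ev[pos++] : NULL; }

/*
 * A printer in the trace_pipe path: it had to consume its own event
 * up-front in order to peek ahead, so it reports NO_CONSUME. If it
 * returned HANDLED instead, the loop below would consume again and
 * silently drop every other event.
 */
static enum print_line_t print_event(void)
{
	int *e = consume();		/* consumed while "peeking ahead" */

	printf("event %d\n", e ? *e : -1);
	return NO_CONSUME;
}

int main(void)
{
	while (pos < len) {
		/* core loop: consume once per line unless told not to */
		if (print_event() != NO_CONSUME)
			consume();
	}
	return 0;
}
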
@@ -469,6 +479,7 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 	pid_t *last_entry = iter->private;
 	struct trace_entry *ent = iter->ent;
 	struct ftrace_graph_ent *call = &field->graph_ent;
+	struct ftrace_graph_ret_entry *leaf_ret;
 
 	/* Pid */
 	if (verif_pid(s, ent->pid, cpu, last_entry) == TRACE_TYPE_PARTIAL_LINE)
@@ -504,8 +515,9 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 		return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	if (trace_branch_is_leaf(iter, field))
-		return print_graph_entry_leaf(iter, field, s);
+	leaf_ret = get_return_for_leaf(iter, field);
+	if (leaf_ret)
+		return print_graph_entry_leaf(iter, field, leaf_ret, s);
 	else
 		return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
 