author     Frederic Weisbecker <fweisbec@gmail.com>    2008-11-25 18:57:25 -0500
committer  Ingo Molnar <mingo@elte.hu>                 2008-11-25 19:59:45 -0500
commit     287b6e68ca7209caec40b2f44f837c580a413bae
tree       b0867d75868f6049dc5747bd39fdae2d477dde66    /kernel/trace/ftrace.c
parent     fb52607afcd0629776f1dc9e657647ceae81dd50
tracing/function-return-tracer: set a more human readable output
Impact: feature

This patch sets a C-like output for the function graph tracing. To this end, we now call two handlers for each function: one on entry and another on return. This way we can draw a well-ordered call stack.

The pid of the previous trace entry is loosely stored and compared against the pid of the current one to detect a context switch. Without this little feature, the call tree would seem broken at some locations. We could use the sched tracer to capture these sched events, but this approach is much simpler.

Two spaces have been chosen for indentation so that deep call chains still fit on the screen. The execution time, in nanoseconds, is printed just after the closing brace; this makes it easier to match the time with its function. If the time were printed in a first column, it would be harder to find the corresponding function when the call is deeply nested.

I plan to output the return value, but on a 32-bit CPU the return value can be 32 or 64 bits wide, and it is difficult to guess which case applies. I don't know what the better solution would be on x86-32: print only eax (the low part) or edx (the high part) as well. Actually it's the same problem when a function returns an 8-bit value: the high part of eax could contain junk values...

Here is an example of trace:

sys_read() {
  fget_light() {
  } 526
  vfs_read() {
    rw_verify_area() {
      security_file_permission() {
        cap_file_permission() {
        } 519
      } 1564
    } 2640
    do_sync_read() {
      pipe_read() {
        __might_sleep() {
        } 511
        pipe_wait() {
          prepare_to_wait() {
          } 760
          deactivate_task() {
            dequeue_task() {
              dequeue_task_fair() {
                dequeue_entity() {
                  update_curr() {
                    update_min_vruntime() {
                    } 504
                  } 1587
                  clear_buddies() {
                  } 512
                  add_cfs_task_weight() {
                  } 519
                  update_min_vruntime() {
                  } 511
                } 5602
                dequeue_entity() {
                  update_curr() {
                    update_min_vruntime() {
                    } 496
                  } 1631
                  clear_buddies() {
                  } 496
                  update_min_vruntime() {
                  } 527
                } 4580
                hrtick_update() {
                  hrtick_start_fair() {
                  } 488
                } 1489
              } 13700
            } 14949
          } 16016
          msecs_to_jiffies() {
          } 496
          put_prev_task_fair() {
          } 504
          pick_next_task_fair() {
          } 489
          pick_next_task_rt() {
          } 496
          pick_next_task_fair() {
          } 489
          pick_next_task_idle() {
          } 489
------------8<---------- thread 4 ------------8<----------
  finish_task_switch() {
  } 1203
  do_softirq() {
    __do_softirq() {
      __local_bh_disable() {
      } 669
      rcu_process_callbacks() {
        __rcu_process_callbacks() {
          cpu_quiet() {
            rcu_start_batch() {
            } 503
          } 1647
        } 3128
        __rcu_process_callbacks() {
        } 542
      } 5362
      _local_bh_enable() {
      } 587
    } 8880
  } 9986
  kthread_should_stop() {
  } 669
  deactivate_task() {
    dequeue_task() {
      dequeue_task_fair() {
        dequeue_entity() {
          update_curr() {
            calc_delta_mine() {
            } 511
            update_min_vruntime() {
            } 511
          } 2813

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
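For illustration, a minimal sketch of a tracer hooking into the new two-callback API that this patch introduces. Only register_ftrace_graph() and unregister_ftrace_graph() appear in the diff below; the handler prototypes (taking struct ftrace_graph_ent * and struct ftrace_graph_ret *) are assumed from the trace_func_graph_ent_t / trace_func_graph_ret_t typedef names and live in include/linux/ftrace.h, not in this file.

    /* Minimal sketch, not part of this patch. */
    #include <linux/ftrace.h>
    #include <linux/init.h>

    /* Assumed prototypes, based on the trace_func_graph_*_t typedef names. */
    static void my_graph_entry(struct ftrace_graph_ent *trace)
    {
            /* called on entry of every traced function */
    }

    static void my_graph_return(struct ftrace_graph_ret *trace)
    {
            /* called when the traced function returns */
    }

    static int __init my_tracer_init(void)
    {
            /* return handler first, entry handler second, as in this patch */
            return register_ftrace_graph(my_graph_return, my_graph_entry);
    }

    static void my_tracer_exit(void)
    {
            unregister_ftrace_graph();
    }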
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--    kernel/trace/ftrace.c    30
1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9e19976af727..7e2d3b91692d 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1498,12 +1498,13 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-static atomic_t ftrace_retfunc_active;
-
-/* The callback that hooks the return of a function */
-trace_function_graph_t ftrace_graph_function =
-			(trace_function_graph_t)ftrace_stub;
+static atomic_t ftrace_graph_active;
 
+/* The callbacks that hook a function */
+trace_func_graph_ret_t ftrace_graph_return =
+			(trace_func_graph_ret_t)ftrace_stub;
+trace_func_graph_ent_t ftrace_graph_entry =
+			(trace_func_graph_ent_t)ftrace_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -1569,7 +1570,8 @@ static int start_graph_tracing(void)
 	return ret;
 }
 
-int register_ftrace_graph(trace_function_graph_t func)
+int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+			trace_func_graph_ent_t entryfunc)
 {
 	int ret = 0;
 
@@ -1583,14 +1585,15 @@ int register_ftrace_graph(trace_function_graph_t func)
 		ret = -EBUSY;
 		goto out;
 	}
-	atomic_inc(&ftrace_retfunc_active);
+	atomic_inc(&ftrace_graph_active);
 	ret = start_graph_tracing();
 	if (ret) {
-		atomic_dec(&ftrace_retfunc_active);
+		atomic_dec(&ftrace_graph_active);
 		goto out;
 	}
 	ftrace_tracing_type = FTRACE_TYPE_RETURN;
-	ftrace_graph_function = func;
+	ftrace_graph_return = retfunc;
+	ftrace_graph_entry = entryfunc;
 	ftrace_startup();
 
 out:
@@ -1602,8 +1605,9 @@ void unregister_ftrace_graph(void)
 {
 	mutex_lock(&ftrace_sysctl_lock);
 
-	atomic_dec(&ftrace_retfunc_active);
-	ftrace_graph_function = (trace_function_graph_t)ftrace_stub;
+	atomic_dec(&ftrace_graph_active);
+	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+	ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
 	ftrace_shutdown();
 	/* Restore normal tracing type */
 	ftrace_tracing_type = FTRACE_TYPE_ENTER;
@@ -1614,7 +1618,7 @@ void unregister_ftrace_graph(void)
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
-	if (atomic_read(&ftrace_retfunc_active)) {
+	if (atomic_read(&ftrace_graph_active)) {
 		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
 					* sizeof(struct ftrace_ret_stack),
 					GFP_KERNEL);
@@ -1638,5 +1642,3 @@ void ftrace_graph_exit_task(struct task_struct *t)
 }
 #endif
 
-
-
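As a closing illustration of the output rules described in the changelog (two-space indentation, duration in nanoseconds after the closing brace, and a cut marker when the traced pid changes), here is a small user-space sketch. The event layout and all names are hypothetical; this only mirrors the format, not the kernel's actual graph-output code.

    /* User-space sketch of the output format; everything here is hypothetical. */
    #include <stdio.h>

    struct ev {
            int pid;
            int entry;              /* 1 = function entry, 0 = function return */
            const char *func;       /* only meaningful on entry */
            unsigned long long ns;  /* duration, only meaningful on return */
    };

    static void print_graph(const struct ev *evs, int n)
    {
            int depth = 0;
            int last_pid = -1;

            for (int i = 0; i < n; i++) {
                    if (last_pid >= 0 && evs[i].pid != last_pid) {
                            printf("------------8<---------- thread %d "
                                   "------------8<----------\n", evs[i].pid);
                            depth = 0;  /* simplification for this sketch */
                    }
                    last_pid = evs[i].pid;

                    if (evs[i].entry) {
                            /* "func() {" at two spaces per nesting level */
                            printf("%*s%s() {\n", 2 * depth, "", evs[i].func);
                            depth++;
                    } else {
                            /* "} <ns>" aligned with the matching opening line */
                            depth--;
                            printf("%*s} %llu\n", 2 * depth, "", evs[i].ns);
                    }
            }
    }

    int main(void)
    {
            const struct ev trace[] = {
                    { 1, 1, "sys_read", 0 },
                    { 1, 1, "fget_light", 0 },
                    { 1, 0, NULL, 526 },
                    { 4, 1, "finish_task_switch", 0 },
                    { 4, 0, NULL, 1203 },
            };

            print_graph(trace, sizeof(trace) / sizeof(trace[0]));
            return 0;
    }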