author    Frederic Weisbecker <fweisbec@gmail.com>    2008-11-25 18:57:25 -0500
committer Ingo Molnar <mingo@elte.hu>                 2008-11-25 19:59:45 -0500
commit    287b6e68ca7209caec40b2f44f837c580a413bae (patch)
tree      b0867d75868f6049dc5747bd39fdae2d477dde66 /kernel/trace/trace.c
parent    fb52607afcd0629776f1dc9e657647ceae81dd50 (diff)
tracing/function-return-tracer: set a more human readable output
Impact: feature

This patch sets a C-like output for the function graph tracer. To this end, we now call two handlers for each function: one on entry and another on return. This way we can draw a well-ordered call stack.

The pid of the previous trace is loosely stored and compared against the pid of the current trace to detect a context switch. Without this little feature, the call tree would appear broken in some places. We could use the sched tracer to capture these sched events, but this approach is much simpler.

Two spaces of indentation were chosen so that deep call chains still fit on the screen. The execution time in nanoseconds is printed just after the closing brace; this makes it easier to find the corresponding function. If the time were printed in a first column, it would be harder to match it to a function called at great depth.

I plan to output the return value, but on a 32-bit CPU the return value can be 32 or 64 bits wide, and it is difficult to guess which case applies. I don't know what the better solution on x86-32 would be: print only eax (the low part), or edx (the high part) as well. The same problem actually arises when a function returns an 8-bit value: the high part of eax could contain junk values...

Here is an example of a trace:

sys_read() {
  fget_light() {
  } 526
  vfs_read() {
    rw_verify_area() {
      security_file_permission() {
        cap_file_permission() {
        } 519
      } 1564
    } 2640
    do_sync_read() {
      pipe_read() {
        __might_sleep() {
        } 511
        pipe_wait() {
          prepare_to_wait() {
          } 760
          deactivate_task() {
            dequeue_task() {
              dequeue_task_fair() {
                dequeue_entity() {
                  update_curr() {
                    update_min_vruntime() {
                    } 504
                  } 1587
                  clear_buddies() {
                  } 512
                  add_cfs_task_weight() {
                  } 519
                  update_min_vruntime() {
                  } 511
                } 5602
                dequeue_entity() {
                  update_curr() {
                    update_min_vruntime() {
                    } 496
                  } 1631
                  clear_buddies() {
                  } 496
                  update_min_vruntime() {
                  } 527
                } 4580
                hrtick_update() {
                  hrtick_start_fair() {
                  } 488
                } 1489
              } 13700
            } 14949
          } 16016
          msecs_to_jiffies() {
          } 496
          put_prev_task_fair() {
          } 504
          pick_next_task_fair() {
          } 489
          pick_next_task_rt() {
          } 496
          pick_next_task_fair() {
          } 489
          pick_next_task_idle() {
          } 489
------------8<---------- thread 4 ------------8<----------
          finish_task_switch() {
          } 1203
          do_softirq() {
            __do_softirq() {
              __local_bh_disable() {
              } 669
              rcu_process_callbacks() {
                __rcu_process_callbacks() {
                  cpu_quiet() {
                    rcu_start_batch() {
                    } 503
                  } 1647
                } 3128
                __rcu_process_callbacks() {
                } 542
              } 5362
              _local_bh_enable() {
              } 587
            } 8880
          } 9986
          kthread_should_stop() {
          } 669
          deactivate_task() {
            dequeue_task() {
              dequeue_task_fair() {
                dequeue_entity() {
                  update_curr() {
                    calc_delta_mine() {
                    } 511
                    update_min_vruntime() {
                    } 511
                  } 2813

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
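To make the layout rules above concrete, here is a minimal user-space sketch (not the kernel implementation; the event struct, sample data, and function names are invented for illustration) of how a stream of entry/return events can be rendered with 2-space indentation, the duration after the closing brace, and a cut mark when the pid changes:

/* render.c - illustrative only; build with: gcc -std=c99 render.c */
#include <stdio.h>

enum ev_type { EV_ENTRY, EV_RETURN };

struct graph_ev {
	enum ev_type type;
	const char *func;       /* function name, entry events only */
	unsigned long long ns;  /* execution time, return events only */
	int pid;                /* task that emitted the event */
};

static void render(const struct graph_ev *evs, int n)
{
	int depth = 0, prev_pid = -1;

	for (int i = 0; i < n; i++) {
		/* A pid change between traces means a context switch:
		 * print the cut mark so the tree does not look broken. */
		if (prev_pid != -1 && evs[i].pid != prev_pid)
			printf("------------8<---------- thread %d ------------8<----------\n",
			       evs[i].pid);
		prev_pid = evs[i].pid;

		if (evs[i].type == EV_ENTRY) {
			printf("%*s%s() {\n", 2 * depth, "", evs[i].func);
			depth++;        /* children are indented two more spaces */
		} else {
			depth--;
			/* duration is printed right after the closing brace */
			printf("%*s} %llu\n", 2 * depth, "", evs[i].ns);
		}
	}
}

int main(void)
{
	const struct graph_ev evs[] = {
		{ EV_ENTRY,  "sys_read",           0,    42 },
		{ EV_ENTRY,  "fget_light",         0,    42 },
		{ EV_RETURN, NULL,                 526,  42 },
		{ EV_ENTRY,  "schedule",           0,    42 },
		{ EV_ENTRY,  "finish_task_switch", 0,     4 },
		{ EV_RETURN, NULL,                 1203,  4 },
	};

	render(evs, (int)(sizeof(evs) / sizeof(evs[0])));
	return 0;
}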
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  |  67
1 file changed, 55 insertions(+), 12 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f21ab2c68fd..9d5f7c94f25 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -879,14 +879,38 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static void __trace_function_graph(struct trace_array *tr,
+static void __trace_graph_entry(struct trace_array *tr,
+				struct trace_array_cpu *data,
+				struct ftrace_graph_ent *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ring_buffer_event *event;
+	struct ftrace_graph_ent_entry *entry;
+	unsigned long irq_flags;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return;
+
+	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry	= ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type		= TRACE_GRAPH_ENT;
+	entry->graph_ent	= *trace;
+	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+}
+
+static void __trace_graph_return(struct trace_array *tr,
 				struct trace_array_cpu *data,
 				struct ftrace_graph_ret *trace,
 				unsigned long flags,
 				int pc)
 {
 	struct ring_buffer_event *event;
-	struct ftrace_graph_entry *entry;
+	struct ftrace_graph_ret_entry *entry;
 	unsigned long irq_flags;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
@@ -898,12 +922,8 @@ static void __trace_function_graph(struct trace_array *tr,
 		return;
 	entry	= ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type		= TRACE_FN_RET;
-	entry->ip		= trace->func;
-	entry->parent_ip	= trace->ret;
-	entry->rettime		= trace->rettime;
-	entry->calltime		= trace->calltime;
-	entry->overrun		= trace->overrun;
+	entry->ent.type		= TRACE_GRAPH_RET;
+	entry->ret		= *trace;
 	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
 }
 #endif
@@ -1178,7 +1198,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-void trace_function_graph(struct ftrace_graph_ret *trace)
+void trace_graph_entry(struct ftrace_graph_ent *trace)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
@@ -1193,7 +1213,28 @@ void trace_function_graph(struct ftrace_graph_ret *trace)
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		__trace_function_graph(tr, data, trace, flags, pc);
+		__trace_graph_entry(tr, data, trace, flags, pc);
+	}
+	atomic_dec(&data->disabled);
+	raw_local_irq_restore(flags);
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	raw_local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		__trace_graph_return(tr, data, trace, flags, pc);
 	}
 	atomic_dec(&data->disabled);
 	raw_local_irq_restore(flags);
@@ -2000,9 +2041,11 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 		trace_seq_print_cont(s, iter);
 		break;
 	}
-	case TRACE_FN_RET: {
+	case TRACE_GRAPH_RET: {
+		return print_graph_function(iter);
+	}
+	case TRACE_GRAPH_ENT: {
 		return print_graph_function(iter);
-		break;
 	}
 	case TRACE_BRANCH: {
 		struct trace_branch *field;
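The "disabled" counter manipulated in trace_graph_entry() and trace_graph_return() above is a per-cpu re-entrancy guard: with interrupts off, only the outermost caller sees atomic_inc_return() return 1 and is allowed to write to the ring buffer; anything the tracer itself calls that re-enters the trace path is silently dropped. A minimal user-space analogue of that pattern (one C11 atomic instead of per-cpu data; all names below are illustrative, not kernel API):

/* guard.c - illustrative only; build with: gcc -std=c11 guard.c */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long disabled;

static void trace_event(const char *what, int reenter);

/* Something the "tracer" calls that is itself instrumented. */
static void traced_helper(void)
{
	trace_event("helper (nested, dropped)", 0);
}

static void trace_event(const char *what, int reenter)
{
	/* atomic_inc_return() equivalent: only the outermost caller
	 * sees the counter go 0 -> 1 and may record the event. */
	if (atomic_fetch_add(&disabled, 1) + 1 == 1) {
		printf("recorded: %s\n", what);
		if (reenter)
			traced_helper(); /* recursive entry is silently skipped */
	}
	atomic_fetch_sub(&disabled, 1);
}

int main(void)
{
	trace_event("sys_read entry", 1); /* prints only the outer event */
	return 0;
}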