author     Frederic Weisbecker <fweisbec@gmail.com>    2008-11-16 21:22:41 -0500
committer  Ingo Molnar <mingo@elte.hu>                 2008-11-18 05:11:00 -0500
commit     0231022cc32d5f2e7f3c06b75691dda0ad6aec33
tree       b45429e95fc7d52bae32e62ef514f3d7ccf62ce6    /arch/x86/kernel/ftrace.c
parent     0619faf657806b943e6acf51f60f1cd023a96c78
tracing/function-return-tracer: add the overrun field
Impact: help to find a better trace depth
We decided to arbitrarily define the depth of the function return trace
as 20. Perhaps this is not enough. To help find an optimal depth, we now
measure the overrun: the number of functions that have been missed for
the current thread. By default it is not displayed; a particular flag
has to be set on the return tracer:

  echo overrun > /debug/tracing/trace_options

The overrun count will then be printed on the right of each entry.
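For completeness, a minimal user-space sketch of the same steps. This is
not part of the patch: the /debug/tracing mount point is taken from the
message (many setups mount debugfs at /sys/kernel/debug instead), and it
assumes the function return tracer is already selected in current_tracer.

/*
 * Hedged sketch: enable the new "overrun" option and dump the trace.
 */
#include <stdio.h>

#define TRACE_DIR "/debug/tracing"	/* adjust to the local debugfs mount */

int main(void)
{
	FILE *f = fopen(TRACE_DIR "/trace_options", "w");
	char line[512];

	if (!f)
		return 1;
	fputs("overrun", f);	/* same as: echo overrun > trace_options */
	fclose(f);

	f = fopen(TRACE_DIR "/trace", "r");
	if (!f)
		return 1;
	/* Each entry should now end with an "(Overruns: N)" column. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}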
As the trace below shows, the current depth of 20 is not enough:
update_wall_time+0x37f/0x8c0 -> update_xtime_cache (345 ns) (Overruns: 2838)
update_wall_time+0x384/0x8c0 -> clocksource_get_next (1141 ns) (Overruns: 2838)
do_timer+0x23/0x100 -> update_wall_time (3882 ns) (Overruns: 2838)
tick_do_update_jiffies64+0xbf/0x160 -> do_timer (5339 ns) (Overruns: 2838)
tick_sched_timer+0x6a/0xf0 -> tick_do_update_jiffies64 (7209 ns) (Overruns: 2838)
vgacon_set_cursor_size+0x98/0x120 -> native_io_delay (2613 ns) (Overruns: 274)
vgacon_cursor+0x16e/0x1d0 -> vgacon_set_cursor_size (33151 ns) (Overruns: 274)
set_cursor+0x5f/0x80 -> vgacon_cursor (36432 ns) (Overruns: 274)
con_flush_chars+0x34/0x40 -> set_cursor (38790 ns) (Overruns: 274)
release_console_sem+0x1ec/0x230 -> up (721 ns) (Overruns: 274)
release_console_sem+0x225/0x230 -> wake_up_klogd (316 ns) (Overruns: 274)
con_flush_chars+0x39/0x40 -> release_console_sem (2996 ns) (Overruns: 274)
con_write+0x22/0x30 -> con_flush_chars (46067 ns) (Overruns: 274)
n_tty_write+0x1cc/0x360 -> con_write (292670 ns) (Overruns: 274)
smp_apic_timer_interrupt+0x2a/0x90 -> native_apic_mem_write (330 ns) (Overruns: 274)
irq_enter+0x17/0x70 -> idle_cpu (413 ns) (Overruns: 274)
smp_apic_timer_interrupt+0x2f/0x90 -> irq_enter (1525 ns) (Overruns: 274)
ktime_get_ts+0x40/0x70 -> getnstimeofday (465 ns) (Overruns: 274)
ktime_get_ts+0x60/0x70 -> set_normalized_timespec (436 ns) (Overruns: 274)
ktime_get+0x16/0x30 -> ktime_get_ts (2501 ns) (Overruns: 274)
hrtimer_interrupt+0x77/0x1a0 -> ktime_get (3439 ns) (Overruns: 274)
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/ftrace.c')
-rw-r--r--  arch/x86/kernel/ftrace.c | 10
1 files changed, 7 insertions, 3 deletions
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 924153edd973..356bb1eb6e9a 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -353,8 +353,10 @@ static int push_return_trace(unsigned long ret, unsigned long long time,
 	struct thread_info *ti = current_thread_info();
 
 	/* The return trace stack is full */
-	if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
+	if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) {
+		atomic_inc(&ti->trace_overrun);
 		return -EBUSY;
+	}
 
 	index = ++ti->curr_ret_stack;
 	barrier();
@@ -367,7 +369,7 @@ static int push_return_trace(unsigned long ret, unsigned long long time,
 
 /* Retrieve a function return address to the trace stack on thread info.*/
 static void pop_return_trace(unsigned long *ret, unsigned long long *time,
-				unsigned long *func)
+				unsigned long *func, unsigned long *overrun)
 {
 	int index;
 
@@ -376,6 +378,7 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
 	*ret = ti->ret_stack[index].ret;
 	*func = ti->ret_stack[index].func;
 	*time = ti->ret_stack[index].calltime;
+	*overrun = atomic_read(&ti->trace_overrun);
 	ti->curr_ret_stack--;
 }
 
@@ -386,7 +389,8 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
 unsigned long ftrace_return_to_handler(void)
 {
 	struct ftrace_retfunc trace;
-	pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
+	pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
+				&trace.overrun);
 	trace.rettime = cpu_clock(raw_smp_processor_id());
 	ftrace_function_return(&trace);
 