diff options
author | Frederic Weisbecker <fweisbec@gmail.com> | 2008-11-23 00:22:56 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-11-23 03:17:26 -0500 |
commit | f201ae2356c74bcae130b2177b3dca903ea98071 (patch) | |
tree | c4b1b43fbe0a4594cb86749b2e7098fe15eb86ba /arch | |
parent | a0a70c735ef714fe1b6777b571630c3d50c7b008 (diff) |
tracing/function-return-tracer: store return stack into task_struct and allocate it dynamically
Impact: use deeper function tracing depth safely
Some tests showed that function return tracing needed a more deeper depth
of function calls. But it could be unsafe to store these return addresses
to the stack.
So these arrays will now be allocated dynamically in the task_struct of the
current task, but only when the tracer is activated.
Typical scheme when tracer is activated:
- allocate a return stack for each task in the global task list.
- fork: allocate the return stack for the newly created task
- exit: free return stack of current
- idle init: same as fork
I chose a default depth of 50. I don't have overruns anymore.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/include/asm/ftrace.h | 1 | ||||
-rw-r--r-- | arch/x86/include/asm/thread_info.h | 29 | ||||
-rw-r--r-- | arch/x86/kernel/ftrace.c | 29 |
3 files changed, 15 insertions, 44 deletions
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index 2bb43b433e07..754a3e082f94 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h | |||
@@ -29,7 +29,6 @@ struct dyn_arch_ftrace { | |||
29 | #endif /* CONFIG_FUNCTION_TRACER */ | 29 | #endif /* CONFIG_FUNCTION_TRACER */ |
30 | 30 | ||
31 | #ifdef CONFIG_FUNCTION_RET_TRACER | 31 | #ifdef CONFIG_FUNCTION_RET_TRACER |
32 | #define FTRACE_RET_STACK_SIZE 20 | ||
33 | 32 | ||
34 | #ifndef __ASSEMBLY__ | 33 | #ifndef __ASSEMBLY__ |
35 | 34 | ||
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index e90e81ef6ab9..0921b4018c11 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h | |||
@@ -40,36 +40,8 @@ struct thread_info { | |||
40 | */ | 40 | */ |
41 | __u8 supervisor_stack[0]; | 41 | __u8 supervisor_stack[0]; |
42 | #endif | 42 | #endif |
43 | |||
44 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
45 | /* Index of current stored adress in ret_stack */ | ||
46 | int curr_ret_stack; | ||
47 | /* Stack of return addresses for return function tracing */ | ||
48 | struct ftrace_ret_stack ret_stack[FTRACE_RET_STACK_SIZE]; | ||
49 | /* | ||
50 | * Number of functions that haven't been traced | ||
51 | * because of depth overrun. | ||
52 | */ | ||
53 | atomic_t trace_overrun; | ||
54 | #endif | ||
55 | }; | 43 | }; |
56 | 44 | ||
57 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
58 | #define INIT_THREAD_INFO(tsk) \ | ||
59 | { \ | ||
60 | .task = &tsk, \ | ||
61 | .exec_domain = &default_exec_domain, \ | ||
62 | .flags = 0, \ | ||
63 | .cpu = 0, \ | ||
64 | .preempt_count = 1, \ | ||
65 | .addr_limit = KERNEL_DS, \ | ||
66 | .restart_block = { \ | ||
67 | .fn = do_no_restart_syscall, \ | ||
68 | }, \ | ||
69 | .curr_ret_stack = -1,\ | ||
70 | .trace_overrun = ATOMIC_INIT(0) \ | ||
71 | } | ||
72 | #else | ||
73 | #define INIT_THREAD_INFO(tsk) \ | 45 | #define INIT_THREAD_INFO(tsk) \ |
74 | { \ | 46 | { \ |
75 | .task = &tsk, \ | 47 | .task = &tsk, \ |
@@ -82,7 +54,6 @@ struct thread_info { | |||
82 | .fn = do_no_restart_syscall, \ | 54 | .fn = do_no_restart_syscall, \ |
83 | }, \ | 55 | }, \ |
84 | } | 56 | } |
85 | #endif | ||
86 | 57 | ||
87 | #define init_thread_info (init_thread_union.thread_info) | 58 | #define init_thread_info (init_thread_union.thread_info) |
88 | #define init_stack (init_thread_union.stack) | 59 | #define init_stack (init_thread_union.stack) |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 356bb1eb6e9a..bb137f7297ed 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -350,19 +350,21 @@ static int push_return_trace(unsigned long ret, unsigned long long time, | |||
350 | unsigned long func) | 350 | unsigned long func) |
351 | { | 351 | { |
352 | int index; | 352 | int index; |
353 | struct thread_info *ti = current_thread_info(); | 353 | |
354 | if (!current->ret_stack) | ||
355 | return -EBUSY; | ||
354 | 356 | ||
355 | /* The return trace stack is full */ | 357 | /* The return trace stack is full */ |
356 | if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) { | 358 | if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) { |
357 | atomic_inc(&ti->trace_overrun); | 359 | atomic_inc(¤t->trace_overrun); |
358 | return -EBUSY; | 360 | return -EBUSY; |
359 | } | 361 | } |
360 | 362 | ||
361 | index = ++ti->curr_ret_stack; | 363 | index = ++current->curr_ret_stack; |
362 | barrier(); | 364 | barrier(); |
363 | ti->ret_stack[index].ret = ret; | 365 | current->ret_stack[index].ret = ret; |
364 | ti->ret_stack[index].func = func; | 366 | current->ret_stack[index].func = func; |
365 | ti->ret_stack[index].calltime = time; | 367 | current->ret_stack[index].calltime = time; |
366 | 368 | ||
367 | return 0; | 369 | return 0; |
368 | } | 370 | } |
@@ -373,13 +375,12 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time, | |||
373 | { | 375 | { |
374 | int index; | 376 | int index; |
375 | 377 | ||
376 | struct thread_info *ti = current_thread_info(); | 378 | index = current->curr_ret_stack; |
377 | index = ti->curr_ret_stack; | 379 | *ret = current->ret_stack[index].ret; |
378 | *ret = ti->ret_stack[index].ret; | 380 | *func = current->ret_stack[index].func; |
379 | *func = ti->ret_stack[index].func; | 381 | *time = current->ret_stack[index].calltime; |
380 | *time = ti->ret_stack[index].calltime; | 382 | *overrun = atomic_read(¤t->trace_overrun); |
381 | *overrun = atomic_read(&ti->trace_overrun); | 383 | current->curr_ret_stack--; |
382 | ti->curr_ret_stack--; | ||
383 | } | 384 | } |
384 | 385 | ||
385 | /* | 386 | /* |