author		Frederic Weisbecker <fweisbec@gmail.com>	2008-11-23 00:22:56 -0500
committer	Ingo Molnar <mingo@elte.hu>			2008-11-23 03:17:26 -0500
commit		f201ae2356c74bcae130b2177b3dca903ea98071 (patch)
tree		c4b1b43fbe0a4594cb86749b2e7098fe15eb86ba /arch/x86/include/asm/thread_info.h
parent		a0a70c735ef714fe1b6777b571630c3d50c7b008 (diff)
tracing/function-return-tracer: store return stack into task_struct and allocate it dynamically
Impact: use deeper function tracing depth safely

Some tests showed that function return tracing needed a deeper depth of
function calls. But it could be unsafe to store these return addresses
on the stack. So these arrays will now be allocated dynamically into the
task_struct of current, and only when the tracer is activated.

Typical scheme when the tracer is activated:
- allocate a return stack for each task in the global list
- fork: allocate the return stack for the newly created task
- exit: free the return stack of current
- idle init: same as fork

I chose a default depth of 50. I don't have overruns anymore.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
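[Editor's note: the life cycle above boils down to a pair of per-task
init/teardown helpers around a kmalloc'ed array. A minimal sketch in
kernel-style C follows; the helper names, the constant name, and the exact
placement of the fields on task_struct are assumptions for illustration,
not the verbatim patch — only the scheme itself comes from the commit
message.]

	#include <linux/sched.h>
	#include <linux/slab.h>

	#define FTRACE_RETFUNC_DEPTH	50	/* default depth chosen above */

	/*
	 * Hypothetical helper: called when the tracer is activated (for every
	 * task in the global list) and again at fork time for each newly
	 * created task.
	 */
	static int ret_stack_init_task(struct task_struct *t)
	{
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH *
				       sizeof(struct ftrace_ret_stack),
				       GFP_KERNEL);
		if (!t->ret_stack)
			return -ENOMEM;
		t->curr_ret_stack = -1;			/* return stack starts empty */
		atomic_set(&t->trace_overrun, 0);	/* no dropped entries yet */
		return 0;
	}

	/* Hypothetical helper: called at exit time to free the return stack
	 * of current. */
	static void ret_stack_exit_task(struct task_struct *t)
	{
		struct ftrace_ret_stack *stack = t->ret_stack;

		t->ret_stack = NULL;	/* unpublish before freeing */
		barrier();
		kfree(stack);
	}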
Diffstat (limited to 'arch/x86/include/asm/thread_info.h')
-rw-r--r--	arch/x86/include/asm/thread_info.h	| 29
1 file changed, 0 insertions(+), 29 deletions(-)
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index e90e81ef6ab9..0921b4018c11 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -40,36 +40,8 @@ struct thread_info {
 	 */
 	__u8			supervisor_stack[0];
 #endif
-
-#ifdef CONFIG_FUNCTION_RET_TRACER
-	/* Index of current stored adress in ret_stack */
-	int		curr_ret_stack;
-	/* Stack of return addresses for return function tracing */
-	struct ftrace_ret_stack	ret_stack[FTRACE_RET_STACK_SIZE];
-	/*
-	 * Number of functions that haven't been traced
-	 * because of depth overrun.
-	 */
-	atomic_t	trace_overrun;
-#endif
 };
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-#define INIT_THREAD_INFO(tsk)			\
-{						\
-	.task		= &tsk,			\
-	.exec_domain	= &default_exec_domain,	\
-	.flags		= 0,			\
-	.cpu		= 0,			\
-	.preempt_count	= 1,			\
-	.addr_limit	= KERNEL_DS,		\
-	.restart_block	= {			\
-		.fn = do_no_restart_syscall,	\
-	},					\
-	.curr_ret_stack = -1,\
-	.trace_overrun	= ATOMIC_INIT(0)	\
-}
-#else
 #define INIT_THREAD_INFO(tsk)			\
 {						\
 	.task		= &tsk,			\
@@ -82,7 +54,6 @@ struct thread_info {
 		.fn = do_no_restart_syscall,	\
 	},					\
 }
-#endif
 
 #define init_thread_info	(init_thread_union.thread_info)
 #define init_stack		(init_thread_union.stack)
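[Editor's note: this per-file view only shows deletions — nothing replaces
the removed fields inside thread_info. Per the commit title, the state moves
into the generic task_struct in the scheduler headers touched by the same
commit. A sketch of the destination, assuming the layout implied by the
commit message, where the fixed FTRACE_RET_STACK_SIZE array becomes a
pointer so tasks not being traced pay no memory cost:]

	/* Hypothetical excerpt of struct task_struct after this commit; the
	 * three fields mirror the ones deleted from thread_info above. */
	struct task_struct {
		/* ... all other fields unchanged ... */
	#ifdef CONFIG_FUNCTION_RET_TRACER
		/* Index of current stored address in ret_stack */
		int			curr_ret_stack;
		/* Stack of return addresses for return function tracing;
		 * now allocated dynamically, NULL while the tracer is off */
		struct ftrace_ret_stack	*ret_stack;
		/* Number of functions that haven't been traced
		 * because of depth overrun */
		atomic_t		trace_overrun;
	#endif
	};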