author		Frederic Weisbecker <fweisbec@gmail.com>	2008-11-23 00:22:56 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-11-23 03:17:26 -0500
commit		f201ae2356c74bcae130b2177b3dca903ea98071 (patch)
tree		c4b1b43fbe0a4594cb86749b2e7098fe15eb86ba /include/linux
parent		a0a70c735ef714fe1b6777b571630c3d50c7b008 (diff)
tracing/function-return-tracer: store return stack into task_struct and allocate it dynamically
Impact: use deeper function tracing depth safely

Some tests showed that function return tracing needed a deeper depth of
function calls. But it could be unsafe to store these return addresses
on the stack. So these arrays will now be allocated dynamically into
task_struct of current, and only when the tracer is activated.

Typical scheme when the tracer is activated:
- allocate a return stack for each task in the global list
- fork: allocate the return stack for the newly created task
- exit: free the return stack of current
- idle init: same as fork

I chose a default depth of 50. I don't have overruns anymore.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
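The fork/exit hooks named above live outside include/linux (in
kernel/trace/ftrace.c in the full commit), so this diff only shows their
declarations. Below is a minimal sketch of what the two helpers do; the
atomic ftrace_retfunc_active flag tracking tracer activation is an
assumption, and error handling is simplified.

/*
 * Sketch only -- the real bodies are not part of this include/linux
 * diff. ftrace_retfunc_active is an assumed flag set while the
 * tracer is registered.
 */

/* fork path: give the new task its own return stack if tracing is on */
void ftrace_retfunc_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_retfunc_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH *
				       sizeof(struct ftrace_ret_stack),
				       GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;		/* empty stack */
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

/* exit path: release the return stack of the dying task */
void ftrace_retfunc_exit_task(struct task_struct *t)
{
	kfree(t->ret_stack);
	t->ret_stack = NULL;
}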
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/ftrace.h	5
-rw-r--r--	include/linux/sched.h	23
2 files changed, 16 insertions, 12 deletions
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f7ba4ea5e128..2ba259b2defa 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -323,6 +323,8 @@ struct ftrace_retfunc {
 };
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
+#define FTRACE_RETFUNC_DEPTH 50
+#define FTRACE_RETSTACK_ALLOC_SIZE 32
 /* Type of a callback handler of tracing return function */
 typedef void (*trace_function_return_t)(struct ftrace_retfunc *);
 
@@ -330,6 +332,9 @@ extern int register_ftrace_return(trace_function_return_t func);
 /* The current handler in use */
 extern trace_function_return_t ftrace_function_return;
 extern void unregister_ftrace_return(void);
+
+extern void ftrace_retfunc_init_task(struct task_struct *t);
+extern void ftrace_retfunc_exit_task(struct task_struct *t);
 #endif
 
 #endif /* _LINUX_FTRACE_H */
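Besides the two task hooks, the header carries the registration API for
the return tracer. A hedged usage sketch follows; the field names of
struct ftrace_retfunc (func, ret, calltime, rettime) are assumptions,
since the struct body sits just above the hunk shown here.

/*
 * Sketch of a client of the registration API above; field names of
 * struct ftrace_retfunc are assumed, not confirmed by this diff.
 */
static void my_return_handler(struct ftrace_retfunc *trace)
{
	/* e.g. how long the traced function ran before returning */
	unsigned long long duration = trace->rettime - trace->calltime;

	pr_info("func %lx returned to %lx after %llu ns\n",
		trace->func, trace->ret, duration);
}

static int __init my_tracer_init(void)
{
	/* Only one handler is active at a time: it is stored in
	 * ftrace_function_return, declared above. */
	return register_ftrace_return(my_return_handler);
}

static void __exit my_tracer_exit(void)
{
	unregister_ftrace_return();
}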
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c8e0db464206..bee1e93c95ad 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1352,6 +1352,17 @@ struct task_struct {
 	unsigned long default_timer_slack_ns;
 
 	struct list_head *scm_work_list;
+#ifdef CONFIG_FUNCTION_RET_TRACER
+	/* Index of current stored address in ret_stack */
+	int curr_ret_stack;
+	/* Stack of return addresses for return function tracing */
+	struct ftrace_ret_stack	*ret_stack;
+	/*
+	 * Number of functions that haven't been traced
+	 * because of depth overrun.
+	 */
+	atomic_t trace_overrun;
+#endif
 };
 
 /*
@@ -2006,18 +2017,6 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct
 {
 	*task_thread_info(p) = *task_thread_info(org);
 	task_thread_info(p)->task = p;
-
-#ifdef CONFIG_FUNCTION_RET_TRACER
-	/*
-	 * When fork() creates a child process, this function is called.
-	 * But the child task may not inherit the return addresses traced
-	 * by the return function tracer because it will directly execute
-	 * in userspace and will not return to kernel functions its parent
-	 * used.
-	 */
-	task_thread_info(p)->curr_ret_stack = -1;
-	atomic_set(&task_thread_info(p)->trace_overrun, 0);
-#endif
 }
 
 static inline unsigned long *end_of_stack(struct task_struct *p)
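The three fields added to task_struct above are driven from the arch
entry code at every traced call. A minimal sketch of that push follows;
the ftrace_ret_stack member names (ret, func, calltime) are assumed
from the x86 side of this series, which this include/linux diff does
not show.

/*
 * Sketch of the per-call push onto the new per-task return stack.
 * Member names of struct ftrace_ret_stack are assumptions.
 */
static int push_return_trace(unsigned long ret, unsigned long func,
			     unsigned long long time)
{
	int index;

	/* Task has no stack: the tracer was not active at fork time */
	if (!current->ret_stack)
		return -EBUSY;

	/* Stack full: count the overrun instead of overflowing */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	index = ++current->curr_ret_stack;
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = time;

	return 0;
}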