diff options
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 36 |
1 files changed, 24 insertions, 12 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index c8e0db464206..d02a0ca70ee9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -96,6 +96,7 @@ struct exec_domain; | |||
96 | struct futex_pi_state; | 96 | struct futex_pi_state; |
97 | struct robust_list_head; | 97 | struct robust_list_head; |
98 | struct bio; | 98 | struct bio; |
99 | struct bts_tracer; | ||
99 | 100 | ||
100 | /* | 101 | /* |
101 | * List of flags we want to share for kernel threads, | 102 | * List of flags we want to share for kernel threads, |
@@ -1161,6 +1162,18 @@ struct task_struct { | |||
1161 | struct list_head ptraced; | 1162 | struct list_head ptraced; |
1162 | struct list_head ptrace_entry; | 1163 | struct list_head ptrace_entry; |
1163 | 1164 | ||
1165 | #ifdef CONFIG_X86_PTRACE_BTS | ||
1166 | /* | ||
1167 | * This is the tracer handle for the ptrace BTS extension. | ||
1168 | * This field actually belongs to the ptracer task. | ||
1169 | */ | ||
1170 | struct bts_tracer *bts; | ||
1171 | /* | ||
1172 | * The buffer to hold the BTS data. | ||
1173 | */ | ||
1174 | void *bts_buffer; | ||
1175 | #endif /* CONFIG_X86_PTRACE_BTS */ | ||
1176 | |||
1164 | /* PID/PID hash table linkage. */ | 1177 | /* PID/PID hash table linkage. */ |
1165 | struct pid_link pids[PIDTYPE_MAX]; | 1178 | struct pid_link pids[PIDTYPE_MAX]; |
1166 | struct list_head thread_group; | 1179 | struct list_head thread_group; |
@@ -1352,6 +1365,17 @@ struct task_struct { | |||
1352 | unsigned long default_timer_slack_ns; | 1365 | unsigned long default_timer_slack_ns; |
1353 | 1366 | ||
1354 | struct list_head *scm_work_list; | 1367 | struct list_head *scm_work_list; |
1368 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
1369 | /* Index of current stored address in ret_stack */ ||
1370 | int curr_ret_stack; | ||
1371 | /* Stack of return addresses for return function tracing */ | ||
1372 | struct ftrace_ret_stack *ret_stack; | ||
1373 | /* | ||
1374 | * Number of functions that haven't been traced | ||
1375 | * because of depth overrun. | ||
1376 | */ | ||
1377 | atomic_t trace_overrun; | ||
1378 | #endif | ||
1355 | }; | 1379 | }; |
1356 | 1380 | ||
1357 | /* | 1381 | /* |
@@ -2006,18 +2030,6 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct | |||
2006 | { | 2030 | { |
2007 | *task_thread_info(p) = *task_thread_info(org); | 2031 | *task_thread_info(p) = *task_thread_info(org); |
2008 | task_thread_info(p)->task = p; | 2032 | task_thread_info(p)->task = p; |
2009 | |||
2010 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
2011 | /* | ||
2012 | * When fork() creates a child process, this function is called. | ||
2013 | * But the child task may not inherit the return addresses traced | ||
2014 | * by the return function tracer because it will directly execute | ||
2015 | * in userspace and will not return to kernel functions its parent | ||
2016 | * used. | ||
2017 | */ | ||
2018 | task_thread_info(p)->curr_ret_stack = -1; | ||
2019 | atomic_set(&task_thread_info(p)->trace_overrun, 0); | ||
2020 | #endif | ||
2021 | } | 2033 | } |
2022 | 2034 | ||
2023 | static inline unsigned long *end_of_stack(struct task_struct *p) | 2035 | static inline unsigned long *end_of_stack(struct task_struct *p) |