| field | value | date |
|---|---|---|
| author | Ingo Molnar <mingo@kernel.org> | 2017-02-03 16:59:33 -0500 |
| committer | Ingo Molnar <mingo@kernel.org> | 2017-03-02 19:43:47 -0500 |
| commit | f3ac60671954c8d413532627b1be13a76f394c49 | |
| tree | 31357b110d793102098d3950c61d1ae86807fee7 /include/linux/sched/task_stack.h | |
| parent | d04b0ad37e4b6ac39a56c823ae76ab37cd044dc7 | |
sched/headers: Move task-stack related APIs from <linux/sched.h> to <linux/sched/task_stack.h>
Split out the task->stack related functionality, which is not really
part of the core scheduler APIs.
Only keep task_thread_info() because it's used by sched.h.
Update the code that uses those facilities.
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/sched/task_stack.h')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | include/linux/sched/task_stack.h | 104 |

1 file changed, 104 insertions, 0 deletions
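Not part of the patch itself, but for context on how the split is consumed: code that inspects a task's kernel stack now includes the dedicated header rather than getting these helpers implicitly from <linux/sched.h>. A minimal sketch, with a hypothetical caller (report_stack_base() is illustrative only, not from this commit):

```c
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>	/* task_stack_page() now lives here */

/* Hypothetical helper, not part of this patch. */
static void report_stack_base(struct task_struct *tsk)
{
	pr_info("%s/%d: kernel stack at %p\n",
		tsk->comm, tsk->pid, task_stack_page(tsk));
}
```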
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index d2d578e85f7d..aaa5c2a6a0e9 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -1,7 +1,111 @@
 #ifndef _LINUX_SCHED_TASK_STACK_H
 #define _LINUX_SCHED_TASK_STACK_H
 
+/*
+ * task->stack (kernel stack) handling interfaces:
+ */
+
 #include <linux/sched.h>
 #include <linux/magic.h>
 
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+
+/*
+ * When accessing the stack of a non-current task that might exit, use
+ * try_get_task_stack() instead.  task_stack_page will return a pointer
+ * that could get freed out from under you.
+ */
+static inline void *task_stack_page(const struct task_struct *task)
+{
+	return task->stack;
+}
+
+#define setup_thread_stack(new,old)	do { } while(0)
+
+static inline unsigned long *end_of_stack(const struct task_struct *task)
+{
+	return task->stack;
+}
+
+#elif !defined(__HAVE_THREAD_FUNCTIONS)
+
+#define task_stack_page(task)	((void *)(task)->stack)
+
+static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
+{
+	*task_thread_info(p) = *task_thread_info(org);
+	task_thread_info(p)->task = p;
+}
+
+/*
+ * Return the address of the last usable long on the stack.
+ *
+ * When the stack grows down, this is just above the thread
+ * info struct. Going any lower will corrupt the threadinfo.
+ *
+ * When the stack grows up, this is the highest address.
+ * Beyond that position, we corrupt data on the next page.
+ */
+static inline unsigned long *end_of_stack(struct task_struct *p)
+{
+#ifdef CONFIG_STACK_GROWSUP
+	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
+#else
+	return (unsigned long *)(task_thread_info(p) + 1);
+#endif
+}
+
+#endif
+
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+static inline void *try_get_task_stack(struct task_struct *tsk)
+{
+	return atomic_inc_not_zero(&tsk->stack_refcount) ?
+		task_stack_page(tsk) : NULL;
+}
+
+extern void put_task_stack(struct task_struct *tsk);
+#else
+static inline void *try_get_task_stack(struct task_struct *tsk)
+{
+	return task_stack_page(tsk);
+}
+
+static inline void put_task_stack(struct task_struct *tsk) {}
+#endif
+
+#define task_stack_end_corrupted(task) \
+		(*(end_of_stack(task)) != STACK_END_MAGIC)
+
+static inline int object_is_on_stack(void *obj)
+{
+	void *stack = task_stack_page(current);
+
+	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
+}
+
+extern void thread_stack_cache_init(void);
+
+#ifdef CONFIG_DEBUG_STACK_USAGE
+static inline unsigned long stack_not_used(struct task_struct *p)
+{
+	unsigned long *n = end_of_stack(p);
+
+	do { 	/* Skip over canary */
+# ifdef CONFIG_STACK_GROWSUP
+		n--;
+# else
+		n++;
+# endif
+	} while (!*n);
+
+# ifdef CONFIG_STACK_GROWSUP
+	return (unsigned long)end_of_stack(p) - (unsigned long)n;
+# else
+	return (unsigned long)n - (unsigned long)end_of_stack(p);
+# endif
+}
+#endif
+extern void set_task_stack_end_magic(struct task_struct *tsk);
+
 #endif /* _LINUX_SCHED_TASK_STACK_H */
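The comment above task_stack_page() spells out the intended pattern for non-current tasks: pin the stack with try_get_task_stack(), use it, then release it with put_task_stack(). A sketch of such a caller; read_stack_end_word() and its purpose are purely illustrative, not part of this patch:

```c
#include <linux/sched.h>
#include <linux/sched/task_stack.h>

/* Hypothetical caller: read the canary word of another task's stack.
 * With CONFIG_THREAD_INFO_IN_TASK, try_get_task_stack() takes a reference
 * on tsk->stack_refcount so the stack cannot be freed underneath us;
 * otherwise it simply returns task_stack_page(tsk). */
static unsigned long read_stack_end_word(struct task_struct *tsk)
{
	void *stack = try_get_task_stack(tsk);
	unsigned long word;

	if (!stack)
		return 0;		/* task exited; its stack is gone */

	word = *end_of_stack(tsk);	/* STACK_END_MAGIC unless overflowed */

	put_task_stack(tsk);		/* balance try_get_task_stack() */
	return word;
}
```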
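end_of_stack() and STACK_END_MAGIC also back the overflow canary: set_task_stack_end_magic(), declared at the bottom of the header, writes the magic word at the stack end when a task is set up, and task_stack_end_corrupted() later reports whether it was overwritten. A hedged sketch of a check in that style; the helper below is hypothetical:

```c
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>

/* Hypothetical overflow check: if the word at end_of_stack() no longer
 * equals STACK_END_MAGIC, some frame has grown past the end of the stack. */
static void check_task_for_overflow(struct task_struct *tsk)
{
	if (task_stack_end_corrupted(tsk))
		pr_err("suspected kernel stack overflow in %s/%d\n",
		       tsk->comm, tsk->pid);
}
```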
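object_is_on_stack() only consults current's stack (it compares against task_stack_page(current) plus THREAD_SIZE), which makes it suitable for "is this buffer stack-allocated?" sanity checks. An illustrative, hypothetical use:

```c
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>

/* Hypothetical check: warn if a buffer handed to some asynchronous user
 * lives on the current task's kernel stack and may vanish when we return. */
static void warn_if_stack_buffer(void *buf)
{
	if (object_is_on_stack(buf))
		pr_warn("buffer %p is on the current kernel stack\n", buf);
}
```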
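Under CONFIG_DEBUG_STACK_USAGE, stack_not_used() walks from end_of_stack() toward the live frames until it meets the first non-zero word and returns the number of never-touched bytes. A sketch of a debug report built on it; report_stack_headroom() is hypothetical:

```c
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>

#ifdef CONFIG_DEBUG_STACK_USAGE
/* Hypothetical debug helper: how much of the THREAD_SIZE stack was never used? */
static void report_stack_headroom(struct task_struct *tsk)
{
	unsigned long unused = stack_not_used(tsk);

	pr_debug("%s/%d: %lu of %lu stack bytes never written\n",
		 tsk->comm, tsk->pid, unused, (unsigned long)THREAD_SIZE);
}
#endif
```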