diff options
author | Akinobu Mita <akinobu.mita@gmail.com> | 2012-07-30 17:42:33 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-07-30 20:25:20 -0400 |
commit | f19b9f74b7ea3b21ddcee55d852a6488239608a4 (patch) | |
tree | d57515cb110bc8b30043e1ab67c962ab78898e2f /kernel/fork.c | |
parent | 87bec58a52652e2eb2a575692a40f9466c7bd31b (diff) |
fork: fix error handling in dup_task_struct()
The function dup_task_struct() may fail at the following function calls,
in the following order.
0) alloc_task_struct_node()
1) alloc_thread_info_node()
2) arch_dup_task_struct()
An error at 0) is not a problem; the function can simply return. But an
error at 1) requires releasing the task_struct allocated at 0) before it
returns. Likewise, an error at 2) requires releasing both the task_struct
and the thread_info allocated at 0) and 1).
The existing error handling calls free_task_struct() and
free_thread_info(), which not only release task_struct and thread_info,
but also call the architecture-specific arch_release_task_struct() and
arch_release_thread_info().
The problem is that task_struct and thread_info are not fully initialized
yet at this point, but arch_release_task_struct() and
arch_release_thread_info() are called with them.
For example, x86 defines its own arch_release_task_struct() that releases
a task_xstate. If alloc_thread_info_node() fails in dup_task(),
arch_release_task_struct() is called with task_struct which is just
allocated and filled with garbage in this error handling.
This actually happened with tools/testing/fault-injection/failcmd.sh
# env FAILCMD_TYPE=fail_page_alloc \
./tools/testing/fault-injection/failcmd.sh --times=100 \
--min-order=0 --ignore-gfp-wait=0 \
-- make -C tools/testing/selftests/ run_tests
In order to fix this issue, make free_{task_struct,thread_info}() not
call arch_release_{task_struct,thread_info}(), and instead call
arch_release_{task_struct,thread_info}() explicitly in free_task(),
where the structures are fully initialized.
The default arch_release_task_struct() and arch_release_thread_info()
are defined as empty, so this change only affects the architectures
which implement their own arch_release_task_struct() or
arch_release_thread_info(), as listed below.
arch_release_task_struct(): x86, sh
arch_release_thread_info(): mn10300, tile
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Salman Qazi <sqazi@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/fork.c')
-rw-r--r-- | kernel/fork.c | 27 |
1 files changed, 15 insertions, 12 deletions
diff --git a/kernel/fork.c b/kernel/fork.c index 088025bd1925..8efac1fe56bc 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -114,6 +114,10 @@ int nr_processes(void) | |||
114 | return total; | 114 | return total; |
115 | } | 115 | } |
116 | 116 | ||
117 | void __weak arch_release_task_struct(struct task_struct *tsk) | ||
118 | { | ||
119 | } | ||
120 | |||
117 | #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR | 121 | #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR |
118 | static struct kmem_cache *task_struct_cachep; | 122 | static struct kmem_cache *task_struct_cachep; |
119 | 123 | ||
@@ -122,17 +126,17 @@ static inline struct task_struct *alloc_task_struct_node(int node) | |||
122 | return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node); | 126 | return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node); |
123 | } | 127 | } |
124 | 128 | ||
125 | void __weak arch_release_task_struct(struct task_struct *tsk) { } | ||
126 | |||
127 | static inline void free_task_struct(struct task_struct *tsk) | 129 | static inline void free_task_struct(struct task_struct *tsk) |
128 | { | 130 | { |
129 | arch_release_task_struct(tsk); | ||
130 | kmem_cache_free(task_struct_cachep, tsk); | 131 | kmem_cache_free(task_struct_cachep, tsk); |
131 | } | 132 | } |
132 | #endif | 133 | #endif |
133 | 134 | ||
135 | void __weak arch_release_thread_info(struct thread_info *ti) | ||
136 | { | ||
137 | } | ||
138 | |||
134 | #ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR | 139 | #ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR |
135 | void __weak arch_release_thread_info(struct thread_info *ti) { } | ||
136 | 140 | ||
137 | /* | 141 | /* |
138 | * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a | 142 | * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a |
@@ -150,7 +154,6 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, | |||
150 | 154 | ||
151 | static inline void free_thread_info(struct thread_info *ti) | 155 | static inline void free_thread_info(struct thread_info *ti) |
152 | { | 156 | { |
153 | arch_release_thread_info(ti); | ||
154 | free_pages((unsigned long)ti, THREAD_SIZE_ORDER); | 157 | free_pages((unsigned long)ti, THREAD_SIZE_ORDER); |
155 | } | 158 | } |
156 | # else | 159 | # else |
@@ -164,7 +167,6 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, | |||
164 | 167 | ||
165 | static void free_thread_info(struct thread_info *ti) | 168 | static void free_thread_info(struct thread_info *ti) |
166 | { | 169 | { |
167 | arch_release_thread_info(ti); | ||
168 | kmem_cache_free(thread_info_cache, ti); | 170 | kmem_cache_free(thread_info_cache, ti); |
169 | } | 171 | } |
170 | 172 | ||
@@ -205,10 +207,12 @@ static void account_kernel_stack(struct thread_info *ti, int account) | |||
205 | void free_task(struct task_struct *tsk) | 207 | void free_task(struct task_struct *tsk) |
206 | { | 208 | { |
207 | account_kernel_stack(tsk->stack, -1); | 209 | account_kernel_stack(tsk->stack, -1); |
210 | arch_release_thread_info(tsk->stack); | ||
208 | free_thread_info(tsk->stack); | 211 | free_thread_info(tsk->stack); |
209 | rt_mutex_debug_task_free(tsk); | 212 | rt_mutex_debug_task_free(tsk); |
210 | ftrace_graph_exit_task(tsk); | 213 | ftrace_graph_exit_task(tsk); |
211 | put_seccomp_filter(tsk); | 214 | put_seccomp_filter(tsk); |
215 | arch_release_task_struct(tsk); | ||
212 | free_task_struct(tsk); | 216 | free_task_struct(tsk); |
213 | } | 217 | } |
214 | EXPORT_SYMBOL(free_task); | 218 | EXPORT_SYMBOL(free_task); |
@@ -298,14 +302,12 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) | |||
298 | return NULL; | 302 | return NULL; |
299 | 303 | ||
300 | ti = alloc_thread_info_node(tsk, node); | 304 | ti = alloc_thread_info_node(tsk, node); |
301 | if (!ti) { | 305 | if (!ti) |
302 | free_task_struct(tsk); | 306 | goto free_tsk; |
303 | return NULL; | ||
304 | } | ||
305 | 307 | ||
306 | err = arch_dup_task_struct(tsk, orig); | 308 | err = arch_dup_task_struct(tsk, orig); |
307 | if (err) | 309 | if (err) |
308 | goto out; | 310 | goto free_ti; |
309 | 311 | ||
310 | tsk->stack = ti; | 312 | tsk->stack = ti; |
311 | 313 | ||
@@ -333,8 +335,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) | |||
333 | 335 | ||
334 | return tsk; | 336 | return tsk; |
335 | 337 | ||
336 | out: | 338 | free_ti: |
337 | free_thread_info(ti); | 339 | free_thread_info(ti); |
340 | free_tsk: | ||
338 | free_task_struct(tsk); | 341 | free_task_struct(tsk); |
339 | return NULL; | 342 | return NULL; |
340 | } | 343 | } |