Diffstat (limited to 'kernel/fork.c')
 kernel/fork.c | 75 ++++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 55 insertions(+), 20 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index b9372a0bff18..ad54c833116a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -34,6 +34,7 @@
 #include <linux/cgroup.h>
 #include <linux/security.h>
 #include <linux/hugetlb.h>
+#include <linux/seccomp.h>
 #include <linux/swap.h>
 #include <linux/syscalls.h>
 #include <linux/jiffies.h>
@@ -47,6 +48,7 @@
 #include <linux/audit.h>
 #include <linux/memcontrol.h>
 #include <linux/ftrace.h>
+#include <linux/proc_fs.h>
 #include <linux/profile.h>
 #include <linux/rmap.h>
 #include <linux/ksm.h>
@@ -111,32 +113,67 @@ int nr_processes(void)
 	return total;
 }
 
-#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
-# define alloc_task_struct_node(node)		\
-		kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node)
-# define free_task_struct(tsk)			\
-		kmem_cache_free(task_struct_cachep, (tsk))
+#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
 static struct kmem_cache *task_struct_cachep;
+
+static inline struct task_struct *alloc_task_struct_node(int node)
+{
+	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
+}
+
+void __weak arch_release_task_struct(struct task_struct *tsk) { }
+
+static inline void free_task_struct(struct task_struct *tsk)
+{
+	arch_release_task_struct(tsk);
+	kmem_cache_free(task_struct_cachep, tsk);
+}
 #endif
 
-#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
+#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
+void __weak arch_release_thread_info(struct thread_info *ti) { }
+
+/*
+ * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
+ * kmemcache based allocator.
+ */
+# if THREAD_SIZE >= PAGE_SIZE
 static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
 						  int node)
 {
-#ifdef CONFIG_DEBUG_STACK_USAGE
-	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
-#else
-	gfp_t mask = GFP_KERNEL;
-#endif
-	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);
+	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
+					     THREAD_SIZE_ORDER);
 
 	return page ? page_address(page) : NULL;
 }
 
 static inline void free_thread_info(struct thread_info *ti)
 {
+	arch_release_thread_info(ti);
 	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
 }
+# else
+static struct kmem_cache *thread_info_cache;
+
+static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+						  int node)
+{
+	return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
+}
+
+static void free_thread_info(struct thread_info *ti)
+{
+	arch_release_thread_info(ti);
+	kmem_cache_free(thread_info_cache, ti);
+}
+
+void thread_info_cache_init(void)
+{
+	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+					      THREAD_SIZE, 0, NULL);
+	BUG_ON(thread_info_cache == NULL);
+}
+# endif
 #endif
 
 /* SLAB cache for signal_struct structures (tsk->signal) */
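Note: THREADINFO_GFP is not defined in this file. The CONFIG_DEBUG_STACK_USAGE
conditional removed above is presumably folded into a single definition in
<linux/thread_info.h>, along these lines (a sketch reconstructed from the
removed lines, not the verbatim header):

#ifdef CONFIG_DEBUG_STACK_USAGE
# define THREADINFO_GFP		(GFP_KERNEL | __GFP_ZERO)
#else
# define THREADINFO_GFP		GFP_KERNEL
#endif

Either way, both the page-based and the kmem_cache-based allocators now take
their gfp flags from one place, and both free paths call the __weak
arch_release_thread_info() hook first, which an architecture can override
with a strong definition of its own.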
@@ -170,6 +207,7 @@ void free_task(struct task_struct *tsk)
 	free_thread_info(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
 	ftrace_graph_exit_task(tsk);
+	put_seccomp_filter(tsk);
 	free_task_struct(tsk);
 }
 EXPORT_SYMBOL(free_task);
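put_seccomp_filter() pairs with the get_seccomp_filter() call added to
copy_process() further down: the child inherits a reference to the parent's
refcounted seccomp filter, and free_task() drops it. A minimal sketch of that
get/put pattern, with illustrative field handling rather than the exact
kernel/seccomp.c implementation (the real put also walks the chain of
inherited ancestor filters):

void get_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *filter = tsk->seccomp.filter;

	if (filter)
		atomic_inc(&filter->usage);	/* one ref per task sharing it */
}

void put_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *filter = tsk->seccomp.filter;

	if (filter && atomic_dec_and_test(&filter->usage))
		kfree(filter);			/* last task out frees it */
}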
@@ -203,17 +241,11 @@ void __put_task_struct(struct task_struct *tsk)
 }
 EXPORT_SYMBOL_GPL(__put_task_struct);
 
-/*
- * macro override instead of weak attribute alias, to workaround
- * gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
- */
-#ifndef arch_task_cache_init
-#define arch_task_cache_init()
-#endif
+void __init __weak arch_task_cache_init(void) { }
 
 void __init fork_init(unsigned long mempages)
 {
-#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
+#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
 #ifndef ARCH_MIN_TASKALIGN
 #define ARCH_MIN_TASKALIGN L1_CACHE_BYTES
 #endif
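The removed comment explains why a macro override was used in the first
place: gcc 4.1.0 and 4.1.1 miscompiled empty weak functions. With that
workaround dropped, a plain __weak stub suffices. A standalone demo of the
pattern (compiles with gcc or clang; the function name is made up for
illustration):

#include <stdio.h>

/* Weak default: the linker uses it only when no strong definition of the
 * same symbol is linked in -- exactly how an architecture overrides
 * arch_task_cache_init(). */
__attribute__((weak)) void arch_cache_init_demo(void)
{
	printf("weak default stub ran\n");
}

int main(void)
{
	arch_cache_init_demo();
	return 0;
}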
@@ -1162,6 +1194,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		goto fork_out;
 
 	ftrace_graph_init_task(p);
+	get_seccomp_filter(p);
 
 	rt_mutex_init_task(p);
 
@@ -1464,6 +1497,8 @@ bad_fork_cleanup_io:
 	if (p->io_context)
 		exit_io_context(p);
 bad_fork_cleanup_namespaces:
+	if (unlikely(clone_flags & CLONE_NEWPID))
+		pid_ns_release_proc(p->nsproxy->pid_ns);
 	exit_task_namespaces(p);
 bad_fork_cleanup_mm:
 	if (p->mm)
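The two added lines balance the CLONE_NEWPID setup earlier in copy_process()
(outside this hunk), which prepares a procfs instance for the new PID
namespace roughly as follows (a paraphrase for context, not part of this
diff):

	if (clone_flags & CLONE_NEWPID) {
		retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
		if (retval < 0)
			goto bad_fork_free_pid;
	}

Without the matching pid_ns_release_proc() on the bad_fork_cleanup_namespaces
error path, a fork that fails after that point would leak the namespace's
proc mount.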