Diffstat (limited to 'kernel/fork.c')
 -rw-r--r--  kernel/fork.c  48
 1 file changed, 22 insertions, 26 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index f00e319d8376..3bd2280d79f6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -114,6 +114,10 @@ int nr_processes(void)
         return total;
 }
 
+void __weak arch_release_task_struct(struct task_struct *tsk)
+{
+}
+
 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
 static struct kmem_cache *task_struct_cachep;
 
@@ -122,17 +126,17 @@ static inline struct task_struct *alloc_task_struct_node(int node)
         return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
 }
 
-void __weak arch_release_task_struct(struct task_struct *tsk) { }
-
 static inline void free_task_struct(struct task_struct *tsk)
 {
-        arch_release_task_struct(tsk);
         kmem_cache_free(task_struct_cachep, tsk);
 }
 #endif
 
+void __weak arch_release_thread_info(struct thread_info *ti)
+{
+}
+
 #ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
-void __weak arch_release_thread_info(struct thread_info *ti) { }
 
 /*
  * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
@@ -150,7 +154,6 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
 
 static inline void free_thread_info(struct thread_info *ti)
 {
-        arch_release_thread_info(ti);
         free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
 }
 # else
@@ -164,7 +167,6 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
 
 static void free_thread_info(struct thread_info *ti)
 {
-        arch_release_thread_info(ti);
         kmem_cache_free(thread_info_cache, ti);
 }
 
@@ -205,10 +207,12 @@ static void account_kernel_stack(struct thread_info *ti, int account)
 void free_task(struct task_struct *tsk)
 {
         account_kernel_stack(tsk->stack, -1);
+        arch_release_thread_info(tsk->stack);
         free_thread_info(tsk->stack);
         rt_mutex_debug_task_free(tsk);
         ftrace_graph_exit_task(tsk);
         put_seccomp_filter(tsk);
+        arch_release_task_struct(tsk);
         free_task_struct(tsk);
 }
 EXPORT_SYMBOL(free_task);
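
Note: the hunks above replace the per-allocator no-op definitions of arch_release_task_struct() and arch_release_thread_info() with a single __weak default for each hook, and move the calls out of the free_*() helpers into free_task() itself. The following is a standalone userspace sketch of the weak-symbol mechanism this relies on, assuming GCC/Clang on an ELF target; the types and the free_task_sketch() wrapper are simplified stand-ins, not the kernel's code.

#include <stdio.h>

struct task_struct { int pid; };

/* Empty weak default, in the spirit of the kernel's
 * "void __weak arch_release_task_struct(...) { }".  An architecture that
 * needs cleanup supplies a strong definition in another object file and
 * the linker picks it over this one automatically. */
__attribute__((weak)) void arch_release_task_struct(struct task_struct *tsk)
{
        (void)tsk;        /* nothing to do in the generic case */
}

/* The caller always invokes the hook; whether it does anything depends on
 * whether a strong override was linked in. */
static void free_task_sketch(struct task_struct *tsk)
{
        arch_release_task_struct(tsk);
        printf("freed task %d\n", tsk->pid);
}

int main(void)
{
        struct task_struct t = { .pid = 1 };
        free_task_sketch(&t);
        return 0;
}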
@@ -298,23 +302,16 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
                 return NULL;
 
         ti = alloc_thread_info_node(tsk, node);
-        if (!ti) {
-                free_task_struct(tsk);
-                return NULL;
-        }
+        if (!ti)
+                goto free_tsk;
 
         err = arch_dup_task_struct(tsk, orig);
+        if (err)
+                goto free_ti;
 
-        /*
-         * We defer looking at err, because we will need this setup
-         * for the clean up path to work correctly.
-         */
         tsk->stack = ti;
-        setup_thread_stack(tsk, orig);
-
-        if (err)
-                goto out;
 
+        setup_thread_stack(tsk, orig);
         clear_user_return_notifier(tsk);
         clear_tsk_need_resched(tsk);
         stackend = end_of_stack(tsk);
@@ -338,8 +335,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 
         return tsk;
 
-out:
+free_ti:
         free_thread_info(ti);
+free_tsk:
         free_task_struct(tsk);
         return NULL;
 }
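
Note: the two dup_task_struct() hunks switch the error handling to labelled unwinding, so each failure jumps to the label that frees exactly what was allocated before it, and the deferred err check (with its explanatory comment) goes away. A self-contained sketch of that shape is below; malloc/free and arch_dup_sketch() stand in for the kernel allocators and arch_dup_task_struct(), and everything beyond the free_ti/free_tsk labels is made up for illustration.

#include <stdlib.h>

struct thread_info { char stack[64]; };
struct task_struct { struct thread_info *stack; };

/* Stand-in for arch_dup_task_struct(); returns 0 on success. */
static int arch_dup_sketch(struct task_struct *tsk)
{
        (void)tsk;
        return 0;
}

static struct task_struct *dup_task_struct_sketch(void)
{
        struct task_struct *tsk;
        struct thread_info *ti;
        int err;

        tsk = malloc(sizeof(*tsk));
        if (!tsk)
                return NULL;

        ti = malloc(sizeof(*ti));
        if (!ti)
                goto free_tsk;          /* only tsk exists so far */

        err = arch_dup_sketch(tsk);
        if (err)
                goto free_ti;           /* both allocations must be undone */

        tsk->stack = ti;
        return tsk;

free_ti:
        free(ti);
free_tsk:
        free(tsk);
        return NULL;
}

int main(void)
{
        struct task_struct *tsk = dup_task_struct_sketch();

        if (tsk) {
                free(tsk->stack);
                free(tsk);
        }
        return 0;
}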
@@ -383,16 +381,14 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                 struct file *file;
 
                 if (mpnt->vm_flags & VM_DONTCOPY) {
-                        long pages = vma_pages(mpnt);
-                        mm->total_vm -= pages;
                         vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
-                                                        -pages);
+                                                        -vma_pages(mpnt));
                         continue;
                 }
                 charge = 0;
                 if (mpnt->vm_flags & VM_ACCOUNT) {
-                        unsigned long len;
-                        len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
+                        unsigned long len = vma_pages(mpnt);
+
                         if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
                                 goto fail_nomem;
                         charge = len;
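
Note: the dup_mmap() hunk replaces the open-coded length computation with vma_pages(), which is the same (vm_end - vm_start) >> PAGE_SHIFT expression wrapped in a helper. A quick standalone illustration of that equivalence follows; the struct and the PAGE_SHIFT value of 12 (4 KiB pages) are assumptions for the sketch, not the kernel's definitions.

#include <assert.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB pages for the sketch */

struct vm_area_struct { unsigned long vm_start, vm_end; };

/* Same computation the removed line spelled out by hand. */
static unsigned long vma_pages(const struct vm_area_struct *vma)
{
        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

int main(void)
{
        struct vm_area_struct vma = { .vm_start = 0x1000, .vm_end = 0x5000 };

        /* 0x5000 - 0x1000 = 0x4000 bytes = four 4 KiB pages */
        assert(vma_pages(&vma) == 4);
        return 0;
}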
@@ -1310,7 +1306,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_DEBUG_MUTEXES
         p->blocked_on = NULL; /* not blocked yet */
 #endif
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
         p->memcg_batch.do_batch = 0;
         p->memcg_batch.memcg = NULL;
 #endif
@@ -1420,7 +1416,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
          */
         p->group_leader = p;
         INIT_LIST_HEAD(&p->thread_group);
-        INIT_HLIST_HEAD(&p->task_works);
+        p->task_works = NULL;
 
         /* Now that the task is set up, run cgroup callbacks if
          * necessary. We need to run them before the task is visible
