Diffstat (limited to 'kernel/fork.c')
-rw-r--r--  kernel/fork.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------------
 1 file changed, 52 insertions(+), 36 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index aeae5b11b62e..e7ceaca89609 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -80,7 +80,7 @@
  * Protected counters by write_lock_irq(&tasklist_lock)
  */
 unsigned long total_forks;      /* Handle normal Linux uptimes. */
-int nr_threads;                 /* The idle threads do not count.. */
+int nr_threads;                 /* The idle threads do not count.. */
 
 int max_threads;                /* tunable limit on nr_threads */
 
@@ -232,7 +232,7 @@ void __init fork_init(unsigned long mempages)
         /*
          * we need to allow at least 20 threads to boot a system
          */
-        if(max_threads < 20)
+        if (max_threads < 20)
                 max_threads = 20;
 
         init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
@@ -268,7 +268,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
                 return NULL;
         }
 
-        err = arch_dup_task_struct(tsk, orig);
+        err = arch_dup_task_struct(tsk, orig);
         if (err)
                 goto out;
 
@@ -288,9 +288,11 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
         tsk->stack_canary = get_random_int();
 #endif
 
-        /* One for us, one for whoever does the "release_task()" (usually parent) */
-        atomic_set(&tsk->usage,2);
-        atomic_set(&tsk->fs_excl, 0);
+        /*
+         * One for us, one for whoever does the "release_task()" (usually
+         * parent)
+         */
+        atomic_set(&tsk->usage, 2);
 #ifdef CONFIG_BLK_DEV_IO_TRACE
         tsk->btrace_seq = 0;
 #endif
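
The comment reflowed in this hunk states a real invariant, not just style: a freshly duplicated task starts with a usage count of two, one reference for the task itself and one for whoever later calls release_task() (usually the parent). A minimal user-space sketch of that "born with two references" convention, with plain C11 atomics and hypothetical obj_new()/obj_put() names rather than the kernel's task refcounting:

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
        atomic_int usage;
};

static struct obj *obj_new(void)
{
        struct obj *o = malloc(sizeof(*o));

        if (!o)
                return NULL;
        /* One for us, one for whoever releases the object later. */
        atomic_init(&o->usage, 2);
        return o;
}

static void obj_put(struct obj *o)
{
        /* Free only when the last reference is dropped. */
        if (atomic_fetch_sub(&o->usage, 1) == 1)
                free(o);
}

int main(void)
{
        struct obj *o = obj_new();

        if (!o)
                return 1;
        obj_put(o);     /* the releasing side's reference */
        obj_put(o);     /* our own reference */
        return 0;
}
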
@@ -438,7 +440,7 @@ fail_nomem:
         goto out;
 }
 
-static inline int mm_alloc_pgd(struct mm_struct * mm)
+static inline int mm_alloc_pgd(struct mm_struct *mm)
 {
         mm->pgd = pgd_alloc(mm);
         if (unlikely(!mm->pgd))
@@ -446,7 +448,7 @@ static inline int mm_alloc_pgd(struct mm_struct * mm)
         return 0;
 }
 
-static inline void mm_free_pgd(struct mm_struct * mm)
+static inline void mm_free_pgd(struct mm_struct *mm)
 {
         pgd_free(mm, mm->pgd);
 }
@@ -483,7 +485,7 @@ static void mm_init_aio(struct mm_struct *mm)
 #endif
 }
 
-static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
+static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
 {
         atomic_set(&mm->mm_users, 1);
         atomic_set(&mm->mm_count, 1);
@@ -514,9 +516,9 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
 /*
  * Allocate and initialize an mm_struct.
  */
-struct mm_struct * mm_alloc(void)
+struct mm_struct *mm_alloc(void)
 {
-        struct mm_struct * mm;
+        struct mm_struct *mm;
 
         mm = allocate_mm();
         if (!mm)
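
Most of the signature hunks here only move the asterisk: kernel style writes struct mm_struct *mm, binding the * to the name rather than the type. The convention is not purely cosmetic; in C the * belongs to each individual declarator, which the spaced-out "type * name" form tends to hide. A small stand-alone illustration:

#include <stdio.h>

int main(void)
{
        /* The '*' binds to each declarator, not to the type:
         * p is a pointer to int, but n is a plain int. */
        int *p, n = 42;

        p = &n;
        printf("%d\n", *p);     /* prints 42 */
        return 0;
}
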
@@ -584,7 +586,7 @@ void added_exe_file_vma(struct mm_struct *mm)
 void removed_exe_file_vma(struct mm_struct *mm)
 {
         mm->num_exe_file_vmas--;
-        if ((mm->num_exe_file_vmas == 0) && mm->exe_file){
+        if ((mm->num_exe_file_vmas == 0) && mm->exe_file) {
                 fput(mm->exe_file);
                 mm->exe_file = NULL;
         }
@@ -776,9 +778,9 @@ fail_nocontext:
         return NULL;
 }
 
-static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
+static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
 {
-        struct mm_struct * mm, *oldmm;
+        struct mm_struct *mm, *oldmm;
         int retval;
 
         tsk->min_flt = tsk->maj_flt = 0;
@@ -845,7 +847,7 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
         return 0;
 }
 
-static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
+static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
 {
         struct files_struct *oldf, *newf;
         int error = 0;
@@ -1167,13 +1169,17 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         cgroup_fork(p);
 #ifdef CONFIG_NUMA
         p->mempolicy = mpol_dup(p->mempolicy);
         if (IS_ERR(p->mempolicy)) {
                 retval = PTR_ERR(p->mempolicy);
                 p->mempolicy = NULL;
                 goto bad_fork_cleanup_cgroup;
         }
         mpol_fix_fork_child_flag(p);
 #endif
+#ifdef CONFIG_CPUSETS
+        p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
+        p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
+#endif
 #ifdef CONFIG_TRACE_IRQFLAGS
         p->irq_events = 0;
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
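
The four lines added under CONFIG_CPUSETS start both spread rotors at NUMA_NO_NODE, i.e. "no node chosen yet", so a task's first spread allocation can pick its own starting node instead of every child continuing from the parent's position. A rough user-space sketch of such a lazily seeded round-robin rotor (hypothetical next_spread_node()/NR_NODES names, not the kernel's cpuset code):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NUMA_NO_NODE    (-1)
#define NR_NODES        4

static int rotor = NUMA_NO_NODE;

/* Round-robin over nodes, seeding the rotor with a random
 * starting node on first use. */
static int next_spread_node(void)
{
        if (rotor == NUMA_NO_NODE)
                rotor = rand() % NR_NODES;
        else
                rotor = (rotor + 1) % NR_NODES;
        return rotor;
}

int main(void)
{
        srand((unsigned int)time(NULL));
        for (int i = 0; i < 8; i++)
                printf("allocation %d -> node %d\n", i, next_spread_node());
        return 0;
}
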
@@ -1213,25 +1219,33 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         retval = perf_event_init_task(p);
         if (retval)
                 goto bad_fork_cleanup_policy;
-
-        if ((retval = audit_alloc(p)))
+        retval = audit_alloc(p);
+        if (retval)
                 goto bad_fork_cleanup_policy;
         /* copy all the process information */
-        if ((retval = copy_semundo(clone_flags, p)))
+        retval = copy_semundo(clone_flags, p);
+        if (retval)
                 goto bad_fork_cleanup_audit;
-        if ((retval = copy_files(clone_flags, p)))
+        retval = copy_files(clone_flags, p);
+        if (retval)
                 goto bad_fork_cleanup_semundo;
-        if ((retval = copy_fs(clone_flags, p)))
+        retval = copy_fs(clone_flags, p);
+        if (retval)
                 goto bad_fork_cleanup_files;
-        if ((retval = copy_sighand(clone_flags, p)))
+        retval = copy_sighand(clone_flags, p);
+        if (retval)
                 goto bad_fork_cleanup_fs;
-        if ((retval = copy_signal(clone_flags, p)))
+        retval = copy_signal(clone_flags, p);
+        if (retval)
                 goto bad_fork_cleanup_sighand;
-        if ((retval = copy_mm(clone_flags, p)))
+        retval = copy_mm(clone_flags, p);
+        if (retval)
                 goto bad_fork_cleanup_signal;
-        if ((retval = copy_namespaces(clone_flags, p)))
+        retval = copy_namespaces(clone_flags, p);
+        if (retval)
                 goto bad_fork_cleanup_mm;
-        if ((retval = copy_io(clone_flags, p)))
+        retval = copy_io(clone_flags, p);
+        if (retval)
                 goto bad_fork_cleanup_namespaces;
         retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
         if (retval)
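
Everything this hunk touches is the same mechanical rewrite: each if ((retval = copy_xxx(...))) is split into a plain assignment followed by if (retval), the form checkpatch.pl and the kernel coding style prefer, while the goto ladder that unwinds earlier steps in reverse order is left intact. A compact stand-alone sketch of the resulting pattern, with hypothetical setup_a()/setup_b() steps:

#include <stdio.h>

/* Hypothetical setup steps; each returns 0 on success. */
static int setup_a(void) { return 0; }
static int setup_b(void) { return 0; }
static void undo_a(void) { puts("undo a"); }

static int setup_all(void)
{
        int retval;

        /* Assign first, then test, instead of burying the
         * assignment inside the if condition. */
        retval = setup_a();
        if (retval)
                goto out;
        retval = setup_b();
        if (retval)
                goto cleanup_a;         /* unwind in reverse order */
        return 0;

cleanup_a:
        undo_a();
out:
        return retval;
}

int main(void)
{
        return setup_all();
}
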
@@ -1253,7 +1267,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         /*
          * Clear TID on mm_release()?
          */
-        p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
+        p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
 #ifdef CONFIG_BLOCK
         p->plug = NULL;
 #endif
@@ -1321,7 +1335,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
          * it's process group.
          * A fatal signal pending means that current will exit, so the new
          * thread can't slip out of an OOM kill (or normal SIGKILL).
-         */
+         */
         recalc_sigpending();
         if (signal_pending(current)) {
                 spin_unlock(&current->sighand->siglock);
@@ -1682,12 +1696,14 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
          */
         if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
                 do_sysvsem = 1;
-        if ((err = unshare_fs(unshare_flags, &new_fs)))
+        err = unshare_fs(unshare_flags, &new_fs);
+        if (err)
                 goto bad_unshare_out;
-        if ((err = unshare_fd(unshare_flags, &new_fd)))
+        err = unshare_fd(unshare_flags, &new_fd);
+        if (err)
                 goto bad_unshare_cleanup_fs;
-        if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
-                        new_fs)))
+        err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, new_fs);
+        if (err)
                 goto bad_unshare_cleanup_fd;
 
         if (new_fs || new_fd || do_sysvsem || new_nsproxy) {