Diffstat (limited to 'kernel/fork.c')
-rw-r--r--	kernel/fork.c	51
1 files changed, 17 insertions, 34 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index be022c200da6..e6c04d462ab2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -17,7 +17,6 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/completion.h>
-#include <linux/mnt_namespace.h>
 #include <linux/personality.h>
 #include <linux/mempolicy.h>
 #include <linux/sem.h>
@@ -568,18 +567,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 	 * the value intact in a core dump, and to save the unnecessary
 	 * trouble otherwise. Userland only wants this done for a sys_exit.
 	 */
-	if (tsk->clear_child_tid
-	    && !(tsk->flags & PF_SIGNALED)
-	    && atomic_read(&mm->mm_users) > 1) {
-		u32 __user * tidptr = tsk->clear_child_tid;
+	if (tsk->clear_child_tid) {
+		if (!(tsk->flags & PF_SIGNALED) &&
+		    atomic_read(&mm->mm_users) > 1) {
+			/*
+			 * We don't check the error code - if userspace has
+			 * not set up a proper pointer then tough luck.
+			 */
+			put_user(0, tsk->clear_child_tid);
+			sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
+					1, NULL, NULL, 0);
+		}
 		tsk->clear_child_tid = NULL;
-
-		/*
-		 * We don't check the error code - if userspace has
-		 * not set up a proper pointer then tough luck.
-		 */
-		put_user(0, tidptr);
-		sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
 	}
 }
 
@@ -816,11 +815,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
 	struct signal_struct *sig;
 
-	if (clone_flags & CLONE_THREAD) {
-		atomic_inc(&current->signal->count);
-		atomic_inc(&current->signal->live);
+	if (clone_flags & CLONE_THREAD)
 		return 0;
-	}
 
 	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
 	tsk->signal = sig;
@@ -878,16 +874,6 @@ void __cleanup_signal(struct signal_struct *sig)
 	kmem_cache_free(signal_cachep, sig);
 }
 
-static void cleanup_signal(struct task_struct *tsk)
-{
-	struct signal_struct *sig = tsk->signal;
-
-	atomic_dec(&sig->live);
-
-	if (atomic_dec_and_test(&sig->count))
-		__cleanup_signal(sig);
-}
-
 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long new_flags = p->flags;
@@ -1029,7 +1015,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->vfork_done = NULL;
 	spin_lock_init(&p->alloc_lock);
 
-	clear_tsk_thread_flag(p, TIF_SIGPENDING);
 	init_sigpending(&p->pending);
 
 	p->utime = cputime_zero;
@@ -1241,6 +1226,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	}
 
 	if (clone_flags & CLONE_THREAD) {
+		atomic_inc(&current->signal->count);
+		atomic_inc(&current->signal->live);
 		p->group_leader = current->group_leader;
 		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
 	}
@@ -1270,6 +1257,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	write_unlock_irq(&tasklist_lock);
 	proc_fork_connector(p);
 	cgroup_post_fork(p);
+	perf_counter_fork(p);
 	return p;
 
 bad_fork_free_pid:
@@ -1283,7 +1271,8 @@ bad_fork_cleanup_mm:
 	if (p->mm)
 		mmput(p->mm);
 bad_fork_cleanup_signal:
-	cleanup_signal(p);
+	if (!(clone_flags & CLONE_THREAD))
+		__cleanup_signal(p->signal);
 bad_fork_cleanup_sighand:
 	__cleanup_sighand(p->sighand);
 bad_fork_cleanup_fs:
@@ -1409,12 +1398,6 @@ long do_fork(unsigned long clone_flags,
 	if (clone_flags & CLONE_VFORK) {
 		p->vfork_done = &vfork;
 		init_completion(&vfork);
-	} else if (!(clone_flags & CLONE_VM)) {
-		/*
-		 * vfork will do an exec which will call
-		 * set_task_comm()
-		 */
-		perf_counter_fork(p);
 	}
 
 	audit_finish_fork(p);