about summary refs log tree commit diff stats
path: root/kernel/exit.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/exit.c')
-rw-r--r-- kernel/exit.c | 149
1 file changed, 118 insertions(+), 31 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 2a9d98c641ac..d3ad54677f9c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -52,6 +52,11 @@
52 52
53static void exit_mm(struct task_struct * tsk); 53static void exit_mm(struct task_struct * tsk);
54 54
55static inline int task_detached(struct task_struct *p)
56{
57 return p->exit_signal == -1;
58}
59
55static void __unhash_process(struct task_struct *p) 60static void __unhash_process(struct task_struct *p)
56{ 61{
57 nr_threads--; 62 nr_threads--;
@@ -160,7 +165,7 @@ repeat:
160 zap_leader = 0; 165 zap_leader = 0;
161 leader = p->group_leader; 166 leader = p->group_leader;
162 if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) { 167 if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
163 BUG_ON(leader->exit_signal == -1); 168 BUG_ON(task_detached(leader));
164 do_notify_parent(leader, leader->exit_signal); 169 do_notify_parent(leader, leader->exit_signal);
165 /* 170 /*
166 * If we were the last child thread and the leader has 171 * If we were the last child thread and the leader has
@@ -170,7 +175,7 @@ repeat:
170 * do_notify_parent() will have marked it self-reaping in 175 * do_notify_parent() will have marked it self-reaping in
171 * that case. 176 * that case.
172 */ 177 */
173 zap_leader = (leader->exit_signal == -1); 178 zap_leader = task_detached(leader);
174 } 179 }
175 180
176 write_unlock_irq(&tasklist_lock); 181 write_unlock_irq(&tasklist_lock);
@@ -329,13 +334,11 @@ void __set_special_pids(struct pid *pid)
329 pid_t nr = pid_nr(pid); 334 pid_t nr = pid_nr(pid);
330 335
331 if (task_session(curr) != pid) { 336 if (task_session(curr) != pid) {
332 detach_pid(curr, PIDTYPE_SID); 337 change_pid(curr, PIDTYPE_SID, pid);
333 attach_pid(curr, PIDTYPE_SID, pid);
334 set_task_session(curr, nr); 338 set_task_session(curr, nr);
335 } 339 }
336 if (task_pgrp(curr) != pid) { 340 if (task_pgrp(curr) != pid) {
337 detach_pid(curr, PIDTYPE_PGID); 341 change_pid(curr, PIDTYPE_PGID, pid);
338 attach_pid(curr, PIDTYPE_PGID, pid);
339 set_task_pgrp(curr, nr); 342 set_task_pgrp(curr, nr);
340 } 343 }
341} 344}
@@ -557,6 +560,88 @@ void exit_fs(struct task_struct *tsk)
557 560
558EXPORT_SYMBOL_GPL(exit_fs); 561EXPORT_SYMBOL_GPL(exit_fs);
559 562
563#ifdef CONFIG_MM_OWNER
564/*
565 * Task p is exiting and it owned mm, lets find a new owner for it
566 */
567static inline int
568mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
569{
570 /*
571 * If there are other users of the mm and the owner (us) is exiting
572 * we need to find a new owner to take on the responsibility.
573 */
574 if (!mm)
575 return 0;
576 if (atomic_read(&mm->mm_users) <= 1)
577 return 0;
578 if (mm->owner != p)
579 return 0;
580 return 1;
581}
582
583void mm_update_next_owner(struct mm_struct *mm)
584{
585 struct task_struct *c, *g, *p = current;
586
587retry:
588 if (!mm_need_new_owner(mm, p))
589 return;
590
591 read_lock(&tasklist_lock);
592 /*
593 * Search in the children
594 */
595 list_for_each_entry(c, &p->children, sibling) {
596 if (c->mm == mm)
597 goto assign_new_owner;
598 }
599
600 /*
601 * Search in the siblings
602 */
603 list_for_each_entry(c, &p->parent->children, sibling) {
604 if (c->mm == mm)
605 goto assign_new_owner;
606 }
607
608 /*
609 * Search through everything else. We should not get
610 * here often
611 */
612 do_each_thread(g, c) {
613 if (c->mm == mm)
614 goto assign_new_owner;
615 } while_each_thread(g, c);
616
617 read_unlock(&tasklist_lock);
618 return;
619
620assign_new_owner:
621 BUG_ON(c == p);
622 get_task_struct(c);
623 /*
624 * The task_lock protects c->mm from changing.
625 * We always want mm->owner->mm == mm
626 */
627 task_lock(c);
628 /*
629 * Delay read_unlock() till we have the task_lock()
630 * to ensure that c does not slip away underneath us
631 */
632 read_unlock(&tasklist_lock);
633 if (c->mm != mm) {
634 task_unlock(c);
635 put_task_struct(c);
636 goto retry;
637 }
638 cgroup_mm_owner_callbacks(mm->owner, c);
639 mm->owner = c;
640 task_unlock(c);
641 put_task_struct(c);
642}
643#endif /* CONFIG_MM_OWNER */
644
560/* 645/*
561 * Turn us into a lazy TLB process if we 646 * Turn us into a lazy TLB process if we
562 * aren't already.. 647 * aren't already..
@@ -596,6 +681,7 @@ static void exit_mm(struct task_struct * tsk)
596 /* We don't want this task to be frozen prematurely */ 681 /* We don't want this task to be frozen prematurely */
597 clear_freeze_flag(tsk); 682 clear_freeze_flag(tsk);
598 task_unlock(tsk); 683 task_unlock(tsk);
684 mm_update_next_owner(mm);
599 mmput(mm); 685 mmput(mm);
600} 686}
601 687
@@ -610,7 +696,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
610 if (unlikely(traced)) { 696 if (unlikely(traced)) {
611 /* Preserve ptrace links if someone else is tracing this child. */ 697 /* Preserve ptrace links if someone else is tracing this child. */
612 list_del_init(&p->ptrace_list); 698 list_del_init(&p->ptrace_list);
613 if (p->parent != p->real_parent) 699 if (ptrace_reparented(p))
614 list_add(&p->ptrace_list, &p->real_parent->ptrace_children); 700 list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
615 } else { 701 } else {
616 /* If this child is being traced, then we're the one tracing it 702 /* If this child is being traced, then we're the one tracing it
@@ -634,18 +720,18 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
634 /* If this is a threaded reparent there is no need to 720 /* If this is a threaded reparent there is no need to
635 * notify anyone anything has happened. 721 * notify anyone anything has happened.
636 */ 722 */
637 if (p->real_parent->group_leader == father->group_leader) 723 if (same_thread_group(p->real_parent, father))
638 return; 724 return;
639 725
640 /* We don't want people slaying init. */ 726 /* We don't want people slaying init. */
641 if (p->exit_signal != -1) 727 if (!task_detached(p))
642 p->exit_signal = SIGCHLD; 728 p->exit_signal = SIGCHLD;
643 729
644 /* If we'd notified the old parent about this child's death, 730 /* If we'd notified the old parent about this child's death,
645 * also notify the new parent. 731 * also notify the new parent.
646 */ 732 */
647 if (!traced && p->exit_state == EXIT_ZOMBIE && 733 if (!traced && p->exit_state == EXIT_ZOMBIE &&
648 p->exit_signal != -1 && thread_group_empty(p)) 734 !task_detached(p) && thread_group_empty(p))
649 do_notify_parent(p, p->exit_signal); 735 do_notify_parent(p, p->exit_signal);
650 736
651 kill_orphaned_pgrp(p, father); 737 kill_orphaned_pgrp(p, father);
@@ -698,18 +784,18 @@ static void forget_original_parent(struct task_struct *father)
698 } else { 784 } else {
699 /* reparent ptraced task to its real parent */ 785 /* reparent ptraced task to its real parent */
700 __ptrace_unlink (p); 786 __ptrace_unlink (p);
701 if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 && 787 if (p->exit_state == EXIT_ZOMBIE && !task_detached(p) &&
702 thread_group_empty(p)) 788 thread_group_empty(p))
703 do_notify_parent(p, p->exit_signal); 789 do_notify_parent(p, p->exit_signal);
704 } 790 }
705 791
706 /* 792 /*
707 * if the ptraced child is a zombie with exit_signal == -1 793 * if the ptraced child is a detached zombie we must collect
708 * we must collect it before we exit, or it will remain 794 * it before we exit, or it will remain zombie forever since
709 * zombie forever since we prevented it from self-reap itself 795 * we prevented it from self-reap itself while it was being
710 * while it was being traced by us, to be able to see it in wait4. 796 * traced by us, to be able to see it in wait4.
711 */ 797 */
712 if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1)) 798 if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && task_detached(p)))
713 list_add(&p->ptrace_list, &ptrace_dead); 799 list_add(&p->ptrace_list, &ptrace_dead);
714 } 800 }
715 801
@@ -766,29 +852,30 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
766 * we have changed execution domain as these two values started 852 * we have changed execution domain as these two values started
767 * the same after a fork. 853 * the same after a fork.
768 */ 854 */
769 if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 && 855 if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
770 (tsk->parent_exec_id != tsk->real_parent->self_exec_id || 856 (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
771 tsk->self_exec_id != tsk->parent_exec_id) 857 tsk->self_exec_id != tsk->parent_exec_id) &&
772 && !capable(CAP_KILL)) 858 !capable(CAP_KILL))
773 tsk->exit_signal = SIGCHLD; 859 tsk->exit_signal = SIGCHLD;
774 860
775
776 /* If something other than our normal parent is ptracing us, then 861 /* If something other than our normal parent is ptracing us, then
777 * send it a SIGCHLD instead of honoring exit_signal. exit_signal 862 * send it a SIGCHLD instead of honoring exit_signal. exit_signal
778 * only has special meaning to our real parent. 863 * only has special meaning to our real parent.
779 */ 864 */
780 if (tsk->exit_signal != -1 && thread_group_empty(tsk)) { 865 if (!task_detached(tsk) && thread_group_empty(tsk)) {
781 int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD; 866 int signal = ptrace_reparented(tsk) ?
867 SIGCHLD : tsk->exit_signal;
782 do_notify_parent(tsk, signal); 868 do_notify_parent(tsk, signal);
783 } else if (tsk->ptrace) { 869 } else if (tsk->ptrace) {
784 do_notify_parent(tsk, SIGCHLD); 870 do_notify_parent(tsk, SIGCHLD);
785 } 871 }
786 872
787 state = EXIT_ZOMBIE; 873 state = EXIT_ZOMBIE;
788 if (tsk->exit_signal == -1 && likely(!tsk->ptrace)) 874 if (task_detached(tsk) && likely(!tsk->ptrace))
789 state = EXIT_DEAD; 875 state = EXIT_DEAD;
790 tsk->exit_state = state; 876 tsk->exit_state = state;
791 877
878 /* mt-exec, de_thread() is waiting for us */
792 if (thread_group_leader(tsk) && 879 if (thread_group_leader(tsk) &&
793 tsk->signal->notify_count < 0 && 880 tsk->signal->notify_count < 0 &&
794 tsk->signal->group_exit_task) 881 tsk->signal->group_exit_task)
@@ -1032,12 +1119,13 @@ asmlinkage long sys_exit(int error_code)
1032NORET_TYPE void 1119NORET_TYPE void
1033do_group_exit(int exit_code) 1120do_group_exit(int exit_code)
1034{ 1121{
1122 struct signal_struct *sig = current->signal;
1123
1035 BUG_ON(exit_code & 0x80); /* core dumps don't get here */ 1124 BUG_ON(exit_code & 0x80); /* core dumps don't get here */
1036 1125
1037 if (current->signal->flags & SIGNAL_GROUP_EXIT) 1126 if (signal_group_exit(sig))
1038 exit_code = current->signal->group_exit_code; 1127 exit_code = sig->group_exit_code;
1039 else if (!thread_group_empty(current)) { 1128 else if (!thread_group_empty(current)) {
1040 struct signal_struct *const sig = current->signal;
1041 struct sighand_struct *const sighand = current->sighand; 1129 struct sighand_struct *const sighand = current->sighand;
1042 spin_lock_irq(&sighand->siglock); 1130 spin_lock_irq(&sighand->siglock);
1043 if (signal_group_exit(sig)) 1131 if (signal_group_exit(sig))
@@ -1089,7 +1177,7 @@ static int eligible_child(enum pid_type type, struct pid *pid, int options,
1089 * Do not consider detached threads that are 1177 * Do not consider detached threads that are
1090 * not ptraced: 1178 * not ptraced:
1091 */ 1179 */
1092 if (p->exit_signal == -1 && !p->ptrace) 1180 if (task_detached(p) && !p->ptrace)
1093 return 0; 1181 return 0;
1094 1182
1095 /* Wait for all children (clone and not) if __WALL is set; 1183 /* Wait for all children (clone and not) if __WALL is set;
@@ -1179,8 +1267,7 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
1179 return 0; 1267 return 0;
1180 } 1268 }
1181 1269
1182 /* traced means p->ptrace, but not vice versa */ 1270 traced = ptrace_reparented(p);
1183 traced = (p->real_parent != p->parent);
1184 1271
1185 if (likely(!traced)) { 1272 if (likely(!traced)) {
1186 struct signal_struct *psig; 1273 struct signal_struct *psig;
@@ -1281,9 +1368,9 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
1281 * If it's still not detached after that, don't release 1368 * If it's still not detached after that, don't release
1282 * it now. 1369 * it now.
1283 */ 1370 */
1284 if (p->exit_signal != -1) { 1371 if (!task_detached(p)) {
1285 do_notify_parent(p, p->exit_signal); 1372 do_notify_parent(p, p->exit_signal);
1286 if (p->exit_signal != -1) { 1373 if (!task_detached(p)) {
1287 p->exit_state = EXIT_ZOMBIE; 1374 p->exit_state = EXIT_ZOMBIE;
1288 p = NULL; 1375 p = NULL;
1289 } 1376 }