Diffstat (limited to 'kernel/exit.c')
-rw-r--r--	kernel/exit.c | 116
1 file changed, 49 insertions(+), 67 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 7f7959de4a87..2c704c86edb3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -44,7 +44,6 @@
 #include <linux/resource.h>
 #include <linux/blkdev.h>
 #include <linux/task_io_accounting_ops.h>
-#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -93,10 +92,9 @@ static void __exit_signal(struct task_struct *tsk)
	 * If there is any task waiting for the group exit
	 * then notify it:
	 */
-	if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
+	if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
 		wake_up_process(sig->group_exit_task);
-		sig->group_exit_task = NULL;
-	}
+
 	if (tsk == sig->curr_target)
 		sig->curr_target = next_thread(tsk);
 	/*
@@ -593,17 +591,6 @@ static void exit_mm(struct task_struct * tsk)
 	mmput(mm);
 }
 
-static inline void
-choose_new_parent(struct task_struct *p, struct task_struct *reaper)
-{
-	/*
-	 * Make sure we're not reparenting to ourselves and that
-	 * the parent is not a zombie.
-	 */
-	BUG_ON(p == reaper || reaper->exit_state);
-	p->real_parent = reaper;
-}
-
 static void
 reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
 {
@@ -711,7 +698,7 @@ forget_original_parent(struct task_struct *father, struct list_head *to_release)
 
 		if (father == p->real_parent) {
 			/* reparent with a reaper, real father it's us */
-			choose_new_parent(p, reaper);
+			p->real_parent = reaper;
 			reparent_thread(p, father, 0);
 		} else {
 			/* reparent ptraced task to its real parent */
@@ -732,7 +719,7 @@ forget_original_parent(struct task_struct *father, struct list_head *to_release)
 	}
 	list_for_each_safe(_p, _n, &father->ptrace_children) {
 		p = list_entry(_p, struct task_struct, ptrace_list);
-		choose_new_parent(p, reaper);
+		p->real_parent = reaper;
 		reparent_thread(p, father, 1);
 	}
 }
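
Both hunks above open-code the one assignment that the deleted choose_new_parent() helper wrapped; its BUG_ON() sanity check goes away with it. For readers outside the kernel tree, here is a minimal, hypothetical userspace model of the reparenting walk itself — the task type and child list are simplified stand-ins, not the kernel's data structures:

#include <stdio.h>

struct task {
	const char *comm;		/* task name */
	struct task *real_parent;
	struct task *children;		/* head of the child list */
	struct task *sibling;		/* next entry in the parent's child list */
};

/* Analogue of the list_for_each_safe() walk above: point every child's
 * real_parent at the reaper (the open-coded step the diff introduces). */
static void forget_original_parent(struct task *father, struct task *reaper)
{
	struct task *p, *n;

	for (p = father->children; p; p = n) {
		n = p->sibling;
		p->real_parent = reaper;
	}
}

int main(void)
{
	struct task init_task = { "init", NULL, NULL, NULL };
	struct task child = { "child", NULL, NULL, NULL };
	struct task father = { "father", &init_task, &child, NULL };

	child.real_parent = &father;
	forget_original_parent(&father, &init_task);
	printf("%s is now reparented to %s\n", child.comm, child.real_parent->comm);
	return 0;
}
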
@@ -759,13 +746,11 @@ static void exit_notify(struct task_struct *tsk)
		 * Now we'll wake all the threads in the group just to make
		 * sure someone gets all the pending signals.
		 */
-		read_lock(&tasklist_lock);
 		spin_lock_irq(&tsk->sighand->siglock);
 		for (t = next_thread(tsk); t != tsk; t = next_thread(t))
 			if (!signal_pending(t) && !(t->flags & PF_EXITING))
 				recalc_sigpending_and_wake(t);
 		spin_unlock_irq(&tsk->sighand->siglock);
-		read_unlock(&tasklist_lock);
 	}
 
 	write_lock_irq(&tasklist_lock);
@@ -793,9 +778,8 @@ static void exit_notify(struct task_struct *tsk)
	 * and we were the only connection outside, so our pgrp
	 * is about to become orphaned.
	 */
-
 	t = tsk->real_parent;
 
 	pgrp = task_pgrp(tsk);
 	if ((task_pgrp(t) != pgrp) &&
 	    (task_session(t) == task_session(tsk)) &&
@@ -842,6 +826,11 @@ static void exit_notify(struct task_struct *tsk)
 		state = EXIT_DEAD;
 	tsk->exit_state = state;
 
+	if (thread_group_leader(tsk) &&
+	    tsk->signal->notify_count < 0 &&
+	    tsk->signal->group_exit_task)
+		wake_up_process(tsk->signal->group_exit_task);
+
 	write_unlock_irq(&tasklist_lock);
 
 	list_for_each_safe(_p, _n, &ptrace_dead) {
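
The added wakeup pairs with the __exit_signal() change in the first hunk: a thread that wants the group to itself (the role de_thread() plays during exec, outside this diff) sleeps on signal->group_exit_task until enough threads have exited, and a negative notify_count marks the case where it must also wait for the group leader to reach exit_notify(). A rough userspace analogue of that counting handshake, assuming a plain counter and condition variable in place of signal->count, notify_count and wake_up_process():

#include <pthread.h>

struct thread_group {
	pthread_mutex_t lock;
	pthread_cond_t exited;		/* stand-in for the group_exit_task sleep */
	int live;			/* stand-in for signal->count */
	int notify_count;		/* threshold the waiter sleeps on */
};

static struct thread_group tg = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.exited = PTHREAD_COND_INITIALIZER,
	.live = 4,			/* e.g. four threads in the group */
};

/* Called by each exiting thread (analogue of __exit_signal()). */
void group_thread_exit(void)
{
	pthread_mutex_lock(&tg.lock);
	if (--tg.live == tg.notify_count)
		pthread_cond_signal(&tg.exited);	/* wake_up_process() */
	pthread_mutex_unlock(&tg.lock);
}

/* Called by the thread that wants the group to itself
 * (the role de_thread() plays during exec). */
void wait_for_group_exit(int remaining)
{
	pthread_mutex_lock(&tg.lock);
	tg.notify_count = remaining;
	while (tg.live > tg.notify_count)
		pthread_cond_wait(&tg.exited, &tg.lock);
	pthread_mutex_unlock(&tg.lock);
}

The condvar loop plays the part of the waiter's TASK_UNINTERRUPTIBLE sleep; in the kernel the exiting threads and the exit_notify() path above do the equivalent wake_up_process() calls.
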
@@ -883,6 +872,14 @@ static void check_stack_usage(void)
 static inline void check_stack_usage(void) {}
 #endif
 
+static inline void exit_child_reaper(struct task_struct *tsk)
+{
+	if (likely(tsk->group_leader != child_reaper(tsk)))
+		return;
+
+	panic("Attempted to kill init!");
+}
+
 fastcall NORET_TYPE void do_exit(long code)
 {
 	struct task_struct *tsk = current;
@@ -896,13 +893,6 @@ fastcall NORET_TYPE void do_exit(long code)
 		panic("Aiee, killing interrupt handler!");
 	if (unlikely(!tsk->pid))
 		panic("Attempted to kill the idle task!");
-	if (unlikely(tsk == child_reaper(tsk))) {
-		if (tsk->nsproxy->pid_ns != &init_pid_ns)
-			tsk->nsproxy->pid_ns->child_reaper = init_pid_ns.child_reaper;
-		else
-			panic("Attempted to kill init!");
-	}
-
 
 	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
 		current->ptrace_message = code;
@@ -932,13 +922,13 @@ fastcall NORET_TYPE void do_exit(long code)
 		schedule();
 	}
 
+	tsk->flags |= PF_EXITING;
 	/*
 	 * tsk->flags are checked in the futex code to protect against
 	 * an exiting task cleaning up the robust pi futexes.
 	 */
-	spin_lock_irq(&tsk->pi_lock);
-	tsk->flags |= PF_EXITING;
-	spin_unlock_irq(&tsk->pi_lock);
+	smp_mb();
+	spin_unlock_wait(&tsk->pi_lock);
 
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
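
The rewritten block replaces a lock/unlock pair around the flag update with a plain store, a full memory barrier, and spin_unlock_wait(): once the PF_EXITING store is globally visible, waiting for pi_lock to be free guarantees that any pi-state user who could have missed the flag has left its critical section, and later lockers will see the flag. A rough userspace sketch of the same handshake, assuming C11 atomics and a pthread mutex in place of the kernel primitives (a lock/unlock pair of the same mutex stands in for spin_unlock_wait()):

#include <pthread.h>
#include <stdatomic.h>

#define PF_EXITING 0x00000004

struct task {
	atomic_int flags;
	pthread_mutex_t pi_lock;
};

/* Exit path: publish PF_EXITING, then wait until any critical section
 * that could have started before the flag became visible has finished.
 * The seq_cst RMW stands in for the store plus smp_mb(). */
void task_exit_publish(struct task *t)
{
	atomic_fetch_or_explicit(&t->flags, PF_EXITING, memory_order_seq_cst);
	pthread_mutex_lock(&t->pi_lock);	/* ~ spin_unlock_wait() */
	pthread_mutex_unlock(&t->pi_lock);
}

/* Attach path (the futex code's side): take pi_lock and re-check the
 * flag under it. Either we see PF_EXITING and back off, or the exit
 * path above blocks until we drop the lock. */
int attach_pi_state(struct task *t)
{
	int ret = 0;

	pthread_mutex_lock(&t->pi_lock);
	if (atomic_load_explicit(&t->flags, memory_order_seq_cst) & PF_EXITING)
		ret = -1;	/* task is exiting; do not attach */
	/* ... otherwise attach state while still holding pi_lock ... */
	pthread_mutex_unlock(&t->pi_lock);
	return ret;
}
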
@@ -952,16 +942,19 @@ fastcall NORET_TYPE void do_exit(long code)
 	}
 	group_dead = atomic_dec_and_test(&tsk->signal->live);
 	if (group_dead) {
+		exit_child_reaper(tsk);
 		hrtimer_cancel(&tsk->signal->real_timer);
 		exit_itimers(tsk->signal);
 	}
 	acct_collect(code, group_dead);
+#ifdef CONFIG_FUTEX
 	if (unlikely(tsk->robust_list))
 		exit_robust_list(tsk);
-#if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
+#ifdef CONFIG_COMPAT
 	if (unlikely(tsk->compat_robust_list))
 		compat_exit_robust_list(tsk);
 #endif
+#endif
 	if (group_dead)
 		tty_audit_exit();
 	if (unlikely(tsk->audit_context))
@@ -996,6 +989,7 @@ fastcall NORET_TYPE void do_exit(long code)
 	mpol_free(tsk->mempolicy);
 	tsk->mempolicy = NULL;
 #endif
+#ifdef CONFIG_FUTEX
 	/*
 	 * This must happen late, after the PID is not
 	 * hashed anymore:
@@ -1004,6 +998,7 @@ fastcall NORET_TYPE void do_exit(long code)
 		exit_pi_state_list(tsk);
 	if (unlikely(current->pi_state_cache))
 		kfree(current->pi_state_cache);
+#endif
 	/*
 	 * Make sure we are holding no locks:
 	 */
@@ -1168,8 +1163,7 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
			    int __user *stat_addr, struct rusage __user *ru)
 {
 	unsigned long state;
-	int retval;
-	int status;
+	int retval, status, traced;
 
 	if (unlikely(noreap)) {
 		pid_t pid = p->pid;
@@ -1203,15 +1197,11 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
 		BUG_ON(state != EXIT_DEAD);
 		return 0;
 	}
-	if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) {
-		/*
-		 * This can only happen in a race with a ptraced thread
-		 * dying on another processor.
-		 */
-		return 0;
-	}
 
-	if (likely(p->real_parent == p->parent) && likely(p->signal)) {
+	/* traced means p->ptrace, but not vice versa */
+	traced = (p->real_parent != p->parent);
+
+	if (likely(!traced)) {
 		struct signal_struct *psig;
 		struct signal_struct *sig;
 
@@ -1298,35 +1288,30 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
 		retval = put_user(p->pid, &infop->si_pid);
 	if (!retval && infop)
 		retval = put_user(p->uid, &infop->si_uid);
-	if (retval) {
-		// TODO: is this safe?
-		p->exit_state = EXIT_ZOMBIE;
-		return retval;
-	}
-	retval = p->pid;
-	if (p->real_parent != p->parent) {
+	if (!retval)
+		retval = p->pid;
+
+	if (traced) {
 		write_lock_irq(&tasklist_lock);
-		/* Double-check with lock held. */
-		if (p->real_parent != p->parent) {
-			__ptrace_unlink(p);
-			// TODO: is this safe?
-			p->exit_state = EXIT_ZOMBIE;
-			/*
-			 * If this is not a detached task, notify the parent.
-			 * If it's still not detached after that, don't release
-			 * it now.
-			 */
-			if (p->exit_signal != -1) {
-				do_notify_parent(p, p->exit_signal);
-				if (p->exit_signal != -1)
-					p = NULL;
-			}
+		/* We dropped tasklist, ptracer could die and untrace */
+		ptrace_unlink(p);
+		/*
+		 * If this is not a detached task, notify the parent.
+		 * If it's still not detached after that, don't release
+		 * it now.
+		 */
+		if (p->exit_signal != -1) {
+			do_notify_parent(p, p->exit_signal);
+			if (p->exit_signal != -1) {
+				p->exit_state = EXIT_ZOMBIE;
+				p = NULL;
+			}
 		}
 		write_unlock_irq(&tasklist_lock);
 	}
 	if (p != NULL)
 		release_task(p);
-	BUG_ON(!retval);
+
 	return retval;
 }
 
@@ -1345,7 +1330,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
 	if (!p->exit_code)
 		return 0;
 	if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
-	    p->signal && p->signal->group_stop_count > 0)
+	    p->signal->group_stop_count > 0)
 		/*
 		 * A group stop is in progress and this is the group leader.
 		 * We won't report until all threads have stopped.
@@ -1459,9 +1444,6 @@ static int wait_task_continued(struct task_struct *p, int noreap,
 	pid_t pid;
 	uid_t uid;
 
-	if (unlikely(!p->signal))
-		return 0;
-
 	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
 		return 0;
 