Diffstat (limited to 'kernel/exit.c')
 kernel/exit.c | 245 ++++++++++-----------------------------------------
 1 file changed, 69 insertions(+), 176 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index ca0b3488c4a9..789b8862fe3b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -46,6 +46,7 @@
 #include <linux/blkdev.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/tracehook.h>
+#include <linux/fs_struct.h>
 #include <linux/init_task.h>
 #include <trace/sched.h>
 
@@ -61,11 +62,6 @@ DEFINE_TRACE(sched_process_wait);
 
 static void exit_mm(struct task_struct * tsk);
 
-static inline int task_detached(struct task_struct *p)
-{
-        return p->exit_signal == -1;
-}
-
 static void __unhash_process(struct task_struct *p)
 {
         nr_threads--;
@@ -362,16 +358,12 @@ static void reparent_to_kthreadd(void)
 void __set_special_pids(struct pid *pid)
 {
         struct task_struct *curr = current->group_leader;
-        pid_t nr = pid_nr(pid);
 
-        if (task_session(curr) != pid) {
+        if (task_session(curr) != pid)
                 change_pid(curr, PIDTYPE_SID, pid);
-                set_task_session(curr, nr);
-        }
-        if (task_pgrp(curr) != pid) {
+
+        if (task_pgrp(curr) != pid)
                 change_pid(curr, PIDTYPE_PGID, pid);
-                set_task_pgrp(curr, nr);
-        }
 }
 
 static void set_special_pids(struct pid *pid)
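
The hunk above drops the cached numeric session/pgrp ids: once change_pid() keeps the struct pid links themselves up to date, there is nothing left for set_task_session()/set_task_pgrp() to mirror, and membership checks compare struct pid pointers directly. A minimal user-space sketch of that idiom (the names here are illustrative stand-ins, not the kernel's types):

    #include <stdio.h>

    /* stand-ins for struct pid and a task's PIDTYPE_SID link */
    struct pid  { int nr; };
    struct task { struct pid *session; };

    static void set_session(struct task *t, struct pid *pid)
    {
            if (t->session != pid)   /* pointer compare, as in __set_special_pids() */
                    t->session = pid;   /* one update; no cached pid_t to keep in sync */
    }

    int main(void)
    {
            struct pid old_sess = { 42 }, new_sess = { 99 };
            struct task t = { &old_sess };

            set_session(&t, &new_sess);
            printf("session id: %d\n", t.session->nr);   /* prints 99 */
            return 0;
    }
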
@@ -429,7 +421,6 @@ EXPORT_SYMBOL(disallow_signal);
 void daemonize(const char *name, ...)
 {
         va_list args;
-        struct fs_struct *fs;
         sigset_t blocked;
 
         va_start(args, name);
@@ -462,11 +453,7 @@ void daemonize(const char *name, ...)
 
         /* Become as one with the init task */
 
-        exit_fs(current);       /* current->fs->count--; */
-        fs = init_task.fs;
-        current->fs = fs;
-        atomic_inc(&fs->count);
-
+        daemonize_fs_struct();
         exit_files(current);
         current->files = init_task.files;
         atomic_inc(&current->files->count);
@@ -565,30 +552,6 @@ void exit_files(struct task_struct *tsk)
         }
 }
 
-void put_fs_struct(struct fs_struct *fs)
-{
-        /* No need to hold fs->lock if we are killing it */
-        if (atomic_dec_and_test(&fs->count)) {
-                path_put(&fs->root);
-                path_put(&fs->pwd);
-                kmem_cache_free(fs_cachep, fs);
-        }
-}
-
-void exit_fs(struct task_struct *tsk)
-{
-        struct fs_struct * fs = tsk->fs;
-
-        if (fs) {
-                task_lock(tsk);
-                tsk->fs = NULL;
-                task_unlock(tsk);
-                put_fs_struct(fs);
-        }
-}
-
-EXPORT_SYMBOL_GPL(exit_fs);
-
 #ifdef CONFIG_MM_OWNER
 /*
  * Task p is exiting and it owned mm, lets find a new owner for it
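
put_fs_struct() and exit_fs() leave kernel/exit.c in the hunks above, together with the open-coded fs swap in daemonize() (replaced by daemonize_fs_struct() and the new <linux/fs_struct.h> include). The logic being moved is plain reference counting: drop a count and tear the structure down only on the final put. A hedged user-space model of that pattern, with C11 atomics standing in for the kernel's atomic_t and a toy struct in place of fs_struct:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fsbox {
            atomic_int count;
            int payload;   /* the real struct holds root/pwd paths instead */
    };

    static void put_fsbox(struct fsbox *fs)
    {
            /* atomic_dec_and_test() analogue: fetch_sub returns the old
             * value, so 1 means we just dropped the last reference */
            if (atomic_fetch_sub(&fs->count, 1) == 1) {
                    printf("last reference gone, freeing\n");
                    free(fs);
            }
    }

    int main(void)
    {
            struct fsbox *fs = malloc(sizeof(*fs));

            atomic_init(&fs->count, 2);   /* two tasks share this fs */
            put_fsbox(fs);                /* 2 -> 1: still in use */
            put_fsbox(fs);                /* 1 -> 0: freed here */
            return 0;
    }
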
@@ -732,119 +695,6 @@ static void exit_mm(struct task_struct * tsk)
 }
 
 /*
- * Return nonzero if @parent's children should reap themselves.
- *
- * Called with write_lock_irq(&tasklist_lock) held.
- */
-static int ignoring_children(struct task_struct *parent)
-{
-        int ret;
-        struct sighand_struct *psig = parent->sighand;
-        unsigned long flags;
-        spin_lock_irqsave(&psig->siglock, flags);
-        ret = (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
-               (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT));
-        spin_unlock_irqrestore(&psig->siglock, flags);
-        return ret;
-}
-
-/*
- * Detach all tasks we were using ptrace on.
- * Any that need to be release_task'd are put on the @dead list.
- *
- * Called with write_lock(&tasklist_lock) held.
- */
-static void ptrace_exit(struct task_struct *parent, struct list_head *dead)
-{
-        struct task_struct *p, *n;
-        int ign = -1;
-
-        list_for_each_entry_safe(p, n, &parent->ptraced, ptrace_entry) {
-                __ptrace_unlink(p);
-
-                if (p->exit_state != EXIT_ZOMBIE)
-                        continue;
-
-                /*
-                 * If it's a zombie, our attachedness prevented normal
-                 * parent notification or self-reaping. Do notification
-                 * now if it would have happened earlier. If it should
-                 * reap itself, add it to the @dead list. We can't call
-                 * release_task() here because we already hold tasklist_lock.
-                 *
-                 * If it's our own child, there is no notification to do.
-                 * But if our normal children self-reap, then this child
-                 * was prevented by ptrace and we must reap it now.
-                 */
-                if (!task_detached(p) && thread_group_empty(p)) {
-                        if (!same_thread_group(p->real_parent, parent))
-                                do_notify_parent(p, p->exit_signal);
-                        else {
-                                if (ign < 0)
-                                        ign = ignoring_children(parent);
-                                if (ign)
-                                        p->exit_signal = -1;
-                        }
-                }
-
-                if (task_detached(p)) {
-                        /*
-                         * Mark it as in the process of being reaped.
-                         */
-                        p->exit_state = EXIT_DEAD;
-                        list_add(&p->ptrace_entry, dead);
-                }
-        }
-}
-
-/*
- * Finish up exit-time ptrace cleanup.
- *
- * Called without locks.
- */
-static void ptrace_exit_finish(struct task_struct *parent,
-                               struct list_head *dead)
-{
-        struct task_struct *p, *n;
-
-        BUG_ON(!list_empty(&parent->ptraced));
-
-        list_for_each_entry_safe(p, n, dead, ptrace_entry) {
-                list_del_init(&p->ptrace_entry);
-                release_task(p);
-        }
-}
-
-static void reparent_thread(struct task_struct *p, struct task_struct *father)
-{
-        if (p->pdeath_signal)
-                /* We already hold the tasklist_lock here. */
-                group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
-
-        list_move_tail(&p->sibling, &p->real_parent->children);
-
-        /* If this is a threaded reparent there is no need to
-         * notify anyone anything has happened.
-         */
-        if (same_thread_group(p->real_parent, father))
-                return;
-
-        /* We don't want people slaying init. */
-        if (!task_detached(p))
-                p->exit_signal = SIGCHLD;
-
-        /* If we'd notified the old parent about this child's death,
-         * also notify the new parent.
-         */
-        if (!ptrace_reparented(p) &&
-            p->exit_state == EXIT_ZOMBIE &&
-            !task_detached(p) && thread_group_empty(p))
-                do_notify_parent(p, p->exit_signal);
-
-        kill_orphaned_pgrp(p, father);
-}
-
-/*
  * When we die, we re-parent all our children.
  * Try to give them to another thread in our thread
  * group, and if no such member exists, give it to
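
All of the exit-time ptrace cleanup above (ignoring_children(), ptrace_exit(), ptrace_exit_finish()) disappears from exit.c; as the later hunks show, forget_original_parent() now calls exit_ptrace() instead. The decision the removed code made per zombie tracee is still worth keeping in mind: notify the real parent if the tracer was not it, otherwise let the child self-reap only when the parent ignores SIGCHLD. A small illustrative model of just that decision (plain user-space C, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    enum action { NOTIFY_PARENT, SELF_REAP, LEAVE_FOR_WAIT };

    /* models the branch in the removed ptrace_exit(): what happens to a
     * zombie tracee once the tracer detaches */
    static enum action zombie_action(bool tracer_is_parent,
                                     bool parent_ignores_sigchld)
    {
            if (!tracer_is_parent)
                    return NOTIFY_PARENT;   /* ptrace had held the notification back */
            if (parent_ignores_sigchld)
                    return SELF_REAP;       /* SIG_IGN / SA_NOCLDWAIT: reap it now */
            return LEAVE_FOR_WAIT;          /* a normal wait() will collect it */
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   zombie_action(false, false),   /* 0: notify the real parent */
                   zombie_action(true, true),     /* 1: self-reap */
                   zombie_action(true, false));   /* 2: leave for wait() */
            return 0;
    }
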
@@ -883,17 +733,51 @@ static struct task_struct *find_new_reaper(struct task_struct *father)
         return pid_ns->child_reaper;
 }
 
+/*
+ * Any that need to be release_task'd are put on the @dead list.
+ */
+static void reparent_thread(struct task_struct *father, struct task_struct *p,
+                                struct list_head *dead)
+{
+        if (p->pdeath_signal)
+                group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
+
+        list_move_tail(&p->sibling, &p->real_parent->children);
+
+        if (task_detached(p))
+                return;
+        /*
+         * If this is a threaded reparent there is no need to
+         * notify anyone anything has happened.
+         */
+        if (same_thread_group(p->real_parent, father))
+                return;
+
+        /* We don't want people slaying init. */
+        p->exit_signal = SIGCHLD;
+
+        /* If it has exited notify the new parent about this child's death. */
+        if (!p->ptrace &&
+            p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
+                do_notify_parent(p, p->exit_signal);
+                if (task_detached(p)) {
+                        p->exit_state = EXIT_DEAD;
+                        list_move_tail(&p->sibling, dead);
+                }
+        }
+
+        kill_orphaned_pgrp(p, father);
+}
+
 static void forget_original_parent(struct task_struct *father)
 {
         struct task_struct *p, *n, *reaper;
-        LIST_HEAD(ptrace_dead);
+        LIST_HEAD(dead_children);
+
+        exit_ptrace(father);
 
         write_lock_irq(&tasklist_lock);
         reaper = find_new_reaper(father);
-        /*
-         * First clean up ptrace if we were using it.
-         */
-        ptrace_exit(father, &ptrace_dead);
 
         list_for_each_entry_safe(p, n, &father->children, sibling) {
                 p->real_parent = reaper;
@@ -901,13 +785,16 @@ static void forget_original_parent(struct task_struct *father)
                         BUG_ON(p->ptrace);
                         p->parent = p->real_parent;
                 }
-                reparent_thread(p, father);
+                reparent_thread(father, p, &dead_children);
         }
-
         write_unlock_irq(&tasklist_lock);
+
         BUG_ON(!list_empty(&father->children));
 
-        ptrace_exit_finish(father, &ptrace_dead);
+        list_for_each_entry_safe(p, n, &dead_children, sibling) {
+                list_del_init(&p->sibling);
+                release_task(p);
+        }
 }
 
 /*
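
The reworked forget_original_parent() keeps the shape that recurs throughout these hunks: while write_lock_irq(&tasklist_lock) is held, children that must be reaped are only *collected* onto dead_children; release_task() runs after the lock is dropped, since it cannot be called with tasklist_lock held. A user-space sketch of that collect-under-lock, release-after-unlock pattern (a pthread mutex standing in for the tasklist lock; illustrative only):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct child { int pid; int detached; struct child *next; };

    static pthread_mutex_t tasklist = PTHREAD_MUTEX_INITIALIZER;

    int main(void)
    {
            struct child *children = NULL, *dead = NULL, *c, *next;

            for (int i = 1; i <= 3; i++) {           /* fake three children */
                    c = malloc(sizeof(*c));
                    c->pid = 100 + i;
                    c->detached = (i != 2);          /* two must be reaped by us */
                    c->next = children;
                    children = c;
            }

            pthread_mutex_lock(&tasklist);           /* ~ write_lock_irq(&tasklist_lock) */
            for (c = children, children = NULL; c; c = next) {
                    next = c->next;
                    if (c->detached) {               /* ~ moved onto dead_children */
                            c->next = dead;
                            dead = c;
                    } else {                         /* ~ stays reparented; freed here
                                                      *   only to keep the demo leak-free */
                            free(c);
                    }
            }
            pthread_mutex_unlock(&tasklist);         /* ~ write_unlock_irq(...) */

            for (c = dead; c; c = next) {            /* ~ release_task() loop, no lock held */
                    next = c->next;
                    printf("releasing pid %d outside the lock\n", c->pid);
                    free(c);
            }
            return 0;
    }
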
@@ -1419,6 +1306,18 @@ static int wait_task_zombie(struct task_struct *p, int options,
         return retval;
 }
 
+static int *task_stopped_code(struct task_struct *p, bool ptrace)
+{
+        if (ptrace) {
+                if (task_is_stopped_or_traced(p))
+                        return &p->exit_code;
+        } else {
+                if (p->signal->flags & SIGNAL_STOP_STOPPED)
+                        return &p->signal->group_exit_code;
+        }
+        return NULL;
+}
+
 /*
  * Handle sys_wait4 work for one task in state TASK_STOPPED. We hold
  * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
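
task_stopped_code() folds two questions into one answer: *is* this task in a reportable stop for this kind of waiter, and *where* does its stop code live (per-task exit_code for a ptracer; signal->group_exit_code for a regular wait, valid only once the whole group has stopped and SIGNAL_STOP_STOPPED is set). Returning a pointer lets the caller test, read, and clear through the same handle, which is exactly how wait_task_stopped() uses it below. A self-contained illustration of the idiom (mock types and flag, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    #define SIGNAL_STOP_STOPPED 0x1   /* mock of the kernel flag */

    struct mock_task {
            int exit_code;            /* per-task code: what a ptracer sees */
            int group_exit_code;      /* group-wide code: what a normal wait sees */
            unsigned signal_flags;
            bool stopped_or_traced;
    };

    static int *stopped_code(struct mock_task *p, bool ptrace)
    {
            if (ptrace) {
                    if (p->stopped_or_traced)
                            return &p->exit_code;
            } else {
                    if (p->signal_flags & SIGNAL_STOP_STOPPED)
                            return &p->group_exit_code;
            }
            return NULL;
    }

    int main(void)
    {
            struct mock_task t = {
                    .group_exit_code = 0x13,              /* SIGSTOP */
                    .signal_flags = SIGNAL_STOP_STOPPED,
            };
            int *p_code = stopped_code(&t, false);

            if (p_code && *p_code) {
                    printf("reporting stop code %#x\n", *p_code);
                    *p_code = 0;   /* consume it, as the !WNOWAIT path does */
            }
            return 0;
    }
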
@@ -1429,7 +1328,7 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
                              int options, struct siginfo __user *infop,
                              int __user *stat_addr, struct rusage __user *ru)
 {
-        int retval, exit_code, why;
+        int retval, exit_code, *p_code, why;
         uid_t uid = 0; /* unneeded, required by compiler */
         pid_t pid;
 
@@ -1439,22 +1338,16 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
         exit_code = 0;
         spin_lock_irq(&p->sighand->siglock);
 
-        if (unlikely(!task_is_stopped_or_traced(p)))
-                goto unlock_sig;
-
-        if (!ptrace && p->signal->group_stop_count > 0)
-                /*
-                 * A group stop is in progress and this is the group leader.
-                 * We won't report until all threads have stopped.
-                 */
+        p_code = task_stopped_code(p, ptrace);
+        if (unlikely(!p_code))
                 goto unlock_sig;
 
-        exit_code = p->exit_code;
+        exit_code = *p_code;
         if (!exit_code)
                 goto unlock_sig;
 
         if (!unlikely(options & WNOWAIT))
-                p->exit_code = 0;
+                *p_code = 0;
 
         /* don't need the RCU readlock here as we're holding a spinlock */
         uid = __task_cred(p)->uid;
@@ -1610,7 +1503,7 @@ static int wait_consider_task(struct task_struct *parent, int ptrace,
          */
         *notask_error = 0;
 
-        if (task_is_stopped_or_traced(p))
+        if (task_stopped_code(p, ptrace))
                 return wait_task_stopped(ptrace, p, options,
                                          infop, stat_addr, ru);
 
@@ -1814,7 +1707,7 @@ SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
                 pid = find_get_pid(-upid);
         } else if (upid == 0) {
                 type = PIDTYPE_PGID;
-                pid = get_pid(task_pgrp(current));
+                pid = get_task_pid(current, PIDTYPE_PGID);
         } else /* upid > 0 */ {
                 type = PIDTYPE_PID;
                 pid = find_get_pid(upid);
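
The last hunk switches the pgrp lookup in wait4 from get_pid(task_pgrp(current)) to get_task_pid(current, PIDTYPE_PGID): the lookup and the reference acquisition happen as one operation under the helper's own protection, rather than dereferencing the pid pointer first and bumping its count afterwards. A toy user-space model of that combined lookup-and-get shape (illustrative names; not the kernel's struct pid API):

    #include <stdatomic.h>
    #include <stdio.h>

    struct refpid { atomic_int count; int nr; };

    /* combined lookup-and-get: take the reference while the pointer is
     * known valid, then hand it out -- the shape of get_task_pid() */
    static struct refpid *get_refpid(struct refpid *p)
    {
            if (p)
                    atomic_fetch_add(&p->count, 1);
            return p;
    }

    static void put_refpid(struct refpid *p)
    {
            if (p && atomic_fetch_sub(&p->count, 1) == 1)
                    printf("pid %d: last reference dropped\n", p->nr);
    }

    int main(void)
    {
            struct refpid pgrp = { 1, 1234 };         /* one pre-existing reference */
            struct refpid *pid = get_refpid(&pgrp);   /* ~ get_task_pid(current, PIDTYPE_PGID) */

            printf("waiting on process group %d\n", pid->nr);
            put_refpid(pid);                          /* ~ put_pid() once wait4 is done */
            return 0;
    }
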