Diffstat (limited to 'kernel/exit.c')
-rw-r--r--  kernel/exit.c  87
1 file changed, 47 insertions, 40 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index e95b93282210..dba194a8d416 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -4,7 +4,6 @@
  * Copyright (C) 1991, 1992 Linus Torvalds
  */
 
-#include <linux/config.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
@@ -26,6 +25,8 @@
 #include <linux/mount.h>
 #include <linux/proc_fs.h>
 #include <linux/mempolicy.h>
+#include <linux/taskstats_kern.h>
+#include <linux/delayacct.h>
 #include <linux/cpuset.h>
 #include <linux/syscalls.h>
 #include <linux/signal.h>
@@ -36,6 +37,7 @@
 #include <linux/compat.h>
 #include <linux/pipe_fs_i.h>
 #include <linux/audit.h> /* for audit_free() */
+#include <linux/resource.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -45,8 +47,6 @@
 extern void sem_exit (void);
 extern struct task_struct *child_reaper;
 
-int getrusage(struct task_struct *, int, struct rusage __user *);
-
 static void exit_mm(struct task_struct * tsk);
 
 static void __unhash_process(struct task_struct *p)
@@ -136,14 +136,10 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
 
 void release_task(struct task_struct * p)
 {
+	struct task_struct *leader;
 	int zap_leader;
-	task_t *leader;
-	struct dentry *proc_dentry;
-
 repeat:
 	atomic_dec(&p->user->processes);
-	spin_lock(&p->proc_lock);
-	proc_dentry = proc_pid_unhash(p);
 	write_lock_irq(&tasklist_lock);
 	ptrace_unlink(p);
 	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
@@ -172,8 +168,7 @@ repeat:
 
 	sched_exit(p);
 	write_unlock_irq(&tasklist_lock);
-	spin_unlock(&p->proc_lock);
-	proc_pid_flush(proc_dentry);
+	proc_flush_task(p);
 	release_thread(p);
 	call_rcu(&p->rcu, delayed_put_task_struct);
 
@@ -216,7 +211,7 @@ out:
  *
  * "I ask you, have you ever known what it is to be an orphan?"
  */
-static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task)
+static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task)
 {
 	struct task_struct *p;
 	int ret = 1;
@@ -579,7 +574,7 @@ static void exit_mm(struct task_struct * tsk)
 		down_read(&mm->mmap_sem);
 	}
 	atomic_inc(&mm->mm_count);
-	if (mm != tsk->active_mm) BUG();
+	BUG_ON(mm != tsk->active_mm);
 	/* more a memory barrier than a real lock */
 	task_lock(tsk);
 	tsk->mm = NULL;
@@ -589,7 +584,8 @@ static void exit_mm(struct task_struct * tsk)
 	mmput(mm);
 }
 
-static inline void choose_new_parent(task_t *p, task_t *reaper)
+static inline void
+choose_new_parent(struct task_struct *p, struct task_struct *reaper)
 {
 	/*
 	 * Make sure we're not reparenting to ourselves and that
@@ -599,7 +595,8 @@ static inline void choose_new_parent(task_t *p, task_t *reaper)
 	p->real_parent = reaper;
 }
 
-static void reparent_thread(task_t *p, task_t *father, int traced)
+static void
+reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
 {
 	/* We don't want people slaying init. */
 	if (p->exit_signal != -1)
@@ -663,8 +660,8 @@ static void reparent_thread(task_t *p, task_t *father, int traced)
  * group, and if no such member exists, give it to
  * the global child reaper process (ie "init")
  */
-static void forget_original_parent(struct task_struct * father,
-					   struct list_head *to_release)
+static void
+forget_original_parent(struct task_struct *father, struct list_head *to_release)
 {
 	struct task_struct *p, *reaper = father;
 	struct list_head *_p, *_n;
@@ -687,7 +684,7 @@ static void forget_original_parent(struct task_struct * father,
 	 */
 	list_for_each_safe(_p, _n, &father->children) {
 		int ptrace;
-		p = list_entry(_p,struct task_struct,sibling);
+		p = list_entry(_p, struct task_struct, sibling);
 
 		ptrace = p->ptrace;
 
@@ -716,7 +713,7 @@ static void forget_original_parent(struct task_struct * father,
 			list_add(&p->ptrace_list, to_release);
 	}
 	list_for_each_safe(_p, _n, &father->ptrace_children) {
-		p = list_entry(_p,struct task_struct,ptrace_list);
+		p = list_entry(_p, struct task_struct, ptrace_list);
 		choose_new_parent(p, reaper);
 		reparent_thread(p, father, 1);
 	}
@@ -836,7 +833,7 @@ static void exit_notify(struct task_struct *tsk)
 
 	list_for_each_safe(_p, _n, &ptrace_dead) {
 		list_del_init(_p);
-		t = list_entry(_p,struct task_struct,ptrace_list);
+		t = list_entry(_p, struct task_struct, ptrace_list);
 		release_task(t);
 	}
 
@@ -848,7 +845,9 @@ static void exit_notify(struct task_struct *tsk)
 fastcall NORET_TYPE void do_exit(long code)
 {
 	struct task_struct *tsk = current;
+	struct taskstats *tidstats;
 	int group_dead;
+	unsigned int mycpu;
 
 	profile_task_exit(tsk);
 
@@ -881,19 +880,13 @@ fastcall NORET_TYPE void do_exit(long code)
 
 	tsk->flags |= PF_EXITING;
 
-	/*
-	 * Make sure we don't try to process any timer firings
-	 * while we are already exiting.
-	 */
-	tsk->it_virt_expires = cputime_zero;
-	tsk->it_prof_expires = cputime_zero;
-	tsk->it_sched_expires = 0;
-
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 				current->comm, current->pid,
 				preempt_count());
 
+	taskstats_exit_alloc(&tidstats, &mycpu);
+
 	acct_update_integrals(tsk);
 	if (tsk->mm) {
 		update_hiwater_rss(tsk->mm);
@@ -903,18 +896,24 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (group_dead) {
 		hrtimer_cancel(&tsk->signal->real_timer);
 		exit_itimers(tsk->signal);
-		acct_process(code);
 	}
+	acct_collect(code, group_dead);
 	if (unlikely(tsk->robust_list))
 		exit_robust_list(tsk);
-#ifdef CONFIG_COMPAT
+#if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
 	if (unlikely(tsk->compat_robust_list))
 		compat_exit_robust_list(tsk);
 #endif
 	if (unlikely(tsk->audit_context))
 		audit_free(tsk);
+	taskstats_exit_send(tsk, tidstats, group_dead, mycpu);
+	taskstats_exit_free(tidstats);
+	delayacct_tsk_exit(tsk);
+
 	exit_mm(tsk);
 
+	if (group_dead)
+		acct_process();
 	exit_sem(tsk);
 	__exit_files(tsk);
 	__exit_fs(tsk);
@@ -938,9 +937,17 @@ fastcall NORET_TYPE void do_exit(long code)
 	tsk->mempolicy = NULL;
 #endif
 	/*
-	 * If DEBUG_MUTEXES is on, make sure we are holding no locks:
+	 * This must happen late, after the PID is not
+	 * hashed anymore:
 	 */
-	mutex_debug_check_no_locks_held(tsk);
+	if (unlikely(!list_empty(&tsk->pi_state_list)))
+		exit_pi_state_list(tsk);
+	if (unlikely(current->pi_state_cache))
+		kfree(current->pi_state_cache);
+	/*
+	 * Make sure we are holding no locks:
+	 */
+	debug_check_no_locks_held(tsk);
 
 	if (tsk->io_context)
 		exit_io_context();
@@ -1015,7 +1022,7 @@ asmlinkage void sys_exit_group(int error_code)
 	do_group_exit((error_code & 0xff) << 8);
 }
 
-static int eligible_child(pid_t pid, int options, task_t *p)
+static int eligible_child(pid_t pid, int options, struct task_struct *p)
 {
 	if (pid > 0) {
 		if (p->pid != pid)
@@ -1056,12 +1063,13 @@ static int eligible_child(pid_t pid, int options, task_t *p)
 	return 1;
 }
 
-static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid,
+static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
 			       int why, int status,
 			       struct siginfo __user *infop,
 			       struct rusage __user *rusagep)
 {
 	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;
+
 	put_task_struct(p);
 	if (!retval)
 		retval = put_user(SIGCHLD, &infop->si_signo);
@@ -1086,7 +1094,7 @@ static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid,
  * the lock and this task is uninteresting. If we return nonzero, we have
  * released the lock and the system call should return.
  */
-static int wait_task_zombie(task_t *p, int noreap,
+static int wait_task_zombie(struct task_struct *p, int noreap,
 			    struct siginfo __user *infop,
 			    int __user *stat_addr, struct rusage __user *ru)
 {
@@ -1248,8 +1256,8 @@ static int wait_task_zombie(task_t *p, int noreap,
  * the lock and this task is uninteresting. If we return nonzero, we have
  * released the lock and the system call should return.
  */
-static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap,
-			     struct siginfo __user *infop,
+static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
+			     int noreap, struct siginfo __user *infop,
 			     int __user *stat_addr, struct rusage __user *ru)
 {
 	int retval, exit_code;
@@ -1363,7 +1371,7 @@ bail_ref:
  * the lock and this task is uninteresting. If we return nonzero, we have
  * released the lock and the system call should return.
  */
-static int wait_task_continued(task_t *p, int noreap,
+static int wait_task_continued(struct task_struct *p, int noreap,
 			       struct siginfo __user *infop,
 			       int __user *stat_addr, struct rusage __user *ru)
 {
@@ -1449,7 +1457,7 @@ repeat:
 		int ret;
 
 		list_for_each(_p,&tsk->children) {
-			p = list_entry(_p,struct task_struct,sibling);
+			p = list_entry(_p, struct task_struct, sibling);
 
 			ret = eligible_child(pid, options, p);
 			if (!ret)
@@ -1538,8 +1546,7 @@ check_continued:
 		if (options & __WNOTHREAD)
 			break;
 		tsk = next_thread(tsk);
-		if (tsk->signal != current->signal)
-			BUG();
+		BUG_ON(tsk->signal != current->signal);
 	} while (tsk != current);
 
 	read_unlock(&tasklist_lock);