Diffstat (limited to 'kernel/exit.c')
-rw-r--r--  kernel/exit.c  114
1 file changed, 64 insertions(+), 50 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index a3baf92462bd..2e4c13cba95a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -4,7 +4,6 @@
  *  Copyright (C) 1991, 1992  Linus Torvalds
  */
 
-#include <linux/config.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
@@ -26,6 +25,8 @@
 #include <linux/mount.h>
 #include <linux/proc_fs.h>
 #include <linux/mempolicy.h>
+#include <linux/taskstats_kern.h>
+#include <linux/delayacct.h>
 #include <linux/cpuset.h>
 #include <linux/syscalls.h>
 #include <linux/signal.h>
@@ -36,6 +37,7 @@
 #include <linux/compat.h>
 #include <linux/pipe_fs_i.h>
 #include <linux/audit.h> /* for audit_free() */
+#include <linux/resource.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -45,8 +47,6 @@
 extern void sem_exit (void);
 extern struct task_struct *child_reaper;
 
-int getrusage(struct task_struct *, int, struct rusage __user *);
-
 static void exit_mm(struct task_struct * tsk);
 
 static void __unhash_process(struct task_struct *p)
@@ -136,14 +136,10 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
 
 void release_task(struct task_struct * p)
 {
+	struct task_struct *leader;
 	int zap_leader;
-	task_t *leader;
-	struct dentry *proc_dentry;
-
 repeat:
 	atomic_dec(&p->user->processes);
-	spin_lock(&p->proc_lock);
-	proc_dentry = proc_pid_unhash(p);
 	write_lock_irq(&tasklist_lock);
 	ptrace_unlink(p);
 	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
@@ -172,8 +168,7 @@ repeat:
 
 	sched_exit(p);
 	write_unlock_irq(&tasklist_lock);
-	spin_unlock(&p->proc_lock);
-	proc_pid_flush(proc_dentry);
+	proc_flush_task(p);
 	release_thread(p);
 	call_rcu(&p->rcu, delayed_put_task_struct);
 
@@ -216,7 +211,7 @@ out:
  *
  * "I ask you, have you ever known what it is to be an orphan?"
  */
-static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task)
+static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task)
 {
 	struct task_struct *p;
 	int ret = 1;
@@ -224,7 +219,7 @@ static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task)
 	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
 		if (p == ignored_task
 				|| p->exit_state
-				|| p->real_parent->pid == 1)
+				|| is_init(p->real_parent))
 			continue;
 		if (process_group(p->real_parent) != pgrp
 			    && p->real_parent->signal->session == p->signal->session) {
@@ -254,17 +249,6 @@ static int has_stopped_jobs(int pgrp)
 	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
 		if (p->state != TASK_STOPPED)
 			continue;
-
-		/* If p is stopped by a debugger on a signal that won't
-		   stop it, then don't count p as stopped.  This isn't
-		   perfect but it's a good approximation. */
-		if (unlikely (p->ptrace)
-		    && p->exit_code != SIGSTOP
-		    && p->exit_code != SIGTSTP
-		    && p->exit_code != SIGTTOU
-		    && p->exit_code != SIGTTIN)
-			continue;
-
 		retval = 1;
 		break;
 	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
@@ -297,9 +281,7 @@ static void reparent_to_init(void)
 	/* Set the exit signal to SIGCHLD so we signal init on exit */
 	current->exit_signal = SIGCHLD;
 
-	if ((current->policy == SCHED_NORMAL ||
-			current->policy == SCHED_BATCH)
-				&& (task_nice(current) < 0))
+	if (!has_rt_policy(current) && (task_nice(current) < 0))
 		set_user_nice(current, 0);
 	/* cpus_allowed? */
 	/* rt_priority? */
@@ -492,6 +474,18 @@ void fastcall put_files_struct(struct files_struct *files)
 
 EXPORT_SYMBOL(put_files_struct);
 
+void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
+{
+	struct files_struct *old;
+
+	old = tsk->files;
+	task_lock(tsk);
+	tsk->files = files;
+	task_unlock(tsk);
+	put_files_struct(old);
+}
+EXPORT_SYMBOL(reset_files_struct);
+
 static inline void __exit_files(struct task_struct *tsk)
 {
 	struct files_struct * files = tsk->files;
@@ -589,7 +583,8 @@ static void exit_mm(struct task_struct * tsk)
 	mmput(mm);
 }
 
-static inline void choose_new_parent(task_t *p, task_t *reaper)
+static inline void
+choose_new_parent(struct task_struct *p, struct task_struct *reaper)
 {
 	/*
 	 * Make sure we're not reparenting to ourselves and that
@@ -599,7 +594,8 @@ static inline void choose_new_parent(task_t *p, task_t *reaper)
 	p->real_parent = reaper;
 }
 
-static void reparent_thread(task_t *p, task_t *father, int traced)
+static void
+reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
 {
 	/* We don't want people slaying init. */
 	if (p->exit_signal != -1)
@@ -663,8 +659,8 @@ static void reparent_thread(task_t *p, task_t *father, int traced)
  * group, and if no such member exists, give it to
  * the global child reaper process (ie "init")
  */
-static void forget_original_parent(struct task_struct * father,
-					  struct list_head *to_release)
+static void
+forget_original_parent(struct task_struct *father, struct list_head *to_release)
 {
 	struct task_struct *p, *reaper = father;
 	struct list_head *_p, *_n;
@@ -687,7 +683,7 @@ static void forget_original_parent(struct task_struct * father,
 	 */
 	list_for_each_safe(_p, _n, &father->children) {
 		int ptrace;
-		p = list_entry(_p,struct task_struct,sibling);
+		p = list_entry(_p, struct task_struct, sibling);
 
 		ptrace = p->ptrace;
 
@@ -716,7 +712,7 @@ static void forget_original_parent(struct task_struct * father,
 		list_add(&p->ptrace_list, to_release);
 	}
 	list_for_each_safe(_p, _n, &father->ptrace_children) {
-		p = list_entry(_p,struct task_struct,ptrace_list);
+		p = list_entry(_p, struct task_struct, ptrace_list);
 		choose_new_parent(p, reaper);
 		reparent_thread(p, father, 1);
 	}
@@ -836,7 +832,7 @@ static void exit_notify(struct task_struct *tsk)
 
 	list_for_each_safe(_p, _n, &ptrace_dead) {
 		list_del_init(_p);
-		t = list_entry(_p,struct task_struct,ptrace_list);
+		t = list_entry(_p, struct task_struct, ptrace_list);
 		release_task(t);
 	}
 
@@ -848,7 +844,9 @@ static void exit_notify(struct task_struct *tsk)
 fastcall NORET_TYPE void do_exit(long code)
 {
 	struct task_struct *tsk = current;
+	struct taskstats *tidstats;
 	int group_dead;
+	unsigned int mycpu;
 
 	profile_task_exit(tsk);
 
@@ -886,6 +884,8 @@ fastcall NORET_TYPE void do_exit(long code)
 				current->comm, current->pid,
 				preempt_count());
 
+	taskstats_exit_alloc(&tidstats, &mycpu);
+
 	acct_update_integrals(tsk);
 	if (tsk->mm) {
 		update_hiwater_rss(tsk->mm);
@@ -895,18 +895,23 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (group_dead) {
 		hrtimer_cancel(&tsk->signal->real_timer);
 		exit_itimers(tsk->signal);
-		acct_process(code);
 	}
+	acct_collect(code, group_dead);
 	if (unlikely(tsk->robust_list))
 		exit_robust_list(tsk);
-#ifdef CONFIG_COMPAT
+#if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
 	if (unlikely(tsk->compat_robust_list))
 		compat_exit_robust_list(tsk);
 #endif
 	if (unlikely(tsk->audit_context))
 		audit_free(tsk);
+	taskstats_exit_send(tsk, tidstats, group_dead, mycpu);
+	taskstats_exit_free(tidstats);
+
 	exit_mm(tsk);
 
+	if (group_dead)
+		acct_process();
 	exit_sem(tsk);
 	__exit_files(tsk);
 	__exit_fs(tsk);
@@ -930,9 +935,17 @@ fastcall NORET_TYPE void do_exit(long code)
 	tsk->mempolicy = NULL;
 #endif
 	/*
-	 * If DEBUG_MUTEXES is on, make sure we are holding no locks:
+	 * This must happen late, after the PID is not
+	 * hashed anymore:
 	 */
-	mutex_debug_check_no_locks_held(tsk);
+	if (unlikely(!list_empty(&tsk->pi_state_list)))
+		exit_pi_state_list(tsk);
+	if (unlikely(current->pi_state_cache))
+		kfree(current->pi_state_cache);
+	/*
+	 * Make sure we are holding no locks:
+	 */
+	debug_check_no_locks_held(tsk);
 
 	if (tsk->io_context)
 		exit_io_context();
@@ -940,15 +953,15 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (tsk->splice_pipe)
 		__free_pipe_info(tsk->splice_pipe);
 
-	/* PF_DEAD causes final put_task_struct after we schedule. */
 	preempt_disable();
-	BUG_ON(tsk->flags & PF_DEAD);
-	tsk->flags |= PF_DEAD;
+	/* causes final put_task_struct in finish_task_switch(). */
+	tsk->state = TASK_DEAD;
 
 	schedule();
 	BUG();
 	/* Avoid "noreturn function does return". */
-	for (;;) ;
+	for (;;)
+		cpu_relax();	/* For when BUG is null */
 }
 
 EXPORT_SYMBOL_GPL(do_exit);
@@ -957,7 +970,7 @@ NORET_TYPE void complete_and_exit(struct completion *comp, long code)
 {
 	if (comp)
 		complete(comp);
-	
+
 	do_exit(code);
 }
 
@@ -1007,7 +1020,7 @@ asmlinkage void sys_exit_group(int error_code)
 	do_group_exit((error_code & 0xff) << 8);
 }
 
-static int eligible_child(pid_t pid, int options, task_t *p)
+static int eligible_child(pid_t pid, int options, struct task_struct *p)
 {
 	if (pid > 0) {
 		if (p->pid != pid)
@@ -1039,7 +1052,7 @@ static int eligible_child(pid_t pid, int options, task_t *p)
 	 * Do not consider thread group leaders that are
 	 * in a non-empty thread group:
 	 */
-	if (current->tgid != p->tgid && delay_group_leader(p))
+	if (delay_group_leader(p))
 		return 2;
 
 	if (security_task_wait(p))
@@ -1048,12 +1061,13 @@ static int eligible_child(pid_t pid, int options, task_t *p)
 	return 1;
 }
 
-static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid,
+static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
 			       int why, int status,
 			       struct siginfo __user *infop,
 			       struct rusage __user *rusagep)
 {
 	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;
+
 	put_task_struct(p);
 	if (!retval)
 		retval = put_user(SIGCHLD, &infop->si_signo);
@@ -1078,7 +1092,7 @@ static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid,
  * the lock and this task is uninteresting.  If we return nonzero, we have
  * released the lock and the system call should return.
  */
-static int wait_task_zombie(task_t *p, int noreap,
+static int wait_task_zombie(struct task_struct *p, int noreap,
 			    struct siginfo __user *infop,
 			    int __user *stat_addr, struct rusage __user *ru)
 {
@@ -1240,8 +1254,8 @@ static int wait_task_zombie(task_t *p, int noreap,
  * the lock and this task is uninteresting.  If we return nonzero, we have
  * released the lock and the system call should return.
  */
-static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap,
-			     struct siginfo __user *infop,
+static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
+			     int noreap, struct siginfo __user *infop,
 			     int __user *stat_addr, struct rusage __user *ru)
 {
 	int retval, exit_code;
@@ -1355,7 +1369,7 @@ bail_ref:
  * the lock and this task is uninteresting.  If we return nonzero, we have
  * released the lock and the system call should return.
  */
-static int wait_task_continued(task_t *p, int noreap,
+static int wait_task_continued(struct task_struct *p, int noreap,
 			       struct siginfo __user *infop,
 			       int __user *stat_addr, struct rusage __user *ru)
 {
@@ -1441,7 +1455,7 @@ repeat:
 	int ret;
 
 	list_for_each(_p,&tsk->children) {
-		p = list_entry(_p,struct task_struct,sibling);
+		p = list_entry(_p, struct task_struct, sibling);
 
 		ret = eligible_child(pid, options, p);
 		if (!ret)