diff options
author | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-30 19:16:45 -0400 |
---|---|---|
committer | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-30 19:16:45 -0400 |
commit | ada47b5fe13d89735805b566185f4885f5a3f750 (patch) | |
tree | 644b88f8a71896307d71438e9b3af49126ffb22b /kernel/exit.c | |
parent | 43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff) | |
parent | 3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff) |
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'kernel/exit.c')
-rw-r--r-- | kernel/exit.c | 89 |
1 file changed, 53 insertions, 36 deletions
diff --git a/kernel/exit.c b/kernel/exit.c index 3da04257aeaf..256ce8c2ebc8 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <linux/init_task.h> | 49 | #include <linux/init_task.h> |
50 | #include <linux/perf_event.h> | 50 | #include <linux/perf_event.h> |
51 | #include <trace/events/sched.h> | 51 | #include <trace/events/sched.h> |
52 | #include <linux/hw_breakpoint.h> | ||
52 | 53 | ||
53 | #include <asm/uaccess.h> | 54 | #include <asm/uaccess.h> |
54 | #include <asm/unistd.h> | 55 | #include <asm/unistd.h> |
@@ -69,10 +70,10 @@ static void __unhash_process(struct task_struct *p) | |||
69 | detach_pid(p, PIDTYPE_SID); | 70 | detach_pid(p, PIDTYPE_SID); |
70 | 71 | ||
71 | list_del_rcu(&p->tasks); | 72 | list_del_rcu(&p->tasks); |
73 | list_del_init(&p->sibling); | ||
72 | __get_cpu_var(process_counts)--; | 74 | __get_cpu_var(process_counts)--; |
73 | } | 75 | } |
74 | list_del_rcu(&p->thread_group); | 76 | list_del_rcu(&p->thread_group); |
75 | list_del_init(&p->sibling); | ||
76 | } | 77 | } |
77 | 78 | ||
78 | /* | 79 | /* |
@@ -86,7 +87,9 @@ static void __exit_signal(struct task_struct *tsk) | |||
86 | BUG_ON(!sig); | 87 | BUG_ON(!sig); |
87 | BUG_ON(!atomic_read(&sig->count)); | 88 | BUG_ON(!atomic_read(&sig->count)); |
88 | 89 | ||
89 | sighand = rcu_dereference(tsk->sighand); | 90 | sighand = rcu_dereference_check(tsk->sighand, |
91 | rcu_read_lock_held() || | ||
92 | lockdep_tasklist_lock_is_held()); | ||
90 | spin_lock(&sighand->siglock); | 93 | spin_lock(&sighand->siglock); |
91 | 94 | ||
92 | posix_cpu_timers_exit(tsk); | 95 | posix_cpu_timers_exit(tsk); |
@@ -112,9 +115,9 @@ static void __exit_signal(struct task_struct *tsk) | |||
112 | * We won't ever get here for the group leader, since it | 115 | * We won't ever get here for the group leader, since it |
113 | * will have been the last reference on the signal_struct. | 116 | * will have been the last reference on the signal_struct. |
114 | */ | 117 | */ |
115 | sig->utime = cputime_add(sig->utime, task_utime(tsk)); | 118 | sig->utime = cputime_add(sig->utime, tsk->utime); |
116 | sig->stime = cputime_add(sig->stime, task_stime(tsk)); | 119 | sig->stime = cputime_add(sig->stime, tsk->stime); |
117 | sig->gtime = cputime_add(sig->gtime, task_gtime(tsk)); | 120 | sig->gtime = cputime_add(sig->gtime, tsk->gtime); |
118 | sig->min_flt += tsk->min_flt; | 121 | sig->min_flt += tsk->min_flt; |
119 | sig->maj_flt += tsk->maj_flt; | 122 | sig->maj_flt += tsk->maj_flt; |
120 | sig->nvcsw += tsk->nvcsw; | 123 | sig->nvcsw += tsk->nvcsw; |
@@ -171,8 +174,10 @@ void release_task(struct task_struct * p) | |||
171 | repeat: | 174 | repeat: |
172 | tracehook_prepare_release_task(p); | 175 | tracehook_prepare_release_task(p); |
173 | /* don't need to get the RCU readlock here - the process is dead and | 176 | /* don't need to get the RCU readlock here - the process is dead and |
174 | * can't be modifying its own credentials */ | 177 | * can't be modifying its own credentials. But shut RCU-lockdep up */ |
178 | rcu_read_lock(); | ||
175 | atomic_dec(&__task_cred(p)->user->processes); | 179 | atomic_dec(&__task_cred(p)->user->processes); |
180 | rcu_read_unlock(); | ||
176 | 181 | ||
177 | proc_flush_task(p); | 182 | proc_flush_task(p); |
178 | 183 | ||
@@ -474,9 +479,11 @@ static void close_files(struct files_struct * files) | |||
474 | /* | 479 | /* |
475 | * It is safe to dereference the fd table without RCU or | 480 | * It is safe to dereference the fd table without RCU or |
476 | * ->file_lock because this is the last reference to the | 481 | * ->file_lock because this is the last reference to the |
477 | * files structure. | 482 | * files structure. But use RCU to shut RCU-lockdep up. |
478 | */ | 483 | */ |
484 | rcu_read_lock(); | ||
479 | fdt = files_fdtable(files); | 485 | fdt = files_fdtable(files); |
486 | rcu_read_unlock(); | ||
480 | for (;;) { | 487 | for (;;) { |
481 | unsigned long set; | 488 | unsigned long set; |
482 | i = j * __NFDBITS; | 489 | i = j * __NFDBITS; |
@@ -522,10 +529,12 @@ void put_files_struct(struct files_struct *files) | |||
522 | * at the end of the RCU grace period. Otherwise, | 529 | * at the end of the RCU grace period. Otherwise, |
523 | * you can free files immediately. | 530 | * you can free files immediately. |
524 | */ | 531 | */ |
532 | rcu_read_lock(); | ||
525 | fdt = files_fdtable(files); | 533 | fdt = files_fdtable(files); |
526 | if (fdt != &files->fdtab) | 534 | if (fdt != &files->fdtab) |
527 | kmem_cache_free(files_cachep, files); | 535 | kmem_cache_free(files_cachep, files); |
528 | free_fdtable(fdt); | 536 | free_fdtable(fdt); |
537 | rcu_read_unlock(); | ||
529 | } | 538 | } |
530 | } | 539 | } |
531 | 540 | ||
@@ -737,12 +746,9 @@ static struct task_struct *find_new_reaper(struct task_struct *father) | |||
737 | /* | 746 | /* |
738 | * Any that need to be release_task'd are put on the @dead list. | 747 | * Any that need to be release_task'd are put on the @dead list. |
739 | */ | 748 | */ |
740 | static void reparent_thread(struct task_struct *father, struct task_struct *p, | 749 | static void reparent_leader(struct task_struct *father, struct task_struct *p, |
741 | struct list_head *dead) | 750 | struct list_head *dead) |
742 | { | 751 | { |
743 | if (p->pdeath_signal) | ||
744 | group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p); | ||
745 | |||
746 | list_move_tail(&p->sibling, &p->real_parent->children); | 752 | list_move_tail(&p->sibling, &p->real_parent->children); |
747 | 753 | ||
748 | if (task_detached(p)) | 754 | if (task_detached(p)) |
@@ -781,12 +787,18 @@ static void forget_original_parent(struct task_struct *father) | |||
781 | reaper = find_new_reaper(father); | 787 | reaper = find_new_reaper(father); |
782 | 788 | ||
783 | list_for_each_entry_safe(p, n, &father->children, sibling) { | 789 | list_for_each_entry_safe(p, n, &father->children, sibling) { |
784 | p->real_parent = reaper; | 790 | struct task_struct *t = p; |
785 | if (p->parent == father) { | 791 | do { |
786 | BUG_ON(task_ptrace(p)); | 792 | t->real_parent = reaper; |
787 | p->parent = p->real_parent; | 793 | if (t->parent == father) { |
788 | } | 794 | BUG_ON(task_ptrace(t)); |
789 | reparent_thread(father, p, &dead_children); | 795 | t->parent = t->real_parent; |
796 | } | ||
797 | if (t->pdeath_signal) | ||
798 | group_send_sig_info(t->pdeath_signal, | ||
799 | SEND_SIG_NOINFO, t); | ||
800 | } while_each_thread(p, t); | ||
801 | reparent_leader(father, p, &dead_children); | ||
790 | } | 802 | } |
791 | write_unlock_irq(&tasklist_lock); | 803 | write_unlock_irq(&tasklist_lock); |
792 | 804 | ||
@@ -934,7 +946,7 @@ NORET_TYPE void do_exit(long code) | |||
934 | * an exiting task cleaning up the robust pi futexes. | 946 | * an exiting task cleaning up the robust pi futexes. |
935 | */ | 947 | */ |
936 | smp_mb(); | 948 | smp_mb(); |
937 | spin_unlock_wait(&tsk->pi_lock); | 949 | raw_spin_unlock_wait(&tsk->pi_lock); |
938 | 950 | ||
939 | if (unlikely(in_atomic())) | 951 | if (unlikely(in_atomic())) |
940 | printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", | 952 | printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", |
@@ -942,7 +954,9 @@ NORET_TYPE void do_exit(long code) | |||
942 | preempt_count()); | 954 | preempt_count()); |
943 | 955 | ||
944 | acct_update_integrals(tsk); | 956 | acct_update_integrals(tsk); |
945 | 957 | /* sync mm's RSS info before statistics gathering */ | |
958 | if (tsk->mm) | ||
959 | sync_mm_rss(tsk, tsk->mm); | ||
946 | group_dead = atomic_dec_and_test(&tsk->signal->live); | 960 | group_dead = atomic_dec_and_test(&tsk->signal->live); |
947 | if (group_dead) { | 961 | if (group_dead) { |
948 | hrtimer_cancel(&tsk->signal->real_timer); | 962 | hrtimer_cancel(&tsk->signal->real_timer); |
@@ -974,7 +988,7 @@ NORET_TYPE void do_exit(long code) | |||
974 | exit_thread(); | 988 | exit_thread(); |
975 | cgroup_exit(tsk, 1); | 989 | cgroup_exit(tsk, 1); |
976 | 990 | ||
977 | if (group_dead && tsk->signal->leader) | 991 | if (group_dead) |
978 | disassociate_ctty(1); | 992 | disassociate_ctty(1); |
979 | 993 | ||
980 | module_put(task_thread_info(tsk)->exec_domain->module); | 994 | module_put(task_thread_info(tsk)->exec_domain->module); |
@@ -982,6 +996,10 @@ NORET_TYPE void do_exit(long code) | |||
982 | proc_exit_connector(tsk); | 996 | proc_exit_connector(tsk); |
983 | 997 | ||
984 | /* | 998 | /* |
999 | * FIXME: do that only when needed, using sched_exit tracepoint | ||
1000 | */ | ||
1001 | flush_ptrace_hw_breakpoint(tsk); | ||
1002 | /* | ||
985 | * Flush inherited counters to the parent - before the parent | 1003 | * Flush inherited counters to the parent - before the parent |
986 | * gets woken up by child-exit notifications. | 1004 | * gets woken up by child-exit notifications. |
987 | */ | 1005 | */ |
@@ -1008,7 +1026,7 @@ NORET_TYPE void do_exit(long code) | |||
1008 | tsk->flags |= PF_EXITPIDONE; | 1026 | tsk->flags |= PF_EXITPIDONE; |
1009 | 1027 | ||
1010 | if (tsk->io_context) | 1028 | if (tsk->io_context) |
1011 | exit_io_context(); | 1029 | exit_io_context(tsk); |
1012 | 1030 | ||
1013 | if (tsk->splice_pipe) | 1031 | if (tsk->splice_pipe) |
1014 | __free_pipe_info(tsk->splice_pipe); | 1032 | __free_pipe_info(tsk->splice_pipe); |
@@ -1176,7 +1194,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) | |||
1176 | 1194 | ||
1177 | if (unlikely(wo->wo_flags & WNOWAIT)) { | 1195 | if (unlikely(wo->wo_flags & WNOWAIT)) { |
1178 | int exit_code = p->exit_code; | 1196 | int exit_code = p->exit_code; |
1179 | int why, status; | 1197 | int why; |
1180 | 1198 | ||
1181 | get_task_struct(p); | 1199 | get_task_struct(p); |
1182 | read_unlock(&tasklist_lock); | 1200 | read_unlock(&tasklist_lock); |
@@ -1209,6 +1227,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) | |||
1209 | struct signal_struct *psig; | 1227 | struct signal_struct *psig; |
1210 | struct signal_struct *sig; | 1228 | struct signal_struct *sig; |
1211 | unsigned long maxrss; | 1229 | unsigned long maxrss; |
1230 | cputime_t tgutime, tgstime; | ||
1212 | 1231 | ||
1213 | /* | 1232 | /* |
1214 | * The resource counters for the group leader are in its | 1233 | * The resource counters for the group leader are in its |
@@ -1224,20 +1243,23 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) | |||
1224 | * need to protect the access to parent->signal fields, | 1243 | * need to protect the access to parent->signal fields, |
1225 | * as other threads in the parent group can be right | 1244 | * as other threads in the parent group can be right |
1226 | * here reaping other children at the same time. | 1245 | * here reaping other children at the same time. |
1246 | * | ||
1247 | * We use thread_group_times() to get times for the thread | ||
1248 | * group, which consolidates times for all threads in the | ||
1249 | * group including the group leader. | ||
1227 | */ | 1250 | */ |
1251 | thread_group_times(p, &tgutime, &tgstime); | ||
1228 | spin_lock_irq(&p->real_parent->sighand->siglock); | 1252 | spin_lock_irq(&p->real_parent->sighand->siglock); |
1229 | psig = p->real_parent->signal; | 1253 | psig = p->real_parent->signal; |
1230 | sig = p->signal; | 1254 | sig = p->signal; |
1231 | psig->cutime = | 1255 | psig->cutime = |
1232 | cputime_add(psig->cutime, | 1256 | cputime_add(psig->cutime, |
1233 | cputime_add(p->utime, | 1257 | cputime_add(tgutime, |
1234 | cputime_add(sig->utime, | 1258 | sig->cutime)); |
1235 | sig->cutime))); | ||
1236 | psig->cstime = | 1259 | psig->cstime = |
1237 | cputime_add(psig->cstime, | 1260 | cputime_add(psig->cstime, |
1238 | cputime_add(p->stime, | 1261 | cputime_add(tgstime, |
1239 | cputime_add(sig->stime, | 1262 | sig->cstime)); |
1240 | sig->cstime))); | ||
1241 | psig->cgtime = | 1263 | psig->cgtime = |
1242 | cputime_add(psig->cgtime, | 1264 | cputime_add(psig->cgtime, |
1243 | cputime_add(p->gtime, | 1265 | cputime_add(p->gtime, |
@@ -1546,14 +1568,9 @@ static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk) | |||
1546 | struct task_struct *p; | 1568 | struct task_struct *p; |
1547 | 1569 | ||
1548 | list_for_each_entry(p, &tsk->children, sibling) { | 1570 | list_for_each_entry(p, &tsk->children, sibling) { |
1549 | /* | 1571 | int ret = wait_consider_task(wo, 0, p); |
1550 | * Do not consider detached threads. | 1572 | if (ret) |
1551 | */ | 1573 | return ret; |
1552 | if (!task_detached(p)) { | ||
1553 | int ret = wait_consider_task(wo, 0, p); | ||
1554 | if (ret) | ||
1555 | return ret; | ||
1556 | } | ||
1557 | } | 1574 | } |
1558 | 1575 | ||
1559 | return 0; | 1576 | return 0; |