Diffstat (limited to 'kernel/exit.c')

 -rw-r--r--  kernel/exit.c | 129
 1 file changed, 82 insertions(+), 47 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index a8c7efc7a681..bc0ec674d3f4 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -29,6 +29,7 @@
 #include <linux/cpuset.h>
 #include <linux/syscalls.h>
 #include <linux/signal.h>
+#include <linux/posix-timers.h>
 #include <linux/cn_proc.h>
 #include <linux/mutex.h>
 #include <linux/futex.h>
@@ -50,15 +51,80 @@ static void __unhash_process(struct task_struct *p)
 {
         nr_threads--;
         detach_pid(p, PIDTYPE_PID);
-        detach_pid(p, PIDTYPE_TGID);
         if (thread_group_leader(p)) {
                 detach_pid(p, PIDTYPE_PGID);
                 detach_pid(p, PIDTYPE_SID);
-                if (p->pid)
-                        __get_cpu_var(process_counts)--;
+
+                list_del_init(&p->tasks);
+                __get_cpu_var(process_counts)--;
+        }
+        list_del_rcu(&p->thread_group);
+        remove_parent(p);
+}
+
+/*
+ * This function expects the tasklist_lock write-locked.
+ */
+static void __exit_signal(struct task_struct *tsk)
+{
+        struct signal_struct *sig = tsk->signal;
+        struct sighand_struct *sighand;
+
+        BUG_ON(!sig);
+        BUG_ON(!atomic_read(&sig->count));
+
+        rcu_read_lock();
+        sighand = rcu_dereference(tsk->sighand);
+        spin_lock(&sighand->siglock);
+
+        posix_cpu_timers_exit(tsk);
+        if (atomic_dec_and_test(&sig->count))
+                posix_cpu_timers_exit_group(tsk);
+        else {
+                /*
+                 * If there is any task waiting for the group exit
+                 * then notify it:
+                 */
+                if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
+                        wake_up_process(sig->group_exit_task);
+                        sig->group_exit_task = NULL;
+                }
+                if (tsk == sig->curr_target)
+                        sig->curr_target = next_thread(tsk);
+                /*
+                 * Accumulate here the counters for all threads but the
+                 * group leader as they die, so they can be added into
+                 * the process-wide totals when those are taken.
+                 * The group leader stays around as a zombie as long
+                 * as there are other threads. When it gets reaped,
+                 * the exit.c code will add its counts into these totals.
+                 * We won't ever get here for the group leader, since it
+                 * will have been the last reference on the signal_struct.
+                 */
+                sig->utime = cputime_add(sig->utime, tsk->utime);
+                sig->stime = cputime_add(sig->stime, tsk->stime);
+                sig->min_flt += tsk->min_flt;
+                sig->maj_flt += tsk->maj_flt;
+                sig->nvcsw += tsk->nvcsw;
+                sig->nivcsw += tsk->nivcsw;
+                sig->sched_time += tsk->sched_time;
+                sig = NULL; /* Marker for below. */
         }
 
-        REMOVE_LINKS(p);
+        __unhash_process(tsk);
+
+        tsk->signal = NULL;
+        tsk->sighand = NULL;
+        spin_unlock(&sighand->siglock);
+        rcu_read_unlock();
+
+        __cleanup_sighand(sighand);
+        clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
+        flush_sigqueue(&tsk->pending);
+        if (sig) {
+                flush_sigqueue(&sig->shared_pending);
+                __cleanup_signal(sig);
+        }
 }
 
 void release_task(struct task_struct * p)
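The consolidated __exit_signal() above pins tsk->sighand inside an RCU read-side critical section before taking siglock, so the sighand structure cannot be freed out from under the exit path while it is in use. Below is a minimal userspace sketch of that pin-then-lock-then-reclaim pattern, written against the liburcu library; struct sighand_demo and both functions are invented for illustration, and the kernel's actual freeing path differs in detail.

/* Pin-then-lock under RCU, userspace sketch (liburcu). Build with:
 *   gcc sketch.c -lurcu -lpthread
 * All names below are illustrative, not kernel code. */
#include <urcu.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct sighand_demo {
        pthread_mutex_t lock;   /* stands in for sighand->siglock */
        int pending;
};

static struct sighand_demo *shared_sighand;

static void reader(void)
{
        struct sighand_demo *sh;

        rcu_read_lock();                        /* pin: no grace period can end */
        sh = rcu_dereference(shared_sighand);   /* safe snapshot of the pointer */
        if (sh) {
                pthread_mutex_lock(&sh->lock);  /* now serialize against writers */
                printf("pending = %d\n", sh->pending);
                pthread_mutex_unlock(&sh->lock);
        }
        rcu_read_unlock();
}

static void release(void)
{
        struct sighand_demo *old = shared_sighand;

        rcu_assign_pointer(shared_sighand, NULL);  /* unpublish */
        synchronize_rcu();      /* wait until no reader still holds 'old' */
        free(old);              /* only now is freeing safe */
}

int main(void)
{
        rcu_register_thread();
        shared_sighand = calloc(1, sizeof(*shared_sighand));
        pthread_mutex_init(&shared_sighand->lock, NULL);
        reader();
        release();
        rcu_unregister_thread();
        return 0;
}

The essential discipline matches the kernel code above: the pointer is dereferenced only between rcu_read_lock() and rcu_read_unlock(), and reclamation waits out a grace period before the memory can be reused.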
@@ -67,21 +133,14 @@ void release_task(struct task_struct * p)
         task_t *leader;
         struct dentry *proc_dentry;
 
 repeat:
         atomic_dec(&p->user->processes);
         spin_lock(&p->proc_lock);
         proc_dentry = proc_pid_unhash(p);
         write_lock_irq(&tasklist_lock);
-        if (unlikely(p->ptrace))
-                __ptrace_unlink(p);
+        ptrace_unlink(p);
         BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
         __exit_signal(p);
-        /*
-         * Note that the fastpath in sys_times depends on __exit_signal having
-         * updated the counters before a task is removed from the tasklist of
-         * the process by __unhash_process.
-         */
-        __unhash_process(p);
 
         /*
          * If we are the last non-leader member of the thread
@@ -116,21 +175,6 @@ repeat:
                 goto repeat;
         }
 
-/* we are using it only for SMP init */
-
-void unhash_process(struct task_struct *p)
-{
-        struct dentry *proc_dentry;
-
-        spin_lock(&p->proc_lock);
-        proc_dentry = proc_pid_unhash(p);
-        write_lock_irq(&tasklist_lock);
-        __unhash_process(p);
-        write_unlock_irq(&tasklist_lock);
-        spin_unlock(&p->proc_lock);
-        proc_pid_flush(proc_dentry);
-}
-
 /*
  * This checks not only the pgrp, but falls back on the pid if no
  * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
@@ -238,10 +282,10 @@ static void reparent_to_init(void)
 
         ptrace_unlink(current);
         /* Reparent to init */
-        REMOVE_LINKS(current);
+        remove_parent(current);
         current->parent = child_reaper;
         current->real_parent = child_reaper;
-        SET_LINKS(current);
+        add_parent(current);
 
         /* Set the exit signal to SIGCHLD so we signal init on exit */
         current->exit_signal = SIGCHLD;
@@ -538,13 +582,13 @@ static void exit_mm(struct task_struct * tsk)
         mmput(mm);
 }
 
-static inline void choose_new_parent(task_t *p, task_t *reaper, task_t *child_reaper)
+static inline void choose_new_parent(task_t *p, task_t *reaper)
 {
         /*
          * Make sure we're not reparenting to ourselves and that
          * the parent is not a zombie.
          */
-        BUG_ON(p == reaper || reaper->exit_state >= EXIT_ZOMBIE);
+        BUG_ON(p == reaper || reaper->exit_state);
         p->real_parent = reaper;
 }
 
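The relaxed BUG_ON() is equivalent to the old one because exit_state stays zero for a live task and becomes nonzero once it starts dying, so truth-testing it also covers EXIT_DEAD. A throwaway check of that equivalence; the flag values are copied from 2.6-era <linux/sched.h> and should be treated as an assumption:

#include <assert.h>

#define EXIT_ZOMBIE     16      /* assumed 2.6-era values */
#define EXIT_DEAD       32

int main(void)
{
        long s;

        /* exit_state only ever holds 0, EXIT_ZOMBIE or EXIT_DEAD */
        for (s = 0; s <= EXIT_DEAD; s += EXIT_ZOMBIE)
                assert((s >= EXIT_ZOMBIE) == (s != 0));
        return 0;
}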
@@ -569,9 +613,9 @@ static void reparent_thread(task_t *p, task_t *father, int traced)
          * anyway, so let go of it.
          */
         p->ptrace = 0;
-        list_del_init(&p->sibling);
+        remove_parent(p);
         p->parent = p->real_parent;
-        list_add_tail(&p->sibling, &p->parent->children);
+        add_parent(p);
 
         /* If we'd notified the old parent about this child's death,
          * also notify the new parent.
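This hunk, the reparent_to_init() hunk above and the wait-path hunk at the end of the diff all converge on a one-argument remove_parent()/add_parent() pair; the helpers themselves are defined outside this file and never appear in the diff. The sketch below is a self-contained guess at their shape, with a toy list implementation standing in for the kernel's <linux/list.h>; all names are illustrative.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *head)
{
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
}

static void list_del_init(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        INIT_LIST_HEAD(n);
}

struct task_demo {
        const char *comm;
        struct task_demo *parent;
        struct list_head children;
        struct list_head sibling;
};

/* One-argument form: the target list is derived from p->parent,
 * so a call site can no longer pass a list that disagrees with
 * the parent pointer. */
static void add_parent(struct task_demo *p)
{
        list_add_tail(&p->sibling, &p->parent->children);
}

static void remove_parent(struct task_demo *p)
{
        list_del_init(&p->sibling);
}

int main(void)
{
        struct task_demo init = { .comm = "init" }, child = { .comm = "child" };

        INIT_LIST_HEAD(&init.children);
        INIT_LIST_HEAD(&child.children);
        INIT_LIST_HEAD(&child.sibling);

        child.parent = &init;
        add_parent(&child);     /* reparent_thread()'s new idiom */
        remove_parent(&child);  /* ...and its inverse */
        printf("children empty again: %d\n", init.children.next == &init.children);
        return 0;
}

Deriving the list from p->parent inside add_parent() is presumably why the two-argument form could go.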
@@ -645,7 +689,7 @@ static void forget_original_parent(struct task_struct * father,
 
         if (father == p->real_parent) {
                 /* reparent with a reaper, real father it's us */
-                choose_new_parent(p, reaper, child_reaper);
+                choose_new_parent(p, reaper);
                 reparent_thread(p, father, 0);
         } else {
                 /* reparent ptraced task to its real parent */
@@ -666,7 +710,7 @@ static void forget_original_parent(struct task_struct * father,
         }
         list_for_each_safe(_p, _n, &father->ptrace_children) {
                 p = list_entry(_p,struct task_struct,ptrace_list);
-                choose_new_parent(p, reaper, child_reaper);
+                choose_new_parent(p, reaper);
                 reparent_thread(p, father, 1);
         }
 }
@@ -807,7 +851,7 @@ fastcall NORET_TYPE void do_exit(long code)
                 panic("Aiee, killing interrupt handler!");
         if (unlikely(!tsk->pid))
                 panic("Attempted to kill the idle task!");
-        if (unlikely(tsk->pid == 1))
+        if (unlikely(tsk == child_reaper))
                 panic("Attempted to kill init!");
 
         if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
@@ -920,13 +964,6 @@ asmlinkage long sys_exit(int error_code)
         do_exit((error_code&0xff)<<8);
 }
 
-task_t fastcall *next_thread(const task_t *p)
-{
-        return pid_task(p->pids[PIDTYPE_TGID].pid_list.next, PIDTYPE_TGID);
-}
-
-EXPORT_SYMBOL(next_thread);
-
 /*
  * Take down every thread in the group. This is called by fatal signals
  * as well as by sys_exit_group (below).
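With detach_pid(p, PIDTYPE_TGID) gone from __unhash_process() and threads now unlinked via list_del_rcu(&p->thread_group), the TGID-hash-based next_thread() above has lost its data structure. Its replacement is defined elsewhere and is not visible in this diff; a plausible shape, walking the RCU-managed thread_group list, would be:

/* Assumed replacement, defined outside this diff, not kernel-verbatim: */
static inline task_t *next_thread(const task_t *p)
{
        return list_entry(rcu_dereference(p->thread_group.next),
                          task_t, thread_group);
}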
@@ -941,7 +978,6 @@ do_group_exit(int exit_code)
         else if (!thread_group_empty(current)) {
                 struct signal_struct *const sig = current->signal;
                 struct sighand_struct *const sighand = current->sighand;
-                read_lock(&tasklist_lock);
                 spin_lock_irq(&sighand->siglock);
                 if (sig->flags & SIGNAL_GROUP_EXIT)
                         /* Another thread got here before we took the lock. */
@@ -951,7 +987,6 @@ do_group_exit(int exit_code)
                         zap_other_threads(current);
                 }
                 spin_unlock_irq(&sighand->siglock);
-                read_unlock(&tasklist_lock);
         }
 
         do_exit(exit_code);
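These two hunks drop tasklist_lock from do_group_exit(): every writer of sig->flags already takes sighand->siglock, so the test-or-set sequence is atomic under that one lock and the outer read lock adds nothing. A self-contained pthread analogue of the logic, with all names invented for illustration:

#include <pthread.h>
#include <stdio.h>

struct sig_demo {
        pthread_mutex_t siglock;
        int group_exit;         /* stands in for SIGNAL_GROUP_EXIT */
        int group_exit_code;
};

/* First caller wins and records the exit code; late callers adopt it. */
static int do_group_exit_demo(struct sig_demo *sig, int exit_code)
{
        pthread_mutex_lock(&sig->siglock);
        if (sig->group_exit)
                /* another thread got here before we took the lock */
                exit_code = sig->group_exit_code;
        else {
                sig->group_exit_code = exit_code;
                sig->group_exit = 1;    /* zap_other_threads() would run here */
        }
        pthread_mutex_unlock(&sig->siglock);
        return exit_code;
}

int main(void)
{
        struct sig_demo sig = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

        printf("first caller exits with %d\n", do_group_exit_demo(&sig, 9));
        printf("late caller adopts %d\n", do_group_exit_demo(&sig, 2));
        return 0;
}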
@@ -1281,7 +1316,7 @@ bail_ref:
 
         /* move to end of parent's list to avoid starvation */
         remove_parent(p);
-        add_parent(p, p->parent);
+        add_parent(p);
 
         write_unlock_irq(&tasklist_lock);
 