diff options
Diffstat (limited to 'kernel/exit.c')
-rw-r--r--  kernel/exit.c | 63 ++++++++++
1 file changed, 63 insertions(+), 0 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 77c35efad88c..3823ec89d7b8 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/cpuset.h> | 29 | #include <linux/cpuset.h> |
30 | #include <linux/syscalls.h> | 30 | #include <linux/syscalls.h> |
31 | #include <linux/signal.h> | 31 | #include <linux/signal.h> |
32 | #include <linux/posix-timers.h> | ||
32 | #include <linux/cn_proc.h> | 33 | #include <linux/cn_proc.h> |
33 | #include <linux/mutex.h> | 34 | #include <linux/mutex.h> |
34 | #include <linux/futex.h> | 35 | #include <linux/futex.h> |
@@ -62,6 +63,68 @@ static void __unhash_process(struct task_struct *p) | |||
62 | remove_parent(p); | 63 | remove_parent(p); |
63 | } | 64 | } |
64 | 65 | ||
/*
 * This function expects the tasklist_lock write-locked.
 *
 * Detach @tsk from its shared signal state: drop one reference on the
 * signal_struct and, when @tsk holds the last reference, tear the whole
 * group signal state down.  Otherwise fold this thread's accounting
 * counters into the process-wide totals before it disappears.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *sighand;

	BUG_ON(!sig);
	BUG_ON(!atomic_read(&sig->count));

	/*
	 * Pin tsk->sighand with rcu_read_lock() before dereferencing it
	 * and taking siglock — NOTE(review): this presumes sighand_struct
	 * is freed via RCU; confirm against cleanup_sighand()/its cache.
	 */
	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);

	/* Stop this thread's POSIX CPU timers. */
	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count))
		/* Last reference: the process-wide CPU timers go too. */
		posix_cpu_timers_exit_group(tsk);
	else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		/* Don't leave curr_target pointing at a dying thread. */
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads. When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->sched_time += tsk->sched_time;
		sig = NULL; /* Marker for below: non-NULL only on the last exit. */
	}

	/* Detach under siglock so concurrent signal senders see either
	 * the old state or no signal state at all. */
	tsk->signal = NULL;
	cleanup_sighand(tsk);
	spin_unlock(&sighand->siglock);
	rcu_read_unlock();

	/*
	 * Locks dropped: the task is detached from the signal state, so
	 * its private pending queue can be flushed without siglock.
	 */
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * atomic_dec_and_test() above saw the final reference
		 * (sig was left non-NULL), so free the shared pending
		 * queue and the signal_struct itself.
		 */
		flush_sigqueue(&sig->shared_pending);
		__cleanup_signal(sig);
	}
}
127 | |||
65 | void release_task(struct task_struct * p) | 128 | void release_task(struct task_struct * p) |
66 | { | 129 | { |
67 | int zap_leader; | 130 | int zap_leader; |