author    Oleg Nesterov <oleg@tv-sign.ru>    2006-03-28 19:11:18 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>    2006-03-28 21:36:43 -0500
commit    6a14c5c9da0b4c34b5be783403c54f0396fcfe77 (patch)
tree      63e40e5761eb5327ae4f713b284c67128cb4d261
parent    c81addc9d3a0ebff2155e0cd86f90820ab97147e (diff)
[PATCH] move __exit_signal() to kernel/exit.c
__exit_signal() is private to release_task() now. I think it is better to
make it static in kernel/exit.c and export flush_sigqueue() instead - this
function is much simpler and more straightforward.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--   include/linux/sched.h     1
-rw-r--r--   include/linux/signal.h    2
-rw-r--r--   kernel/exit.c            63
-rw-r--r--   kernel/signal.c          65
4 files changed, 66 insertions(+), 65 deletions(-)
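For orientation, a minimal sketch of the structure the change produces (this is not part of the patch; release_task() internals are elided and simplified): __exit_signal() becomes a file-local helper that only release_task() reaches, under the write-locked tasklist_lock it already holds, while flush_sigqueue() is declared in <linux/signal.h> so it can be called from kernel/exit.c.

/* include/linux/signal.h: the newly exported primitive */
extern void flush_sigqueue(struct sigpending *queue);

/* kernel/exit.c: __exit_signal() is now static; its only caller is
 * release_task(), which takes tasklist_lock for writing first. */
static void __exit_signal(struct task_struct *tsk);

void release_task(struct task_struct *p)
{
	write_lock_irq(&tasklist_lock);
	__exit_signal(p);	/* detach signal state, flush queued signals */
	/* ... remainder of release_task() elided ... */
	write_unlock_irq(&tasklist_lock);
}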
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 921148277da9..a913fca9e70d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1151,7 +1151,6 @@ extern void exit_thread(void);
 extern void exit_files(struct task_struct *);
 extern void __cleanup_signal(struct signal_struct *);
 extern void cleanup_sighand(struct task_struct *);
-extern void __exit_signal(struct task_struct *);
 extern void exit_itimers(struct signal_struct *);
 
 extern NORET_TYPE void do_group_exit(int);
diff --git a/include/linux/signal.h b/include/linux/signal.h
index b7d093520bb6..162a8fd10b29 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -249,6 +249,8 @@ static inline void init_sigpending(struct sigpending *sig)
 	INIT_LIST_HEAD(&sig->list);
 }
 
+extern void flush_sigqueue(struct sigpending *queue);
+
 /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
 static inline int valid_signal(unsigned long sig)
 {
diff --git a/kernel/exit.c b/kernel/exit.c
index 77c35efad88c..3823ec89d7b8 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -29,6 +29,7 @@
 #include <linux/cpuset.h>
 #include <linux/syscalls.h>
 #include <linux/signal.h>
+#include <linux/posix-timers.h>
 #include <linux/cn_proc.h>
 #include <linux/mutex.h>
 #include <linux/futex.h>
@@ -62,6 +63,68 @@ static void __unhash_process(struct task_struct *p)
 	remove_parent(p);
 }
 
+/*
+ * This function expects the tasklist_lock write-locked.
+ */
+static void __exit_signal(struct task_struct *tsk)
+{
+	struct signal_struct *sig = tsk->signal;
+	struct sighand_struct *sighand;
+
+	BUG_ON(!sig);
+	BUG_ON(!atomic_read(&sig->count));
+
+	rcu_read_lock();
+	sighand = rcu_dereference(tsk->sighand);
+	spin_lock(&sighand->siglock);
+
+	posix_cpu_timers_exit(tsk);
+	if (atomic_dec_and_test(&sig->count))
+		posix_cpu_timers_exit_group(tsk);
+	else {
+		/*
+		 * If there is any task waiting for the group exit
+		 * then notify it:
+		 */
+		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
+			wake_up_process(sig->group_exit_task);
+			sig->group_exit_task = NULL;
+		}
+		if (tsk == sig->curr_target)
+			sig->curr_target = next_thread(tsk);
+		/*
+		 * Accumulate here the counters for all threads but the
+		 * group leader as they die, so they can be added into
+		 * the process-wide totals when those are taken.
+		 * The group leader stays around as a zombie as long
+		 * as there are other threads. When it gets reaped,
+		 * the exit.c code will add its counts into these totals.
+		 * We won't ever get here for the group leader, since it
+		 * will have been the last reference on the signal_struct.
+		 */
+		sig->utime = cputime_add(sig->utime, tsk->utime);
+		sig->stime = cputime_add(sig->stime, tsk->stime);
+		sig->min_flt += tsk->min_flt;
+		sig->maj_flt += tsk->maj_flt;
+		sig->nvcsw += tsk->nvcsw;
+		sig->nivcsw += tsk->nivcsw;
+		sig->sched_time += tsk->sched_time;
+		sig = NULL; /* Marker for below. */
+	}
+
+	tsk->signal = NULL;
+	cleanup_sighand(tsk);
+	spin_unlock(&sighand->siglock);
+	rcu_read_unlock();
+
+	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
+	flush_sigqueue(&tsk->pending);
+	if (sig) {
+		flush_sigqueue(&sig->shared_pending);
+		__cleanup_signal(sig);
+	}
+}
+
 void release_task(struct task_struct * p)
 {
 	int zap_leader;
diff --git a/kernel/signal.c b/kernel/signal.c
index b29c868bd5ee..6ea49f742a2f 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,7 +22,6 @@
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/ptrace.h>
-#include <linux/posix-timers.h>
 #include <linux/signal.h>
 #include <linux/audit.h>
 #include <linux/capability.h>
@@ -295,7 +294,7 @@ static void __sigqueue_free(struct sigqueue *q)
 	kmem_cache_free(sigqueue_cachep, q);
 }
 
-static void flush_sigqueue(struct sigpending *queue)
+void flush_sigqueue(struct sigpending *queue)
 {
 	struct sigqueue *q;
 
@@ -322,68 +321,6 @@ void flush_signals(struct task_struct *t)
 }
 
 /*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_signal(struct task_struct *tsk)
-{
-	struct signal_struct *sig = tsk->signal;
-	struct sighand_struct *sighand;
-
-	BUG_ON(!sig);
-	BUG_ON(!atomic_read(&sig->count));
-
-	rcu_read_lock();
-	sighand = rcu_dereference(tsk->sighand);
-	spin_lock(&sighand->siglock);
-
-	posix_cpu_timers_exit(tsk);
-	if (atomic_dec_and_test(&sig->count))
-		posix_cpu_timers_exit_group(tsk);
-	else {
-		/*
-		 * If there is any task waiting for the group exit
-		 * then notify it:
-		 */
-		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
-			wake_up_process(sig->group_exit_task);
-			sig->group_exit_task = NULL;
-		}
-		if (tsk == sig->curr_target)
-			sig->curr_target = next_thread(tsk);
-		/*
-		 * Accumulate here the counters for all threads but the
-		 * group leader as they die, so they can be added into
-		 * the process-wide totals when those are taken.
-		 * The group leader stays around as a zombie as long
-		 * as there are other threads. When it gets reaped,
-		 * the exit.c code will add its counts into these totals.
-		 * We won't ever get here for the group leader, since it
-		 * will have been the last reference on the signal_struct.
-		 */
-		sig->utime = cputime_add(sig->utime, tsk->utime);
-		sig->stime = cputime_add(sig->stime, tsk->stime);
-		sig->min_flt += tsk->min_flt;
-		sig->maj_flt += tsk->maj_flt;
-		sig->nvcsw += tsk->nvcsw;
-		sig->nivcsw += tsk->nivcsw;
-		sig->sched_time += tsk->sched_time;
-		sig = NULL; /* Marker for below. */
-	}
-
-	tsk->signal = NULL;
-	cleanup_sighand(tsk);
-	spin_unlock(&sighand->siglock);
-	rcu_read_unlock();
-
-	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
-	flush_sigqueue(&tsk->pending);
-	if (sig) {
-		flush_sigqueue(&sig->shared_pending);
-		__cleanup_signal(sig);
-	}
-}
-
-/*
  * Flush all handlers for a task.
  */
 
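The practical effect of exporting flush_sigqueue() is that callers outside kernel/signal.c can now discard queued signals directly. A hypothetical helper (drop_pending_signals() below is illustrative only, not part of this patch) would flush both the per-thread queue and the process-shared queue, and, as in __exit_signal() above, only once nothing can still queue signals to them:

/* Illustrative only: drop everything queued to a task.  Safe only when
 * no further signals can be queued to it, e.g. after its signal state
 * has been detached the way __exit_signal() does above. */
static void drop_pending_signals(struct task_struct *tsk)
{
	flush_sigqueue(&tsk->pending);			/* per-thread queue */
	flush_sigqueue(&tsk->signal->shared_pending);	/* process-wide queue */
}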