-rw-r--r--  include/linux/sched.h  |  1
-rw-r--r--  include/linux/signal.h |  2
-rw-r--r--  kernel/exit.c          | 63
-rw-r--r--  kernel/signal.c        | 65
4 files changed, 66 insertions, 65 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 921148277da9..a913fca9e70d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1151,7 +1151,6 @@ extern void exit_thread(void);
 extern void exit_files(struct task_struct *);
 extern void __cleanup_signal(struct signal_struct *);
 extern void cleanup_sighand(struct task_struct *);
-extern void __exit_signal(struct task_struct *);
 extern void exit_itimers(struct signal_struct *);
 
 extern NORET_TYPE void do_group_exit(int);
diff --git a/include/linux/signal.h b/include/linux/signal.h
index b7d093520bb6..162a8fd10b29 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -249,6 +249,8 @@ static inline void init_sigpending(struct sigpending *sig)
 	INIT_LIST_HEAD(&sig->list);
 }
 
+extern void flush_sigqueue(struct sigpending *queue);
+
 /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
 static inline int valid_signal(unsigned long sig)
 {
diff --git a/kernel/exit.c b/kernel/exit.c
index 77c35efad88c..3823ec89d7b8 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -29,6 +29,7 @@
 #include <linux/cpuset.h>
 #include <linux/syscalls.h>
 #include <linux/signal.h>
+#include <linux/posix-timers.h>
 #include <linux/cn_proc.h>
 #include <linux/mutex.h>
 #include <linux/futex.h>
@@ -62,6 +63,68 @@ static void __unhash_process(struct task_struct *p)
 	remove_parent(p);
 }
 
+/*
+ * This function expects the tasklist_lock write-locked.
+ */
+static void __exit_signal(struct task_struct *tsk)
+{
+	struct signal_struct *sig = tsk->signal;
+	struct sighand_struct *sighand;
+
+	BUG_ON(!sig);
+	BUG_ON(!atomic_read(&sig->count));
+
+	rcu_read_lock();
+	sighand = rcu_dereference(tsk->sighand);
+	spin_lock(&sighand->siglock);
+
+	posix_cpu_timers_exit(tsk);
+	if (atomic_dec_and_test(&sig->count))
+		posix_cpu_timers_exit_group(tsk);
+	else {
+		/*
+		 * If there is any task waiting for the group exit
+		 * then notify it:
+		 */
+		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
+			wake_up_process(sig->group_exit_task);
+			sig->group_exit_task = NULL;
+		}
+		if (tsk == sig->curr_target)
+			sig->curr_target = next_thread(tsk);
+		/*
+		 * Accumulate here the counters for all threads but the
+		 * group leader as they die, so they can be added into
+		 * the process-wide totals when those are taken.
+		 * The group leader stays around as a zombie as long
+		 * as there are other threads.  When it gets reaped,
+		 * the exit.c code will add its counts into these totals.
+		 * We won't ever get here for the group leader, since it
+		 * will have been the last reference on the signal_struct.
+		 */
+		sig->utime = cputime_add(sig->utime, tsk->utime);
+		sig->stime = cputime_add(sig->stime, tsk->stime);
+		sig->min_flt += tsk->min_flt;
+		sig->maj_flt += tsk->maj_flt;
+		sig->nvcsw += tsk->nvcsw;
+		sig->nivcsw += tsk->nivcsw;
+		sig->sched_time += tsk->sched_time;
+		sig = NULL; /* Marker for below. */
+	}
+
+	tsk->signal = NULL;
+	cleanup_sighand(tsk);
+	spin_unlock(&sighand->siglock);
+	rcu_read_unlock();
+
+	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
+	flush_sigqueue(&tsk->pending);
+	if (sig) {
+		flush_sigqueue(&sig->shared_pending);
+		__cleanup_signal(sig);
+	}
+}
+
 void release_task(struct task_struct * p)
 {
 	int zap_leader;
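Note on the pattern in the new exit.c code above: __exit_signal() drops one reference to the signal_struct with atomic_dec_and_test() while holding siglock, and uses the local `sig` pointer as a marker so that flushing shared_pending and calling __cleanup_signal() happen, after the unlock, only when the last reference went away; otherwise the dying thread's counters are folded into the group totals. The following is a minimal standalone sketch of that shape, using illustrative names and pthread/stdatomic stand-ins rather than kernel APIs:

```c
/*
 * Sketch only: drop a reference under a lock, record via a marker whether it
 * was the last one, and defer the heavier cleanup until after the unlock.
 * Types and names are illustrative, not kernel interfaces.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct shared_state {
	atomic_int count;		/* one reference per live thread (like sig->count) */
	pthread_mutex_t lock;		/* stands in for sighand->siglock */
	long utime_total;		/* stands in for the accumulated per-process counters */
};

static void thread_exit_cleanup(struct shared_state **statep, long my_utime)
{
	struct shared_state *state = *statep;
	struct shared_state *to_free = NULL;

	pthread_mutex_lock(&state->lock);
	if (atomic_fetch_sub(&state->count, 1) == 1) {
		/* Last reference: remember to free, but do it outside the lock. */
		to_free = state;
	} else {
		/* Other threads remain: fold this thread's counters into the totals. */
		state->utime_total += my_utime;
	}
	*statep = NULL;			/* detach the exiting thread from the shared state */
	pthread_mutex_unlock(&state->lock);

	if (to_free) {
		/* Heavy teardown runs only after the unlock, as in the hunk above. */
		pthread_mutex_destroy(&to_free->lock);
		free(to_free);
	}
}

int main(void)
{
	struct shared_state *s = malloc(sizeof(*s));

	atomic_init(&s->count, 1);	/* a single thread in this toy example */
	pthread_mutex_init(&s->lock, NULL);
	s->utime_total = 0;

	thread_exit_cleanup(&s, 5);	/* last (only) reference: state is freed */
	return 0;
}
```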
diff --git a/kernel/signal.c b/kernel/signal.c
index b29c868bd5ee..6ea49f742a2f 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,7 +22,6 @@
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/ptrace.h>
-#include <linux/posix-timers.h>
 #include <linux/signal.h>
 #include <linux/audit.h>
 #include <linux/capability.h>
@@ -295,7 +294,7 @@ static void __sigqueue_free(struct sigqueue *q)
 	kmem_cache_free(sigqueue_cachep, q);
 }
 
-static void flush_sigqueue(struct sigpending *queue)
+void flush_sigqueue(struct sigpending *queue)
 {
 	struct sigqueue *q;
 
@@ -322,68 +321,6 @@ void flush_signals(struct task_struct *t)
 }
 
 /*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_signal(struct task_struct *tsk)
-{
-	struct signal_struct *sig = tsk->signal;
-	struct sighand_struct *sighand;
-
-	BUG_ON(!sig);
-	BUG_ON(!atomic_read(&sig->count));
-
-	rcu_read_lock();
-	sighand = rcu_dereference(tsk->sighand);
-	spin_lock(&sighand->siglock);
-
-	posix_cpu_timers_exit(tsk);
-	if (atomic_dec_and_test(&sig->count))
-		posix_cpu_timers_exit_group(tsk);
-	else {
-		/*
-		 * If there is any task waiting for the group exit
-		 * then notify it:
-		 */
-		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
-			wake_up_process(sig->group_exit_task);
-			sig->group_exit_task = NULL;
-		}
-		if (tsk == sig->curr_target)
-			sig->curr_target = next_thread(tsk);
-		/*
-		 * Accumulate here the counters for all threads but the
-		 * group leader as they die, so they can be added into
-		 * the process-wide totals when those are taken.
-		 * The group leader stays around as a zombie as long
-		 * as there are other threads.  When it gets reaped,
-		 * the exit.c code will add its counts into these totals.
-		 * We won't ever get here for the group leader, since it
-		 * will have been the last reference on the signal_struct.
-		 */
-		sig->utime = cputime_add(sig->utime, tsk->utime);
-		sig->stime = cputime_add(sig->stime, tsk->stime);
-		sig->min_flt += tsk->min_flt;
-		sig->maj_flt += tsk->maj_flt;
-		sig->nvcsw += tsk->nvcsw;
-		sig->nivcsw += tsk->nivcsw;
-		sig->sched_time += tsk->sched_time;
-		sig = NULL; /* Marker for below. */
-	}
-
-	tsk->signal = NULL;
-	cleanup_sighand(tsk);
-	spin_unlock(&sighand->siglock);
-	rcu_read_unlock();
-
-	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
-	flush_sigqueue(&tsk->pending);
-	if (sig) {
-		flush_sigqueue(&sig->shared_pending);
-		__cleanup_signal(sig);
-	}
-}
-
-/*
  * Flush all handlers for a task.
  */
 
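Since flush_sigqueue() loses its static qualifier here and gains a declaration in <linux/signal.h>, the relocated __exit_signal() in exit.c can drain both the per-thread and the shared pending queues. For orientation, a simplified user-space sketch of the same drain-and-free idea follows; the types and field names are illustrative assumptions, not the kernel's struct sigpending layout:

```c
/*
 * Sketch only: clear the "something is pending" state, then pop and free
 * every queued entry.  Names and layout are illustrative, not the kernel's.
 */
#include <stdlib.h>

struct queued_signal {
	struct queued_signal *next;
	int signo;
};

struct pending_queue {
	unsigned long pending_mask;	/* which signals are queued (akin to the sigset) */
	struct queued_signal *head;	/* list of queued entries (akin to the list head) */
};

static void flush_queue(struct pending_queue *q)
{
	struct queued_signal *item;

	q->pending_mask = 0;			/* nothing is considered pending any more */
	while ((item = q->head) != NULL) {	/* pop and free each queued entry */
		q->head = item->next;
		free(item);
	}
}

int main(void)
{
	struct pending_queue q = { 0, NULL };
	struct queued_signal *s = malloc(sizeof(*s));

	s->next = NULL;
	s->signo = 10;
	q.head = s;
	q.pending_mask = 1UL << s->signo;

	flush_queue(&q);	/* afterwards the list is empty and the mask is cleared */
	return 0;
}
```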
