| author | Linus Torvalds <torvalds@g5.osdl.org> | 2005-09-23 16:22:21 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-09-23 16:22:21 -0400 |
| commit | 188a1eafa03aaa5e5fe6f53e637e704cd2c31c7c (patch) | |
| tree | 33d9480c792724c10e4e6fb681fc69c477cf1ff0 /kernel | |
| parent | ede1327ea4ca8019ec6df24b3e837def091c26b8 (diff) | |
Make sure SIGKILL gets proper respect
Bhavesh P. Davda <bhavesh@avaya.com> noticed that SIGKILL wouldn't
properly kill a process under just the right circumstances: a stopped
task that already had another signal queued would get the SIGKILL
queued onto the shared queue, and there it would remain until SIGCONT.
This simplifies the signal acceptance logic, and fixes the bug in the
process.
Loosely based on an earlier patch by Bhavesh.
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
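For context, the failure mode described in the message above can be exercised from userspace with an ordinary stop/queue/kill sequence. The following is a sketch of such a reproducer, not part of the commit; the exact timing (the `sleep()` calls) and the choice of SIGUSR1 as the "already queued" signal are assumptions made for illustration.

```c
/*
 * Illustrative reproducer for the scenario in the commit message:
 * stop a task, queue another signal while it is stopped, then send
 * SIGKILL. Per the description above, on an affected kernel the
 * SIGKILL sits on the shared queue until SIGCONT; on a fixed kernel
 * the child dies immediately. This is a sketch, not kernel code.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = fork();

        if (pid < 0) {
                perror("fork");
                return 1;
        }
        if (pid == 0) {
                /* Child: idle until signalled. */
                for (;;)
                        pause();
        }

        sleep(1);                       /* let the child reach pause() */
        kill(pid, SIGSTOP);             /* stop the task */
        kill(pid, SIGUSR1);             /* queue another signal while stopped */
        kill(pid, SIGKILL);             /* must terminate it regardless */
        sleep(1);

        if (waitpid(pid, NULL, WNOHANG) == pid) {
                printf("child was killed as expected\n");
        } else {
                printf("child survived SIGKILL while stopped (bug)\n");
                kill(pid, SIGCONT);     /* lets the queued SIGKILL through */
                waitpid(pid, NULL, 0);
        }
        return 0;
}
```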
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/signal.c | 31 |
1 file changed, 14 insertions(+), 17 deletions(-)
```diff
diff --git a/kernel/signal.c b/kernel/signal.c
index b92c3c9f8b9a..5a274705ba19 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -936,34 +936,31 @@ force_sig_specific(int sig, struct task_struct *t)
  * as soon as they're available, so putting the signal on the shared queue
  * will be equivalent to sending it to one such thread.
  */
-#define wants_signal(sig, p, mask)                      \
-        (!sigismember(&(p)->blocked, sig)               \
-         && !((p)->state & mask)                        \
-         && !((p)->flags & PF_EXITING)                  \
-         && (task_curr(p) || !signal_pending(p)))
-
+static inline int wants_signal(int sig, struct task_struct *p)
+{
+        if (sigismember(&p->blocked, sig))
+                return 0;
+        if (p->flags & PF_EXITING)
+                return 0;
+        if (sig == SIGKILL)
+                return 1;
+        if (p->state & (TASK_STOPPED | TASK_TRACED))
+                return 0;
+        return task_curr(p) || !signal_pending(p);
+}
 
 static void
 __group_complete_signal(int sig, struct task_struct *p)
 {
-        unsigned int mask;
         struct task_struct *t;
 
         /*
-         * Don't bother traced and stopped tasks (but
-         * SIGKILL will punch through that).
-         */
-        mask = TASK_STOPPED | TASK_TRACED;
-        if (sig == SIGKILL)
-                mask = 0;
-
-        /*
          * Now find a thread we can wake up to take the signal off the queue.
          *
          * If the main thread wants the signal, it gets first crack.
          * Probably the least surprising to the average bear.
          */
-        if (wants_signal(sig, p, mask))
+        if (wants_signal(sig, p))
                 t = p;
         else if (thread_group_empty(p))
                 /*
@@ -981,7 +978,7 @@ __group_complete_signal(int sig, struct task_struct *p)
                 t = p->signal->curr_target = p;
         BUG_ON(t->tgid != p->tgid);
 
-        while (!wants_signal(sig, t, mask)) {
+        while (!wants_signal(sig, t)) {
                 t = next_thread(t);
                 if (t == p->signal->curr_target)
                         /*
```
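The behavioral change is visible in the predicate ordering. The old macro cleared the stopped/traced mask for SIGKILL, but its final `task_curr(p) || !signal_pending(p)` test still rejected a stopped task that already had a signal pending, so no thread was chosen to take the SIGKILL off the shared queue. The new helper short-circuits on SIGKILL before the stopped/traced and pending checks. The sketch below restates the two predicates side by side with plain fields standing in for the `task_struct` state it reads; it is illustrative only, not kernel code, though the TASK_* values match the 2.6-era headers.

```c
/* Standalone restatement of the before/after acceptance test. */
#include <stdio.h>

#define TASK_STOPPED    0x04            /* values as in 2.6-era kernel headers */
#define TASK_TRACED     0x08
#define SIGKILL         9

struct fake_task {
        long state;
        int sig_is_blocked;             /* stands in for sigismember(&p->blocked, sig) */
        int exiting;                    /* stands in for p->flags & PF_EXITING */
        int curr;                       /* stands in for task_curr(p) */
        int sig_pending;                /* stands in for signal_pending(p) */
};

/* Old behaviour: the caller cleared the stopped/traced mask for SIGKILL,
 * but the trailing pending test could still reject the task. */
static int old_wants_signal(int sig, const struct fake_task *p)
{
        long mask = (sig == SIGKILL) ? 0 : (TASK_STOPPED | TASK_TRACED);

        return !p->sig_is_blocked && !(p->state & mask) &&
               !p->exiting && (p->curr || !p->sig_pending);
}

/* New behaviour: SIGKILL short-circuits before the stopped and pending tests. */
static int new_wants_signal(int sig, const struct fake_task *p)
{
        if (p->sig_is_blocked || p->exiting)
                return 0;
        if (sig == SIGKILL)
                return 1;
        if (p->state & (TASK_STOPPED | TASK_TRACED))
                return 0;
        return p->curr || !p->sig_pending;
}

int main(void)
{
        /* A stopped, not-running task with another signal already pending. */
        struct fake_task stopped = {
                .state = TASK_STOPPED, .curr = 0, .sig_pending = 1,
        };

        printf("old: %d  new: %d\n",
               old_wants_signal(SIGKILL, &stopped),     /* 0: SIGKILL not accepted */
               new_wants_signal(SIGKILL, &stopped));    /* 1: SIGKILL accepted */
        return 0;
}
```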