Diffstat (limited to 'kernel/signal.c')
-rw-r--r--	kernel/signal.c	73
1 file changed, 53 insertions(+), 20 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index b92c3c9f8b9..50c99264377 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -262,7 +262,7 @@ next_signal(struct sigpending *pending, sigset_t *mask)
 	return sig;
 }
 
-static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags,
+static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
 {
 	struct sigqueue *q = NULL;
@@ -578,7 +578,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 		 * is to alert stop-signal processing code when another
 		 * processor has come along and cleared the flag.
 		 */
-		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
+		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
+			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
 	}
 	if ( signr &&
 	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
@@ -936,34 +937,31 @@ force_sig_specific(int sig, struct task_struct *t)
  * as soon as they're available, so putting the signal on the shared queue
  * will be equivalent to sending it to one such thread.
  */
-#define wants_signal(sig, p, mask) \
-	(!sigismember(&(p)->blocked, sig) \
-	 && !((p)->state & mask) \
-	 && !((p)->flags & PF_EXITING) \
-	 && (task_curr(p) || !signal_pending(p)))
-
+static inline int wants_signal(int sig, struct task_struct *p)
+{
+	if (sigismember(&p->blocked, sig))
+		return 0;
+	if (p->flags & PF_EXITING)
+		return 0;
+	if (sig == SIGKILL)
+		return 1;
+	if (p->state & (TASK_STOPPED | TASK_TRACED))
+		return 0;
+	return task_curr(p) || !signal_pending(p);
+}
 
 static void
 __group_complete_signal(int sig, struct task_struct *p)
 {
-	unsigned int mask;
 	struct task_struct *t;
 
 	/*
-	 * Don't bother traced and stopped tasks (but
-	 * SIGKILL will punch through that).
-	 */
-	mask = TASK_STOPPED | TASK_TRACED;
-	if (sig == SIGKILL)
-		mask = 0;
-
-	/*
 	 * Now find a thread we can wake up to take the signal off the queue.
 	 *
 	 * If the main thread wants the signal, it gets first crack.
 	 * Probably the least surprising to the average bear.
 	 */
-	if (wants_signal(sig, p, mask))
+	if (wants_signal(sig, p))
 		t = p;
 	else if (thread_group_empty(p))
 		/*
@@ -981,7 +979,7 @@ __group_complete_signal(int sig, struct task_struct *p)
 			t = p->signal->curr_target = p;
 		BUG_ON(t->tgid != p->tgid);
 
-		while (!wants_signal(sig, t, mask)) {
+		while (!wants_signal(sig, t)) {
 			t = next_thread(t);
 			if (t == p->signal->curr_target)
 				/*
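The userspace sketch below is illustrative only and not part of the patch. It shows the behaviour the explicit SIGKILL check in the new wants_signal() preserves (the old macro handled it by clearing the state mask for SIGKILL): SIGKILL must still terminate a thread that is sitting in TASK_STOPPED.

#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t child = fork();

	if (child < 0)
		return 1;
	if (child == 0) {
		for (;;)
			pause();		/* child just waits for signals */
	}

	kill(child, SIGSTOP);			/* child enters the stopped state */
	waitpid(child, &status, WUNTRACED);	/* wait until the stop is reported */

	kill(child, SIGKILL);			/* must punch through the stopped state */
	waitpid(child, &status, 0);

	if (WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
		printf("stopped child was killed by SIGKILL, as expected\n");
	return 0;
}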
@@ -1195,6 +1193,40 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
 	return error;
 }
 
+/* like kill_proc_info(), but doesn't use uid/euid of "current" */
+int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
+			  uid_t uid, uid_t euid)
+{
+	int ret = -EINVAL;
+	struct task_struct *p;
+
+	if (!valid_signal(sig))
+		return ret;
+
+	read_lock(&tasklist_lock);
+	p = find_task_by_pid(pid);
+	if (!p) {
+		ret = -ESRCH;
+		goto out_unlock;
+	}
+	if ((!info || ((unsigned long)info != 1 &&
+			(unsigned long)info != 2 && SI_FROMUSER(info)))
+	    && (euid != p->suid) && (euid != p->uid)
+	    && (uid != p->suid) && (uid != p->uid)) {
+		ret = -EPERM;
+		goto out_unlock;
+	}
+	if (sig && p->sighand) {
+		unsigned long flags;
+		spin_lock_irqsave(&p->sighand->siglock, flags);
+		ret = __group_send_sig_info(sig, info, p);
+		spin_unlock_irqrestore(&p->sighand->siglock, flags);
+	}
+out_unlock:
+	read_unlock(&tasklist_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
 
 /*
  * kill_something_info() interprets pid in interesting ways just like kill(2).
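The sketch below is a hypothetical caller of the new export, not code from this patch; all example_* names are invented for illustration. A driver can record the opener's uid/euid at open() time and later signal the task that requested notification, so the permission check in kill_proc_info_as_uid() runs against the saved credentials rather than against whatever "current" happens to be when the event completes.

#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/string.h>

/* declared elsewhere in this series; repeated here to keep the sketch self-contained */
extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t);

/* Hypothetical per-open state. */
struct example_state {
	pid_t	owner_pid;	/* task to notify, chosen via some ioctl */
	uid_t	uid, euid;	/* credentials captured at open() time */
	int	signr;		/* signal number the user asked for */
};

static void example_capture_owner(struct example_state *st)
{
	/* remember who opened the device */
	st->uid = current->uid;
	st->euid = current->euid;
}

static void example_notify(struct example_state *st)
{
	struct siginfo info;

	memset(&info, 0, sizeof(info));
	info.si_signo = st->signr;
	info.si_code = SI_ASYNCIO;

	/* permission is checked against the saved ids, not current's */
	kill_proc_info_as_uid(st->signr, &info, st->owner_pid,
			      st->uid, st->euid);
}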
@@ -1766,7 +1798,8 @@ do_signal_stop(int signr)
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
-			if (t->state < TASK_STOPPED) {
+			if (!t->exit_state &&
+			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
				stop_count++;
				signal_wake_up(t, 0);
			}