author     Andrea Bastoni <bastoni@cs.unc.edu>   2011-08-27 09:43:54 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>   2011-08-27 10:06:11 -0400
commit     7b1bb388bc879ffcc6c69b567816d5c354afe42b (patch)
tree       5a217fdfb0b5e5a327bdcd624506337c1ae1fe32 /kernel/signal.c
parent     7d754596756240fa918b94cd0c3011c77a638987 (diff)
parent     02f8c6aee8df3cdc935e9bdd4f2d020306035dbe (diff)
Merge 'Linux v3.0' into Litmus
Some notes:
* Litmus^RT scheduling class is the topmost scheduling class
(above stop_sched_class).
* The scheduler_ipi() function (e.g., in smp_reschedule_interrupt())
may increase IPI latencies.
* Added a path into schedule() to quickly re-evaluate the scheduling
decision without becoming preemptible again. This used to be
a standard path before the removal of the BKL.
Conflicts:
Makefile
arch/arm/kernel/calls.S
arch/arm/kernel/smp.c
arch/x86/include/asm/unistd_32.h
arch/x86/kernel/smp.c
arch/x86/kernel/syscall_table_32.S
include/linux/hrtimer.h
kernel/printk.c
kernel/sched.c
kernel/sched_fair.c
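
As context for the first note above: in 3.0-era kernels, scheduling classes are chained through their .next pointers and the scheduler walks the chain starting at sched_class_highest, so making Litmus^RT the topmost class amounts to linking it above stop_sched_class and starting the walk there. Below is a minimal, hypothetical sketch of that ordering; the litmus_* callback names are placeholders and are not taken from this diff.

    /*
     * Hypothetical sketch only -- not part of this commit's diff.
     * In kernel/sched.c (Linux 3.0 layout), sched_class instances form a
     * singly linked list via .next, and pick_next_task() iterates from
     * sched_class_highest downward until a class returns a runnable task.
     */
    static const struct sched_class litmus_sched_class = {
            .next           = &stop_sched_class,    /* formerly the topmost class */
            .enqueue_task   = enqueue_task_litmus,  /* placeholder callbacks */
            .dequeue_task   = dequeue_task_litmus,
            .pick_next_task = pick_next_task_litmus,
            .put_prev_task  = put_prev_task_litmus,
            .task_tick      = task_tick_litmus,
            /* ... remaining callbacks elided ... */
    };

    /* The class walk then starts at the new head: */
    #define sched_class_highest (&litmus_sched_class)

With this ordering, pick_next_task() gives the Litmus^RT class the first chance at every scheduling decision and falls back to the stop, rt, fair, and idle classes when it has nothing to run.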
Diffstat (limited to 'kernel/signal.c')
-rw-r--r--  kernel/signal.c  893
1 file changed, 612 insertions, 281 deletions
diff --git a/kernel/signal.c b/kernel/signal.c
index 919562c3d6b7..415d85d6f6c6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -124,7 +124,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) | |||
124 | 124 | ||
125 | static int recalc_sigpending_tsk(struct task_struct *t) | 125 | static int recalc_sigpending_tsk(struct task_struct *t) |
126 | { | 126 | { |
127 | if (t->signal->group_stop_count > 0 || | 127 | if ((t->group_stop & GROUP_STOP_PENDING) || |
128 | PENDING(&t->pending, &t->blocked) || | 128 | PENDING(&t->pending, &t->blocked) || |
129 | PENDING(&t->signal->shared_pending, &t->blocked)) { | 129 | PENDING(&t->signal->shared_pending, &t->blocked)) { |
130 | set_tsk_thread_flag(t, TIF_SIGPENDING); | 130 | set_tsk_thread_flag(t, TIF_SIGPENDING); |
@@ -223,10 +223,87 @@ static inline void print_dropped_signal(int sig) | |||
223 | current->comm, current->pid, sig); | 223 | current->comm, current->pid, sig); |
224 | } | 224 | } |
225 | 225 | ||
226 | /** | ||
227 | * task_clear_group_stop_trapping - clear group stop trapping bit | ||
228 | * @task: target task | ||
229 | * | ||
230 | * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us. Clear it | ||
231 | * and wake up the ptracer. Note that we don't need any further locking. | ||
232 | * @task->siglock guarantees that @task->parent points to the ptracer. | ||
233 | * | ||
234 | * CONTEXT: | ||
235 | * Must be called with @task->sighand->siglock held. | ||
236 | */ | ||
237 | static void task_clear_group_stop_trapping(struct task_struct *task) | ||
238 | { | ||
239 | if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) { | ||
240 | task->group_stop &= ~GROUP_STOP_TRAPPING; | ||
241 | __wake_up_sync_key(&task->parent->signal->wait_chldexit, | ||
242 | TASK_UNINTERRUPTIBLE, 1, task); | ||
243 | } | ||
244 | } | ||
245 | |||
246 | /** | ||
247 | * task_clear_group_stop_pending - clear pending group stop | ||
248 | * @task: target task | ||
249 | * | ||
250 | * Clear group stop states for @task. | ||
251 | * | ||
252 | * CONTEXT: | ||
253 | * Must be called with @task->sighand->siglock held. | ||
254 | */ | ||
255 | void task_clear_group_stop_pending(struct task_struct *task) | ||
256 | { | ||
257 | task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME | | ||
258 | GROUP_STOP_DEQUEUED); | ||
259 | } | ||
260 | |||
261 | /** | ||
262 | * task_participate_group_stop - participate in a group stop | ||
263 | * @task: task participating in a group stop | ||
264 | * | ||
265 | * @task has GROUP_STOP_PENDING set and is participating in a group stop. | ||
266 | * Group stop states are cleared and the group stop count is consumed if | ||
267 | * %GROUP_STOP_CONSUME was set. If the consumption completes the group | ||
268 | * stop, the appropriate %SIGNAL_* flags are set. | ||
269 | * | ||
270 | * CONTEXT: | ||
271 | * Must be called with @task->sighand->siglock held. | ||
272 | * | ||
273 | * RETURNS: | ||
274 | * %true if group stop completion should be notified to the parent, %false | ||
275 | * otherwise. | ||
276 | */ | ||
277 | static bool task_participate_group_stop(struct task_struct *task) | ||
278 | { | ||
279 | struct signal_struct *sig = task->signal; | ||
280 | bool consume = task->group_stop & GROUP_STOP_CONSUME; | ||
281 | |||
282 | WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING)); | ||
283 | |||
284 | task_clear_group_stop_pending(task); | ||
285 | |||
286 | if (!consume) | ||
287 | return false; | ||
288 | |||
289 | if (!WARN_ON_ONCE(sig->group_stop_count == 0)) | ||
290 | sig->group_stop_count--; | ||
291 | |||
292 | /* | ||
293 | * Tell the caller to notify completion iff we are entering into a | ||
294 | * fresh group stop. Read comment in do_signal_stop() for details. | ||
295 | */ | ||
296 | if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) { | ||
297 | sig->flags = SIGNAL_STOP_STOPPED; | ||
298 | return true; | ||
299 | } | ||
300 | return false; | ||
301 | } | ||
302 | |||
226 | /* | 303 | /* |
227 | * allocate a new signal queue record | 304 | * allocate a new signal queue record |
228 | * - this may be called without locks if and only if t == current, otherwise an | 305 | * - this may be called without locks if and only if t == current, otherwise an |
229 | * appopriate lock must be held to stop the target task from exiting | 306 | * appropriate lock must be held to stop the target task from exiting |
230 | */ | 307 | */ |
231 | static struct sigqueue * | 308 | static struct sigqueue * |
232 | __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) | 309 | __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) |
@@ -375,15 +452,15 @@ int unhandled_signal(struct task_struct *tsk, int sig) | |||
375 | return !tracehook_consider_fatal_signal(tsk, sig); | 452 | return !tracehook_consider_fatal_signal(tsk, sig); |
376 | } | 453 | } |
377 | 454 | ||
378 | 455 | /* | |
379 | /* Notify the system that a driver wants to block all signals for this | 456 | * Notify the system that a driver wants to block all signals for this |
380 | * process, and wants to be notified if any signals at all were to be | 457 | * process, and wants to be notified if any signals at all were to be |
381 | * sent/acted upon. If the notifier routine returns non-zero, then the | 458 | * sent/acted upon. If the notifier routine returns non-zero, then the |
382 | * signal will be acted upon after all. If the notifier routine returns 0, | 459 | * signal will be acted upon after all. If the notifier routine returns 0, |
383 | * then then signal will be blocked. Only one block per process is | 460 | * then then signal will be blocked. Only one block per process is |
384 | * allowed. priv is a pointer to private data that the notifier routine | 461 | * allowed. priv is a pointer to private data that the notifier routine |
385 | * can use to determine if the signal should be blocked or not. */ | 462 | * can use to determine if the signal should be blocked or not. |
386 | 463 | */ | |
387 | void | 464 | void |
388 | block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask) | 465 | block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask) |
389 | { | 466 | { |
@@ -434,9 +511,10 @@ still_pending: | |||
434 | copy_siginfo(info, &first->info); | 511 | copy_siginfo(info, &first->info); |
435 | __sigqueue_free(first); | 512 | __sigqueue_free(first); |
436 | } else { | 513 | } else { |
437 | /* Ok, it wasn't in the queue. This must be | 514 | /* |
438 | a fast-pathed signal or we must have been | 515 | * Ok, it wasn't in the queue. This must be |
439 | out of queue space. So zero out the info. | 516 | * a fast-pathed signal or we must have been |
517 | * out of queue space. So zero out the info. | ||
440 | */ | 518 | */ |
441 | info->si_signo = sig; | 519 | info->si_signo = sig; |
442 | info->si_errno = 0; | 520 | info->si_errno = 0; |
@@ -468,7 +546,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, | |||
468 | } | 546 | } |
469 | 547 | ||
470 | /* | 548 | /* |
471 | * Dequeue a signal and return the element to the caller, which is | 549 | * Dequeue a signal and return the element to the caller, which is |
472 | * expected to free it. | 550 | * expected to free it. |
473 | * | 551 | * |
474 | * All callers have to hold the siglock. | 552 | * All callers have to hold the siglock. |
@@ -490,7 +568,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | |||
490 | * itimers are process shared and we restart periodic | 568 | * itimers are process shared and we restart periodic |
491 | * itimers in the signal delivery path to prevent DoS | 569 | * itimers in the signal delivery path to prevent DoS |
492 | * attacks in the high resolution timer case. This is | 570 | * attacks in the high resolution timer case. This is |
493 | * compliant with the old way of self restarting | 571 | * compliant with the old way of self-restarting |
494 | * itimers, as the SIGALRM is a legacy signal and only | 572 | * itimers, as the SIGALRM is a legacy signal and only |
495 | * queued once. Changing the restart behaviour to | 573 | * queued once. Changing the restart behaviour to |
496 | * restart the timer in the signal dequeue path is | 574 | * restart the timer in the signal dequeue path is |
@@ -526,7 +604,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | |||
526 | * is to alert stop-signal processing code when another | 604 | * is to alert stop-signal processing code when another |
527 | * processor has come along and cleared the flag. | 605 | * processor has come along and cleared the flag. |
528 | */ | 606 | */ |
529 | tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; | 607 | current->group_stop |= GROUP_STOP_DEQUEUED; |
530 | } | 608 | } |
531 | if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { | 609 | if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { |
532 | /* | 610 | /* |
@@ -591,7 +669,7 @@ static int rm_from_queue_full(sigset_t *mask, struct sigpending *s) | |||
591 | if (sigisemptyset(&m)) | 669 | if (sigisemptyset(&m)) |
592 | return 0; | 670 | return 0; |
593 | 671 | ||
594 | signandsets(&s->signal, &s->signal, mask); | 672 | sigandnsets(&s->signal, &s->signal, mask); |
595 | list_for_each_entry_safe(q, n, &s->list, list) { | 673 | list_for_each_entry_safe(q, n, &s->list, list) { |
596 | if (sigismember(mask, q->info.si_signo)) { | 674 | if (sigismember(mask, q->info.si_signo)) { |
597 | list_del_init(&q->list); | 675 | list_del_init(&q->list); |
@@ -636,13 +714,33 @@ static inline bool si_fromuser(const struct siginfo *info) | |||
636 | } | 714 | } |
637 | 715 | ||
638 | /* | 716 | /* |
717 | * called with RCU read lock from check_kill_permission() | ||
718 | */ | ||
719 | static int kill_ok_by_cred(struct task_struct *t) | ||
720 | { | ||
721 | const struct cred *cred = current_cred(); | ||
722 | const struct cred *tcred = __task_cred(t); | ||
723 | |||
724 | if (cred->user->user_ns == tcred->user->user_ns && | ||
725 | (cred->euid == tcred->suid || | ||
726 | cred->euid == tcred->uid || | ||
727 | cred->uid == tcred->suid || | ||
728 | cred->uid == tcred->uid)) | ||
729 | return 1; | ||
730 | |||
731 | if (ns_capable(tcred->user->user_ns, CAP_KILL)) | ||
732 | return 1; | ||
733 | |||
734 | return 0; | ||
735 | } | ||
736 | |||
737 | /* | ||
639 | * Bad permissions for sending the signal | 738 | * Bad permissions for sending the signal |
640 | * - the caller must hold the RCU read lock | 739 | * - the caller must hold the RCU read lock |
641 | */ | 740 | */ |
642 | static int check_kill_permission(int sig, struct siginfo *info, | 741 | static int check_kill_permission(int sig, struct siginfo *info, |
643 | struct task_struct *t) | 742 | struct task_struct *t) |
644 | { | 743 | { |
645 | const struct cred *cred, *tcred; | ||
646 | struct pid *sid; | 744 | struct pid *sid; |
647 | int error; | 745 | int error; |
648 | 746 | ||
@@ -656,14 +754,8 @@ static int check_kill_permission(int sig, struct siginfo *info, | |||
656 | if (error) | 754 | if (error) |
657 | return error; | 755 | return error; |
658 | 756 | ||
659 | cred = current_cred(); | ||
660 | tcred = __task_cred(t); | ||
661 | if (!same_thread_group(current, t) && | 757 | if (!same_thread_group(current, t) && |
662 | (cred->euid ^ tcred->suid) && | 758 | !kill_ok_by_cred(t)) { |
663 | (cred->euid ^ tcred->uid) && | ||
664 | (cred->uid ^ tcred->suid) && | ||
665 | (cred->uid ^ tcred->uid) && | ||
666 | !capable(CAP_KILL)) { | ||
667 | switch (sig) { | 759 | switch (sig) { |
668 | case SIGCONT: | 760 | case SIGCONT: |
669 | sid = task_session(t); | 761 | sid = task_session(t); |
@@ -712,34 +804,14 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns) | |||
712 | } else if (sig == SIGCONT) { | 804 | } else if (sig == SIGCONT) { |
713 | unsigned int why; | 805 | unsigned int why; |
714 | /* | 806 | /* |
715 | * Remove all stop signals from all queues, | 807 | * Remove all stop signals from all queues, wake all threads. |
716 | * and wake all threads. | ||
717 | */ | 808 | */ |
718 | rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending); | 809 | rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending); |
719 | t = p; | 810 | t = p; |
720 | do { | 811 | do { |
721 | unsigned int state; | 812 | task_clear_group_stop_pending(t); |
722 | rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); | 813 | rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); |
723 | /* | 814 | wake_up_state(t, __TASK_STOPPED); |
724 | * If there is a handler for SIGCONT, we must make | ||
725 | * sure that no thread returns to user mode before | ||
726 | * we post the signal, in case it was the only | ||
727 | * thread eligible to run the signal handler--then | ||
728 | * it must not do anything between resuming and | ||
729 | * running the handler. With the TIF_SIGPENDING | ||
730 | * flag set, the thread will pause and acquire the | ||
731 | * siglock that we hold now and until we've queued | ||
732 | * the pending signal. | ||
733 | * | ||
734 | * Wake up the stopped thread _after_ setting | ||
735 | * TIF_SIGPENDING | ||
736 | */ | ||
737 | state = __TASK_STOPPED; | ||
738 | if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) { | ||
739 | set_tsk_thread_flag(t, TIF_SIGPENDING); | ||
740 | state |= TASK_INTERRUPTIBLE; | ||
741 | } | ||
742 | wake_up_state(t, state); | ||
743 | } while_each_thread(p, t); | 815 | } while_each_thread(p, t); |
744 | 816 | ||
745 | /* | 817 | /* |
@@ -765,13 +837,6 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns) | |||
765 | signal->flags = why | SIGNAL_STOP_CONTINUED; | 837 | signal->flags = why | SIGNAL_STOP_CONTINUED; |
766 | signal->group_stop_count = 0; | 838 | signal->group_stop_count = 0; |
767 | signal->group_exit_code = 0; | 839 | signal->group_exit_code = 0; |
768 | } else { | ||
769 | /* | ||
770 | * We are not stopped, but there could be a stop | ||
771 | * signal in the middle of being processed after | ||
772 | * being removed from the queue. Clear that too. | ||
773 | */ | ||
774 | signal->flags &= ~SIGNAL_STOP_DEQUEUED; | ||
775 | } | 840 | } |
776 | } | 841 | } |
777 | 842 | ||
@@ -860,6 +925,7 @@ static void complete_signal(int sig, struct task_struct *p, int group) | |||
860 | signal->group_stop_count = 0; | 925 | signal->group_stop_count = 0; |
861 | t = p; | 926 | t = p; |
862 | do { | 927 | do { |
928 | task_clear_group_stop_pending(t); | ||
863 | sigaddset(&t->pending.signal, SIGKILL); | 929 | sigaddset(&t->pending.signal, SIGKILL); |
864 | signal_wake_up(t, 1); | 930 | signal_wake_up(t, 1); |
865 | } while_each_thread(p, t); | 931 | } while_each_thread(p, t); |
@@ -909,14 +975,15 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, | |||
909 | if (info == SEND_SIG_FORCED) | 975 | if (info == SEND_SIG_FORCED) |
910 | goto out_set; | 976 | goto out_set; |
911 | 977 | ||
912 | /* Real-time signals must be queued if sent by sigqueue, or | 978 | /* |
913 | some other real-time mechanism. It is implementation | 979 | * Real-time signals must be queued if sent by sigqueue, or |
914 | defined whether kill() does so. We attempt to do so, on | 980 | * some other real-time mechanism. It is implementation |
915 | the principle of least surprise, but since kill is not | 981 | * defined whether kill() does so. We attempt to do so, on |
916 | allowed to fail with EAGAIN when low on memory we just | 982 | * the principle of least surprise, but since kill is not |
917 | make sure at least one signal gets delivered and don't | 983 | * allowed to fail with EAGAIN when low on memory we just |
918 | pass on the info struct. */ | 984 | * make sure at least one signal gets delivered and don't |
919 | 985 | * pass on the info struct. | |
986 | */ | ||
920 | if (sig < SIGRTMIN) | 987 | if (sig < SIGRTMIN) |
921 | override_rlimit = (is_si_special(info) || info->si_code >= 0); | 988 | override_rlimit = (is_si_special(info) || info->si_code >= 0); |
922 | else | 989 | else |
@@ -1093,6 +1160,7 @@ int zap_other_threads(struct task_struct *p) | |||
1093 | p->signal->group_stop_count = 0; | 1160 | p->signal->group_stop_count = 0; |
1094 | 1161 | ||
1095 | while_each_thread(p, t) { | 1162 | while_each_thread(p, t) { |
1163 | task_clear_group_stop_pending(t); | ||
1096 | count++; | 1164 | count++; |
1097 | 1165 | ||
1098 | /* Don't bother with already dead threads */ | 1166 | /* Don't bother with already dead threads */ |
@@ -1105,22 +1173,30 @@ int zap_other_threads(struct task_struct *p) | |||
1105 | return count; | 1173 | return count; |
1106 | } | 1174 | } |
1107 | 1175 | ||
1108 | struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags) | 1176 | struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, |
1177 | unsigned long *flags) | ||
1109 | { | 1178 | { |
1110 | struct sighand_struct *sighand; | 1179 | struct sighand_struct *sighand; |
1111 | 1180 | ||
1112 | rcu_read_lock(); | ||
1113 | for (;;) { | 1181 | for (;;) { |
1182 | local_irq_save(*flags); | ||
1183 | rcu_read_lock(); | ||
1114 | sighand = rcu_dereference(tsk->sighand); | 1184 | sighand = rcu_dereference(tsk->sighand); |
1115 | if (unlikely(sighand == NULL)) | 1185 | if (unlikely(sighand == NULL)) { |
1186 | rcu_read_unlock(); | ||
1187 | local_irq_restore(*flags); | ||
1116 | break; | 1188 | break; |
1189 | } | ||
1117 | 1190 | ||
1118 | spin_lock_irqsave(&sighand->siglock, *flags); | 1191 | spin_lock(&sighand->siglock); |
1119 | if (likely(sighand == tsk->sighand)) | 1192 | if (likely(sighand == tsk->sighand)) { |
1193 | rcu_read_unlock(); | ||
1120 | break; | 1194 | break; |
1121 | spin_unlock_irqrestore(&sighand->siglock, *flags); | 1195 | } |
1196 | spin_unlock(&sighand->siglock); | ||
1197 | rcu_read_unlock(); | ||
1198 | local_irq_restore(*flags); | ||
1122 | } | 1199 | } |
1123 | rcu_read_unlock(); | ||
1124 | 1200 | ||
1125 | return sighand; | 1201 | return sighand; |
1126 | } | 1202 | } |
@@ -1186,8 +1262,7 @@ retry: | |||
1186 | return error; | 1262 | return error; |
1187 | } | 1263 | } |
1188 | 1264 | ||
1189 | int | 1265 | int kill_proc_info(int sig, struct siginfo *info, pid_t pid) |
1190 | kill_proc_info(int sig, struct siginfo *info, pid_t pid) | ||
1191 | { | 1266 | { |
1192 | int error; | 1267 | int error; |
1193 | rcu_read_lock(); | 1268 | rcu_read_lock(); |
@@ -1284,8 +1359,7 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid) | |||
1284 | * These are for backward compatibility with the rest of the kernel source. | 1359 | * These are for backward compatibility with the rest of the kernel source. |
1285 | */ | 1360 | */ |
1286 | 1361 | ||
1287 | int | 1362 | int send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
1288 | send_sig_info(int sig, struct siginfo *info, struct task_struct *p) | ||
1289 | { | 1363 | { |
1290 | /* | 1364 | /* |
1291 | * Make sure legacy kernel users don't send in bad values | 1365 | * Make sure legacy kernel users don't send in bad values |
@@ -1353,7 +1427,7 @@ EXPORT_SYMBOL(kill_pid); | |||
1353 | * These functions support sending signals using preallocated sigqueue | 1427 | * These functions support sending signals using preallocated sigqueue |
1354 | * structures. This is needed "because realtime applications cannot | 1428 | * structures. This is needed "because realtime applications cannot |
1355 | * afford to lose notifications of asynchronous events, like timer | 1429 | * afford to lose notifications of asynchronous events, like timer |
1356 | * expirations or I/O completions". In the case of Posix Timers | 1430 | * expirations or I/O completions". In the case of POSIX Timers |
1357 | * we allocate the sigqueue structure from the timer_create. If this | 1431 | * we allocate the sigqueue structure from the timer_create. If this |
1358 | * allocation fails we are able to report the failure to the application | 1432 | * allocation fails we are able to report the failure to the application |
1359 | * with an EAGAIN error. | 1433 | * with an EAGAIN error. |
@@ -1521,16 +1595,30 @@ int do_notify_parent(struct task_struct *tsk, int sig) | |||
1521 | return ret; | 1595 | return ret; |
1522 | } | 1596 | } |
1523 | 1597 | ||
1524 | static void do_notify_parent_cldstop(struct task_struct *tsk, int why) | 1598 | /** |
1599 | * do_notify_parent_cldstop - notify parent of stopped/continued state change | ||
1600 | * @tsk: task reporting the state change | ||
1601 | * @for_ptracer: the notification is for ptracer | ||
1602 | * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report | ||
1603 | * | ||
1604 | * Notify @tsk's parent that the stopped/continued state has changed. If | ||
1605 | * @for_ptracer is %false, @tsk's group leader notifies to its real parent. | ||
1606 | * If %true, @tsk reports to @tsk->parent which should be the ptracer. | ||
1607 | * | ||
1608 | * CONTEXT: | ||
1609 | * Must be called with tasklist_lock at least read locked. | ||
1610 | */ | ||
1611 | static void do_notify_parent_cldstop(struct task_struct *tsk, | ||
1612 | bool for_ptracer, int why) | ||
1525 | { | 1613 | { |
1526 | struct siginfo info; | 1614 | struct siginfo info; |
1527 | unsigned long flags; | 1615 | unsigned long flags; |
1528 | struct task_struct *parent; | 1616 | struct task_struct *parent; |
1529 | struct sighand_struct *sighand; | 1617 | struct sighand_struct *sighand; |
1530 | 1618 | ||
1531 | if (task_ptrace(tsk)) | 1619 | if (for_ptracer) { |
1532 | parent = tsk->parent; | 1620 | parent = tsk->parent; |
1533 | else { | 1621 | } else { |
1534 | tsk = tsk->group_leader; | 1622 | tsk = tsk->group_leader; |
1535 | parent = tsk->real_parent; | 1623 | parent = tsk->real_parent; |
1536 | } | 1624 | } |
@@ -1538,7 +1626,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why) | |||
1538 | info.si_signo = SIGCHLD; | 1626 | info.si_signo = SIGCHLD; |
1539 | info.si_errno = 0; | 1627 | info.si_errno = 0; |
1540 | /* | 1628 | /* |
1541 | * see comment in do_notify_parent() abot the following 3 lines | 1629 | * see comment in do_notify_parent() about the following 4 lines |
1542 | */ | 1630 | */ |
1543 | rcu_read_lock(); | 1631 | rcu_read_lock(); |
1544 | info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns); | 1632 | info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns); |
@@ -1596,7 +1684,7 @@ static inline int may_ptrace_stop(void) | |||
1596 | } | 1684 | } |
1597 | 1685 | ||
1598 | /* | 1686 | /* |
1599 | * Return nonzero if there is a SIGKILL that should be waking us up. | 1687 | * Return non-zero if there is a SIGKILL that should be waking us up. |
1600 | * Called with the siglock held. | 1688 | * Called with the siglock held. |
1601 | */ | 1689 | */ |
1602 | static int sigkill_pending(struct task_struct *tsk) | 1690 | static int sigkill_pending(struct task_struct *tsk) |
@@ -1606,6 +1694,15 @@ static int sigkill_pending(struct task_struct *tsk) | |||
1606 | } | 1694 | } |
1607 | 1695 | ||
1608 | /* | 1696 | /* |
1697 | * Test whether the target task of the usual cldstop notification - the | ||
1698 | * real_parent of @child - is in the same group as the ptracer. | ||
1699 | */ | ||
1700 | static bool real_parent_is_ptracer(struct task_struct *child) | ||
1701 | { | ||
1702 | return same_thread_group(child->parent, child->real_parent); | ||
1703 | } | ||
1704 | |||
1705 | /* | ||
1609 | * This must be called with current->sighand->siglock held. | 1706 | * This must be called with current->sighand->siglock held. |
1610 | * | 1707 | * |
1611 | * This should be the path for all ptrace stops. | 1708 | * This should be the path for all ptrace stops. |
@@ -1616,8 +1713,12 @@ static int sigkill_pending(struct task_struct *tsk) | |||
1616 | * If we actually decide not to stop at all because the tracer | 1713 | * If we actually decide not to stop at all because the tracer |
1617 | * is gone, we keep current->exit_code unless clear_code. | 1714 | * is gone, we keep current->exit_code unless clear_code. |
1618 | */ | 1715 | */ |
1619 | static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) | 1716 | static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) |
1717 | __releases(¤t->sighand->siglock) | ||
1718 | __acquires(¤t->sighand->siglock) | ||
1620 | { | 1719 | { |
1720 | bool gstop_done = false; | ||
1721 | |||
1621 | if (arch_ptrace_stop_needed(exit_code, info)) { | 1722 | if (arch_ptrace_stop_needed(exit_code, info)) { |
1622 | /* | 1723 | /* |
1623 | * The arch code has something special to do before a | 1724 | * The arch code has something special to do before a |
@@ -1638,21 +1739,49 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) | |||
1638 | } | 1739 | } |
1639 | 1740 | ||
1640 | /* | 1741 | /* |
1641 | * If there is a group stop in progress, | 1742 | * If @why is CLD_STOPPED, we're trapping to participate in a group |
1642 | * we must participate in the bookkeeping. | 1743 | * stop. Do the bookkeeping. Note that if SIGCONT was delievered |
1744 | * while siglock was released for the arch hook, PENDING could be | ||
1745 | * clear now. We act as if SIGCONT is received after TASK_TRACED | ||
1746 | * is entered - ignore it. | ||
1643 | */ | 1747 | */ |
1644 | if (current->signal->group_stop_count > 0) | 1748 | if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING)) |
1645 | --current->signal->group_stop_count; | 1749 | gstop_done = task_participate_group_stop(current); |
1646 | 1750 | ||
1647 | current->last_siginfo = info; | 1751 | current->last_siginfo = info; |
1648 | current->exit_code = exit_code; | 1752 | current->exit_code = exit_code; |
1649 | 1753 | ||
1650 | /* Let the debugger run. */ | 1754 | /* |
1651 | __set_current_state(TASK_TRACED); | 1755 | * TRACED should be visible before TRAPPING is cleared; otherwise, |
1756 | * the tracer might fail do_wait(). | ||
1757 | */ | ||
1758 | set_current_state(TASK_TRACED); | ||
1759 | |||
1760 | /* | ||
1761 | * We're committing to trapping. Clearing GROUP_STOP_TRAPPING and | ||
1762 | * transition to TASK_TRACED should be atomic with respect to | ||
1763 | * siglock. This hsould be done after the arch hook as siglock is | ||
1764 | * released and regrabbed across it. | ||
1765 | */ | ||
1766 | task_clear_group_stop_trapping(current); | ||
1767 | |||
1652 | spin_unlock_irq(¤t->sighand->siglock); | 1768 | spin_unlock_irq(¤t->sighand->siglock); |
1653 | read_lock(&tasklist_lock); | 1769 | read_lock(&tasklist_lock); |
1654 | if (may_ptrace_stop()) { | 1770 | if (may_ptrace_stop()) { |
1655 | do_notify_parent_cldstop(current, CLD_TRAPPED); | 1771 | /* |
1772 | * Notify parents of the stop. | ||
1773 | * | ||
1774 | * While ptraced, there are two parents - the ptracer and | ||
1775 | * the real_parent of the group_leader. The ptracer should | ||
1776 | * know about every stop while the real parent is only | ||
1777 | * interested in the completion of group stop. The states | ||
1778 | * for the two don't interact with each other. Notify | ||
1779 | * separately unless they're gonna be duplicates. | ||
1780 | */ | ||
1781 | do_notify_parent_cldstop(current, true, why); | ||
1782 | if (gstop_done && !real_parent_is_ptracer(current)) | ||
1783 | do_notify_parent_cldstop(current, false, why); | ||
1784 | |||
1656 | /* | 1785 | /* |
1657 | * Don't want to allow preemption here, because | 1786 | * Don't want to allow preemption here, because |
1658 | * sys_ptrace() needs this task to be inactive. | 1787 | * sys_ptrace() needs this task to be inactive. |
@@ -1667,7 +1796,16 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) | |||
1667 | /* | 1796 | /* |
1668 | * By the time we got the lock, our tracer went away. | 1797 | * By the time we got the lock, our tracer went away. |
1669 | * Don't drop the lock yet, another tracer may come. | 1798 | * Don't drop the lock yet, another tracer may come. |
1799 | * | ||
1800 | * If @gstop_done, the ptracer went away between group stop | ||
1801 | * completion and here. During detach, it would have set | ||
1802 | * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED | ||
1803 | * in do_signal_stop() on return, so notifying the real | ||
1804 | * parent of the group stop completion is enough. | ||
1670 | */ | 1805 | */ |
1806 | if (gstop_done) | ||
1807 | do_notify_parent_cldstop(current, false, why); | ||
1808 | |||
1671 | __set_current_state(TASK_RUNNING); | 1809 | __set_current_state(TASK_RUNNING); |
1672 | if (clear_code) | 1810 | if (clear_code) |
1673 | current->exit_code = 0; | 1811 | current->exit_code = 0; |
@@ -1711,79 +1849,128 @@ void ptrace_notify(int exit_code) | |||
1711 | 1849 | ||
1712 | /* Let the debugger run. */ | 1850 | /* Let the debugger run. */ |
1713 | spin_lock_irq(¤t->sighand->siglock); | 1851 | spin_lock_irq(¤t->sighand->siglock); |
1714 | ptrace_stop(exit_code, 1, &info); | 1852 | ptrace_stop(exit_code, CLD_TRAPPED, 1, &info); |
1715 | spin_unlock_irq(¤t->sighand->siglock); | 1853 | spin_unlock_irq(¤t->sighand->siglock); |
1716 | } | 1854 | } |
1717 | 1855 | ||
1718 | /* | 1856 | /* |
1719 | * This performs the stopping for SIGSTOP and other stop signals. | 1857 | * This performs the stopping for SIGSTOP and other stop signals. |
1720 | * We have to stop all threads in the thread group. | 1858 | * We have to stop all threads in the thread group. |
1721 | * Returns nonzero if we've actually stopped and released the siglock. | 1859 | * Returns non-zero if we've actually stopped and released the siglock. |
1722 | * Returns zero if we didn't stop and still hold the siglock. | 1860 | * Returns zero if we didn't stop and still hold the siglock. |
1723 | */ | 1861 | */ |
1724 | static int do_signal_stop(int signr) | 1862 | static int do_signal_stop(int signr) |
1725 | { | 1863 | { |
1726 | struct signal_struct *sig = current->signal; | 1864 | struct signal_struct *sig = current->signal; |
1727 | int notify; | ||
1728 | 1865 | ||
1729 | if (!sig->group_stop_count) { | 1866 | if (!(current->group_stop & GROUP_STOP_PENDING)) { |
1867 | unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME; | ||
1730 | struct task_struct *t; | 1868 | struct task_struct *t; |
1731 | 1869 | ||
1732 | if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) || | 1870 | /* signr will be recorded in task->group_stop for retries */ |
1871 | WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK); | ||
1872 | |||
1873 | if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) || | ||
1733 | unlikely(signal_group_exit(sig))) | 1874 | unlikely(signal_group_exit(sig))) |
1734 | return 0; | 1875 | return 0; |
1735 | /* | 1876 | /* |
1736 | * There is no group stop already in progress. | 1877 | * There is no group stop already in progress. We must |
1737 | * We must initiate one now. | 1878 | * initiate one now. |
1879 | * | ||
1880 | * While ptraced, a task may be resumed while group stop is | ||
1881 | * still in effect and then receive a stop signal and | ||
1882 | * initiate another group stop. This deviates from the | ||
1883 | * usual behavior as two consecutive stop signals can't | ||
1884 | * cause two group stops when !ptraced. That is why we | ||
1885 | * also check !task_is_stopped(t) below. | ||
1886 | * | ||
1887 | * The condition can be distinguished by testing whether | ||
1888 | * SIGNAL_STOP_STOPPED is already set. Don't generate | ||
1889 | * group_exit_code in such case. | ||
1890 | * | ||
1891 | * This is not necessary for SIGNAL_STOP_CONTINUED because | ||
1892 | * an intervening stop signal is required to cause two | ||
1893 | * continued events regardless of ptrace. | ||
1738 | */ | 1894 | */ |
1739 | sig->group_exit_code = signr; | 1895 | if (!(sig->flags & SIGNAL_STOP_STOPPED)) |
1896 | sig->group_exit_code = signr; | ||
1897 | else | ||
1898 | WARN_ON_ONCE(!task_ptrace(current)); | ||
1740 | 1899 | ||
1900 | current->group_stop &= ~GROUP_STOP_SIGMASK; | ||
1901 | current->group_stop |= signr | gstop; | ||
1741 | sig->group_stop_count = 1; | 1902 | sig->group_stop_count = 1; |
1742 | for (t = next_thread(current); t != current; t = next_thread(t)) | 1903 | for (t = next_thread(current); t != current; |
1904 | t = next_thread(t)) { | ||
1905 | t->group_stop &= ~GROUP_STOP_SIGMASK; | ||
1743 | /* | 1906 | /* |
1744 | * Setting state to TASK_STOPPED for a group | 1907 | * Setting state to TASK_STOPPED for a group |
1745 | * stop is always done with the siglock held, | 1908 | * stop is always done with the siglock held, |
1746 | * so this check has no races. | 1909 | * so this check has no races. |
1747 | */ | 1910 | */ |
1748 | if (!(t->flags & PF_EXITING) && | 1911 | if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) { |
1749 | !task_is_stopped_or_traced(t)) { | 1912 | t->group_stop |= signr | gstop; |
1750 | sig->group_stop_count++; | 1913 | sig->group_stop_count++; |
1751 | signal_wake_up(t, 0); | 1914 | signal_wake_up(t, 0); |
1752 | } | 1915 | } |
1916 | } | ||
1753 | } | 1917 | } |
1754 | /* | 1918 | retry: |
1755 | * If there are no other threads in the group, or if there is | 1919 | if (likely(!task_ptrace(current))) { |
1756 | * a group stop in progress and we are the last to stop, report | 1920 | int notify = 0; |
1757 | * to the parent. When ptraced, every thread reports itself. | 1921 | |
1758 | */ | 1922 | /* |
1759 | notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0; | 1923 | * If there are no other threads in the group, or if there |
1760 | notify = tracehook_notify_jctl(notify, CLD_STOPPED); | 1924 | * is a group stop in progress and we are the last to stop, |
1761 | /* | 1925 | * report to the parent. |
1762 | * tracehook_notify_jctl() can drop and reacquire siglock, so | 1926 | */ |
1763 | * we keep ->group_stop_count != 0 before the call. If SIGCONT | 1927 | if (task_participate_group_stop(current)) |
1764 | * or SIGKILL comes in between ->group_stop_count == 0. | 1928 | notify = CLD_STOPPED; |
1765 | */ | 1929 | |
1766 | if (sig->group_stop_count) { | ||
1767 | if (!--sig->group_stop_count) | ||
1768 | sig->flags = SIGNAL_STOP_STOPPED; | ||
1769 | current->exit_code = sig->group_exit_code; | ||
1770 | __set_current_state(TASK_STOPPED); | 1930 | __set_current_state(TASK_STOPPED); |
1931 | spin_unlock_irq(¤t->sighand->siglock); | ||
1932 | |||
1933 | /* | ||
1934 | * Notify the parent of the group stop completion. Because | ||
1935 | * we're not holding either the siglock or tasklist_lock | ||
1936 | * here, ptracer may attach inbetween; however, this is for | ||
1937 | * group stop and should always be delivered to the real | ||
1938 | * parent of the group leader. The new ptracer will get | ||
1939 | * its notification when this task transitions into | ||
1940 | * TASK_TRACED. | ||
1941 | */ | ||
1942 | if (notify) { | ||
1943 | read_lock(&tasklist_lock); | ||
1944 | do_notify_parent_cldstop(current, false, notify); | ||
1945 | read_unlock(&tasklist_lock); | ||
1946 | } | ||
1947 | |||
1948 | /* Now we don't run again until woken by SIGCONT or SIGKILL */ | ||
1949 | schedule(); | ||
1950 | |||
1951 | spin_lock_irq(¤t->sighand->siglock); | ||
1952 | } else { | ||
1953 | ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK, | ||
1954 | CLD_STOPPED, 0, NULL); | ||
1955 | current->exit_code = 0; | ||
1771 | } | 1956 | } |
1772 | spin_unlock_irq(¤t->sighand->siglock); | ||
1773 | 1957 | ||
1774 | if (notify) { | 1958 | /* |
1775 | read_lock(&tasklist_lock); | 1959 | * GROUP_STOP_PENDING could be set if another group stop has |
1776 | do_notify_parent_cldstop(current, notify); | 1960 | * started since being woken up or ptrace wants us to transit |
1777 | read_unlock(&tasklist_lock); | 1961 | * between TASK_STOPPED and TRACED. Retry group stop. |
1962 | */ | ||
1963 | if (current->group_stop & GROUP_STOP_PENDING) { | ||
1964 | WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK)); | ||
1965 | goto retry; | ||
1778 | } | 1966 | } |
1779 | 1967 | ||
1780 | /* Now we don't run again until woken by SIGCONT or SIGKILL */ | 1968 | /* PTRACE_ATTACH might have raced with task killing, clear trapping */ |
1781 | do { | 1969 | task_clear_group_stop_trapping(current); |
1782 | schedule(); | 1970 | |
1783 | } while (try_to_freeze()); | 1971 | spin_unlock_irq(¤t->sighand->siglock); |
1784 | 1972 | ||
1785 | tracehook_finish_jctl(); | 1973 | tracehook_finish_jctl(); |
1786 | current->exit_code = 0; | ||
1787 | 1974 | ||
1788 | return 1; | 1975 | return 1; |
1789 | } | 1976 | } |
@@ -1797,7 +1984,7 @@ static int ptrace_signal(int signr, siginfo_t *info, | |||
1797 | ptrace_signal_deliver(regs, cookie); | 1984 | ptrace_signal_deliver(regs, cookie); |
1798 | 1985 | ||
1799 | /* Let the debugger run. */ | 1986 | /* Let the debugger run. */ |
1800 | ptrace_stop(signr, 0, info); | 1987 | ptrace_stop(signr, CLD_TRAPPED, 0, info); |
1801 | 1988 | ||
1802 | /* We're back. Did the debugger cancel the sig? */ | 1989 | /* We're back. Did the debugger cancel the sig? */ |
1803 | signr = current->exit_code; | 1990 | signr = current->exit_code; |
@@ -1806,10 +1993,12 @@ static int ptrace_signal(int signr, siginfo_t *info, | |||
1806 | 1993 | ||
1807 | current->exit_code = 0; | 1994 | current->exit_code = 0; |
1808 | 1995 | ||
1809 | /* Update the siginfo structure if the signal has | 1996 | /* |
1810 | changed. If the debugger wanted something | 1997 | * Update the siginfo structure if the signal has |
1811 | specific in the siginfo structure then it should | 1998 | * changed. If the debugger wanted something |
1812 | have updated *info via PTRACE_SETSIGINFO. */ | 1999 | * specific in the siginfo structure then it should |
2000 | * have updated *info via PTRACE_SETSIGINFO. | ||
2001 | */ | ||
1813 | if (signr != info->si_signo) { | 2002 | if (signr != info->si_signo) { |
1814 | info->si_signo = signr; | 2003 | info->si_signo = signr; |
1815 | info->si_errno = 0; | 2004 | info->si_errno = 0; |
@@ -1850,25 +2039,43 @@ relock: | |||
1850 | * the CLD_ si_code into SIGNAL_CLD_MASK bits. | 2039 | * the CLD_ si_code into SIGNAL_CLD_MASK bits. |
1851 | */ | 2040 | */ |
1852 | if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { | 2041 | if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { |
1853 | int why = (signal->flags & SIGNAL_STOP_CONTINUED) | 2042 | struct task_struct *leader; |
1854 | ? CLD_CONTINUED : CLD_STOPPED; | 2043 | int why; |
2044 | |||
2045 | if (signal->flags & SIGNAL_CLD_CONTINUED) | ||
2046 | why = CLD_CONTINUED; | ||
2047 | else | ||
2048 | why = CLD_STOPPED; | ||
2049 | |||
1855 | signal->flags &= ~SIGNAL_CLD_MASK; | 2050 | signal->flags &= ~SIGNAL_CLD_MASK; |
1856 | 2051 | ||
1857 | why = tracehook_notify_jctl(why, CLD_CONTINUED); | ||
1858 | spin_unlock_irq(&sighand->siglock); | 2052 | spin_unlock_irq(&sighand->siglock); |
1859 | 2053 | ||
1860 | if (why) { | 2054 | /* |
1861 | read_lock(&tasklist_lock); | 2055 | * Notify the parent that we're continuing. This event is |
1862 | do_notify_parent_cldstop(current->group_leader, why); | 2056 | * always per-process and doesn't make whole lot of sense |
1863 | read_unlock(&tasklist_lock); | 2057 | * for ptracers, who shouldn't consume the state via |
1864 | } | 2058 | * wait(2) either, but, for backward compatibility, notify |
2059 | * the ptracer of the group leader too unless it's gonna be | ||
2060 | * a duplicate. | ||
2061 | */ | ||
2062 | read_lock(&tasklist_lock); | ||
2063 | |||
2064 | do_notify_parent_cldstop(current, false, why); | ||
2065 | |||
2066 | leader = current->group_leader; | ||
2067 | if (task_ptrace(leader) && !real_parent_is_ptracer(leader)) | ||
2068 | do_notify_parent_cldstop(leader, true, why); | ||
2069 | |||
2070 | read_unlock(&tasklist_lock); | ||
2071 | |||
1865 | goto relock; | 2072 | goto relock; |
1866 | } | 2073 | } |
1867 | 2074 | ||
1868 | for (;;) { | 2075 | for (;;) { |
1869 | struct k_sigaction *ka; | 2076 | struct k_sigaction *ka; |
1870 | /* | 2077 | /* |
1871 | * Tracing can induce an artifical signal and choose sigaction. | 2078 | * Tracing can induce an artificial signal and choose sigaction. |
1872 | * The return value in @signr determines the default action, | 2079 | * The return value in @signr determines the default action, |
1873 | * but @info->si_signo is the signal number we will report. | 2080 | * but @info->si_signo is the signal number we will report. |
1874 | */ | 2081 | */ |
@@ -1878,8 +2085,8 @@ relock: | |||
1878 | if (unlikely(signr != 0)) | 2085 | if (unlikely(signr != 0)) |
1879 | ka = return_ka; | 2086 | ka = return_ka; |
1880 | else { | 2087 | else { |
1881 | if (unlikely(signal->group_stop_count > 0) && | 2088 | if (unlikely(current->group_stop & |
1882 | do_signal_stop(0)) | 2089 | GROUP_STOP_PENDING) && do_signal_stop(0)) |
1883 | goto relock; | 2090 | goto relock; |
1884 | 2091 | ||
1885 | signr = dequeue_signal(current, ¤t->blocked, | 2092 | signr = dequeue_signal(current, ¤t->blocked, |
@@ -1998,10 +2205,42 @@ relock: | |||
1998 | return signr; | 2205 | return signr; |
1999 | } | 2206 | } |
2000 | 2207 | ||
2208 | /* | ||
2209 | * It could be that complete_signal() picked us to notify about the | ||
2210 | * group-wide signal. Other threads should be notified now to take | ||
2211 | * the shared signals in @which since we will not. | ||
2212 | */ | ||
2213 | static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) | ||
2214 | { | ||
2215 | sigset_t retarget; | ||
2216 | struct task_struct *t; | ||
2217 | |||
2218 | sigandsets(&retarget, &tsk->signal->shared_pending.signal, which); | ||
2219 | if (sigisemptyset(&retarget)) | ||
2220 | return; | ||
2221 | |||
2222 | t = tsk; | ||
2223 | while_each_thread(tsk, t) { | ||
2224 | if (t->flags & PF_EXITING) | ||
2225 | continue; | ||
2226 | |||
2227 | if (!has_pending_signals(&retarget, &t->blocked)) | ||
2228 | continue; | ||
2229 | /* Remove the signals this thread can handle. */ | ||
2230 | sigandsets(&retarget, &retarget, &t->blocked); | ||
2231 | |||
2232 | if (!signal_pending(t)) | ||
2233 | signal_wake_up(t, 0); | ||
2234 | |||
2235 | if (sigisemptyset(&retarget)) | ||
2236 | break; | ||
2237 | } | ||
2238 | } | ||
2239 | |||
2001 | void exit_signals(struct task_struct *tsk) | 2240 | void exit_signals(struct task_struct *tsk) |
2002 | { | 2241 | { |
2003 | int group_stop = 0; | 2242 | int group_stop = 0; |
2004 | struct task_struct *t; | 2243 | sigset_t unblocked; |
2005 | 2244 | ||
2006 | if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { | 2245 | if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { |
2007 | tsk->flags |= PF_EXITING; | 2246 | tsk->flags |= PF_EXITING; |
@@ -2017,25 +2256,23 @@ void exit_signals(struct task_struct *tsk) | |||
2017 | if (!signal_pending(tsk)) | 2256 | if (!signal_pending(tsk)) |
2018 | goto out; | 2257 | goto out; |
2019 | 2258 | ||
2020 | /* It could be that __group_complete_signal() choose us to | 2259 | unblocked = tsk->blocked; |
2021 | * notify about group-wide signal. Another thread should be | 2260 | signotset(&unblocked); |
2022 | * woken now to take the signal since we will not. | 2261 | retarget_shared_pending(tsk, &unblocked); |
2023 | */ | ||
2024 | for (t = tsk; (t = next_thread(t)) != tsk; ) | ||
2025 | if (!signal_pending(t) && !(t->flags & PF_EXITING)) | ||
2026 | recalc_sigpending_and_wake(t); | ||
2027 | 2262 | ||
2028 | if (unlikely(tsk->signal->group_stop_count) && | 2263 | if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) && |
2029 | !--tsk->signal->group_stop_count) { | 2264 | task_participate_group_stop(tsk)) |
2030 | tsk->signal->flags = SIGNAL_STOP_STOPPED; | 2265 | group_stop = CLD_STOPPED; |
2031 | group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED); | ||
2032 | } | ||
2033 | out: | 2266 | out: |
2034 | spin_unlock_irq(&tsk->sighand->siglock); | 2267 | spin_unlock_irq(&tsk->sighand->siglock); |
2035 | 2268 | ||
2269 | /* | ||
2270 | * If group stop has completed, deliver the notification. This | ||
2271 | * should always go to the real parent of the group leader. | ||
2272 | */ | ||
2036 | if (unlikely(group_stop)) { | 2273 | if (unlikely(group_stop)) { |
2037 | read_lock(&tasklist_lock); | 2274 | read_lock(&tasklist_lock); |
2038 | do_notify_parent_cldstop(tsk, group_stop); | 2275 | do_notify_parent_cldstop(tsk, false, group_stop); |
2039 | read_unlock(&tasklist_lock); | 2276 | read_unlock(&tasklist_lock); |
2040 | } | 2277 | } |
2041 | } | 2278 | } |
@@ -2055,6 +2292,9 @@ EXPORT_SYMBOL(unblock_all_signals); | |||
2055 | * System call entry points. | 2292 | * System call entry points. |
2056 | */ | 2293 | */ |
2057 | 2294 | ||
2295 | /** | ||
2296 | * sys_restart_syscall - restart a system call | ||
2297 | */ | ||
2058 | SYSCALL_DEFINE0(restart_syscall) | 2298 | SYSCALL_DEFINE0(restart_syscall) |
2059 | { | 2299 | { |
2060 | struct restart_block *restart = ¤t_thread_info()->restart_block; | 2300 | struct restart_block *restart = ¤t_thread_info()->restart_block; |
@@ -2066,11 +2306,33 @@ long do_no_restart_syscall(struct restart_block *param) | |||
2066 | return -EINTR; | 2306 | return -EINTR; |
2067 | } | 2307 | } |
2068 | 2308 | ||
2069 | /* | 2309 | static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) |
2070 | * We don't need to get the kernel lock - this is all local to this | 2310 | { |
2071 | * particular thread.. (and that's good, because this is _heavily_ | 2311 | if (signal_pending(tsk) && !thread_group_empty(tsk)) { |
2072 | * used by various programs) | 2312 | sigset_t newblocked; |
2313 | /* A set of now blocked but previously unblocked signals. */ | ||
2314 | sigandnsets(&newblocked, newset, ¤t->blocked); | ||
2315 | retarget_shared_pending(tsk, &newblocked); | ||
2316 | } | ||
2317 | tsk->blocked = *newset; | ||
2318 | recalc_sigpending(); | ||
2319 | } | ||
2320 | |||
2321 | /** | ||
2322 | * set_current_blocked - change current->blocked mask | ||
2323 | * @newset: new mask | ||
2324 | * | ||
2325 | * It is wrong to change ->blocked directly, this helper should be used | ||
2326 | * to ensure the process can't miss a shared signal we are going to block. | ||
2073 | */ | 2327 | */ |
2328 | void set_current_blocked(const sigset_t *newset) | ||
2329 | { | ||
2330 | struct task_struct *tsk = current; | ||
2331 | |||
2332 | spin_lock_irq(&tsk->sighand->siglock); | ||
2333 | __set_task_blocked(tsk, newset); | ||
2334 | spin_unlock_irq(&tsk->sighand->siglock); | ||
2335 | } | ||
2074 | 2336 | ||
2075 | /* | 2337 | /* |
2076 | * This is also useful for kernel threads that want to temporarily | 2338 | * This is also useful for kernel threads that want to temporarily |
@@ -2082,66 +2344,66 @@ long do_no_restart_syscall(struct restart_block *param) | |||
2082 | */ | 2344 | */ |
2083 | int sigprocmask(int how, sigset_t *set, sigset_t *oldset) | 2345 | int sigprocmask(int how, sigset_t *set, sigset_t *oldset) |
2084 | { | 2346 | { |
2085 | int error; | 2347 | struct task_struct *tsk = current; |
2348 | sigset_t newset; | ||
2086 | 2349 | ||
2087 | spin_lock_irq(¤t->sighand->siglock); | 2350 | /* Lockless, only current can change ->blocked, never from irq */ |
2088 | if (oldset) | 2351 | if (oldset) |
2089 | *oldset = current->blocked; | 2352 | *oldset = tsk->blocked; |
2090 | 2353 | ||
2091 | error = 0; | ||
2092 | switch (how) { | 2354 | switch (how) { |
2093 | case SIG_BLOCK: | 2355 | case SIG_BLOCK: |
2094 | sigorsets(¤t->blocked, ¤t->blocked, set); | 2356 | sigorsets(&newset, &tsk->blocked, set); |
2095 | break; | 2357 | break; |
2096 | case SIG_UNBLOCK: | 2358 | case SIG_UNBLOCK: |
2097 | signandsets(¤t->blocked, ¤t->blocked, set); | 2359 | sigandnsets(&newset, &tsk->blocked, set); |
2098 | break; | 2360 | break; |
2099 | case SIG_SETMASK: | 2361 | case SIG_SETMASK: |
2100 | current->blocked = *set; | 2362 | newset = *set; |
2101 | break; | 2363 | break; |
2102 | default: | 2364 | default: |
2103 | error = -EINVAL; | 2365 | return -EINVAL; |
2104 | } | 2366 | } |
2105 | recalc_sigpending(); | ||
2106 | spin_unlock_irq(¤t->sighand->siglock); | ||
2107 | 2367 | ||
2108 | return error; | 2368 | set_current_blocked(&newset); |
2369 | return 0; | ||
2109 | } | 2370 | } |
2110 | 2371 | ||
2111 | SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set, | 2372 | /** |
2373 | * sys_rt_sigprocmask - change the list of currently blocked signals | ||
2374 | * @how: whether to add, remove, or set signals | ||
2375 | * @nset: stores pending signals | ||
2376 | * @oset: previous value of signal mask if non-null | ||
2377 | * @sigsetsize: size of sigset_t type | ||
2378 | */ | ||
2379 | SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset, | ||
2112 | sigset_t __user *, oset, size_t, sigsetsize) | 2380 | sigset_t __user *, oset, size_t, sigsetsize) |
2113 | { | 2381 | { |
2114 | int error = -EINVAL; | ||
2115 | sigset_t old_set, new_set; | 2382 | sigset_t old_set, new_set; |
2383 | int error; | ||
2116 | 2384 | ||
2117 | /* XXX: Don't preclude handling different sized sigset_t's. */ | 2385 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2118 | if (sigsetsize != sizeof(sigset_t)) | 2386 | if (sigsetsize != sizeof(sigset_t)) |
2119 | goto out; | 2387 | return -EINVAL; |
2120 | 2388 | ||
2121 | if (set) { | 2389 | old_set = current->blocked; |
2122 | error = -EFAULT; | 2390 | |
2123 | if (copy_from_user(&new_set, set, sizeof(*set))) | 2391 | if (nset) { |
2124 | goto out; | 2392 | if (copy_from_user(&new_set, nset, sizeof(sigset_t))) |
2393 | return -EFAULT; | ||
2125 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); | 2394 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
2126 | 2395 | ||
2127 | error = sigprocmask(how, &new_set, &old_set); | 2396 | error = sigprocmask(how, &new_set, NULL); |
2128 | if (error) | 2397 | if (error) |
2129 | goto out; | 2398 | return error; |
2130 | if (oset) | 2399 | } |
2131 | goto set_old; | ||
2132 | } else if (oset) { | ||
2133 | spin_lock_irq(¤t->sighand->siglock); | ||
2134 | old_set = current->blocked; | ||
2135 | spin_unlock_irq(¤t->sighand->siglock); | ||
2136 | 2400 | ||
2137 | set_old: | 2401 | if (oset) { |
2138 | error = -EFAULT; | 2402 | if (copy_to_user(oset, &old_set, sizeof(sigset_t))) |
2139 | if (copy_to_user(oset, &old_set, sizeof(*oset))) | 2403 | return -EFAULT; |
2140 | goto out; | ||
2141 | } | 2404 | } |
2142 | error = 0; | 2405 | |
2143 | out: | 2406 | return 0; |
2144 | return error; | ||
2145 | } | 2407 | } |
2146 | 2408 | ||
2147 | long do_sigpending(void __user *set, unsigned long sigsetsize) | 2409 | long do_sigpending(void __user *set, unsigned long sigsetsize) |
@@ -2166,8 +2428,14 @@ long do_sigpending(void __user *set, unsigned long sigsetsize) | |||
2166 | 2428 | ||
2167 | out: | 2429 | out: |
2168 | return error; | 2430 | return error; |
2169 | } | 2431 | } |
2170 | 2432 | ||
2433 | /** | ||
2434 | * sys_rt_sigpending - examine a pending signal that has been raised | ||
2435 | * while blocked | ||
2436 | * @set: stores pending signals | ||
2437 | * @sigsetsize: size of sigset_t type or larger | ||
2438 | */ | ||
2171 | SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize) | 2439 | SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize) |
2172 | { | 2440 | { |
2173 | return do_sigpending(set, sigsetsize); | 2441 | return do_sigpending(set, sigsetsize); |
@@ -2216,9 +2484,9 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) | |||
2216 | err |= __put_user(from->si_trapno, &to->si_trapno); | 2484 | err |= __put_user(from->si_trapno, &to->si_trapno); |
2217 | #endif | 2485 | #endif |
2218 | #ifdef BUS_MCEERR_AO | 2486 | #ifdef BUS_MCEERR_AO |
2219 | /* | 2487 | /* |
2220 | * Other callers might not initialize the si_lsb field, | 2488 | * Other callers might not initialize the si_lsb field, |
2221 | * so check explicitely for the right codes here. | 2489 | * so check explicitly for the right codes here. |
2222 | */ | 2490 | */ |
2223 | if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) | 2491 | if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) |
2224 | err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); | 2492 | err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); |
@@ -2247,15 +2515,82 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) | |||
2247 | 2515 | ||
2248 | #endif | 2516 | #endif |
2249 | 2517 | ||
2518 | /** | ||
2519 | * do_sigtimedwait - wait for queued signals specified in @which | ||
2520 | * @which: queued signals to wait for | ||
2521 | * @info: if non-null, the signal's siginfo is returned here | ||
2522 | * @ts: upper bound on process time suspension | ||
2523 | */ | ||
2524 | int do_sigtimedwait(const sigset_t *which, siginfo_t *info, | ||
2525 | const struct timespec *ts) | ||
2526 | { | ||
2527 | struct task_struct *tsk = current; | ||
2528 | long timeout = MAX_SCHEDULE_TIMEOUT; | ||
2529 | sigset_t mask = *which; | ||
2530 | int sig; | ||
2531 | |||
2532 | if (ts) { | ||
2533 | if (!timespec_valid(ts)) | ||
2534 | return -EINVAL; | ||
2535 | timeout = timespec_to_jiffies(ts); | ||
2536 | /* | ||
2537 | * We can be close to the next tick, add another one | ||
2538 | * to ensure we will wait at least the time asked for. | ||
2539 | */ | ||
2540 | if (ts->tv_sec || ts->tv_nsec) | ||
2541 | timeout++; | ||
2542 | } | ||
2543 | |||
2544 | /* | ||
2545 | * Invert the set of allowed signals to get those we want to block. | ||
2546 | */ | ||
2547 | sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); | ||
2548 | signotset(&mask); | ||
2549 | |||
2550 | spin_lock_irq(&tsk->sighand->siglock); | ||
2551 | sig = dequeue_signal(tsk, &mask, info); | ||
2552 | if (!sig && timeout) { | ||
2553 | /* | ||
2554 | * None ready, temporarily unblock those we're interested | ||
2555 | * while we are sleeping in so that we'll be awakened when | ||
2556 | * they arrive. Unblocking is always fine, we can avoid | ||
2557 | * set_current_blocked(). | ||
2558 | */ | ||
2559 | tsk->real_blocked = tsk->blocked; | ||
2560 | sigandsets(&tsk->blocked, &tsk->blocked, &mask); | ||
2561 | recalc_sigpending(); | ||
2562 | spin_unlock_irq(&tsk->sighand->siglock); | ||
2563 | |||
2564 | timeout = schedule_timeout_interruptible(timeout); | ||
2565 | |||
2566 | spin_lock_irq(&tsk->sighand->siglock); | ||
2567 | __set_task_blocked(tsk, &tsk->real_blocked); | ||
2568 | siginitset(&tsk->real_blocked, 0); | ||
2569 | sig = dequeue_signal(tsk, &mask, info); | ||
2570 | } | ||
2571 | spin_unlock_irq(&tsk->sighand->siglock); | ||
2572 | |||
2573 | if (sig) | ||
2574 | return sig; | ||
2575 | return timeout ? -EINTR : -EAGAIN; | ||
2576 | } | ||
2577 | |||
2578 | /** | ||
2579 | * sys_rt_sigtimedwait - synchronously wait for queued signals specified | ||
2580 | * in @uthese | ||
2581 | * @uthese: queued signals to wait for | ||
2582 | * @uinfo: if non-null, the signal's siginfo is returned here | ||
2583 | * @uts: upper bound on process time suspension | ||
2584 | * @sigsetsize: size of sigset_t type | ||
2585 | */ | ||
2250 | SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, | 2586 | SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, |
2251 | siginfo_t __user *, uinfo, const struct timespec __user *, uts, | 2587 | siginfo_t __user *, uinfo, const struct timespec __user *, uts, |
2252 | size_t, sigsetsize) | 2588 | size_t, sigsetsize) |
2253 | { | 2589 | { |
2254 | int ret, sig; | ||
2255 | sigset_t these; | 2590 | sigset_t these; |
2256 | struct timespec ts; | 2591 | struct timespec ts; |
2257 | siginfo_t info; | 2592 | siginfo_t info; |
2258 | long timeout = 0; | 2593 | int ret; |
2259 | 2594 | ||
2260 | /* XXX: Don't preclude handling different sized sigset_t's. */ | 2595 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2261 | if (sigsetsize != sizeof(sigset_t)) | 2596 | if (sigsetsize != sizeof(sigset_t)) |
@@ -2263,65 +2598,27 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, | |||
2263 | 2598 | ||
2264 | if (copy_from_user(&these, uthese, sizeof(these))) | 2599 | if (copy_from_user(&these, uthese, sizeof(these))) |
2265 | return -EFAULT; | 2600 | return -EFAULT; |
2266 | |||
2267 | /* | ||
2268 | * Invert the set of allowed signals to get those we | ||
2269 | * want to block. | ||
2270 | */ | ||
2271 | sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP)); | ||
2272 | signotset(&these); | ||
2273 | 2601 | ||
2274 | if (uts) { | 2602 | if (uts) { |
2275 | if (copy_from_user(&ts, uts, sizeof(ts))) | 2603 | if (copy_from_user(&ts, uts, sizeof(ts))) |
2276 | return -EFAULT; | 2604 | return -EFAULT; |
2277 | if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0 | ||
2278 | || ts.tv_sec < 0) | ||
2279 | return -EINVAL; | ||
2280 | } | 2605 | } |
2281 | 2606 | ||
2282 | spin_lock_irq(&current->sighand->siglock); | 2607 | ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); |
2283 | sig = dequeue_signal(current, &these, &info); | ||
2284 | if (!sig) { | ||
2285 | timeout = MAX_SCHEDULE_TIMEOUT; | ||
2286 | if (uts) | ||
2287 | timeout = (timespec_to_jiffies(&ts) | ||
2288 | + (ts.tv_sec || ts.tv_nsec)); | ||
2289 | |||
2290 | if (timeout) { | ||
2291 | /* None ready -- temporarily unblock those we're | ||
2292 | * interested while we are sleeping in so that we'll | ||
2293 | * be awakened when they arrive. */ | ||
2294 | current->real_blocked = current->blocked; | ||
2295 | sigandsets(&current->blocked, &current->blocked, &these); | ||
2296 | recalc_sigpending(); | ||
2297 | spin_unlock_irq(&current->sighand->siglock); | ||
2298 | |||
2299 | timeout = schedule_timeout_interruptible(timeout); | ||
2300 | |||
2301 | spin_lock_irq(&current->sighand->siglock); | ||
2302 | sig = dequeue_signal(current, &these, &info); | ||
2303 | current->blocked = current->real_blocked; | ||
2304 | siginitset(&current->real_blocked, 0); | ||
2305 | recalc_sigpending(); | ||
2306 | } | ||
2307 | } | ||
2308 | spin_unlock_irq(&current->sighand->siglock); | ||
2309 | 2608 | ||
2310 | if (sig) { | 2609 | if (ret > 0 && uinfo) { |
2311 | ret = sig; | 2610 | if (copy_siginfo_to_user(uinfo, &info)) |
2312 | if (uinfo) { | 2611 | ret = -EFAULT; |
2313 | if (copy_siginfo_to_user(uinfo, &info)) | ||
2314 | ret = -EFAULT; | ||
2315 | } | ||
2316 | } else { | ||
2317 | ret = -EAGAIN; | ||
2318 | if (timeout) | ||
2319 | ret = -EINTR; | ||
2320 | } | 2612 | } |
2321 | 2613 | ||
2322 | return ret; | 2614 | return ret; |
2323 | } | 2615 | } |
2324 | 2616 | ||
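With the syscall reduced to copy-in, do_sigtimedwait() and copy-out, one property is easy to see: a zero @uts means timeout == 0, the sleep is skipped entirely, and the call degenerates into a non-blocking poll that returns -EAGAIN when nothing is queued. A hedged userspace sketch of that poll idiom (the helper name poll_sigchld is ours; it assumes SIGCHLD is already blocked by the caller):

    #define _POSIX_C_SOURCE 200809L
    #include <errno.h>
    #include <signal.h>
    #include <time.h>

    /* Returns the pid carried by a pending SIGCHLD, 0 if none, -1 on error. */
    int poll_sigchld(void)
    {
            sigset_t set;
            siginfo_t info;
            struct timespec zero = { 0, 0 };   /* no sleep at all */

            sigemptyset(&set);
            sigaddset(&set, SIGCHLD);

            if (sigtimedwait(&set, &info, &zero) == SIGCHLD)
                    return (int)info.si_pid;   /* a child changed state */
            if (errno == EAGAIN)
                    return 0;                  /* nothing pending right now */
            return -1;
    }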
2617 | /** | ||
2618 | * sys_kill - send a signal to a process | ||
2619 | * @pid: the PID of the process | ||
2620 | * @sig: signal to be sent | ||
2621 | */ | ||
2325 | SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) | 2622 | SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) |
2326 | { | 2623 | { |
2327 | struct siginfo info; | 2624 | struct siginfo info; |
@@ -2397,7 +2694,11 @@ SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) | |||
2397 | return do_tkill(tgid, pid, sig); | 2694 | return do_tkill(tgid, pid, sig); |
2398 | } | 2695 | } |
2399 | 2696 | ||
2400 | /* | 2697 | /** |
2698 | * sys_tkill - send signal to one specific task | ||
2699 | * @pid: the PID of the task | ||
2700 | * @sig: signal to be sent | ||
2701 | * | ||
2401 | * Send a signal to only one task, even if it's a CLONE_THREAD task. | 2702 | * Send a signal to only one task, even if it's a CLONE_THREAD task. |
2402 | */ | 2703 | */ |
2403 | SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) | 2704 | SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) |
@@ -2409,6 +2710,12 @@ SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) | |||
2409 | return do_tkill(0, pid, sig); | 2710 | return do_tkill(0, pid, sig); |
2410 | } | 2711 | } |
2411 | 2712 | ||
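tkill() and tgkill() address a single task rather than a whole thread group; the usual libc route is pthread_kill(), but the raw syscall can be used directly. A sketch (send_to_thread is a made-up helper; SYS_tgkill is assumed to be exposed by <sys/syscall.h>):

    #include <signal.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int send_to_thread(pid_t tgid, pid_t tid, int sig)
    {
            /* tgkill() verifies that @tid still belongs to @tgid, which
             * guards against hitting a recycled thread id; plain tkill()
             * has no such check. */
            return syscall(SYS_tgkill, tgid, tid, sig);
    }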
2713 | /** | ||
2714 | * sys_rt_sigqueueinfo - send signal information to a process | ||
2715 | * @pid: the PID of the process | ||
2716 | * @sig: signal to be sent | ||
2717 | * @uinfo: signal info to be sent | ||
2718 | */ | ||
2412 | SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, | 2719 | SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, |
2413 | siginfo_t __user *, uinfo) | 2720 | siginfo_t __user *, uinfo) |
2414 | { | 2721 | { |
@@ -2418,9 +2725,13 @@ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, | |||
2418 | return -EFAULT; | 2725 | return -EFAULT; |
2419 | 2726 | ||
2420 | /* Not even root can pretend to send signals from the kernel. | 2727 | /* Not even root can pretend to send signals from the kernel. |
2421 | Nor can they impersonate a kill(), which adds source info. */ | 2728 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
2422 | if (info.si_code >= 0) | 2729 | */ |
2730 | if (info.si_code >= 0 || info.si_code == SI_TKILL) { | ||
2731 | /* We used to allow any < 0 si_code */ | ||
2732 | WARN_ON_ONCE(info.si_code < 0); | ||
2423 | return -EPERM; | 2733 | return -EPERM; |
2734 | } | ||
2424 | info.si_signo = sig; | 2735 | info.si_signo = sig; |
2425 | 2736 | ||
2426 | /* POSIX.1b doesn't mention process groups. */ | 2737 | /* POSIX.1b doesn't mention process groups. */ |
@@ -2434,9 +2745,13 @@ long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) | |||
2434 | return -EINVAL; | 2745 | return -EINVAL; |
2435 | 2746 | ||
2436 | /* Not even root can pretend to send signals from the kernel. | 2747 | /* Not even root can pretend to send signals from the kernel. |
2437 | Nor can they impersonate a kill(), which adds source info. */ | 2748 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
2438 | if (info->si_code >= 0) | 2749 | */ |
2750 | if (info->si_code >= 0 || info->si_code == SI_TKILL) { | ||
2751 | /* We used to allow any < 0 si_code */ | ||
2752 | WARN_ON_ONCE(info->si_code < 0); | ||
2439 | return -EPERM; | 2753 | return -EPERM; |
2754 | } | ||
2440 | info->si_signo = sig; | 2755 | info->si_signo = sig; |
2441 | 2756 | ||
2442 | return do_send_specific(tgid, pid, sig, info); | 2757 | return do_send_specific(tgid, pid, sig, info); |
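Both checks above reject forged si_code values from userspace; the supported way to attach data to a signal remains sigqueue(3), which glibc implements on top of rt_sigqueueinfo() with si_code set to SI_QUEUE (negative and distinct from SI_TKILL, so it passes the test). A sketch with an invented helper name:

    #define _POSIX_C_SOURCE 200809L
    #include <signal.h>
    #include <sys/types.h>

    static int notify(pid_t pid, int token)
    {
            union sigval value;

            value.sival_int = token;
            /* glibc fills in si_code = SI_QUEUE, so the kernel's
             * forgery check in rt_sigqueueinfo() is satisfied. */
            return sigqueue(pid, SIGUSR2, value);
    }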
@@ -2528,12 +2843,11 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s | |||
2528 | 2843 | ||
2529 | error = -EINVAL; | 2844 | error = -EINVAL; |
2530 | /* | 2845 | /* |
2531 | * | 2846 | * Note - this code used to test ss_flags incorrectly: |
2532 | * Note - this code used to test ss_flags incorrectly | ||
2533 | * old code may have been written using ss_flags==0 | 2847 | * old code may have been written using ss_flags==0 |
2534 | * to mean ss_flags==SS_ONSTACK (as this was the only | 2848 | * to mean ss_flags==SS_ONSTACK (as this was the only |
2535 | * way that worked) - this fix preserves that older | 2849 | * way that worked) - this fix preserves that older |
2536 | * mechanism | 2850 | * mechanism. |
2537 | */ | 2851 | */ |
2538 | if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) | 2852 | if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) |
2539 | goto out; | 2853 | goto out; |
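The compatibility note is visible from userspace: an alternate signal stack installed with ss_flags left at 0 (the historically common way of saying SS_ONSTACK) is still accepted. A sketch, with install_altstack as an illustrative name:

    #include <signal.h>
    #include <stdlib.h>

    static int install_altstack(void)
    {
            stack_t ss;

            ss.ss_sp = malloc(SIGSTKSZ);
            if (!ss.ss_sp)
                    return -1;
            ss.ss_size = SIGSTKSZ;
            ss.ss_flags = 0;        /* treated like SS_ONSTACK per the note above */
            return sigaltstack(&ss, NULL);
    }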
@@ -2567,6 +2881,10 @@ out: | |||
2567 | 2881 | ||
2568 | #ifdef __ARCH_WANT_SYS_SIGPENDING | 2882 | #ifdef __ARCH_WANT_SYS_SIGPENDING |
2569 | 2883 | ||
2884 | /** | ||
2885 | * sys_sigpending - examine pending signals | ||
2886 | * @set: where the mask of pending signals is returned | ||
2887 | */ | ||
2570 | SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) | 2888 | SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) |
2571 | { | 2889 | { |
2572 | return do_sigpending(set, sizeof(*set)); | 2890 | return do_sigpending(set, sizeof(*set)); |
@@ -2575,60 +2893,65 @@ SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) | |||
2575 | #endif | 2893 | #endif |
2576 | 2894 | ||
2577 | #ifdef __ARCH_WANT_SYS_SIGPROCMASK | 2895 | #ifdef __ARCH_WANT_SYS_SIGPROCMASK |
2578 | /* Some platforms have their own version with special arguments others | 2896 | /** |
2579 | support only sys_rt_sigprocmask. */ | 2897 | * sys_sigprocmask - examine and change blocked signals |
2898 | * @how: whether to add, remove, or set signals | ||
2899 | * @nset: signals to add or remove (if non-null) | ||
2900 | * @oset: previous value of signal mask if non-null | ||
2901 | * | ||
2902 | * Some platforms have their own version with special arguments; | ||
2903 | * others support only sys_rt_sigprocmask. | ||
2904 | */ | ||
2580 | 2905 | ||
2581 | SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set, | 2906 | SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset, |
2582 | old_sigset_t __user *, oset) | 2907 | old_sigset_t __user *, oset) |
2583 | { | 2908 | { |
2584 | int error; | ||
2585 | old_sigset_t old_set, new_set; | 2909 | old_sigset_t old_set, new_set; |
2910 | sigset_t new_blocked; | ||
2586 | 2911 | ||
2587 | if (set) { | 2912 | old_set = current->blocked.sig[0]; |
2588 | error = -EFAULT; | 2913 | |
2589 | if (copy_from_user(&new_set, set, sizeof(*set))) | 2914 | if (nset) { |
2590 | goto out; | 2915 | if (copy_from_user(&new_set, nset, sizeof(*nset))) |
2916 | return -EFAULT; | ||
2591 | new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); | 2917 | new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); |
2592 | 2918 | ||
2593 | spin_lock_irq(&current->sighand->siglock); | 2919 | new_blocked = current->blocked; |
2594 | old_set = current->blocked.sig[0]; | ||
2595 | 2920 | ||
2596 | error = 0; | ||
2597 | switch (how) { | 2921 | switch (how) { |
2598 | default: | ||
2599 | error = -EINVAL; | ||
2600 | break; | ||
2601 | case SIG_BLOCK: | 2922 | case SIG_BLOCK: |
2602 | sigaddsetmask(¤t->blocked, new_set); | 2923 | sigaddsetmask(&new_blocked, new_set); |
2603 | break; | 2924 | break; |
2604 | case SIG_UNBLOCK: | 2925 | case SIG_UNBLOCK: |
2605 | sigdelsetmask(¤t->blocked, new_set); | 2926 | sigdelsetmask(&new_blocked, new_set); |
2606 | break; | 2927 | break; |
2607 | case SIG_SETMASK: | 2928 | case SIG_SETMASK: |
2608 | current->blocked.sig[0] = new_set; | 2929 | new_blocked.sig[0] = new_set; |
2609 | break; | 2930 | break; |
2931 | default: | ||
2932 | return -EINVAL; | ||
2610 | } | 2933 | } |
2611 | 2934 | ||
2612 | recalc_sigpending(); | 2935 | set_current_blocked(&new_blocked); |
2613 | spin_unlock_irq(&current->sighand->siglock); | 2936 | } |
2614 | if (error) | 2937 | |
2615 | goto out; | 2938 | if (oset) { |
2616 | if (oset) | ||
2617 | goto set_old; | ||
2618 | } else if (oset) { | ||
2619 | old_set = current->blocked.sig[0]; | ||
2620 | set_old: | ||
2621 | error = -EFAULT; | ||
2622 | if (copy_to_user(oset, &old_set, sizeof(*oset))) | 2939 | if (copy_to_user(oset, &old_set, sizeof(*oset))) |
2623 | goto out; | 2940 | return -EFAULT; |
2624 | } | 2941 | } |
2625 | error = 0; | 2942 | |
2626 | out: | 2943 | return 0; |
2627 | return error; | ||
2628 | } | 2944 | } |
2629 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ | 2945 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ |
2630 | 2946 | ||
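The rewrite routes all three @how cases through set_current_blocked() instead of open-coding the siglock/recalc_sigpending() sequence; userspace behaviour is unchanged. For reference, a sketch of the classic usage this old single-word-mask syscall serves (critical_section is an invented helper):

    #include <signal.h>

    static void critical_section(void (*work)(void))
    {
            sigset_t block, saved;

            sigemptyset(&block);
            sigaddset(&block, SIGINT);

            sigprocmask(SIG_BLOCK, &block, &saved);   /* previous mask saved */
            work();                                   /* runs with SIGINT held off */
            sigprocmask(SIG_SETMASK, &saved, NULL);   /* restore previous mask */
    }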
2631 | #ifdef __ARCH_WANT_SYS_RT_SIGACTION | 2947 | #ifdef __ARCH_WANT_SYS_RT_SIGACTION |
2948 | /** | ||
2949 | * sys_rt_sigaction - alter an action taken by a process | ||
2950 | * @sig: signal whose action is being changed | ||
2951 | * @act: new sigaction | ||
2952 | * @oact: used to save the previous sigaction | ||
2953 | * @sigsetsize: size of sigset_t type | ||
2954 | */ | ||
2632 | SYSCALL_DEFINE4(rt_sigaction, int, sig, | 2955 | SYSCALL_DEFINE4(rt_sigaction, int, sig, |
2633 | const struct sigaction __user *, act, | 2956 | const struct sigaction __user *, act, |
2634 | struct sigaction __user *, oact, | 2957 | struct sigaction __user *, oact, |
@@ -2707,14 +3030,22 @@ SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) | |||
2707 | 3030 | ||
2708 | SYSCALL_DEFINE0(pause) | 3031 | SYSCALL_DEFINE0(pause) |
2709 | { | 3032 | { |
2710 | current->state = TASK_INTERRUPTIBLE; | 3033 | while (!signal_pending(current)) { |
2711 | schedule(); | 3034 | current->state = TASK_INTERRUPTIBLE; |
3035 | schedule(); | ||
3036 | } | ||
2712 | return -ERESTARTNOHAND; | 3037 | return -ERESTARTNOHAND; |
2713 | } | 3038 | } |
2714 | 3039 | ||
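The loop guarantees that pause() only returns once a signal is actually pending, so a stray wakeup can no longer surface as -ERESTARTNOHAND with nothing to deliver. The userspace contract is untouched; a small sketch of it:

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static void on_usr1(int sig) { (void)sig; }

    int main(void)
    {
            signal(SIGUSR1, on_usr1);  /* a handler is needed, or SIGUSR1 kills us */
            pause();                   /* returns -1/EINTR after the handler runs */
            printf("woken by a signal\n");
            return 0;
    }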
2715 | #endif | 3040 | #endif |
2716 | 3041 | ||
2717 | #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND | 3042 | #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND |
3043 | /** | ||
3044 | * sys_rt_sigsuspend - replace the signal mask with @unewset | ||
3045 | * until a signal is received | ||
3046 | * @unewset: new signal mask value | ||
3047 | * @sigsetsize: size of sigset_t type | ||
3048 | */ | ||
2718 | SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) | 3049 | SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) |
2719 | { | 3050 | { |
2720 | sigset_t newset; | 3051 | sigset_t newset; |