Diffstat (limited to 'kernel/signal.c')
-rw-r--r--	kernel/signal.c	646
1 file changed, 282 insertions(+), 364 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 64ad0ed15992..72bb4f51f963 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -39,11 +39,19 @@
 
 static struct kmem_cache *sigqueue_cachep;
 
+static int __sig_ignored(struct task_struct *t, int sig)
+{
+	void __user *handler;
+
+	/* Is it explicitly or implicitly ignored? */
+
+	handler = t->sighand->action[sig - 1].sa.sa_handler;
+	return handler == SIG_IGN ||
+		(handler == SIG_DFL && sig_kernel_ignore(sig));
+}
 
 static int sig_ignored(struct task_struct *t, int sig)
 {
-	void __user * handler;
-
 	/*
 	 * Tracers always want to know about signals..
 	 */
@@ -58,10 +66,7 @@ static int sig_ignored(struct task_struct *t, int sig)
 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
 		return 0;
 
-	/* Is it explicitly or implicitly ignored? */
-	handler = t->sighand->action[sig-1].sa.sa_handler;
-	return handler == SIG_IGN ||
-		(handler == SIG_DFL && sig_kernel_ignore(sig));
+	return __sig_ignored(t, sig);
 }
 
 /*
@@ -372,7 +377,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
  */
 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 {
-	int signr = 0;
+	int signr;
 
 	/* We only dequeue private signals from ourselves, we don't let
 	 * signalfd steal them
@@ -405,8 +410,12 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 			}
 		}
 	}
+
 	recalc_sigpending();
-	if (signr && unlikely(sig_kernel_stop(signr))) {
+	if (!signr)
+		return 0;
+
+	if (unlikely(sig_kernel_stop(signr))) {
 		/*
 		 * Set a marker that we have dequeued a stop signal.  Our
 		 * caller might release the siglock and then the pending
@@ -422,9 +431,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
 			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
 	}
-	if (signr &&
-	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
-	    info->si_sys_private) {
+	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
 		/*
 		 * Release the siglock to ensure proper locking order
 		 * of timer locks outside of siglocks.  Note, we leave
@@ -526,21 +533,34 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
 static int check_kill_permission(int sig, struct siginfo *info,
 				 struct task_struct *t)
 {
-	int error = -EINVAL;
+	struct pid *sid;
+	int error;
+
 	if (!valid_signal(sig))
-		return error;
+		return -EINVAL;
 
-	if (info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) {
-		error = audit_signal_info(sig, t); /* Let audit system see the signal */
-		if (error)
-			return error;
-		error = -EPERM;
-		if (((sig != SIGCONT) ||
-			(task_session_nr(current) != task_session_nr(t)))
-		    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
-		    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
-		    && !capable(CAP_KILL))
-			return error;
+	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
+		return 0;
+
+	error = audit_signal_info(sig, t); /* Let audit system see the signal */
+	if (error)
+		return error;
+
+	if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
+	    (current->uid ^ t->suid) && (current->uid ^ t->uid) &&
+	    !capable(CAP_KILL)) {
+		switch (sig) {
+		case SIGCONT:
+			sid = task_session(t);
+			/*
+			 * We don't return the error if sid == NULL. The
+			 * task was unhashed, the caller must notice this.
+			 */
+			if (!sid || sid == task_session(current))
+				break;
+		default:
+			return -EPERM;
+		}
 	}
 
 	return security_task_kill(t, info, sig, 0);
@@ -550,62 +570,44 @@ static int check_kill_permission(int sig, struct siginfo *info,
 static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
 
 /*
- * Handle magic process-wide effects of stop/continue signals.
- * Unlike the signal actions, these happen immediately at signal-generation
+ * Handle magic process-wide effects of stop/continue signals. Unlike
+ * the signal actions, these happen immediately at signal-generation
  * time regardless of blocking, ignoring, or handling. This does the
  * actual continuing for SIGCONT, but not the actual stopping for stop
  * signals. The process stop is done as a signal action for SIG_DFL.
+ *
+ * Returns true if the signal should be actually delivered, otherwise
+ * it should be dropped.
  */
-static void handle_stop_signal(int sig, struct task_struct *p)
+static int prepare_signal(int sig, struct task_struct *p)
 {
+	struct signal_struct *signal = p->signal;
 	struct task_struct *t;
 
-	if (p->signal->flags & SIGNAL_GROUP_EXIT)
+	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
 		/*
-		 * The process is in the middle of dying already.
+		 * The process is in the middle of dying, nothing to do.
 		 */
-		return;
-
-	if (sig_kernel_stop(sig)) {
+	} else if (sig_kernel_stop(sig)) {
 		/*
 		 * This is a stop signal. Remove SIGCONT from all queues.
 		 */
-		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
+		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
 		t = p;
 		do {
 			rm_from_queue(sigmask(SIGCONT), &t->pending);
-			t = next_thread(t);
-		} while (t != p);
+		} while_each_thread(p, t);
 	} else if (sig == SIGCONT) {
+		unsigned int why;
 		/*
 		 * Remove all stop signals from all queues,
 		 * and wake all threads.
 		 */
-		if (unlikely(p->signal->group_stop_count > 0)) {
-			/*
-			 * There was a group stop in progress. We'll
-			 * pretend it finished before we got here. We are
-			 * obliged to report it to the parent: if the
-			 * SIGSTOP happened "after" this SIGCONT, then it
-			 * would have cleared this pending SIGCONT. If it
-			 * happened "before" this SIGCONT, then the parent
-			 * got the SIGCHLD about the stop finishing before
-			 * the continue happened. We do the notification
-			 * now, and it's as if the stop had finished and
-			 * the SIGCHLD was pending on entry to this kill.
-			 */
-			p->signal->group_stop_count = 0;
-			p->signal->flags = SIGNAL_STOP_CONTINUED;
-			spin_unlock(&p->sighand->siglock);
-			do_notify_parent_cldstop(p, CLD_STOPPED);
-			spin_lock(&p->sighand->siglock);
-		}
-		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
+		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
 		t = p;
 		do {
 			unsigned int state;
 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
-
 			/*
 			 * If there is a handler for SIGCONT, we must make
 			 * sure that no thread returns to user mode before
@@ -615,7 +617,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 			 * running the handler. With the TIF_SIGPENDING
 			 * flag set, the thread will pause and acquire the
 			 * siglock that we hold now and until we've queued
-			 * the pending signal. 
+			 * the pending signal.
 			 *
 			 * Wake up the stopped thread _after_ setting
 			 * TIF_SIGPENDING
@@ -626,49 +628,163 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 				state |= TASK_INTERRUPTIBLE;
 			}
 			wake_up_state(t, state);
+		} while_each_thread(p, t);
 
-			t = next_thread(t);
-		} while (t != p);
+		/*
+		 * Notify the parent with CLD_CONTINUED if we were stopped.
+		 *
+		 * If we were in the middle of a group stop, we pretend it
+		 * was already finished, and then continued. Since SIGCHLD
+		 * doesn't queue we report only CLD_STOPPED, as if the next
+		 * CLD_CONTINUED was dropped.
+		 */
+		why = 0;
+		if (signal->flags & SIGNAL_STOP_STOPPED)
+			why |= SIGNAL_CLD_CONTINUED;
+		else if (signal->group_stop_count)
+			why |= SIGNAL_CLD_STOPPED;
 
-		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
+		if (why) {
 			/*
-			 * We were in fact stopped, and are now continued.
-			 * Notify the parent with CLD_CONTINUED.
+			 * The first thread which returns from finish_stop()
+			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
+			 * notify its parent. See get_signal_to_deliver().
 			 */
-			p->signal->flags = SIGNAL_STOP_CONTINUED;
-			p->signal->group_exit_code = 0;
-			spin_unlock(&p->sighand->siglock);
-			do_notify_parent_cldstop(p, CLD_CONTINUED);
-			spin_lock(&p->sighand->siglock);
+			signal->flags = why | SIGNAL_STOP_CONTINUED;
+			signal->group_stop_count = 0;
+			signal->group_exit_code = 0;
 		} else {
 			/*
 			 * We are not stopped, but there could be a stop
 			 * signal in the middle of being processed after
 			 * being removed from the queue. Clear that too.
 			 */
-			p->signal->flags = 0;
+			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
 		}
-	} else if (sig == SIGKILL) {
+	}
+
+	return !sig_ignored(p, sig);
+}
+
+/*
+ * Test if P wants to take SIG. After we've checked all threads with this,
+ * it's equivalent to finding no threads not blocking SIG. Any threads not
+ * blocking SIG were ruled out because they are not running and already
+ * have pending signals. Such threads will dequeue from the shared queue
+ * as soon as they're available, so putting the signal on the shared queue
+ * will be equivalent to sending it to one such thread.
+ */
+static inline int wants_signal(int sig, struct task_struct *p)
+{
+	if (sigismember(&p->blocked, sig))
+		return 0;
+	if (p->flags & PF_EXITING)
+		return 0;
+	if (sig == SIGKILL)
+		return 1;
+	if (task_is_stopped_or_traced(p))
+		return 0;
+	return task_curr(p) || !signal_pending(p);
+}
+
+static void complete_signal(int sig, struct task_struct *p, int group)
+{
+	struct signal_struct *signal = p->signal;
+	struct task_struct *t;
+
+	/*
+	 * Now find a thread we can wake up to take the signal off the queue.
+	 *
+	 * If the main thread wants the signal, it gets first crack.
+	 * Probably the least surprising to the average bear.
+	 */
+	if (wants_signal(sig, p))
+		t = p;
+	else if (!group || thread_group_empty(p))
+		/*
+		 * There is just one thread and it does not need to be woken.
+		 * It will dequeue unblocked signals before it runs again.
+		 */
+		return;
+	else {
 		/*
-		 * Make sure that any pending stop signal already dequeued
-		 * is undone by the wakeup for SIGKILL.
+		 * Otherwise try to find a suitable thread.
 		 */
-		p->signal->flags = 0;
+		t = signal->curr_target;
+		while (!wants_signal(sig, t)) {
+			t = next_thread(t);
+			if (t == signal->curr_target)
+				/*
+				 * No thread needs to be woken.
+				 * Any eligible threads will see
+				 * the signal in the queue soon.
+				 */
+				return;
+		}
+		signal->curr_target = t;
 	}
+
+	/*
+	 * Found a killable thread. If the signal will be fatal,
+	 * then start taking the whole group down immediately.
+	 */
+	if (sig_fatal(p, sig) &&
+	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
+	    !sigismember(&t->real_blocked, sig) &&
+	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
+		/*
+		 * This signal will be fatal to the whole group.
+		 */
+		if (!sig_kernel_coredump(sig)) {
+			/*
+			 * Start a group exit and wake everybody up.
+			 * This way we don't have other threads
+			 * running and doing things after a slower
+			 * thread has the fatal signal pending.
+			 */
+			signal->flags = SIGNAL_GROUP_EXIT;
+			signal->group_exit_code = sig;
+			signal->group_stop_count = 0;
+			t = p;
+			do {
+				sigaddset(&t->pending.signal, SIGKILL);
+				signal_wake_up(t, 1);
+			} while_each_thread(p, t);
+			return;
+		}
+	}
+
+	/*
+	 * The signal is already in the shared-pending queue.
+	 * Tell the chosen thread to wake up and dequeue it.
+	 */
+	signal_wake_up(t, sig == SIGKILL);
+	return;
+}
+
+static inline int legacy_queue(struct sigpending *signals, int sig)
+{
+	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
 }
 
 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
-			struct sigpending *signals)
+			int group)
 {
-	struct sigqueue * q = NULL;
-	int ret = 0;
+	struct sigpending *pending;
+	struct sigqueue *q;
+
+	assert_spin_locked(&t->sighand->siglock);
+	if (!prepare_signal(sig, t))
+		return 0;
 
+	pending = group ? &t->signal->shared_pending : &t->pending;
 	/*
-	 * Deliver the signal to listening signalfds. This must be called
-	 * with the sighand lock held.
+	 * Short-circuit ignored signals and support queuing
+	 * exactly one non-rt signal, so that we can get more
+	 * detailed information about the cause of the signal.
 	 */
-	signalfd_notify(t, sig);
-
+	if (legacy_queue(pending, sig))
+		return 0;
 	/*
 	 * fast-pathed signals for kernel-internal things like SIGSTOP
 	 * or SIGKILL.
@@ -688,7 +804,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 						     (is_si_special(info) ||
 						      info->si_code >= 0)));
 	if (q) {
-		list_add_tail(&q->list, &signals->list);
+		list_add_tail(&q->list, &pending->list);
 		switch ((unsigned long) info) {
 		case (unsigned long) SEND_SIG_NOINFO:
 			q->info.si_signo = sig;
@@ -718,13 +834,12 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 	}
 
 out_set:
-	sigaddset(&signals->signal, sig);
-	return ret;
+	signalfd_notify(t, sig);
+	sigaddset(&pending->signal, sig);
+	complete_signal(sig, t, group);
+	return 0;
 }
 
-#define LEGACY_QUEUE(sigptr, sig) \
-	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
-
 int print_fatal_signals;
 
 static void print_fatal_signal(struct pt_regs *regs, int signr)
@@ -757,29 +872,16 @@ static int __init setup_print_fatal_signals(char *str)
 
 __setup("print-fatal-signals=", setup_print_fatal_signals);
 
+int
+__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+{
+	return send_signal(sig, info, p, 1);
+}
+
 static int
 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
-	int ret = 0;
-
-	BUG_ON(!irqs_disabled());
-	assert_spin_locked(&t->sighand->siglock);
-
-	/* Short-circuit ignored signals. */
-	if (sig_ignored(t, sig))
-		goto out;
-
-	/* Support queueing exactly one non-rt signal, so that we
-	   can get more detailed information about the cause of
-	   the signal. */
-	if (LEGACY_QUEUE(&t->pending, sig))
-		goto out;
-
-	ret = send_signal(sig, info, t, &t->pending);
-	if (!ret && !sigismember(&t->blocked, sig))
-		signal_wake_up(t, sig == SIGKILL);
-out:
-	return ret;
+	return send_signal(sig, info, t, 0);
 }
 
 /*
@@ -790,7 +892,8 @@ out:
  * since we do not want to have a signal handler that was blocked
  * be invoked when user space had explicitly blocked it.
  *
- * We don't want to have recursive SIGSEGV's etc, for example.
+ * We don't want to have recursive SIGSEGV's etc, for example,
+ * that is why we also clear SIGNAL_UNKILLABLE.
  */
 int
 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
@@ -810,6 +913,8 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 			recalc_sigpending_and_wake(t);
 		}
 	}
+	if (action->sa.sa_handler == SIG_DFL)
+		t->signal->flags &= ~SIGNAL_UNKILLABLE;
 	ret = specific_send_sig_info(sig, info, t);
 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 
@@ -823,134 +928,6 @@ force_sig_specific(int sig, struct task_struct *t)
 }
 
 /*
- * Test if P wants to take SIG. After we've checked all threads with this,
- * it's equivalent to finding no threads not blocking SIG. Any threads not
- * blocking SIG were ruled out because they are not running and already
- * have pending signals. Such threads will dequeue from the shared queue
- * as soon as they're available, so putting the signal on the shared queue
- * will be equivalent to sending it to one such thread.
- */
-static inline int wants_signal(int sig, struct task_struct *p)
-{
-	if (sigismember(&p->blocked, sig))
-		return 0;
-	if (p->flags & PF_EXITING)
-		return 0;
-	if (sig == SIGKILL)
-		return 1;
-	if (task_is_stopped_or_traced(p))
-		return 0;
-	return task_curr(p) || !signal_pending(p);
-}
-
-static void
-__group_complete_signal(int sig, struct task_struct *p)
-{
-	struct task_struct *t;
-
-	/*
-	 * Now find a thread we can wake up to take the signal off the queue.
-	 *
-	 * If the main thread wants the signal, it gets first crack.
-	 * Probably the least surprising to the average bear.
-	 */
-	if (wants_signal(sig, p))
-		t = p;
-	else if (thread_group_empty(p))
-		/*
-		 * There is just one thread and it does not need to be woken.
-		 * It will dequeue unblocked signals before it runs again.
-		 */
-		return;
-	else {
-		/*
-		 * Otherwise try to find a suitable thread.
-		 */
-		t = p->signal->curr_target;
-		if (t == NULL)
-			/* restart balancing at this thread */
-			t = p->signal->curr_target = p;
-
-		while (!wants_signal(sig, t)) {
-			t = next_thread(t);
-			if (t == p->signal->curr_target)
-				/*
-				 * No thread needs to be woken.
-				 * Any eligible threads will see
-				 * the signal in the queue soon.
-				 */
-				return;
-		}
-		p->signal->curr_target = t;
-	}
-
-	/*
-	 * Found a killable thread. If the signal will be fatal,
-	 * then start taking the whole group down immediately.
-	 */
-	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
-	    !sigismember(&t->real_blocked, sig) &&
-	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
-		/*
-		 * This signal will be fatal to the whole group.
-		 */
-		if (!sig_kernel_coredump(sig)) {
-			/*
-			 * Start a group exit and wake everybody up.
-			 * This way we don't have other threads
-			 * running and doing things after a slower
-			 * thread has the fatal signal pending.
-			 */
-			p->signal->flags = SIGNAL_GROUP_EXIT;
-			p->signal->group_exit_code = sig;
-			p->signal->group_stop_count = 0;
-			t = p;
-			do {
-				sigaddset(&t->pending.signal, SIGKILL);
-				signal_wake_up(t, 1);
-			} while_each_thread(p, t);
-			return;
-		}
-	}
-
-	/*
-	 * The signal is already in the shared-pending queue.
-	 * Tell the chosen thread to wake up and dequeue it.
-	 */
-	signal_wake_up(t, sig == SIGKILL);
-	return;
-}
-
-int
-__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
-{
-	int ret = 0;
-
-	assert_spin_locked(&p->sighand->siglock);
-	handle_stop_signal(sig, p);
-
-	/* Short-circuit ignored signals. */
-	if (sig_ignored(p, sig))
-		return ret;
-
-	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
-		/* This is a non-RT signal and we already have one queued. */
-		return ret;
-
-	/*
-	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
-	 * We always use the shared queue for process-wide signals,
-	 * to avoid several races.
-	 */
-	ret = send_signal(sig, info, p, &p->signal->shared_pending);
-	if (unlikely(ret))
-		return ret;
-
-	__group_complete_signal(sig, p);
-	return 0;
-}
-
-/*
  * Nuke all other threads in the group.
  */
 void zap_other_threads(struct task_struct *p)
@@ -978,13 +955,11 @@ int __fatal_signal_pending(struct task_struct *tsk)
 }
 EXPORT_SYMBOL(__fatal_signal_pending);
 
-/*
- * Must be called under rcu_read_lock() or with tasklist_lock read-held.
- */
 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
 {
 	struct sighand_struct *sighand;
 
+	rcu_read_lock();
 	for (;;) {
 		sighand = rcu_dereference(tsk->sighand);
 		if (unlikely(sighand == NULL))
@@ -995,6 +970,7 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long
 			break;
 		spin_unlock_irqrestore(&sighand->siglock, *flags);
 	}
+	rcu_read_unlock();
 
 	return sighand;
 }
@@ -1043,9 +1019,6 @@ int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
 	struct task_struct *p;
 
 	rcu_read_lock();
-	if (unlikely(sig_needs_tasklist(sig)))
-		read_lock(&tasklist_lock);
-
 retry:
 	p = pid_task(pid, PIDTYPE_PID);
 	if (p) {
@@ -1059,10 +1032,8 @@ retry:
 			 */
 			goto retry;
 	}
-
-	if (unlikely(sig_needs_tasklist(sig)))
-		read_unlock(&tasklist_lock);
 	rcu_read_unlock();
+
 	return error;
 }
 
@@ -1159,8 +1130,7 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
  */
 
 /*
- * These two are the most common entry points.  They send a signal
- * just to the specific thread.
+ * The caller must ensure the task can't exit.
  */
 int
 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
@@ -1175,17 +1145,9 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 	if (!valid_signal(sig))
 		return -EINVAL;
 
-	/*
-	 * We need the tasklist lock even for the specific
-	 * thread case (when we don't need to follow the group
-	 * lists) in order to avoid races with "p->sighand"
-	 * going away or changing from under us.
-	 */
-	read_lock(&tasklist_lock);
 	spin_lock_irqsave(&p->sighand->siglock, flags);
 	ret = specific_send_sig_info(sig, info, p);
 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
-	read_unlock(&tasklist_lock);
 	return ret;
 }
 
@@ -1291,28 +1253,24 @@ void sigqueue_free(struct sigqueue *q)
 	__sigqueue_free(q);
 }
 
-int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
+int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
 {
+	int sig = q->info.si_signo;
+	struct sigpending *pending;
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 
-	/*
-	 * The rcu based delayed sighand destroy makes it possible to
-	 * run this without tasklist lock held. The task struct itself
-	 * cannot go away as create_timer did get_task_struct().
-	 *
-	 * We return -1, when the task is marked exiting, so
-	 * posix_timer_event can redirect it to the group leader
-	 */
-	rcu_read_lock();
+	ret = -1;
+	if (!likely(lock_task_sighand(t, &flags)))
+		goto ret;
 
-	if (!likely(lock_task_sighand(p, &flags))) {
-		ret = -1;
-		goto out_err;
-	}
+	ret = 1; /* the signal is ignored */
+	if (!prepare_signal(sig, t))
+		goto out;
 
+	ret = 0;
 	if (unlikely(!list_empty(&q->list))) {
 		/*
 		 * If an SI_TIMER entry is already queue just increment
@@ -1322,77 +1280,15 @@ int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 		q->info.si_overrun++;
 		goto out;
 	}
-	/* Short-circuit ignored signals. */
-	if (sig_ignored(p, sig)) {
-		ret = 1;
-		goto out;
-	}
-	/*
-	 * Deliver the signal to listening signalfds. This must be called
-	 * with the sighand lock held.
-	 */
-	signalfd_notify(p, sig);
-
-	list_add_tail(&q->list, &p->pending.list);
-	sigaddset(&p->pending.signal, sig);
-	if (!sigismember(&p->blocked, sig))
-		signal_wake_up(p, sig == SIGKILL);
-
-out:
-	unlock_task_sighand(p, &flags);
-out_err:
-	rcu_read_unlock();
-
-	return ret;
-}
-
-int
-send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
-
-	read_lock(&tasklist_lock);
-	/* Since it_lock is held, p->sighand cannot be NULL. */
-	spin_lock_irqsave(&p->sighand->siglock, flags);
-	handle_stop_signal(sig, p);
-
-	/* Short-circuit ignored signals. */
-	if (sig_ignored(p, sig)) {
-		ret = 1;
-		goto out;
-	}
-
-	if (unlikely(!list_empty(&q->list))) {
-		/*
-		 * If an SI_TIMER entry is already queue just increment
-		 * the overrun count. Other uses should not try to
-		 * send the signal multiple times.
-		 */
-		BUG_ON(q->info.si_code != SI_TIMER);
-		q->info.si_overrun++;
-		goto out;
-	}
-	/*
-	 * Deliver the signal to listening signalfds. This must be called
-	 * with the sighand lock held.
-	 */
-	signalfd_notify(p, sig);
-
-	/*
-	 * Put this signal on the shared-pending queue.
-	 * We always use the shared queue for process-wide signals,
-	 * to avoid several races.
-	 */
-	list_add_tail(&q->list, &p->signal->shared_pending.list);
-	sigaddset(&p->signal->shared_pending.signal, sig);
-
-	__group_complete_signal(sig, p);
+
+	signalfd_notify(t, sig);
+	pending = group ? &t->signal->shared_pending : &t->pending;
+	list_add_tail(&q->list, &pending->list);
+	sigaddset(&pending->signal, sig);
+	complete_signal(sig, t, group);
 out:
-	spin_unlock_irqrestore(&p->sighand->siglock, flags);
-	read_unlock(&tasklist_lock);
+	unlock_task_sighand(t, &flags);
+ret:
 	return ret;
 }
 
@@ -1723,8 +1619,9 @@ static int do_signal_stop(int signr)
 	} else {
 		struct task_struct *t;
 
-		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
-		    unlikely(sig->group_exit_task))
+		if (unlikely((sig->flags & (SIGNAL_STOP_DEQUEUED | SIGNAL_UNKILLABLE))
+					 != SIGNAL_STOP_DEQUEUED) ||
+		    unlikely(signal_group_exit(sig)))
 			return 0;
 		/*
 		 * There is no group stop already in progress.
@@ -1799,8 +1696,9 @@ static int ptrace_signal(int signr, siginfo_t *info,
 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
 			  struct pt_regs *regs, void *cookie)
 {
-	sigset_t *mask = &current->blocked;
-	int signr = 0;
+	struct sighand_struct *sighand = current->sighand;
+	struct signal_struct *signal = current->signal;
+	int signr;
 
 relock:
 	/*
@@ -1811,16 +1709,32 @@ relock:
 	 */
 	try_to_freeze();
 
-	spin_lock_irq(&current->sighand->siglock);
+	spin_lock_irq(&sighand->siglock);
+	/*
+	 * Every stopped thread goes here after wakeup. Check to see if
+	 * we should notify the parent, prepare_signal(SIGCONT) encodes
+	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
+	 */
+	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
+		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
+				? CLD_CONTINUED : CLD_STOPPED;
+		signal->flags &= ~SIGNAL_CLD_MASK;
+		spin_unlock_irq(&sighand->siglock);
+
+		read_lock(&tasklist_lock);
+		do_notify_parent_cldstop(current->group_leader, why);
+		read_unlock(&tasklist_lock);
+		goto relock;
+	}
+
 	for (;;) {
 		struct k_sigaction *ka;
 
-		if (unlikely(current->signal->group_stop_count > 0) &&
+		if (unlikely(signal->group_stop_count > 0) &&
 		    do_signal_stop(0))
 			goto relock;
 
-		signr = dequeue_signal(current, mask, info);
-
+		signr = dequeue_signal(current, &current->blocked, info);
 		if (!signr)
 			break; /* will return 0 */
 
@@ -1830,7 +1744,7 @@ relock:
 			continue;
 		}
 
-		ka = &current->sighand->action[signr-1];
+		ka = &sighand->action[signr-1];
 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
 			continue;
 		if (ka->sa.sa_handler != SIG_DFL) {
@@ -1852,7 +1766,8 @@ relock:
 		/*
 		 * Global init gets no signals it doesn't want.
 		 */
-		if (is_global_init(current))
+		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
+		    !signal_group_exit(signal))
 			continue;
 
 		if (sig_kernel_stop(signr)) {
@@ -1867,14 +1782,14 @@ relock:
 			 * We need to check for that and bail out if necessary.
 			 */
 			if (signr != SIGSTOP) {
-				spin_unlock_irq(&current->sighand->siglock);
+				spin_unlock_irq(&sighand->siglock);
 
 				/* signals can be posted during this window */
 
 				if (is_current_pgrp_orphaned())
 					goto relock;
 
-				spin_lock_irq(&current->sighand->siglock);
+				spin_lock_irq(&sighand->siglock);
 			}
 
 			if (likely(do_signal_stop(signr))) {
@@ -1889,15 +1804,16 @@ relock:
 				continue;
 		}
 
-		spin_unlock_irq(&current->sighand->siglock);
+		spin_unlock_irq(&sighand->siglock);
 
 		/*
 		 * Anything else is fatal, maybe with a core dump.
 		 */
 		current->flags |= PF_SIGNALED;
-		if ((signr != SIGKILL) && print_fatal_signals)
-			print_fatal_signal(regs, signr);
+
 		if (sig_kernel_coredump(signr)) {
+			if (print_fatal_signals)
+				print_fatal_signal(regs, signr);
 			/*
 			 * If it was able to dump core, this kills all
 			 * other threads in the group and synchronizes with
@@ -1915,7 +1831,7 @@ relock:
 		do_group_exit(signr);
 		/* NOTREACHED */
 	}
-	spin_unlock_irq(&current->sighand->siglock);
+	spin_unlock_irq(&sighand->siglock);
 	return signr;
 }
 
@@ -2259,6 +2175,7 @@ static int do_tkill(int tgid, int pid, int sig)
 	int error;
 	struct siginfo info;
 	struct task_struct *p;
+	unsigned long flags;
 
 	error = -ESRCH;
 	info.si_signo = sig;
@@ -2267,22 +2184,24 @@ static int do_tkill(int tgid, int pid, int sig)
 	info.si_pid = task_tgid_vnr(current);
 	info.si_uid = current->uid;
 
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 	p = find_task_by_vpid(pid);
 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
 		error = check_kill_permission(sig, &info, p);
 		/*
 		 * The null signal is a permissions and process existence
 		 * probe. No signal is actually delivered.
+		 *
+		 * If lock_task_sighand() fails we pretend the task dies
+		 * after receiving the signal. The window is tiny, and the
+		 * signal is private anyway.
 		 */
-		if (!error && sig && p->sighand) {
-			spin_lock_irq(&p->sighand->siglock);
-			handle_stop_signal(sig, p);
+		if (!error && sig && lock_task_sighand(p, &flags)) {
 			error = specific_send_sig_info(sig, &info, p);
-			spin_unlock_irq(&p->sighand->siglock);
+			unlock_task_sighand(p, &flags);
 		}
 	}
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 
 	return error;
 }
@@ -2339,13 +2258,14 @@ sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
 
 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 {
+	struct task_struct *t = current;
 	struct k_sigaction *k;
 	sigset_t mask;
 
 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
 		return -EINVAL;
 
-	k = &current->sighand->action[sig-1];
+	k = &t->sighand->action[sig-1];
 
 	spin_lock_irq(&current->sighand->siglock);
 	if (oact)
@@ -2366,9 +2286,7 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 		 * (for example, SIGCHLD), shall cause the pending signal to
 		 * be discarded, whether or not it is blocked"
 		 */
-		if (act->sa.sa_handler == SIG_IGN ||
-		   (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
-			struct task_struct *t = current;
+		if (__sig_ignored(t, sig)) {
 			sigemptyset(&mask);
 			sigaddset(&mask, sig);
 			rm_from_queue_full(&mask, &t->signal->shared_pending);
@@ -2623,7 +2541,7 @@ asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
-	set_thread_flag(TIF_RESTORE_SIGMASK);
+	set_restore_sigmask();
 	return -ERESTARTNOHAND;
 }
 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
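
As a side note on the rule that the new legacy_queue() helper encodes above (at most one pending instance of a non-real-time signal, while real-time signals accumulate in the queue), the behaviour is observable from userspace. The following small program is not part of the patch; it is only a sketch, assuming a POSIX system with sigqueue(3) available, and the hypothetical names (on_usr1, on_rt) are chosen here for illustration.

/*
 * Illustration only, not kernel code: show that a blocked non-RT signal
 * coalesces to a single pending instance, while RT signals queue per send.
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t usr1_count;
static volatile sig_atomic_t rt_count;

static void on_usr1(int sig) { (void)sig; usr1_count++; }
static void on_rt(int sig)   { (void)sig; rt_count++; }

int main(void)
{
	sigset_t block, old;
	union sigval val = { .sival_int = 0 };

	signal(SIGUSR1, on_usr1);
	signal(SIGRTMIN, on_rt);

	/* Block both signals, then send each one three times while blocked. */
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigaddset(&block, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &block, &old);

	for (int i = 0; i < 3; i++) {
		kill(getpid(), SIGUSR1);           /* non-RT: coalesces while pending */
		sigqueue(getpid(), SIGRTMIN, val); /* RT: each send stays queued */
	}

	/* Unblock; the pending signals are delivered before we print. */
	sigprocmask(SIG_SETMASK, &old, NULL);

	printf("SIGUSR1 handled %d time(s), SIGRTMIN handled %d time(s)\n",
	       (int)usr1_count, (int)rt_count);
	/* Typically prints: SIGUSR1 handled 1 time(s), SIGRTMIN handled 3 time(s) */
	return 0;
}

The expected output mirrors the kernel-side split in send_signal(): legacy_queue() short-circuits a second non-RT instance, whereas RT signals always get a fresh sigqueue entry.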