author    Oleg Nesterov <oleg@tv-sign.ru>    2008-04-30 03:52:53 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2008-04-30 11:29:35 -0400
commit    71f11dc025055cb2ef9226424f26b3287efadd26 (patch)
tree      d489b52d6d1779af1a325d4bed2354a07ca1cc60    /kernel/signal.c
parent    db51aeccd7097ce19a522a4c5ff91c320f870e2b (diff)
signals: move the definition of __group_complete_signal() up

Move the unchanged definition of __group_complete_signal() up so that
send_signal() can see it; this simplifies the reading of the next patches.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
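The move matters because of C's declaration-before-use rule: send_signal() sits
earlier in kernel/signal.c, so it cannot call __group_complete_signal() until
either a forward prototype is added or the definition itself is hoisted above
it, which is what this patch does. A minimal stand-alone sketch of the same
constraint, with hypothetical names (not part of the patch):

#include <stdio.h>

/* The patch's approach, in miniature: define the helper above its
 * caller so the call site can see it.  The alternative would be a
 * separate forward prototype ("static void helper(int sig);") that
 * later patches would have to keep in sync with the definition. */
static void helper(int sig)
{
        printf("pretend-handling signal %d\n", sig);
}

static void caller(void)
{
        helper(9);      /* compiles only because helper() is visible here */
}

int main(void)
{
        caller();
        return 0;
}

Hoisting the definition, rather than adding a prototype, leaves a single copy
of the signature for the follow-up patches to modify.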
Diffstat (limited to 'kernel/signal.c')
-rw-r--r--  kernel/signal.c  192
1 file changed, 96 insertions(+), 96 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 0298bd3d431b..3479a118ba1c 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -652,6 +652,102 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 	}
 }
 
+/*
+ * Test if P wants to take SIG. After we've checked all threads with this,
+ * it's equivalent to finding no threads not blocking SIG. Any threads not
+ * blocking SIG were ruled out because they are not running and already
+ * have pending signals. Such threads will dequeue from the shared queue
+ * as soon as they're available, so putting the signal on the shared queue
+ * will be equivalent to sending it to one such thread.
+ */
+static inline int wants_signal(int sig, struct task_struct *p)
+{
+	if (sigismember(&p->blocked, sig))
+		return 0;
+	if (p->flags & PF_EXITING)
+		return 0;
+	if (sig == SIGKILL)
+		return 1;
+	if (task_is_stopped_or_traced(p))
+		return 0;
+	return task_curr(p) || !signal_pending(p);
+}
+
+static void
+__group_complete_signal(int sig, struct task_struct *p)
+{
+	struct signal_struct *signal = p->signal;
+	struct task_struct *t;
+
+	/*
+	 * Now find a thread we can wake up to take the signal off the queue.
+	 *
+	 * If the main thread wants the signal, it gets first crack.
+	 * Probably the least surprising to the average bear.
+	 */
+	if (wants_signal(sig, p))
+		t = p;
+	else if (thread_group_empty(p))
+		/*
+		 * There is just one thread and it does not need to be woken.
+		 * It will dequeue unblocked signals before it runs again.
+		 */
+		return;
+	else {
+		/*
+		 * Otherwise try to find a suitable thread.
+		 */
+		t = signal->curr_target;
+		while (!wants_signal(sig, t)) {
+			t = next_thread(t);
+			if (t == signal->curr_target)
+				/*
+				 * No thread needs to be woken.
+				 * Any eligible threads will see
+				 * the signal in the queue soon.
+				 */
+				return;
+		}
+		signal->curr_target = t;
+	}
+
+	/*
+	 * Found a killable thread. If the signal will be fatal,
+	 * then start taking the whole group down immediately.
+	 */
+	if (sig_fatal(p, sig) && !(signal->flags & SIGNAL_GROUP_EXIT) &&
+	    !sigismember(&t->real_blocked, sig) &&
+	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
+		/*
+		 * This signal will be fatal to the whole group.
+		 */
+		if (!sig_kernel_coredump(sig)) {
+			/*
+			 * Start a group exit and wake everybody up.
+			 * This way we don't have other threads
+			 * running and doing things after a slower
+			 * thread has the fatal signal pending.
+			 */
+			signal->flags = SIGNAL_GROUP_EXIT;
+			signal->group_exit_code = sig;
+			signal->group_stop_count = 0;
+			t = p;
+			do {
+				sigaddset(&t->pending.signal, SIGKILL);
+				signal_wake_up(t, 1);
+			} while_each_thread(p, t);
+			return;
+		}
+	}
+
+	/*
+	 * The signal is already in the shared-pending queue.
+	 * Tell the chosen thread to wake up and dequeue it.
+	 */
+	signal_wake_up(t, sig == SIGKILL);
+	return;
+}
+
 static inline int legacy_queue(struct sigpending *signals, int sig)
 {
 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
@@ -817,102 +913,6 @@ force_sig_specific(int sig, struct task_struct *t)
 	force_sig_info(sig, SEND_SIG_FORCED, t);
 }
 
-/*
- * Test if P wants to take SIG. After we've checked all threads with this,
- * it's equivalent to finding no threads not blocking SIG. Any threads not
- * blocking SIG were ruled out because they are not running and already
- * have pending signals. Such threads will dequeue from the shared queue
- * as soon as they're available, so putting the signal on the shared queue
- * will be equivalent to sending it to one such thread.
- */
-static inline int wants_signal(int sig, struct task_struct *p)
-{
-	if (sigismember(&p->blocked, sig))
-		return 0;
-	if (p->flags & PF_EXITING)
-		return 0;
-	if (sig == SIGKILL)
-		return 1;
-	if (task_is_stopped_or_traced(p))
-		return 0;
-	return task_curr(p) || !signal_pending(p);
-}
-
-static void
-__group_complete_signal(int sig, struct task_struct *p)
-{
-	struct signal_struct *signal = p->signal;
-	struct task_struct *t;
-
-	/*
-	 * Now find a thread we can wake up to take the signal off the queue.
-	 *
-	 * If the main thread wants the signal, it gets first crack.
-	 * Probably the least surprising to the average bear.
-	 */
-	if (wants_signal(sig, p))
-		t = p;
-	else if (thread_group_empty(p))
-		/*
-		 * There is just one thread and it does not need to be woken.
-		 * It will dequeue unblocked signals before it runs again.
-		 */
-		return;
-	else {
-		/*
-		 * Otherwise try to find a suitable thread.
-		 */
-		t = signal->curr_target;
-		while (!wants_signal(sig, t)) {
-			t = next_thread(t);
-			if (t == signal->curr_target)
-				/*
-				 * No thread needs to be woken.
-				 * Any eligible threads will see
-				 * the signal in the queue soon.
-				 */
-				return;
-		}
-		signal->curr_target = t;
-	}
-
-	/*
-	 * Found a killable thread. If the signal will be fatal,
-	 * then start taking the whole group down immediately.
-	 */
-	if (sig_fatal(p, sig) && !(signal->flags & SIGNAL_GROUP_EXIT) &&
-	    !sigismember(&t->real_blocked, sig) &&
-	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
-		/*
-		 * This signal will be fatal to the whole group.
-		 */
-		if (!sig_kernel_coredump(sig)) {
-			/*
-			 * Start a group exit and wake everybody up.
-			 * This way we don't have other threads
-			 * running and doing things after a slower
-			 * thread has the fatal signal pending.
-			 */
-			signal->flags = SIGNAL_GROUP_EXIT;
-			signal->group_exit_code = sig;
-			signal->group_stop_count = 0;
-			t = p;
-			do {
-				sigaddset(&t->pending.signal, SIGKILL);
-				signal_wake_up(t, 1);
-			} while_each_thread(p, t);
-			return;
-		}
-	}
-
-	/*
-	 * The signal is already in the shared-pending queue.
-	 * Tell the chosen thread to wake up and dequeue it.
-	 */
-	signal_wake_up(t, sig == SIGKILL);
-	return;
-}
-
 int
 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {