Diffstat (limited to 'kernel/signal.c')
 -rw-r--r--  kernel/signal.c | 95
 1 file changed, 45 insertions, 50 deletions
diff --git a/kernel/signal.c b/kernel/signal.c
index 6ea13c09ae56..a4077e90f19f 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -277,6 +277,7 @@ void task_clear_jobctl_trapping(struct task_struct *task)
 {
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
+		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
 }
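The smp_mb() added here orders the JOBCTL_TRAPPING clear before the lockless waitqueue_active() check that wake_up_bit() performs internally; without a full barrier the waker could miss a sleeper that is just enqueuing itself. A minimal sketch of the pairing, assuming the waiter uses wait_on_bit() (the sleep callback name below is illustrative; kernels of this vintage still pass an action callback, later ones drop it):

	/* waker side, as in task_clear_jobctl_trapping() above */
	task->jobctl &= ~JOBCTL_TRAPPING;
	smp_mb();	/* order the clear before waitqueue_active() inside wake_up_bit() */
	wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);

	/* waiter side, e.g. a tracer waiting for the trap to complete */
	wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
		    trapping_sleep_fn, TASK_UNINTERRUPTIBLE);	/* trapping_sleep_fn: illustrative */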
@@ -705,11 +706,8 @@ void signal_wake_up_state(struct task_struct *t, unsigned int state)
  * Returns 1 if any signals were found.
  *
  * All callers must be holding the siglock.
- *
- * This version takes a sigset mask and looks at all signals,
- * not just those in the first mask word.
  */
-static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
+static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
 {
	struct sigqueue *q, *n;
	sigset_t m;
@@ -727,29 +725,6 @@ static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
	}
	return 1;
 }
-/*
- * Remove signals in mask from the pending set and queue.
- * Returns 1 if any signals were found.
- *
- * All callers must be holding the siglock.
- */
-static int rm_from_queue(unsigned long mask, struct sigpending *s)
-{
-	struct sigqueue *q, *n;
-
-	if (!sigtestsetmask(&s->signal, mask))
-		return 0;
-
-	sigdelsetmask(&s->signal, mask);
-	list_for_each_entry_safe(q, n, &s->list, list) {
-		if (q->info.si_signo < SIGRTMIN &&
-		    (mask & sigmask(q->info.si_signo))) {
-			list_del_init(&q->list);
-			__sigqueue_free(q);
-		}
-	}
-	return 1;
-}
 
 static inline int is_si_special(const struct siginfo *info)
 {
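With rm_from_queue() gone, every caller goes through the sigset_t-based helper, which (per the comment removed above) looks at all signals rather than only those in the first mask word. A minimal calling sketch, assuming the caller lives in kernel/signal.c and already holds the relevant ->siglock (tsk is illustrative):

	sigset_t flush;

	siginitset(&flush, sigmask(SIGCONT));	/* signals in the first mask word */
	sigaddset(&flush, SIGRTMIN);		/* rt signals can be flushed too */
	flush_sigqueue_mask(&flush, &tsk->signal->shared_pending);
	flush_sigqueue_mask(&flush, &tsk->pending);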
@@ -861,6 +836,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
 {
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
+	sigset_t flush;
 
	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (signal->flags & SIGNAL_GROUP_COREDUMP)
@@ -872,26 +848,25 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
-		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
-		t = p;
-		do {
-			rm_from_queue(sigmask(SIGCONT), &t->pending);
-		} while_each_thread(p, t);
+		siginitset(&flush, sigmask(SIGCONT));
+		flush_sigqueue_mask(&flush, &signal->shared_pending);
+		for_each_thread(p, t)
+			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
-		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
-		t = p;
-		do {
+		siginitset(&flush, SIG_KERNEL_STOP_MASK);
+		flush_sigqueue_mask(&flush, &signal->shared_pending);
+		for_each_thread(p, t) {
+			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
-			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
-		} while_each_thread(p, t);
+		}
 
		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
@@ -2854,7 +2829,7 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
 
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
-		siginitset(&tsk->real_blocked, 0);
+		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
@@ -3091,18 +3066,39 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
 }
 #endif
 
+/*
+ * For kthreads only, must not be used if cloned with CLONE_SIGHAND
+ */
+void kernel_sigaction(int sig, __sighandler_t action)
+{
+	spin_lock_irq(&current->sighand->siglock);
+	current->sighand->action[sig - 1].sa.sa_handler = action;
+	if (action == SIG_IGN) {
+		sigset_t mask;
+
+		sigemptyset(&mask);
+		sigaddset(&mask, sig);
+
+		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
+		flush_sigqueue_mask(&mask, &current->pending);
+		recalc_sigpending();
+	}
+	spin_unlock_irq(&current->sighand->siglock);
+}
+EXPORT_SYMBOL(kernel_sigaction);
+
 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 {
-	struct task_struct *t = current;
+	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;
 
	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;
 
-	k = &t->sighand->action[sig-1];
+	k = &p->sighand->action[sig-1];
 
-	spin_lock_irq(&current->sighand->siglock);
+	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;
 
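The new kernel_sigaction() above is exported so that kernel threads (which must not share a userspace handler table via CLONE_SIGHAND) can adjust their own handlers; installing SIG_IGN also flushes already-queued instances of the signal. A hedged usage sketch, not part of this patch (the wrapper and kthread body are illustrative; this mirrors how allow_signal()/disallow_signal() are built on kernel_sigaction() in later trees):

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/signal.h>

	static inline void my_disallow_signal(int sig)	/* illustrative wrapper */
	{
		kernel_sigaction(sig, SIG_IGN);
	}

	static int my_kthread_fn(void *unused)		/* hypothetical kthread body */
	{
		my_disallow_signal(SIGHUP);	/* drop queued SIGHUPs, ignore new ones */

		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);
		return 0;
	}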
@@ -3121,21 +3117,20 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
		 * (for example, SIGCHLD), shall cause the pending signal to
		 * be discarded, whether or not it is blocked"
		 */
-		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
+		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
-			rm_from_queue_full(&mask, &t->signal->shared_pending);
-			do {
-				rm_from_queue_full(&mask, &t->pending);
-			} while_each_thread(current, t);
+			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
+			for_each_thread(p, t)
+				flush_sigqueue_mask(&mask, &t->pending);
		}
	}
 
-	spin_unlock_irq(&current->sighand->siglock);
+	spin_unlock_irq(&p->sighand->siglock);
	return 0;
 }
 
 static int
 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
 {
	stack_t oss;
@@ -3496,7 +3491,7 @@ COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
 }
 #endif
 
-#ifdef __ARCH_WANT_SYS_SGETMASK
+#ifdef CONFIG_SGETMASK_SYSCALL
 
 /*
  * For backwards compatibility.  Functionality superseded by sigprocmask.
@@ -3517,7 +3512,7 @@ SYSCALL_DEFINE1(ssetmask, int, newmask)
 
	return old;
 }
-#endif /* __ARCH_WANT_SGETMASK */
+#endif /* CONFIG_SGETMASK_SYSCALL */
 
 #ifdef __ARCH_WANT_SYS_SIGNAL
 /*
