Diffstat (limited to 'kernel/signal.c')
-rw-r--r--  kernel/signal.c  279
1 file changed, 147 insertions(+), 132 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 64c5deeaca5d..1814e68e4de3 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,12 +22,14 @@
 #include <linux/ptrace.h>
 #include <linux/signal.h>
 #include <linux/signalfd.h>
+#include <linux/ratelimit.h>
 #include <linux/tracehook.h>
 #include <linux/capability.h>
 #include <linux/freezer.h>
 #include <linux/pid_namespace.h>
 #include <linux/nsproxy.h>
-#include <trace/events/sched.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/signal.h>
 
 #include <asm/param.h>
 #include <asm/uaccess.h>
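The CREATE_TRACE_POINTS define follows the usual kernel tracepoint convention: the events are declared once with TRACE_EVENT() in a header under include/trace/events/, and exactly one translation unit defines CREATE_TRACE_POINTS before including that header so the tracepoint bodies are instantiated there; every other user includes the header bare. A minimal sketch of the pattern, using a hypothetical event name rather than the real contents of trace/events/signal.h:

/* include/trace/events/foo.h -- hypothetical illustration only */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM foo

#if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FOO_H

#include <linux/tracepoint.h>

TRACE_EVENT(foo_sig,			/* callable as trace_foo_sig(sig) */
	TP_PROTO(int sig),
	TP_ARGS(sig),
	TP_STRUCT__entry(
		__field(int, sig)
	),
	TP_fast_assign(
		__entry->sig = sig;
	),
	TP_printk("sig=%d", __entry->sig)
);

#endif /* _TRACE_FOO_H */

/* This part must stay outside the include guard */
#include <trace/define_trace.h>

kernel/signal.c plays the "exactly one" role for the signal events, which is why the define appears in this file and nowhere else.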
@@ -41,6 +43,8 @@
 
 static struct kmem_cache *sigqueue_cachep;
 
+int print_fatal_signals __read_mostly;
+
 static void __user *sig_handler(struct task_struct *t, int sig)
 {
 	return t->sighand->action[sig - 1].sa.sa_handler;
@@ -159,7 +163,7 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
 {
 	unsigned long i, *s, *m, x;
 	int sig = 0;
-	
+
 	s = pending->signal.sig;
 	m = mask->sig;
 	switch (_NSIG_WORDS) {
@@ -184,17 +188,31 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
 		sig = ffz(~x) + 1;
 		break;
 	}
 
 	return sig;
 }
 
+static inline void print_dropped_signal(int sig)
+{
+	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
+
+	if (!print_fatal_signals)
+		return;
+
+	if (!__ratelimit(&ratelimit_state))
+		return;
+
+	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
+				current->comm, current->pid, sig);
+}
+
 /*
  * allocate a new signal queue record
  * - this may be called without locks if and only if t == current, otherwise an
  *   appopriate lock must be held to stop the target task from exiting
  */
-static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
-					 int override_rlimit)
+static struct sigqueue *
+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
 {
 	struct sigqueue *q = NULL;
 	struct user_struct *user;
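print_dropped_signal() leans on the kernel's ratelimit helpers: DEFINE_RATELIMIT_STATE(name, interval, burst) allows at most burst events per interval jiffies (here 10 messages per 5 seconds), and __ratelimit() returns nonzero when the current caller is within budget. A rough userspace re-implementation of that interval/burst policy, for illustration only -- the kernel version additionally counts and reports how many callbacks were suppressed:

#include <stdio.h>
#include <time.h>

/* Mirrors DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 10): 5 s interval, burst 10. */
struct ratelimit_state {
	double interval;	/* seconds */
	int burst;		/* events allowed per interval */
	int printed;		/* events emitted in this interval */
	double begin;		/* interval start, 0.0 = not started */
};

static double now_sec(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec + ts.tv_nsec / 1e9;
}

/* Returns 1 if the caller may emit its message, 0 if it is suppressed. */
static int ratelimit_ok(struct ratelimit_state *rs)
{
	double t = now_sec();

	if (rs->begin == 0.0 || t - rs->begin >= rs->interval) {
		rs->begin = t;		/* new interval: reset the budget */
		rs->printed = 0;
	}
	if (rs->printed < rs->burst) {
		rs->printed++;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct ratelimit_state rs = { .interval = 5.0, .burst = 10 };

	for (int i = 0; i < 100; i++)
		if (ratelimit_ok(&rs))
			printf("dropped signal %d\n", i);	/* prints 10 times */
	return 0;
}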
@@ -207,10 +225,15 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
 	 */
 	user = get_uid(__task_cred(t)->user);
 	atomic_inc(&user->sigpending);
+
 	if (override_rlimit ||
 	    atomic_read(&user->sigpending) <=
-			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
+			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) {
 		q = kmem_cache_alloc(sigqueue_cachep, flags);
+	} else {
+		print_dropped_signal(sig);
+	}
+
 	if (unlikely(q == NULL)) {
 		atomic_dec(&user->sigpending);
 		free_uid(user);
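The limit enforced here is directly observable from userspace: with a real-time signal blocked, each sigqueue() call pins one pending sigqueue record against RLIMIT_SIGPENDING, and the call fails with EAGAIN once the limit is reached -- the moment this kernel path starts dropping. A hedged demo that shrinks the limit first so the loop stays short:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 16, .rlim_max = 16 };
	union sigval val = { .sival_int = 0 };
	sigset_t set;
	long queued = 0;

	setrlimit(RLIMIT_SIGPENDING, &rl);	/* small limit for the demo */

	/* Block SIGRTMIN so every queued instance stays pending. */
	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	while (sigqueue(getpid(), SIGRTMIN, val) == 0)
		queued++;

	printf("queued %ld signals, then: %s\n", queued, strerror(errno));
	return 0;
}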
@@ -400,7 +423,7 @@ still_pending:
 	 */
 	info->si_signo = sig;
 	info->si_errno = 0;
-	info->si_code = 0;
+	info->si_code = SI_USER;
 	info->si_pid = 0;
 	info->si_uid = 0;
 }
@@ -584,6 +607,17 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
 	return 1;
 }
 
+static inline int is_si_special(const struct siginfo *info)
+{
+	return info <= SEND_SIG_FORCED;
+}
+
+static inline bool si_fromuser(const struct siginfo *info)
+{
+	return info == SEND_SIG_NOINFO ||
+		(!is_si_special(info) && SI_FROMUSER(info));
+}
+
 /*
  * Bad permissions for sending the signal
  * - the caller must hold at least the RCU read lock
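The SI_FROMUSER() test these helpers wrap encodes a siginfo convention that userspace sees too: si_code values of zero or below (SI_USER, SI_QUEUE, SI_TKILL, ...) mean another process sent the signal, while positive values mean the kernel itself generated it. A small demo reading that field in a SA_SIGINFO handler (printf() in a handler is not async-signal-safe; it is tolerable here only because the process signals itself synchronously):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ctx)
{
	(void)ctx;
	printf("sig %d: si_code %d (%s), sender pid %d\n",
	       sig, info->si_code,
	       info->si_code <= 0 ? "user" : "kernel",
	       info->si_code <= 0 ? (int)info->si_pid : -1);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = handler,
				.sa_flags = SA_SIGINFO };
	union sigval val = { .sival_int = 42 };

	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	kill(getpid(), SIGUSR1);		/* si_code == SI_USER  (0)  */
	sigqueue(getpid(), SIGUSR1, val);	/* si_code == SI_QUEUE (-1) */
	return 0;
}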
@@ -598,7 +632,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
 	if (!valid_signal(sig))
 		return -EINVAL;
 
-	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
+	if (!si_fromuser(info))
 		return 0;
 
 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
@@ -705,7 +739,7 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
 
 	if (why) {
 		/*
-		 * The first thread which returns from finish_stop()
+		 * The first thread which returns from do_signal_stop()
 		 * will take ->siglock, notice SIGNAL_CLD_MASK, and
 		 * notify its parent. See get_signal_to_deliver().
 		 */
@@ -834,7 +868,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
 	struct sigqueue *q;
 	int override_rlimit;
 
-	trace_sched_signal_send(sig, t);
+	trace_signal_generate(sig, info, t);
 
 	assert_spin_locked(&t->sighand->siglock);
 
@@ -869,7 +903,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
 	else
 		override_rlimit = 0;
 
-	q = __sigqueue_alloc(t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
+	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
 		override_rlimit);
 	if (q) {
 		list_add_tail(&q->list, &pending->list);
@@ -896,12 +930,21 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
 			break;
 		}
 	} else if (!is_si_special(info)) {
-		if (sig >= SIGRTMIN && info->si_code != SI_USER)
+		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
 			/*
-			 * Queue overflow, abort.  We may abort if the signal was rt
-			 * and sent by user using something other than kill().
-			 */
+			 * Queue overflow, abort.  We may abort if the
+			 * signal was rt and sent by user using something
+			 * other than kill().
+			 */
+			trace_signal_overflow_fail(sig, group, info);
 			return -EAGAIN;
+		} else {
+			/*
+			 * This is a silent loss of information.  We still
+			 * send the signal, but the *info bits are lost.
+			 */
+			trace_signal_lose_info(sig, group, info);
+		}
 	}
 
 out_set:
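The two branches above are the kernel side of a classic POSIX contrast: real-time signals queue individually and can overflow (the EAGAIN path), while legacy signals collapse into one pending instance and lose only the extra siginfo (the silent-loss path). Userspace illustration of the difference:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t rt_count, legacy_count;

static void on_rt(int sig)     { (void)sig; rt_count++; }
static void on_legacy(int sig) { (void)sig; legacy_count++; }

int main(void)
{
	union sigval val = { .sival_int = 0 };
	sigset_t set;

	signal(SIGRTMIN, on_rt);
	signal(SIGUSR1, on_legacy);

	/* Block both signals, queue three of each, then unblock. */
	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	for (int i = 0; i < 3; i++) {
		sigqueue(getpid(), SIGRTMIN, val);	/* each one queues  */
		sigqueue(getpid(), SIGUSR1, val);	/* collapses to one */
	}
	sigprocmask(SIG_UNBLOCK, &set, NULL);

	printf("rt: %d, legacy: %d\n", rt_count, legacy_count);	/* 3 vs 1 */
	return 0;
}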
@@ -917,16 +960,13 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 	int from_ancestor_ns = 0;
 
 #ifdef CONFIG_PID_NS
-	if (!is_si_special(info) && SI_FROMUSER(info) &&
-	    task_pid_nr_ns(current, task_active_pid_ns(t)) <= 0)
-		from_ancestor_ns = 1;
+	from_ancestor_ns = si_fromuser(info) &&
+			   !task_pid_nr_ns(current, task_active_pid_ns(t));
 #endif
 
 	return __send_signal(sig, info, t, group, from_ancestor_ns);
 }
 
-int print_fatal_signals;
-
 static void print_fatal_signal(struct pt_regs *regs, int signr)
 {
 	printk("%s/%d: potentially unexpected fatal signal %d.\n",
@@ -971,6 +1011,20 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 	return send_signal(sig, info, t, 0);
 }
 
+int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
+			bool group)
+{
+	unsigned long flags;
+	int ret = -ESRCH;
+
+	if (lock_task_sighand(p, &flags)) {
+		ret = send_signal(sig, info, p, group);
+		unlock_task_sighand(p, &flags);
+	}
+
+	return ret;
+}
+
 /*
  * Force a signal that the process can't ignore: if necessary
  * we unblock the signal and change any SIG_IGN to SIG_DFL.
@@ -1008,12 +1062,6 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 	return ret;
 }
 
-void
-force_sig_specific(int sig, struct task_struct *t)
-{
-	force_sig_info(sig, SEND_SIG_FORCED, t);
-}
-
 /*
  * Nuke all other threads in the group.
  */
@@ -1036,12 +1084,6 @@ void zap_other_threads(struct task_struct *p)
 	}
 }
 
-int __fatal_signal_pending(struct task_struct *tsk)
-{
-	return sigismember(&tsk->pending.signal, SIGKILL);
-}
-EXPORT_SYMBOL(__fatal_signal_pending);
-
 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
 {
 	struct sighand_struct *sighand;
@@ -1068,18 +1110,10 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long
  */
 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
-	unsigned long flags;
-	int ret;
-
-	ret = check_kill_permission(sig, info, p);
+	int ret = check_kill_permission(sig, info, p);
 
-	if (!ret && sig) {
-		ret = -ESRCH;
-		if (lock_task_sighand(p, &flags)) {
-			ret = __group_send_sig_info(sig, info, p);
-			unlock_task_sighand(p, &flags);
-		}
-	}
+	if (!ret && sig)
+		ret = do_send_sig_info(sig, info, p, true);
 
 	return ret;
 }
@@ -1156,8 +1190,7 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
 		goto out_unlock;
 	}
 	pcred = __task_cred(p);
-	if ((info == SEND_SIG_NOINFO ||
-	     (!is_si_special(info) && SI_FROMUSER(info))) &&
+	if (si_fromuser(info) &&
 	    euid != pcred->suid && euid != pcred->uid &&
 	    uid != pcred->suid && uid != pcred->uid) {
 		ret = -EPERM;
@@ -1224,15 +1257,9 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
  * These are for backward compatibility with the rest of the kernel source.
  */
 
-/*
- * The caller must ensure the task can't exit.
- */
 int
 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
-	int ret;
-	unsigned long flags;
-
 	/*
 	 * Make sure legacy kernel users don't send in bad values
 	 * (normal paths check this in check_kill_permission).
@@ -1240,10 +1267,7 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 	if (!valid_signal(sig))
 		return -EINVAL;
 
-	spin_lock_irqsave(&p->sighand->siglock, flags);
-	ret = specific_send_sig_info(sig, info, p);
-	spin_unlock_irqrestore(&p->sighand->siglock, flags);
-	return ret;
+	return do_send_sig_info(sig, info, p, false);
 }
 
 #define __si_special(priv) \
@@ -1302,19 +1326,19 @@ EXPORT_SYMBOL(kill_pid);
  * These functions support sending signals using preallocated sigqueue
  * structures.  This is needed "because realtime applications cannot
  * afford to lose notifications of asynchronous events, like timer
  * expirations or I/O completions".  In the case of Posix Timers
  * we allocate the sigqueue structure from the timer_create.  If this
  * allocation fails we are able to report the failure to the application
  * with an EAGAIN error.
  */
-
 struct sigqueue *sigqueue_alloc(void)
 {
-	struct sigqueue *q;
+	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
 
-	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
+	if (q)
 		q->flags |= SIGQUEUE_PREALLOC;
-	return(q);
+
+	return q;
 }
 
 void sigqueue_free(struct sigqueue *q)
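The preallocation contract described in the comment is visible through the POSIX timer API: because the sigqueue record is allocated at timer_create() time, queue exhaustion surfaces there as EAGAIN instead of as a silently lost expiration later. A minimal consumer (link with -lrt on older glibc):

#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGRTMIN,
	};
	struct itimerspec its = { .it_value.tv_sec = 1 };
	timer_t timerid;
	sigset_t set;
	int sig;

	/* Block the signal first so it stays queued until sigwait(). */
	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) == -1) {
		perror("timer_create");	/* EAGAIN: no sigqueue record */
		return 1;
	}
	timer_settime(timerid, 0, &its, NULL);	/* arm for 1 s */

	sigwait(&set, &sig);
	printf("timer fired, got signal %d\n", sig);
	return 0;
}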
@@ -1383,15 +1407,6 @@ ret:
 }
 
 /*
- * Wake up any threads in the parent blocked in wait* syscalls.
- */
-static inline void __wake_up_parent(struct task_struct *p,
-				    struct task_struct *parent)
-{
-	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
-}
-
-/*
  * Let a parent know about the death of a child.
  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
  *
@@ -1673,29 +1688,6 @@ void ptrace_notify(int exit_code)
 	spin_unlock_irq(&current->sighand->siglock);
 }
 
-static void
-finish_stop(int stop_count)
-{
-	/*
-	 * If there are no other threads in the group, or if there is
-	 * a group stop in progress and we are the last to stop,
-	 * report to the parent.  When ptraced, every thread reports itself.
-	 */
-	if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
-		read_lock(&tasklist_lock);
-		do_notify_parent_cldstop(current, CLD_STOPPED);
-		read_unlock(&tasklist_lock);
-	}
-
-	do {
-		schedule();
-	} while (try_to_freeze());
-	/*
-	 * Now we don't run again until continued.
-	 */
-	current->exit_code = 0;
-}
-
 /*
  * This performs the stopping for SIGSTOP and other stop signals.
  * We have to stop all threads in the thread group.
@@ -1705,15 +1697,9 @@ finish_stop(int stop_count)
 static int do_signal_stop(int signr)
 {
 	struct signal_struct *sig = current->signal;
-	int stop_count;
+	int notify;
 
-	if (sig->group_stop_count > 0) {
-		/*
-		 * There is a group stop in progress.  We don't need to
-		 * start another one.
-		 */
-		stop_count = --sig->group_stop_count;
-	} else {
+	if (!sig->group_stop_count) {
 		struct task_struct *t;
 
 		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
@@ -1725,7 +1711,7 @@ static int do_signal_stop(int signr)
 		 */
 		sig->group_exit_code = signr;
 
-		stop_count = 0;
+		sig->group_stop_count = 1;
 		for (t = next_thread(current); t != current; t = next_thread(t))
 			/*
 			 * Setting state to TASK_STOPPED for a group
@@ -1734,19 +1720,44 @@ static int do_signal_stop(int signr)
 			 */
 			if (!(t->flags & PF_EXITING) &&
 			    !task_is_stopped_or_traced(t)) {
-				stop_count++;
+				sig->group_stop_count++;
 				signal_wake_up(t, 0);
 			}
-		sig->group_stop_count = stop_count;
 	}
+	/*
+	 * If there are no other threads in the group, or if there is
+	 * a group stop in progress and we are the last to stop, report
+	 * to the parent.  When ptraced, every thread reports itself.
+	 */
+	notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
+	notify = tracehook_notify_jctl(notify, CLD_STOPPED);
+	/*
+	 * tracehook_notify_jctl() can drop and reacquire siglock, so
+	 * we keep ->group_stop_count != 0 before the call. If SIGCONT
+	 * or SIGKILL comes in between ->group_stop_count == 0.
+	 */
+	if (sig->group_stop_count) {
+		if (!--sig->group_stop_count)
+			sig->flags = SIGNAL_STOP_STOPPED;
+		current->exit_code = sig->group_exit_code;
+		__set_current_state(TASK_STOPPED);
+	}
+	spin_unlock_irq(&current->sighand->siglock);
 
-	if (stop_count == 0)
-		sig->flags = SIGNAL_STOP_STOPPED;
-	current->exit_code = sig->group_exit_code;
-	__set_current_state(TASK_STOPPED);
+	if (notify) {
+		read_lock(&tasklist_lock);
+		do_notify_parent_cldstop(current, notify);
+		read_unlock(&tasklist_lock);
+	}
+
+	/* Now we don't run again until woken by SIGCONT or SIGKILL */
+	do {
+		schedule();
+	} while (try_to_freeze());
+
+	tracehook_finish_jctl();
+	current->exit_code = 0;
 
-	spin_unlock_irq(&current->sighand->siglock);
-	finish_stop(stop_count);
 	return 1;
 }
 
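The notification computed above (and delivered only after siglock is dropped) is what the parent eventually sees through the wait*() family: CLD_STOPPED surfaces as WIFSTOPPED(), and the SIGCONT side as WIFCONTINUED(). A hedged userspace sketch of that contract, stopping and continuing a child and collecting both notifications:

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {			/* child: just idle */
		for (;;)
			pause();
	}

	kill(pid, SIGSTOP);
	waitpid(pid, &status, WUNTRACED);
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));

	kill(pid, SIGCONT);
	waitpid(pid, &status, WCONTINUED);
	if (WIFCONTINUED(status))
		printf("child continued\n");

	kill(pid, SIGKILL);
	waitpid(pid, &status, 0);	/* reap the child */
	return 0;
}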
@@ -1815,24 +1826,20 @@ relock:
 		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
 				? CLD_CONTINUED : CLD_STOPPED;
 		signal->flags &= ~SIGNAL_CLD_MASK;
-		spin_unlock_irq(&sighand->siglock);
 
-		if (unlikely(!tracehook_notify_jctl(1, why)))
-			goto relock;
+		why = tracehook_notify_jctl(why, CLD_CONTINUED);
+		spin_unlock_irq(&sighand->siglock);
 
-		read_lock(&tasklist_lock);
-		do_notify_parent_cldstop(current->group_leader, why);
-		read_unlock(&tasklist_lock);
+		if (why) {
+			read_lock(&tasklist_lock);
+			do_notify_parent_cldstop(current->group_leader, why);
+			read_unlock(&tasklist_lock);
+		}
 		goto relock;
 	}
 
 	for (;;) {
 		struct k_sigaction *ka;
-
-		if (unlikely(signal->group_stop_count > 0) &&
-		    do_signal_stop(0))
-			goto relock;
-
 		/*
 		 * Tracing can induce an artifical signal and choose sigaction.
 		 * The return value in @signr determines the default action,
@@ -1844,6 +1851,10 @@ relock:
 		if (unlikely(signr != 0))
 			ka = return_ka;
 		else {
+			if (unlikely(signal->group_stop_count > 0) &&
+			    do_signal_stop(0))
+				goto relock;
+
 			signr = dequeue_signal(current, &current->blocked,
 					       info);
 
@@ -1860,6 +1871,9 @@ relock:
 			ka = &sighand->action[signr-1];
 		}
 
+		/* Trace actually delivered signals. */
+		trace_signal_deliver(signr, info, ka);
+
 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
 			continue;
 		if (ka->sa.sa_handler != SIG_DFL) {
@@ -1987,14 +2001,14 @@ void exit_signals(struct task_struct *tsk)
 	if (unlikely(tsk->signal->group_stop_count) &&
 			!--tsk->signal->group_stop_count) {
 		tsk->signal->flags = SIGNAL_STOP_STOPPED;
-		group_stop = 1;
+		group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
 	}
 out:
 	spin_unlock_irq(&tsk->sighand->siglock);
 
-	if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
+	if (unlikely(group_stop)) {
 		read_lock(&tasklist_lock);
-		do_notify_parent_cldstop(tsk, CLD_STOPPED);
+		do_notify_parent_cldstop(tsk, group_stop);
 		read_unlock(&tasklist_lock);
 	}
 }
@@ -2290,7 +2304,6 @@ static int
 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
 {
 	struct task_struct *p;
-	unsigned long flags;
 	int error = -ESRCH;
 
 	rcu_read_lock();
@@ -2300,14 +2313,16 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
 		/*
 		 * The null signal is a permissions and process existence
 		 * probe.  No signal is actually delivered.
-		 *
-		 * If lock_task_sighand() fails we pretend the task dies
-		 * after receiving the signal. The window is tiny, and the
-		 * signal is private anyway.
 		 */
-		if (!error && sig && lock_task_sighand(p, &flags)) {
-			error = specific_send_sig_info(sig, info, p);
-			unlock_task_sighand(p, &flags);
+		if (!error && sig) {
+			error = do_send_sig_info(sig, info, p, false);
+			/*
+			 * If lock_task_sighand() failed we pretend the task
+			 * dies after receiving the signal. The window is tiny,
+			 * and the signal is private anyway.
+			 */
+			if (unlikely(error == -ESRCH))
+				error = 0;
 		}
 	}
 	rcu_read_unlock();
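do_send_specific() is the backend of the tkill()/tgkill() syscalls, which deliver a private (non-group) signal to one specific thread; sig == 0 is the pure existence-and-permissions probe the comment describes. A demo via the raw syscall, since older glibc ships no tgkill() wrapper:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	pid_t tgid = getpid();
	pid_t tid  = syscall(SYS_gettid);	/* main thread: tid == tgid */

	/* The null signal checks existence and permissions only. */
	if (syscall(SYS_tgkill, tgid, tid, 0) == 0)
		printf("thread %d in process %d exists\n", tid, tgid);
	return 0;
}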