Diffstat (limited to 'kernel/signal.c')
 kernel/signal.c | 241
 1 file changed, 127 insertions(+), 114 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 64c5deeaca5d..6b982f2cf524 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,12 +22,14 @@
 #include <linux/ptrace.h>
 #include <linux/signal.h>
 #include <linux/signalfd.h>
+#include <linux/ratelimit.h>
 #include <linux/tracehook.h>
 #include <linux/capability.h>
 #include <linux/freezer.h>
 #include <linux/pid_namespace.h>
 #include <linux/nsproxy.h>
-#include <trace/events/sched.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/signal.h>
 
 #include <asm/param.h>
 #include <asm/uaccess.h>
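Switching from the generic trace_sched_signal_send() to dedicated signal events is why CREATE_TRACE_POINTS appears here: the macro must be defined in exactly one compilation unit before the trace header is included, so TRACE_EVENT() expands to tracepoint definitions there and to plain declarations everywhere else. A toy userspace model of the idiom; events.h, trace_foo and main.c are made-up names for illustration, not part of the patch:

/* events.h -- toy stand-in for a trace/events/*.h header */
#include <stdio.h>

void trace_foo(int v);          /* declaration, seen by every includer */

#ifdef CREATE_TRACE_POINTS
void trace_foo(int v)           /* definition, expanded exactly once */
{
        printf("foo: %d\n", v);
}
#endif

/* main.c -- exactly one file defines the macro before including */
#define CREATE_TRACE_POINTS
#include "events.h"

int main(void)
{
        trace_foo(42);
        return 0;
}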
@@ -41,6 +43,8 @@
 
 static struct kmem_cache *sigqueue_cachep;
 
+int print_fatal_signals __read_mostly;
+
 static void __user *sig_handler(struct task_struct *t, int sig)
 {
         return t->sighand->action[sig - 1].sa.sa_handler;
@@ -159,7 +163,7 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
 {
         unsigned long i, *s, *m, x;
         int sig = 0;
-
+
         s = pending->signal.sig;
         m = mask->sig;
         switch (_NSIG_WORDS) {
@@ -184,17 +188,31 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
                         sig = ffz(~x) + 1;
                 break;
         }
 
         return sig;
 }
 
+static inline void print_dropped_signal(int sig)
+{
+        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
+
+        if (!print_fatal_signals)
+                return;
+
+        if (!__ratelimit(&ratelimit_state))
+                return;
+
+        printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
+                        current->comm, current->pid, sig);
+}
+
 /*
  * allocate a new signal queue record
  * - this may be called without locks if and only if t == current, otherwise an
  *   appopriate lock must be held to stop the target task from exiting
  */
-static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
-                                         int override_rlimit)
+static struct sigqueue *
+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
 {
         struct sigqueue *q = NULL;
         struct user_struct *user;
@@ -207,10 +225,15 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
          */
         user = get_uid(__task_cred(t)->user);
         atomic_inc(&user->sigpending);
+
         if (override_rlimit ||
             atomic_read(&user->sigpending) <=
-                        t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
+                        t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) {
                 q = kmem_cache_alloc(sigqueue_cachep, flags);
+        } else {
+                print_dropped_signal(sig);
+        }
+
         if (unlikely(q == NULL)) {
                 atomic_dec(&user->sigpending);
                 free_uid(user);
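Taken together, the hunks above make __sigqueue_alloc() report which signal is being dropped when the RLIMIT_SIGPENDING check fails, throttled by __ratelimit(): DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10) permits at most 10 messages per 5-second window. A minimal userspace sketch of that interval/burst policy; struct rl and ratelimit_ok() are invented for illustration, and the kernel helper additionally counts the messages it suppresses:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct rl {
        time_t begin;    /* start of the current window  */
        int    interval; /* window length in seconds     */
        int    burst;    /* messages allowed per window  */
        int    printed;  /* messages emitted this window */
};

/* Return true if a message may be emitted in the current window. */
static bool ratelimit_ok(struct rl *r)
{
        time_t now = time(NULL);

        if (now - r->begin >= r->interval) {    /* window expired: reset */
                r->begin = now;
                r->printed = 0;
        }
        if (r->printed >= r->burst)
                return false;
        r->printed++;
        return true;
}

int main(void)
{
        struct rl r = { .begin = 0, .interval = 5, .burst = 10 };

        for (int i = 0; i < 100; i++)
                if (ratelimit_ok(&r))
                        printf("message %d\n", i); /* at most 10 per 5s */
        return 0;
}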
@@ -705,7 +728,7 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
 
         if (why) {
                 /*
-                 * The first thread which returns from finish_stop()
+                 * The first thread which returns from do_signal_stop()
                  * will take ->siglock, notice SIGNAL_CLD_MASK, and
                  * notify its parent. See get_signal_to_deliver().
                  */
@@ -834,7 +857,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
         struct sigqueue *q;
         int override_rlimit;
 
-        trace_sched_signal_send(sig, t);
+        trace_signal_generate(sig, info, t);
 
         assert_spin_locked(&t->sighand->siglock);
 
@@ -869,7 +892,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
         else
                 override_rlimit = 0;
 
-        q = __sigqueue_alloc(t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
+        q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
                 override_rlimit);
         if (q) {
                 list_add_tail(&q->list, &pending->list);
@@ -896,12 +919,21 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                         break;
                 }
         } else if (!is_si_special(info)) {
-                if (sig >= SIGRTMIN && info->si_code != SI_USER)
-                /*
-                 * Queue overflow, abort. We may abort if the signal was rt
-                 * and sent by user using something other than kill().
-                 */
+                if (sig >= SIGRTMIN && info->si_code != SI_USER) {
+                        /*
+                         * Queue overflow, abort. We may abort if the
+                         * signal was rt and sent by user using something
+                         * other than kill().
+                         */
+                        trace_signal_overflow_fail(sig, group, info);
                         return -EAGAIN;
+                } else {
+                        /*
+                         * This is a silent loss of information. We still
+                         * send the signal, but the *info bits are lost.
+                         */
+                        trace_signal_lose_info(sig, group, info);
+                }
         }
 
 out_set:
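The two new tracepoints mark the two failure modes when no sigqueue can be allocated: an rt signal carrying siginfo fails hard with -EAGAIN (trace_signal_overflow_fail), while a legacy signal is still delivered but collapses into the pending mask, silently losing its *info payload (trace_signal_lose_info). The -EAGAIN side is observable from userspace; a rough sketch, where the exact count reached depends on signals already pending for your user:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
        struct rlimit rl = { 4, 4 };    /* tiny queue to force overflow */
        sigset_t set;
        int i;

        setrlimit(RLIMIT_SIGPENDING, &rl);

        /* Block SIGRTMIN so every queued instance stays pending. */
        sigemptyset(&set);
        sigaddset(&set, SIGRTMIN);
        sigprocmask(SIG_BLOCK, &set, NULL);

        for (i = 0; i < 8; i++) {
                union sigval v = { .sival_int = i };

                if (sigqueue(getpid(), SIGRTMIN, v) < 0) {
                        perror("sigqueue");     /* EAGAIN: queue overflow */
                        break;
                }
        }
        printf("queued %d signals before overflow\n", i);
        return 0;
}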
@@ -925,8 +957,6 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
         return __send_signal(sig, info, t, group, from_ancestor_ns);
 }
 
-int print_fatal_signals;
-
 static void print_fatal_signal(struct pt_regs *regs, int signr)
 {
         printk("%s/%d: potentially unexpected fatal signal %d.\n",
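print_fatal_signals itself is not new; it already backs the kernel.print-fatal-signals sysctl (/proc/sys/kernel/print-fatal-signals), and the hunks above only move its definition earlier in the file and mark it __read_mostly so print_dropped_signal() can reuse it. A sketch that flips the knob and provokes print_fatal_signal(); writing the proc file typically needs root, and the message lands in the kernel log:

#include <signal.h>
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/print-fatal-signals", "w");

        if (!f) {
                perror("fopen");        /* typically needs root */
                return 1;
        }
        fputs("1\n", f);
        fclose(f);

        /* An unhandled fatal signal now logs via print_fatal_signal();
         * check dmesg after this process dies. */
        raise(SIGSEGV);
        return 0;
}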
@@ -971,6 +1001,20 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
         return send_signal(sig, info, t, 0);
 }
 
+int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
+                        bool group)
+{
+        unsigned long flags;
+        int ret = -ESRCH;
+
+        if (lock_task_sighand(p, &flags)) {
+                ret = send_signal(sig, info, p, group);
+                unlock_task_sighand(p, &flags);
+        }
+
+        return ret;
+}
+
 /*
  * Force a signal that the process can't ignore: if necessary
  * we unblock the signal and change any SIG_IGN to SIG_DFL.
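do_send_sig_info() centralizes the lock_task_sighand()/send_signal()/unlock_task_sighand() dance that group_send_sig_info(), send_sig_info() and do_send_specific() each open-coded, with bool group selecting the shared (process-wide) versus per-thread pending queue. The userspace-visible split is kill(2) versus pthread_kill(3); a rough two-thread sketch (compile with -pthread, illustrative only):

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void *worker(void *arg)
{
        sigset_t set;
        int sig;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigwait(&set, &sig);    /* collect the thread-directed signal */
        printf("worker got signal %d\n", sig);
        return NULL;
}

int main(void)
{
        pthread_t t;
        sigset_t set;

        /* Block SIGUSR1 in all threads; worker receives it via sigwait(). */
        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        pthread_sigmask(SIG_BLOCK, &set, NULL);

        pthread_create(&t, NULL, worker, NULL);

        pthread_kill(t, SIGUSR1);       /* group == false: one specific thread */
        pthread_join(t, NULL);

        kill(getpid(), SIGUSR1);        /* group == true: process-wide; any
                                         * thread with it unblocked may take
                                         * it (here it just stays pending) */
        return 0;
}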
@@ -1036,12 +1080,6 @@ void zap_other_threads(struct task_struct *p)
         }
 }
 
-int __fatal_signal_pending(struct task_struct *tsk)
-{
-        return sigismember(&tsk->pending.signal, SIGKILL);
-}
-EXPORT_SYMBOL(__fatal_signal_pending);
-
 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
 {
         struct sighand_struct *sighand;
@@ -1068,18 +1106,10 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long
  */
 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
-        unsigned long flags;
-        int ret;
-
-        ret = check_kill_permission(sig, info, p);
+        int ret = check_kill_permission(sig, info, p);
 
-        if (!ret && sig) {
-                ret = -ESRCH;
-                if (lock_task_sighand(p, &flags)) {
-                        ret = __group_send_sig_info(sig, info, p);
-                        unlock_task_sighand(p, &flags);
-                }
-        }
+        if (!ret && sig)
+                ret = do_send_sig_info(sig, info, p, true);
 
         return ret;
 }
@@ -1224,15 +1254,9 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
  * These are for backward compatibility with the rest of the kernel source.
  */
 
-/*
- * The caller must ensure the task can't exit.
- */
 int
 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
-        int ret;
-        unsigned long flags;
-
         /*
          * Make sure legacy kernel users don't send in bad values
          * (normal paths check this in check_kill_permission).
@@ -1240,10 +1264,7 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
         if (!valid_signal(sig))
                 return -EINVAL;
 
-        spin_lock_irqsave(&p->sighand->siglock, flags);
-        ret = specific_send_sig_info(sig, info, p);
-        spin_unlock_irqrestore(&p->sighand->siglock, flags);
-        return ret;
+        return do_send_sig_info(sig, info, p, false);
 }
 
 #define __si_special(priv) \
@@ -1302,19 +1323,19 @@ EXPORT_SYMBOL(kill_pid);
  * These functions support sending signals using preallocated sigqueue
  * structures. This is needed "because realtime applications cannot
  * afford to lose notifications of asynchronous events, like timer
  * expirations or I/O completions". In the case of Posix Timers
  * we allocate the sigqueue structure from the timer_create. If this
  * allocation fails we are able to report the failure to the application
  * with an EAGAIN error.
  */
-
 struct sigqueue *sigqueue_alloc(void)
 {
-        struct sigqueue *q;
+        struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
 
-        if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
+        if (q)
                 q->flags |= SIGQUEUE_PREALLOC;
-        return(q);
+
+        return q;
 }
 
 void sigqueue_free(struct sigqueue *q)
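sigqueue_alloc() now passes -1 as the signal number, a "no particular signal" marker for preallocations, so the dropped-signal message is keyed to real sends; the contract the comment describes is unchanged. From userspace, a failed preallocation surfaces as timer_create(2) returning EAGAIN at setup time rather than as a lost expiration later; roughly (link with -lrt on older glibc):

#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct sigevent sev = {
                .sigev_notify = SIGEV_SIGNAL,
                .sigev_signo  = SIGRTMIN,
        };
        timer_t timerid;

        if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) < 0) {
                perror("timer_create"); /* EAGAIN if the sigqueue
                                         * preallocation failed */
                return 1;
        }
        puts("timer created; its expiration signal cannot be dropped");
        timer_delete(timerid);
        return 0;
}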
@@ -1383,15 +1404,6 @@ ret:
 }
 
 /*
- * Wake up any threads in the parent blocked in wait* syscalls.
- */
-static inline void __wake_up_parent(struct task_struct *p,
-                                    struct task_struct *parent)
-{
-        wake_up_interruptible_sync(&parent->signal->wait_chldexit);
-}
-
-/*
  * Let a parent know about the death of a child.
  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
  *
@@ -1673,29 +1685,6 @@ void ptrace_notify(int exit_code)
         spin_unlock_irq(&current->sighand->siglock);
 }
 
-static void
-finish_stop(int stop_count)
-{
-        /*
-         * If there are no other threads in the group, or if there is
-         * a group stop in progress and we are the last to stop,
-         * report to the parent. When ptraced, every thread reports itself.
-         */
-        if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
-                read_lock(&tasklist_lock);
-                do_notify_parent_cldstop(current, CLD_STOPPED);
-                read_unlock(&tasklist_lock);
-        }
-
-        do {
-                schedule();
-        } while (try_to_freeze());
-        /*
-         * Now we don't run again until continued.
-         */
-        current->exit_code = 0;
-}
-
 /*
  * This performs the stopping for SIGSTOP and other stop signals.
  * We have to stop all threads in the thread group.
@@ -1705,15 +1694,9 @@ finish_stop(int stop_count)
 static int do_signal_stop(int signr)
 {
         struct signal_struct *sig = current->signal;
-        int stop_count;
+        int notify;
 
-        if (sig->group_stop_count > 0) {
-                /*
-                 * There is a group stop in progress. We don't need to
-                 * start another one.
-                 */
-                stop_count = --sig->group_stop_count;
-        } else {
+        if (!sig->group_stop_count) {
                 struct task_struct *t;
 
                 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
@@ -1725,7 +1708,7 @@ static int do_signal_stop(int signr)
                  */
                 sig->group_exit_code = signr;
 
-                stop_count = 0;
+                sig->group_stop_count = 1;
                 for (t = next_thread(current); t != current; t = next_thread(t))
                         /*
                          * Setting state to TASK_STOPPED for a group
@@ -1734,19 +1717,44 @@ static int do_signal_stop(int signr)
                          */
                         if (!(t->flags & PF_EXITING) &&
                             !task_is_stopped_or_traced(t)) {
-                                stop_count++;
+                                sig->group_stop_count++;
                                 signal_wake_up(t, 0);
                         }
-                sig->group_stop_count = stop_count;
         }
+        /*
+         * If there are no other threads in the group, or if there is
+         * a group stop in progress and we are the last to stop, report
+         * to the parent. When ptraced, every thread reports itself.
+         */
+        notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
+        notify = tracehook_notify_jctl(notify, CLD_STOPPED);
+        /*
+         * tracehook_notify_jctl() can drop and reacquire siglock, so
+         * we keep ->group_stop_count != 0 before the call. If SIGCONT
+         * or SIGKILL comes in between ->group_stop_count == 0.
+         */
+        if (sig->group_stop_count) {
+                if (!--sig->group_stop_count)
+                        sig->flags = SIGNAL_STOP_STOPPED;
+                current->exit_code = sig->group_exit_code;
+                __set_current_state(TASK_STOPPED);
+        }
+        spin_unlock_irq(&current->sighand->siglock);
 
-        if (stop_count == 0)
-                sig->flags = SIGNAL_STOP_STOPPED;
-        current->exit_code = sig->group_exit_code;
-        __set_current_state(TASK_STOPPED);
+        if (notify) {
+                read_lock(&tasklist_lock);
+                do_notify_parent_cldstop(current, notify);
+                read_unlock(&tasklist_lock);
+        }
+
+        /* Now we don't run again until woken by SIGCONT or SIGKILL */
+        do {
+                schedule();
+        } while (try_to_freeze());
+
+        tracehook_finish_jctl();
+        current->exit_code = 0;
 
-        spin_unlock_irq(&current->sighand->siglock);
-        finish_stop(stop_count);
         return 1;
 }
 
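This is the core of the rework: finish_stop() is gone, and do_signal_stop() now counts the stopping thread itself in group_stop_count, decides whether to notify (notify is CLD_STOPPED only for the last stopper) while still holding siglock, and only then drops the lock, closing the window where tracehook_notify_jctl() could observe inconsistent stop state. What a parent sees is unchanged: one CLD_STOPPED event per group stop, as waitpid(WUNTRACED) shows:

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = fork();
        int status;

        if (pid == 0) {         /* child: idle until stopped */
                for (;;)
                        pause();
        }

        sleep(1);
        kill(pid, SIGSTOP);

        /* One notification for the whole (here single-thread) group. */
        waitpid(pid, &status, WUNTRACED);
        if (WIFSTOPPED(status))
                printf("child stopped by signal %d\n", WSTOPSIG(status));

        kill(pid, SIGKILL);
        waitpid(pid, &status, 0);
        return 0;
}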
@@ -1815,14 +1823,15 @@ relock:
                 int why = (signal->flags & SIGNAL_STOP_CONTINUED)
                                 ? CLD_CONTINUED : CLD_STOPPED;
                 signal->flags &= ~SIGNAL_CLD_MASK;
-                spin_unlock_irq(&sighand->siglock);
 
-                if (unlikely(!tracehook_notify_jctl(1, why)))
-                        goto relock;
+                why = tracehook_notify_jctl(why, CLD_CONTINUED);
+                spin_unlock_irq(&sighand->siglock);
 
-                read_lock(&tasklist_lock);
-                do_notify_parent_cldstop(current->group_leader, why);
-                read_unlock(&tasklist_lock);
+                if (why) {
+                        read_lock(&tasklist_lock);
+                        do_notify_parent_cldstop(current->group_leader, why);
+                        read_unlock(&tasklist_lock);
+                }
                 goto relock;
         }
 
@@ -1860,6 +1869,9 @@ relock:
                         ka = &sighand->action[signr-1];
                 }
 
+                /* Trace actually delivered signals. */
+                trace_signal_deliver(signr, info, ka);
+
                 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
                         continue;
                 if (ka->sa.sa_handler != SIG_DFL) {
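With the CREATE_TRACE_POINTS change above, signal_generate and signal_deliver (plus the overflow and lose_info events) become ordinary tracepoints under events/signal/ in the tracing filesystem. A sketch that enables the whole group and streams events; the paths assume debugfs mounted at /sys/kernel/debug and root privileges:

#include <stdio.h>

#define TRACE "/sys/kernel/debug/tracing"

static int echo(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        char line[512];
        FILE *pipe;

        if (echo(TRACE "/events/signal/enable", "1") < 0) {
                perror("enable");       /* needs root + debugfs mounted */
                return 1;
        }

        /* Stream events as they arrive (interrupt with Ctrl-C). */
        pipe = fopen(TRACE "/trace_pipe", "r");
        if (!pipe)
                return 1;
        while (fgets(line, sizeof(line), pipe))
                fputs(line, stdout);
        return 0;
}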
@@ -1987,14 +1999,14 @@ void exit_signals(struct task_struct *tsk)
         if (unlikely(tsk->signal->group_stop_count) &&
                         !--tsk->signal->group_stop_count) {
                 tsk->signal->flags = SIGNAL_STOP_STOPPED;
-                group_stop = 1;
+                group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
         }
 out:
         spin_unlock_irq(&tsk->sighand->siglock);
 
-        if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
+        if (unlikely(group_stop)) {
                 read_lock(&tasklist_lock);
-                do_notify_parent_cldstop(tsk, CLD_STOPPED);
+                do_notify_parent_cldstop(tsk, group_stop);
                 read_unlock(&tasklist_lock);
         }
 }
@@ -2290,7 +2302,6 @@ static int
 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
 {
         struct task_struct *p;
-        unsigned long flags;
         int error = -ESRCH;
 
         rcu_read_lock();
@@ -2300,14 +2311,16 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
                 /*
                  * The null signal is a permissions and process existence
                  * probe. No signal is actually delivered.
-                 *
-                 * If lock_task_sighand() fails we pretend the task dies
-                 * after receiving the signal. The window is tiny, and the
-                 * signal is private anyway.
                  */
-                if (!error && sig && lock_task_sighand(p, &flags)) {
-                        error = specific_send_sig_info(sig, info, p);
-                        unlock_task_sighand(p, &flags);
+                if (!error && sig) {
+                        error = do_send_sig_info(sig, info, p, false);
+                        /*
+                         * If lock_task_sighand() failed we pretend the task
+                         * dies after receiving the signal. The window is tiny,
+                         * and the signal is private anyway.
+                         */
+                        if (unlikely(error == -ESRCH))
+                                error = 0;
                 }
         }
         rcu_read_unlock();
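do_send_specific() keeps the null-signal semantics: sig == 0 remains a pure existence-and-permission probe, and the tiny window where lock_task_sighand() fails is now mapped onto "signal delivered, then the task died", i.e. success. The classic userspace counterpart of that probe:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

/* Return 1 if pid exists and we may signal it, 0 if gone, -1 if denied. */
static int process_alive(pid_t pid)
{
        if (kill(pid, 0) == 0)
                return 1;               /* exists, permitted */
        if (errno == ESRCH)
                return 0;               /* no such process */
        return -1;                      /* exists, but EPERM */
}

int main(int argc, char **argv)
{
        pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : 1;

        printf("pid %d: %d\n", pid, process_alive(pid));
        return 0;
}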