Diffstat (limited to 'kernel/signal.c')
 -rw-r--r--  kernel/signal.c  139
 1 file changed, 89 insertions(+), 50 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 6705320784fd..934ae5e687b9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,12 +22,14 @@
 #include <linux/ptrace.h>
 #include <linux/signal.h>
 #include <linux/signalfd.h>
+#include <linux/ratelimit.h>
 #include <linux/tracehook.h>
 #include <linux/capability.h>
 #include <linux/freezer.h>
 #include <linux/pid_namespace.h>
 #include <linux/nsproxy.h>
-#include <trace/events/sched.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/signal.h>
 
 #include <asm/param.h>
 #include <asm/uaccess.h>
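The include hunk above is the heart of this series: kernel/signal.c becomes the one translation unit that defines CREATE_TRACE_POINTS before pulling in the new signal trace header, which is what materializes the trace_signal_*() bodies used in the hunks below; every other includer only gets the inline trace_*() wrappers. A cut-down sketch of what such an event header looks like, in the style of include/trace/events/signal.h; the event fields here are illustrative assumptions, not the real definition:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM signal

#if !defined(_TRACE_SIGNAL_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SIGNAL_H

#include <linux/sched.h>
#include <linux/string.h>
#include <linux/tracepoint.h>

/* Illustrative event only; the real signal_generate also carries
 * the siginfo. */
TRACE_EVENT(signal_generate,

        TP_PROTO(int sig, struct task_struct *task),

        TP_ARGS(sig, task),

        TP_STRUCT__entry(
                __field(int,    sig)
                __array(char,   comm,   TASK_COMM_LEN)
                __field(pid_t,  pid)
        ),

        TP_fast_assign(
                __entry->sig = sig;
                memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
                __entry->pid = task->pid;
        ),

        TP_printk("sig=%d comm=%s pid=%d",
                  __entry->sig, __entry->comm, __entry->pid)
);

#endif /* _TRACE_SIGNAL_H */

/* This part must be outside the include guard. */
#include <trace/define_trace.h>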
@@ -41,6 +43,8 @@
 
 static struct kmem_cache *sigqueue_cachep;
 
+int print_fatal_signals __read_mostly;
+
 static void __user *sig_handler(struct task_struct *t, int sig)
 {
         return t->sighand->action[sig - 1].sa.sa_handler;
@@ -159,7 +163,7 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
 {
         unsigned long i, *s, *m, x;
         int sig = 0;
-
+
         s = pending->signal.sig;
         m = mask->sig;
         switch (_NSIG_WORDS) {
@@ -184,33 +188,52 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
                 sig = ffz(~x) + 1;
                 break;
         }
 
         return sig;
 }
 
+static inline void print_dropped_signal(int sig)
+{
+        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
+
+        if (!print_fatal_signals)
+                return;
+
+        if (!__ratelimit(&ratelimit_state))
+                return;
+
+        printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
+                                current->comm, current->pid, sig);
+}
+
 /*
  * allocate a new signal queue record
  * - this may be called without locks if and only if t == current, otherwise an
  *   appopriate lock must be held to stop the target task from exiting
  */
-static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
-                                         int override_rlimit)
+static struct sigqueue *
+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
 {
         struct sigqueue *q = NULL;
         struct user_struct *user;
 
         /*
-         * We won't get problems with the target's UID changing under us
-         * because changing it requires RCU be used, and if t != current, the
-         * caller must be holding the RCU readlock (by way of a spinlock) and
-         * we use RCU protection here
+         * Protect access to @t credentials. This can go away when all
+         * callers hold rcu read lock.
          */
+        rcu_read_lock();
         user = get_uid(__task_cred(t)->user);
         atomic_inc(&user->sigpending);
+        rcu_read_unlock();
+
         if (override_rlimit ||
             atomic_read(&user->sigpending) <=
-                        t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
+                        t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) {
                 q = kmem_cache_alloc(sigqueue_cachep, flags);
+        } else {
+                print_dropped_signal(sig);
+        }
+
         if (unlikely(q == NULL)) {
                 atomic_dec(&user->sigpending);
                 free_uid(user);
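print_dropped_signal() above is the file's first user of <linux/ratelimit.h>. The idiom, as a minimal sketch with hypothetical names: DEFINE_RATELIMIT_STATE(name, interval, burst) declares a limiter that admits at most "burst" events per "interval" jiffies, and __ratelimit() returns nonzero while that budget lasts.

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* Hypothetical caller: at most 10 messages per 5-second window,
 * matching the parameters print_dropped_signal() picks above. */
static void report_event(int val)
{
        static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 10);

        if (__ratelimit(&rs))
                printk(KERN_INFO "event: %d\n", val);
}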
@@ -400,7 +423,7 @@ still_pending:
          */
         info->si_signo = sig;
         info->si_errno = 0;
-        info->si_code = 0;
+        info->si_code = SI_USER;
         info->si_pid = 0;
         info->si_uid = 0;
 }
@@ -584,6 +607,17 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
         return 1;
 }
 
+static inline int is_si_special(const struct siginfo *info)
+{
+        return info <= SEND_SIG_FORCED;
+}
+
+static inline bool si_fromuser(const struct siginfo *info)
+{
+        return info == SEND_SIG_NOINFO ||
+                (!is_si_special(info) && SI_FROMUSER(info));
+}
+
 /*
  * Bad permissions for sending the signal
  * - the caller must hold at least the RCU read lock
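The two helpers centralize a test that the later hunks strip from several call sites. For context, the SEND_SIG_* sentinels are tiny fake pointer values, which is why a plain pointer comparison classifies them. The exact definitions below are quoted from memory from this era's headers, so treat them as an assumption:

/*
 * Assumed definitions (include/linux/sched.h of this era):
 *
 *      #define SEND_SIG_NOINFO ((struct siginfo *) 0)
 *      #define SEND_SIG_PRIV   ((struct siginfo *) 1)
 *      #define SEND_SIG_FORCED ((struct siginfo *) 2)
 *
 * and SI_FROMUSER(p) expands to ((p)->si_code <= 0). Hence:
 *
 *      si_fromuser(SEND_SIG_NOINFO) -> true   (bare kill() from userspace)
 *      si_fromuser(SEND_SIG_PRIV)   -> false  (kernel-internal signal)
 *      si_fromuser(SEND_SIG_FORCED) -> false  (forced by the kernel)
 *      si_fromuser(real siginfo)    -> true iff si_code <= 0
 */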
@@ -598,7 +632,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
         if (!valid_signal(sig))
                 return -EINVAL;
 
-        if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
+        if (!si_fromuser(info))
                 return 0;
 
         error = audit_signal_info(sig, t); /* Let audit system see the signal */
@@ -834,7 +868,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
         struct sigqueue *q;
         int override_rlimit;
 
-        trace_sched_signal_send(sig, t);
+        trace_signal_generate(sig, info, t);
 
         assert_spin_locked(&t->sighand->siglock);
 
@@ -869,7 +903,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
         else
                 override_rlimit = 0;
 
-        q = __sigqueue_alloc(t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
+        q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
                         override_rlimit);
         if (q) {
                 list_add_tail(&q->list, &pending->list);
@@ -896,12 +930,21 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                         break;
                 }
         } else if (!is_si_special(info)) {
-                if (sig >= SIGRTMIN && info->si_code != SI_USER)
-                /*
-                 * Queue overflow, abort.  We may abort if the signal was rt
-                 * and sent by user using something other than kill().
-                 */
-                        return -EAGAIN;
+                if (sig >= SIGRTMIN && info->si_code != SI_USER) {
+                        /*
+                         * Queue overflow, abort.  We may abort if the
+                         * signal was rt and sent by user using something
+                         * other than kill().
+                         */
+                        trace_signal_overflow_fail(sig, group, info);
+                        return -EAGAIN;
+                } else {
+                        /*
+                         * This is a silent loss of information.  We still
+                         * send the signal, but the *info bits are lost.
+                         */
+                        trace_signal_lose_info(sig, group, info);
+                }
         }
 
 out_set:
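The branch split above mirrors userspace-visible semantics: POSIX requires realtime signals to queue, so exhausting RLIMIT_SIGPENDING must fail hard with EAGAIN, while a legacy signal is still delivered with only its siginfo payload dropped. A small hypothetical userspace probe for the rt case (a sketch, not part of the patch):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        union sigval v = { .sival_int = 42 };
        sigset_t set;

        /* Block SIGRTMIN so every queued instance stays pending and
         * keeps consuming a sigqueue slot. */
        sigemptyset(&set);
        sigaddset(&set, SIGRTMIN);
        sigprocmask(SIG_BLOCK, &set, NULL);

        for (int i = 0; ; i++) {
                if (sigqueue(getpid(), SIGRTMIN, v) == -1) {
                        if (errno == EAGAIN)
                                printf("queue full after %d signals\n", i);
                        else
                                perror("sigqueue");
                        break;
                }
        }
        return 0;
}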
@@ -917,16 +960,13 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
         int from_ancestor_ns = 0;
 
 #ifdef CONFIG_PID_NS
-        if (!is_si_special(info) && SI_FROMUSER(info) &&
-                        task_pid_nr_ns(current, task_active_pid_ns(t)) <= 0)
-                from_ancestor_ns = 1;
+        from_ancestor_ns = si_fromuser(info) &&
+                           !task_pid_nr_ns(current, task_active_pid_ns(t));
 #endif
 
         return __send_signal(sig, info, t, group, from_ancestor_ns);
 }
 
-int print_fatal_signals;
-
 static void print_fatal_signal(struct pt_regs *regs, int signr)
 {
         printk("%s/%d: potentially unexpected fatal signal %d.\n",
@@ -939,7 +979,8 @@ static void print_fatal_signal(struct pt_regs *regs, int signr)
                 for (i = 0; i < 16; i++) {
                         unsigned char insn;
 
-                        __get_user(insn, (unsigned char *)(regs->ip + i));
+                        if (get_user(insn, (unsigned char *)(regs->ip + i)))
+                                break;
                         printk("%02x ", insn);
                 }
         }
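A one-line fix with more behind it than the ignored return value: __get_user() skips the access_ok() range check, so probing regs->ip byte by byte could fault or print an uninitialized insn for a bogus address. get_user() validates the pointer and returns nonzero on fault, so the dump now stops at the first unreadable byte. Restated as a standalone sketch (the helper name is made up):

#include <linux/kernel.h>
#include <linux/uaccess.h>

/* Hypothetical helper mirroring the fixed loop above. */
static void dump_user_code(unsigned long ip)
{
        int i;

        for (i = 0; i < 16; i++) {
                unsigned char insn;

                /* get_user() checks access_ok() and returns -EFAULT on
                 * failure, unlike the raw __get_user() it replaces. */
                if (get_user(insn, (unsigned char __user *)(ip + i)))
                        break;
                printk(KERN_CONT "%02x ", insn);
        }
        printk(KERN_CONT "\n");
}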
@@ -1022,12 +1063,6 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
         return ret;
 }
 
-void
-force_sig_specific(int sig, struct task_struct *t)
-{
-        force_sig_info(sig, SEND_SIG_FORCED, t);
-}
-
 /*
  * Nuke all other threads in the group.
  */
@@ -1145,19 +1180,19 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
         int ret = -EINVAL;
         struct task_struct *p;
         const struct cred *pcred;
+        unsigned long flags;
 
         if (!valid_signal(sig))
                 return ret;
 
-        read_lock(&tasklist_lock);
+        rcu_read_lock();
         p = pid_task(pid, PIDTYPE_PID);
         if (!p) {
                 ret = -ESRCH;
                 goto out_unlock;
         }
         pcred = __task_cred(p);
-        if ((info == SEND_SIG_NOINFO ||
-             (!is_si_special(info) && SI_FROMUSER(info))) &&
+        if (si_fromuser(info) &&
             euid != pcred->suid && euid != pcred->uid &&
             uid != pcred->suid && uid != pcred->uid) {
                 ret = -EPERM;
@@ -1166,14 +1201,16 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
         ret = security_task_kill(p, info, sig, secid);
         if (ret)
                 goto out_unlock;
-        if (sig && p->sighand) {
-                unsigned long flags;
-                spin_lock_irqsave(&p->sighand->siglock, flags);
-                ret = __send_signal(sig, info, p, 1, 0);
-                spin_unlock_irqrestore(&p->sighand->siglock, flags);
+
+        if (sig) {
+                if (lock_task_sighand(p, &flags)) {
+                        ret = __send_signal(sig, info, p, 1, 0);
+                        unlock_task_sighand(p, &flags);
+                } else
+                        ret = -ESRCH;
         }
 out_unlock:
-        read_unlock(&tasklist_lock);
+        rcu_read_unlock();
         return ret;
 }
 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
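The two hunks above swap the tasklist_lock read side for RCU and replace the open-coded siglock dance with lock_task_sighand(), which revalidates that the task still has a sighand before taking ->siglock (the task can be reaped while we only hold the RCU read lock, hence the new -ESRCH path). The pattern as a minimal sketch around a hypothetical helper:

#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Hypothetical: look up a task by struct pid and take its siglock
 * safely under RCU. */
static int with_task_siglock(struct pid *pid)
{
        struct task_struct *p;
        unsigned long flags;
        int ret = -ESRCH;

        rcu_read_lock();
        p = pid_task(pid, PIDTYPE_PID);
        if (p && lock_task_sighand(p, &flags)) {
                /* p->sighand is non-NULL, stable and locked here */
                ret = 0;
                unlock_task_sighand(p, &flags);
        }
        rcu_read_unlock();
        return ret;
}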
@@ -1293,19 +1330,19 @@ EXPORT_SYMBOL(kill_pid);
  * These functions support sending signals using preallocated sigqueue
  * structures.  This is needed "because realtime applications cannot
  * afford to lose notifications of asynchronous events, like timer
  * expirations or I/O completions".  In the case of Posix Timers
  * we allocate the sigqueue structure from the timer_create.  If this
  * allocation fails we are able to report the failure to the application
  * with an EAGAIN error.
  */
-
 struct sigqueue *sigqueue_alloc(void)
 {
-        struct sigqueue *q;
+        struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
 
-        if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
+        if (q)
                 q->flags |= SIGQUEUE_PREALLOC;
-        return(q);
+
+        return q;
 }
 
 void sigqueue_free(struct sigqueue *q)
@@ -1807,11 +1844,6 @@ relock:
 
         for (;;) {
                 struct k_sigaction *ka;
-
-                if (unlikely(signal->group_stop_count > 0) &&
-                    do_signal_stop(0))
-                        goto relock;
-
                 /*
                  * Tracing can induce an artifical signal and choose sigaction.
                  * The return value in @signr determines the default action,
@@ -1823,6 +1855,10 @@ relock:
                 if (unlikely(signr != 0))
                         ka = return_ka;
                 else {
+                        if (unlikely(signal->group_stop_count > 0) &&
+                            do_signal_stop(0))
+                                goto relock;
+
                         signr = dequeue_signal(current, &current->blocked,
                                                info);
 
@@ -1839,6 +1875,9 @@ relock:
                         ka = &sighand->action[signr-1];
                 }
 
+                /* Trace actually delivered signals. */
+                trace_signal_deliver(signr, info, ka);
+
                 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
                         continue;
                 if (ka->sa.sa_handler != SIG_DFL) {