aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/signal.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/signal.c')
-rw-r--r--kernel/signal.c62
1 file changed, 41 insertions, 21 deletions
diff --git a/kernel/signal.c b/kernel/signal.c
index 4530fc654455..8e95855ff3cf 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -41,6 +41,8 @@
41 41
42static struct kmem_cache *sigqueue_cachep; 42static struct kmem_cache *sigqueue_cachep;
43 43
44DEFINE_TRACE(sched_signal_send);
45
44static void __user *sig_handler(struct task_struct *t, int sig) 46static void __user *sig_handler(struct task_struct *t, int sig)
45{ 47{
46 return t->sighand->action[sig - 1].sa.sa_handler; 48 return t->sighand->action[sig - 1].sa.sa_handler;
@@ -177,6 +179,11 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
177 return sig; 179 return sig;
178} 180}
179 181
182/*
183 * allocate a new signal queue record
184 * - this may be called without locks if and only if t == current, otherwise an
185 *   appropriate lock must be held to stop the target task from exiting
186 */
180static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, 187static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
181 int override_rlimit) 188 int override_rlimit)
182{ 189{
@@ -184,11 +191,12 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
184 struct user_struct *user; 191 struct user_struct *user;
185 192
186 /* 193 /*
187 * In order to avoid problems with "switch_user()", we want to make 194 * We won't get problems with the target's UID changing under us
188 * sure that the compiler doesn't re-load "t->user" 195 * because changing it requires RCU be used, and if t != current, the
196 * caller must be holding the RCU readlock (by way of a spinlock) and
197 * we use RCU protection here
189 */ 198 */
190 user = t->user; 199 user = get_uid(__task_cred(t)->user);
191 barrier();
192 atomic_inc(&user->sigpending); 200 atomic_inc(&user->sigpending);
193 if (override_rlimit || 201 if (override_rlimit ||
194 atomic_read(&user->sigpending) <= 202 atomic_read(&user->sigpending) <=
@@ -196,12 +204,14 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
196 q = kmem_cache_alloc(sigqueue_cachep, flags); 204 q = kmem_cache_alloc(sigqueue_cachep, flags);
197 if (unlikely(q == NULL)) { 205 if (unlikely(q == NULL)) {
198 atomic_dec(&user->sigpending); 206 atomic_dec(&user->sigpending);
207 free_uid(user);
199 } else { 208 } else {
200 INIT_LIST_HEAD(&q->list); 209 INIT_LIST_HEAD(&q->list);
201 q->flags = 0; 210 q->flags = 0;
202 q->user = get_uid(user); 211 q->user = user;
203 } 212 }
204 return(q); 213
214 return q;
205} 215}
206 216
207static void __sigqueue_free(struct sigqueue *q) 217static void __sigqueue_free(struct sigqueue *q)
@@ -562,10 +572,12 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
562 572
563/* 573/*
564 * Bad permissions for sending the signal 574 * Bad permissions for sending the signal
575 * - the caller must hold at least the RCU read lock
565 */ 576 */
566static int check_kill_permission(int sig, struct siginfo *info, 577static int check_kill_permission(int sig, struct siginfo *info,
567 struct task_struct *t) 578 struct task_struct *t)
568{ 579{
580 const struct cred *cred = current_cred(), *tcred;
569 struct pid *sid; 581 struct pid *sid;
570 int error; 582 int error;
571 583
@@ -579,8 +591,11 @@ static int check_kill_permission(int sig, struct siginfo *info,
579 if (error) 591 if (error)
580 return error; 592 return error;
581 593
582 if ((current->euid ^ t->suid) && (current->euid ^ t->uid) && 594 tcred = __task_cred(t);
583 (current->uid ^ t->suid) && (current->uid ^ t->uid) && 595 if ((cred->euid ^ tcred->suid) &&
596 (cred->euid ^ tcred->uid) &&
597 (cred->uid ^ tcred->suid) &&
598 (cred->uid ^ tcred->uid) &&
584 !capable(CAP_KILL)) { 599 !capable(CAP_KILL)) {
585 switch (sig) { 600 switch (sig) {
586 case SIGCONT: 601 case SIGCONT:
@@ -844,7 +859,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
844 q->info.si_errno = 0; 859 q->info.si_errno = 0;
845 q->info.si_code = SI_USER; 860 q->info.si_code = SI_USER;
846 q->info.si_pid = task_pid_vnr(current); 861 q->info.si_pid = task_pid_vnr(current);
847 q->info.si_uid = current->uid; 862 q->info.si_uid = current_uid();
848 break; 863 break;
849 case (unsigned long) SEND_SIG_PRIV: 864 case (unsigned long) SEND_SIG_PRIV:
850 q->info.si_signo = sig; 865 q->info.si_signo = sig;
@@ -1008,6 +1023,10 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long
1008 return sighand; 1023 return sighand;
1009} 1024}
1010 1025
1026/*
1027 * send signal info to all the members of a group
1028 * - the caller must hold the RCU read lock at least
1029 */
1011int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) 1030int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1012{ 1031{
1013 unsigned long flags; 1032 unsigned long flags;
@@ -1029,8 +1048,8 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1029/* 1048/*
1030 * __kill_pgrp_info() sends a signal to a process group: this is what the tty 1049 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1031 * control characters do (^C, ^Z etc) 1050 * control characters do (^C, ^Z etc)
1051 * - the caller must hold at least a readlock on tasklist_lock
1032 */ 1052 */
1033
1034int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) 1053int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1035{ 1054{
1036 struct task_struct *p = NULL; 1055 struct task_struct *p = NULL;
@@ -1086,6 +1105,7 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1086{ 1105{
1087 int ret = -EINVAL; 1106 int ret = -EINVAL;
1088 struct task_struct *p; 1107 struct task_struct *p;
1108 const struct cred *pcred;
1089 1109
1090 if (!valid_signal(sig)) 1110 if (!valid_signal(sig))
1091 return ret; 1111 return ret;
@@ -1096,9 +1116,11 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1096 ret = -ESRCH; 1116 ret = -ESRCH;
1097 goto out_unlock; 1117 goto out_unlock;
1098 } 1118 }
1099 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) 1119 pcred = __task_cred(p);
1100 && (euid != p->suid) && (euid != p->uid) 1120 if ((info == SEND_SIG_NOINFO ||
1101 && (uid != p->suid) && (uid != p->uid)) { 1121 (!is_si_special(info) && SI_FROMUSER(info))) &&
1122 euid != pcred->suid && euid != pcred->uid &&
1123 uid != pcred->suid && uid != pcred->uid) {
1102 ret = -EPERM; 1124 ret = -EPERM;
1103 goto out_unlock; 1125 goto out_unlock;
1104 } 1126 }
@@ -1369,10 +1391,9 @@ int do_notify_parent(struct task_struct *tsk, int sig)
1369 */ 1391 */
1370 rcu_read_lock(); 1392 rcu_read_lock();
1371 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); 1393 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1394 info.si_uid = __task_cred(tsk)->uid;
1372 rcu_read_unlock(); 1395 rcu_read_unlock();
1373 1396
1374 info.si_uid = tsk->uid;
1375
1376 thread_group_cputime(tsk, &cputime); 1397 thread_group_cputime(tsk, &cputime);
1377 info.si_utime = cputime_to_jiffies(cputime.utime); 1398 info.si_utime = cputime_to_jiffies(cputime.utime);
1378 info.si_stime = cputime_to_jiffies(cputime.stime); 1399 info.si_stime = cputime_to_jiffies(cputime.stime);
@@ -1440,10 +1461,9 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1440 */ 1461 */
1441 rcu_read_lock(); 1462 rcu_read_lock();
1442 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); 1463 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1464 info.si_uid = __task_cred(tsk)->uid;
1443 rcu_read_unlock(); 1465 rcu_read_unlock();
1444 1466
1445 info.si_uid = tsk->uid;
1446
1447 info.si_utime = cputime_to_clock_t(tsk->utime); 1467 info.si_utime = cputime_to_clock_t(tsk->utime);
1448 info.si_stime = cputime_to_clock_t(tsk->stime); 1468 info.si_stime = cputime_to_clock_t(tsk->stime);
1449 1469
@@ -1598,7 +1618,7 @@ void ptrace_notify(int exit_code)
1598 info.si_signo = SIGTRAP; 1618 info.si_signo = SIGTRAP;
1599 info.si_code = exit_code; 1619 info.si_code = exit_code;
1600 info.si_pid = task_pid_vnr(current); 1620 info.si_pid = task_pid_vnr(current);
1601 info.si_uid = current->uid; 1621 info.si_uid = current_uid();
1602 1622
1603 /* Let the debugger run. */ 1623 /* Let the debugger run. */
1604 spin_lock_irq(&current->sighand->siglock); 1624 spin_lock_irq(&current->sighand->siglock);
@@ -1710,7 +1730,7 @@ static int ptrace_signal(int signr, siginfo_t *info,
1710 info->si_errno = 0; 1730 info->si_errno = 0;
1711 info->si_code = SI_USER; 1731 info->si_code = SI_USER;
1712 info->si_pid = task_pid_vnr(current->parent); 1732 info->si_pid = task_pid_vnr(current->parent);
1713 info->si_uid = current->parent->uid; 1733 info->si_uid = task_uid(current->parent);
1714 } 1734 }
1715 1735
1716 /* If the (new) signal is now blocked, requeue it. */ 1736 /* If the (new) signal is now blocked, requeue it. */
@@ -2211,7 +2231,7 @@ sys_kill(pid_t pid, int sig)
2211 info.si_errno = 0; 2231 info.si_errno = 0;
2212 info.si_code = SI_USER; 2232 info.si_code = SI_USER;
2213 info.si_pid = task_tgid_vnr(current); 2233 info.si_pid = task_tgid_vnr(current);
2214 info.si_uid = current->uid; 2234 info.si_uid = current_uid();
2215 2235
2216 return kill_something_info(sig, &info, pid); 2236 return kill_something_info(sig, &info, pid);
2217} 2237}
@@ -2228,7 +2248,7 @@ static int do_tkill(pid_t tgid, pid_t pid, int sig)
2228 info.si_errno = 0; 2248 info.si_errno = 0;
2229 info.si_code = SI_TKILL; 2249 info.si_code = SI_TKILL;
2230 info.si_pid = task_tgid_vnr(current); 2250 info.si_pid = task_tgid_vnr(current);
2231 info.si_uid = current->uid; 2251 info.si_uid = current_uid();
2232 2252
2233 rcu_read_lock(); 2253 rcu_read_lock();
2234 p = find_task_by_vpid(pid); 2254 p = find_task_by_vpid(pid);