Diffstat (limited to 'kernel/signal.c')
 kernel/signal.c | 137
 1 file changed, 117 insertions(+), 20 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index d7611f189ef7..08aa5b263f36 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -329,13 +329,20 @@ void __exit_sighand(struct task_struct *tsk)
 	/* Ok, we're done with the signal handlers */
 	tsk->sighand = NULL;
 	if (atomic_dec_and_test(&sighand->count))
-		kmem_cache_free(sighand_cachep, sighand);
+		sighand_free(sighand);
 }
 
 void exit_sighand(struct task_struct *tsk)
 {
 	write_lock_irq(&tasklist_lock);
-	__exit_sighand(tsk);
+	rcu_read_lock();
+	if (tsk->sighand != NULL) {
+		struct sighand_struct *sighand = rcu_dereference(tsk->sighand);
+		spin_lock(&sighand->siglock);
+		__exit_sighand(tsk);
+		spin_unlock(&sighand->siglock);
+	}
+	rcu_read_unlock();
 	write_unlock_irq(&tasklist_lock);
 }
 
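Note: sighand_free() above comes from the companion change to include/linux/sched.h and kernel/fork.c, which is outside this file's diff. A minimal sketch of its likely shape, assuming sighand_struct gained a struct rcu_head member named rcu: the free is deferred through call_rcu(), so a reader that fetched tsk->sighand with rcu_dereference() inside rcu_read_lock() cannot see the slab object recycled underneath it.

void sighand_free_cb(struct rcu_head *rhp)
{
	/* Recover the sighand from its embedded rcu_head; this runs only
	 * after every pre-existing RCU read-side section has ended. */
	struct sighand_struct *sp = container_of(rhp, struct sighand_struct, rcu);

	kmem_cache_free(sighand_cachep, sp);
}

static inline void sighand_free(struct sighand_struct *sp)
{
	call_rcu(&sp->rcu, sighand_free_cb);
}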
@@ -345,19 +352,20 @@ void exit_sighand(struct task_struct *tsk)
 void __exit_signal(struct task_struct *tsk)
 {
 	struct signal_struct * sig = tsk->signal;
-	struct sighand_struct * sighand = tsk->sighand;
+	struct sighand_struct * sighand;
 
 	if (!sig)
 		BUG();
 	if (!atomic_read(&sig->count))
 		BUG();
+	rcu_read_lock();
+	sighand = rcu_dereference(tsk->sighand);
 	spin_lock(&sighand->siglock);
 	posix_cpu_timers_exit(tsk);
 	if (atomic_dec_and_test(&sig->count)) {
 		posix_cpu_timers_exit_group(tsk);
-		if (tsk == sig->curr_target)
-			sig->curr_target = next_thread(tsk);
 		tsk->signal = NULL;
+		__exit_sighand(tsk);
 		spin_unlock(&sighand->siglock);
 		flush_sigqueue(&sig->shared_pending);
 	} else {
@@ -389,9 +397,11 @@ void __exit_signal(struct task_struct *tsk)
 		sig->nvcsw += tsk->nvcsw;
 		sig->nivcsw += tsk->nivcsw;
 		sig->sched_time += tsk->sched_time;
+		__exit_sighand(tsk);
 		spin_unlock(&sighand->siglock);
 		sig = NULL;	/* Marker for below. */
 	}
+	rcu_read_unlock();
 	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
 	flush_sigqueue(&tsk->pending);
 	if (sig) {
@@ -613,6 +623,33 @@ void signal_wake_up(struct task_struct *t, int resume)
  * Returns 1 if any signals were found.
  *
  * All callers must be holding the siglock.
+ *
+ * This version takes a sigset mask and looks at all signals,
+ * not just those in the first mask word.
+ */
+static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
+{
+	struct sigqueue *q, *n;
+	sigset_t m;
+
+	sigandsets(&m, mask, &s->signal);
+	if (sigisemptyset(&m))
+		return 0;
+
+	signandsets(&s->signal, &s->signal, mask);
+	list_for_each_entry_safe(q, n, &s->list, list) {
+		if (sigismember(mask, q->info.si_signo)) {
+			list_del_init(&q->list);
+			__sigqueue_free(q);
+		}
+	}
+	return 1;
+}
+/*
+ * Remove signals in mask from the pending set and queue.
+ * Returns 1 if any signals were found.
+ *
+ * All callers must be holding the siglock.
  */
 static int rm_from_queue(unsigned long mask, struct sigpending *s)
 {
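Note on why the _full variant is needed: rm_from_queue() above takes an unsigned long bitmask such as sigmask(sig), which can only describe signals in the first mask word, so real-time signals (above 32 on 32-bit targets) are unreachable through it. A hedged usage sketch with a hypothetical caller name; the patch's real callers appear in the do_sigaction() hunk at the end of this diff, and the appropriate siglock must be held:

static void flush_queued_rt_signal(struct sigpending *pending, int rtsig)
{
	sigset_t mask;

	sigemptyset(&mask);
	sigaddset(&mask, rtsig);		/* e.g. SIGRTMIN + 3, beyond word 0 */
	rm_from_queue_full(&mask, pending);	/* caller holds ->siglock */
}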
@@ -1080,18 +1117,29 @@ void zap_other_threads(struct task_struct *p)
 }
 
 /*
- * Must be called with the tasklist_lock held for reading!
+ * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
 	unsigned long flags;
+	struct sighand_struct *sp;
 	int ret;
 
+retry:
 	ret = check_kill_permission(sig, info, p);
-	if (!ret && sig && p->sighand) {
-		spin_lock_irqsave(&p->sighand->siglock, flags);
+	if (!ret && sig && (sp = rcu_dereference(p->sighand))) {
+		spin_lock_irqsave(&sp->siglock, flags);
+		if (p->sighand != sp) {
+			spin_unlock_irqrestore(&sp->siglock, flags);
+			goto retry;
+		}
+		if ((atomic_read(&sp->count) == 0) ||
+		    (atomic_read(&p->usage) == 0)) {
+			spin_unlock_irqrestore(&sp->siglock, flags);
+			return -ESRCH;
+		}
 		ret = __group_send_sig_info(sig, info, p);
-		spin_unlock_irqrestore(&p->sighand->siglock, flags);
+		spin_unlock_irqrestore(&sp->siglock, flags);
 	}
 
 	return ret;
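Note: the dereference/lock/recheck/retry sequence above is the central idiom of this patch. exec() in a multithreaded process can replace tsk->sighand between the rcu_dereference() and the spin_lock_irqsave(), so the pointer must be rechecked under the lock. Factored out, the idiom would look like the sketch below (hypothetical helper name; the patch open-codes it at each call site):

static struct sighand_struct *lock_task_sighand_sketch(struct task_struct *p,
							unsigned long *flags)
{
	struct sighand_struct *sp;

	for (;;) {
		sp = rcu_dereference(p->sighand);
		if (sp == NULL)
			return NULL;		/* task already detached it */
		spin_lock_irqsave(&sp->siglock, *flags);
		if (likely(p->sighand == sp))
			return sp;		/* locked and still current */
		/* Raced with exec(); drop the stale lock and retry. */
		spin_unlock_irqrestore(&sp->siglock, *flags);
	}
}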
@@ -1136,14 +1184,21 @@ int
 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
 {
 	int error;
+	int acquired_tasklist_lock = 0;
 	struct task_struct *p;
 
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
+	if (unlikely(sig_kernel_stop(sig) || sig == SIGCONT)) {
+		read_lock(&tasklist_lock);
+		acquired_tasklist_lock = 1;
+	}
 	p = find_task_by_pid(pid);
 	error = -ESRCH;
 	if (p)
 		error = group_send_sig_info(sig, info, p);
-	read_unlock(&tasklist_lock);
+	if (unlikely(acquired_tasklist_lock))
+		read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 	return error;
 }
1149 1204
@@ -1163,8 +1218,7 @@ int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
 		ret = -ESRCH;
 		goto out_unlock;
 	}
-	if ((!info || ((unsigned long)info != 1 &&
-			(unsigned long)info != 2 && SI_FROMUSER(info)))
+	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
 	    && (euid != p->suid) && (euid != p->uid)
 	    && (uid != p->suid) && (uid != p->uid)) {
 		ret = -EPERM;
@@ -1355,16 +1409,54 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 {
 	unsigned long flags;
 	int ret = 0;
+	struct sighand_struct *sh;
 
 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
-	read_lock(&tasklist_lock);
+
+	/*
+	 * The RCU-based delayed sighand destroy makes it possible to
+	 * run this without the tasklist lock held. The task struct
+	 * itself cannot go away, as create_timer did get_task_struct().
+	 *
+	 * We return -1 when the task is marked exiting, so
+	 * posix_timer_event can redirect it to the group leader.
+	 */
+	rcu_read_lock();
 
 	if (unlikely(p->flags & PF_EXITING)) {
 		ret = -1;
 		goto out_err;
 	}
 
-	spin_lock_irqsave(&p->sighand->siglock, flags);
+retry:
+	sh = rcu_dereference(p->sighand);
+
+	spin_lock_irqsave(&sh->siglock, flags);
+	if (p->sighand != sh) {
+		/* We raced with exec() in a multithreaded process... */
+		spin_unlock_irqrestore(&sh->siglock, flags);
+		goto retry;
+	}
+
+	/*
+	 * We do the check here again to handle the following scenario:
+	 *
+	 * CPU 0			CPU 1
+	 * send_sigqueue
+	 *    check PF_EXITING
+	 *				interrupt exit code running
+	 *				__exit_signal
+	 *				lock sighand->siglock
+	 *				unlock sighand->siglock
+	 *    lock sh->siglock
+	 *    add(tsk->pending)		flush_sigqueue(tsk->pending)
+	 *
+	 */
+
+	if (unlikely(p->flags & PF_EXITING)) {
+		ret = -1;
+		goto out;
+	}
 
 	if (unlikely(!list_empty(&q->list))) {
 		/*
@@ -1388,9 +1480,9 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 	signal_wake_up(p, sig == SIGKILL);
 
 out:
-	spin_unlock_irqrestore(&p->sighand->siglock, flags);
+	spin_unlock_irqrestore(&sh->siglock, flags);
 out_err:
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 
 	return ret;
 }
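Note: the comment in this hunk relies on a lifetime split worth spelling out: RCU only has to keep the sighand_struct valid, while the task_struct itself cannot be freed because the posix-timer code pinned it when the timer was created. A sketch of that pinning under 2.6-era names (the real code lives in kernel/posix-timers.c; treat the helper name as hypothetical):

static void posix_timer_pin_target(struct k_itimer *timr, struct task_struct *p)
{
	get_task_struct(p);	/* bumps p->usage; the matching
				 * put_task_struct() runs at timer deletion */
	timr->it_process = p;	/* send_sigqueue() may now run without
				 * tasklist_lock pinning the task */
}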
@@ -1402,7 +1494,9 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 	int ret = 0;
 
 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
+
 	read_lock(&tasklist_lock);
+	/* Since it_lock is held, p->sighand cannot be NULL. */
 	spin_lock_irqsave(&p->sighand->siglock, flags);
 	handle_stop_signal(sig, p);
 
@@ -1436,7 +1530,7 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 out:
 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
 	read_unlock(&tasklist_lock);
-	return(ret);
+	return ret;
 }
 
 /*
@@ -2338,6 +2432,7 @@ int
 do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
 {
 	struct k_sigaction *k;
+	sigset_t mask;
 
 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
 		return -EINVAL;
@@ -2385,9 +2480,11 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
 		*k = *act;
 		sigdelsetmask(&k->sa.sa_mask,
 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
-		rm_from_queue(sigmask(sig), &t->signal->shared_pending);
+		sigemptyset(&mask);
+		sigaddset(&mask, sig);
+		rm_from_queue_full(&mask, &t->signal->shared_pending);
 		do {
-			rm_from_queue(sigmask(sig), &t->pending);
+			rm_from_queue_full(&mask, &t->pending);
 			recalc_sigpending_tsk(t);
 			t = next_thread(t);
 		} while (t != current);