Diffstat (limited to 'kernel/signal.c')
 -rw-r--r--  kernel/signal.c | 179
 1 file changed, 147 insertions(+), 32 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 80789a59b4db..d3efafd8109a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -25,6 +25,7 @@
 #include <linux/posix-timers.h>
 #include <linux/signal.h>
 #include <linux/audit.h>
+#include <linux/capability.h>
 #include <asm/param.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -329,13 +330,20 @@ void __exit_sighand(struct task_struct *tsk)
 	/* Ok, we're done with the signal handlers */
 	tsk->sighand = NULL;
 	if (atomic_dec_and_test(&sighand->count))
-		kmem_cache_free(sighand_cachep, sighand);
+		sighand_free(sighand);
 }
 
 void exit_sighand(struct task_struct *tsk)
 {
 	write_lock_irq(&tasklist_lock);
-	__exit_sighand(tsk);
+	rcu_read_lock();
+	if (tsk->sighand != NULL) {
+		struct sighand_struct *sighand = rcu_dereference(tsk->sighand);
+		spin_lock(&sighand->siglock);
+		__exit_sighand(tsk);
+		spin_unlock(&sighand->siglock);
+	}
+	rcu_read_unlock();
 	write_unlock_irq(&tasklist_lock);
 }
 
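Note on the sighand_free() switch above: kmem_cache_free() returns the memory
immediately, while sighand_free() defers the free until an RCU grace period has
elapsed, so a lockless reader that fetched tsk->sighand under rcu_read_lock()
can still safely take siglock on the old structure. A crude userspace model of
that deferral -- pthreads standing in for the kernel primitives, all names
hypothetical, and the grace period made synchronous for clarity:

	#include <pthread.h>
	#include <stdlib.h>

	static pthread_mutex_t gp_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t gp_done = PTHREAD_COND_INITIALIZER;
	static int readers;			/* live "RCU" readers */

	static void reader_lock(void)		/* models rcu_read_lock() */
	{
		pthread_mutex_lock(&gp_lock);
		readers++;
		pthread_mutex_unlock(&gp_lock);
	}

	static void reader_unlock(void)		/* models rcu_read_unlock() */
	{
		pthread_mutex_lock(&gp_lock);
		if (--readers == 0)
			pthread_cond_broadcast(&gp_done);
		pthread_mutex_unlock(&gp_lock);
	}

	static void deferred_free(void *p)	/* models sighand_free() */
	{
		/* Wait until no reader can still hold a stale pointer --
		 * a blocking stand-in for a real grace period. */
		pthread_mutex_lock(&gp_lock);
		while (readers > 0)
			pthread_cond_wait(&gp_done, &gp_lock);
		pthread_mutex_unlock(&gp_lock);
		free(p);
	}

The kernel version is asynchronous (a callback queued with call_rcu() rather
than a blocking wait), but the invariant is the same: the object outlives every
reader that could have observed it.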
@@ -345,19 +353,20 @@ void exit_sighand(struct task_struct *tsk)
 void __exit_signal(struct task_struct *tsk)
 {
 	struct signal_struct * sig = tsk->signal;
-	struct sighand_struct * sighand = tsk->sighand;
+	struct sighand_struct * sighand;
 
 	if (!sig)
 		BUG();
 	if (!atomic_read(&sig->count))
 		BUG();
+	rcu_read_lock();
+	sighand = rcu_dereference(tsk->sighand);
 	spin_lock(&sighand->siglock);
 	posix_cpu_timers_exit(tsk);
 	if (atomic_dec_and_test(&sig->count)) {
 		posix_cpu_timers_exit_group(tsk);
-		if (tsk == sig->curr_target)
-			sig->curr_target = next_thread(tsk);
 		tsk->signal = NULL;
+		__exit_sighand(tsk);
 		spin_unlock(&sighand->siglock);
 		flush_sigqueue(&sig->shared_pending);
 	} else {
@@ -389,9 +398,11 @@ void __exit_signal(struct task_struct *tsk)
 		sig->nvcsw += tsk->nvcsw;
 		sig->nivcsw += tsk->nivcsw;
 		sig->sched_time += tsk->sched_time;
+		__exit_sighand(tsk);
 		spin_unlock(&sighand->siglock);
 		sig = NULL;	/* Marker for below.  */
 	}
+	rcu_read_unlock();
 	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
 	flush_sigqueue(&tsk->pending);
 	if (sig) {
@@ -465,7 +476,7 @@ unblock_all_signals(void)
 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
 }
 
-static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 {
 	struct sigqueue *q, *first = NULL;
 	int still_pending = 0;
@@ -513,16 +524,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 {
 	int sig = 0;
 
-	/* SIGKILL must have priority, otherwise it is quite easy
-	 * to create an unkillable process, sending sig < SIGKILL
-	 * to self */
-	if (unlikely(sigismember(&pending->signal, SIGKILL))) {
-		if (!sigismember(mask, SIGKILL))
-			sig = SIGKILL;
-	}
-
-	if (likely(!sig))
-		sig = next_signal(pending, mask);
+	sig = next_signal(pending, mask);
 	if (sig) {
 		if (current->notifier) {
 			if (sigismember(current->notifier_mask, sig)) {
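With the hand-rolled SIGKILL fast path gone, __dequeue_signal() leans entirely
on next_signal() to pick which pending, unblocked signal to deliver. For
reference, a simplified userspace model of that word-by-word, lowest-bit-first
scan (the two-word set, helper name, and use of the GCC __builtin_ctzl builtin
are illustrative, not the kernel's actual types):

	#include <stddef.h>

	#define BITS_PER_LONG	(8 * sizeof(unsigned long))
	#define SET_WORDS	2	/* enough room for 64+ signals */

	struct sigset_model {
		unsigned long word[SET_WORDS];
	};

	/*
	 * Return the lowest-numbered signal that is pending and not
	 * blocked, or 0 if there is none (signals count from 1).
	 */
	static int next_signal_model(const struct sigset_model *pending,
				     const struct sigset_model *blocked)
	{
		for (size_t w = 0; w < SET_WORDS; w++) {
			unsigned long x = pending->word[w] & ~blocked->word[w];

			if (x)	/* lowest set bit wins within a word */
				return (int)(w * BITS_PER_LONG)
					+ __builtin_ctzl(x) + 1;
		}
		return 0;
	}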
@@ -622,6 +624,33 @@ void signal_wake_up(struct task_struct *t, int resume)
  * Returns 1 if any signals were found.
  *
  * All callers must be holding the siglock.
+ *
+ * This version takes a sigset mask and looks at all signals,
+ * not just those in the first mask word.
+ */
+static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
+{
+	struct sigqueue *q, *n;
+	sigset_t m;
+
+	sigandsets(&m, mask, &s->signal);
+	if (sigisemptyset(&m))
+		return 0;
+
+	signandsets(&s->signal, &s->signal, mask);
+	list_for_each_entry_safe(q, n, &s->list, list) {
+		if (sigismember(mask, q->info.si_signo)) {
+			list_del_init(&q->list);
+			__sigqueue_free(q);
+		}
+	}
+	return 1;
+}
+/*
+ * Remove signals in mask from the pending set and queue.
+ * Returns 1 if any signals were found.
+ *
+ * All callers must be holding the siglock.
  */
 static int rm_from_queue(unsigned long mask, struct sigpending *s)
 {
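rm_from_queue_full() exists because the older rm_from_queue() takes an
unsigned long mask and so can only describe signals in the first mask word; a
full sigset_t also covers the real-time range. A self-contained userspace
model of the same two-step operation -- clear the pending bits, then drop the
matching queued entries -- using simplified stand-in types:

	#include <signal.h>
	#include <stdlib.h>

	struct queued_sig {
		int signo;
		struct queued_sig *next;
	};

	struct pending_model {
		sigset_t signal;		/* bitmask of pending signals */
		struct queued_sig *head;	/* queued siginfo entries */
	};

	/* Returns 1 if any signal in `mask` was pending, else 0. */
	static int rm_from_queue_full_model(const sigset_t *mask,
					    struct pending_model *s)
	{
		struct queued_sig **pp = &s->head;
		int found = 0;

		for (int sig = 1; sig < NSIG; sig++) {
			if (sigismember(mask, sig) &&
			    sigismember(&s->signal, sig)) {
				sigdelset(&s->signal, sig);
				found = 1;
			}
		}
		if (!found)
			return 0;

		while (*pp) {
			struct queued_sig *q = *pp;

			if (sigismember(mask, q->signo)) {
				*pp = q->next;	/* unlink, like list_del_init() */
				free(q);	/* like __sigqueue_free() */
			} else {
				pp = &q->next;
			}
		}
		return 1;
	}

do_sigaction() further down is the first caller: it builds a one-signal set
with sigemptyset()/sigaddset() and flushes that signal from every thread.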
@@ -1089,18 +1118,29 @@ void zap_other_threads(struct task_struct *p)
 }
 
 /*
- * Must be called with the tasklist_lock held for reading!
+ * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
 	unsigned long flags;
+	struct sighand_struct *sp;
 	int ret;
 
+retry:
 	ret = check_kill_permission(sig, info, p);
-	if (!ret && sig && p->sighand) {
-		spin_lock_irqsave(&p->sighand->siglock, flags);
+	if (!ret && sig && (sp = rcu_dereference(p->sighand))) {
+		spin_lock_irqsave(&sp->siglock, flags);
+		if (p->sighand != sp) {
+			spin_unlock_irqrestore(&sp->siglock, flags);
+			goto retry;
+		}
+		if ((atomic_read(&sp->count) == 0) ||
+		    (atomic_read(&p->usage) == 0)) {
+			spin_unlock_irqrestore(&sp->siglock, flags);
+			return -ESRCH;
+		}
 		ret = __group_send_sig_info(sig, info, p);
-		spin_unlock_irqrestore(&p->sighand->siglock, flags);
+		spin_unlock_irqrestore(&sp->siglock, flags);
 	}
 
 	return ret;
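The retry loop above is the core of lockless signal sending: snapshot
p->sighand under RCU, lock it, then re-check that the task still uses that
sighand (exec() in a multithreaded process can swap it underneath us); drop
the lock and retry on a race, and report -ESRCH once the reference counts show
the task is gone. A userspace model of the lock-and-revalidate pattern, with
mutexes for siglock and an atomic pointer for tsk->sighand; the model assumes
state objects are never freed, which is exactly what RCU guarantees the kernel
for the duration of the window:

	#include <pthread.h>
	#include <stddef.h>

	struct state {
		pthread_mutex_t lock;	/* models sighand->siglock */
		/* ... per-object signal state ... */
	};

	struct task_model {
		struct state *cur;	/* models tsk->sighand; may be
					   swapped by another thread */
	};

	/* Lock the task's current state; return it locked, or NULL. */
	static struct state *lock_task_state(struct task_model *t)
	{
		struct state *s;

		for (;;) {
			s = __atomic_load_n(&t->cur, __ATOMIC_ACQUIRE);
			if (s == NULL)
				return NULL;	/* task already torn down */
			pthread_mutex_lock(&s->lock);
			if (__atomic_load_n(&t->cur, __ATOMIC_ACQUIRE) == s)
				return s;	/* pointer still current */
			pthread_mutex_unlock(&s->lock);	/* lost a race; retry */
		}
	}

Later kernels wrap this same dance in a helper, lock_task_sighand().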
@@ -1145,14 +1185,21 @@ int
 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
 {
 	int error;
+	int acquired_tasklist_lock = 0;
 	struct task_struct *p;
 
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
+	if (unlikely(sig_kernel_stop(sig) || sig == SIGCONT)) {
+		read_lock(&tasklist_lock);
+		acquired_tasklist_lock = 1;
+	}
 	p = find_task_by_pid(pid);
 	error = -ESRCH;
 	if (p)
 		error = group_send_sig_info(sig, info, p);
-	read_unlock(&tasklist_lock);
+	if (unlikely(acquired_tasklist_lock))
+		read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 	return error;
 }
 
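kill_proc_info() now takes tasklist_lock only for the signals whose group-wide
side effects still need it; everything else goes out under rcu_read_lock()
alone. The predicate, spelled out with the classic signal numbers -- a sketch
of the sig_kernel_stop() set plus the SIGCONT case, not the kernel macro
itself:

	#include <signal.h>

	/* 1 if sending `sig` still requires read-holding tasklist_lock. */
	static int needs_tasklist_lock(int sig)
	{
		switch (sig) {
		case SIGSTOP:	/* the sig_kernel_stop() set */
		case SIGTSTP:
		case SIGTTIN:
		case SIGTTOU:
		case SIGCONT:	/* wakes the whole stopped group */
			return 1;
		default:
			return 0;
		}
	}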
@@ -1172,8 +1219,7 @@ int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
 		ret = -ESRCH;
 		goto out_unlock;
 	}
-	if ((!info || ((unsigned long)info != 1 &&
-			(unsigned long)info != 2 && SI_FROMUSER(info)))
+	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
 	    && (euid != p->suid) && (euid != p->uid)
 	    && (uid != p->suid) && (uid != p->uid)) {
 		ret = -EPERM;
@@ -1364,16 +1410,54 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 {
 	unsigned long flags;
 	int ret = 0;
+	struct sighand_struct *sh;
 
 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
-	read_lock(&tasklist_lock);
+
+	/*
+	 * The rcu based delayed sighand destroy makes it possible to
+	 * run this without tasklist lock held. The task struct itself
+	 * cannot go away as create_timer did get_task_struct().
+	 *
+	 * We return -1, when the task is marked exiting, so
+	 * posix_timer_event can redirect it to the group leader
+	 */
+	rcu_read_lock();
 
 	if (unlikely(p->flags & PF_EXITING)) {
 		ret = -1;
 		goto out_err;
 	}
 
-	spin_lock_irqsave(&p->sighand->siglock, flags);
+retry:
+	sh = rcu_dereference(p->sighand);
+
+	spin_lock_irqsave(&sh->siglock, flags);
+	if (p->sighand != sh) {
+		/* We raced with exec() in a multithreaded process... */
+		spin_unlock_irqrestore(&sh->siglock, flags);
+		goto retry;
+	}
+
+	/*
+	 * We do the check here again to handle the following scenario:
+	 *
+	 * CPU 0		CPU 1
+	 * send_sigqueue
+	 * check PF_EXITING
+	 *			interrupt exit code running
+	 *			__exit_signal
+	 *			lock sighand->siglock
+	 *			unlock sighand->siglock
+	 * lock sh->siglock
+	 * add(tsk->pending)	flush_sigqueue(tsk->pending)
+	 *
+	 */
+
+	if (unlikely(p->flags & PF_EXITING)) {
+		ret = -1;
+		goto out;
+	}
 
 	if (unlikely(!list_empty(&q->list))) {
 		/*
@@ -1397,9 +1481,9 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 	signal_wake_up(p, sig == SIGKILL);
 
 out:
-	spin_unlock_irqrestore(&p->sighand->siglock, flags);
+	spin_unlock_irqrestore(&sh->siglock, flags);
 out_err:
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 
 	return ret;
 }
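The PF_EXITING double check in send_sigqueue() deserves a closer look: the
first, unlocked test is only an optimization, and the re-check under
sh->siglock is what actually closes the race in the CPU 0/CPU 1 diagram,
because __exit_signal() sets the exit state and flushes the pending queue
under that same lock. A userspace model of the idiom, with hypothetical names:

	#include <pthread.h>
	#include <stdbool.h>

	struct target {
		pthread_mutex_t lock;	/* models sighand->siglock */
		bool exiting;		/* set by the exit path under lock */
		int pending;		/* stands in for the sigqueue list */
	};

	static int deliver(struct target *t)
	{
		if (__atomic_load_n(&t->exiting, __ATOMIC_RELAXED))
			return -1;		/* cheap early out, not final */

		pthread_mutex_lock(&t->lock);
		if (t->exiting) {		/* authoritative re-check */
			pthread_mutex_unlock(&t->lock);
			return -1;
		}
		t->pending++;			/* safe: the exit path cannot
						   flush concurrently while we
						   hold the lock */
		pthread_mutex_unlock(&t->lock);
		return 0;
	}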
@@ -1411,7 +1495,9 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 	int ret = 0;
 
 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
+
 	read_lock(&tasklist_lock);
+	/* Since it_lock is held, p->sighand cannot be NULL. */
 	spin_lock_irqsave(&p->sighand->siglock, flags);
 	handle_stop_signal(sig, p);
 
@@ -1445,7 +1531,7 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 out:
 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
 	read_unlock(&tasklist_lock);
-	return(ret);
+	return ret;
 }
 
 /*
@@ -1795,7 +1881,7 @@ do_signal_stop(int signr)
  * We return zero if we still hold the siglock and should look
  * for another signal without checking group_stop_count again.
  */
-static inline int handle_group_stop(void)
+static int handle_group_stop(void)
 {
 	int stop_count;
 
@@ -2347,6 +2433,7 @@ int
 do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
 {
 	struct k_sigaction *k;
+	sigset_t mask;
 
 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
 		return -EINVAL;
@@ -2394,9 +2481,11 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
 		*k = *act;
 		sigdelsetmask(&k->sa.sa_mask,
 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
-		rm_from_queue(sigmask(sig), &t->signal->shared_pending);
+		sigemptyset(&mask);
+		sigaddset(&mask, sig);
+		rm_from_queue_full(&mask, &t->signal->shared_pending);
 		do {
-			rm_from_queue(sigmask(sig), &t->pending);
+			rm_from_queue_full(&mask, &t->pending);
 			recalc_sigpending_tsk(t);
 			t = next_thread(t);
 		} while (t != current);
@@ -2632,6 +2721,32 @@ sys_pause(void)
 
 #endif
 
+#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
+asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
+{
+	sigset_t newset;
+
+	/* XXX: Don't preclude handling different sized sigset_t's.  */
+	if (sigsetsize != sizeof(sigset_t))
+		return -EINVAL;
+
+	if (copy_from_user(&newset, unewset, sizeof(newset)))
+		return -EFAULT;
+	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
+
+	spin_lock_irq(&current->sighand->siglock);
+	current->saved_sigmask = current->blocked;
+	current->blocked = newset;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	current->state = TASK_INTERRUPTIBLE;
+	schedule();
+	set_thread_flag(TIF_RESTORE_SIGMASK);
+	return -ERESTARTNOHAND;
+}
+#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
+
 void __init signals_init(void)
 {
 	sigqueue_cachep =
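The new generic sys_rt_sigsuspend() gives every architecture the atomic
swap-mask-and-sleep semantics, with saved_sigmask and TIF_RESTORE_SIGMASK
putting the old mask back once the handler has run. From userspace the call is
normally reached through glibc's sigsuspend(); a minimal program showing the
race-free wait it enables:

	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>

	static volatile sig_atomic_t got_usr1;

	static void on_usr1(int sig)
	{
		(void)sig;
		got_usr1 = 1;
	}

	int main(void)
	{
		struct sigaction sa = { .sa_handler = on_usr1 };
		sigset_t block, waitmask;

		sigaction(SIGUSR1, &sa, NULL);

		/* Block SIGUSR1 outside the wait so it can only be
		 * delivered inside sigsuspend() -- no window in which a
		 * wakeup can be lost between the check and the sleep. */
		sigemptyset(&block);
		sigaddset(&block, SIGUSR1);
		sigprocmask(SIG_BLOCK, &block, &waitmask);
		sigdelset(&waitmask, SIGUSR1);

		printf("pid %d: send me SIGUSR1\n", (int)getpid());
		while (!got_usr1)
			sigsuspend(&waitmask);	/* old mask restored on return */

		puts("got it");
		return 0;
	}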