Diffstat (limited to 'kernel/signal.c')
-rw-r--r--  kernel/signal.c  58
1 file changed, 41 insertions(+), 17 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 8072e568bbe0..e2a7d4bf7d57 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -456,26 +456,50 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 {
 	int signr = __dequeue_signal(&tsk->pending, mask, info);
-	if (!signr)
+	if (!signr) {
 		signr = __dequeue_signal(&tsk->signal->shared_pending,
 					 mask, info);
+		/*
+		 * itimer signal ?
+		 *
+		 * itimers are process shared and we restart periodic
+		 * itimers in the signal delivery path to prevent DoS
+		 * attacks in the high resolution timer case. This is
+		 * compliant with the old way of self restarting
+		 * itimers, as the SIGALRM is a legacy signal and only
+		 * queued once. Changing the restart behaviour to
+		 * restart the timer in the signal dequeue path is
+		 * reducing the timer noise on heavy loaded !highres
+		 * systems too.
+		 */
+		if (unlikely(signr == SIGALRM)) {
+			struct hrtimer *tmr = &tsk->signal->real_timer;
+
+			if (!hrtimer_is_queued(tmr) &&
+			    tsk->signal->it_real_incr.tv64 != 0) {
+				hrtimer_forward(tmr, tmr->base->get_time(),
+						tsk->signal->it_real_incr);
+				hrtimer_restart(tmr);
+			}
+		}
+	}
 	recalc_sigpending_tsk(tsk);
 	if (signr && unlikely(sig_kernel_stop(signr))) {
 		/*
 		 * Set a marker that we have dequeued a stop signal. Our
 		 * caller might release the siglock and then the pending
 		 * stop signal it is about to process is no longer in the
 		 * pending bitmasks, but must still be cleared by a SIGCONT
 		 * (and overruled by a SIGKILL). So those cases clear this
 		 * shared flag after we've set it. Note that this flag may
 		 * remain set after the signal we return is ignored or
 		 * handled. That doesn't matter because its only purpose
 		 * is to alert stop-signal processing code when another
 		 * processor has come along and cleared the flag.
 		 */
 		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
 			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
 	}
 	if ( signr &&
 	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
 	     info->si_sys_private){
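
For illustration only, and not part of this commit: the path changed by the hunk above is exercised by an ordinary periodic ITIMER_REAL. The hypothetical userspace sketch below arms a 100 ms interval timer with setitimer() and counts SIGALRM deliveries; with the hunk applied, the kernel forwards and restarts the underlying hrtimer when SIGALRM is dequeued, which, as the in-diff comment explains, is what throttles a pending-but-undelivered SIGALRM in the high resolution timer case.

/*
 * Illustrative only -- hypothetical userspace test, not part of the patch.
 * Arms a periodic ITIMER_REAL; each expiry queues SIGALRM, and with the
 * change above the periodic timer is re-armed when the signal is dequeued.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>

static volatile sig_atomic_t ticks;

static void on_alarm(int sig)
{
	(void)sig;
	ticks++;		/* async-signal-safe: just count deliveries */
}

int main(void)
{
	struct sigaction sa;
	struct itimerval it = {
		.it_value    = { .tv_sec = 0, .tv_usec = 100000 },	/* first expiry: 100 ms */
		.it_interval = { .tv_sec = 0, .tv_usec = 100000 },	/* then every 100 ms */
	};

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_alarm;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGALRM, &sa, NULL);

	setitimer(ITIMER_REAL, &it, NULL);

	while (ticks < 10)
		pause();	/* each wakeup follows one dequeued SIGALRM */

	printf("received %d SIGALRM deliveries\n", (int)ticks);
	return 0;
}

The loop blocks in pause(), so every iteration corresponds to a signal that was actually dequeued in dequeue_signal(), i.e. exactly where the new hrtimer_forward()/hrtimer_restart() calls run.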