 kernel/itimer.c | 14
 kernel/signal.c | 58
 2 files changed, 47 insertions(+), 25 deletions(-)
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 4fc6c0caf5d4..307c6a632ef6 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -135,11 +135,6 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer)
 
         send_group_sig_info(SIGALRM, SEND_SIG_PRIV, sig->tsk);
 
-        if (sig->it_real_incr.tv64 != 0) {
-                hrtimer_forward(timer, hrtimer_cb_get_time(timer),
-                                sig->it_real_incr);
-                return HRTIMER_RESTART;
-        }
         return HRTIMER_NORESTART;
 }
 
@@ -231,11 +226,14 @@ again:
                         spin_unlock_irq(&tsk->sighand->siglock);
                         goto again;
                 }
-                tsk->signal->it_real_incr =
-                        timeval_to_ktime(value->it_interval);
                 expires = timeval_to_ktime(value->it_value);
-                if (expires.tv64 != 0)
+                if (expires.tv64 != 0) {
+                        tsk->signal->it_real_incr =
+                                timeval_to_ktime(value->it_interval);
                         hrtimer_start(timer, expires, HRTIMER_MODE_REL);
+                } else
+                        tsk->signal->it_real_incr.tv64 = 0;
+
                 spin_unlock_irq(&tsk->sighand->siglock);
                 break;
         case ITIMER_VIRTUAL:
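
Aside (not part of the patch): the userspace semantics being preserved here are those of setitimer(2). A non-zero it_interval keeps ITIMER_REAL periodic, each expiry delivering one SIGALRM, while an all-zero value disarms the timer, which is what the new else branch records by zeroing it_real_incr. A minimal illustrative program against the standard POSIX API, a sketch for context rather than code from this commit:

/* Hypothetical demo, not part of this patch: arm a periodic ITIMER_REAL
 * and count the SIGALRM deliveries generated by the re-armed timer. */
#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

static volatile sig_atomic_t ticks;

static void on_alarm(int sig)
{
        (void)sig;
        ticks++;                /* one SIGALRM per timer expiry */
}

int main(void)
{
        struct sigaction sa = { .sa_handler = on_alarm };
        struct itimerval it = {
                .it_value    = { .tv_sec = 0, .tv_usec = 100000 }, /* first expiry after 100ms */
                .it_interval = { .tv_sec = 0, .tv_usec = 100000 }, /* re-arm every 100ms */
        };
        struct itimerval off = { { 0, 0 }, { 0, 0 } };

        sigaction(SIGALRM, &sa, NULL);
        setitimer(ITIMER_REAL, &it, NULL);

        while (ticks < 5)
                pause();        /* returns after each SIGALRM is handled */

        /* An all-zero itimerval disarms the timer (the it_real_incr = 0 case above). */
        setitimer(ITIMER_REAL, &off, NULL);

        printf("received %d SIGALRMs\n", (int)ticks);
        return 0;
}
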
diff --git a/kernel/signal.c b/kernel/signal.c
index 8072e568bbe0..e2a7d4bf7d57 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -456,26 +456,50 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 {
         int signr = __dequeue_signal(&tsk->pending, mask, info);
-        if (!signr)
+        if (!signr) {
                 signr = __dequeue_signal(&tsk->signal->shared_pending,
                                          mask, info);
+                /*
+                 * itimer signal ?
+                 *
+                 * itimers are process shared and we restart periodic
+                 * itimers in the signal delivery path to prevent DoS
+                 * attacks in the high resolution timer case. This is
+                 * compliant with the old way of self restarting
+                 * itimers, as the SIGALRM is a legacy signal and only
+                 * queued once. Changing the restart behaviour to
+                 * restart the timer in the signal dequeue path is
+                 * reducing the timer noise on heavy loaded !highres
+                 * systems too.
+                 */
+                if (unlikely(signr == SIGALRM)) {
+                        struct hrtimer *tmr = &tsk->signal->real_timer;
+
+                        if (!hrtimer_is_queued(tmr) &&
+                            tsk->signal->it_real_incr.tv64 != 0) {
+                                hrtimer_forward(tmr, tmr->base->get_time(),
+                                                tsk->signal->it_real_incr);
+                                hrtimer_restart(tmr);
+                        }
+                }
+        }
         recalc_sigpending_tsk(tsk);
         if (signr && unlikely(sig_kernel_stop(signr))) {
                 /*
                  * Set a marker that we have dequeued a stop signal. Our
                  * caller might release the siglock and then the pending
                  * stop signal it is about to process is no longer in the
                  * pending bitmasks, but must still be cleared by a SIGCONT
                  * (and overruled by a SIGKILL). So those cases clear this
                  * shared flag after we've set it. Note that this flag may
                  * remain set after the signal we return is ignored or
                  * handled. That doesn't matter because its only purpose
                  * is to alert stop-signal processing code when another
                  * processor has come along and cleared the flag.
                  */
                 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
                         tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
         }
         if ( signr &&
              ((info->si_code & __SI_MASK) == __SI_TIMER) &&
              info->si_sys_private){