diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2017-05-30 17:15:42 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2017-06-04 09:40:24 -0400 |
commit | af888d677a3f4473c198b4720319dd037f398b51 (patch) | |
tree | 4b9dafa3a2c39f3fd6809cd53bc434357fc601a2 | |
parent | bab0aae9dcba9466dcc968b8bd21914f8f691631 (diff) |
posix-timers: Unify overrun/requeue_pending handling
hrtimer-based posix-timers and posix-cpu-timers handle the update of the
rearming and overflow-related status fields differently.
Move that update to the common rearming code.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: John Stultz <john.stultz@linaro.org>
Link: http://lkml.kernel.org/r/20170530211656.484936964@linutronix.de
-rw-r--r-- | kernel/time/posix-cpu-timers.c | 18 | ||||
-rw-r--r-- | kernel/time/posix-timers.c | 15 |
2 files changed, 15 insertions, 18 deletions
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index a77a792f2570..1683e503179e 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c | |||
@@ -527,6 +527,7 @@ static void cpu_timer_fire(struct k_itimer *timer) | |||
527 | * ticking in case the signal is deliverable next time. | 527 | * ticking in case the signal is deliverable next time. |
528 | */ | 528 | */ |
529 | posix_cpu_timer_schedule(timer); | 529 | posix_cpu_timer_schedule(timer); |
530 | ++timer->it_requeue_pending; | ||
530 | } | 531 | } |
531 | } | 532 | } |
532 | 533 | ||
@@ -997,12 +998,12 @@ void posix_cpu_timer_schedule(struct k_itimer *timer) | |||
997 | cpu_clock_sample(timer->it_clock, p, &now); | 998 | cpu_clock_sample(timer->it_clock, p, &now); |
998 | bump_cpu_timer(timer, now); | 999 | bump_cpu_timer(timer, now); |
999 | if (unlikely(p->exit_state)) | 1000 | if (unlikely(p->exit_state)) |
1000 | goto out; | 1001 | return; |
1001 | 1002 | ||
1002 | /* Protect timer list r/w in arm_timer() */ | 1003 | /* Protect timer list r/w in arm_timer() */ |
1003 | sighand = lock_task_sighand(p, &flags); | 1004 | sighand = lock_task_sighand(p, &flags); |
1004 | if (!sighand) | 1005 | if (!sighand) |
1005 | goto out; | 1006 | return; |
1006 | } else { | 1007 | } else { |
1007 | /* | 1008 | /* |
1008 | * Protect arm_timer() and timer sampling in case of call to | 1009 | * Protect arm_timer() and timer sampling in case of call to |
@@ -1015,11 +1016,10 @@ void posix_cpu_timer_schedule(struct k_itimer *timer) | |||
1015 | * We can't even collect a sample any more. | 1016 | * We can't even collect a sample any more. |
1016 | */ | 1017 | */ |
1017 | timer->it.cpu.expires = 0; | 1018 | timer->it.cpu.expires = 0; |
1018 | goto out; | 1019 | return; |
1019 | } else if (unlikely(p->exit_state) && thread_group_empty(p)) { | 1020 | } else if (unlikely(p->exit_state) && thread_group_empty(p)) { |
1020 | unlock_task_sighand(p, &flags); | 1021 | /* If the process is dying, no need to rearm */ |
1021 | /* Optimizations: if the process is dying, no need to rearm */ | 1022 | goto unlock; |
1022 | goto out; | ||
1023 | } | 1023 | } |
1024 | cpu_timer_sample_group(timer->it_clock, p, &now); | 1024 | cpu_timer_sample_group(timer->it_clock, p, &now); |
1025 | bump_cpu_timer(timer, now); | 1025 | bump_cpu_timer(timer, now); |
@@ -1031,12 +1031,8 @@ void posix_cpu_timer_schedule(struct k_itimer *timer) | |||
1031 | */ | 1031 | */ |
1032 | WARN_ON_ONCE(!irqs_disabled()); | 1032 | WARN_ON_ONCE(!irqs_disabled()); |
1033 | arm_timer(timer); | 1033 | arm_timer(timer); |
1034 | unlock: | ||
1034 | unlock_task_sighand(p, &flags); | 1035 | unlock_task_sighand(p, &flags); |
1035 | |||
1036 | out: | ||
1037 | timer->it_overrun_last = timer->it_overrun; | ||
1038 | timer->it_overrun = -1; | ||
1039 | ++timer->it_requeue_pending; | ||
1040 | } | 1036 | } |
1041 | 1037 | ||
1042 | /** | 1038 | /** |
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index dee6a0d911d4..79a00e0f1ef9 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c | |||
@@ -291,10 +291,6 @@ static void schedule_next_timer(struct k_itimer *timr) | |||
291 | timr->it_overrun += (unsigned int) hrtimer_forward(timer, | 291 | timr->it_overrun += (unsigned int) hrtimer_forward(timer, |
292 | timer->base->get_time(), | 292 | timer->base->get_time(), |
293 | timr->it.real.interval); | 293 | timr->it.real.interval); |
294 | |||
295 | timr->it_overrun_last = timr->it_overrun; | ||
296 | timr->it_overrun = -1; | ||
297 | ++timr->it_requeue_pending; | ||
298 | hrtimer_restart(timer); | 294 | hrtimer_restart(timer); |
299 | } | 295 | } |
300 | 296 | ||
@@ -315,18 +311,23 @@ void do_schedule_next_timer(struct siginfo *info) | |||
315 | unsigned long flags; | 311 | unsigned long flags; |
316 | 312 | ||
317 | timr = lock_timer(info->si_tid, &flags); | 313 | timr = lock_timer(info->si_tid, &flags); |
314 | if (!timr) | ||
315 | return; | ||
318 | 316 | ||
319 | if (timr && timr->it_requeue_pending == info->si_sys_private) { | 317 | if (timr->it_requeue_pending == info->si_sys_private) { |
320 | if (timr->it_clock < 0) | 318 | if (timr->it_clock < 0) |
321 | posix_cpu_timer_schedule(timr); | 319 | posix_cpu_timer_schedule(timr); |
322 | else | 320 | else |
323 | schedule_next_timer(timr); | 321 | schedule_next_timer(timr); |
324 | 322 | ||
323 | timr->it_overrun_last = timr->it_overrun; | ||
324 | timr->it_overrun = -1; | ||
325 | ++timr->it_requeue_pending; | ||
326 | |||
325 | info->si_overrun += timr->it_overrun_last; | 327 | info->si_overrun += timr->it_overrun_last; |
326 | } | 328 | } |
327 | 329 | ||
328 | if (timr) | 330 | unlock_timer(timr, flags); |
329 | unlock_timer(timr, flags); | ||
330 | } | 331 | } |
331 | 332 | ||
332 | int posix_timer_event(struct k_itimer *timr, int si_private) | 333 | int posix_timer_event(struct k_itimer *timr, int si_private) |