Diffstat (limited to 'kernel/hrtimer.c')
 kernel/hrtimer.c | 55 +++++++++++++++++--------------------------------------
 1 file changed, 17 insertions(+), 38 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 6db7a5ed52b..2043c08d36c 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -32,7 +32,7 @@
  */
 
 #include <linux/cpu.h>
-#include <linux/export.h>
+#include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/hrtimer.h>
 #include <linux/notifier.h>
@@ -657,14 +657,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 	return 0;
 }
 
-static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
-{
-	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
-	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
-
-	return ktime_get_update_offsets(offs_real, offs_boot);
-}
-
 /*
  * Retrigger next event is called after clock was set
  *
@@ -673,12 +665,22 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
 static void retrigger_next_event(void *arg)
 {
 	struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
+	struct timespec realtime_offset, xtim, wtm, sleep;
 
 	if (!hrtimer_hres_active())
 		return;
 
+	/* Optimized out for !HIGH_RES */
+	get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
+	set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
+
+	/* Adjust CLOCK_REALTIME offset */
 	raw_spin_lock(&base->lock);
-	hrtimer_update_base(base);
+	base->clock_base[HRTIMER_BASE_REALTIME].offset =
+		timespec_to_ktime(realtime_offset);
+	base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
+		timespec_to_ktime(sleep);
+
 	hrtimer_force_reprogram(base, 0);
 	raw_spin_unlock(&base->lock);
 }
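
For reference, the offset arithmetic reintroduced in this hunk is just a negation of wall_to_monotonic, renormalized so tv_nsec stays within [0, NSEC_PER_SEC). A minimal standalone sketch of that step (plain userspace C; the wtm value is hypothetical and set_normalized_timespec is re-implemented here only for illustration):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

struct ts { long tv_sec; long tv_nsec; };

/* Renormalize so that 0 <= tv_nsec < NSEC_PER_SEC, as the kernel helper does. */
static void set_normalized(struct ts *res, long sec, long nsec)
{
	while (nsec >= NSEC_PER_SEC) { nsec -= NSEC_PER_SEC; sec++; }
	while (nsec < 0)             { nsec += NSEC_PER_SEC; sec--; }
	res->tv_sec = sec;
	res->tv_nsec = nsec;
}

int main(void)
{
	/* Hypothetical wall_to_monotonic value, in normalized form. */
	struct ts wtm = { -1341000001, 500000000 };
	struct ts realtime_offset;

	/* CLOCK_REALTIME offset = -wall_to_monotonic, as in retrigger_next_event(). */
	set_normalized(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
	printf("realtime offset = %ld.%09lds\n",
	       realtime_offset.tv_sec, realtime_offset.tv_nsec);
	return 0;
}

The BOOTTIME base takes the accumulated suspend time (sleep) directly, so only the REALTIME offset needs the sign flip and renormalization.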
@@ -708,25 +710,13 @@ static int hrtimer_switch_to_hres(void)
 		base->clock_base[i].resolution = KTIME_HIGH_RES;
 
 	tick_setup_sched_timer();
+
 	/* "Retrigger" the interrupt to get things going */
 	retrigger_next_event(NULL);
 	local_irq_restore(flags);
 	return 1;
 }
 
-/*
- * Called from timekeeping code to reprogramm the hrtimer interrupt
- * device. If called from the timer interrupt context we defer it to
- * softirq context.
- */
-void clock_was_set_delayed(void)
-{
-	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-
-	cpu_base->clock_was_set = 1;
-	__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-}
-
 #else
 
 static inline int hrtimer_hres_active(void) { return 0; }
@@ -1260,10 +1250,11 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	cpu_base->nr_events++;
 	dev->next_event.tv64 = KTIME_MAX;
 
-	raw_spin_lock(&cpu_base->lock);
-	entry_time = now = hrtimer_update_base(cpu_base);
+	entry_time = now = ktime_get();
 retry:
 	expires_next.tv64 = KTIME_MAX;
+
+	raw_spin_lock(&cpu_base->lock);
 	/*
 	 * We set expires_next to KTIME_MAX here with cpu_base->lock
 	 * held to prevent that a timer is enqueued in our queue via
@@ -1339,12 +1330,8 @@ retry:
 	 * We need to prevent that we loop forever in the hrtimer
 	 * interrupt routine. We give it 3 attempts to avoid
 	 * overreacting on some spurious event.
-	 *
-	 * Acquire base lock for updating the offsets and retrieving
-	 * the current time.
 	 */
-	raw_spin_lock(&cpu_base->lock);
-	now = hrtimer_update_base(cpu_base);
+	now = ktime_get();
 	cpu_base->nr_retries++;
 	if (++retries < 3)
 		goto retry;
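
The two hunks above also change where cpu_base->lock is taken: the clock is now read with ktime_get() outside the lock, and each pass through the retry: label re-acquires the lock itself. A minimal sketch of that bounded-retry shape (standalone C; the lock, clock and expiry checks are stubs, and the names are illustrative, not the kernel's):

#include <stdio.h>

/* Stubs standing in for raw_spin_lock/unlock and ktime_get(). */
static void lock(void)       { }
static void unlock(void)     { }
static long clock_read(void) { static long t; return t += 100; }
static int  work_remains(long now) { return now < 400; /* pretend timers keep firing */ }

int main(void)
{
	long entry_time, now;
	int retries = 0;

	entry_time = now = clock_read();  /* clock read before taking the lock */
retry:
	lock();
	/* ... expire timers, compute and program the next event ... */
	unlock();

	if (!work_remains(now))
		return 0;                 /* reprogramming succeeded, done */

	now = clock_read();               /* re-read the clock outside the lock */
	if (++retries < 3)                /* three attempts, as in the comment above */
		goto retry;

	/* Hang: record how long the handler has been looping. */
	printf("hang detected after %ld ns\n", now - entry_time);
	return 1;
}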
@@ -1356,7 +1343,6 @@ retry:
 	 */
 	cpu_base->nr_hangs++;
 	cpu_base->hang_detected = 1;
-	raw_spin_unlock(&cpu_base->lock);
 	delta = ktime_sub(now, entry_time);
 	if (delta.tv64 > cpu_base->max_hang_time.tv64)
 		cpu_base->max_hang_time = delta;
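
The hang bookkeeping itself is a running maximum over the measured loop time, now updated without dropping the lock first. Sketched standalone, with ktime_t reduced to a plain 64-bit nanosecond count and hypothetical values:

#include <stdio.h>

typedef long long s64;             /* stand-in for ktime_t.tv64 (nanoseconds) */

int main(void)
{
	s64 entry_time    = 1000;  /* hypothetical interrupt entry timestamp */
	s64 now           = 4500;  /* timestamp after three failed retries   */
	s64 max_hang_time = 2000;  /* worst hang seen so far on this CPU     */

	s64 delta = now - entry_time;       /* ktime_sub(now, entry_time) */
	if (delta > max_hang_time)          /* keep the worst observed hang */
		max_hang_time = delta;

	printf("max_hang_time = %lld ns\n", max_hang_time);
	return 0;
}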
@@ -1409,13 +1395,6 @@ void hrtimer_peek_ahead_timers(void)
 
 static void run_hrtimer_softirq(struct softirq_action *h)
 {
-	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-
-	if (cpu_base->clock_was_set) {
-		cpu_base->clock_was_set = 0;
-		clock_was_set();
-	}
-
 	hrtimer_peek_ahead_timers();
 }
 