 kernel/hrtimer.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 8f320af837b5..6db7a5ed52b5 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -657,6 +657,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 	return 0;
 }
 
+static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+{
+	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
+	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
+
+	return ktime_get_update_offsets(offs_real, offs_boot);
+}
+
 /*
  * Retrigger next event is called after clock was set
  *
@@ -665,22 +673,12 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 static void retrigger_next_event(void *arg)
 {
 	struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
-	struct timespec realtime_offset, xtim, wtm, sleep;
 
 	if (!hrtimer_hres_active())
 		return;
 
-	/* Optimized out for !HIGH_RES */
-	get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
-	set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
-
-	/* Adjust CLOCK_REALTIME offset */
 	raw_spin_lock(&base->lock);
-	base->clock_base[HRTIMER_BASE_REALTIME].offset =
-		timespec_to_ktime(realtime_offset);
-	base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
-		timespec_to_ktime(sleep);
-
+	hrtimer_update_base(base);
 	hrtimer_force_reprogram(base, 0);
 	raw_spin_unlock(&base->lock);
 }
@@ -710,7 +708,6 @@ static int hrtimer_switch_to_hres(void)
 		base->clock_base[i].resolution = KTIME_HIGH_RES;
 
 	tick_setup_sched_timer();
-
 	/* "Retrigger" the interrupt to get things going */
 	retrigger_next_event(NULL);
 	local_irq_restore(flags);
@@ -1264,7 +1261,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	dev->next_event.tv64 = KTIME_MAX;
 
 	raw_spin_lock(&cpu_base->lock);
-	entry_time = now = ktime_get();
+	entry_time = now = hrtimer_update_base(cpu_base);
 retry:
 	expires_next.tv64 = KTIME_MAX;
 	/*
@@ -1342,9 +1339,12 @@ retry:
 	 * We need to prevent that we loop forever in the hrtimer
 	 * interrupt routine. We give it 3 attempts to avoid
 	 * overreacting on some spurious event.
+	 *
+	 * Acquire base lock for updating the offsets and retrieving
+	 * the current time.
 	 */
 	raw_spin_lock(&cpu_base->lock);
-	now = ktime_get();
+	now = hrtimer_update_base(cpu_base);
 	cpu_base->nr_retries++;
 	if (++retries < 3)
 		goto retry;
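
For context: the patch above folds the open-coded offset update and the separate ktime_get() calls into the new hrtimer_update_base() helper, so the CLOCK_REALTIME and CLOCK_BOOTTIME offsets and the current monotonic time are obtained in a single step while cpu_base->lock is held. The sketch below is a minimal userspace analogue of that pattern only; every type and helper in it (struct cpu_base, fake_get_update_offsets(), update_base()) is an illustrative stand-in, not the kernel API.

/*
 * Userspace sketch of the "refresh clock offsets and read the clock
 * under one lock" pattern. All names here are stand-ins for the kernel
 * structures used in the patch; build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef int64_t ktime_t;                /* nanoseconds, like the kernel type */

struct clock_base { ktime_t offset; };

struct cpu_base {
	pthread_mutex_t lock;
	struct clock_base realtime;         /* stand-in for HRTIMER_BASE_REALTIME */
	struct clock_base boottime;         /* stand-in for HRTIMER_BASE_BOOTTIME */
};

/* Fake timekeeping core: returns monotonic time and refreshes both offsets. */
static ktime_t fake_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
{
	struct timespec mono, real;

	clock_gettime(CLOCK_MONOTONIC, &mono);
	clock_gettime(CLOCK_REALTIME, &real);

	ktime_t mono_ns = (ktime_t)mono.tv_sec * 1000000000LL + mono.tv_nsec;
	ktime_t real_ns = (ktime_t)real.tv_sec * 1000000000LL + real.tv_nsec;

	*offs_real = real_ns - mono_ns;     /* realtime = monotonic + offset */
	*offs_boot = 0;                     /* no suspend time in this sketch */
	return mono_ns;
}

/* Analogue of hrtimer_update_base(): one call yields time plus fresh offsets. */
static ktime_t update_base(struct cpu_base *base)
{
	return fake_get_update_offsets(&base->realtime.offset,
				       &base->boottime.offset);
}

int main(void)
{
	struct cpu_base base = { .lock = PTHREAD_MUTEX_INITIALIZER };

	/* Mirror the hrtimer_interrupt() path: lock, then read time + offsets. */
	pthread_mutex_lock(&base.lock);
	ktime_t now = update_base(&base);
	pthread_mutex_unlock(&base.lock);

	printf("now=%lld ns, realtime offset=%lld ns\n",
	       (long long)now, (long long)base.realtime.offset);
	return 0;
}

The point of the pattern, as the added comment in the last hunk states, is that the offsets and the current time are taken together under the base lock, so expiry decisions in the interrupt path cannot see a time value that is inconsistent with the per-clock offsets.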
