diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-29 17:16:48 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-29 17:16:48 -0400 |
commit | bcd550745fc54f789c14e7526e0633222c505faa (patch) | |
tree | c3fe11a6503b7ffdd4406a9fece5c40b3e2a3f6d /kernel | |
parent | 93f378883cecb9dcb2cf5b51d9d24175906659da (diff) | |
parent | 646783a389828e76e813f50791f7999429c821bc (diff) |
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer core updates from Thomas Gleixner.
* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
ia64: vsyscall: Add missing paranthesis
alarmtimer: Don't call rtc_timer_init() when CONFIG_RTC_CLASS=n
x86: vdso: Put declaration before code
x86-64: Inline vdso clock_gettime helpers
x86-64: Simplify and optimize vdso clock_gettime monotonic variants
kernel-time: fix s/then/than/ spelling errors
time: remove no_sync_cmos_clock
time: Avoid scary backtraces when warning of > 11% adj
alarmtimer: Make sure we initialize the rtctimer
ntp: Fix leap-second hrtimer livelock
x86, tsc: Skip refined tsc calibration on systems with reliable TSC
rtc: Provide flag for rtc devices that don't support UIE
ia64: vsyscall: Use seqcount instead of seqlock
x86: vdso: Use seqcount instead of seqlock
x86: vdso: Remove bogus locking in update_vsyscall_tz()
time: Remove bogus comments
time: Fix change_clocksource locking
time: x86: Fix race switching from vsyscall to non-vsyscall clock
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/time.c | 6 | ||||
-rw-r--r-- | kernel/time/alarmtimer.c | 8 | ||||
-rw-r--r-- | kernel/time/clocksource.c | 2 | ||||
-rw-r--r-- | kernel/time/ntp.c | 134 | ||||
-rw-r--r-- | kernel/time/timekeeping.c | 51 |
5 files changed, 77 insertions(+), 124 deletions(-)
diff --git a/kernel/time.c b/kernel/time.c index 73e416db0a1e..ba744cf80696 100644 --- a/kernel/time.c +++ b/kernel/time.c | |||
@@ -163,7 +163,6 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz) | |||
163 | return error; | 163 | return error; |
164 | 164 | ||
165 | if (tz) { | 165 | if (tz) { |
166 | /* SMP safe, global irq locking makes it work. */ | ||
167 | sys_tz = *tz; | 166 | sys_tz = *tz; |
168 | update_vsyscall_tz(); | 167 | update_vsyscall_tz(); |
169 | if (firsttime) { | 168 | if (firsttime) { |
@@ -173,12 +172,7 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz) | |||
173 | } | 172 | } |
174 | } | 173 | } |
175 | if (tv) | 174 | if (tv) |
176 | { | ||
177 | /* SMP safe, again the code in arch/foo/time.c should | ||
178 | * globally block out interrupts when it runs. | ||
179 | */ | ||
180 | return do_settimeofday(tv); | 175 | return do_settimeofday(tv); |
181 | } | ||
182 | return 0; | 176 | return 0; |
183 | } | 177 | } |
184 | 178 | ||
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 8a46f5d64504..8a538c55fc7b 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c | |||
@@ -96,6 +96,11 @@ static int alarmtimer_rtc_add_device(struct device *dev, | |||
96 | return 0; | 96 | return 0; |
97 | } | 97 | } |
98 | 98 | ||
99 | static inline void alarmtimer_rtc_timer_init(void) | ||
100 | { | ||
101 | rtc_timer_init(&rtctimer, NULL, NULL); | ||
102 | } | ||
103 | |||
99 | static struct class_interface alarmtimer_rtc_interface = { | 104 | static struct class_interface alarmtimer_rtc_interface = { |
100 | .add_dev = &alarmtimer_rtc_add_device, | 105 | .add_dev = &alarmtimer_rtc_add_device, |
101 | }; | 106 | }; |
@@ -117,6 +122,7 @@ static inline struct rtc_device *alarmtimer_get_rtcdev(void) | |||
117 | #define rtcdev (NULL) | 122 | #define rtcdev (NULL) |
118 | static inline int alarmtimer_rtc_interface_setup(void) { return 0; } | 123 | static inline int alarmtimer_rtc_interface_setup(void) { return 0; } |
119 | static inline void alarmtimer_rtc_interface_remove(void) { } | 124 | static inline void alarmtimer_rtc_interface_remove(void) { } |
125 | static inline void alarmtimer_rtc_timer_init(void) { } | ||
120 | #endif | 126 | #endif |
121 | 127 | ||
122 | /** | 128 | /** |
@@ -783,6 +789,8 @@ static int __init alarmtimer_init(void) | |||
783 | .nsleep = alarm_timer_nsleep, | 789 | .nsleep = alarm_timer_nsleep, |
784 | }; | 790 | }; |
785 | 791 | ||
792 | alarmtimer_rtc_timer_init(); | ||
793 | |||
786 | posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock); | 794 | posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock); |
787 | posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock); | 795 | posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock); |
788 | 796 | ||
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index a45ca167ab24..c9583382141a 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -500,7 +500,7 @@ static u32 clocksource_max_adjustment(struct clocksource *cs) | |||
500 | { | 500 | { |
501 | u64 ret; | 501 | u64 ret; |
502 | /* | 502 | /* |
503 | * We won't try to correct for more then 11% adjustments (110,000 ppm), | 503 | * We won't try to correct for more than 11% adjustments (110,000 ppm), |
504 | */ | 504 | */ |
505 | ret = (u64)cs->mult * 11; | 505 | ret = (u64)cs->mult * 11; |
506 | do_div(ret,100); | 506 | do_div(ret,100); |
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 6e039b144daf..f03fd83b170b 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c | |||
@@ -34,8 +34,6 @@ unsigned long tick_nsec; | |||
34 | static u64 tick_length; | 34 | static u64 tick_length; |
35 | static u64 tick_length_base; | 35 | static u64 tick_length_base; |
36 | 36 | ||
37 | static struct hrtimer leap_timer; | ||
38 | |||
39 | #define MAX_TICKADJ 500LL /* usecs */ | 37 | #define MAX_TICKADJ 500LL /* usecs */ |
40 | #define MAX_TICKADJ_SCALED \ | 38 | #define MAX_TICKADJ_SCALED \ |
41 | (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ) | 39 | (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ) |
@@ -381,70 +379,63 @@ u64 ntp_tick_length(void) | |||
381 | 379 | ||
382 | 380 | ||
383 | /* | 381 | /* |
384 | * Leap second processing. If in leap-insert state at the end of the | 382 | * this routine handles the overflow of the microsecond field |
385 | * day, the system clock is set back one second; if in leap-delete | 383 | * |
386 | * state, the system clock is set ahead one second. | 384 | * The tricky bits of code to handle the accurate clock support |
385 | * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame. | ||
386 | * They were originally developed for SUN and DEC kernels. | ||
387 | * All the kudos should go to Dave for this stuff. | ||
388 | * | ||
389 | * Also handles leap second processing, and returns leap offset | ||
387 | */ | 390 | */ |
388 | static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer) | 391 | int second_overflow(unsigned long secs) |
389 | { | 392 | { |
390 | enum hrtimer_restart res = HRTIMER_NORESTART; | 393 | s64 delta; |
391 | unsigned long flags; | ||
392 | int leap = 0; | 394 | int leap = 0; |
395 | unsigned long flags; | ||
393 | 396 | ||
394 | spin_lock_irqsave(&ntp_lock, flags); | 397 | spin_lock_irqsave(&ntp_lock, flags); |
398 | |||
399 | /* | ||
400 | * Leap second processing. If in leap-insert state at the end of the | ||
401 | * day, the system clock is set back one second; if in leap-delete | ||
402 | * state, the system clock is set ahead one second. | ||
403 | */ | ||
395 | switch (time_state) { | 404 | switch (time_state) { |
396 | case TIME_OK: | 405 | case TIME_OK: |
406 | if (time_status & STA_INS) | ||
407 | time_state = TIME_INS; | ||
408 | else if (time_status & STA_DEL) | ||
409 | time_state = TIME_DEL; | ||
397 | break; | 410 | break; |
398 | case TIME_INS: | 411 | case TIME_INS: |
399 | leap = -1; | 412 | if (secs % 86400 == 0) { |
400 | time_state = TIME_OOP; | 413 | leap = -1; |
401 | printk(KERN_NOTICE | 414 | time_state = TIME_OOP; |
402 | "Clock: inserting leap second 23:59:60 UTC\n"); | 415 | printk(KERN_NOTICE |
403 | hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC); | 416 | "Clock: inserting leap second 23:59:60 UTC\n"); |
404 | res = HRTIMER_RESTART; | 417 | } |
405 | break; | 418 | break; |
406 | case TIME_DEL: | 419 | case TIME_DEL: |
407 | leap = 1; | 420 | if ((secs + 1) % 86400 == 0) { |
408 | time_tai--; | 421 | leap = 1; |
409 | time_state = TIME_WAIT; | 422 | time_tai--; |
410 | printk(KERN_NOTICE | 423 | time_state = TIME_WAIT; |
411 | "Clock: deleting leap second 23:59:59 UTC\n"); | 424 | printk(KERN_NOTICE |
425 | "Clock: deleting leap second 23:59:59 UTC\n"); | ||
426 | } | ||
412 | break; | 427 | break; |
413 | case TIME_OOP: | 428 | case TIME_OOP: |
414 | time_tai++; | 429 | time_tai++; |
415 | time_state = TIME_WAIT; | 430 | time_state = TIME_WAIT; |
416 | /* fall through */ | 431 | break; |
432 | |||
417 | case TIME_WAIT: | 433 | case TIME_WAIT: |
418 | if (!(time_status & (STA_INS | STA_DEL))) | 434 | if (!(time_status & (STA_INS | STA_DEL))) |
419 | time_state = TIME_OK; | 435 | time_state = TIME_OK; |
420 | break; | 436 | break; |
421 | } | 437 | } |
422 | spin_unlock_irqrestore(&ntp_lock, flags); | ||
423 | 438 | ||
424 | /* | ||
425 | * We have to call this outside of the ntp_lock to keep | ||
426 | * the proper locking hierarchy | ||
427 | */ | ||
428 | if (leap) | ||
429 | timekeeping_leap_insert(leap); | ||
430 | |||
431 | return res; | ||
432 | } | ||
433 | |||
434 | /* | ||
435 | * this routine handles the overflow of the microsecond field | ||
436 | * | ||
437 | * The tricky bits of code to handle the accurate clock support | ||
438 | * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame. | ||
439 | * They were originally developed for SUN and DEC kernels. | ||
440 | * All the kudos should go to Dave for this stuff. | ||
441 | */ | ||
442 | void second_overflow(void) | ||
443 | { | ||
444 | s64 delta; | ||
445 | unsigned long flags; | ||
446 | |||
447 | spin_lock_irqsave(&ntp_lock, flags); | ||
448 | 439 | ||
449 | /* Bump the maxerror field */ | 440 | /* Bump the maxerror field */ |
450 | time_maxerror += MAXFREQ / NSEC_PER_USEC; | 441 | time_maxerror += MAXFREQ / NSEC_PER_USEC; |
@@ -481,15 +472,17 @@ void second_overflow(void) | |||
481 | tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ) | 472 | tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ) |
482 | << NTP_SCALE_SHIFT; | 473 | << NTP_SCALE_SHIFT; |
483 | time_adjust = 0; | 474 | time_adjust = 0; |
475 | |||
476 | |||
477 | |||
484 | out: | 478 | out: |
485 | spin_unlock_irqrestore(&ntp_lock, flags); | 479 | spin_unlock_irqrestore(&ntp_lock, flags); |
480 | |||
481 | return leap; | ||
486 | } | 482 | } |
487 | 483 | ||
488 | #ifdef CONFIG_GENERIC_CMOS_UPDATE | 484 | #ifdef CONFIG_GENERIC_CMOS_UPDATE |
489 | 485 | ||
490 | /* Disable the cmos update - used by virtualization and embedded */ | ||
491 | int no_sync_cmos_clock __read_mostly; | ||
492 | |||
493 | static void sync_cmos_clock(struct work_struct *work); | 486 | static void sync_cmos_clock(struct work_struct *work); |
494 | 487 | ||
495 | static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock); | 488 | static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock); |
@@ -536,35 +529,13 @@ static void sync_cmos_clock(struct work_struct *work) | |||
536 | 529 | ||
537 | static void notify_cmos_timer(void) | 530 | static void notify_cmos_timer(void) |
538 | { | 531 | { |
539 | if (!no_sync_cmos_clock) | 532 | schedule_delayed_work(&sync_cmos_work, 0); |
540 | schedule_delayed_work(&sync_cmos_work, 0); | ||
541 | } | 533 | } |
542 | 534 | ||
543 | #else | 535 | #else |
544 | static inline void notify_cmos_timer(void) { } | 536 | static inline void notify_cmos_timer(void) { } |
545 | #endif | 537 | #endif |
546 | 538 | ||
547 | /* | ||
548 | * Start the leap seconds timer: | ||
549 | */ | ||
550 | static inline void ntp_start_leap_timer(struct timespec *ts) | ||
551 | { | ||
552 | long now = ts->tv_sec; | ||
553 | |||
554 | if (time_status & STA_INS) { | ||
555 | time_state = TIME_INS; | ||
556 | now += 86400 - now % 86400; | ||
557 | hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS); | ||
558 | |||
559 | return; | ||
560 | } | ||
561 | |||
562 | if (time_status & STA_DEL) { | ||
563 | time_state = TIME_DEL; | ||
564 | now += 86400 - (now + 1) % 86400; | ||
565 | hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS); | ||
566 | } | ||
567 | } | ||
568 | 539 | ||
569 | /* | 540 | /* |
570 | * Propagate a new txc->status value into the NTP state: | 541 | * Propagate a new txc->status value into the NTP state: |
@@ -589,22 +560,6 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts) | |||
589 | time_status &= STA_RONLY; | 560 | time_status &= STA_RONLY; |
590 | time_status |= txc->status & ~STA_RONLY; | 561 | time_status |= txc->status & ~STA_RONLY; |
591 | 562 | ||
592 | switch (time_state) { | ||
593 | case TIME_OK: | ||
594 | ntp_start_leap_timer(ts); | ||
595 | break; | ||
596 | case TIME_INS: | ||
597 | case TIME_DEL: | ||
598 | time_state = TIME_OK; | ||
599 | ntp_start_leap_timer(ts); | ||
600 | case TIME_WAIT: | ||
601 | if (!(time_status & (STA_INS | STA_DEL))) | ||
602 | time_state = TIME_OK; | ||
603 | break; | ||
604 | case TIME_OOP: | ||
605 | hrtimer_restart(&leap_timer); | ||
606 | break; | ||
607 | } | ||
608 | } | 563 | } |
609 | /* | 564 | /* |
610 | * Called with the xtime lock held, so we can access and modify | 565 | * Called with the xtime lock held, so we can access and modify |
@@ -686,9 +641,6 @@ int do_adjtimex(struct timex *txc) | |||
686 | (txc->tick < 900000/USER_HZ || | 641 | (txc->tick < 900000/USER_HZ || |
687 | txc->tick > 1100000/USER_HZ)) | 642 | txc->tick > 1100000/USER_HZ)) |
688 | return -EINVAL; | 643 | return -EINVAL; |
689 | |||
690 | if (txc->modes & ADJ_STATUS && time_state != TIME_OK) | ||
691 | hrtimer_cancel(&leap_timer); | ||
692 | } | 644 | } |
693 | 645 | ||
694 | if (txc->modes & ADJ_SETOFFSET) { | 646 | if (txc->modes & ADJ_SETOFFSET) { |
@@ -1010,6 +962,4 @@ __setup("ntp_tick_adj=", ntp_tick_adj_setup); | |||
1010 | void __init ntp_init(void) | 962 | void __init ntp_init(void) |
1011 | { | 963 | { |
1012 | ntp_clear(); | 964 | ntp_clear(); |
1013 | hrtimer_init(&leap_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); | ||
1014 | leap_timer.function = ntp_leap_second; | ||
1015 | } | 965 | } |
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 15be32e19c6e..d66b21308f7c 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -184,18 +184,6 @@ static void timekeeping_update(bool clearntp) | |||
184 | } | 184 | } |
185 | 185 | ||
186 | 186 | ||
187 | void timekeeping_leap_insert(int leapsecond) | ||
188 | { | ||
189 | unsigned long flags; | ||
190 | |||
191 | write_seqlock_irqsave(&timekeeper.lock, flags); | ||
192 | timekeeper.xtime.tv_sec += leapsecond; | ||
193 | timekeeper.wall_to_monotonic.tv_sec -= leapsecond; | ||
194 | timekeeping_update(false); | ||
195 | write_sequnlock_irqrestore(&timekeeper.lock, flags); | ||
196 | |||
197 | } | ||
198 | |||
199 | /** | 187 | /** |
200 | * timekeeping_forward_now - update clock to the current time | 188 | * timekeeping_forward_now - update clock to the current time |
201 | * | 189 | * |
@@ -448,9 +436,12 @@ EXPORT_SYMBOL(timekeeping_inject_offset); | |||
448 | static int change_clocksource(void *data) | 436 | static int change_clocksource(void *data) |
449 | { | 437 | { |
450 | struct clocksource *new, *old; | 438 | struct clocksource *new, *old; |
439 | unsigned long flags; | ||
451 | 440 | ||
452 | new = (struct clocksource *) data; | 441 | new = (struct clocksource *) data; |
453 | 442 | ||
443 | write_seqlock_irqsave(&timekeeper.lock, flags); | ||
444 | |||
454 | timekeeping_forward_now(); | 445 | timekeeping_forward_now(); |
455 | if (!new->enable || new->enable(new) == 0) { | 446 | if (!new->enable || new->enable(new) == 0) { |
456 | old = timekeeper.clock; | 447 | old = timekeeper.clock; |
@@ -458,6 +449,10 @@ static int change_clocksource(void *data) | |||
458 | if (old->disable) | 449 | if (old->disable) |
459 | old->disable(old); | 450 | old->disable(old); |
460 | } | 451 | } |
452 | timekeeping_update(true); | ||
453 | |||
454 | write_sequnlock_irqrestore(&timekeeper.lock, flags); | ||
455 | |||
461 | return 0; | 456 | return 0; |
462 | } | 457 | } |
463 | 458 | ||
@@ -827,7 +822,7 @@ static void timekeeping_adjust(s64 offset) | |||
827 | int adj; | 822 | int adj; |
828 | 823 | ||
829 | /* | 824 | /* |
830 | * The point of this is to check if the error is greater then half | 825 | * The point of this is to check if the error is greater than half |
831 | * an interval. | 826 | * an interval. |
832 | * | 827 | * |
833 | * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs. | 828 | * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs. |
@@ -835,7 +830,7 @@ static void timekeeping_adjust(s64 offset) | |||
835 | * Note we subtract one in the shift, so that error is really error*2. | 830 | * Note we subtract one in the shift, so that error is really error*2. |
836 | * This "saves" dividing(shifting) interval twice, but keeps the | 831 | * This "saves" dividing(shifting) interval twice, but keeps the |
837 | * (error > interval) comparison as still measuring if error is | 832 | * (error > interval) comparison as still measuring if error is |
838 | * larger then half an interval. | 833 | * larger than half an interval. |
839 | * | 834 | * |
840 | * Note: It does not "save" on aggravation when reading the code. | 835 | * Note: It does not "save" on aggravation when reading the code. |
841 | */ | 836 | */ |
@@ -843,7 +838,7 @@ static void timekeeping_adjust(s64 offset) | |||
843 | if (error > interval) { | 838 | if (error > interval) { |
844 | /* | 839 | /* |
845 | * We now divide error by 4(via shift), which checks if | 840 | * We now divide error by 4(via shift), which checks if |
846 | * the error is greater then twice the interval. | 841 | * the error is greater than twice the interval. |
847 | * If it is greater, we need a bigadjust, if its smaller, | 842 | * If it is greater, we need a bigadjust, if its smaller, |
848 | * we can adjust by 1. | 843 | * we can adjust by 1. |
849 | */ | 844 | */ |
@@ -874,13 +869,15 @@ static void timekeeping_adjust(s64 offset) | |||
874 | } else /* No adjustment needed */ | 869 | } else /* No adjustment needed */ |
875 | return; | 870 | return; |
876 | 871 | ||
877 | WARN_ONCE(timekeeper.clock->maxadj && | 872 | if (unlikely(timekeeper.clock->maxadj && |
878 | (timekeeper.mult + adj > timekeeper.clock->mult + | 873 | (timekeeper.mult + adj > |
879 | timekeeper.clock->maxadj), | 874 | timekeeper.clock->mult + timekeeper.clock->maxadj))) { |
880 | "Adjusting %s more then 11%% (%ld vs %ld)\n", | 875 | printk_once(KERN_WARNING |
876 | "Adjusting %s more than 11%% (%ld vs %ld)\n", | ||
881 | timekeeper.clock->name, (long)timekeeper.mult + adj, | 877 | timekeeper.clock->name, (long)timekeeper.mult + adj, |
882 | (long)timekeeper.clock->mult + | 878 | (long)timekeeper.clock->mult + |
883 | timekeeper.clock->maxadj); | 879 | timekeeper.clock->maxadj); |
880 | } | ||
884 | /* | 881 | /* |
885 | * So the following can be confusing. | 882 | * So the following can be confusing. |
886 | * | 883 | * |
@@ -952,7 +949,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift) | |||
952 | u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift; | 949 | u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift; |
953 | u64 raw_nsecs; | 950 | u64 raw_nsecs; |
954 | 951 | ||
955 | /* If the offset is smaller then a shifted interval, do nothing */ | 952 | /* If the offset is smaller than a shifted interval, do nothing */ |
956 | if (offset < timekeeper.cycle_interval<<shift) | 953 | if (offset < timekeeper.cycle_interval<<shift) |
957 | return offset; | 954 | return offset; |
958 | 955 | ||
@@ -962,9 +959,11 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift) | |||
962 | 959 | ||
963 | timekeeper.xtime_nsec += timekeeper.xtime_interval << shift; | 960 | timekeeper.xtime_nsec += timekeeper.xtime_interval << shift; |
964 | while (timekeeper.xtime_nsec >= nsecps) { | 961 | while (timekeeper.xtime_nsec >= nsecps) { |
962 | int leap; | ||
965 | timekeeper.xtime_nsec -= nsecps; | 963 | timekeeper.xtime_nsec -= nsecps; |
966 | timekeeper.xtime.tv_sec++; | 964 | timekeeper.xtime.tv_sec++; |
967 | second_overflow(); | 965 | leap = second_overflow(timekeeper.xtime.tv_sec); |
966 | timekeeper.xtime.tv_sec += leap; | ||
968 | } | 967 | } |
969 | 968 | ||
970 | /* Accumulate raw time */ | 969 | /* Accumulate raw time */ |
@@ -1018,13 +1017,13 @@ static void update_wall_time(void) | |||
1018 | * With NO_HZ we may have to accumulate many cycle_intervals | 1017 | * With NO_HZ we may have to accumulate many cycle_intervals |
1019 | * (think "ticks") worth of time at once. To do this efficiently, | 1018 | * (think "ticks") worth of time at once. To do this efficiently, |
1020 | * we calculate the largest doubling multiple of cycle_intervals | 1019 | * we calculate the largest doubling multiple of cycle_intervals |
1021 | * that is smaller then the offset. We then accumulate that | 1020 | * that is smaller than the offset. We then accumulate that |
1022 | * chunk in one go, and then try to consume the next smaller | 1021 | * chunk in one go, and then try to consume the next smaller |
1023 | * doubled multiple. | 1022 | * doubled multiple. |
1024 | */ | 1023 | */ |
1025 | shift = ilog2(offset) - ilog2(timekeeper.cycle_interval); | 1024 | shift = ilog2(offset) - ilog2(timekeeper.cycle_interval); |
1026 | shift = max(0, shift); | 1025 | shift = max(0, shift); |
1027 | /* Bound shift to one less then what overflows tick_length */ | 1026 | /* Bound shift to one less than what overflows tick_length */ |
1028 | maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1; | 1027 | maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1; |
1029 | shift = min(shift, maxshift); | 1028 | shift = min(shift, maxshift); |
1030 | while (offset >= timekeeper.cycle_interval) { | 1029 | while (offset >= timekeeper.cycle_interval) { |
@@ -1072,12 +1071,14 @@ static void update_wall_time(void) | |||
1072 | 1071 | ||
1073 | /* | 1072 | /* |
1074 | * Finally, make sure that after the rounding | 1073 | * Finally, make sure that after the rounding |
1075 | * xtime.tv_nsec isn't larger then NSEC_PER_SEC | 1074 | * xtime.tv_nsec isn't larger than NSEC_PER_SEC |
1076 | */ | 1075 | */ |
1077 | if (unlikely(timekeeper.xtime.tv_nsec >= NSEC_PER_SEC)) { | 1076 | if (unlikely(timekeeper.xtime.tv_nsec >= NSEC_PER_SEC)) { |
1077 | int leap; | ||
1078 | timekeeper.xtime.tv_nsec -= NSEC_PER_SEC; | 1078 | timekeeper.xtime.tv_nsec -= NSEC_PER_SEC; |
1079 | timekeeper.xtime.tv_sec++; | 1079 | timekeeper.xtime.tv_sec++; |
1080 | second_overflow(); | 1080 | leap = second_overflow(timekeeper.xtime.tv_sec); |
1081 | timekeeper.xtime.tv_sec += leap; | ||
1081 | } | 1082 | } |
1082 | 1083 | ||
1083 | timekeeping_update(false); | 1084 | timekeeping_update(false); |