Diffstat (limited to 'kernel/time')
 -rw-r--r--  kernel/time/alarmtimer.c     |   8
 -rw-r--r--  kernel/time/clocksource.c    |   2
 -rw-r--r--  kernel/time/ntp.c            | 191
 -rw-r--r--  kernel/time/tick-broadcast.c |   4
 -rw-r--r--  kernel/time/tick-sched.c     |  17
 -rw-r--r--  kernel/time/timekeeping.c    | 373
 6 files changed, 315 insertions(+), 280 deletions(-)
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 8a46f5d64504..8a538c55fc7b 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -96,6 +96,11 @@ static int alarmtimer_rtc_add_device(struct device *dev,
 	return 0;
 }
 
+static inline void alarmtimer_rtc_timer_init(void)
+{
+	rtc_timer_init(&rtctimer, NULL, NULL);
+}
+
 static struct class_interface alarmtimer_rtc_interface = {
 	.add_dev = &alarmtimer_rtc_add_device,
 };
@@ -117,6 +122,7 @@ static inline struct rtc_device *alarmtimer_get_rtcdev(void)
 #define rtcdev (NULL)
 static inline int alarmtimer_rtc_interface_setup(void) { return 0; }
 static inline void alarmtimer_rtc_interface_remove(void) { }
+static inline void alarmtimer_rtc_timer_init(void) { }
 #endif
 
 /**
@@ -783,6 +789,8 @@ static int __init alarmtimer_init(void)
 		.nsleep		= alarm_timer_nsleep,
 	};
 
+	alarmtimer_rtc_timer_init();
+
 	posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock);
 	posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock);
 
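The alarmtimer change above follows the usual stub-pair idiom: the CONFIG_RTC_CLASS build gets a real initializer, the !CONFIG_RTC_CLASS build gets an empty inline, and alarmtimer_init() can then call it unconditionally. A condensed sketch of the idiom, using only identifiers that appear in the diff above:

#ifdef CONFIG_RTC_CLASS
static inline void alarmtimer_rtc_timer_init(void)
{
	/* Initialize rtctimer once at boot, before any alarm is armed,
	 * so suspend/resume never touches an uninitialized rtc_timer. */
	rtc_timer_init(&rtctimer, NULL, NULL);
}
#else
/* !CONFIG_RTC_CLASS: the same call site compiles to nothing */
static inline void alarmtimer_rtc_timer_init(void) { }
#endif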
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index a45ca167ab24..c9583382141a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -500,7 +500,7 @@ static u32 clocksource_max_adjustment(struct clocksource *cs)
 {
 	u64 ret;
 	/*
-	 * We won't try to correct for more then 11% adjustments (110,000 ppm),
+	 * We won't try to correct for more than 11% adjustments (110,000 ppm),
 	 */
 	ret = (u64)cs->mult * 11;
 	do_div(ret,100);
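For a concrete feel of the 11% cap computed above: with a hypothetical clocksource whose mult is 2^24, the arithmetic works out as in this userspace sketch (not kernel code; do_div() is replaced by a plain division):

#include <stdint.h>
#include <stdio.h>

/* Sketch of clocksource_max_adjustment(): cap NTP-driven changes to
 * the multiplier at 11% (110,000 ppm) of the nominal value. */
static uint32_t max_adjustment(uint32_t mult)
{
	uint64_t ret = (uint64_t)mult * 11;
	return (uint32_t)(ret / 100);	/* do_div(ret, 100) in the kernel */
}

int main(void)
{
	/* 1 << 24 = 16777216 -> maxadj = 1845493 */
	printf("%u\n", max_adjustment(1u << 24));
	return 0;
}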
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index f6117a4c7cb8..f03fd83b170b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -22,17 +22,18 @@
  * NTP timekeeping variables:
  */
 
+DEFINE_SPINLOCK(ntp_lock);
+
+
 /* USER_HZ period (usecs): */
 unsigned long			tick_usec = TICK_USEC;
 
 /* ACTHZ period (nsecs): */
 unsigned long			tick_nsec;
 
-u64				tick_length;
+static u64			tick_length;
 static u64			tick_length_base;
 
-static struct hrtimer		leap_timer;
-
 #define MAX_TICKADJ		500LL		/* usecs */
 #define MAX_TICKADJ_SCALED \
 	(((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
@@ -49,7 +50,7 @@ static struct hrtimer leap_timer;
 static int			time_state = TIME_OK;
 
 /* clock status bits: */
-int				time_status = STA_UNSYNC;
+static int			time_status = STA_UNSYNC;
 
 /* TAI offset (secs): */
 static long			time_tai;
@@ -133,7 +134,7 @@ static inline void pps_reset_freq_interval(void)
 /**
  * pps_clear - Clears the PPS state variables
  *
- * Must be called while holding a write on the xtime_lock
+ * Must be called while holding a write on the ntp_lock
  */
 static inline void pps_clear(void)
 {
@@ -149,7 +150,7 @@ static inline void pps_clear(void)
  * the last PPS signal. When it reaches 0, indicate that PPS signal is
  * missing.
  *
- * Must be called while holding a write on the xtime_lock
+ * Must be called while holding a write on the ntp_lock
  */
 static inline void pps_dec_valid(void)
 {
@@ -233,6 +234,17 @@ static inline void pps_fill_timex(struct timex *txc)
 
 #endif	/* CONFIG_NTP_PPS */
 
+
+/**
+ * ntp_synced - Returns 1 if the NTP status is not UNSYNC
+ *
+ */
+static inline int ntp_synced(void)
+{
+	return !(time_status & STA_UNSYNC);
+}
+
+
 /*
  * NTP methods:
  */
@@ -275,7 +287,7 @@ static inline s64 ntp_update_offset_fll(s64 offset64, long secs)
 
 	time_status |= STA_MODE;
 
-	return div_s64(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
+	return div64_long(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
 }
 
 static void ntp_update_offset(long offset)
@@ -330,11 +342,13 @@ static void ntp_update_offset(long offset)
 
 /**
  * ntp_clear - Clears the NTP state variables
- *
- * Must be called while holding a write on the xtime_lock
  */
 void ntp_clear(void)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&ntp_lock, flags);
+
 	time_adjust	= 0;		/* stop active adjtime() */
 	time_status	|= STA_UNSYNC;
 	time_maxerror	= NTP_PHASE_LIMIT;
@@ -347,63 +361,81 @@ void ntp_clear(void)
 
 	/* Clear PPS state variables */
 	pps_clear();
+	spin_unlock_irqrestore(&ntp_lock, flags);
+
+}
+
+
+u64 ntp_tick_length(void)
+{
+	unsigned long flags;
+	s64 ret;
+
+	spin_lock_irqsave(&ntp_lock, flags);
+	ret = tick_length;
+	spin_unlock_irqrestore(&ntp_lock, flags);
+	return ret;
 }
 
+
 /*
- * Leap second processing. If in leap-insert state at the end of the
- * day, the system clock is set back one second; if in leap-delete
- * state, the system clock is set ahead one second.
+ * this routine handles the overflow of the microsecond field
+ *
+ * The tricky bits of code to handle the accurate clock support
+ * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
+ * They were originally developed for SUN and DEC kernels.
+ * All the kudos should go to Dave for this stuff.
+ *
+ * Also handles leap second processing, and returns leap offset
  */
-static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
+int second_overflow(unsigned long secs)
 {
-	enum hrtimer_restart res = HRTIMER_NORESTART;
+	s64 delta;
+	int leap = 0;
+	unsigned long flags;
 
-	write_seqlock(&xtime_lock);
+	spin_lock_irqsave(&ntp_lock, flags);
 
+	/*
+	 * Leap second processing. If in leap-insert state at the end of the
+	 * day, the system clock is set back one second; if in leap-delete
+	 * state, the system clock is set ahead one second.
+	 */
 	switch (time_state) {
 	case TIME_OK:
+		if (time_status & STA_INS)
+			time_state = TIME_INS;
+		else if (time_status & STA_DEL)
+			time_state = TIME_DEL;
 		break;
 	case TIME_INS:
-		timekeeping_leap_insert(-1);
-		time_state = TIME_OOP;
-		printk(KERN_NOTICE
-			"Clock: inserting leap second 23:59:60 UTC\n");
-		hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
-		res = HRTIMER_RESTART;
+		if (secs % 86400 == 0) {
+			leap = -1;
+			time_state = TIME_OOP;
+			printk(KERN_NOTICE
+				"Clock: inserting leap second 23:59:60 UTC\n");
+		}
 		break;
 	case TIME_DEL:
-		timekeeping_leap_insert(1);
-		time_tai--;
-		time_state = TIME_WAIT;
-		printk(KERN_NOTICE
-			"Clock: deleting leap second 23:59:59 UTC\n");
+		if ((secs + 1) % 86400 == 0) {
+			leap = 1;
+			time_tai--;
+			time_state = TIME_WAIT;
+			printk(KERN_NOTICE
+				"Clock: deleting leap second 23:59:59 UTC\n");
+		}
 		break;
 	case TIME_OOP:
 		time_tai++;
 		time_state = TIME_WAIT;
-		/* fall through */
+		break;
+
 	case TIME_WAIT:
 		if (!(time_status & (STA_INS | STA_DEL)))
 			time_state = TIME_OK;
 		break;
 	}
 
-	write_sequnlock(&xtime_lock);
-
-	return res;
-}
-
-/*
- * this routine handles the overflow of the microsecond field
- *
- * The tricky bits of code to handle the accurate clock support
- * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
- * They were originally developed for SUN and DEC kernels.
- * All the kudos should go to Dave for this stuff.
- */
-void second_overflow(void)
-{
-	s64 delta;
 
 	/* Bump the maxerror field */
 	time_maxerror += MAXFREQ / NSEC_PER_USEC;
@@ -423,30 +455,34 @@ void second_overflow(void)
 		pps_dec_valid();
 
 	if (!time_adjust)
-		return;
+		goto out;
 
 	if (time_adjust > MAX_TICKADJ) {
 		time_adjust -= MAX_TICKADJ;
 		tick_length += MAX_TICKADJ_SCALED;
-		return;
+		goto out;
 	}
 
 	if (time_adjust < -MAX_TICKADJ) {
 		time_adjust += MAX_TICKADJ;
 		tick_length -= MAX_TICKADJ_SCALED;
-		return;
+		goto out;
 	}
 
 	tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
 							<< NTP_SCALE_SHIFT;
 	time_adjust = 0;
+
+
+
+out:
+	spin_unlock_irqrestore(&ntp_lock, flags);
+
+	return leap;
 }
 
 #ifdef CONFIG_GENERIC_CMOS_UPDATE
 
-/* Disable the cmos update - used by virtualization and embedded */
-int no_sync_cmos_clock __read_mostly;
-
 static void sync_cmos_clock(struct work_struct *work);
 
 static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
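The rewritten second_overflow() above no longer applies the leap second itself (the old hrtimer path called timekeeping_leap_insert() directly); it only runs the NTP state machine under ntp_lock and hands back -1, 0 or +1. A minimal sketch of the caller's side of that contract, modeled on the timekeeping.c accumulation loop later in this diff:

/* Sketch: the timekeeping core ticks xtime forward one second at a
 * time and lets second_overflow() report any leap adjustment. */
while (timekeeper.xtime_nsec >= nsecps) {
	int leap;

	timekeeper.xtime_nsec -= nsecps;
	timekeeper.xtime.tv_sec++;

	/* returns -1 (insert), +1 (delete) or 0 (no leap event) */
	leap = second_overflow(timekeeper.xtime.tv_sec);
	timekeeper.xtime.tv_sec += leap;
}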
@@ -493,35 +529,13 @@ static void sync_cmos_clock(struct work_struct *work)
 
 static void notify_cmos_timer(void)
 {
-	if (!no_sync_cmos_clock)
-		schedule_delayed_work(&sync_cmos_work, 0);
+	schedule_delayed_work(&sync_cmos_work, 0);
 }
 
 #else
 static inline void notify_cmos_timer(void) { }
 #endif
 
-/*
- * Start the leap seconds timer:
- */
-static inline void ntp_start_leap_timer(struct timespec *ts)
-{
-	long now = ts->tv_sec;
-
-	if (time_status & STA_INS) {
-		time_state = TIME_INS;
-		now += 86400 - now % 86400;
-		hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
-
-		return;
-	}
-
-	if (time_status & STA_DEL) {
-		time_state = TIME_DEL;
-		now += 86400 - (now + 1) % 86400;
-		hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
-	}
-}
 
 /*
  * Propagate a new txc->status value into the NTP state:
@@ -546,22 +560,6 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts)
 	time_status &= STA_RONLY;
 	time_status |= txc->status & ~STA_RONLY;
 
-	switch (time_state) {
-	case TIME_OK:
-		ntp_start_leap_timer(ts);
-		break;
-	case TIME_INS:
-	case TIME_DEL:
-		time_state = TIME_OK;
-		ntp_start_leap_timer(ts);
-	case TIME_WAIT:
-		if (!(time_status & (STA_INS | STA_DEL)))
-			time_state = TIME_OK;
-		break;
-	case TIME_OOP:
-		hrtimer_restart(&leap_timer);
-		break;
-	}
 }
 /*
  * Called with the xtime lock held, so we can access and modify
@@ -643,9 +641,6 @@ int do_adjtimex(struct timex *txc)
 		    (txc->tick <  900000/USER_HZ ||
 		     txc->tick > 1100000/USER_HZ))
 			return -EINVAL;
-
-		if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
-			hrtimer_cancel(&leap_timer);
 	}
 
 	if (txc->modes & ADJ_SETOFFSET) {
@@ -663,7 +658,7 @@ int do_adjtimex(struct timex *txc)
 
 	getnstimeofday(&ts);
 
-	write_seqlock_irq(&xtime_lock);
+	spin_lock_irq(&ntp_lock);
 
 	if (txc->modes & ADJ_ADJTIME) {
 		long save_adjust = time_adjust;
@@ -705,7 +700,7 @@ int do_adjtimex(struct timex *txc)
 	/* fill PPS status fields */
 	pps_fill_timex(txc);
 
-	write_sequnlock_irq(&xtime_lock);
+	spin_unlock_irq(&ntp_lock);
 
 	txc->time.tv_sec = ts.tv_sec;
 	txc->time.tv_usec = ts.tv_nsec;
@@ -903,7 +898,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
 
 	pts_norm = pps_normalize_ts(*phase_ts);
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	spin_lock_irqsave(&ntp_lock, flags);
 
 	/* clear the error bits, they will be set again if needed */
 	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
@@ -916,7 +911,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
 	 * just start the frequency interval */
 	if (unlikely(pps_fbase.tv_sec == 0)) {
 		pps_fbase = *raw_ts;
-		write_sequnlock_irqrestore(&xtime_lock, flags);
+		spin_unlock_irqrestore(&ntp_lock, flags);
 		return;
 	}
 
@@ -931,7 +926,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
 		time_status |= STA_PPSJITTER;
 		/* restart the frequency calibration interval */
 		pps_fbase = *raw_ts;
-		write_sequnlock_irqrestore(&xtime_lock, flags);
+		spin_unlock_irqrestore(&ntp_lock, flags);
 		pr_err("hardpps: PPSJITTER: bad pulse\n");
 		return;
 	}
@@ -948,7 +943,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
 
 	hardpps_update_phase(pts_norm.nsec);
 
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	spin_unlock_irqrestore(&ntp_lock, flags);
 }
 EXPORT_SYMBOL(hardpps);
 
@@ -967,6 +962,4 @@ __setup("ntp_tick_adj=", ntp_tick_adj_setup);
 void __init ntp_init(void)
 {
 	ntp_clear();
-	hrtimer_init(&leap_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
-	leap_timer.function = ntp_leap_second;
 }
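With tick_length now static, code outside ntp.c reads it only through the locked ntp_tick_length() accessor added above. The same pattern would apply to any other NTP variable exported this way: take a snapshot under ntp_lock rather than exposing the variable itself. A hypothetical accessor in that style (ntp_unsync_snapshot is a made-up name, not part of this series):

/* Hypothetical accessor in the style of ntp_tick_length(): snapshot
 * NTP state under ntp_lock instead of exporting the variable. */
int ntp_unsync_snapshot(void)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ntp_lock, flags);
	ret = !!(time_status & STA_UNSYNC);
	spin_unlock_irqrestore(&ntp_lock, flags);
	return ret;
}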
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index fd4a7b1625a2..e883f57a3cd3 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -575,11 +575,15 @@ void tick_broadcast_switch_to_oneshot(void)
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
+	if (cpumask_empty(tick_get_broadcast_mask()))
+		goto end;
 
 	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
 	bc = tick_broadcast_device.evtdev;
 	if (bc)
 		tick_broadcast_setup_oneshot(bc);
+
+end:
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 7656642e4b8e..3526038f2836 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -182,11 +182,7 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now)
 
 static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
 {
-	ktime_t now;
-
-	now = ktime_get();
-
-	update_ts_time_stats(cpu, ts, now, NULL);
+	ktime_t now = ktime_get();
 
 	ts->idle_entrytime = now;
 	ts->idle_active = 1;
@@ -562,20 +558,21 @@ void tick_nohz_idle_exit(void)
 
 	local_irq_disable();
 
-	if (ts->idle_active || (ts->inidle && ts->tick_stopped))
+	WARN_ON_ONCE(!ts->inidle);
+
+	ts->inidle = 0;
+
+	if (ts->idle_active || ts->tick_stopped)
 		now = ktime_get();
 
 	if (ts->idle_active)
 		tick_nohz_stop_idle(cpu, now);
 
-	if (!ts->inidle || !ts->tick_stopped) {
-		ts->inidle = 0;
+	if (!ts->tick_stopped) {
 		local_irq_enable();
 		return;
 	}
 
-	ts->inidle = 0;
-
 	/* Update jiffies first */
 	select_nohz_load_balancer(0);
 	tick_do_update_jiffies64(now);
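The reordering above makes tick_nohz_idle_exit() assume a strictly balanced enter/exit pairing, hence the WARN_ON_ONCE. A schematic of the idle-loop contract the function now relies on (a sketch only; the real callers live in each architecture's cpu_idle loop):

/* Sketch of the pairing that WARN_ON_ONCE(!ts->inidle) enforces */
while (!need_resched()) {
	tick_nohz_idle_enter();		/* sets ts->inidle */
	/* ... enter a low-power state ... */
	tick_nohz_idle_exit();		/* warns if ts->inidle was not set */
}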
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 0c6358186401..d66b21308f7c 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -25,6 +25,8 @@
 struct timekeeper {
 	/* Current clocksource used for timekeeping. */
 	struct clocksource *clock;
+	/* NTP adjusted clock multiplier */
+	u32	mult;
 	/* The shift value of the current clocksource. */
 	int	shift;
 
@@ -45,12 +47,47 @@ struct timekeeper {
 	/* Shift conversion between clock shifted nano seconds and
 	 * ntp shifted nano seconds. */
 	int	ntp_error_shift;
-	/* NTP adjusted clock multiplier */
-	u32	mult;
+
+	/* The current time */
+	struct timespec xtime;
+	/*
+	 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
+	 * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
+	 * at zero at system boot time, so wall_to_monotonic will be negative,
+	 * however, we will ALWAYS keep the tv_nsec part positive so we can use
+	 * the usual normalization.
+	 *
+	 * wall_to_monotonic is moved after resume from suspend for the
+	 * monotonic time not to jump. We need to add total_sleep_time to
+	 * wall_to_monotonic to get the real boot based time offset.
+	 *
+	 * - wall_to_monotonic is no longer the boot time, getboottime must be
+	 * used instead.
+	 */
+	struct timespec wall_to_monotonic;
+	/* time spent in suspend */
+	struct timespec total_sleep_time;
+	/* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
+	struct timespec raw_time;
+
+	/* Seqlock for all timekeeper values */
+	seqlock_t lock;
 };
 
 static struct timekeeper timekeeper;
 
+/*
+ * This read-write spinlock protects us from races in SMP while
+ * playing with xtime.
+ */
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
+
+
+/* flag for if timekeeping is suspended */
+int __read_mostly timekeeping_suspended;
+
+
+
 /**
  * timekeeper_setup_internals - Set up internals to use clocksource clock.
  *
@@ -135,49 +172,18 @@ static inline s64 timekeeping_get_ns_raw(void)
 	return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
 }
 
-/*
- * This read-write spinlock protects us from races in SMP while
- * playing with xtime.
- */
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
-
-/*
- * The current time
- * wall_to_monotonic is what we need to add to xtime (or xtime corrected
- * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
- * at zero at system boot time, so wall_to_monotonic will be negative,
- * however, we will ALWAYS keep the tv_nsec part positive so we can use
- * the usual normalization.
- *
- * wall_to_monotonic is moved after resume from suspend for the monotonic
- * time not to jump. We need to add total_sleep_time to wall_to_monotonic
- * to get the real boot based time offset.
- *
- * - wall_to_monotonic is no longer the boot time, getboottime must be
- * used instead.
- */
-static struct timespec xtime __attribute__ ((aligned (16)));
-static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
-static struct timespec total_sleep_time;
-
-/*
- * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
- */
-static struct timespec raw_time;
-
-/* flag for if timekeeping is suspended */
-int __read_mostly timekeeping_suspended;
-
-/* must hold xtime_lock */
-void timekeeping_leap_insert(int leapsecond)
+/* must hold write on timekeeper.lock */
+static void timekeeping_update(bool clearntp)
 {
-	xtime.tv_sec += leapsecond;
-	wall_to_monotonic.tv_sec -= leapsecond;
-	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-			timekeeper.mult);
+	if (clearntp) {
+		timekeeper.ntp_error = 0;
+		ntp_clear();
+	}
+	update_vsyscall(&timekeeper.xtime, &timekeeper.wall_to_monotonic,
+			timekeeper.clock, timekeeper.mult);
 }
 
+
 /**
  * timekeeping_forward_now - update clock to the current time
  *
@@ -202,10 +208,10 @@ static void timekeeping_forward_now(void)
 	/* If arch requires, add in gettimeoffset() */
 	nsec += arch_gettimeoffset();
 
-	timespec_add_ns(&xtime, nsec);
+	timespec_add_ns(&timekeeper.xtime, nsec);
 
 	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
-	timespec_add_ns(&raw_time, nsec);
+	timespec_add_ns(&timekeeper.raw_time, nsec);
 }
 
 /**
@@ -222,15 +228,15 @@ void getnstimeofday(struct timespec *ts)
 	WARN_ON(timekeeping_suspended);
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&timekeeper.lock);
 
-		*ts = xtime;
+		*ts = timekeeper.xtime;
 		nsecs = timekeeping_get_ns();
 
 		/* If arch requires, add in gettimeoffset() */
 		nsecs += arch_gettimeoffset();
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	timespec_add_ns(ts, nsecs);
 }
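All of the readers converted above and below share one idiom: loop on the sequence counter until a consistent snapshot has been read, now against the per-timekeeper lock instead of the global xtime_lock. A minimal sketch of that read side (snapshot_wall_time is a made-up helper name):

/* Sketch of the lockless seqlock read pattern used throughout this file */
static struct timespec snapshot_wall_time(void)
{
	struct timespec ts;
	unsigned long seq;

	do {
		seq = read_seqbegin(&timekeeper.lock);	/* open read section */
		ts = timekeeper.xtime;			/* copy protected data */
	} while (read_seqretry(&timekeeper.lock, seq)); /* retry if a writer raced */

	return ts;
}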
@@ -245,14 +251,16 @@ ktime_t ktime_get(void)
 	WARN_ON(timekeeping_suspended);
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
-		secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
-		nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
+		seq = read_seqbegin(&timekeeper.lock);
+		secs = timekeeper.xtime.tv_sec +
+				timekeeper.wall_to_monotonic.tv_sec;
+		nsecs = timekeeper.xtime.tv_nsec +
+				timekeeper.wall_to_monotonic.tv_nsec;
 		nsecs += timekeeping_get_ns();
 		/* If arch requires, add in gettimeoffset() */
 		nsecs += arch_gettimeoffset();
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 	/*
 	 * Use ktime_set/ktime_add_ns to create a proper ktime on
 	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
@@ -278,14 +286,14 @@ void ktime_get_ts(struct timespec *ts)
 	WARN_ON(timekeeping_suspended);
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
-		*ts = xtime;
-		tomono = wall_to_monotonic;
+		seq = read_seqbegin(&timekeeper.lock);
+		*ts = timekeeper.xtime;
+		tomono = timekeeper.wall_to_monotonic;
 		nsecs = timekeeping_get_ns();
 		/* If arch requires, add in gettimeoffset() */
 		nsecs += arch_gettimeoffset();
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
 				ts->tv_nsec + tomono.tv_nsec + nsecs);
@@ -313,10 +321,10 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
 	do {
 		u32 arch_offset;
 
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&timekeeper.lock);
 
-		*ts_raw = raw_time;
-		*ts_real = xtime;
+		*ts_raw = timekeeper.raw_time;
+		*ts_real = timekeeper.xtime;
 
 		nsecs_raw = timekeeping_get_ns_raw();
 		nsecs_real = timekeeping_get_ns();
@@ -326,7 +334,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
 		nsecs_raw += arch_offset;
 		nsecs_real += arch_offset;
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	timespec_add_ns(ts_raw, nsecs_raw);
 	timespec_add_ns(ts_real, nsecs_real);
@@ -365,23 +373,19 @@ int do_settimeofday(const struct timespec *tv)
 	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	write_seqlock_irqsave(&timekeeper.lock, flags);
 
 	timekeeping_forward_now();
 
-	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
-	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
-	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);
+	ts_delta.tv_sec = tv->tv_sec - timekeeper.xtime.tv_sec;
+	ts_delta.tv_nsec = tv->tv_nsec - timekeeper.xtime.tv_nsec;
+	timekeeper.wall_to_monotonic =
+			timespec_sub(timekeeper.wall_to_monotonic, ts_delta);
 
-	xtime = *tv;
-
-	timekeeper.ntp_error = 0;
-	ntp_clear();
-
-	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-			timekeeper.mult);
+	timekeeper.xtime = *tv;
+	timekeeping_update(true);
 
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
 	/* signal hrtimers about time change */
 	clock_was_set();
@@ -405,20 +409,17 @@ int timekeeping_inject_offset(struct timespec *ts)
 	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	write_seqlock_irqsave(&timekeeper.lock, flags);
 
 	timekeeping_forward_now();
 
-	xtime = timespec_add(xtime, *ts);
-	wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);
-
-	timekeeper.ntp_error = 0;
-	ntp_clear();
+	timekeeper.xtime = timespec_add(timekeeper.xtime, *ts);
+	timekeeper.wall_to_monotonic =
+			timespec_sub(timekeeper.wall_to_monotonic, *ts);
 
-	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-			timekeeper.mult);
+	timekeeping_update(true);
 
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
 	/* signal hrtimers about time change */
 	clock_was_set();
@@ -435,9 +436,12 @@ EXPORT_SYMBOL(timekeeping_inject_offset);
 static int change_clocksource(void *data)
 {
 	struct clocksource *new, *old;
+	unsigned long flags;
 
 	new = (struct clocksource *) data;
 
+	write_seqlock_irqsave(&timekeeper.lock, flags);
+
 	timekeeping_forward_now();
 	if (!new->enable || new->enable(new) == 0) {
 		old = timekeeper.clock;
@@ -445,6 +449,10 @@ static int change_clocksource(void *data)
 		if (old->disable)
 			old->disable(old);
 	}
+	timekeeping_update(true);
+
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
+
 	return 0;
 }
 
@@ -490,11 +498,11 @@ void getrawmonotonic(struct timespec *ts)
 	s64 nsecs;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&timekeeper.lock);
 		nsecs = timekeeping_get_ns_raw();
-		*ts = raw_time;
+		*ts = timekeeper.raw_time;
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	timespec_add_ns(ts, nsecs);
 }
@@ -510,24 +518,30 @@ int timekeeping_valid_for_hres(void)
 	int ret;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&timekeeper.lock);
 
 		ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	return ret;
 }
 
 /**
  * timekeeping_max_deferment - Returns max time the clocksource can be deferred
- *
- * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
- * ensure that the clocksource does not change!
  */
 u64 timekeeping_max_deferment(void)
 {
-	return timekeeper.clock->max_idle_ns;
+	unsigned long seq;
+	u64 ret;
+	do {
+		seq = read_seqbegin(&timekeeper.lock);
+
+		ret = timekeeper.clock->max_idle_ns;
+
+	} while (read_seqretry(&timekeeper.lock, seq));
+
+	return ret;
 }
 
 /**
@@ -572,28 +586,29 @@ void __init timekeeping_init(void)
 	read_persistent_clock(&now);
 	read_boot_clock(&boot);
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	seqlock_init(&timekeeper.lock);
 
 	ntp_init();
 
+	write_seqlock_irqsave(&timekeeper.lock, flags);
 	clock = clocksource_default_clock();
 	if (clock->enable)
 		clock->enable(clock);
 	timekeeper_setup_internals(clock);
 
-	xtime.tv_sec = now.tv_sec;
-	xtime.tv_nsec = now.tv_nsec;
-	raw_time.tv_sec = 0;
-	raw_time.tv_nsec = 0;
+	timekeeper.xtime.tv_sec = now.tv_sec;
+	timekeeper.xtime.tv_nsec = now.tv_nsec;
+	timekeeper.raw_time.tv_sec = 0;
+	timekeeper.raw_time.tv_nsec = 0;
 	if (boot.tv_sec == 0 && boot.tv_nsec == 0) {
-		boot.tv_sec = xtime.tv_sec;
-		boot.tv_nsec = xtime.tv_nsec;
+		boot.tv_sec = timekeeper.xtime.tv_sec;
+		boot.tv_nsec = timekeeper.xtime.tv_nsec;
 	}
-	set_normalized_timespec(&wall_to_monotonic,
+	set_normalized_timespec(&timekeeper.wall_to_monotonic,
 				-boot.tv_sec, -boot.tv_nsec);
-	total_sleep_time.tv_sec = 0;
-	total_sleep_time.tv_nsec = 0;
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	timekeeper.total_sleep_time.tv_sec = 0;
+	timekeeper.total_sleep_time.tv_nsec = 0;
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 }
 
 /* time in seconds when suspend began */
@@ -614,9 +629,11 @@ static void __timekeeping_inject_sleeptime(struct timespec *delta)
 		return;
 	}
 
-	xtime = timespec_add(xtime, *delta);
-	wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta);
-	total_sleep_time = timespec_add(total_sleep_time, *delta);
+	timekeeper.xtime = timespec_add(timekeeper.xtime, *delta);
+	timekeeper.wall_to_monotonic =
+			timespec_sub(timekeeper.wall_to_monotonic, *delta);
+	timekeeper.total_sleep_time = timespec_add(
+					timekeeper.total_sleep_time, *delta);
 }
 
 
@@ -640,17 +657,15 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
 	if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
 		return;
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	write_seqlock_irqsave(&timekeeper.lock, flags);
+
 	timekeeping_forward_now();
 
 	__timekeeping_inject_sleeptime(delta);
 
-	timekeeper.ntp_error = 0;
-	ntp_clear();
-	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-			timekeeper.mult);
+	timekeeping_update(true);
 
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
 	/* signal hrtimers about time change */
 	clock_was_set();
@@ -673,7 +688,7 @@ static void timekeeping_resume(void)
 
 	clocksource_resume();
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	write_seqlock_irqsave(&timekeeper.lock, flags);
 
 	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
 		ts = timespec_sub(ts, timekeeping_suspend_time);
@@ -683,7 +698,7 @@ static void timekeeping_resume(void)
 	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
 	timekeeper.ntp_error = 0;
 	timekeeping_suspended = 0;
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
 	touch_softlockup_watchdog();
 
@@ -701,7 +716,7 @@ static int timekeeping_suspend(void)
 
 	read_persistent_clock(&timekeeping_suspend_time);
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	write_seqlock_irqsave(&timekeeper.lock, flags);
 	timekeeping_forward_now();
 	timekeeping_suspended = 1;
 
@@ -711,7 +726,7 @@ static int timekeeping_suspend(void)
 	 * try to compensate so the difference in system time
 	 * and persistent_clock time stays close to constant.
 	 */
-	delta = timespec_sub(xtime, timekeeping_suspend_time);
+	delta = timespec_sub(timekeeper.xtime, timekeeping_suspend_time);
 	delta_delta = timespec_sub(delta, old_delta);
 	if (abs(delta_delta.tv_sec) >= 2) {
 		/*
@@ -724,7 +739,7 @@ static int timekeeping_suspend(void)
 		timekeeping_suspend_time =
 			timespec_add(timekeeping_suspend_time, delta_delta);
 	}
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
 	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
 	clocksource_suspend();
@@ -775,7 +790,7 @@ static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
 	 * Now calculate the error in (1 << look_ahead) ticks, but first
 	 * remove the single look ahead already included in the error.
	 */
-	tick_error = tick_length >> (timekeeper.ntp_error_shift + 1);
+	tick_error = ntp_tick_length() >> (timekeeper.ntp_error_shift + 1);
 	tick_error -= timekeeper.xtime_interval >> 1;
 	error = ((error - tick_error) >> look_ahead) + tick_error;
 
@@ -807,7 +822,7 @@ static void timekeeping_adjust(s64 offset)
 	int adj;
 
 	/*
-	 * The point of this is to check if the error is greater then half
+	 * The point of this is to check if the error is greater than half
 	 * an interval.
 	 *
 	 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
@@ -815,7 +830,7 @@ static void timekeeping_adjust(s64 offset)
 	 * Note we subtract one in the shift, so that error is really error*2.
 	 * This "saves" dividing(shifting) interval twice, but keeps the
 	 * (error > interval) comparison as still measuring if error is
-	 * larger then half an interval.
+	 * larger than half an interval.
 	 *
 	 * Note: It does not "save" on aggravation when reading the code.
 	 */
@@ -823,7 +838,7 @@ static void timekeeping_adjust(s64 offset)
 	if (error > interval) {
 		/*
 		 * We now divide error by 4(via shift), which checks if
-		 * the error is greater then twice the interval.
+		 * the error is greater than twice the interval.
 		 * If it is greater, we need a bigadjust, if its smaller,
 		 * we can adjust by 1.
 		 */
@@ -854,13 +869,15 @@ static void timekeeping_adjust(s64 offset)
 	} else	/* No adjustment needed */
 		return;
 
-	WARN_ONCE(timekeeper.clock->maxadj &&
-		(timekeeper.mult + adj > timekeeper.clock->mult +
-					timekeeper.clock->maxadj),
-		"Adjusting %s more then 11%% (%ld vs %ld)\n",
+	if (unlikely(timekeeper.clock->maxadj &&
+			(timekeeper.mult + adj >
+			timekeeper.clock->mult + timekeeper.clock->maxadj))) {
+		printk_once(KERN_WARNING
+			"Adjusting %s more than 11%% (%ld vs %ld)\n",
 			timekeeper.clock->name, (long)timekeeper.mult + adj,
 			(long)timekeeper.clock->mult +
 					timekeeper.clock->maxadj);
+	}
 	/*
 	 * So the following can be confusing.
 	 *
@@ -932,7 +949,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 	u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
 	u64 raw_nsecs;
 
-	/* If the offset is smaller then a shifted interval, do nothing */
+	/* If the offset is smaller than a shifted interval, do nothing */
 	if (offset < timekeeper.cycle_interval<<shift)
 		return offset;
 
@@ -942,23 +959,25 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 
 	timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
 	while (timekeeper.xtime_nsec >= nsecps) {
+		int leap;
 		timekeeper.xtime_nsec -= nsecps;
-		xtime.tv_sec++;
-		second_overflow();
+		timekeeper.xtime.tv_sec++;
+		leap = second_overflow(timekeeper.xtime.tv_sec);
+		timekeeper.xtime.tv_sec += leap;
 	}
 
 	/* Accumulate raw time */
 	raw_nsecs = timekeeper.raw_interval << shift;
-	raw_nsecs += raw_time.tv_nsec;
+	raw_nsecs += timekeeper.raw_time.tv_nsec;
 	if (raw_nsecs >= NSEC_PER_SEC) {
 		u64 raw_secs = raw_nsecs;
 		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
-		raw_time.tv_sec += raw_secs;
+		timekeeper.raw_time.tv_sec += raw_secs;
 	}
-	raw_time.tv_nsec = raw_nsecs;
+	timekeeper.raw_time.tv_nsec = raw_nsecs;
 
 	/* Accumulate error between NTP and clock interval */
-	timekeeper.ntp_error += tick_length << shift;
+	timekeeper.ntp_error += ntp_tick_length() << shift;
 	timekeeper.ntp_error -=
 		(timekeeper.xtime_interval + timekeeper.xtime_remainder) <<
 				(timekeeper.ntp_error_shift + shift);
@@ -970,17 +989,19 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 /**
  * update_wall_time - Uses the current clocksource to increment the wall time
  *
- * Called from the timer interrupt, must hold a write on xtime_lock.
 */
 static void update_wall_time(void)
 {
 	struct clocksource *clock;
 	cycle_t offset;
 	int shift = 0, maxshift;
+	unsigned long flags;
+
+	write_seqlock_irqsave(&timekeeper.lock, flags);
 
 	/* Make sure we're fully resumed: */
 	if (unlikely(timekeeping_suspended))
-		return;
+		goto out;
 
 	clock = timekeeper.clock;
 
@@ -989,20 +1010,21 @@ static void update_wall_time(void)
 #else
 	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
 #endif
-	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;
+	timekeeper.xtime_nsec = (s64)timekeeper.xtime.tv_nsec <<
+						timekeeper.shift;
 
 	/*
 	 * With NO_HZ we may have to accumulate many cycle_intervals
 	 * (think "ticks") worth of time at once. To do this efficiently,
 	 * we calculate the largest doubling multiple of cycle_intervals
-	 * that is smaller then the offset. We then accumulate that
+	 * that is smaller than the offset. We then accumulate that
 	 * chunk in one go, and then try to consume the next smaller
 	 * doubled multiple.
 	 */
 	shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
 	shift = max(0, shift);
-	/* Bound shift to one less then what overflows tick_length */
-	maxshift = (8*sizeof(tick_length) - (ilog2(tick_length)+1)) - 1;
+	/* Bound shift to one less than what overflows tick_length */
+	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
 	shift = min(shift, maxshift);
 	while (offset >= timekeeper.cycle_interval) {
 		offset = logarithmic_accumulation(offset, shift);
@@ -1040,24 +1062,30 @@ static void update_wall_time(void)
 	 * Store full nanoseconds into xtime after rounding it up and
 	 * add the remainder to the error difference.
 	 */
-	xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
-	timekeeper.xtime_nsec -= (s64) xtime.tv_nsec << timekeeper.shift;
+	timekeeper.xtime.tv_nsec = ((s64)timekeeper.xtime_nsec >>
+						timekeeper.shift) + 1;
+	timekeeper.xtime_nsec -= (s64)timekeeper.xtime.tv_nsec <<
+						timekeeper.shift;
 	timekeeper.ntp_error += timekeeper.xtime_nsec <<
 				timekeeper.ntp_error_shift;
 
 	/*
 	 * Finally, make sure that after the rounding
-	 * xtime.tv_nsec isn't larger then NSEC_PER_SEC
+	 * xtime.tv_nsec isn't larger than NSEC_PER_SEC
 	 */
-	if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
-		xtime.tv_nsec -= NSEC_PER_SEC;
-		xtime.tv_sec++;
-		second_overflow();
+	if (unlikely(timekeeper.xtime.tv_nsec >= NSEC_PER_SEC)) {
+		int leap;
+		timekeeper.xtime.tv_nsec -= NSEC_PER_SEC;
+		timekeeper.xtime.tv_sec++;
+		leap = second_overflow(timekeeper.xtime.tv_sec);
+		timekeeper.xtime.tv_sec += leap;
 	}
 
-	/* check to see if there is a new clocksource to use */
-	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-			timekeeper.mult);
+	timekeeping_update(false);
+
+out:
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
+
 }
 
 /**
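Note the shape update_wall_time() takes after the hunk above: it now acquires timekeeper.lock itself instead of being called under xtime_lock, so the suspended early exit must leave through the unlock path. The write-side skeleton, reduced to its control flow (a sketch of the code above, not a complete implementation):

/* Sketch of the new write-side skeleton of update_wall_time() */
static void update_wall_time(void)
{
	unsigned long flags;

	write_seqlock_irqsave(&timekeeper.lock, flags);

	if (unlikely(timekeeping_suspended))
		goto out;		/* must still drop the lock */

	/* ... accumulate cycles, apply NTP adjustment, update vsyscall ... */

out:
	write_sequnlock_irqrestore(&timekeeper.lock, flags);
}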
@@ -1074,8 +1102,10 @@ static void update_wall_time(void)
 void getboottime(struct timespec *ts)
 {
 	struct timespec boottime = {
-		.tv_sec = wall_to_monotonic.tv_sec + total_sleep_time.tv_sec,
-		.tv_nsec = wall_to_monotonic.tv_nsec + total_sleep_time.tv_nsec
+		.tv_sec = timekeeper.wall_to_monotonic.tv_sec +
+				timekeeper.total_sleep_time.tv_sec,
+		.tv_nsec = timekeeper.wall_to_monotonic.tv_nsec +
+				timekeeper.total_sleep_time.tv_nsec
 	};
 
 	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
@@ -1101,13 +1131,13 @@ void get_monotonic_boottime(struct timespec *ts)
 	WARN_ON(timekeeping_suspended);
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
-		*ts = xtime;
-		tomono = wall_to_monotonic;
-		sleep = total_sleep_time;
+		seq = read_seqbegin(&timekeeper.lock);
+		*ts = timekeeper.xtime;
+		tomono = timekeeper.wall_to_monotonic;
+		sleep = timekeeper.total_sleep_time;
 		nsecs = timekeeping_get_ns();
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
 			ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
@@ -1137,19 +1167,19 @@ EXPORT_SYMBOL_GPL(ktime_get_boottime);
 */
 void monotonic_to_bootbased(struct timespec *ts)
 {
-	*ts = timespec_add(*ts, total_sleep_time);
+	*ts = timespec_add(*ts, timekeeper.total_sleep_time);
 }
 EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
 
 unsigned long get_seconds(void)
 {
-	return xtime.tv_sec;
+	return timekeeper.xtime.tv_sec;
 }
 EXPORT_SYMBOL(get_seconds);
 
 struct timespec __current_kernel_time(void)
 {
-	return xtime;
+	return timekeeper.xtime;
 }
 
 struct timespec current_kernel_time(void)
@@ -1158,10 +1188,10 @@ struct timespec current_kernel_time(void)
 	unsigned long seq;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&timekeeper.lock);
 
-		now = xtime;
-	} while (read_seqretry(&xtime_lock, seq));
+		now = timekeeper.xtime;
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	return now;
 }
@@ -1173,11 +1203,11 @@ struct timespec get_monotonic_coarse(void)
 	unsigned long seq;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&timekeeper.lock);
 
-		now = xtime;
-		mono = wall_to_monotonic;
-	} while (read_seqretry(&xtime_lock, seq));
+		now = timekeeper.xtime;
+		mono = timekeeper.wall_to_monotonic;
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
 				now.tv_nsec + mono.tv_nsec);
@@ -1209,11 +1239,11 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
 	unsigned long seq;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
-		*xtim = xtime;
-		*wtom = wall_to_monotonic;
-		*sleep = total_sleep_time;
-	} while (read_seqretry(&xtime_lock, seq));
+		seq = read_seqbegin(&timekeeper.lock);
+		*xtim = timekeeper.xtime;
+		*wtom = timekeeper.wall_to_monotonic;
+		*sleep = timekeeper.total_sleep_time;
+	} while (read_seqretry(&timekeeper.lock, seq));
 }
 
 /**
@@ -1225,11 +1255,14 @@ ktime_t ktime_get_monotonic_offset(void)
 	struct timespec wtom;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
-		wtom = wall_to_monotonic;
-	} while (read_seqretry(&xtime_lock, seq));
+		seq = read_seqbegin(&timekeeper.lock);
+		wtom = timekeeper.wall_to_monotonic;
+	} while (read_seqretry(&timekeeper.lock, seq));
+
 	return timespec_to_ktime(wtom);
 }
+EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
+
 
 /**
  * xtime_update() - advances the timekeeping infrastructure