Diffstat (limited to 'kernel/time/timekeeping.c')
-rw-r--r-- | kernel/time/timekeeping.c | 46
1 file changed, 26 insertions, 20 deletions
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index b602c48cb841..cedafa008de5 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -72,6 +72,10 @@ static inline void tk_normalize_xtime(struct timekeeper *tk)
                 tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
                 tk->xtime_sec++;
         }
+        while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
+                tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
+                tk->raw_sec++;
+        }
 }
 
 static inline struct timespec64 tk_xtime(struct timekeeper *tk)
@@ -285,12 +289,14 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
         /* if changing clocks, convert xtime_nsec shift units */
         if (old_clock) {
                 int shift_change = clock->shift - old_clock->shift;
-                if (shift_change < 0)
+                if (shift_change < 0) {
                         tk->tkr_mono.xtime_nsec >>= -shift_change;
-                else
+                        tk->tkr_raw.xtime_nsec >>= -shift_change;
+                } else {
                         tk->tkr_mono.xtime_nsec <<= shift_change;
+                        tk->tkr_raw.xtime_nsec <<= shift_change;
+                }
         }
-        tk->tkr_raw.xtime_nsec = 0;
 
         tk->tkr_mono.shift = clock->shift;
         tk->tkr_raw.shift = clock->shift;
@@ -510,6 +516,7 @@ static void halt_fast_timekeeper(struct timekeeper *tk)
 }
 
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
+#warning Please contact your maintainers, as GENERIC_TIME_VSYSCALL_OLD compatibity will disappear soon.
 
 static inline void update_vsyscall(struct timekeeper *tk)
 {
@@ -619,9 +626,6 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
         nsec = (u32) tk->wall_to_monotonic.tv_nsec;
         tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
 
-        /* Update the monotonic raw base */
-        tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time);
-
         /*
          * The sum of the nanoseconds portions of xtime and
          * wall_to_monotonic can be greater/equal one second. Take
@@ -631,6 +635,11 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
         if (nsec >= NSEC_PER_SEC)
                 seconds++;
         tk->ktime_sec = seconds;
+
+        /* Update the monotonic raw base */
+        seconds = tk->raw_sec;
+        nsec = (u32)(tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift);
+        tk->tkr_raw.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
 }
 
 /* must hold timekeeper_lock */
@@ -672,7 +681,6 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
 static void timekeeping_forward_now(struct timekeeper *tk)
 {
         u64 cycle_now, delta;
-        u64 nsec;
 
         cycle_now = tk_clock_read(&tk->tkr_mono);
         delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
@@ -684,10 +692,13 @@ static void timekeeping_forward_now(struct timekeeper *tk)
         /* If arch requires, add in get_arch_timeoffset() */
         tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;
 
-        tk_normalize_xtime(tk);
 
-        nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift);
-        timespec64_add_ns(&tk->raw_time, nsec);
+        tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
+
+        /* If arch requires, add in get_arch_timeoffset() */
+        tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;
+
+        tk_normalize_xtime(tk);
 }
 
 /**
@@ -1373,19 +1384,18 @@ int timekeeping_notify(struct clocksource *clock)
 void getrawmonotonic64(struct timespec64 *ts)
 {
         struct timekeeper *tk = &tk_core.timekeeper;
-        struct timespec64 ts64;
         unsigned long seq;
         u64 nsecs;
 
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
+                ts->tv_sec = tk->raw_sec;
                 nsecs = timekeeping_get_ns(&tk->tkr_raw);
-                ts64 = tk->raw_time;
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
-        timespec64_add_ns(&ts64, nsecs);
-        *ts = ts64;
+        ts->tv_nsec = 0;
+        timespec64_add_ns(ts, nsecs);
 }
 EXPORT_SYMBOL(getrawmonotonic64);
 
@@ -1509,8 +1519,7 @@ void __init timekeeping_init(void)
         tk_setup_internals(tk, clock);
 
         tk_set_xtime(tk, &now);
-        tk->raw_time.tv_sec = 0;
-        tk->raw_time.tv_nsec = 0;
+        tk->raw_sec = 0;
         if (boot.tv_sec == 0 && boot.tv_nsec == 0)
                 boot = tk_xtime(tk);
 
@@ -2011,15 +2020,12 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
         *clock_set |= accumulate_nsecs_to_secs(tk);
 
         /* Accumulate raw time */
-        tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
         tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
         snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
         while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
                 tk->tkr_raw.xtime_nsec -= snsec_per_sec;
-                tk->raw_time.tv_sec++;
+                tk->raw_sec++;
         }
-        tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift;
-        tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
 
         /* Accumulate error between NTP and clock interval */
         tk->ntp_error += tk->ntp_tick << shift;
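For context on the bookkeeping this change relies on: the raw clock's sub-second state is kept as nanoseconds left-shifted by the clocksource shift in tkr_raw.xtime_nsec, and whole seconds are carried into the new raw_sec field, as the loops in tk_normalize_xtime() and logarithmic_accumulation() above show. The following standalone sketch is illustrative only (a toy struct and made-up values, not kernel code) and demonstrates the same shifted accumulate-and-carry pattern:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Toy stand-in for the timekeeper fields touched by the diff above. */
struct toy_raw_clock {
        uint64_t xtime_nsec;   /* nanoseconds << shift (fixed point) */
        uint32_t shift;        /* clocksource shift */
        uint64_t raw_sec;      /* whole seconds of the raw clock */
};

/* Accumulate a shifted-nanosecond interval, carrying full seconds,
 * in the style of the while loops added by this patch. */
static void toy_accumulate(struct toy_raw_clock *c, uint64_t interval_shifted)
{
        uint64_t snsec_per_sec = NSEC_PER_SEC << c->shift;

        c->xtime_nsec += interval_shifted;
        while (c->xtime_nsec >= snsec_per_sec) {
                c->xtime_nsec -= snsec_per_sec;
                c->raw_sec++;
        }
}

int main(void)
{
        struct toy_raw_clock c = { .xtime_nsec = 0, .shift = 8, .raw_sec = 0 };

        /* Accumulate 1.5 seconds in three 0.5 s steps (arbitrary values). */
        for (int i = 0; i < 3; i++)
                toy_accumulate(&c, (NSEC_PER_SEC / 2) << c.shift);

        printf("raw time: %llu s + %llu ns\n",
               (unsigned long long)c.raw_sec,
               (unsigned long long)(c.xtime_nsec >> c.shift));
        return 0;
}

Keeping the sub-second state in shifted form avoids any per-interval rounding; the shift is applied only when the value is read back, as tk_update_ktime_data() does above when it rebuilds tkr_raw.base from raw_sec and tkr_raw.xtime_nsec >> tkr_raw.shift.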