author		John Stultz <john.stultz@linaro.org>	2017-06-08 19:44:21 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2017-06-20 04:41:50 -0400
commit		3d88d56c5873f6eebe23e05c3da701960146b801 (patch)
tree		a689f1d4acc29fa7966cd92587862fffd1e47e8e
parent		ceea5e3771ed2378668455fa21861bead7504df5 (diff)
time: Fix CLOCK_MONOTONIC_RAW sub-nanosecond accounting
Due to how the MONOTONIC_RAW accumulation logic was handled, there is the
potential for a 1ns discontinuity when we do accumulations. This small
discontinuity has for the most part gone un-noticed, but since ARM64 enabled
CLOCK_MONOTONIC_RAW in their vDSO clock_gettime implementation, we've seen
failures with the inconsistency-check test in kselftest.

This patch addresses the issue by using the same sub-ns accumulation handling
that CLOCK_MONOTONIC uses, which avoids the issue for in-kernel users.

Since the ARM64 vDSO implementation has its own clock_gettime calculation
logic, this patch reduces the frequency of errors, but failures are still
seen. The ARM64 vDSO will need to be updated to include the sub-nanosecond
xtime_nsec values in its calculation for this issue to be completely fixed.

Signed-off-by: John Stultz <john.stultz@linaro.org>
Tested-by: Daniel Mentz <danielmentz@google.com>
Cc: Prarit Bhargava <prarit@redhat.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Richard Cochran <richardcochran@gmail.com>
Cc: Stephen Boyd <stephen.boyd@linaro.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: "stable #4.8+" <stable@vger.kernel.org>
Cc: Miroslav Lichvar <mlichvar@redhat.com>
Link: http://lkml.kernel.org/r/1496965462-20003-3-git-send-email-john.stultz@linaro.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
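To illustrate the failure mode the message describes: if each accumulation
converts the interval to whole nanoseconds up front, the sub-nanosecond
fraction is dropped every time, and the raw clock drifts relative to an exact
fixed-point sum. A stand-alone sketch of the two accumulation styles, using
hypothetical mult/shift values rather than kernel code:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical clocksource: one NTP interval is worth about
	 * 999999.6 ns, stored as fixed-point "shifted" ns (ns << shift). */
	uint32_t shift = 8;
	uint64_t interval_snsec = (999999ULL << shift) + 154;

	uint64_t truncated_ns = 0;   /* old style: whole ns per interval */
	uint64_t shifted_snsec = 0;  /* new style: exact in shifted units */

	for (int i = 0; i < 10; i++) {
		truncated_ns  += interval_snsec >> shift;  /* drops ~0.6 ns each time */
		shifted_snsec += interval_snsec;
	}

	printf("truncated: %llu ns\n", (unsigned long long)truncated_ns);
	printf("exact:     %llu ns\n", (unsigned long long)(shifted_snsec >> shift));
	return 0;
}

After ten intervals the truncated sum already lags the exact one by several
nanoseconds; keeping the sum in shifted units, as CLOCK_MONOTONIC already
does, makes each accumulation exact and defers truncation to read-out.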
-rw-r--r--	include/linux/timekeeper_internal.h	4
-rw-r--r--	kernel/time/timekeeping.c		19
2 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index e9834ada4d0c..f7043ccca81c 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -57,7 +57,7 @@ struct tk_read_base {
  *			interval.
  * @xtime_remainder:	Shifted nano seconds left over when rounding
  *			@cycle_interval
- * @raw_interval:	Raw nano seconds accumulated per NTP interval.
+ * @raw_interval:	Shifted raw nano seconds accumulated per NTP interval.
  * @ntp_error:		Difference between accumulated time and NTP time in ntp
  *			shifted nano seconds.
  * @ntp_error_shift:	Shift conversion between clock shifted nano seconds and
@@ -99,7 +99,7 @@ struct timekeeper {
 	u64			cycle_interval;
 	u64			xtime_interval;
 	s64			xtime_remainder;
-	u32			raw_interval;
+	u64			raw_interval;
 	/* The ntp_tick_length() value currently being used.
 	 * This cached copy ensures we consistently apply the tick
 	 * length for an entire tick, as ntp_tick_length may change
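The widening from u32 to u64 follows from the unit change: raw_interval now
holds shifted nanoseconds (cycles * mult) rather than whole nanoseconds, and
for plausible clocksource parameters that value no longer fits in 32 bits. A
quick check with hypothetical numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical but plausible parameters: shift = 24 and a 1 ms
	 * NTP interval. In shifted units that is ns << shift. */
	uint32_t shift = 24;
	uint64_t interval_ns = 1000000;            /* 1 ms */
	uint64_t snsec = interval_ns << shift;     /* ~1.7e13, well past 2^32 */

	printf("shifted interval: %llu, u32 max: %u\n",
	       (unsigned long long)snsec, UINT32_MAX);
	return 0;
}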
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index eff94cb8e89e..b602c48cb841 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -280,7 +280,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 	/* Go back from cycles -> shifted ns */
 	tk->xtime_interval = interval * clock->mult;
 	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
-	tk->raw_interval = (interval * clock->mult) >> clock->shift;
+	tk->raw_interval = interval * clock->mult;
 
 	/* if changing clocks, convert xtime_nsec shift units */
 	if (old_clock) {
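Dropping the `>> clock->shift` here keeps raw_interval in the same fixed-point
units as xtime_interval, so the truncation that used to happen once per
interval is deferred to read-out. A minimal sketch of the two conversion
steps, assuming made-up mult/shift values rather than any particular
clocksource:

#include <stdio.h>
#include <stdint.h>

/* Sketch of the clocksource fixed-point conversion; mult and shift are
 * hypothetical per-clocksource values, not the kernel's helpers. */
static uint64_t cycles_to_snsec(uint64_t cycles, uint32_t mult)
{
	return cycles * mult;        /* shifted ns: real ns == value >> shift */
}

static uint64_t snsec_to_nsec(uint64_t snsec, uint32_t shift)
{
	return snsec >> shift;       /* the only truncating step */
}

int main(void)
{
	/* Roughly a 24 MHz counter with shift = 32: 24000 cycles ~ 1000 ns. */
	uint64_t snsec = cycles_to_snsec(24000, 178956971);
	printf("%llu ns\n", (unsigned long long)snsec_to_nsec(snsec, 32));
	return 0;
}

Because the shifted value is exact, repeated accumulation no longer compounds
the sub-nanosecond loss; only the final conversion to whole ns truncates.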
@@ -1996,7 +1996,7 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
 				    u32 shift, unsigned int *clock_set)
 {
 	u64 interval = tk->cycle_interval << shift;
-	u64 raw_nsecs;
+	u64 snsec_per_sec;
 
 	/* If the offset is smaller than a shifted interval, do nothing */
 	if (offset < interval)
@@ -2011,14 +2011,15 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
 	*clock_set |= accumulate_nsecs_to_secs(tk);
 
 	/* Accumulate raw time */
-	raw_nsecs = (u64)tk->raw_interval << shift;
-	raw_nsecs += tk->raw_time.tv_nsec;
-	if (raw_nsecs >= NSEC_PER_SEC) {
-		u64 raw_secs = raw_nsecs;
-		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
-		tk->raw_time.tv_sec += raw_secs;
-	}
-	tk->raw_time.tv_nsec = raw_nsecs;
+	tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
+	tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
+	snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
+	while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
+		tk->tkr_raw.xtime_nsec -= snsec_per_sec;
+		tk->raw_time.tv_sec++;
+	}
+	tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift;
+	tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
 
 	/* Accumulate error between NTP and clock interval */
 	tk->ntp_error += tk->ntp_tick << shift;
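The rewritten block mirrors CLOCK_MONOTONIC's sub-ns handling: fold the
current tv_nsec into shifted space, add the shifted interval, peel off whole
seconds, then convert back to whole ns and leave the sub-ns remainder parked
in tkr_raw.xtime_nsec. A user-space sketch of the same pattern, with a
made-up shift and interval and the logarithmic `<< shift` batching omitted
for clarity:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint32_t shift = 8;           /* clocksource fixed-point shift (tkr_raw.shift role) */
	uint64_t raw_interval = (999999ULL << shift) + 154;  /* shifted ns per tick */

	uint64_t xtime_nsec = 0;      /* shifted ns, keeps the sub-ns remainder */
	uint64_t tv_sec = 0;
	uint64_t tv_nsec = 0;
	uint64_t snsec_per_sec = NSEC_PER_SEC << shift;

	for (int tick = 0; tick < 2000; tick++) {
		xtime_nsec += tv_nsec << shift;   /* fold current ns back in */
		xtime_nsec += raw_interval;       /* accumulate one interval exactly */
		while (xtime_nsec >= snsec_per_sec) {
			xtime_nsec -= snsec_per_sec;
			tv_sec++;
		}
		tv_nsec = xtime_nsec >> shift;    /* whole ns for readers */
		xtime_nsec -= tv_nsec << shift;   /* park the sub-ns remainder */
	}

	printf("%llu.%09llu\n", (unsigned long long)tv_sec,
	       (unsigned long long)tv_nsec);
	return 0;
}

Because the remainder survives from one accumulation to the next, no
fractional nanosecond is ever discarded, which removes the 1ns discontinuity
for in-kernel readers of CLOCK_MONOTONIC_RAW.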