aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJohn Stultz <john.stultz@linaro.org>2017-05-22 20:20:20 -0400
committerJohn Stultz <john.stultz@linaro.org>2017-06-21 01:13:59 -0400
commitfc6eead7c1e2e5376c25d2795d4539fdacbc0648 (patch)
treef27d96be2ac36569bf21d60fddb2f397a6ec2e06
parent8e6cec1c7c5afa489687c90be15d6ed82c742975 (diff)
time: Clean up CLOCK_MONOTONIC_RAW time handling
Now that we fixed the sub-ns handling for CLOCK_MONOTONIC_RAW, remove the duplicative tk->raw_time.tv_nsec, which can be stored in tk->tkr_raw.xtime_nsec (similarly to how it's handled for monotonic time). Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@kernel.org> Cc: Miroslav Lichvar <mlichvar@redhat.com> Cc: Richard Cochran <richardcochran@gmail.com> Cc: Prarit Bhargava <prarit@redhat.com> Cc: Stephen Boyd <stephen.boyd@linaro.org> Cc: Kevin Brodsky <kevin.brodsky@arm.com> Cc: Will Deacon <will.deacon@arm.com> Cc: Daniel Mentz <danielmentz@google.com> Tested-by: Daniel Mentz <danielmentz@google.com> Signed-off-by: John Stultz <john.stultz@linaro.org>
-rw-r--r--arch/arm64/kernel/vdso.c6
-rw-r--r--include/linux/timekeeper_internal.h4
-rw-r--r--kernel/time/timekeeping.c45
3 files changed, 29 insertions, 26 deletions
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index d0cb007fa482..7492d9009610 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -220,10 +220,8 @@ void update_vsyscall(struct timekeeper *tk)
220 if (!use_syscall) { 220 if (!use_syscall) {
221 /* tkr_mono.cycle_last == tkr_raw.cycle_last */ 221 /* tkr_mono.cycle_last == tkr_raw.cycle_last */
222 vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; 222 vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
223 vdso_data->raw_time_sec = tk->raw_time.tv_sec; 223 vdso_data->raw_time_sec = tk->raw_sec;
224 vdso_data->raw_time_nsec = (tk->raw_time.tv_nsec << 224 vdso_data->raw_time_nsec = tk->tkr_raw.xtime_nsec;
225 tk->tkr_raw.shift) +
226 tk->tkr_raw.xtime_nsec;
227 vdso_data->xtime_clock_sec = tk->xtime_sec; 225 vdso_data->xtime_clock_sec = tk->xtime_sec;
228 vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec; 226 vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
229 vdso_data->cs_mono_mult = tk->tkr_mono.mult; 227 vdso_data->cs_mono_mult = tk->tkr_mono.mult;
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index f7043ccca81c..0a0a53daf2a2 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -51,7 +51,7 @@ struct tk_read_base {
51 * @clock_was_set_seq: The sequence number of clock was set events 51 * @clock_was_set_seq: The sequence number of clock was set events
52 * @cs_was_changed_seq: The sequence number of clocksource change events 52 * @cs_was_changed_seq: The sequence number of clocksource change events
53 * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second 53 * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second
54 * @raw_time: Monotonic raw base time in timespec64 format 54 * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds
55 * @cycle_interval: Number of clock cycles in one NTP interval 55 * @cycle_interval: Number of clock cycles in one NTP interval
56 * @xtime_interval: Number of clock shifted nano seconds in one NTP 56 * @xtime_interval: Number of clock shifted nano seconds in one NTP
57 * interval. 57 * interval.
@@ -93,7 +93,7 @@ struct timekeeper {
93 unsigned int clock_was_set_seq; 93 unsigned int clock_was_set_seq;
94 u8 cs_was_changed_seq; 94 u8 cs_was_changed_seq;
95 ktime_t next_leap_ktime; 95 ktime_t next_leap_ktime;
96 struct timespec64 raw_time; 96 u64 raw_sec;
97 97
98 /* The following members are for timekeeping internal use */ 98 /* The following members are for timekeeping internal use */
99 u64 cycle_interval; 99 u64 cycle_interval;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index b602c48cb841..0454bfa24353 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -72,6 +72,10 @@ static inline void tk_normalize_xtime(struct timekeeper *tk)
72 tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift; 72 tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
73 tk->xtime_sec++; 73 tk->xtime_sec++;
74 } 74 }
75 while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
76 tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
77 tk->raw_sec++;
78 }
75} 79}
76 80
77static inline struct timespec64 tk_xtime(struct timekeeper *tk) 81static inline struct timespec64 tk_xtime(struct timekeeper *tk)
@@ -285,12 +289,14 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
285 /* if changing clocks, convert xtime_nsec shift units */ 289 /* if changing clocks, convert xtime_nsec shift units */
286 if (old_clock) { 290 if (old_clock) {
287 int shift_change = clock->shift - old_clock->shift; 291 int shift_change = clock->shift - old_clock->shift;
288 if (shift_change < 0) 292 if (shift_change < 0) {
289 tk->tkr_mono.xtime_nsec >>= -shift_change; 293 tk->tkr_mono.xtime_nsec >>= -shift_change;
290 else 294 tk->tkr_raw.xtime_nsec >>= -shift_change;
295 } else {
291 tk->tkr_mono.xtime_nsec <<= shift_change; 296 tk->tkr_mono.xtime_nsec <<= shift_change;
297 tk->tkr_raw.xtime_nsec <<= shift_change;
298 }
292 } 299 }
293 tk->tkr_raw.xtime_nsec = 0;
294 300
295 tk->tkr_mono.shift = clock->shift; 301 tk->tkr_mono.shift = clock->shift;
296 tk->tkr_raw.shift = clock->shift; 302 tk->tkr_raw.shift = clock->shift;
@@ -619,9 +625,6 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
619 nsec = (u32) tk->wall_to_monotonic.tv_nsec; 625 nsec = (u32) tk->wall_to_monotonic.tv_nsec;
620 tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec); 626 tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
621 627
622 /* Update the monotonic raw base */
623 tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time);
624
625 /* 628 /*
626 * The sum of the nanoseconds portions of xtime and 629 * The sum of the nanoseconds portions of xtime and
627 * wall_to_monotonic can be greater/equal one second. Take 630 * wall_to_monotonic can be greater/equal one second. Take
@@ -631,6 +634,11 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
631 if (nsec >= NSEC_PER_SEC) 634 if (nsec >= NSEC_PER_SEC)
632 seconds++; 635 seconds++;
633 tk->ktime_sec = seconds; 636 tk->ktime_sec = seconds;
637
638 /* Update the monotonic raw base */
639 seconds = tk->raw_sec;
640 nsec = (u32)(tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift);
641 tk->tkr_raw.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
634} 642}
635 643
636/* must hold timekeeper_lock */ 644/* must hold timekeeper_lock */
@@ -672,7 +680,6 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
672static void timekeeping_forward_now(struct timekeeper *tk) 680static void timekeeping_forward_now(struct timekeeper *tk)
673{ 681{
674 u64 cycle_now, delta; 682 u64 cycle_now, delta;
675 u64 nsec;
676 683
677 cycle_now = tk_clock_read(&tk->tkr_mono); 684 cycle_now = tk_clock_read(&tk->tkr_mono);
678 delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask); 685 delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
@@ -684,10 +691,13 @@ static void timekeeping_forward_now(struct timekeeper *tk)
684 /* If arch requires, add in get_arch_timeoffset() */ 691 /* If arch requires, add in get_arch_timeoffset() */
685 tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift; 692 tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;
686 693
687 tk_normalize_xtime(tk);
688 694
689 nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift); 695 tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
690 timespec64_add_ns(&tk->raw_time, nsec); 696
697 /* If arch requires, add in get_arch_timeoffset() */
698 tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;
699
700 tk_normalize_xtime(tk);
691} 701}
692 702
693/** 703/**
@@ -1373,19 +1383,18 @@ int timekeeping_notify(struct clocksource *clock)
1373void getrawmonotonic64(struct timespec64 *ts) 1383void getrawmonotonic64(struct timespec64 *ts)
1374{ 1384{
1375 struct timekeeper *tk = &tk_core.timekeeper; 1385 struct timekeeper *tk = &tk_core.timekeeper;
1376 struct timespec64 ts64;
1377 unsigned long seq; 1386 unsigned long seq;
1378 u64 nsecs; 1387 u64 nsecs;
1379 1388
1380 do { 1389 do {
1381 seq = read_seqcount_begin(&tk_core.seq); 1390 seq = read_seqcount_begin(&tk_core.seq);
1391 ts->tv_sec = tk->raw_sec;
1382 nsecs = timekeeping_get_ns(&tk->tkr_raw); 1392 nsecs = timekeeping_get_ns(&tk->tkr_raw);
1383 ts64 = tk->raw_time;
1384 1393
1385 } while (read_seqcount_retry(&tk_core.seq, seq)); 1394 } while (read_seqcount_retry(&tk_core.seq, seq));
1386 1395
1387 timespec64_add_ns(&ts64, nsecs); 1396 ts->tv_nsec = 0;
1388 *ts = ts64; 1397 timespec64_add_ns(ts, nsecs);
1389} 1398}
1390EXPORT_SYMBOL(getrawmonotonic64); 1399EXPORT_SYMBOL(getrawmonotonic64);
1391 1400
@@ -1509,8 +1518,7 @@ void __init timekeeping_init(void)
1509 tk_setup_internals(tk, clock); 1518 tk_setup_internals(tk, clock);
1510 1519
1511 tk_set_xtime(tk, &now); 1520 tk_set_xtime(tk, &now);
1512 tk->raw_time.tv_sec = 0; 1521 tk->raw_sec = 0;
1513 tk->raw_time.tv_nsec = 0;
1514 if (boot.tv_sec == 0 && boot.tv_nsec == 0) 1522 if (boot.tv_sec == 0 && boot.tv_nsec == 0)
1515 boot = tk_xtime(tk); 1523 boot = tk_xtime(tk);
1516 1524
@@ -2011,15 +2019,12 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
2011 *clock_set |= accumulate_nsecs_to_secs(tk); 2019 *clock_set |= accumulate_nsecs_to_secs(tk);
2012 2020
2013 /* Accumulate raw time */ 2021 /* Accumulate raw time */
2014 tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
2015 tk->tkr_raw.xtime_nsec += tk->raw_interval << shift; 2022 tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
2016 snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift; 2023 snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
2017 while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) { 2024 while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
2018 tk->tkr_raw.xtime_nsec -= snsec_per_sec; 2025 tk->tkr_raw.xtime_nsec -= snsec_per_sec;
2019 tk->raw_time.tv_sec++; 2026 tk->raw_sec++;
2020 } 2027 }
2021 tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift;
2022 tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
2023 2028
2024 /* Accumulate error between NTP and clock interval */ 2029 /* Accumulate error between NTP and clock interval */
2025 tk->ntp_error += tk->ntp_tick << shift; 2030 tk->ntp_error += tk->ntp_tick << shift;