about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2016-12-25 05:38:40 -0500
committerThomas Gleixner <tglx@linutronix.de>2016-12-25 11:21:22 -0500
commit2456e855354415bfaeb7badaa14e11b3e02c8466 (patch)
tree6fc81500645174c246c3fdb568cba32aa01960c6 /kernel
parenta5a1d1c2914b5316924c7893eb683a5420ebd3be (diff)
ktime: Get rid of the union
ktime is a union because the initial implementation stored the time in scalar nanoseconds on 64 bit machines and in an endianness-optimized timespec variant for 32 bit machines. The Y2038 cleanup removed the timespec variant and switched everything to scalar nanoseconds. The union remained, but became completely pointless. Get rid of the union and just keep ktime_t as a simple typedef of type s64. The conversion was done with coccinelle and some manual mopping up. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Peter Zijlstra <peterz@infradead.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/futex.c4
-rw-r--r--kernel/signal.c6
-rw-r--r--kernel/time/alarmtimer.c20
-rw-r--r--kernel/time/clockevents.c6
-rw-r--r--kernel/time/hrtimer.c52
-rw-r--r--kernel/time/itimer.c10
-rw-r--r--kernel/time/ntp.c2
-rw-r--r--kernel/time/posix-timers.c20
-rw-r--r--kernel/time/tick-broadcast-hrtimer.c2
-rw-r--r--kernel/time/tick-broadcast.c24
-rw-r--r--kernel/time/tick-oneshot.c2
-rw-r--r--kernel/time/tick-sched.c22
-rw-r--r--kernel/time/timekeeping.c6
13 files changed, 88 insertions, 88 deletions
diff --git a/kernel/futex.c b/kernel/futex.c
index 9246d9f593d1..0842c8ca534b 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2459,7 +2459,7 @@ retry:
2459 restart->fn = futex_wait_restart; 2459 restart->fn = futex_wait_restart;
2460 restart->futex.uaddr = uaddr; 2460 restart->futex.uaddr = uaddr;
2461 restart->futex.val = val; 2461 restart->futex.val = val;
2462 restart->futex.time = abs_time->tv64; 2462 restart->futex.time = *abs_time;
2463 restart->futex.bitset = bitset; 2463 restart->futex.bitset = bitset;
2464 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT; 2464 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
2465 2465
@@ -2480,7 +2480,7 @@ static long futex_wait_restart(struct restart_block *restart)
2480 ktime_t t, *tp = NULL; 2480 ktime_t t, *tp = NULL;
2481 2481
2482 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) { 2482 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
2483 t.tv64 = restart->futex.time; 2483 t = restart->futex.time;
2484 tp = &t; 2484 tp = &t;
2485 } 2485 }
2486 restart->fn = do_no_restart_syscall; 2486 restart->fn = do_no_restart_syscall;
diff --git a/kernel/signal.c b/kernel/signal.c
index f5d4e275345e..ff046b73ff2d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -587,7 +587,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
587 struct hrtimer *tmr = &tsk->signal->real_timer; 587 struct hrtimer *tmr = &tsk->signal->real_timer;
588 588
589 if (!hrtimer_is_queued(tmr) && 589 if (!hrtimer_is_queued(tmr) &&
590 tsk->signal->it_real_incr.tv64 != 0) { 590 tsk->signal->it_real_incr != 0) {
591 hrtimer_forward(tmr, tmr->base->get_time(), 591 hrtimer_forward(tmr, tmr->base->get_time(),
592 tsk->signal->it_real_incr); 592 tsk->signal->it_real_incr);
593 hrtimer_restart(tmr); 593 hrtimer_restart(tmr);
@@ -2766,7 +2766,7 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2766int do_sigtimedwait(const sigset_t *which, siginfo_t *info, 2766int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2767 const struct timespec *ts) 2767 const struct timespec *ts)
2768{ 2768{
2769 ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX }; 2769 ktime_t *to = NULL, timeout = KTIME_MAX;
2770 struct task_struct *tsk = current; 2770 struct task_struct *tsk = current;
2771 sigset_t mask = *which; 2771 sigset_t mask = *which;
2772 int sig, ret = 0; 2772 int sig, ret = 0;
@@ -2786,7 +2786,7 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2786 2786
2787 spin_lock_irq(&tsk->sighand->siglock); 2787 spin_lock_irq(&tsk->sighand->siglock);
2788 sig = dequeue_signal(tsk, &mask, info); 2788 sig = dequeue_signal(tsk, &mask, info);
2789 if (!sig && timeout.tv64) { 2789 if (!sig && timeout) {
2790 /* 2790 /*
2791 * None ready, temporarily unblock those we're interested 2791 * None ready, temporarily unblock those we're interested
2792 * while we are sleeping in so that we'll be awakened when 2792 * while we are sleeping in so that we'll be awakened when
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 3921cf7fea8e..ab6ac077bdb7 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -254,13 +254,13 @@ static int alarmtimer_suspend(struct device *dev)
254 if (!next) 254 if (!next)
255 continue; 255 continue;
256 delta = ktime_sub(next->expires, base->gettime()); 256 delta = ktime_sub(next->expires, base->gettime());
257 if (!min.tv64 || (delta.tv64 < min.tv64)) { 257 if (!min || (delta < min)) {
258 expires = next->expires; 258 expires = next->expires;
259 min = delta; 259 min = delta;
260 type = i; 260 type = i;
261 } 261 }
262 } 262 }
263 if (min.tv64 == 0) 263 if (min == 0)
264 return 0; 264 return 0;
265 265
266 if (ktime_to_ns(min) < 2 * NSEC_PER_SEC) { 266 if (ktime_to_ns(min) < 2 * NSEC_PER_SEC) {
@@ -328,7 +328,7 @@ static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type)
328 delta = ktime_sub(absexp, base->gettime()); 328 delta = ktime_sub(absexp, base->gettime());
329 329
330 spin_lock_irqsave(&freezer_delta_lock, flags); 330 spin_lock_irqsave(&freezer_delta_lock, flags);
331 if (!freezer_delta.tv64 || (delta.tv64 < freezer_delta.tv64)) { 331 if (!freezer_delta || (delta < freezer_delta)) {
332 freezer_delta = delta; 332 freezer_delta = delta;
333 freezer_expires = absexp; 333 freezer_expires = absexp;
334 freezer_alarmtype = type; 334 freezer_alarmtype = type;
@@ -453,10 +453,10 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
453 453
454 delta = ktime_sub(now, alarm->node.expires); 454 delta = ktime_sub(now, alarm->node.expires);
455 455
456 if (delta.tv64 < 0) 456 if (delta < 0)
457 return 0; 457 return 0;
458 458
459 if (unlikely(delta.tv64 >= interval.tv64)) { 459 if (unlikely(delta >= interval)) {
460 s64 incr = ktime_to_ns(interval); 460 s64 incr = ktime_to_ns(interval);
461 461
462 overrun = ktime_divns(delta, incr); 462 overrun = ktime_divns(delta, incr);
@@ -464,7 +464,7 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
464 alarm->node.expires = ktime_add_ns(alarm->node.expires, 464 alarm->node.expires = ktime_add_ns(alarm->node.expires,
465 incr*overrun); 465 incr*overrun);
466 466
467 if (alarm->node.expires.tv64 > now.tv64) 467 if (alarm->node.expires > now)
468 return overrun; 468 return overrun;
469 /* 469 /*
470 * This (and the ktime_add() below) is the 470 * This (and the ktime_add() below) is the
@@ -522,7 +522,7 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
522 } 522 }
523 523
524 /* Re-add periodic timers */ 524 /* Re-add periodic timers */
525 if (ptr->it.alarm.interval.tv64) { 525 if (ptr->it.alarm.interval) {
526 ptr->it_overrun += alarm_forward(alarm, now, 526 ptr->it_overrun += alarm_forward(alarm, now,
527 ptr->it.alarm.interval); 527 ptr->it.alarm.interval);
528 result = ALARMTIMER_RESTART; 528 result = ALARMTIMER_RESTART;
@@ -730,7 +730,7 @@ static int update_rmtp(ktime_t exp, enum alarmtimer_type type,
730 730
731 rem = ktime_sub(exp, alarm_bases[type].gettime()); 731 rem = ktime_sub(exp, alarm_bases[type].gettime());
732 732
733 if (rem.tv64 <= 0) 733 if (rem <= 0)
734 return 0; 734 return 0;
735 rmt = ktime_to_timespec(rem); 735 rmt = ktime_to_timespec(rem);
736 736
@@ -755,7 +755,7 @@ static long __sched alarm_timer_nsleep_restart(struct restart_block *restart)
755 struct alarm alarm; 755 struct alarm alarm;
756 int ret = 0; 756 int ret = 0;
757 757
758 exp.tv64 = restart->nanosleep.expires; 758 exp = restart->nanosleep.expires;
759 alarm_init(&alarm, type, alarmtimer_nsleep_wakeup); 759 alarm_init(&alarm, type, alarmtimer_nsleep_wakeup);
760 760
761 if (alarmtimer_do_nsleep(&alarm, exp)) 761 if (alarmtimer_do_nsleep(&alarm, exp))
@@ -835,7 +835,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
835 restart = &current->restart_block; 835 restart = &current->restart_block;
836 restart->fn = alarm_timer_nsleep_restart; 836 restart->fn = alarm_timer_nsleep_restart;
837 restart->nanosleep.clockid = type; 837 restart->nanosleep.clockid = type;
838 restart->nanosleep.expires = exp.tv64; 838 restart->nanosleep.expires = exp;
839 restart->nanosleep.rmtp = rmtp; 839 restart->nanosleep.rmtp = rmtp;
840 ret = -ERESTART_RESTARTBLOCK; 840 ret = -ERESTART_RESTARTBLOCK;
841 841
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 2c5bc77c0bb0..97ac0951f164 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -179,7 +179,7 @@ void clockevents_switch_state(struct clock_event_device *dev,
179void clockevents_shutdown(struct clock_event_device *dev) 179void clockevents_shutdown(struct clock_event_device *dev)
180{ 180{
181 clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN); 181 clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
182 dev->next_event.tv64 = KTIME_MAX; 182 dev->next_event = KTIME_MAX;
183} 183}
184 184
185/** 185/**
@@ -213,7 +213,7 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
213 if (dev->min_delta_ns >= MIN_DELTA_LIMIT) { 213 if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
214 printk_deferred(KERN_WARNING 214 printk_deferred(KERN_WARNING
215 "CE: Reprogramming failure. Giving up\n"); 215 "CE: Reprogramming failure. Giving up\n");
216 dev->next_event.tv64 = KTIME_MAX; 216 dev->next_event = KTIME_MAX;
217 return -ETIME; 217 return -ETIME;
218 } 218 }
219 219
@@ -310,7 +310,7 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
310 int64_t delta; 310 int64_t delta;
311 int rc; 311 int rc;
312 312
313 if (unlikely(expires.tv64 < 0)) { 313 if (unlikely(expires < 0)) {
314 WARN_ON_ONCE(1); 314 WARN_ON_ONCE(1);
315 return -ETIME; 315 return -ETIME;
316 } 316 }
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 161e340395d5..c7f780113884 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -171,7 +171,7 @@ hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
171 return 0; 171 return 0;
172 172
173 expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset); 173 expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
174 return expires.tv64 <= new_base->cpu_base->expires_next.tv64; 174 return expires <= new_base->cpu_base->expires_next;
175#else 175#else
176 return 0; 176 return 0;
177#endif 177#endif
@@ -313,7 +313,7 @@ ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
313 * We use KTIME_SEC_MAX here, the maximum timeout which we can 313 * We use KTIME_SEC_MAX here, the maximum timeout which we can
314 * return to user space in a timespec: 314 * return to user space in a timespec:
315 */ 315 */
316 if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64) 316 if (res < 0 || res < lhs || res < rhs)
317 res = ktime_set(KTIME_SEC_MAX, 0); 317 res = ktime_set(KTIME_SEC_MAX, 0);
318 318
319 return res; 319 return res;
@@ -465,8 +465,8 @@ static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
465static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base) 465static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
466{ 466{
467 struct hrtimer_clock_base *base = cpu_base->clock_base; 467 struct hrtimer_clock_base *base = cpu_base->clock_base;
468 ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
469 unsigned int active = cpu_base->active_bases; 468 unsigned int active = cpu_base->active_bases;
469 ktime_t expires, expires_next = KTIME_MAX;
470 470
471 hrtimer_update_next_timer(cpu_base, NULL); 471 hrtimer_update_next_timer(cpu_base, NULL);
472 for (; active; base++, active >>= 1) { 472 for (; active; base++, active >>= 1) {
@@ -479,7 +479,7 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
479 next = timerqueue_getnext(&base->active); 479 next = timerqueue_getnext(&base->active);
480 timer = container_of(next, struct hrtimer, node); 480 timer = container_of(next, struct hrtimer, node);
481 expires = ktime_sub(hrtimer_get_expires(timer), base->offset); 481 expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
482 if (expires.tv64 < expires_next.tv64) { 482 if (expires < expires_next) {
483 expires_next = expires; 483 expires_next = expires;
484 hrtimer_update_next_timer(cpu_base, timer); 484 hrtimer_update_next_timer(cpu_base, timer);
485 } 485 }
@@ -489,8 +489,8 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
489 * the clock bases so the result might be negative. Fix it up 489 * the clock bases so the result might be negative. Fix it up
490 * to prevent a false positive in clockevents_program_event(). 490 * to prevent a false positive in clockevents_program_event().
491 */ 491 */
492 if (expires_next.tv64 < 0) 492 if (expires_next < 0)
493 expires_next.tv64 = 0; 493 expires_next = 0;
494 return expires_next; 494 return expires_next;
495} 495}
496#endif 496#endif
@@ -561,10 +561,10 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
561 561
562 expires_next = __hrtimer_get_next_event(cpu_base); 562 expires_next = __hrtimer_get_next_event(cpu_base);
563 563
564 if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64) 564 if (skip_equal && expires_next == cpu_base->expires_next)
565 return; 565 return;
566 566
567 cpu_base->expires_next.tv64 = expires_next.tv64; 567 cpu_base->expires_next = expires_next;
568 568
569 /* 569 /*
570 * If a hang was detected in the last timer interrupt then we 570 * If a hang was detected in the last timer interrupt then we
@@ -622,10 +622,10 @@ static void hrtimer_reprogram(struct hrtimer *timer,
622 * CLOCK_REALTIME timer might be requested with an absolute 622 * CLOCK_REALTIME timer might be requested with an absolute
623 * expiry time which is less than base->offset. Set it to 0. 623 * expiry time which is less than base->offset. Set it to 0.
624 */ 624 */
625 if (expires.tv64 < 0) 625 if (expires < 0)
626 expires.tv64 = 0; 626 expires = 0;
627 627
628 if (expires.tv64 >= cpu_base->expires_next.tv64) 628 if (expires >= cpu_base->expires_next)
629 return; 629 return;
630 630
631 /* Update the pointer to the next expiring timer */ 631 /* Update the pointer to the next expiring timer */
@@ -653,7 +653,7 @@ static void hrtimer_reprogram(struct hrtimer *timer,
653 */ 653 */
654static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) 654static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
655{ 655{
656 base->expires_next.tv64 = KTIME_MAX; 656 base->expires_next = KTIME_MAX;
657 base->hres_active = 0; 657 base->hres_active = 0;
658} 658}
659 659
@@ -827,21 +827,21 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
827 827
828 delta = ktime_sub(now, hrtimer_get_expires(timer)); 828 delta = ktime_sub(now, hrtimer_get_expires(timer));
829 829
830 if (delta.tv64 < 0) 830 if (delta < 0)
831 return 0; 831 return 0;
832 832
833 if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED)) 833 if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
834 return 0; 834 return 0;
835 835
836 if (interval.tv64 < hrtimer_resolution) 836 if (interval < hrtimer_resolution)
837 interval.tv64 = hrtimer_resolution; 837 interval = hrtimer_resolution;
838 838
839 if (unlikely(delta.tv64 >= interval.tv64)) { 839 if (unlikely(delta >= interval)) {
840 s64 incr = ktime_to_ns(interval); 840 s64 incr = ktime_to_ns(interval);
841 841
842 orun = ktime_divns(delta, incr); 842 orun = ktime_divns(delta, incr);
843 hrtimer_add_expires_ns(timer, incr * orun); 843 hrtimer_add_expires_ns(timer, incr * orun);
844 if (hrtimer_get_expires_tv64(timer) > now.tv64) 844 if (hrtimer_get_expires_tv64(timer) > now)
845 return orun; 845 return orun;
846 /* 846 /*
847 * This (and the ktime_add() below) is the 847 * This (and the ktime_add() below) is the
@@ -1104,7 +1104,7 @@ u64 hrtimer_get_next_event(void)
1104 raw_spin_lock_irqsave(&cpu_base->lock, flags); 1104 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1105 1105
1106 if (!__hrtimer_hres_active(cpu_base)) 1106 if (!__hrtimer_hres_active(cpu_base))
1107 expires = __hrtimer_get_next_event(cpu_base).tv64; 1107 expires = __hrtimer_get_next_event(cpu_base);
1108 1108
1109 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); 1109 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1110 1110
@@ -1296,7 +1296,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
1296 * are right-of a not yet expired timer, because that 1296 * are right-of a not yet expired timer, because that
1297 * timer will have to trigger a wakeup anyway. 1297 * timer will have to trigger a wakeup anyway.
1298 */ 1298 */
1299 if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) 1299 if (basenow < hrtimer_get_softexpires_tv64(timer))
1300 break; 1300 break;
1301 1301
1302 __run_hrtimer(cpu_base, base, timer, &basenow); 1302 __run_hrtimer(cpu_base, base, timer, &basenow);
@@ -1318,7 +1318,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
1318 1318
1319 BUG_ON(!cpu_base->hres_active); 1319 BUG_ON(!cpu_base->hres_active);
1320 cpu_base->nr_events++; 1320 cpu_base->nr_events++;
1321 dev->next_event.tv64 = KTIME_MAX; 1321 dev->next_event = KTIME_MAX;
1322 1322
1323 raw_spin_lock(&cpu_base->lock); 1323 raw_spin_lock(&cpu_base->lock);
1324 entry_time = now = hrtimer_update_base(cpu_base); 1324 entry_time = now = hrtimer_update_base(cpu_base);
@@ -1331,7 +1331,7 @@ retry:
1331 * timers which run their callback and need to be requeued on 1331 * timers which run their callback and need to be requeued on
1332 * this CPU. 1332 * this CPU.
1333 */ 1333 */
1334 cpu_base->expires_next.tv64 = KTIME_MAX; 1334 cpu_base->expires_next = KTIME_MAX;
1335 1335
1336 __hrtimer_run_queues(cpu_base, now); 1336 __hrtimer_run_queues(cpu_base, now);
1337 1337
@@ -1379,13 +1379,13 @@ retry:
1379 cpu_base->hang_detected = 1; 1379 cpu_base->hang_detected = 1;
1380 raw_spin_unlock(&cpu_base->lock); 1380 raw_spin_unlock(&cpu_base->lock);
1381 delta = ktime_sub(now, entry_time); 1381 delta = ktime_sub(now, entry_time);
1382 if ((unsigned int)delta.tv64 > cpu_base->max_hang_time) 1382 if ((unsigned int)delta > cpu_base->max_hang_time)
1383 cpu_base->max_hang_time = (unsigned int) delta.tv64; 1383 cpu_base->max_hang_time = (unsigned int) delta;
1384 /* 1384 /*
1385 * Limit it to a sensible value as we enforce a longer 1385 * Limit it to a sensible value as we enforce a longer
1386 * delay. Give the CPU at least 100ms to catch up. 1386 * delay. Give the CPU at least 100ms to catch up.
1387 */ 1387 */
1388 if (delta.tv64 > 100 * NSEC_PER_MSEC) 1388 if (delta > 100 * NSEC_PER_MSEC)
1389 expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC); 1389 expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
1390 else 1390 else
1391 expires_next = ktime_add(now, delta); 1391 expires_next = ktime_add(now, delta);
@@ -1495,7 +1495,7 @@ static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
1495 ktime_t rem; 1495 ktime_t rem;
1496 1496
1497 rem = hrtimer_expires_remaining(timer); 1497 rem = hrtimer_expires_remaining(timer);
1498 if (rem.tv64 <= 0) 1498 if (rem <= 0)
1499 return 0; 1499 return 0;
1500 rmt = ktime_to_timespec(rem); 1500 rmt = ktime_to_timespec(rem);
1501 1501
@@ -1693,7 +1693,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
1693 * Optimize when a zero timeout value is given. It does not 1693 * Optimize when a zero timeout value is given. It does not
1694 * matter whether this is an absolute or a relative time. 1694 * matter whether this is an absolute or a relative time.
1695 */ 1695 */
1696 if (expires && !expires->tv64) { 1696 if (expires && *expires == 0) {
1697 __set_current_state(TASK_RUNNING); 1697 __set_current_state(TASK_RUNNING);
1698 return 0; 1698 return 0;
1699 } 1699 }
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index a45afb7277c2..8c89143f9ebf 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -34,10 +34,10 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer)
34 * then we return 0 - which is correct. 34 * then we return 0 - which is correct.
35 */ 35 */
36 if (hrtimer_active(timer)) { 36 if (hrtimer_active(timer)) {
37 if (rem.tv64 <= 0) 37 if (rem <= 0)
38 rem.tv64 = NSEC_PER_USEC; 38 rem = NSEC_PER_USEC;
39 } else 39 } else
40 rem.tv64 = 0; 40 rem = 0;
41 41
42 return ktime_to_timeval(rem); 42 return ktime_to_timeval(rem);
43} 43}
@@ -216,12 +216,12 @@ again:
216 goto again; 216 goto again;
217 } 217 }
218 expires = timeval_to_ktime(value->it_value); 218 expires = timeval_to_ktime(value->it_value);
219 if (expires.tv64 != 0) { 219 if (expires != 0) {
220 tsk->signal->it_real_incr = 220 tsk->signal->it_real_incr =
221 timeval_to_ktime(value->it_interval); 221 timeval_to_ktime(value->it_interval);
222 hrtimer_start(timer, expires, HRTIMER_MODE_REL); 222 hrtimer_start(timer, expires, HRTIMER_MODE_REL);
223 } else 223 } else
224 tsk->signal->it_real_incr.tv64 = 0; 224 tsk->signal->it_real_incr = 0;
225 225
226 trace_itimer_state(ITIMER_REAL, value, 0); 226 trace_itimer_state(ITIMER_REAL, value, 0);
227 spin_unlock_irq(&tsk->sighand->siglock); 227 spin_unlock_irq(&tsk->sighand->siglock);
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 6df8927c58a5..edf19cc53140 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -381,7 +381,7 @@ ktime_t ntp_get_next_leap(void)
381 381
382 if ((time_state == TIME_INS) && (time_status & STA_INS)) 382 if ((time_state == TIME_INS) && (time_status & STA_INS))
383 return ktime_set(ntp_next_leap_sec, 0); 383 return ktime_set(ntp_next_leap_sec, 0);
384 ret.tv64 = KTIME_MAX; 384 ret = KTIME_MAX;
385 return ret; 385 return ret;
386} 386}
387 387
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 42d7b9558741..9fe98b3777a2 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -359,7 +359,7 @@ static void schedule_next_timer(struct k_itimer *timr)
359{ 359{
360 struct hrtimer *timer = &timr->it.real.timer; 360 struct hrtimer *timer = &timr->it.real.timer;
361 361
362 if (timr->it.real.interval.tv64 == 0) 362 if (timr->it.real.interval == 0)
363 return; 363 return;
364 364
365 timr->it_overrun += (unsigned int) hrtimer_forward(timer, 365 timr->it_overrun += (unsigned int) hrtimer_forward(timer,
@@ -449,7 +449,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
449 timr = container_of(timer, struct k_itimer, it.real.timer); 449 timr = container_of(timer, struct k_itimer, it.real.timer);
450 spin_lock_irqsave(&timr->it_lock, flags); 450 spin_lock_irqsave(&timr->it_lock, flags);
451 451
452 if (timr->it.real.interval.tv64 != 0) 452 if (timr->it.real.interval != 0)
453 si_private = ++timr->it_requeue_pending; 453 si_private = ++timr->it_requeue_pending;
454 454
455 if (posix_timer_event(timr, si_private)) { 455 if (posix_timer_event(timr, si_private)) {
@@ -458,7 +458,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
458 * we will not get a call back to restart it AND 458 * we will not get a call back to restart it AND
459 * it should be restarted. 459 * it should be restarted.
460 */ 460 */
461 if (timr->it.real.interval.tv64 != 0) { 461 if (timr->it.real.interval != 0) {
462 ktime_t now = hrtimer_cb_get_time(timer); 462 ktime_t now = hrtimer_cb_get_time(timer);
463 463
464 /* 464 /*
@@ -487,7 +487,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
487 { 487 {
488 ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ); 488 ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);
489 489
490 if (timr->it.real.interval.tv64 < kj.tv64) 490 if (timr->it.real.interval < kj)
491 now = ktime_add(now, kj); 491 now = ktime_add(now, kj);
492 } 492 }
493#endif 493#endif
@@ -743,7 +743,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
743 iv = timr->it.real.interval; 743 iv = timr->it.real.interval;
744 744
745 /* interval timer ? */ 745 /* interval timer ? */
746 if (iv.tv64) 746 if (iv)
747 cur_setting->it_interval = ktime_to_timespec(iv); 747 cur_setting->it_interval = ktime_to_timespec(iv);
748 else if (!hrtimer_active(timer) && 748 else if (!hrtimer_active(timer) &&
749 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) 749 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
@@ -756,13 +756,13 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
756 * timer move the expiry time forward by intervals, so 756 * timer move the expiry time forward by intervals, so
757 * expiry is > now. 757 * expiry is > now.
758 */ 758 */
759 if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING || 759 if (iv && (timr->it_requeue_pending & REQUEUE_PENDING ||
760 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) 760 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
761 timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv); 761 timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
762 762
763 remaining = __hrtimer_expires_remaining_adjusted(timer, now); 763 remaining = __hrtimer_expires_remaining_adjusted(timer, now);
764 /* Return 0 only, when the timer is expired and not pending */ 764 /* Return 0 only, when the timer is expired and not pending */
765 if (remaining.tv64 <= 0) { 765 if (remaining <= 0) {
766 /* 766 /*
767 * A single shot SIGEV_NONE timer must return 0, when 767 * A single shot SIGEV_NONE timer must return 0, when
768 * it is expired ! 768 * it is expired !
@@ -839,7 +839,7 @@ common_timer_set(struct k_itimer *timr, int flags,
839 common_timer_get(timr, old_setting); 839 common_timer_get(timr, old_setting);
840 840
841 /* disable the timer */ 841 /* disable the timer */
842 timr->it.real.interval.tv64 = 0; 842 timr->it.real.interval = 0;
843 /* 843 /*
844 * careful here. If smp we could be in the "fire" routine which will 844 * careful here. If smp we could be in the "fire" routine which will
845 * be spinning as we hold the lock. But this is ONLY an SMP issue. 845 * be spinning as we hold the lock. But this is ONLY an SMP issue.
@@ -924,7 +924,7 @@ retry:
924 924
925static int common_timer_del(struct k_itimer *timer) 925static int common_timer_del(struct k_itimer *timer)
926{ 926{
927 timer->it.real.interval.tv64 = 0; 927 timer->it.real.interval = 0;
928 928
929 if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0) 929 if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
930 return TIMER_RETRY; 930 return TIMER_RETRY;
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index 690b797f522e..a7bb8f33ae07 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -97,7 +97,7 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
97 ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer); 97 ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
98 98
99 if (clockevent_state_oneshot(&ce_broadcast_hrtimer)) 99 if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
100 if (ce_broadcast_hrtimer.next_event.tv64 != KTIME_MAX) 100 if (ce_broadcast_hrtimer.next_event != KTIME_MAX)
101 return HRTIMER_RESTART; 101 return HRTIMER_RESTART;
102 102
103 return HRTIMER_NORESTART; 103 return HRTIMER_NORESTART;
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index d2a20e83ebae..3109204c87cc 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -604,14 +604,14 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
604 bool bc_local; 604 bool bc_local;
605 605
606 raw_spin_lock(&tick_broadcast_lock); 606 raw_spin_lock(&tick_broadcast_lock);
607 dev->next_event.tv64 = KTIME_MAX; 607 dev->next_event = KTIME_MAX;
608 next_event.tv64 = KTIME_MAX; 608 next_event = KTIME_MAX;
609 cpumask_clear(tmpmask); 609 cpumask_clear(tmpmask);
610 now = ktime_get(); 610 now = ktime_get();
611 /* Find all expired events */ 611 /* Find all expired events */
612 for_each_cpu(cpu, tick_broadcast_oneshot_mask) { 612 for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
613 td = &per_cpu(tick_cpu_device, cpu); 613 td = &per_cpu(tick_cpu_device, cpu);
614 if (td->evtdev->next_event.tv64 <= now.tv64) { 614 if (td->evtdev->next_event <= now) {
615 cpumask_set_cpu(cpu, tmpmask); 615 cpumask_set_cpu(cpu, tmpmask);
616 /* 616 /*
617 * Mark the remote cpu in the pending mask, so 617 * Mark the remote cpu in the pending mask, so
@@ -619,8 +619,8 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
619 * timer in tick_broadcast_oneshot_control(). 619 * timer in tick_broadcast_oneshot_control().
620 */ 620 */
621 cpumask_set_cpu(cpu, tick_broadcast_pending_mask); 621 cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
622 } else if (td->evtdev->next_event.tv64 < next_event.tv64) { 622 } else if (td->evtdev->next_event < next_event) {
623 next_event.tv64 = td->evtdev->next_event.tv64; 623 next_event = td->evtdev->next_event;
624 next_cpu = cpu; 624 next_cpu = cpu;
625 } 625 }
626 } 626 }
@@ -657,7 +657,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
657 * - There are pending events on sleeping CPUs which were not 657 * - There are pending events on sleeping CPUs which were not
658 * in the event mask 658 * in the event mask
659 */ 659 */
660 if (next_event.tv64 != KTIME_MAX) 660 if (next_event != KTIME_MAX)
661 tick_broadcast_set_event(dev, next_cpu, next_event); 661 tick_broadcast_set_event(dev, next_cpu, next_event);
662 662
663 raw_spin_unlock(&tick_broadcast_lock); 663 raw_spin_unlock(&tick_broadcast_lock);
@@ -672,7 +672,7 @@ static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
672{ 672{
673 if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER)) 673 if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
674 return 0; 674 return 0;
675 if (bc->next_event.tv64 == KTIME_MAX) 675 if (bc->next_event == KTIME_MAX)
676 return 0; 676 return 0;
677 return bc->bound_on == cpu ? -EBUSY : 0; 677 return bc->bound_on == cpu ? -EBUSY : 0;
678} 678}
@@ -688,7 +688,7 @@ static void broadcast_shutdown_local(struct clock_event_device *bc,
688 if (bc->features & CLOCK_EVT_FEAT_HRTIMER) { 688 if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
689 if (broadcast_needs_cpu(bc, smp_processor_id())) 689 if (broadcast_needs_cpu(bc, smp_processor_id()))
690 return; 690 return;
691 if (dev->next_event.tv64 < bc->next_event.tv64) 691 if (dev->next_event < bc->next_event)
692 return; 692 return;
693 } 693 }
694 clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN); 694 clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
@@ -754,7 +754,7 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
754 */ 754 */
755 if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) { 755 if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
756 ret = -EBUSY; 756 ret = -EBUSY;
757 } else if (dev->next_event.tv64 < bc->next_event.tv64) { 757 } else if (dev->next_event < bc->next_event) {
758 tick_broadcast_set_event(bc, cpu, dev->next_event); 758 tick_broadcast_set_event(bc, cpu, dev->next_event);
759 /* 759 /*
760 * In case of hrtimer broadcasts the 760 * In case of hrtimer broadcasts the
@@ -789,7 +789,7 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
789 /* 789 /*
790 * Bail out if there is no next event. 790 * Bail out if there is no next event.
791 */ 791 */
792 if (dev->next_event.tv64 == KTIME_MAX) 792 if (dev->next_event == KTIME_MAX)
793 goto out; 793 goto out;
794 /* 794 /*
795 * If the pending bit is not set, then we are 795 * If the pending bit is not set, then we are
@@ -824,7 +824,7 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
824 * nohz fixups. 824 * nohz fixups.
825 */ 825 */
826 now = ktime_get(); 826 now = ktime_get();
827 if (dev->next_event.tv64 <= now.tv64) { 827 if (dev->next_event <= now) {
828 cpumask_set_cpu(cpu, tick_broadcast_force_mask); 828 cpumask_set_cpu(cpu, tick_broadcast_force_mask);
829 goto out; 829 goto out;
830 } 830 }
@@ -897,7 +897,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
897 tick_next_period); 897 tick_next_period);
898 tick_broadcast_set_event(bc, cpu, tick_next_period); 898 tick_broadcast_set_event(bc, cpu, tick_next_period);
899 } else 899 } else
900 bc->next_event.tv64 = KTIME_MAX; 900 bc->next_event = KTIME_MAX;
901 } else { 901 } else {
902 /* 902 /*
903 * The first cpu which switches to oneshot mode sets 903 * The first cpu which switches to oneshot mode sets
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index b51344652330..6b009c207671 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -28,7 +28,7 @@ int tick_program_event(ktime_t expires, int force)
28{ 28{
29 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); 29 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
30 30
31 if (unlikely(expires.tv64 == KTIME_MAX)) { 31 if (unlikely(expires == KTIME_MAX)) {
32 /* 32 /*
33 * We don't need the clock event device any more, stop it. 33 * We don't need the clock event device any more, stop it.
34 */ 34 */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 71496a20e670..2c115fdab397 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -58,21 +58,21 @@ static void tick_do_update_jiffies64(ktime_t now)
58 * Do a quick check without holding jiffies_lock: 58 * Do a quick check without holding jiffies_lock:
59 */ 59 */
60 delta = ktime_sub(now, last_jiffies_update); 60 delta = ktime_sub(now, last_jiffies_update);
61 if (delta.tv64 < tick_period.tv64) 61 if (delta < tick_period)
62 return; 62 return;
63 63
64 /* Reevaluate with jiffies_lock held */ 64 /* Reevaluate with jiffies_lock held */
65 write_seqlock(&jiffies_lock); 65 write_seqlock(&jiffies_lock);
66 66
67 delta = ktime_sub(now, last_jiffies_update); 67 delta = ktime_sub(now, last_jiffies_update);
68 if (delta.tv64 >= tick_period.tv64) { 68 if (delta >= tick_period) {
69 69
70 delta = ktime_sub(delta, tick_period); 70 delta = ktime_sub(delta, tick_period);
71 last_jiffies_update = ktime_add(last_jiffies_update, 71 last_jiffies_update = ktime_add(last_jiffies_update,
72 tick_period); 72 tick_period);
73 73
74 /* Slow path for long timeouts */ 74 /* Slow path for long timeouts */
75 if (unlikely(delta.tv64 >= tick_period.tv64)) { 75 if (unlikely(delta >= tick_period)) {
76 s64 incr = ktime_to_ns(tick_period); 76 s64 incr = ktime_to_ns(tick_period);
77 77
78 ticks = ktime_divns(delta, incr); 78 ticks = ktime_divns(delta, incr);
@@ -101,7 +101,7 @@ static ktime_t tick_init_jiffy_update(void)
101 101
102 write_seqlock(&jiffies_lock); 102 write_seqlock(&jiffies_lock);
103 /* Did we start the jiffies update yet ? */ 103 /* Did we start the jiffies update yet ? */
104 if (last_jiffies_update.tv64 == 0) 104 if (last_jiffies_update == 0)
105 last_jiffies_update = tick_next_period; 105 last_jiffies_update = tick_next_period;
106 period = last_jiffies_update; 106 period = last_jiffies_update;
107 write_sequnlock(&jiffies_lock); 107 write_sequnlock(&jiffies_lock);
@@ -669,7 +669,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
669 /* Read jiffies and the time when jiffies were updated last */ 669 /* Read jiffies and the time when jiffies were updated last */
670 do { 670 do {
671 seq = read_seqbegin(&jiffies_lock); 671 seq = read_seqbegin(&jiffies_lock);
672 basemono = last_jiffies_update.tv64; 672 basemono = last_jiffies_update;
673 basejiff = jiffies; 673 basejiff = jiffies;
674 } while (read_seqretry(&jiffies_lock, seq)); 674 } while (read_seqretry(&jiffies_lock, seq));
675 ts->last_jiffies = basejiff; 675 ts->last_jiffies = basejiff;
@@ -697,7 +697,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
697 */ 697 */
698 delta = next_tick - basemono; 698 delta = next_tick - basemono;
699 if (delta <= (u64)TICK_NSEC) { 699 if (delta <= (u64)TICK_NSEC) {
700 tick.tv64 = 0; 700 tick = 0;
701 701
702 /* 702 /*
703 * Tell the timer code that the base is not idle, i.e. undo 703 * Tell the timer code that the base is not idle, i.e. undo
@@ -764,10 +764,10 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
764 expires = KTIME_MAX; 764 expires = KTIME_MAX;
765 765
766 expires = min_t(u64, expires, next_tick); 766 expires = min_t(u64, expires, next_tick);
767 tick.tv64 = expires; 767 tick = expires;
768 768
769 /* Skip reprogram of event if its not changed */ 769 /* Skip reprogram of event if its not changed */
770 if (ts->tick_stopped && (expires == dev->next_event.tv64)) 770 if (ts->tick_stopped && (expires == dev->next_event))
771 goto out; 771 goto out;
772 772
773 /* 773 /*
@@ -864,7 +864,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
864 } 864 }
865 865
866 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) { 866 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
867 ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ }; 867 ts->sleep_length = NSEC_PER_SEC / HZ;
868 return false; 868 return false;
869 } 869 }
870 870
@@ -914,7 +914,7 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
914 ts->idle_calls++; 914 ts->idle_calls++;
915 915
916 expires = tick_nohz_stop_sched_tick(ts, now, cpu); 916 expires = tick_nohz_stop_sched_tick(ts, now, cpu);
917 if (expires.tv64 > 0LL) { 917 if (expires > 0LL) {
918 ts->idle_sleeps++; 918 ts->idle_sleeps++;
919 ts->idle_expires = expires; 919 ts->idle_expires = expires;
920 } 920 }
@@ -1051,7 +1051,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
1051 struct pt_regs *regs = get_irq_regs(); 1051 struct pt_regs *regs = get_irq_regs();
1052 ktime_t now = ktime_get(); 1052 ktime_t now = ktime_get();
1053 1053
1054 dev->next_event.tv64 = KTIME_MAX; 1054 dev->next_event = KTIME_MAX;
1055 1055
1056 tick_sched_do_timer(now); 1056 tick_sched_do_timer(now);
1057 tick_sched_handle(ts, regs); 1057 tick_sched_handle(ts, regs);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f4152a69277f..db087d7e106d 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -104,7 +104,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
104 */ 104 */
105 set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec, 105 set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
106 -tk->wall_to_monotonic.tv_nsec); 106 -tk->wall_to_monotonic.tv_nsec);
107 WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64); 107 WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
108 tk->wall_to_monotonic = wtm; 108 tk->wall_to_monotonic = wtm;
109 set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec); 109 set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
110 tk->offs_real = timespec64_to_ktime(tmp); 110 tk->offs_real = timespec64_to_ktime(tmp);
@@ -571,7 +571,7 @@ EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
571static inline void tk_update_leap_state(struct timekeeper *tk) 571static inline void tk_update_leap_state(struct timekeeper *tk)
572{ 572{
573 tk->next_leap_ktime = ntp_get_next_leap(); 573 tk->next_leap_ktime = ntp_get_next_leap();
574 if (tk->next_leap_ktime.tv64 != KTIME_MAX) 574 if (tk->next_leap_ktime != KTIME_MAX)
575 /* Convert to monotonic time */ 575 /* Convert to monotonic time */
576 tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real); 576 tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
577} 577}
@@ -2250,7 +2250,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
2250 } 2250 }
2251 2251
2252 /* Handle leapsecond insertion adjustments */ 2252 /* Handle leapsecond insertion adjustments */
2253 if (unlikely(base.tv64 >= tk->next_leap_ktime.tv64)) 2253 if (unlikely(base >= tk->next_leap_ktime))
2254 *offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0)); 2254 *offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
2255 2255
2256 } while (read_seqcount_retry(&tk_core.seq, seq)); 2256 } while (read_seqcount_retry(&tk_core.seq, seq));