-rw-r--r--  drivers/base/power/wakeup.c | 2
-rw-r--r--  drivers/media/rc/ir-rx51.c | 2
-rw-r--r--  drivers/rtc/interface.c | 8
-rw-r--r--  drivers/usb/chipidea/otg_fsm.c | 14
-rw-r--r--  drivers/usb/host/ehci-timer.c | 2
-rw-r--r--  drivers/usb/host/fotg210-hcd.c | 2
-rw-r--r--  fs/aio.c | 4
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayout.c | 3
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 2
-rw-r--r--  fs/timerfd.c | 26
-rw-r--r--  include/linux/futex.h | 4
-rw-r--r--  include/linux/hrtimer.h | 12
-rw-r--r--  include/linux/ktime.h | 68
-rw-r--r--  include/linux/tick.h | 4
-rw-r--r--  include/linux/wait.h | 2
-rw-r--r--  include/net/red.h | 4
-rw-r--r--  include/net/sock.h | 4
-rw-r--r--  include/trace/events/alarmtimer.h | 6
-rw-r--r--  include/trace/events/timer.h | 16
-rw-r--r--  kernel/futex.c | 4
-rw-r--r--  kernel/signal.c | 6
-rw-r--r--  kernel/time/alarmtimer.c | 20
-rw-r--r--  kernel/time/clockevents.c | 6
-rw-r--r--  kernel/time/hrtimer.c | 52
-rw-r--r--  kernel/time/itimer.c | 10
-rw-r--r--  kernel/time/ntp.c | 2
-rw-r--r--  kernel/time/posix-timers.c | 20
-rw-r--r--  kernel/time/tick-broadcast-hrtimer.c | 2
-rw-r--r--  kernel/time/tick-broadcast.c | 24
-rw-r--r--  kernel/time/tick-oneshot.c | 2
-rw-r--r--  kernel/time/tick-sched.c | 22
-rw-r--r--  kernel/time/timekeeping.c | 6
-rw-r--r--  lib/timerqueue.c | 4
-rw-r--r--  net/can/bcm.c | 28
-rw-r--r--  net/can/gw.c | 2
-rw-r--r--  net/core/dev.c | 4
-rw-r--r--  net/core/skbuff.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 4
-rw-r--r--  net/ipv6/exthdrs.c | 2
-rw-r--r--  net/ipx/af_ipx.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 2
-rw-r--r--  net/netfilter/nfnetlink_log.c | 2
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 4
-rw-r--r--  net/netfilter/xt_time.c | 2
-rw-r--r--  net/sched/sch_netem.c | 2
-rw-r--r--  net/socket.c | 2
-rw-r--r--  net/sunrpc/svcsock.c | 2
-rw-r--r--  sound/core/hrtimer.c | 2
48 files changed, 200 insertions(+), 227 deletions(-)
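
The change below is mechanical but wide: ktime_t stops being a one-member union and becomes a plain s64, so every ".tv64" access collapses into the variable itself. A minimal before/after sketch of the caller-side pattern, taken from the wakeup.c hunk that follows:

	/* before: ktime_t is a union, comparisons go through .tv64 */
	if (active_time.tv64 > max_time.tv64)
		max_time = active_time;

	/* after: ktime_t is a typedef for s64, plain operators apply */
	if (active_time > max_time)
		max_time = active_time;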
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index bf9ba26981a5..94332902a1cf 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -998,7 +998,7 @@ static int print_wakeup_source_stats(struct seq_file *m,
 
 		active_time = ktime_sub(now, ws->last_time);
 		total_time = ktime_add(total_time, active_time);
-		if (active_time.tv64 > max_time.tv64)
+		if (active_time > max_time)
 			max_time = active_time;
 
 		if (ws->autosleep_enabled)
diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c
index 82fb6f2ca011..e6efa8c267a0 100644
--- a/drivers/media/rc/ir-rx51.c
+++ b/drivers/media/rc/ir-rx51.c
@@ -109,7 +109,7 @@ static enum hrtimer_restart lirc_rx51_timer_cb(struct hrtimer *timer)
 
 		now = timer->base->get_time();
 
-	} while (hrtimer_get_expires_tv64(timer) < now.tv64);
+	} while (hrtimer_get_expires_tv64(timer) < now);
 
 	return HRTIMER_RESTART;
 end:
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 84a52db9b05f..5cf196dfc193 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -394,8 +394,8 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 	rtc->aie_timer.period = ktime_set(0, 0);
 
 	/* Alarm has to be enabled & in the future for us to enqueue it */
-	if (alarm->enabled && (rtc_tm_to_ktime(now).tv64 <
-			 rtc->aie_timer.node.expires.tv64)) {
+	if (alarm->enabled && (rtc_tm_to_ktime(now) <
+			 rtc->aie_timer.node.expires)) {
 
 		rtc->aie_timer.enabled = 1;
 		timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
@@ -766,7 +766,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
 
 	/* Skip over expired timers */
 	while (next) {
-		if (next->expires.tv64 >= now.tv64)
+		if (next->expires >= now)
 			break;
 		next = timerqueue_iterate_next(next);
 	}
@@ -858,7 +858,7 @@ again:
 	__rtc_read_time(rtc, &tm);
 	now = rtc_tm_to_ktime(tm);
 	while ((next = timerqueue_getnext(&rtc->timerqueue))) {
-		if (next->expires.tv64 > now.tv64)
+		if (next->expires > now)
 			break;
 
 		/* expire timer */
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index de8e22ec3902..93e24ce61a3a 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -234,8 +234,8 @@ static void ci_otg_add_timer(struct ci_hdrc *ci, enum otg_fsm_timer t)
 				ktime_set(timer_sec, timer_nsec));
 	ci->enabled_otg_timer_bits |= (1 << t);
 	if ((ci->next_otg_timer == NUM_OTG_FSM_TIMERS) ||
-			(ci->hr_timeouts[ci->next_otg_timer].tv64 >
-						ci->hr_timeouts[t].tv64)) {
+			(ci->hr_timeouts[ci->next_otg_timer] >
+						ci->hr_timeouts[t])) {
 			ci->next_otg_timer = t;
 			hrtimer_start_range_ns(&ci->otg_fsm_hrtimer,
 					ci->hr_timeouts[t], NSEC_PER_MSEC,
@@ -269,8 +269,8 @@ static void ci_otg_del_timer(struct ci_hdrc *ci, enum otg_fsm_timer t)
 		for_each_set_bit(cur_timer, &enabled_timer_bits,
 						NUM_OTG_FSM_TIMERS) {
 			if ((next_timer == NUM_OTG_FSM_TIMERS) ||
-				(ci->hr_timeouts[next_timer].tv64 <
-					ci->hr_timeouts[cur_timer].tv64))
+				(ci->hr_timeouts[next_timer] <
+					ci->hr_timeouts[cur_timer]))
 				next_timer = cur_timer;
 		}
 	}
@@ -397,14 +397,14 @@ static enum hrtimer_restart ci_otg_hrtimer_func(struct hrtimer *t)
 
 	now = ktime_get();
 	for_each_set_bit(cur_timer, &enabled_timer_bits, NUM_OTG_FSM_TIMERS) {
-		if (now.tv64 >= ci->hr_timeouts[cur_timer].tv64) {
+		if (now >= ci->hr_timeouts[cur_timer]) {
 			ci->enabled_otg_timer_bits &= ~(1 << cur_timer);
 			if (otg_timer_handlers[cur_timer])
 				ret = otg_timer_handlers[cur_timer](ci);
 		} else {
 			if ((next_timer == NUM_OTG_FSM_TIMERS) ||
-				(ci->hr_timeouts[cur_timer].tv64 <
-					ci->hr_timeouts[next_timer].tv64))
+				(ci->hr_timeouts[cur_timer] <
+					ci->hr_timeouts[next_timer]))
 				next_timer = cur_timer;
 		}
 	}
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
index 69f50e6533a6..262e10cacc8c 100644
--- a/drivers/usb/host/ehci-timer.c
+++ b/drivers/usb/host/ehci-timer.c
@@ -425,7 +425,7 @@ static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t)
 	 */
 	now = ktime_get();
 	for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) {
-		if (now.tv64 >= ehci->hr_timeouts[e].tv64)
+		if (now >= ehci->hr_timeouts[e])
 			event_handlers[e](ehci);
 		else
 			ehci_enable_event(ehci, e, false);
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 66efa9a67687..4dda56ef06cd 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -1381,7 +1381,7 @@ static enum hrtimer_restart fotg210_hrtimer_func(struct hrtimer *t)
 	 */
 	now = ktime_get();
 	for_each_set_bit(e, &events, FOTG210_HRTIMER_NUM_EVENTS) {
-		if (now.tv64 >= fotg210->hr_timeouts[e].tv64)
+		if (now >= fotg210->hr_timeouts[e])
 			event_handlers[e](fotg210);
 		else
 			fotg210_enable_event(fotg210, e, false);
diff --git a/fs/aio.c b/fs/aio.c
index 955c5241a8f2..4ab67e8cb776 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1285,7 +1285,7 @@ static long read_events(struct kioctx *ctx, long min_nr, long nr,
 			struct io_event __user *event,
 			struct timespec __user *timeout)
 {
-	ktime_t until = { .tv64 = KTIME_MAX };
+	ktime_t until = KTIME_MAX;
 	long ret = 0;
 
 	if (timeout) {
@@ -1311,7 +1311,7 @@ static long read_events(struct kioctx *ctx, long min_nr, long nr,
 	 * the ringbuffer empty. So in practice we should be ok, but it's
 	 * something to be aware of when touching this code.
 	 */
-	if (until.tv64 == 0)
+	if (until == 0)
 		aio_read_events(ctx, min_nr, nr, event, &ret);
 	else
 		wait_event_interruptible_hrtimeout(ctx->wait,
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 45962fe5098c..c98f6db9aa6b 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -619,12 +619,11 @@ nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
 				struct nfs4_ff_layoutstat *layoutstat,
 				ktime_t now)
 {
-	static const ktime_t notime = {0};
 	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
 	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
 
 	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
-	if (ktime_equal(mirror->start_time, notime))
+	if (ktime_equal(mirror->start_time, 0))
 		mirror->start_time = now;
 	if (mirror->report_interval != 0)
 		report_interval = (s64)mirror->report_interval * 1000LL;
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 96a155ab5059..f6e871760f8d 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1250,7 +1250,7 @@ static int o2hb_thread(void *data)
 
 		mlog(ML_HEARTBEAT,
 		     "start = %lld, end = %lld, msec = %u, ret = %d\n",
-		     before_hb.tv64, after_hb.tv64, elapsed_msec, ret);
+		     before_hb, after_hb, elapsed_msec, ret);
 
 		if (!kthread_should_stop() &&
 		    elapsed_msec < reg->hr_timeout_ms) {
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 9ae4abb4110b..fb4407a7cf9e 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -55,7 +55,7 @@ static inline bool isalarm(struct timerfd_ctx *ctx)
 /*
  * This gets called when the timer event triggers. We set the "expired"
  * flag, but we do not re-arm the timer (in case it's necessary,
- * tintv.tv64 != 0) until the timer is accessed.
+ * tintv != 0) until the timer is accessed.
  */
 static void timerfd_triggered(struct timerfd_ctx *ctx)
 {
@@ -93,7 +93,7 @@ static enum alarmtimer_restart timerfd_alarmproc(struct alarm *alarm,
  */
 void timerfd_clock_was_set(void)
 {
-	ktime_t moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 });
+	ktime_t moffs = ktime_mono_to_real(0);
 	struct timerfd_ctx *ctx;
 	unsigned long flags;
 
@@ -102,8 +102,8 @@ void timerfd_clock_was_set(void)
 		if (!ctx->might_cancel)
 			continue;
 		spin_lock_irqsave(&ctx->wqh.lock, flags);
-		if (ctx->moffs.tv64 != moffs.tv64) {
-			ctx->moffs.tv64 = KTIME_MAX;
+		if (ctx->moffs != moffs) {
+			ctx->moffs = KTIME_MAX;
 			ctx->ticks++;
 			wake_up_locked(&ctx->wqh);
 		}
@@ -124,9 +124,9 @@ static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
 
 static bool timerfd_canceled(struct timerfd_ctx *ctx)
 {
-	if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX)
+	if (!ctx->might_cancel || ctx->moffs != KTIME_MAX)
 		return false;
-	ctx->moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 });
+	ctx->moffs = ktime_mono_to_real(0);
 	return true;
 }
 
@@ -155,7 +155,7 @@ static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
 	else
 		remaining = hrtimer_expires_remaining_adjusted(&ctx->t.tmr);
 
-	return remaining.tv64 < 0 ? ktime_set(0, 0): remaining;
+	return remaining < 0 ? ktime_set(0, 0): remaining;
 }
 
 static int timerfd_setup(struct timerfd_ctx *ctx, int flags,
@@ -184,7 +184,7 @@ static int timerfd_setup(struct timerfd_ctx *ctx, int flags,
 		ctx->t.tmr.function = timerfd_tmrproc;
 	}
 
-	if (texp.tv64 != 0) {
+	if (texp != 0) {
 		if (isalarm(ctx)) {
 			if (flags & TFD_TIMER_ABSTIME)
 				alarm_start(&ctx->t.alarm, texp);
@@ -261,9 +261,9 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
 	if (ctx->ticks) {
 		ticks = ctx->ticks;
 
-		if (ctx->expired && ctx->tintv.tv64) {
+		if (ctx->expired && ctx->tintv) {
 			/*
-			 * If tintv.tv64 != 0, this is a periodic timer that
+			 * If tintv != 0, this is a periodic timer that
 			 * needs to be re-armed. We avoid doing it in the timer
 			 * callback to avoid DoS attacks specifying a very
 			 * short timer period.
@@ -410,7 +410,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
 	else
 		hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS);
 
-	ctx->moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 });
+	ctx->moffs = ktime_mono_to_real(0);
 
 	ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx,
 			       O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS));
@@ -469,7 +469,7 @@ static int do_timerfd_settime(int ufd, int flags,
 	 * We do not update "ticks" and "expired" since the timer will be
 	 * re-programmed again in the following timerfd_setup() call.
 	 */
-	if (ctx->expired && ctx->tintv.tv64) {
+	if (ctx->expired && ctx->tintv) {
 		if (isalarm(ctx))
 			alarm_forward_now(&ctx->t.alarm, ctx->tintv);
 		else
@@ -499,7 +499,7 @@ static int do_timerfd_gettime(int ufd, struct itimerspec *t)
 	ctx = f.file->private_data;
 
 	spin_lock_irq(&ctx->wqh.lock);
-	if (ctx->expired && ctx->tintv.tv64) {
+	if (ctx->expired && ctx->tintv) {
 		ctx->expired = 0;
 
 		if (isalarm(ctx)) {
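
timerfd uses the monotonic-to-realtime offset as a cancellation sentinel: timerfd_clock_was_set() stamps ctx->moffs to KTIME_MAX when the offset moves, and timerfd_canceled() reports that value as a cancel event before refreshing the offset. With scalar ktime_t both sides are plain compares and stores; a condensed sketch of the two paths from the hunks above:

	/* writer, under ctx->wqh.lock: the clock was set */
	if (ctx->moffs != moffs) {
		ctx->moffs = KTIME_MAX;		/* mark as canceled */
		ctx->ticks++;
		wake_up_locked(&ctx->wqh);
	}

	/* reader: report the cancel once, then refresh the offset */
	if (!ctx->might_cancel || ctx->moffs != KTIME_MAX)
		return false;
	ctx->moffs = ktime_mono_to_real(0);
	return true;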
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 6435f46d6e13..7c5b694864cd 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -1,14 +1,14 @@
 #ifndef _LINUX_FUTEX_H
 #define _LINUX_FUTEX_H
 
+#include <linux/ktime.h>
 #include <uapi/linux/futex.h>
 
 struct inode;
 struct mm_struct;
 struct task_struct;
-union ktime;
 
-long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout,
+long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
 	      u32 __user *uaddr2, u32 val2, u32 val3);
 
 extern int
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 5e00f80b1535..cdab81ba29f8 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -228,8 +228,8 @@ static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t t
 
 static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
 {
-	timer->node.expires.tv64 = tv64;
-	timer->_softexpires.tv64 = tv64;
+	timer->node.expires = tv64;
+	timer->_softexpires = tv64;
 }
 
 static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
@@ -256,11 +256,11 @@ static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
 
 static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
 {
-	return timer->node.expires.tv64;
+	return timer->node.expires;
 }
 static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
 {
-	return timer->_softexpires.tv64;
+	return timer->_softexpires;
 }
 
 static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
@@ -297,7 +297,7 @@ extern void hrtimer_peek_ahead_timers(void);
  * this resolution values.
  */
 # define HIGH_RES_NSEC		1
-# define KTIME_HIGH_RES		(ktime_t) { .tv64 = HIGH_RES_NSEC }
+# define KTIME_HIGH_RES		(HIGH_RES_NSEC)
 # define MONOTONIC_RES_NSEC	HIGH_RES_NSEC
 # define KTIME_MONOTONIC_RES	KTIME_HIGH_RES
 
@@ -333,7 +333,7 @@ __hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
 	 * hrtimer_start_range_ns() to prevent short timeouts.
 	 */
 	if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
-		rem.tv64 -= hrtimer_resolution;
+		rem -= hrtimer_resolution;
 	return rem;
 }
 
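
With the union gone, the *_tv64 accessors above reduce to plain reads and writes of the s64 expiry. A hedged sketch of the resulting equivalence (illustrative, not part of this patch):

	/* both calls now return the same 64-bit nanosecond value */
	s64	exp_ns = hrtimer_get_expires_tv64(timer);
	ktime_t exp    = hrtimer_get_expires(timer);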
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 0fb7ffb1775f..8e573deda55e 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -24,21 +24,8 @@
 #include <linux/time.h>
 #include <linux/jiffies.h>
 
-/*
- * ktime_t:
- *
- * A single 64-bit variable is used to store the hrtimers
- * internal representation of time values in scalar nanoseconds. The
- * design plays out best on 64-bit CPUs, where most conversions are
- * NOPs and most arithmetic ktime_t operations are plain arithmetic
- * operations.
- *
- */
-union ktime {
-	s64	tv64;
-};
-
-typedef union ktime ktime_t;		/* Kill this */
+/* Nanosecond scalar representation for kernel time values */
+typedef s64	ktime_t;
 
 /**
  * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
@@ -50,39 +37,34 @@ typedef union ktime ktime_t; /* Kill this */
 static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
 {
 	if (unlikely(secs >= KTIME_SEC_MAX))
-		return (ktime_t){ .tv64 = KTIME_MAX };
+		return KTIME_MAX;
 
-	return (ktime_t) { .tv64 = secs * NSEC_PER_SEC + (s64)nsecs };
+	return secs * NSEC_PER_SEC + (s64)nsecs;
 }
 
 /* Subtract two ktime_t variables. rem = lhs -rhs: */
-#define ktime_sub(lhs, rhs) \
-	({ (ktime_t){ .tv64 = (lhs).tv64 - (rhs).tv64 }; })
+#define ktime_sub(lhs, rhs)	((lhs) - (rhs))
 
 /* Add two ktime_t variables. res = lhs + rhs: */
-#define ktime_add(lhs, rhs) \
-	({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; })
+#define ktime_add(lhs, rhs)	((lhs) + (rhs))
 
 /*
  * Same as ktime_add(), but avoids undefined behaviour on overflow; however,
  * this means that you must check the result for overflow yourself.
  */
-#define ktime_add_unsafe(lhs, rhs) \
-	({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; })
+#define ktime_add_unsafe(lhs, rhs)	((u64) (lhs) + (rhs))
 
 /*
  * Add a ktime_t variable and a scalar nanosecond value.
  * res = kt + nsval:
  */
-#define ktime_add_ns(kt, nsval) \
-	({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; })
+#define ktime_add_ns(kt, nsval)		((kt) + (nsval))
 
 /*
  * Subtract a scalar nanosecod from a ktime_t variable
  * res = kt - nsval:
  */
-#define ktime_sub_ns(kt, nsval) \
-	({ (ktime_t){ .tv64 = (kt).tv64 - (nsval) }; })
+#define ktime_sub_ns(kt, nsval)		((kt) - (nsval))
 
 /* convert a timespec to ktime_t format: */
 static inline ktime_t timespec_to_ktime(struct timespec ts)
@@ -103,16 +85,16 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
 }
 
 /* Map the ktime_t to timespec conversion to ns_to_timespec function */
-#define ktime_to_timespec(kt)		ns_to_timespec((kt).tv64)
+#define ktime_to_timespec(kt)		ns_to_timespec((kt))
 
 /* Map the ktime_t to timespec conversion to ns_to_timespec function */
-#define ktime_to_timespec64(kt)		ns_to_timespec64((kt).tv64)
+#define ktime_to_timespec64(kt)		ns_to_timespec64((kt))
 
 /* Map the ktime_t to timeval conversion to ns_to_timeval function */
-#define ktime_to_timeval(kt)		ns_to_timeval((kt).tv64)
+#define ktime_to_timeval(kt)		ns_to_timeval((kt))
 
 /* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
-#define ktime_to_ns(kt)			((kt).tv64)
+#define ktime_to_ns(kt)			(kt)
 
 
 /**
@@ -126,7 +108,7 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
  */
 static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
 {
-	return cmp1.tv64 == cmp2.tv64;
+	return cmp1 == cmp2;
 }
 
 /**
@@ -141,9 +123,9 @@ static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
  */
 static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
 {
-	if (cmp1.tv64 < cmp2.tv64)
+	if (cmp1 < cmp2)
 		return -1;
-	if (cmp1.tv64 > cmp2.tv64)
+	if (cmp1 > cmp2)
 		return 1;
 	return 0;
 }
@@ -182,7 +164,7 @@ static inline s64 ktime_divns(const ktime_t kt, s64 div)
 	 */
 	BUG_ON(div < 0);
 	if (__builtin_constant_p(div) && !(div >> 32)) {
-		s64 ns = kt.tv64;
+		s64 ns = kt;
 		u64 tmp = ns < 0 ? -ns : ns;
 
 		do_div(tmp, div);
@@ -199,7 +181,7 @@ static inline s64 ktime_divns(const ktime_t kt, s64 div)
 	 * so catch them on 64bit as well.
 	 */
 	WARN_ON(div < 0);
-	return kt.tv64 / div;
+	return kt / div;
 }
 #endif
 
@@ -256,7 +238,7 @@ extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
 static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
 						       struct timespec *ts)
 {
-	if (kt.tv64) {
+	if (kt) {
 		*ts = ktime_to_timespec(kt);
 		return true;
 	} else {
@@ -275,7 +257,7 @@ static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
 static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
 						       struct timespec64 *ts)
 {
-	if (kt.tv64) {
+	if (kt) {
 		*ts = ktime_to_timespec64(kt);
 		return true;
 	} else {
@@ -290,20 +272,16 @@ static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
  * this resolution values.
  */
 #define LOW_RES_NSEC		TICK_NSEC
-#define KTIME_LOW_RES		(ktime_t){ .tv64 = LOW_RES_NSEC }
+#define KTIME_LOW_RES		(LOW_RES_NSEC)
 
 static inline ktime_t ns_to_ktime(u64 ns)
 {
-	static const ktime_t ktime_zero = { .tv64 = 0 };
-
-	return ktime_add_ns(ktime_zero, ns);
+	return ns;
 }
 
 static inline ktime_t ms_to_ktime(u64 ms)
 {
-	static const ktime_t ktime_zero = { .tv64 = 0 };
-
-	return ktime_add_ms(ktime_zero, ms);
+	return ms * NSEC_PER_MSEC;
 }
 
 # include <linux/timekeeping.h>
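
ktime_add_unsafe() above keeps its detour through u64 so that a wrapping addition stays defined behaviour under C's signed-overflow rules; the caller still has to detect the wrap. ktime_add_safe() in kernel/time/hrtimer.c, further down in this patch, does exactly that, roughly:

	ktime_t res = ktime_add_unsafe(lhs, rhs);
	/* a signed wrap shows up as res dropping below an operand */
	if (res < 0 || res < lhs || res < rhs)
		res = ktime_set(KTIME_SEC_MAX, 0);	/* clamp */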
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 62be0786d6d0..a04fea19676f 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -127,9 +127,7 @@ static inline void tick_nohz_idle_exit(void) { }
 
 static inline ktime_t tick_nohz_get_sleep_length(void)
 {
-	ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };
-
-	return len;
+	return NSEC_PER_SEC / HZ;
 }
 static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
 static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 2408e8d5c05c..1421132e9086 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -510,7 +510,7 @@ do { \
 	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
 			      HRTIMER_MODE_REL);			\
 	hrtimer_init_sleeper(&__t, current);				\
-	if ((timeout).tv64 != KTIME_MAX)				\
+	if ((timeout) != KTIME_MAX)					\
 		hrtimer_start_range_ns(&__t.timer, timeout,		\
 				       current->timer_slack_ns,		\
 				       HRTIMER_MODE_REL);		\
diff --git a/include/net/red.h b/include/net/red.h
index 76e0b5f922c6..208e718e16b9 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -207,7 +207,7 @@ static inline void red_set_parms(struct red_parms *p,
 
 static inline int red_is_idling(const struct red_vars *v)
 {
-	return v->qidlestart.tv64 != 0;
+	return v->qidlestart != 0;
 }
 
 static inline void red_start_of_idle_period(struct red_vars *v)
@@ -217,7 +217,7 @@ static inline void red_start_of_idle_period(struct red_vars *v)
 
 static inline void red_end_of_idle_period(struct red_vars *v)
 {
-	v->qidlestart.tv64 = 0;
+	v->qidlestart = 0;
 }
 
 static inline void red_restart(struct red_vars *v)
diff --git a/include/net/sock.h b/include/net/sock.h
index 282d065e286b..f0e867f58722 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2193,8 +2193,8 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
 	 */
 	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
 	    (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
-	    (kt.tv64 && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
-	    (hwtstamps->hwtstamp.tv64 &&
+	    (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
+	    (hwtstamps->hwtstamp &&
 	     (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
 		__sock_recv_timestamp(msg, sk, skb);
 	else
diff --git a/include/trace/events/alarmtimer.h b/include/trace/events/alarmtimer.h
index a1c108c16c9c..ae4f358dd8e9 100644
--- a/include/trace/events/alarmtimer.h
+++ b/include/trace/events/alarmtimer.h
@@ -31,7 +31,7 @@ TRACE_EVENT(alarmtimer_suspend,
 	),
 
 	TP_fast_assign(
-		__entry->expires = expires.tv64;
+		__entry->expires = expires;
 		__entry->alarm_type = flag;
 	),
 
@@ -57,8 +57,8 @@ DECLARE_EVENT_CLASS(alarm_class,
 	TP_fast_assign(
 		__entry->alarm = alarm;
 		__entry->alarm_type = alarm->type;
-		__entry->expires = alarm->node.expires.tv64;
-		__entry->now = now.tv64;
+		__entry->expires = alarm->node.expires;
+		__entry->now = now;
 	),
 
 	TP_printk("alarmtimer:%p type:%s expires:%llu now:%llu",
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 28c5da6fdfac..1448637616d6 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -177,16 +177,14 @@ TRACE_EVENT(hrtimer_start,
 	TP_fast_assign(
 		__entry->hrtimer	= hrtimer;
 		__entry->function	= hrtimer->function;
-		__entry->expires	= hrtimer_get_expires(hrtimer).tv64;
-		__entry->softexpires	= hrtimer_get_softexpires(hrtimer).tv64;
+		__entry->expires	= hrtimer_get_expires(hrtimer);
+		__entry->softexpires	= hrtimer_get_softexpires(hrtimer);
 	),
 
 	TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
 		  __entry->hrtimer, __entry->function,
-		  (unsigned long long)ktime_to_ns((ktime_t) {
-				  .tv64 = __entry->expires }),
-		  (unsigned long long)ktime_to_ns((ktime_t) {
-				  .tv64 = __entry->softexpires }))
+		  (unsigned long long) __entry->expires,
+		  (unsigned long long) __entry->softexpires)
 );
 
 /**
@@ -211,13 +209,13 @@ TRACE_EVENT(hrtimer_expire_entry,
 
 	TP_fast_assign(
 		__entry->hrtimer	= hrtimer;
-		__entry->now		= now->tv64;
+		__entry->now		= *now;
 		__entry->function	= hrtimer->function;
 	),
 
 	TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function,
-		  (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
- );
+		  (unsigned long long) __entry->now)
+);
 
 DECLARE_EVENT_CLASS(hrtimer_class,
 
diff --git a/kernel/futex.c b/kernel/futex.c
index 9246d9f593d1..0842c8ca534b 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2459,7 +2459,7 @@ retry:
 	restart->fn = futex_wait_restart;
 	restart->futex.uaddr = uaddr;
 	restart->futex.val = val;
-	restart->futex.time = abs_time->tv64;
+	restart->futex.time = *abs_time;
 	restart->futex.bitset = bitset;
 	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
 
@@ -2480,7 +2480,7 @@ static long futex_wait_restart(struct restart_block *restart)
 	ktime_t t, *tp = NULL;
 
 	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
-		t.tv64 = restart->futex.time;
+		t = restart->futex.time;
 		tp = &t;
 	}
 	restart->fn = do_no_restart_syscall;
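
The restart block stores the timeout as a bare 64-bit value; with scalar ktime_t the round trip becomes plain assignment in both directions, as the two hunks above show:

	restart->futex.time = *abs_time;	/* save when setting up the restart */
	...
	t = restart->futex.time;		/* recover in futex_wait_restart() */
	tp = &t;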
diff --git a/kernel/signal.c b/kernel/signal.c
index f5d4e275345e..ff046b73ff2d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -587,7 +587,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 		struct hrtimer *tmr = &tsk->signal->real_timer;
 
 		if (!hrtimer_is_queued(tmr) &&
-		    tsk->signal->it_real_incr.tv64 != 0) {
+		    tsk->signal->it_real_incr != 0) {
 			hrtimer_forward(tmr, tmr->base->get_time(),
 					tsk->signal->it_real_incr);
 			hrtimer_restart(tmr);
@@ -2766,7 +2766,7 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
 		    const struct timespec *ts)
 {
-	ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX };
+	ktime_t *to = NULL, timeout = KTIME_MAX;
 	struct task_struct *tsk = current;
 	sigset_t mask = *which;
 	int sig, ret = 0;
@@ -2786,7 +2786,7 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
 
 	spin_lock_irq(&tsk->sighand->siglock);
 	sig = dequeue_signal(tsk, &mask, info);
-	if (!sig && timeout.tv64) {
+	if (!sig && timeout) {
 		/*
 		 * None ready, temporarily unblock those we're interested
 		 * while we are sleeping in so that we'll be awakened when
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 3921cf7fea8e..ab6ac077bdb7 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -254,13 +254,13 @@ static int alarmtimer_suspend(struct device *dev)
 		if (!next)
 			continue;
 		delta = ktime_sub(next->expires, base->gettime());
-		if (!min.tv64 || (delta.tv64 < min.tv64)) {
+		if (!min || (delta < min)) {
 			expires = next->expires;
 			min = delta;
 			type = i;
 		}
 	}
-	if (min.tv64 == 0)
+	if (min == 0)
 		return 0;
 
 	if (ktime_to_ns(min) < 2 * NSEC_PER_SEC) {
@@ -328,7 +328,7 @@ static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type)
 	delta = ktime_sub(absexp, base->gettime());
 
 	spin_lock_irqsave(&freezer_delta_lock, flags);
-	if (!freezer_delta.tv64 || (delta.tv64 < freezer_delta.tv64)) {
+	if (!freezer_delta || (delta < freezer_delta)) {
 		freezer_delta = delta;
 		freezer_expires = absexp;
 		freezer_alarmtype = type;
@@ -453,10 +453,10 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
 
 	delta = ktime_sub(now, alarm->node.expires);
 
-	if (delta.tv64 < 0)
+	if (delta < 0)
 		return 0;
 
-	if (unlikely(delta.tv64 >= interval.tv64)) {
+	if (unlikely(delta >= interval)) {
 		s64 incr = ktime_to_ns(interval);
 
 		overrun = ktime_divns(delta, incr);
@@ -464,7 +464,7 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
 		alarm->node.expires = ktime_add_ns(alarm->node.expires,
 						   incr*overrun);
 
-		if (alarm->node.expires.tv64 > now.tv64)
+		if (alarm->node.expires > now)
 			return overrun;
 		/*
 		 * This (and the ktime_add() below) is the
@@ -522,7 +522,7 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
 	}
 
 	/* Re-add periodic timers */
-	if (ptr->it.alarm.interval.tv64) {
+	if (ptr->it.alarm.interval) {
 		ptr->it_overrun += alarm_forward(alarm, now,
 						 ptr->it.alarm.interval);
 		result = ALARMTIMER_RESTART;
@@ -730,7 +730,7 @@ static int update_rmtp(ktime_t exp, enum alarmtimer_type type,
 
 	rem = ktime_sub(exp, alarm_bases[type].gettime());
 
-	if (rem.tv64 <= 0)
+	if (rem <= 0)
 		return 0;
 	rmt = ktime_to_timespec(rem);
 
@@ -755,7 +755,7 @@ static long __sched alarm_timer_nsleep_restart(struct restart_block *restart)
 	struct alarm alarm;
 	int ret = 0;
 
-	exp.tv64 = restart->nanosleep.expires;
+	exp = restart->nanosleep.expires;
 	alarm_init(&alarm, type, alarmtimer_nsleep_wakeup);
 
 	if (alarmtimer_do_nsleep(&alarm, exp))
@@ -835,7 +835,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
 	restart = &current->restart_block;
 	restart->fn = alarm_timer_nsleep_restart;
 	restart->nanosleep.clockid = type;
-	restart->nanosleep.expires = exp.tv64;
+	restart->nanosleep.expires = exp;
 	restart->nanosleep.rmtp = rmtp;
 	ret = -ERESTART_RESTARTBLOCK;
 
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 2c5bc77c0bb0..97ac0951f164 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -179,7 +179,7 @@ void clockevents_switch_state(struct clock_event_device *dev,
 void clockevents_shutdown(struct clock_event_device *dev)
 {
 	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
-	dev->next_event.tv64 = KTIME_MAX;
+	dev->next_event = KTIME_MAX;
 }
 
 /**
@@ -213,7 +213,7 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
 	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
 		printk_deferred(KERN_WARNING
 				"CE: Reprogramming failure. Giving up\n");
-		dev->next_event.tv64 = KTIME_MAX;
+		dev->next_event = KTIME_MAX;
 		return -ETIME;
 	}
 
@@ -310,7 +310,7 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
 	int64_t delta;
 	int rc;
 
-	if (unlikely(expires.tv64 < 0)) {
+	if (unlikely(expires < 0)) {
 		WARN_ON_ONCE(1);
 		return -ETIME;
 	}
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 161e340395d5..c7f780113884 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -171,7 +171,7 @@ hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
 		return 0;
 
 	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
-	return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
+	return expires <= new_base->cpu_base->expires_next;
 #else
 	return 0;
 #endif
@@ -313,7 +313,7 @@ ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
 	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
 	 * return to user space in a timespec:
 	 */
-	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
+	if (res < 0 || res < lhs || res < rhs)
 		res = ktime_set(KTIME_SEC_MAX, 0);
 
 	return res;
@@ -465,8 +465,8 @@ static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
 static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
 {
 	struct hrtimer_clock_base *base = cpu_base->clock_base;
-	ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
 	unsigned int active = cpu_base->active_bases;
+	ktime_t expires, expires_next = KTIME_MAX;
 
 	hrtimer_update_next_timer(cpu_base, NULL);
 	for (; active; base++, active >>= 1) {
@@ -479,7 +479,7 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
 		next = timerqueue_getnext(&base->active);
 		timer = container_of(next, struct hrtimer, node);
 		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
-		if (expires.tv64 < expires_next.tv64) {
+		if (expires < expires_next) {
 			expires_next = expires;
 			hrtimer_update_next_timer(cpu_base, timer);
 		}
@@ -489,8 +489,8 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
 	 * the clock bases so the result might be negative. Fix it up
 	 * to prevent a false positive in clockevents_program_event().
 	 */
-	if (expires_next.tv64 < 0)
-		expires_next.tv64 = 0;
+	if (expires_next < 0)
+		expires_next = 0;
 	return expires_next;
 }
 #endif
@@ -561,10 +561,10 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 
 	expires_next = __hrtimer_get_next_event(cpu_base);
 
-	if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
+	if (skip_equal && expires_next == cpu_base->expires_next)
 		return;
 
-	cpu_base->expires_next.tv64 = expires_next.tv64;
+	cpu_base->expires_next = expires_next;
 
 	/*
 	 * If a hang was detected in the last timer interrupt then we
@@ -622,10 +622,10 @@ static void hrtimer_reprogram(struct hrtimer *timer,
 	 * CLOCK_REALTIME timer might be requested with an absolute
 	 * expiry time which is less than base->offset. Set it to 0.
 	 */
-	if (expires.tv64 < 0)
-		expires.tv64 = 0;
+	if (expires < 0)
+		expires = 0;
 
-	if (expires.tv64 >= cpu_base->expires_next.tv64)
+	if (expires >= cpu_base->expires_next)
 		return;
 
 	/* Update the pointer to the next expiring timer */
@@ -653,7 +653,7 @@ static void hrtimer_reprogram(struct hrtimer *timer,
  */
 static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
 {
-	base->expires_next.tv64 = KTIME_MAX;
+	base->expires_next = KTIME_MAX;
 	base->hres_active = 0;
 }
 
@@ -827,21 +827,21 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 
 	delta = ktime_sub(now, hrtimer_get_expires(timer));
 
-	if (delta.tv64 < 0)
+	if (delta < 0)
 		return 0;
 
 	if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
 		return 0;
 
-	if (interval.tv64 < hrtimer_resolution)
-		interval.tv64 = hrtimer_resolution;
+	if (interval < hrtimer_resolution)
+		interval = hrtimer_resolution;
 
-	if (unlikely(delta.tv64 >= interval.tv64)) {
+	if (unlikely(delta >= interval)) {
 		s64 incr = ktime_to_ns(interval);
 
 		orun = ktime_divns(delta, incr);
 		hrtimer_add_expires_ns(timer, incr * orun);
-		if (hrtimer_get_expires_tv64(timer) > now.tv64)
+		if (hrtimer_get_expires_tv64(timer) > now)
 			return orun;
 		/*
 		 * This (and the ktime_add() below) is the
@@ -1104,7 +1104,7 @@ u64 hrtimer_get_next_event(void)
 	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
 	if (!__hrtimer_hres_active(cpu_base))
-		expires = __hrtimer_get_next_event(cpu_base).tv64;
+		expires = __hrtimer_get_next_event(cpu_base);
 
 	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
@@ -1296,7 +1296,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
 			 * are right-of a not yet expired timer, because that
 			 * timer will have to trigger a wakeup anyway.
 			 */
-			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
+			if (basenow < hrtimer_get_softexpires_tv64(timer))
 				break;
 
 			__run_hrtimer(cpu_base, base, timer, &basenow);
@@ -1318,7 +1318,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 
 	BUG_ON(!cpu_base->hres_active);
 	cpu_base->nr_events++;
-	dev->next_event.tv64 = KTIME_MAX;
+	dev->next_event = KTIME_MAX;
 
 	raw_spin_lock(&cpu_base->lock);
 	entry_time = now = hrtimer_update_base(cpu_base);
@@ -1331,7 +1331,7 @@ retry:
 	 * timers which run their callback and need to be requeued on
 	 * this CPU.
 	 */
-	cpu_base->expires_next.tv64 = KTIME_MAX;
+	cpu_base->expires_next = KTIME_MAX;
 
 	__hrtimer_run_queues(cpu_base, now);
 
@@ -1379,13 +1379,13 @@ retry:
 	cpu_base->hang_detected = 1;
 	raw_spin_unlock(&cpu_base->lock);
 	delta = ktime_sub(now, entry_time);
-	if ((unsigned int)delta.tv64 > cpu_base->max_hang_time)
-		cpu_base->max_hang_time = (unsigned int) delta.tv64;
+	if ((unsigned int)delta > cpu_base->max_hang_time)
+		cpu_base->max_hang_time = (unsigned int) delta;
 	/*
 	 * Limit it to a sensible value as we enforce a longer
 	 * delay. Give the CPU at least 100ms to catch up.
 	 */
-	if (delta.tv64 > 100 * NSEC_PER_MSEC)
+	if (delta > 100 * NSEC_PER_MSEC)
 		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
 	else
 		expires_next = ktime_add(now, delta);
@@ -1495,7 +1495,7 @@ static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
 	ktime_t rem;
 
 	rem = hrtimer_expires_remaining(timer);
-	if (rem.tv64 <= 0)
+	if (rem <= 0)
 		return 0;
 	rmt = ktime_to_timespec(rem);
 
@@ -1693,7 +1693,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
 	 * Optimize when a zero timeout value is given. It does not
 	 * matter whether this is an absolute or a relative time.
 	 */
-	if (expires && !expires->tv64) {
+	if (expires && *expires == 0) {
 		__set_current_state(TASK_RUNNING);
 		return 0;
 	}
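
hrtimer_forward() above skips all missed periods with one division instead of looping. A worked example with illustrative numbers (not from the patch): a periodic timer with a 10 ms interval whose expiry lies 35 ms in the past:

	ktime_t delta = 35 * NSEC_PER_MSEC;	/* now - expiry */
	s64 incr = 10 * NSEC_PER_MSEC;		/* timer interval */
	s64 orun = ktime_divns(delta, incr);	/* 3 whole periods missed */
	/* the expiry advances by incr * orun = 30 ms; since that is
	 * still 5 ms in the past, one more interval is then added. */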
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index a45afb7277c2..8c89143f9ebf 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -34,10 +34,10 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer)
 	 * then we return 0 - which is correct.
 	 */
 	if (hrtimer_active(timer)) {
-		if (rem.tv64 <= 0)
-			rem.tv64 = NSEC_PER_USEC;
+		if (rem <= 0)
+			rem = NSEC_PER_USEC;
 	} else
-		rem.tv64 = 0;
+		rem = 0;
 
 	return ktime_to_timeval(rem);
 }
@@ -216,12 +216,12 @@ again:
 		goto again;
 	}
 	expires = timeval_to_ktime(value->it_value);
-	if (expires.tv64 != 0) {
+	if (expires != 0) {
 		tsk->signal->it_real_incr =
 			timeval_to_ktime(value->it_interval);
 		hrtimer_start(timer, expires, HRTIMER_MODE_REL);
 	} else
-		tsk->signal->it_real_incr.tv64 = 0;
+		tsk->signal->it_real_incr = 0;
 
 	trace_itimer_state(ITIMER_REAL, value, 0);
 	spin_unlock_irq(&tsk->sighand->siglock);
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 6df8927c58a5..edf19cc53140 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -381,7 +381,7 @@ ktime_t ntp_get_next_leap(void)
 
 	if ((time_state == TIME_INS) && (time_status & STA_INS))
 		return ktime_set(ntp_next_leap_sec, 0);
-	ret.tv64 = KTIME_MAX;
+	ret = KTIME_MAX;
 	return ret;
 }
 
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 42d7b9558741..9fe98b3777a2 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -359,7 +359,7 @@ static void schedule_next_timer(struct k_itimer *timr)
 {
 	struct hrtimer *timer = &timr->it.real.timer;
 
-	if (timr->it.real.interval.tv64 == 0)
+	if (timr->it.real.interval == 0)
 		return;
 
 	timr->it_overrun += (unsigned int) hrtimer_forward(timer,
@@ -449,7 +449,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
 	timr = container_of(timer, struct k_itimer, it.real.timer);
 	spin_lock_irqsave(&timr->it_lock, flags);
 
-	if (timr->it.real.interval.tv64 != 0)
+	if (timr->it.real.interval != 0)
 		si_private = ++timr->it_requeue_pending;
 
 	if (posix_timer_event(timr, si_private)) {
@@ -458,7 +458,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
 		 * we will not get a call back to restart it AND
 		 * it should be restarted.
 		 */
-		if (timr->it.real.interval.tv64 != 0) {
+		if (timr->it.real.interval != 0) {
 			ktime_t now = hrtimer_cb_get_time(timer);
 
 			/*
@@ -487,7 +487,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
 			{
 				ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);
 
-				if (timr->it.real.interval.tv64 < kj.tv64)
+				if (timr->it.real.interval < kj)
 					now = ktime_add(now, kj);
 			}
 #endif
@@ -743,7 +743,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
 	iv = timr->it.real.interval;
 
 	/* interval timer ? */
-	if (iv.tv64)
+	if (iv)
 		cur_setting->it_interval = ktime_to_timespec(iv);
 	else if (!hrtimer_active(timer) &&
 		 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
@@ -756,13 +756,13 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
756 * timer move the expiry time forward by intervals, so 756 * timer move the expiry time forward by intervals, so
757 * expiry is > now. 757 * expiry is > now.
758 */ 758 */
759 if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING || 759 if (iv && (timr->it_requeue_pending & REQUEUE_PENDING ||
760 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) 760 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
761 timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv); 761 timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
762 762
763 remaining = __hrtimer_expires_remaining_adjusted(timer, now); 763 remaining = __hrtimer_expires_remaining_adjusted(timer, now);
764 /* Return 0 only, when the timer is expired and not pending */ 764 /* Return 0 only, when the timer is expired and not pending */
765 if (remaining.tv64 <= 0) { 765 if (remaining <= 0) {
766 /* 766 /*
767 * A single shot SIGEV_NONE timer must return 0, when 767 * A single shot SIGEV_NONE timer must return 0, when
768 * it is expired ! 768 * it is expired !
@@ -839,7 +839,7 @@ common_timer_set(struct k_itimer *timr, int flags,
839 common_timer_get(timr, old_setting); 839 common_timer_get(timr, old_setting);
840 840
841 /* disable the timer */ 841 /* disable the timer */
842 timr->it.real.interval.tv64 = 0; 842 timr->it.real.interval = 0;
843 /* 843 /*
844 * careful here. If smp we could be in the "fire" routine which will 844 * careful here. If smp we could be in the "fire" routine which will
845 * be spinning as we hold the lock. But this is ONLY an SMP issue. 845 * be spinning as we hold the lock. But this is ONLY an SMP issue.
@@ -924,7 +924,7 @@ retry:
924 924
925static int common_timer_del(struct k_itimer *timer) 925static int common_timer_del(struct k_itimer *timer)
926{ 926{
927 timer->it.real.interval.tv64 = 0; 927 timer->it.real.interval = 0;
928 928
929 if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0) 929 if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
930 return TIMER_RETRY; 930 return TIMER_RETRY;
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index 690b797f522e..a7bb8f33ae07 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -97,7 +97,7 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
 	ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
 
 	if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
-		if (ce_broadcast_hrtimer.next_event.tv64 != KTIME_MAX)
+		if (ce_broadcast_hrtimer.next_event != KTIME_MAX)
 			return HRTIMER_RESTART;
 
 	return HRTIMER_NORESTART;
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index d2a20e83ebae..3109204c87cc 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -604,14 +604,14 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
 	bool bc_local;
 
 	raw_spin_lock(&tick_broadcast_lock);
-	dev->next_event.tv64 = KTIME_MAX;
-	next_event.tv64 = KTIME_MAX;
+	dev->next_event = KTIME_MAX;
+	next_event = KTIME_MAX;
 	cpumask_clear(tmpmask);
 	now = ktime_get();
 	/* Find all expired events */
 	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
 		td = &per_cpu(tick_cpu_device, cpu);
-		if (td->evtdev->next_event.tv64 <= now.tv64) {
+		if (td->evtdev->next_event <= now) {
 			cpumask_set_cpu(cpu, tmpmask);
 			/*
 			 * Mark the remote cpu in the pending mask, so
@@ -619,8 +619,8 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
 			 * timer in tick_broadcast_oneshot_control().
 			 */
 			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
-		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
-			next_event.tv64 = td->evtdev->next_event.tv64;
+		} else if (td->evtdev->next_event < next_event) {
+			next_event = td->evtdev->next_event;
 			next_cpu = cpu;
 		}
 	}
@@ -657,7 +657,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
 	 * - There are pending events on sleeping CPUs which were not
 	 *   in the event mask
 	 */
-	if (next_event.tv64 != KTIME_MAX)
+	if (next_event != KTIME_MAX)
 		tick_broadcast_set_event(dev, next_cpu, next_event);
 
 	raw_spin_unlock(&tick_broadcast_lock);
@@ -672,7 +672,7 @@ static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
 {
 	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
 		return 0;
-	if (bc->next_event.tv64 == KTIME_MAX)
+	if (bc->next_event == KTIME_MAX)
 		return 0;
 	return bc->bound_on == cpu ? -EBUSY : 0;
 }
@@ -688,7 +688,7 @@ static void broadcast_shutdown_local(struct clock_event_device *bc,
 	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
 		if (broadcast_needs_cpu(bc, smp_processor_id()))
 			return;
-		if (dev->next_event.tv64 < bc->next_event.tv64)
+		if (dev->next_event < bc->next_event)
 			return;
 	}
 	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
@@ -754,7 +754,7 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
 		 */
 		if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
 			ret = -EBUSY;
-		} else if (dev->next_event.tv64 < bc->next_event.tv64) {
+		} else if (dev->next_event < bc->next_event) {
 			tick_broadcast_set_event(bc, cpu, dev->next_event);
 			/*
 			 * In case of hrtimer broadcasts the
@@ -789,7 +789,7 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
 		/*
 		 * Bail out if there is no next event.
 		 */
-		if (dev->next_event.tv64 == KTIME_MAX)
+		if (dev->next_event == KTIME_MAX)
 			goto out;
 		/*
 		 * If the pending bit is not set, then we are
@@ -824,7 +824,7 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
 		 * nohz fixups.
 		 */
 		now = ktime_get();
-		if (dev->next_event.tv64 <= now.tv64) {
+		if (dev->next_event <= now) {
 			cpumask_set_cpu(cpu, tick_broadcast_force_mask);
 			goto out;
 		}
@@ -897,7 +897,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 					   tick_next_period);
 			tick_broadcast_set_event(bc, cpu, tick_next_period);
 		} else
-			bc->next_event.tv64 = KTIME_MAX;
+			bc->next_event = KTIME_MAX;
 	} else {
 		/*
 		 * The first cpu which switches to oneshot mode sets
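
Throughout these broadcast hunks, KTIME_MAX serves as the "nothing armed"
sentinel, and with ktime_t a scalar the check is a direct comparison. A minimal
sketch of the idiom; the variable and helper names are invented:

	#include <linux/ktime.h>

	static ktime_t next_event = KTIME_MAX;	/* no event pending */

	static bool event_armed(void)
	{
		return next_event != KTIME_MAX;
	}
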
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index b51344652330..6b009c207671 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -28,7 +28,7 @@ int tick_program_event(ktime_t expires, int force)
 {
 	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
-	if (unlikely(expires.tv64 == KTIME_MAX)) {
+	if (unlikely(expires == KTIME_MAX)) {
 		/*
 		 * We don't need the clock event device any more, stop it.
 		 */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 71496a20e670..2c115fdab397 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -58,21 +58,21 @@ static void tick_do_update_jiffies64(ktime_t now)
 	 * Do a quick check without holding jiffies_lock:
 	 */
 	delta = ktime_sub(now, last_jiffies_update);
-	if (delta.tv64 < tick_period.tv64)
+	if (delta < tick_period)
 		return;
 
 	/* Reevaluate with jiffies_lock held */
 	write_seqlock(&jiffies_lock);
 
 	delta = ktime_sub(now, last_jiffies_update);
-	if (delta.tv64 >= tick_period.tv64) {
+	if (delta >= tick_period) {
 
 		delta = ktime_sub(delta, tick_period);
 		last_jiffies_update = ktime_add(last_jiffies_update,
 						tick_period);
 
 		/* Slow path for long timeouts */
-		if (unlikely(delta.tv64 >= tick_period.tv64)) {
+		if (unlikely(delta >= tick_period)) {
 			s64 incr = ktime_to_ns(tick_period);
 
 			ticks = ktime_divns(delta, incr);
@@ -101,7 +101,7 @@ static ktime_t tick_init_jiffy_update(void)
 
 	write_seqlock(&jiffies_lock);
 	/* Did we start the jiffies update yet ? */
-	if (last_jiffies_update.tv64 == 0)
+	if (last_jiffies_update == 0)
 		last_jiffies_update = tick_next_period;
 	period = last_jiffies_update;
 	write_sequnlock(&jiffies_lock);
@@ -669,7 +669,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
 		seq = read_seqbegin(&jiffies_lock);
-		basemono = last_jiffies_update.tv64;
+		basemono = last_jiffies_update;
 		basejiff = jiffies;
 	} while (read_seqretry(&jiffies_lock, seq));
 	ts->last_jiffies = basejiff;
@@ -697,7 +697,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	 */
 	delta = next_tick - basemono;
 	if (delta <= (u64)TICK_NSEC) {
-		tick.tv64 = 0;
+		tick = 0;
 
 		/*
 		 * Tell the timer code that the base is not idle, i.e. undo
@@ -764,10 +764,10 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 		expires = KTIME_MAX;
 
 	expires = min_t(u64, expires, next_tick);
-	tick.tv64 = expires;
+	tick = expires;
 
 	/* Skip reprogram of event if its not changed */
-	if (ts->tick_stopped && (expires == dev->next_event.tv64))
+	if (ts->tick_stopped && (expires == dev->next_event))
 		goto out;
 
 	/*
@@ -864,7 +864,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
 	}
 
 	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
-		ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
+		ts->sleep_length = NSEC_PER_SEC / HZ;
 		return false;
 	}
 
@@ -914,7 +914,7 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
 	ts->idle_calls++;
 
 	expires = tick_nohz_stop_sched_tick(ts, now, cpu);
-	if (expires.tv64 > 0LL) {
+	if (expires > 0LL) {
 		ts->idle_sleeps++;
 		ts->idle_expires = expires;
 	}
@@ -1051,7 +1051,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 	struct pt_regs *regs = get_irq_regs();
 	ktime_t now = ktime_get();
 
-	dev->next_event.tv64 = KTIME_MAX;
+	dev->next_event = KTIME_MAX;
 
 	tick_sched_do_timer(now);
 	tick_sched_handle(ts, regs);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f4152a69277f..db087d7e106d 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -104,7 +104,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
 	 */
 	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
 				  -tk->wall_to_monotonic.tv_nsec);
-	WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64);
+	WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
 	tk->wall_to_monotonic = wtm;
 	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
 	tk->offs_real = timespec64_to_ktime(tmp);
@@ -571,7 +571,7 @@ EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
 static inline void tk_update_leap_state(struct timekeeper *tk)
 {
 	tk->next_leap_ktime = ntp_get_next_leap();
-	if (tk->next_leap_ktime.tv64 != KTIME_MAX)
+	if (tk->next_leap_ktime != KTIME_MAX)
 		/* Convert to monotonic time */
 		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
 }
@@ -2250,7 +2250,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
 		}
 
 		/* Handle leapsecond insertion adjustments */
-		if (unlikely(base.tv64 >= tk->next_leap_ktime.tv64))
+		if (unlikely(base >= tk->next_leap_ktime))
 			*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
 
 	} while (read_seqcount_retry(&tk_core.seq, seq));
diff --git a/lib/timerqueue.c b/lib/timerqueue.c
index 782ae8ca2c06..adc6ee0a5126 100644
--- a/lib/timerqueue.c
+++ b/lib/timerqueue.c
@@ -48,7 +48,7 @@ bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
 	while (*p) {
 		parent = *p;
 		ptr = rb_entry(parent, struct timerqueue_node, node);
-		if (node->expires.tv64 < ptr->expires.tv64)
+		if (node->expires < ptr->expires)
 			p = &(*p)->rb_left;
 		else
 			p = &(*p)->rb_right;
@@ -56,7 +56,7 @@ bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
 	rb_link_node(&node->node, parent, p);
 	rb_insert_color(&node->node, &head->head);
 
-	if (!head->next || node->expires.tv64 < head->next->expires.tv64) {
+	if (!head->next || node->expires < head->next->expires) {
 		head->next = node;
 		return true;
 	}
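
With the ordering in timerqueue_add() reduced to a plain '<', the queue can be
exercised with scalar expiry values directly. A hypothetical usage sketch: the
timerqueue_* and ktime_* identifiers are the existing kernel API, while
example_enqueue() and its static variables are invented for illustration:

	#include <linux/bug.h>
	#include <linux/ktime.h>
	#include <linux/timekeeping.h>
	#include <linux/timerqueue.h>

	static struct timerqueue_head example_queue;
	static struct timerqueue_node example_node;

	static void example_enqueue(void)
	{
		struct timerqueue_node *first;

		timerqueue_init_head(&example_queue);
		timerqueue_init(&example_node);

		/* expires is now just an s64 nanosecond count */
		example_node.expires = ktime_add_ns(ktime_get(), NSEC_PER_MSEC);
		timerqueue_add(&example_queue, &example_node);

		/* head->next caches the earliest node, ordered by plain '<' */
		first = timerqueue_getnext(&example_queue);
		WARN_ON(first != &example_node);
	}
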
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 436a7537e6a9..ab8ba1e16473 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -199,11 +199,11 @@ static int bcm_proc_show(struct seq_file *m, void *v)
 
 			seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' ');
 
-			if (op->kt_ival1.tv64)
+			if (op->kt_ival1)
 				seq_printf(m, "timeo=%lld ",
 					   (long long)ktime_to_us(op->kt_ival1));
 
-			if (op->kt_ival2.tv64)
+			if (op->kt_ival2)
 				seq_printf(m, "thr=%lld ",
 					   (long long)ktime_to_us(op->kt_ival2));
 
@@ -226,11 +226,11 @@ static int bcm_proc_show(struct seq_file *m, void *v)
 			else
 				seq_printf(m, "[%u] ", op->nframes);
 
-			if (op->kt_ival1.tv64)
+			if (op->kt_ival1)
 				seq_printf(m, "t1=%lld ",
 					   (long long)ktime_to_us(op->kt_ival1));
 
-			if (op->kt_ival2.tv64)
+			if (op->kt_ival2)
 				seq_printf(m, "t2=%lld ",
 					   (long long)ktime_to_us(op->kt_ival2));
 
@@ -365,11 +365,11 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
 
 static void bcm_tx_start_timer(struct bcm_op *op)
 {
-	if (op->kt_ival1.tv64 && op->count)
+	if (op->kt_ival1 && op->count)
 		hrtimer_start(&op->timer,
 			      ktime_add(ktime_get(), op->kt_ival1),
 			      HRTIMER_MODE_ABS);
-	else if (op->kt_ival2.tv64)
+	else if (op->kt_ival2)
 		hrtimer_start(&op->timer,
 			      ktime_add(ktime_get(), op->kt_ival2),
 			      HRTIMER_MODE_ABS);
@@ -380,7 +380,7 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
 	struct bcm_op *op = (struct bcm_op *)data;
 	struct bcm_msg_head msg_head;
 
-	if (op->kt_ival1.tv64 && (op->count > 0)) {
+	if (op->kt_ival1 && (op->count > 0)) {
 
 		op->count--;
 		if (!op->count && (op->flags & TX_COUNTEVT)) {
@@ -398,7 +398,7 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
 		}
 		bcm_can_tx(op);
 
-	} else if (op->kt_ival2.tv64)
+	} else if (op->kt_ival2)
 		bcm_can_tx(op);
 
 	bcm_tx_start_timer(op);
@@ -459,7 +459,7 @@ static void bcm_rx_update_and_send(struct bcm_op *op,
 	lastdata->flags |= (RX_RECV|RX_THR);
 
 	/* throttling mode inactive ? */
-	if (!op->kt_ival2.tv64) {
+	if (!op->kt_ival2) {
 		/* send RX_CHANGED to the user immediately */
 		bcm_rx_changed(op, lastdata);
 		return;
@@ -470,7 +470,7 @@ static void bcm_rx_update_and_send(struct bcm_op *op,
 		return;
 
 	/* first reception with enabled throttling mode */
-	if (!op->kt_lastmsg.tv64)
+	if (!op->kt_lastmsg)
 		goto rx_changed_settime;
 
 	/* got a second frame inside a potential throttle period? */
@@ -537,7 +537,7 @@ static void bcm_rx_starttimer(struct bcm_op *op)
 	if (op->flags & RX_NO_AUTOTIMER)
 		return;
 
-	if (op->kt_ival1.tv64)
+	if (op->kt_ival1)
 		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
 }
 
@@ -1005,7 +1005,7 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 			op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
 
 			/* disable an active timer due to zero values? */
-			if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
+			if (!op->kt_ival1 && !op->kt_ival2)
 				hrtimer_cancel(&op->timer);
 		}
 
@@ -1189,7 +1189,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 			op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
 
 			/* disable an active timer due to zero value? */
-			if (!op->kt_ival1.tv64)
+			if (!op->kt_ival1)
 				hrtimer_cancel(&op->timer);
 
 			/*
@@ -1201,7 +1201,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 				bcm_rx_thr_flush(op, 1);
 			}
 
-			if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
+			if ((op->flags & STARTTIMER) && op->kt_ival1)
 				hrtimer_start(&op->timer, op->kt_ival1,
 					      HRTIMER_MODE_REL);
 		}
diff --git a/net/can/gw.c b/net/can/gw.c
index 455168718c2e..a54ab0c82104 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -429,7 +429,7 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
 
 	/* clear the skb timestamp if not configured the other way */
 	if (!(gwj->flags & CGW_FLAGS_CAN_SRC_TSTAMP))
-		nskb->tstamp.tv64 = 0;
+		nskb->tstamp = 0;
 
 	/* send to netdevice */
 	if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO))
diff --git a/net/core/dev.c b/net/core/dev.c
index 037ffd27fcc2..8db5a0b4b520 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1731,14 +1731,14 @@ EXPORT_SYMBOL(net_disable_timestamp);
 
 static inline void net_timestamp_set(struct sk_buff *skb)
 {
-	skb->tstamp.tv64 = 0;
+	skb->tstamp = 0;
 	if (static_key_false(&netstamp_needed))
 		__net_timestamp(skb);
 }
 
 #define net_timestamp_check(COND, SKB)			\
 	if (static_key_false(&netstamp_needed)) {	\
-		if ((COND) && !(SKB)->tstamp.tv64)	\
+		if ((COND) && !(SKB)->tstamp)		\
 			__net_timestamp(SKB);		\
 	}						\
 
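
In the networking hunks, a zero skb->tstamp is the sentinel for "not
timestamped yet", so the guard collapses to a one-line truth test. A minimal
sketch of the recurring idiom, assuming an skb on the receive path:

	/* stamp only if nothing upstream has done so already */
	if (!skb->tstamp)
		__net_timestamp(skb);
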
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e77f40616fea..5a03730fbc1a 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4368,7 +4368,7 @@ EXPORT_SYMBOL(skb_try_coalesce);
  */
 void skb_scrub_packet(struct sk_buff *skb, bool xnet)
 {
-	skb->tstamp.tv64 = 0;
+	skb->tstamp = 0;
 	skb->pkt_type = PACKET_HOST;
 	skb->skb_iif = 0;
 	skb->ignore_df = 0;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 31a255b555ad..1d5331a1b1dc 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1038,7 +1038,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
 
 	/* Our usage of tstamp should remain private */
-	skb->tstamp.tv64 = 0;
+	skb->tstamp = 0;
 
 	/* Cleanup our debris for IP stacks */
 	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
@@ -3203,7 +3203,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 #endif
 
 	/* Do not fool tcpdump (if any), clean our debris */
-	skb->tstamp.tv64 = 0;
+	skb->tstamp = 0;
 	return skb;
 }
 EXPORT_SYMBOL(tcp_make_synack);
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 926818c331e5..e4198502fd98 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -232,7 +232,7 @@ static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
 	ipv6h->saddr = hao->addr;
 	hao->addr = tmp_addr;
 
-	if (skb->tstamp.tv64 == 0)
+	if (skb->tstamp == 0)
 		__net_timestamp(skb);
 
 	return true;
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index e07f22b0c58a..8a9219ff2e77 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1809,7 +1809,7 @@ static int ipx_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 	rc = skb_copy_datagram_msg(skb, sizeof(struct ipxhdr), msg, copied);
 	if (rc)
 		goto out_free;
-	if (skb->tstamp.tv64)
+	if (skb->tstamp)
 		sk->sk_stamp = skb->tstamp;
 
 	if (sipx) {
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 6a0bbfa8e702..3a073cd9fcf4 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -783,7 +783,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	/* set conntrack timestamp, if enabled. */
 	tstamp = nf_conn_tstamp_find(ct);
 	if (tstamp) {
-		if (skb->tstamp.tv64 == 0)
+		if (skb->tstamp == 0)
 			__net_timestamp(skb);
 
 		tstamp->start = ktime_to_ns(skb->tstamp);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 200922bb2036..08247bf7d7b8 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -538,7 +538,7 @@ __build_packet_message(struct nfnl_log_net *log,
 			goto nla_put_failure;
 	}
 
-	if (skb->tstamp.tv64) {
+	if (skb->tstamp) {
 		struct nfulnl_msg_packet_timestamp ts;
 		struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
 		ts.sec = cpu_to_be64(kts.tv_sec);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index be7627b80400..3ee0b8a000a4 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -384,7 +384,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 		+ nla_total_size(sizeof(u_int32_t))	/* skbinfo */
 		+ nla_total_size(sizeof(u_int32_t));	/* cap_len */
 
-	if (entskb->tstamp.tv64)
+	if (entskb->tstamp)
 		size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
 
 	size += nfqnl_get_bridge_size(entry);
@@ -555,7 +555,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 	if (nfqnl_put_bridge(entry, skb) < 0)
 		goto nla_put_failure;
 
-	if (entskb->tstamp.tv64) {
+	if (entskb->tstamp) {
 		struct nfqnl_msg_packet_timestamp ts;
 		struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
 
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index 0ae55a36f492..1b01eec1fbda 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -168,7 +168,7 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	 * may happen that the same packet matches both rules if
 	 * it arrived at the right moment before 13:00.
 	 */
-	if (skb->tstamp.tv64 == 0)
+	if (skb->tstamp == 0)
 		__net_timestamp((struct sk_buff *)skb);
 
 	stamp = ktime_to_ns(skb->tstamp);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index b7e4097bfdab..bcfadfdea8e0 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -627,7 +627,7 @@ deliver:
 		 * from the network (tstamp will be updated).
 		 */
 		if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
-			skb->tstamp.tv64 = 0;
+			skb->tstamp = 0;
 #endif
 
 		if (q->qdisc) {
diff --git a/net/socket.c b/net/socket.c
index 5ff26c44db33..8487bf136e5c 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -668,7 +668,7 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
 
 	/* Race occurred between timestamp enabling and packet
 	   receiving. Fill in the current time for now. */
-	if (need_software_tstamp && skb->tstamp.tv64 == 0)
+	if (need_software_tstamp && skb->tstamp == 0)
 		__net_timestamp(skb);
 
 	if (need_software_tstamp) {
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a3e85ee28b5a..de066acdb34e 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -574,7 +574,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
 	}
 	len = svc_addr_len(svc_addr(rqstp));
 	rqstp->rq_addrlen = len;
-	if (skb->tstamp.tv64 == 0) {
+	if (skb->tstamp == 0) {
 		skb->tstamp = ktime_get_real();
 		/* Don't enable netstamp, sunrpc doesn't
 		   need that much accuracy */
diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
index e2f27022b363..1ac0c423903e 100644
--- a/sound/core/hrtimer.c
+++ b/sound/core/hrtimer.c
@@ -58,7 +58,7 @@ static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
 
 	/* calculate the drift */
 	delta = ktime_sub(hrt->base->get_time(), hrtimer_get_expires(hrt));
-	if (delta.tv64 > 0)
+	if (delta > 0)
 		ticks += ktime_divns(delta, ticks * resolution);
 
 	snd_timer_interrupt(stime->timer, ticks);
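
The comparison helpers behind many of these call sites reduce in the same way.
A hedged sketch of the post-change shape of the ktime_compare()/ktime_after()
family from include/linux/ktime.h, abbreviated rather than the verbatim hunk:

	/* with ktime_t a plain s64, comparisons are direct */
	static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
	{
		if (cmp1 < cmp2)
			return -1;
		if (cmp1 > cmp2)
			return 1;
		return 0;
	}

	static inline bool ktime_after(const ktime_t cmp1, const ktime_t cmp2)
	{
		return ktime_compare(cmp1, cmp2) > 0;
	}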