author    Linus Torvalds <torvalds@linux-foundation.org>    2016-12-25 17:30:04 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-12-25 17:30:04 -0500
commit    3ddc76dfc786cc6f87852693227fb0b1f124f807 (patch)
tree      8192b4721e05cf6823087f9696db8c0c8f144b02
parent    b272f732f888d4cf43c943a40c9aaa836f9b7431 (diff)
parent    1f3a8e49d8f28f498b8694464623ac20aebfe62a (diff)
Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer type cleanups from Thomas Gleixner:
 "This series does a tree wide cleanup of types related to
  timers/timekeeping.

   - Get rid of cycles_t and use a plain u64. The type is not really
     helpful and caused more confusion than clarity

   - Get rid of the ktime union. The union has become useless as we use
     the scalar nanoseconds storage unconditionally now. The 32bit
     timespec alike storage got removed due to the Y2038 limitations
     some time ago. That leaves the odd union access around for no
     reason. Clean it up.

  Both changes have been done with coccinelle and a small amount of
  manual mopping up"

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  ktime: Get rid of ktime_equal()
  ktime: Cleanup ktime_set() usage
  ktime: Get rid of the union
  clocksource: Use a plain u64 instead of cycle_t
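[Editor's note, not part of the merge: as a minimal illustrative sketch of the cycle_t removal described above, a typical clocksource driver after this series looks roughly like the following. The foo_* names are hypothetical; only the pattern (the ->read() hook returning a plain u64) is taken from the diff below.]

#include <linux/clocksource.h>
#include <linux/timex.h>	/* get_cycles() */

/* Hypothetical driver: the read hook now returns u64 instead of cycle_t. */
static u64 foo_clocksource_read(struct clocksource *cs)
{
        return (u64)get_cycles();
}

static struct clocksource foo_clocksource = {
        .name   = "foo",
        .rating = 200,
        .read   = foo_clocksource_read,
        .mask   = CLOCKSOURCE_MASK(64),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};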
-rw-r--r--arch/alpha/kernel/time.c4
-rw-r--r--arch/arm/mach-davinci/time.c2
-rw-r--r--arch/arm/mach-ep93xx/timer-ep93xx.c4
-rw-r--r--arch/arm/mach-footbridge/dc21285-timer.c2
-rw-r--r--arch/arm/mach-ixp4xx/common.c2
-rw-r--r--arch/arm/mach-mmp/time.c2
-rw-r--r--arch/arm/mach-omap2/timer.c4
-rw-r--r--arch/arm/plat-iop/time.c2
-rw-r--r--arch/avr32/kernel/time.c4
-rw-r--r--arch/blackfin/kernel/time-ts.c4
-rw-r--r--arch/c6x/kernel/time.c2
-rw-r--r--arch/hexagon/kernel/time.c4
-rw-r--r--arch/ia64/kernel/cyclone.c4
-rw-r--r--arch/ia64/kernel/fsyscall_gtod_data.h6
-rw-r--r--arch/ia64/kernel/time.c6
-rw-r--r--arch/ia64/sn/kernel/sn2/timer.c4
-rw-r--r--arch/m68k/68000/timers.c2
-rw-r--r--arch/m68k/coldfire/dma_timer.c2
-rw-r--r--arch/m68k/coldfire/pit.c2
-rw-r--r--arch/m68k/coldfire/sltimers.c2
-rw-r--r--arch/m68k/coldfire/timers.c2
-rw-r--r--arch/microblaze/kernel/timer.c6
-rw-r--r--arch/mips/alchemy/common/time.c2
-rw-r--r--arch/mips/cavium-octeon/csrc-octeon.c2
-rw-r--r--arch/mips/jz4740/time.c2
-rw-r--r--arch/mips/kernel/cevt-txx9.c2
-rw-r--r--arch/mips/kernel/csrc-bcm1480.c4
-rw-r--r--arch/mips/kernel/csrc-ioasic.c2
-rw-r--r--arch/mips/kernel/csrc-r4k.c2
-rw-r--r--arch/mips/kernel/csrc-sb1250.c4
-rw-r--r--arch/mips/loongson32/common/time.c4
-rw-r--r--arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c4
-rw-r--r--arch/mips/loongson64/loongson-3/hpet.c4
-rw-r--r--arch/mips/mti-malta/malta-time.c2
-rw-r--r--arch/mips/netlogic/common/time.c4
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c2
-rw-r--r--arch/mn10300/kernel/csrc-mn10300.c2
-rw-r--r--arch/nios2/kernel/time.c2
-rw-r--r--arch/openrisc/kernel/time.c4
-rw-r--r--arch/parisc/kernel/time.c2
-rw-r--r--arch/powerpc/kernel/time.c14
-rw-r--r--arch/powerpc/kvm/book3s_hv.c3
-rw-r--r--arch/powerpc/oprofile/cell/spu_profiler.c4
-rw-r--r--arch/s390/kernel/time.c2
-rw-r--r--arch/s390/kvm/interrupt.c2
-rw-r--r--arch/sparc/kernel/time_32.c2
-rw-r--r--arch/sparc/kernel/time_64.c2
-rw-r--r--arch/um/kernel/time.c2
-rw-r--r--arch/unicore32/kernel/time.c2
-rw-r--r--arch/x86/entry/vdso/vclock_gettime.c8
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/include/asm/pvclock.h7
-rw-r--r--arch/x86/include/asm/tsc.h2
-rw-r--r--arch/x86/include/asm/vgtod.h4
-rw-r--r--arch/x86/kernel/apb_timer.c4
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c4
-rw-r--r--arch/x86/kernel/hpet.c14
-rw-r--r--arch/x86/kernel/kvmclock.c10
-rw-r--r--arch/x86/kernel/pvclock.c4
-rw-r--r--arch/x86/kernel/tsc.c6
-rw-r--r--arch/x86/kvm/lapic.c4
-rw-r--r--arch/x86/kvm/x86.c14
-rw-r--r--arch/x86/lguest/boot.c2
-rw-r--r--arch/x86/platform/uv/uv_time.c8
-rw-r--r--arch/x86/xen/time.c6
-rw-r--r--arch/x86/xen/xen-ops.h2
-rw-r--r--arch/xtensa/kernel/time.c4
-rw-r--r--block/blk-mq.c2
-rw-r--r--drivers/base/power/main.c2
-rw-r--r--drivers/base/power/wakeup.c4
-rw-r--r--drivers/block/null_blk.c2
-rw-r--r--drivers/char/hpet.c4
-rw-r--r--drivers/clocksource/acpi_pm.c14
-rw-r--r--drivers/clocksource/arc_timer.c12
-rw-r--r--drivers/clocksource/arm_arch_timer.c4
-rw-r--r--drivers/clocksource/arm_global_timer.c2
-rw-r--r--drivers/clocksource/cadence_ttc_timer.c4
-rw-r--r--drivers/clocksource/clksrc-dbx500-prcmu.c2
-rw-r--r--drivers/clocksource/dw_apb_timer.c8
-rw-r--r--drivers/clocksource/em_sti.c12
-rw-r--r--drivers/clocksource/exynos_mct.c6
-rw-r--r--drivers/clocksource/h8300_timer16.c2
-rw-r--r--drivers/clocksource/h8300_tpu.c2
-rw-r--r--drivers/clocksource/i8253.c4
-rw-r--r--drivers/clocksource/jcore-pit.c2
-rw-r--r--drivers/clocksource/metag_generic.c2
-rw-r--r--drivers/clocksource/mips-gic-timer.c2
-rw-r--r--drivers/clocksource/mmio.c18
-rw-r--r--drivers/clocksource/mxs_timer.c2
-rw-r--r--drivers/clocksource/qcom-timer.c2
-rw-r--r--drivers/clocksource/samsung_pwm_timer.c2
-rw-r--r--drivers/clocksource/scx200_hrt.c4
-rw-r--r--drivers/clocksource/sh_cmt.c2
-rw-r--r--drivers/clocksource/sh_tmu.c2
-rw-r--r--drivers/clocksource/tcb_clksrc.c4
-rw-r--r--drivers/clocksource/time-pistachio.c4
-rw-r--r--drivers/clocksource/timer-atlas7.c2
-rw-r--r--drivers/clocksource/timer-atmel-pit.c2
-rw-r--r--drivers/clocksource/timer-atmel-st.c2
-rw-r--r--drivers/clocksource/timer-nps.c4
-rw-r--r--drivers/clocksource/timer-prima2.c2
-rw-r--r--drivers/clocksource/timer-sun5i.c2
-rw-r--r--drivers/clocksource/timer-ti-32k.c4
-rw-r--r--drivers/clocksource/vt8500_timer.c4
-rw-r--r--drivers/dma/dmatest.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c6
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c2
-rw-r--r--drivers/hv/hv.c8
-rw-r--r--drivers/iio/trigger/iio-trig-hrtimer.c5
-rw-r--r--drivers/input/joystick/walkera0701.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c16
-rw-r--r--drivers/mailbox/mailbox.c3
-rw-r--r--drivers/media/dvb-core/dmxdev.c2
-rw-r--r--drivers/media/pci/cx88/cx88-input.c6
-rw-r--r--drivers/media/pci/pt3/pt3.c2
-rw-r--r--drivers/media/rc/ir-rx51.c2
-rw-r--r--drivers/net/can/softing/softing_fw.c4
-rw-r--r--drivers/net/can/softing/softing_main.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/ec_bhf.c5
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c18
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c4
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_clock.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h2
-rw-r--r--drivers/net/ethernet/ti/cpts.c2
-rw-r--r--drivers/net/ethernet/tile/tilegx.c4
-rw-r--r--drivers/net/ieee802154/at86rf230.c9
-rw-r--r--drivers/net/usb/cdc_ncm.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c4
-rw-r--r--drivers/pci/quirks.c2
-rw-r--r--drivers/platform/x86/msi-wmi.c2
-rw-r--r--drivers/power/reset/ltc2952-poweroff.c2
-rw-r--r--drivers/rtc/interface.c16
-rw-r--r--drivers/s390/crypto/ap_bus.c4
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c2
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/scsi/ufs/ufshcd.c4
-rw-r--r--drivers/usb/chipidea/otg_fsm.c14
-rw-r--r--drivers/usb/gadget/function/f_ncm.c3
-rw-r--r--drivers/usb/host/ehci-timer.c5
-rw-r--r--drivers/usb/host/fotg210-hcd.c5
-rw-r--r--drivers/usb/musb/musb_cppi41.c9
-rw-r--r--fs/aio.c4
-rw-r--r--fs/dlm/lock.c5
-rw-r--r--fs/gfs2/glock.c2
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c3
-rw-r--r--fs/ocfs2/cluster/heartbeat.c2
-rw-r--r--fs/timerfd.c26
-rw-r--r--include/kvm/arm_arch_timer.h4
-rw-r--r--include/linux/clocksource.h22
-rw-r--r--include/linux/dw_apb_timer.h2
-rw-r--r--include/linux/futex.h4
-rw-r--r--include/linux/hrtimer.h12
-rw-r--r--include/linux/irqchip/mips-gic.h8
-rw-r--r--include/linux/ktime.h81
-rw-r--r--include/linux/mlx4/device.h2
-rw-r--r--include/linux/skbuff.h2
-rw-r--r--include/linux/tick.h4
-rw-r--r--include/linux/timecounter.h12
-rw-r--r--include/linux/timekeeper_internal.h10
-rw-r--r--include/linux/timekeeping.h4
-rw-r--r--include/linux/types.h3
-rw-r--r--include/linux/wait.h2
-rw-r--r--include/net/red.h4
-rw-r--r--include/net/sock.h4
-rw-r--r--include/trace/events/alarmtimer.h6
-rw-r--r--include/trace/events/timer.h16
-rw-r--r--kernel/futex.c4
-rw-r--r--kernel/sched/core.c2
-rw-r--r--kernel/signal.c6
-rw-r--r--kernel/time/alarmtimer.c24
-rw-r--r--kernel/time/clockevents.c6
-rw-r--r--kernel/time/clocksource.c2
-rw-r--r--kernel/time/hrtimer.c54
-rw-r--r--kernel/time/itimer.c10
-rw-r--r--kernel/time/jiffies.c4
-rw-r--r--kernel/time/ntp.c2
-rw-r--r--kernel/time/posix-timers.c22
-rw-r--r--kernel/time/tick-broadcast-hrtimer.c2
-rw-r--r--kernel/time/tick-broadcast.c24
-rw-r--r--kernel/time/tick-common.c4
-rw-r--r--kernel/time/tick-oneshot.c2
-rw-r--r--kernel/time/tick-sched.c22
-rw-r--r--kernel/time/timecounter.c6
-rw-r--r--kernel/time/timekeeping.c63
-rw-r--r--kernel/time/timekeeping_internal.h6
-rw-r--r--kernel/trace/ftrace.c4
-rw-r--r--kernel/trace/trace.c6
-rw-r--r--kernel/trace/trace.h8
-rw-r--r--kernel/trace/trace_irqsoff.c4
-rw-r--r--kernel/trace/trace_sched_wakeup.c4
-rw-r--r--lib/timerqueue.c4
-rw-r--r--net/can/bcm.c32
-rw-r--r--net/can/gw.c2
-rw-r--r--net/core/dev.c4
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/ipv4/tcp_output.c4
-rw-r--r--net/ipv6/exthdrs.c2
-rw-r--r--net/ipv6/mip6.c2
-rw-r--r--net/ipx/af_ipx.c2
-rw-r--r--net/mac802154/util.c4
-rw-r--r--net/netfilter/nf_conntrack_core.c2
-rw-r--r--net/netfilter/nfnetlink_log.c2
-rw-r--r--net/netfilter/nfnetlink_queue.c4
-rw-r--r--net/netfilter/xt_time.c2
-rw-r--r--net/sched/sch_cbq.c2
-rw-r--r--net/sched/sch_netem.c2
-rw-r--r--net/sctp/transport.c2
-rw-r--r--net/socket.c2
-rw-r--r--net/sunrpc/svcsock.c2
-rw-r--r--net/xfrm/xfrm_state.c2
-rw-r--r--sound/core/hrtimer.c2
-rw-r--r--sound/drivers/pcsp/pcsp_lib.c2
-rw-r--r--sound/firewire/lib.c6
-rw-r--r--sound/hda/hdac_stream.c6
-rw-r--r--sound/sh/sh_dac_audio.c2
-rw-r--r--virt/kvm/arm/arch_timer.c6
227 files changed, 604 insertions, 665 deletions
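[Editor's note, not part of the diff below: the ktime changes listed in the shortlog are equally mechanical. Once ktime_t is a plain scalar nanosecond count, ktime_set(0, ns) and ktime_equal() reduce to ordinary integer operations, roughly as in this hypothetical sketch (bar_arm() and its arguments are made up for illustration):]

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/printk.h>

static void bar_arm(struct hrtimer *timer, u64 delay_ns, ktime_t deadline)
{
        /* was: hrtimer_start(timer, ktime_set(0, delay_ns), HRTIMER_MODE_REL); */
        hrtimer_start(timer, delay_ns, HRTIMER_MODE_REL);

        /* was: if (ktime_equal(hrtimer_get_expires(timer), deadline)) */
        if (hrtimer_get_expires(timer) == deadline)
                pr_debug("bar: timer expires exactly at deadline\n");
}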
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 992000e3d9e4..3bfe058d75d9 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -133,7 +133,7 @@ init_rtc_clockevent(void)
  * The QEMU clock as a clocksource primitive.
  */
 
-static cycle_t
+static u64
 qemu_cs_read(struct clocksource *cs)
 {
         return qemu_get_vmtime();
@@ -260,7 +260,7 @@ common_init_rtc(void)
  * use this method when WTINT is in use.
  */
 
-static cycle_t read_rpcc(struct clocksource *cs)
+static u64 read_rpcc(struct clocksource *cs)
 {
         return rpcc();
 }
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c
index 6c18445a4639..034f865fe78e 100644
--- a/arch/arm/mach-davinci/time.c
+++ b/arch/arm/mach-davinci/time.c
@@ -268,7 +268,7 @@ static void __init timer_init(void)
 /*
  * clocksource
  */
-static cycle_t read_cycles(struct clocksource *cs)
+static u64 read_cycles(struct clocksource *cs)
 {
         struct timer_s *t = &timers[TID_CLOCKSOURCE];
 
diff --git a/arch/arm/mach-ep93xx/timer-ep93xx.c b/arch/arm/mach-ep93xx/timer-ep93xx.c
index e5f791145bd0..874cbc91b669 100644
--- a/arch/arm/mach-ep93xx/timer-ep93xx.c
+++ b/arch/arm/mach-ep93xx/timer-ep93xx.c
@@ -59,13 +59,13 @@ static u64 notrace ep93xx_read_sched_clock(void)
         return ret;
 }
 
-cycle_t ep93xx_clocksource_read(struct clocksource *c)
+u64 ep93xx_clocksource_read(struct clocksource *c)
 {
         u64 ret;
 
         ret = readl(EP93XX_TIMER4_VALUE_LOW);
         ret |= ((u64) (readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff) << 32);
-        return (cycle_t) ret;
+        return (u64) ret;
 }
 
 static int ep93xx_clkevt_set_next_event(unsigned long next,
diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
index 810edc78c817..75395a720e63 100644
--- a/arch/arm/mach-footbridge/dc21285-timer.c
+++ b/arch/arm/mach-footbridge/dc21285-timer.c
@@ -19,7 +19,7 @@
 
 #include "common.h"
 
-static cycle_t cksrc_dc21285_read(struct clocksource *cs)
+static u64 cksrc_dc21285_read(struct clocksource *cs)
 {
         return cs->mask - *CSR_TIMER2_VALUE;
 }
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 0f08f611c1a6..846e033c56fa 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -493,7 +493,7 @@ static u64 notrace ixp4xx_read_sched_clock(void)
  * clocksource
  */
 
-static cycle_t ixp4xx_clocksource_read(struct clocksource *c)
+static u64 ixp4xx_clocksource_read(struct clocksource *c)
 {
         return *IXP4XX_OSTS;
 }
diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c
index 3c2c92aaa0ae..96ad1db0b04b 100644
--- a/arch/arm/mach-mmp/time.c
+++ b/arch/arm/mach-mmp/time.c
@@ -144,7 +144,7 @@ static struct clock_event_device ckevt = {
         .set_state_oneshot = timer_set_shutdown,
 };
 
-static cycle_t clksrc_read(struct clocksource *cs)
+static u64 clksrc_read(struct clocksource *cs)
 {
         return timer_read();
 }
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 5e2e2218a402..56128da23c3a 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -369,9 +369,9 @@ static bool use_gptimer_clksrc __initdata;
 /*
  * clocksource
  */
-static cycle_t clocksource_read_cycles(struct clocksource *cs)
+static u64 clocksource_read_cycles(struct clocksource *cs)
 {
-        return (cycle_t)__omap_dm_timer_read_counter(&clksrc,
+        return (u64)__omap_dm_timer_read_counter(&clksrc,
                                                      OMAP_TIMER_NONPOSTED);
 }
 
diff --git a/arch/arm/plat-iop/time.c b/arch/arm/plat-iop/time.c
index ed8d129d4bea..2cff0010f677 100644
--- a/arch/arm/plat-iop/time.c
+++ b/arch/arm/plat-iop/time.c
@@ -38,7 +38,7 @@
 /*
  * IOP clocksource (free-running timer 1).
  */
-static cycle_t notrace iop_clocksource_read(struct clocksource *unused)
+static u64 notrace iop_clocksource_read(struct clocksource *unused)
 {
         return 0xffffffffu - read_tcr1();
 }
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index a124c55733db..4d9b69615979 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -20,9 +20,9 @@
 
 static bool disable_cpu_idle_poll;
 
-static cycle_t read_cycle_count(struct clocksource *cs)
+static u64 read_cycle_count(struct clocksource *cs)
 {
-        return (cycle_t)sysreg_read(COUNT);
+        return (u64)sysreg_read(COUNT);
 }
 
 /*
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index fb9e95f1b719..0e9fcf841d67 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -26,7 +26,7 @@
 
 #if defined(CONFIG_CYCLES_CLOCKSOURCE)
 
-static notrace cycle_t bfin_read_cycles(struct clocksource *cs)
+static notrace u64 bfin_read_cycles(struct clocksource *cs)
 {
 #ifdef CONFIG_CPU_FREQ
         return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
@@ -80,7 +80,7 @@ void __init setup_gptimer0(void)
         enable_gptimers(TIMER0bit);
 }
 
-static cycle_t bfin_read_gptimer0(struct clocksource *cs)
+static u64 bfin_read_gptimer0(struct clocksource *cs)
 {
         return bfin_read_TIMER0_COUNTER();
 }
diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
index 04845aaf5985..6a8e00a1f6d5 100644
--- a/arch/c6x/kernel/time.c
+++ b/arch/c6x/kernel/time.c
@@ -26,7 +26,7 @@
 static u32 sched_clock_multiplier;
 #define SCHED_CLOCK_SHIFT 16
 
-static cycle_t tsc_read(struct clocksource *cs)
+static u64 tsc_read(struct clocksource *cs)
 {
         return get_cycles();
 }
diff --git a/arch/hexagon/kernel/time.c b/arch/hexagon/kernel/time.c
index a6a1d1f8309a..ff4e9bf995e9 100644
--- a/arch/hexagon/kernel/time.c
+++ b/arch/hexagon/kernel/time.c
@@ -72,9 +72,9 @@ struct adsp_hw_timer_struct {
 /* Look for "TCX0" for related constants. */
 static __iomem struct adsp_hw_timer_struct *rtos_timer;
 
-static cycle_t timer_get_cycles(struct clocksource *cs)
+static u64 timer_get_cycles(struct clocksource *cs)
 {
-        return (cycle_t) __vmgettime();
+        return (u64) __vmgettime();
 }
 
 static struct clocksource hexagon_clocksource = {
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index 5fa3848ba224..ee1a4afbf9da 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -21,9 +21,9 @@ void __init cyclone_setup(void)
 
 static void __iomem *cyclone_mc;
 
-static cycle_t read_cyclone(struct clocksource *cs)
+static u64 read_cyclone(struct clocksource *cs)
 {
-        return (cycle_t)readq((void __iomem *)cyclone_mc);
+        return (u64)readq((void __iomem *)cyclone_mc);
 }
 
 static struct clocksource clocksource_cyclone = {
diff --git a/arch/ia64/kernel/fsyscall_gtod_data.h b/arch/ia64/kernel/fsyscall_gtod_data.h
index 146b15b5fec3..dcc514917731 100644
--- a/arch/ia64/kernel/fsyscall_gtod_data.h
+++ b/arch/ia64/kernel/fsyscall_gtod_data.h
@@ -9,15 +9,15 @@ struct fsyscall_gtod_data_t {
         seqcount_t seq;
         struct timespec wall_time;
         struct timespec monotonic_time;
-        cycle_t clk_mask;
+        u64 clk_mask;
         u32 clk_mult;
         u32 clk_shift;
         void *clk_fsys_mmio;
-        cycle_t clk_cycle_last;
+        u64 clk_cycle_last;
 } ____cacheline_aligned;
 
 struct itc_jitter_data_t {
         int itc_jitter;
-        cycle_t itc_lastcycle;
+        u64 itc_lastcycle;
 } ____cacheline_aligned;
 
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 021f44ab4bfb..71775b95d6cc 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -31,7 +31,7 @@
 
 #include "fsyscall_gtod_data.h"
 
-static cycle_t itc_get_cycles(struct clocksource *cs);
+static u64 itc_get_cycles(struct clocksource *cs);
 
 struct fsyscall_gtod_data_t fsyscall_gtod_data;
 
@@ -323,7 +323,7 @@ void ia64_init_itm(void)
         }
 }
 
-static cycle_t itc_get_cycles(struct clocksource *cs)
+static u64 itc_get_cycles(struct clocksource *cs)
 {
         unsigned long lcycle, now, ret;
 
@@ -397,7 +397,7 @@ void update_vsyscall_tz(void)
 }
 
 void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
-                         struct clocksource *c, u32 mult, cycle_t cycle_last)
+                         struct clocksource *c, u32 mult, u64 cycle_last)
 {
         write_seqcount_begin(&fsyscall_gtod_data.seq);
 
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
index abab8f99e913..66edc36426ed 100644
--- a/arch/ia64/sn/kernel/sn2/timer.c
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -22,9 +22,9 @@
 
 extern unsigned long sn_rtc_cycles_per_second;
 
-static cycle_t read_sn2(struct clocksource *cs)
+static u64 read_sn2(struct clocksource *cs)
 {
-        return (cycle_t)readq(RTC_COUNTER_ADDR);
+        return (u64)readq(RTC_COUNTER_ADDR);
 }
 
 static struct clocksource clocksource_sn2 = {
diff --git a/arch/m68k/68000/timers.c b/arch/m68k/68000/timers.c
index 99a98698bc95..252455bce144 100644
--- a/arch/m68k/68000/timers.c
+++ b/arch/m68k/68000/timers.c
@@ -76,7 +76,7 @@ static struct irqaction m68328_timer_irq = {
 
 /***************************************************************************/
 
-static cycle_t m68328_read_clk(struct clocksource *cs)
+static u64 m68328_read_clk(struct clocksource *cs)
 {
         unsigned long flags;
         u32 cycles;
diff --git a/arch/m68k/coldfire/dma_timer.c b/arch/m68k/coldfire/dma_timer.c
index 235ad57c4707..8273eea57874 100644
--- a/arch/m68k/coldfire/dma_timer.c
+++ b/arch/m68k/coldfire/dma_timer.c
@@ -34,7 +34,7 @@
 #define DMA_DTMR_CLK_DIV_16     (2 << 1)
 #define DMA_DTMR_ENABLE         (1 << 0)
 
-static cycle_t cf_dt_get_cycles(struct clocksource *cs)
+static u64 cf_dt_get_cycles(struct clocksource *cs)
 {
         return __raw_readl(DTCN0);
 }
diff --git a/arch/m68k/coldfire/pit.c b/arch/m68k/coldfire/pit.c
index d86a9ffb3f13..175553d5b8ed 100644
--- a/arch/m68k/coldfire/pit.c
+++ b/arch/m68k/coldfire/pit.c
@@ -118,7 +118,7 @@ static struct irqaction pit_irq = {
 
 /***************************************************************************/
 
-static cycle_t pit_read_clk(struct clocksource *cs)
+static u64 pit_read_clk(struct clocksource *cs)
 {
         unsigned long flags;
         u32 cycles;
diff --git a/arch/m68k/coldfire/sltimers.c b/arch/m68k/coldfire/sltimers.c
index 831a08cf6f40..3292c0d68b18 100644
--- a/arch/m68k/coldfire/sltimers.c
+++ b/arch/m68k/coldfire/sltimers.c
@@ -97,7 +97,7 @@ static struct irqaction mcfslt_timer_irq = {
         .handler = mcfslt_tick,
 };
 
-static cycle_t mcfslt_read_clk(struct clocksource *cs)
+static u64 mcfslt_read_clk(struct clocksource *cs)
 {
         unsigned long flags;
         u32 cycles, scnt;
diff --git a/arch/m68k/coldfire/timers.c b/arch/m68k/coldfire/timers.c
index cd496a20fcc7..2dc7a58204f6 100644
--- a/arch/m68k/coldfire/timers.c
+++ b/arch/m68k/coldfire/timers.c
@@ -89,7 +89,7 @@ static struct irqaction mcftmr_timer_irq = {
 
 /***************************************************************************/
 
-static cycle_t mcftmr_read_clk(struct clocksource *cs)
+static u64 mcftmr_read_clk(struct clocksource *cs)
 {
         unsigned long flags;
         u32 cycles;
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index 9e954959f605..1d6fad50fa76 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -190,17 +190,17 @@ static u64 xilinx_clock_read(void)
         return read_fn(timer_baseaddr + TCR1);
 }
 
-static cycle_t xilinx_read(struct clocksource *cs)
+static u64 xilinx_read(struct clocksource *cs)
 {
         /* reading actual value of timer 1 */
-        return (cycle_t)xilinx_clock_read();
+        return (u64)xilinx_clock_read();
 }
 
 static struct timecounter xilinx_tc = {
         .cc = NULL,
 };
 
-static cycle_t xilinx_cc_read(const struct cyclecounter *cc)
+static u64 xilinx_cc_read(const struct cyclecounter *cc)
 {
         return xilinx_read(NULL);
 }
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index f99d3ec17a45..e1bec5a77c39 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -44,7 +44,7 @@
 /* 32kHz clock enabled and detected */
 #define CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S)
 
-static cycle_t au1x_counter1_read(struct clocksource *cs)
+static u64 au1x_counter1_read(struct clocksource *cs)
 {
         return alchemy_rdsys(AU1000_SYS_RTCREAD);
 }
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
index 23c2344a3552..39f153fe0022 100644
--- a/arch/mips/cavium-octeon/csrc-octeon.c
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -98,7 +98,7 @@ void octeon_init_cvmcount(void)
         local_irq_restore(flags);
 }
 
-static cycle_t octeon_cvmcount_read(struct clocksource *cs)
+static u64 octeon_cvmcount_read(struct clocksource *cs)
 {
         return read_c0_cvmcount();
 }
diff --git a/arch/mips/jz4740/time.c b/arch/mips/jz4740/time.c
index 1f7ca2c9f262..bcf8f8c62737 100644
--- a/arch/mips/jz4740/time.c
+++ b/arch/mips/jz4740/time.c
@@ -34,7 +34,7 @@
 
 static uint16_t jz4740_jiffies_per_tick;
 
-static cycle_t jz4740_clocksource_read(struct clocksource *cs)
+static u64 jz4740_clocksource_read(struct clocksource *cs)
 {
         return jz4740_timer_get_count(TIMER_CLOCKSOURCE);
 }
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
index 537eefdf838f..aaca60d6ffc3 100644
--- a/arch/mips/kernel/cevt-txx9.c
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -27,7 +27,7 @@ struct txx9_clocksource {
         struct txx9_tmr_reg __iomem *tmrptr;
 };
 
-static cycle_t txx9_cs_read(struct clocksource *cs)
+static u64 txx9_cs_read(struct clocksource *cs)
 {
         struct txx9_clocksource *txx9_cs =
                 container_of(cs, struct txx9_clocksource, cs);
diff --git a/arch/mips/kernel/csrc-bcm1480.c b/arch/mips/kernel/csrc-bcm1480.c
index 7f65b53d1b24..f011261e9506 100644
--- a/arch/mips/kernel/csrc-bcm1480.c
+++ b/arch/mips/kernel/csrc-bcm1480.c
@@ -25,9 +25,9 @@
 
 #include <asm/sibyte/sb1250.h>
 
-static cycle_t bcm1480_hpt_read(struct clocksource *cs)
+static u64 bcm1480_hpt_read(struct clocksource *cs)
 {
-        return (cycle_t) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
+        return (u64) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
 }
 
 struct clocksource bcm1480_clocksource = {
diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c
index 722f5589cd1d..f6acd1e58c26 100644
--- a/arch/mips/kernel/csrc-ioasic.c
+++ b/arch/mips/kernel/csrc-ioasic.c
@@ -22,7 +22,7 @@
 #include <asm/dec/ioasic.h>
 #include <asm/dec/ioasic_addrs.h>
 
-static cycle_t dec_ioasic_hpt_read(struct clocksource *cs)
+static u64 dec_ioasic_hpt_read(struct clocksource *cs)
 {
         return ioasic_read(IO_REG_FCTR);
 }
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
index d76275da54cb..eed099f35bf1 100644
--- a/arch/mips/kernel/csrc-r4k.c
+++ b/arch/mips/kernel/csrc-r4k.c
@@ -11,7 +11,7 @@
 
 #include <asm/time.h>
 
-static cycle_t c0_hpt_read(struct clocksource *cs)
+static u64 c0_hpt_read(struct clocksource *cs)
 {
         return read_c0_count();
 }
diff --git a/arch/mips/kernel/csrc-sb1250.c b/arch/mips/kernel/csrc-sb1250.c
index d915652b4d56..b07b7310d3f4 100644
--- a/arch/mips/kernel/csrc-sb1250.c
+++ b/arch/mips/kernel/csrc-sb1250.c
@@ -30,7 +30,7 @@
  * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
  * again.
  */
-static inline cycle_t sb1250_hpt_get_cycles(void)
+static inline u64 sb1250_hpt_get_cycles(void)
 {
         unsigned int count;
         void __iomem *addr;
@@ -41,7 +41,7 @@ static inline cycle_t sb1250_hpt_get_cycles(void)
         return SB1250_HPT_VALUE - count;
 }
 
-static cycle_t sb1250_hpt_read(struct clocksource *cs)
+static u64 sb1250_hpt_read(struct clocksource *cs)
 {
         return sb1250_hpt_get_cycles();
 }
diff --git a/arch/mips/loongson32/common/time.c b/arch/mips/loongson32/common/time.c
index ff224f0020e5..e6f972d35252 100644
--- a/arch/mips/loongson32/common/time.c
+++ b/arch/mips/loongson32/common/time.c
@@ -63,7 +63,7 @@ void __init ls1x_pwmtimer_init(void)
         ls1x_pwmtimer_restart();
 }
 
-static cycle_t ls1x_clocksource_read(struct clocksource *cs)
+static u64 ls1x_clocksource_read(struct clocksource *cs)
 {
         unsigned long flags;
         int count;
@@ -107,7 +107,7 @@ static cycle_t ls1x_clocksource_read(struct clocksource *cs)
 
         raw_spin_unlock_irqrestore(&ls1x_timer_lock, flags);
 
-        return (cycle_t) (jifs * ls1x_jiffies_per_tick) + count;
+        return (u64) (jifs * ls1x_jiffies_per_tick) + count;
 }
 
 static struct clocksource ls1x_clocksource = {
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
index da77d412514c..9edfa55a0e78 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
+++ b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
@@ -144,7 +144,7 @@ void __init setup_mfgpt0_timer(void)
  * to just read by itself. So use jiffies to emulate a free
  * running counter:
  */
-static cycle_t mfgpt_read(struct clocksource *cs)
+static u64 mfgpt_read(struct clocksource *cs)
 {
         unsigned long flags;
         int count;
@@ -188,7 +188,7 @@ static cycle_t mfgpt_read(struct clocksource *cs)
 
         raw_spin_unlock_irqrestore(&mfgpt_lock, flags);
 
-        return (cycle_t) (jifs * COMPARE) + count;
+        return (u64) (jifs * COMPARE) + count;
 }
 
 static struct clocksource clocksource_mfgpt = {
diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
index 4788bea62a6a..24afe364637b 100644
--- a/arch/mips/loongson64/loongson-3/hpet.c
+++ b/arch/mips/loongson64/loongson-3/hpet.c
@@ -248,9 +248,9 @@ void __init setup_hpet_timer(void)
         pr_info("hpet clock event device register\n");
 }
 
-static cycle_t hpet_read_counter(struct clocksource *cs)
+static u64 hpet_read_counter(struct clocksource *cs)
 {
-        return (cycle_t)hpet_read(HPET_COUNTER);
+        return (u64)hpet_read(HPET_COUNTER);
 }
 
 static void hpet_suspend(struct clocksource *cs)
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 7407da04f8d6..1829a9031eec 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -75,7 +75,7 @@ static void __init estimate_frequencies(void)
         unsigned int count, start;
         unsigned char secs1, secs2, ctrl;
         int secs;
-        cycle_t giccount = 0, gicstart = 0;
+        u64 giccount = 0, gicstart = 0;
 
 #if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ
         mips_hpt_frequency = CONFIG_KVM_GUEST_TIMER_FREQ * 1000000;
diff --git a/arch/mips/netlogic/common/time.c b/arch/mips/netlogic/common/time.c
index 5873c83e65be..cbbf0d48216b 100644
--- a/arch/mips/netlogic/common/time.c
+++ b/arch/mips/netlogic/common/time.c
@@ -59,14 +59,14 @@ unsigned int get_c0_compare_int(void)
         return IRQ_TIMER;
 }
 
-static cycle_t nlm_get_pic_timer(struct clocksource *cs)
+static u64 nlm_get_pic_timer(struct clocksource *cs)
 {
         uint64_t picbase = nlm_get_node(0)->picbase;
 
         return ~nlm_pic_read_timer(picbase, PIC_CLOCK_TIMER);
 }
 
-static cycle_t nlm_get_pic_timer32(struct clocksource *cs)
+static u64 nlm_get_pic_timer32(struct clocksource *cs)
 {
         uint64_t picbase = nlm_get_node(0)->picbase;
 
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 42d6cb9f956e..695c51bdd7dc 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -140,7 +140,7 @@ static void __init hub_rt_clock_event_global_init(void)
         setup_irq(irq, &hub_rt_irqaction);
 }
 
-static cycle_t hub_rt_read(struct clocksource *cs)
+static u64 hub_rt_read(struct clocksource *cs)
 {
         return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
 }
diff --git a/arch/mn10300/kernel/csrc-mn10300.c b/arch/mn10300/kernel/csrc-mn10300.c
index 45644cf18c41..6b74df3661f2 100644
--- a/arch/mn10300/kernel/csrc-mn10300.c
+++ b/arch/mn10300/kernel/csrc-mn10300.c
@@ -13,7 +13,7 @@
 #include <asm/timex.h>
 #include "internal.h"
 
-static cycle_t mn10300_read(struct clocksource *cs)
+static u64 mn10300_read(struct clocksource *cs)
 {
         return read_timestamp_counter();
 }
diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c
index 746bf5caaffc..6e2bdc9b8530 100644
--- a/arch/nios2/kernel/time.c
+++ b/arch/nios2/kernel/time.c
@@ -81,7 +81,7 @@ static inline unsigned long read_timersnapshot(struct nios2_timer *timer)
         return count;
 }
 
-static cycle_t nios2_timer_read(struct clocksource *cs)
+static u64 nios2_timer_read(struct clocksource *cs)
 {
         struct nios2_clocksource *nios2_cs = to_nios2_clksource(cs);
         unsigned long flags;
diff --git a/arch/openrisc/kernel/time.c b/arch/openrisc/kernel/time.c
index 50e970183dcd..687c11d048d7 100644
--- a/arch/openrisc/kernel/time.c
+++ b/arch/openrisc/kernel/time.c
@@ -117,9 +117,9 @@ static __init void openrisc_clockevent_init(void)
  * is 32 bits wide and runs at the CPU clock frequency.
  */
 
-static cycle_t openrisc_timer_read(struct clocksource *cs)
+static u64 openrisc_timer_read(struct clocksource *cs)
 {
-        return (cycle_t) mfspr(SPR_TTCR);
+        return (u64) mfspr(SPR_TTCR);
 }
 
 static struct clocksource openrisc_timer = {
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 037d81f00520..da0d9cb63403 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -137,7 +137,7 @@ EXPORT_SYMBOL(profile_pc);
 
 /* clock source code */
 
-static cycle_t notrace read_cr16(struct clocksource *cs)
+static u64 notrace read_cr16(struct clocksource *cs)
 {
         return get_cycles();
 }
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 19397e2a8bf5..bc2e08d415fa 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -80,7 +80,7 @@
 #include <linux/clockchips.h>
 #include <linux/timekeeper_internal.h>
 
-static cycle_t rtc_read(struct clocksource *);
+static u64 rtc_read(struct clocksource *);
 static struct clocksource clocksource_rtc = {
         .name         = "rtc",
         .rating       = 400,
@@ -89,7 +89,7 @@ static struct clocksource clocksource_rtc = {
         .read         = rtc_read,
 };
 
-static cycle_t timebase_read(struct clocksource *);
+static u64 timebase_read(struct clocksource *);
 static struct clocksource clocksource_timebase = {
         .name         = "timebase",
         .rating       = 400,
@@ -802,18 +802,18 @@ void read_persistent_clock(struct timespec *ts)
 }
 
 /* clocksource code */
-static cycle_t rtc_read(struct clocksource *cs)
+static u64 rtc_read(struct clocksource *cs)
 {
-        return (cycle_t)get_rtc();
+        return (u64)get_rtc();
 }
 
-static cycle_t timebase_read(struct clocksource *cs)
+static u64 timebase_read(struct clocksource *cs)
 {
-        return (cycle_t)get_tb();
+        return (u64)get_tb();
 }
 
 void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
-                         struct clocksource *clock, u32 mult, cycle_t cycle_last)
+                         struct clocksource *clock, u32 mult, u64 cycle_last)
 {
         u64 new_tb_to_xs, new_stamp_xsec;
         u32 frac_sec;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 66b2a35be424..ec34e39471a7 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1872,8 +1872,7 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
         }
         dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
                 / tb_ticks_per_sec;
-        hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
-                      HRTIMER_MODE_REL);
+        hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
         vcpu->arch.timer_running = 1;
 }
 
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
index b19265de9178..5182f2936af2 100644
--- a/arch/powerpc/oprofile/cell/spu_profiler.c
+++ b/arch/powerpc/oprofile/cell/spu_profiler.c
@@ -180,7 +180,7 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
         smp_wmb(); /* insure spu event buffer updates are written */
         /* don't want events intermingled... */
 
-        kt = ktime_set(0, profiling_interval);
+        kt = profiling_interval;
         if (!spu_prof_running)
                 goto stop;
         hrtimer_forward(timer, timer->base->get_time(), kt);
@@ -204,7 +204,7 @@ int start_spu_profiling_cycles(unsigned int cycles_reset)
         ktime_t kt;
 
         pr_debug("timer resolution: %lu\n", TICK_NSEC);
-        kt = ktime_set(0, profiling_interval);
+        kt = profiling_interval;
         hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
         hrtimer_set_expires(&timer, kt);
         timer.function = profile_spus;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index ec76315c9ee5..52949df88529 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -209,7 +209,7 @@ void read_boot_clock64(struct timespec64 *ts)
         tod_to_timeval(clock - TOD_UNIX_EPOCH, ts);
 }
 
-static cycle_t read_tod_clock(struct clocksource *cs)
+static u64 read_tod_clock(struct clocksource *cs)
 {
         unsigned long long now, adj;
 
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 6843dd5a1cba..0f8f14199734 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1019,7 +1019,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
                 return 0;
 
         __set_cpu_idle(vcpu);
-        hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
+        hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
         VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
 no_timer:
         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 1affabc96b08..244062bdaa56 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -148,7 +148,7 @@ static unsigned int sbus_cycles_offset(void)
         return offset;
 }
 
-static cycle_t timer_cs_read(struct clocksource *cs)
+static u64 timer_cs_read(struct clocksource *cs)
 {
         unsigned int seq, offset;
         u64 cycles;
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 807f7e2ce014..12a6d3555cb8 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -770,7 +770,7 @@ void udelay(unsigned long usecs)
 }
 EXPORT_SYMBOL(udelay);
 
-static cycle_t clocksource_tick_read(struct clocksource *cs)
+static u64 clocksource_tick_read(struct clocksource *cs)
 {
         return tick_ops->get_tick();
 }
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 25c23666d592..ba87a27d6715 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -83,7 +83,7 @@ static irqreturn_t um_timer(int irq, void *dev)
         return IRQ_HANDLED;
 }
 
-static cycle_t timer_read(struct clocksource *cs)
+static u64 timer_read(struct clocksource *cs)
 {
         return os_nsecs() / TIMER_MULTIPLIER;
 }
diff --git a/arch/unicore32/kernel/time.c b/arch/unicore32/kernel/time.c
index ac4c5449bb88..fceaa673f861 100644
--- a/arch/unicore32/kernel/time.c
+++ b/arch/unicore32/kernel/time.c
@@ -62,7 +62,7 @@ static struct clock_event_device ckevt_puv3_osmr0 = {
         .set_state_oneshot = puv3_osmr0_shutdown,
 };
 
-static cycle_t puv3_read_oscr(struct clocksource *cs)
+static u64 puv3_read_oscr(struct clocksource *cs)
 {
         return readl(OST_OSCR);
 }
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 02223cb4bcfd..9d4d6e138311 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -92,10 +92,10 @@ static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void)
         return (const struct pvclock_vsyscall_time_info *)&pvclock_page;
 }
 
-static notrace cycle_t vread_pvclock(int *mode)
+static notrace u64 vread_pvclock(int *mode)
 {
         const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
-        cycle_t ret;
+        u64 ret;
         u64 last;
         u32 version;
 
@@ -142,9 +142,9 @@ static notrace cycle_t vread_pvclock(int *mode)
 }
 #endif
 
-notrace static cycle_t vread_tsc(void)
+notrace static u64 vread_tsc(void)
 {
-        cycle_t ret = (cycle_t)rdtsc_ordered();
+        u64 ret = (u64)rdtsc_ordered();
         u64 last = gtod->cycle_last;
 
         if (likely(ret >= last))
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2e25038dbd93..a7066dc1a7e9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -768,7 +768,7 @@ struct kvm_arch {
         spinlock_t pvclock_gtod_sync_lock;
         bool use_master_clock;
         u64 master_kernel_ns;
-        cycle_t master_cycle_now;
+        u64 master_cycle_now;
         struct delayed_work kvmclock_update_work;
         struct delayed_work kvmclock_sync_work;
 
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 3ad741b84072..448cfe1b48cf 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -14,7 +14,7 @@ static inline struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
 #endif
 
 /* some helper functions for xen and kvm pv clock sources */
-cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
 void pvclock_set_flags(u8 flags);
 unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
@@ -87,11 +87,10 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 }
 
 static __always_inline
-cycle_t __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
-                              u64 tsc)
+u64 __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, u64 tsc)
 {
         u64 delta = tsc - src->tsc_timestamp;
-        cycle_t offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
+        u64 offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
                              src->tsc_shift);
         return src->system_time + offset;
 }
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index abb1fdcc545a..f5e6f1c417df 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -29,7 +29,7 @@ static inline cycles_t get_cycles(void)
         return rdtsc();
 }
 
-extern struct system_counterval_t convert_art_to_tsc(cycle_t art);
+extern struct system_counterval_t convert_art_to_tsc(u64 art);
 
 extern void tsc_init(void);
 extern void mark_tsc_unstable(char *reason);
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 3a01996db58f..022e59714562 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -17,8 +17,8 @@ struct vsyscall_gtod_data {
         unsigned seq;
 
         int vclock_mode;
-        cycle_t cycle_last;
-        cycle_t mask;
+        u64 cycle_last;
+        u64 mask;
         u32 mult;
         u32 shift;
 
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 202a7817beaf..65721dc73bd8 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -247,7 +247,7 @@ void apbt_setup_secondary_clock(void) {}
 static int apbt_clocksource_register(void)
 {
         u64 start, now;
-        cycle_t t1;
+        u64 t1;
 
         /* Start the counter, use timer 2 as source, timer 0/1 for event */
         dw_apb_clocksource_start(clocksource_apbt);
@@ -355,7 +355,7 @@ unsigned long apbt_quick_calibrate(void)
 {
         int i, scale;
         u64 old, new;
-        cycle_t t1, t2;
+        u64 t1, t2;
         unsigned long khz = 0;
         u32 loop, shift;
 
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index f37e02e41a77..65e20c97e04b 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -133,9 +133,9 @@ static uint32_t __init ms_hyperv_platform(void)
         return 0;
 }
 
-static cycle_t read_hv_clock(struct clocksource *arg)
+static u64 read_hv_clock(struct clocksource *arg)
 {
-        cycle_t current_tick;
+        u64 current_tick;
         /*
          * Read the partition counter to get the current tick count. This count
          * is set to 0 when the partition is created and is incremented in
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 38c8fd684d38..85e87b46c318 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -791,7 +791,7 @@ static union hpet_lock hpet __cacheline_aligned = {
         { .lock = __ARCH_SPIN_LOCK_UNLOCKED, },
 };
 
-static cycle_t read_hpet(struct clocksource *cs)
+static u64 read_hpet(struct clocksource *cs)
 {
         unsigned long flags;
         union hpet_lock old, new;
@@ -802,7 +802,7 @@ static cycle_t read_hpet(struct clocksource *cs)
          * Read HPET directly if in NMI.
          */
         if (in_nmi())
-                return (cycle_t)hpet_readl(HPET_COUNTER);
+                return (u64)hpet_readl(HPET_COUNTER);
 
         /*
          * Read the current state of the lock and HPET value atomically.
@@ -821,7 +821,7 @@ static cycle_t read_hpet(struct clocksource *cs)
                 WRITE_ONCE(hpet.value, new.value);
                 arch_spin_unlock(&hpet.lock);
                 local_irq_restore(flags);
-                return (cycle_t)new.value;
+                return (u64)new.value;
         }
         local_irq_restore(flags);
 
@@ -843,15 +843,15 @@ contended:
                 new.lockval = READ_ONCE(hpet.lockval);
         } while ((new.value == old.value) && arch_spin_is_locked(&new.lock));
 
-        return (cycle_t)new.value;
+        return (u64)new.value;
 }
 #else
 /*
  * For UP or 32-bit.
  */
-static cycle_t read_hpet(struct clocksource *cs)
+static u64 read_hpet(struct clocksource *cs)
 {
-        return (cycle_t)hpet_readl(HPET_COUNTER);
+        return (u64)hpet_readl(HPET_COUNTER);
 }
 #endif
 
@@ -867,7 +867,7 @@ static struct clocksource clocksource_hpet = {
 static int hpet_clocksource_register(void)
 {
         u64 start, now;
-        cycle_t t1;
+        u64 t1;
 
         /* Start the counter */
         hpet_restart_counter();
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 60b9949f1e65..2a5cafdf8808 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -32,7 +32,7 @@
 static int kvmclock __ro_after_init = 1;
 static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
 static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
-static cycle_t kvm_sched_clock_offset;
+static u64 kvm_sched_clock_offset;
 
 static int parse_no_kvmclock(char *arg)
 {
@@ -79,10 +79,10 @@ static int kvm_set_wallclock(const struct timespec *now)
         return -1;
 }
 
-static cycle_t kvm_clock_read(void)
+static u64 kvm_clock_read(void)
 {
         struct pvclock_vcpu_time_info *src;
-        cycle_t ret;
+        u64 ret;
         int cpu;
 
         preempt_disable_notrace();
@@ -93,12 +93,12 @@ static cycle_t kvm_clock_read(void)
         return ret;
 }
 
-static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
+static u64 kvm_clock_get_cycles(struct clocksource *cs)
 {
         return kvm_clock_read();
 }
 
-static cycle_t kvm_sched_clock_read(void)
+static u64 kvm_sched_clock_read(void)
 {
         return kvm_clock_read() - kvm_sched_clock_offset;
 }
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 5b2cc889ce34..9e93fe5803b4 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -71,10 +71,10 @@ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
         return flags & valid_flags;
 }
 
-cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 {
         unsigned version;
-        cycle_t ret;
+        u64 ret;
         u64 last;
         u8 flags;
 
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 0aed75a1e31b..be3a49ee0356 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1101,9 +1101,9 @@ static void tsc_resume(struct clocksource *cs)
  * checking the result of read_tsc() - cycle_last for being negative.
  * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
  */
-static cycle_t read_tsc(struct clocksource *cs)
+static u64 read_tsc(struct clocksource *cs)
 {
-        return (cycle_t)rdtsc_ordered();
+        return (u64)rdtsc_ordered();
 }
 
 /*
@@ -1192,7 +1192,7 @@ int unsynchronized_tsc(void)
 /*
  * Convert ART to TSC given numerator/denominator found in detect_art()
  */
-struct system_counterval_t convert_art_to_tsc(cycle_t art)
+struct system_counterval_t convert_art_to_tsc(u64 art)
 {
         u64 tmp, res, rem;
 
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 34a66b2d47e6..5fe290c1b7d8 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1106,7 +1106,7 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
1106 now = ktime_get(); 1106 now = ktime_get();
1107 remaining = ktime_sub(apic->lapic_timer.target_expiration, now); 1107 remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1108 if (ktime_to_ns(remaining) < 0) 1108 if (ktime_to_ns(remaining) < 0)
1109 remaining = ktime_set(0, 0); 1109 remaining = 0;
1110 1110
1111 ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period); 1111 ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
1112 tmcct = div64_u64(ns, 1112 tmcct = div64_u64(ns,
@@ -2057,7 +2057,7 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
2057 apic->lapic_timer.tscdeadline = 0; 2057 apic->lapic_timer.tscdeadline = 0;
2058 if (apic_lvtt_oneshot(apic)) { 2058 if (apic_lvtt_oneshot(apic)) {
2059 apic->lapic_timer.tscdeadline = 0; 2059 apic->lapic_timer.tscdeadline = 0;
2060 apic->lapic_timer.target_expiration = ktime_set(0, 0); 2060 apic->lapic_timer.target_expiration = 0;
2061 } 2061 }
2062 atomic_set(&apic->lapic_timer.pending, 0); 2062 atomic_set(&apic->lapic_timer.pending, 0);
2063 } 2063 }
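The two lapic.c hunks above show the ktime side of the cleanup: a ktime_t holds nanoseconds directly, so a zero expiration is written as a plain 0 instead of ktime_set(0, 0). A small illustrative sketch (names are hypothetical, not from this commit); ktime_set() remains the right helper when a non-zero seconds part is involved:

#include <linux/ktime.h>

static void ktime_scalar_examples(void)
{
	ktime_t expires;

	expires = 0;			/* was: ktime_set(0, 0) */
	expires = 100 * NSEC_PER_MSEC;	/* was: ktime_set(0, 100 * NSEC_PER_MSEC) */
	expires = ktime_set(2, 0);	/* non-zero seconds still go through ktime_set() */
	(void)expires;
}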
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6414fa6cb9fd..51ccfe08e32f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1131,8 +1131,8 @@ struct pvclock_gtod_data {
1131 1131
1132 struct { /* extract of a clocksource struct */ 1132 struct { /* extract of a clocksource struct */
1133 int vclock_mode; 1133 int vclock_mode;
1134 cycle_t cycle_last; 1134 u64 cycle_last;
1135 cycle_t mask; 1135 u64 mask;
1136 u32 mult; 1136 u32 mult;
1137 u32 shift; 1137 u32 shift;
1138 } clock; 1138 } clock;
@@ -1572,9 +1572,9 @@ static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
1572 1572
1573#ifdef CONFIG_X86_64 1573#ifdef CONFIG_X86_64
1574 1574
1575static cycle_t read_tsc(void) 1575static u64 read_tsc(void)
1576{ 1576{
1577 cycle_t ret = (cycle_t)rdtsc_ordered(); 1577 u64 ret = (u64)rdtsc_ordered();
1578 u64 last = pvclock_gtod_data.clock.cycle_last; 1578 u64 last = pvclock_gtod_data.clock.cycle_last;
1579 1579
1580 if (likely(ret >= last)) 1580 if (likely(ret >= last))
@@ -1592,7 +1592,7 @@ static cycle_t read_tsc(void)
1592 return last; 1592 return last;
1593} 1593}
1594 1594
1595static inline u64 vgettsc(cycle_t *cycle_now) 1595static inline u64 vgettsc(u64 *cycle_now)
1596{ 1596{
1597 long v; 1597 long v;
1598 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 1598 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
@@ -1603,7 +1603,7 @@ static inline u64 vgettsc(cycle_t *cycle_now)
1603 return v * gtod->clock.mult; 1603 return v * gtod->clock.mult;
1604} 1604}
1605 1605
1606static int do_monotonic_boot(s64 *t, cycle_t *cycle_now) 1606static int do_monotonic_boot(s64 *t, u64 *cycle_now)
1607{ 1607{
1608 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 1608 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1609 unsigned long seq; 1609 unsigned long seq;
@@ -1624,7 +1624,7 @@ static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
1624} 1624}
1625 1625
1626/* returns true if host is using tsc clocksource */ 1626/* returns true if host is using tsc clocksource */
1627static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now) 1627static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *cycle_now)
1628{ 1628{
1629 /* checked again under seqlock below */ 1629 /* checked again under seqlock below */
1630 if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC) 1630 if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 4ca0d78adcf0..d3289d7e78fa 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -916,7 +916,7 @@ static unsigned long lguest_tsc_khz(void)
916 * If we can't use the TSC, the kernel falls back to our lower-priority 916 * If we can't use the TSC, the kernel falls back to our lower-priority
917 * "lguest_clock", where we read the time value given to us by the Host. 917 * "lguest_clock", where we read the time value given to us by the Host.
918 */ 918 */
919static cycle_t lguest_clock_read(struct clocksource *cs) 919static u64 lguest_clock_read(struct clocksource *cs)
920{ 920{
921 unsigned long sec, nsec; 921 unsigned long sec, nsec;
922 922
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index b333fc45f9ec..2ee7632d4916 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -30,7 +30,7 @@
30 30
31#define RTC_NAME "sgi_rtc" 31#define RTC_NAME "sgi_rtc"
32 32
33static cycle_t uv_read_rtc(struct clocksource *cs); 33static u64 uv_read_rtc(struct clocksource *cs);
34static int uv_rtc_next_event(unsigned long, struct clock_event_device *); 34static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
35static int uv_rtc_shutdown(struct clock_event_device *evt); 35static int uv_rtc_shutdown(struct clock_event_device *evt);
36 36
@@ -38,7 +38,7 @@ static struct clocksource clocksource_uv = {
38 .name = RTC_NAME, 38 .name = RTC_NAME,
39 .rating = 299, 39 .rating = 299,
40 .read = uv_read_rtc, 40 .read = uv_read_rtc,
41 .mask = (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK, 41 .mask = (u64)UVH_RTC_REAL_TIME_CLOCK_MASK,
42 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 42 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
43}; 43};
44 44
@@ -296,7 +296,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
296 * cachelines of it's own page. This allows faster simultaneous reads 296 * cachelines of it's own page. This allows faster simultaneous reads
297 * from a given socket. 297 * from a given socket.
298 */ 298 */
299static cycle_t uv_read_rtc(struct clocksource *cs) 299static u64 uv_read_rtc(struct clocksource *cs)
300{ 300{
301 unsigned long offset; 301 unsigned long offset;
302 302
@@ -305,7 +305,7 @@ static cycle_t uv_read_rtc(struct clocksource *cs)
305 else 305 else
306 offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE; 306 offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
307 307
308 return (cycle_t)uv_read_local_mmr(UVH_RTC | offset); 308 return (u64)uv_read_local_mmr(UVH_RTC | offset);
309} 309}
310 310
311/* 311/*
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 33d8f6a7829d..1e69956d7852 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -39,10 +39,10 @@ static unsigned long xen_tsc_khz(void)
39 return pvclock_tsc_khz(info); 39 return pvclock_tsc_khz(info);
40} 40}
41 41
42cycle_t xen_clocksource_read(void) 42u64 xen_clocksource_read(void)
43{ 43{
44 struct pvclock_vcpu_time_info *src; 44 struct pvclock_vcpu_time_info *src;
45 cycle_t ret; 45 u64 ret;
46 46
47 preempt_disable_notrace(); 47 preempt_disable_notrace();
48 src = &__this_cpu_read(xen_vcpu)->time; 48 src = &__this_cpu_read(xen_vcpu)->time;
@@ -51,7 +51,7 @@ cycle_t xen_clocksource_read(void)
51 return ret; 51 return ret;
52} 52}
53 53
54static cycle_t xen_clocksource_get_cycles(struct clocksource *cs) 54static u64 xen_clocksource_get_cycles(struct clocksource *cs)
55{ 55{
56 return xen_clocksource_read(); 56 return xen_clocksource_read();
57} 57}
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 3cbce3b085e7..ac0a2b0f9e62 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -67,7 +67,7 @@ void xen_init_irq_ops(void);
67void xen_setup_timer(int cpu); 67void xen_setup_timer(int cpu);
68void xen_setup_runstate_info(int cpu); 68void xen_setup_runstate_info(int cpu);
69void xen_teardown_timer(int cpu); 69void xen_teardown_timer(int cpu);
70cycle_t xen_clocksource_read(void); 70u64 xen_clocksource_read(void);
71void xen_setup_cpu_clockevents(void); 71void xen_setup_cpu_clockevents(void);
72void __init xen_init_time_ops(void); 72void __init xen_init_time_ops(void);
73void __init xen_hvm_init_time_ops(void); 73void __init xen_hvm_init_time_ops(void);
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index be81e69b25bc..668c1056f9e4 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -34,9 +34,9 @@
34unsigned long ccount_freq; /* ccount Hz */ 34unsigned long ccount_freq; /* ccount Hz */
35EXPORT_SYMBOL(ccount_freq); 35EXPORT_SYMBOL(ccount_freq);
36 36
37static cycle_t ccount_read(struct clocksource *cs) 37static u64 ccount_read(struct clocksource *cs)
38{ 38{
39 return (cycle_t)get_ccount(); 39 return (u64)get_ccount();
40} 40}
41 41
42static u64 notrace ccount_sched_clock_read(void) 42static u64 notrace ccount_sched_clock_read(void)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4bf850e8d6b5..a8e67a155d04 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2569,7 +2569,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
2569 * This will be replaced with the stats tracking code, using 2569 * This will be replaced with the stats tracking code, using
2570 * 'avg_completion_time / 2' as the pre-sleep target. 2570 * 'avg_completion_time / 2' as the pre-sleep target.
2571 */ 2571 */
2572 kt = ktime_set(0, nsecs); 2572 kt = nsecs;
2573 2573
2574 mode = HRTIMER_MODE_REL; 2574 mode = HRTIMER_MODE_REL;
2575 hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode); 2575 hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 48c6294e9c34..249e0304597f 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -194,7 +194,7 @@ void device_pm_move_last(struct device *dev)
194 194
195static ktime_t initcall_debug_start(struct device *dev) 195static ktime_t initcall_debug_start(struct device *dev)
196{ 196{
197 ktime_t calltime = ktime_set(0, 0); 197 ktime_t calltime = 0;
198 198
199 if (pm_print_times_enabled) { 199 if (pm_print_times_enabled) {
200 pr_info("calling %s+ @ %i, parent: %s\n", 200 pr_info("calling %s+ @ %i, parent: %s\n",
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index bf9ba26981a5..f546f8f107b0 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -998,14 +998,14 @@ static int print_wakeup_source_stats(struct seq_file *m,
998 998
999 active_time = ktime_sub(now, ws->last_time); 999 active_time = ktime_sub(now, ws->last_time);
1000 total_time = ktime_add(total_time, active_time); 1000 total_time = ktime_add(total_time, active_time);
1001 if (active_time.tv64 > max_time.tv64) 1001 if (active_time > max_time)
1002 max_time = active_time; 1002 max_time = active_time;
1003 1003
1004 if (ws->autosleep_enabled) 1004 if (ws->autosleep_enabled)
1005 prevent_sleep_time = ktime_add(prevent_sleep_time, 1005 prevent_sleep_time = ktime_add(prevent_sleep_time,
1006 ktime_sub(now, ws->start_prevent_time)); 1006 ktime_sub(now, ws->start_prevent_time));
1007 } else { 1007 } else {
1008 active_time = ktime_set(0, 0); 1008 active_time = 0;
1009 } 1009 }
1010 1010
1011 seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n", 1011 seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
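The print_wakeup_source_stats() hunk above shows the other place the union removal is visible to drivers: there is no .tv64 member any more, so ktime_t values are compared and assigned directly. A small sketch of the idiom, with hypothetical names:

#include <linux/ktime.h>

static ktime_t update_max_active(ktime_t start, ktime_t now, ktime_t max_seen)
{
	ktime_t active = ktime_sub(now, start);

	/* was: if (active.tv64 > max_seen.tv64) */
	if (active > max_seen)
		max_seen = active;

	return max_seen;
}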
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 4943ee22716e..c0e14e54909b 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -257,7 +257,7 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
257 257
258static void null_cmd_end_timer(struct nullb_cmd *cmd) 258static void null_cmd_end_timer(struct nullb_cmd *cmd)
259{ 259{
260 ktime_t kt = ktime_set(0, completion_nsec); 260 ktime_t kt = completion_nsec;
261 261
262 hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL); 262 hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
263} 263}
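The null_blk and blk-mq hunks above apply the same simplification to hrtimer users: a relative expiry expressed in nanoseconds can be assigned straight into a ktime_t and handed to hrtimer_start(). A hedged sketch of the pattern (hypothetical helper, not from this commit):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static void arm_relative_timer(struct hrtimer *timer, unsigned long delay_ns)
{
	ktime_t kt = delay_ns;	/* was: ktime_set(0, delay_ns) */

	hrtimer_start(timer, kt, HRTIMER_MODE_REL);
}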
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index be54e5331a45..20b32bb8c2af 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -69,9 +69,9 @@ static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
69#ifdef CONFIG_IA64 69#ifdef CONFIG_IA64
70static void __iomem *hpet_mctr; 70static void __iomem *hpet_mctr;
71 71
72static cycle_t read_hpet(struct clocksource *cs) 72static u64 read_hpet(struct clocksource *cs)
73{ 73{
74 return (cycle_t)read_counter((void __iomem *)hpet_mctr); 74 return (u64)read_counter((void __iomem *)hpet_mctr);
75} 75}
76 76
77static struct clocksource clocksource_hpet = { 77static struct clocksource clocksource_hpet = {
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 28037d0b8dcd..1961e3539b57 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -58,16 +58,16 @@ u32 acpi_pm_read_verified(void)
58 return v2; 58 return v2;
59} 59}
60 60
61static cycle_t acpi_pm_read(struct clocksource *cs) 61static u64 acpi_pm_read(struct clocksource *cs)
62{ 62{
63 return (cycle_t)read_pmtmr(); 63 return (u64)read_pmtmr();
64} 64}
65 65
66static struct clocksource clocksource_acpi_pm = { 66static struct clocksource clocksource_acpi_pm = {
67 .name = "acpi_pm", 67 .name = "acpi_pm",
68 .rating = 200, 68 .rating = 200,
69 .read = acpi_pm_read, 69 .read = acpi_pm_read,
70 .mask = (cycle_t)ACPI_PM_MASK, 70 .mask = (u64)ACPI_PM_MASK,
71 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 71 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
72}; 72};
73 73
@@ -81,9 +81,9 @@ static int __init acpi_pm_good_setup(char *__str)
81} 81}
82__setup("acpi_pm_good", acpi_pm_good_setup); 82__setup("acpi_pm_good", acpi_pm_good_setup);
83 83
84static cycle_t acpi_pm_read_slow(struct clocksource *cs) 84static u64 acpi_pm_read_slow(struct clocksource *cs)
85{ 85{
86 return (cycle_t)acpi_pm_read_verified(); 86 return (u64)acpi_pm_read_verified();
87} 87}
88 88
89static inline void acpi_pm_need_workaround(void) 89static inline void acpi_pm_need_workaround(void)
@@ -145,7 +145,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE,
145 */ 145 */
146static int verify_pmtmr_rate(void) 146static int verify_pmtmr_rate(void)
147{ 147{
148 cycle_t value1, value2; 148 u64 value1, value2;
149 unsigned long count, delta; 149 unsigned long count, delta;
150 150
151 mach_prepare_counter(); 151 mach_prepare_counter();
@@ -175,7 +175,7 @@ static int verify_pmtmr_rate(void)
175 175
176static int __init init_acpi_pm_clocksource(void) 176static int __init init_acpi_pm_clocksource(void)
177{ 177{
178 cycle_t value1, value2; 178 u64 value1, value2;
179 unsigned int i, j = 0; 179 unsigned int i, j = 0;
180 180
181 if (!pmtmr_ioport) 181 if (!pmtmr_ioport)
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
index 2b7e87134d1a..7517f959cba7 100644
--- a/drivers/clocksource/arc_timer.c
+++ b/drivers/clocksource/arc_timer.c
@@ -56,7 +56,7 @@ static int noinline arc_get_timer_clk(struct device_node *node)
56 56
57#ifdef CONFIG_ARC_TIMERS_64BIT 57#ifdef CONFIG_ARC_TIMERS_64BIT
58 58
59static cycle_t arc_read_gfrc(struct clocksource *cs) 59static u64 arc_read_gfrc(struct clocksource *cs)
60{ 60{
61 unsigned long flags; 61 unsigned long flags;
62 u32 l, h; 62 u32 l, h;
@@ -71,7 +71,7 @@ static cycle_t arc_read_gfrc(struct clocksource *cs)
71 71
72 local_irq_restore(flags); 72 local_irq_restore(flags);
73 73
74 return (((cycle_t)h) << 32) | l; 74 return (((u64)h) << 32) | l;
75} 75}
76 76
77static struct clocksource arc_counter_gfrc = { 77static struct clocksource arc_counter_gfrc = {
@@ -105,7 +105,7 @@ CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
105#define AUX_RTC_LOW 0x104 105#define AUX_RTC_LOW 0x104
106#define AUX_RTC_HIGH 0x105 106#define AUX_RTC_HIGH 0x105
107 107
108static cycle_t arc_read_rtc(struct clocksource *cs) 108static u64 arc_read_rtc(struct clocksource *cs)
109{ 109{
110 unsigned long status; 110 unsigned long status;
111 u32 l, h; 111 u32 l, h;
@@ -122,7 +122,7 @@ static cycle_t arc_read_rtc(struct clocksource *cs)
122 status = read_aux_reg(AUX_RTC_CTRL); 122 status = read_aux_reg(AUX_RTC_CTRL);
123 } while (!(status & _BITUL(31))); 123 } while (!(status & _BITUL(31)));
124 124
125 return (((cycle_t)h) << 32) | l; 125 return (((u64)h) << 32) | l;
126} 126}
127 127
128static struct clocksource arc_counter_rtc = { 128static struct clocksource arc_counter_rtc = {
@@ -166,9 +166,9 @@ CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
166 * 32bit TIMER1 to keep counting monotonically and wraparound 166 * 32bit TIMER1 to keep counting monotonically and wraparound
167 */ 167 */
168 168
169static cycle_t arc_read_timer1(struct clocksource *cs) 169static u64 arc_read_timer1(struct clocksource *cs)
170{ 170{
171 return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT); 171 return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
172} 172}
173 173
174static struct clocksource arc_counter_timer1 = { 174static struct clocksource arc_counter_timer1 = {
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index cdeca850f29e..4c8c3fb2e8b2 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -562,12 +562,12 @@ static u64 arch_counter_get_cntvct_mem(void)
562 */ 562 */
563u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct; 563u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
564 564
565static cycle_t arch_counter_read(struct clocksource *cs) 565static u64 arch_counter_read(struct clocksource *cs)
566{ 566{
567 return arch_timer_read_counter(); 567 return arch_timer_read_counter();
568} 568}
569 569
570static cycle_t arch_counter_read_cc(const struct cyclecounter *cc) 570static u64 arch_counter_read_cc(const struct cyclecounter *cc)
571{ 571{
572 return arch_timer_read_counter(); 572 return arch_timer_read_counter();
573} 573}
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index bbfeb2800a94..123ed20ac2ff 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -195,7 +195,7 @@ static int gt_dying_cpu(unsigned int cpu)
195 return 0; 195 return 0;
196} 196}
197 197
198static cycle_t gt_clocksource_read(struct clocksource *cs) 198static u64 gt_clocksource_read(struct clocksource *cs)
199{ 199{
200 return gt_counter_read(); 200 return gt_counter_read();
201} 201}
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index fbfbdec13b08..44e5e951583b 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -158,11 +158,11 @@ static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id)
158 * 158 *
159 * returns: Current timer counter register value 159 * returns: Current timer counter register value
160 **/ 160 **/
161static cycle_t __ttc_clocksource_read(struct clocksource *cs) 161static u64 __ttc_clocksource_read(struct clocksource *cs)
162{ 162{
163 struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc; 163 struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc;
164 164
165 return (cycle_t)readl_relaxed(timer->base_addr + 165 return (u64)readl_relaxed(timer->base_addr +
166 TTC_COUNT_VAL_OFFSET); 166 TTC_COUNT_VAL_OFFSET);
167} 167}
168 168
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index 77a365f573d7..c69e2772658d 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -30,7 +30,7 @@
30 30
31static void __iomem *clksrc_dbx500_timer_base; 31static void __iomem *clksrc_dbx500_timer_base;
32 32
33static cycle_t notrace clksrc_dbx500_prcmu_read(struct clocksource *cs) 33static u64 notrace clksrc_dbx500_prcmu_read(struct clocksource *cs)
34{ 34{
35 void __iomem *base = clksrc_dbx500_timer_base; 35 void __iomem *base = clksrc_dbx500_timer_base;
36 u32 count, count2; 36 u32 count, count2;
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index 797505aa2ba4..63e4f5519577 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -348,7 +348,7 @@ void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs)
348 dw_apb_clocksource_read(dw_cs); 348 dw_apb_clocksource_read(dw_cs);
349} 349}
350 350
351static cycle_t __apbt_read_clocksource(struct clocksource *cs) 351static u64 __apbt_read_clocksource(struct clocksource *cs)
352{ 352{
353 u32 current_count; 353 u32 current_count;
354 struct dw_apb_clocksource *dw_cs = 354 struct dw_apb_clocksource *dw_cs =
@@ -357,7 +357,7 @@ static cycle_t __apbt_read_clocksource(struct clocksource *cs)
357 current_count = apbt_readl_relaxed(&dw_cs->timer, 357 current_count = apbt_readl_relaxed(&dw_cs->timer,
358 APBTMR_N_CURRENT_VALUE); 358 APBTMR_N_CURRENT_VALUE);
359 359
360 return (cycle_t)~current_count; 360 return (u64)~current_count;
361} 361}
362 362
363static void apbt_restart_clocksource(struct clocksource *cs) 363static void apbt_restart_clocksource(struct clocksource *cs)
@@ -416,7 +416,7 @@ void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs)
416 * 416 *
417 * @dw_cs: The clocksource to read. 417 * @dw_cs: The clocksource to read.
418 */ 418 */
419cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs) 419u64 dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs)
420{ 420{
421 return (cycle_t)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE); 421 return (u64)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE);
422} 422}
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index 19bb1792d647..aff87df07449 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -110,9 +110,9 @@ static void em_sti_disable(struct em_sti_priv *p)
110 clk_disable_unprepare(p->clk); 110 clk_disable_unprepare(p->clk);
111} 111}
112 112
113static cycle_t em_sti_count(struct em_sti_priv *p) 113static u64 em_sti_count(struct em_sti_priv *p)
114{ 114{
115 cycle_t ticks; 115 u64 ticks;
116 unsigned long flags; 116 unsigned long flags;
117 117
118 /* the STI hardware buffers the 48-bit count, but to 118 /* the STI hardware buffers the 48-bit count, but to
@@ -121,14 +121,14 @@ static cycle_t em_sti_count(struct em_sti_priv *p)
121 * Always read STI_COUNT_H before STI_COUNT_L. 121 * Always read STI_COUNT_H before STI_COUNT_L.
122 */ 122 */
123 raw_spin_lock_irqsave(&p->lock, flags); 123 raw_spin_lock_irqsave(&p->lock, flags);
124 ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32; 124 ticks = (u64)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32;
125 ticks |= em_sti_read(p, STI_COUNT_L); 125 ticks |= em_sti_read(p, STI_COUNT_L);
126 raw_spin_unlock_irqrestore(&p->lock, flags); 126 raw_spin_unlock_irqrestore(&p->lock, flags);
127 127
128 return ticks; 128 return ticks;
129} 129}
130 130
131static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next) 131static u64 em_sti_set_next(struct em_sti_priv *p, u64 next)
132{ 132{
133 unsigned long flags; 133 unsigned long flags;
134 134
@@ -198,7 +198,7 @@ static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs)
198 return container_of(cs, struct em_sti_priv, cs); 198 return container_of(cs, struct em_sti_priv, cs);
199} 199}
200 200
201static cycle_t em_sti_clocksource_read(struct clocksource *cs) 201static u64 em_sti_clocksource_read(struct clocksource *cs)
202{ 202{
203 return em_sti_count(cs_to_em_sti(cs)); 203 return em_sti_count(cs_to_em_sti(cs));
204} 204}
@@ -271,7 +271,7 @@ static int em_sti_clock_event_next(unsigned long delta,
271 struct clock_event_device *ced) 271 struct clock_event_device *ced)
272{ 272{
273 struct em_sti_priv *p = ced_to_em_sti(ced); 273 struct em_sti_priv *p = ced_to_em_sti(ced);
274 cycle_t next; 274 u64 next;
275 int safe; 275 int safe;
276 276
277 next = em_sti_set_next(p, em_sti_count(p) + delta); 277 next = em_sti_set_next(p, em_sti_count(p) + delta);
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index b45b72b95861..4da1dc2278bd 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -183,7 +183,7 @@ static u64 exynos4_read_count_64(void)
183 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U); 183 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
184 } while (hi != hi2); 184 } while (hi != hi2);
185 185
186 return ((cycle_t)hi << 32) | lo; 186 return ((u64)hi << 32) | lo;
187} 187}
188 188
189/** 189/**
@@ -199,7 +199,7 @@ static u32 notrace exynos4_read_count_32(void)
199 return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L); 199 return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
200} 200}
201 201
202static cycle_t exynos4_frc_read(struct clocksource *cs) 202static u64 exynos4_frc_read(struct clocksource *cs)
203{ 203{
204 return exynos4_read_count_32(); 204 return exynos4_read_count_32();
205} 205}
@@ -266,7 +266,7 @@ static void exynos4_mct_comp0_stop(void)
266static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles) 266static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles)
267{ 267{
268 unsigned int tcon; 268 unsigned int tcon;
269 cycle_t comp_cycle; 269 u64 comp_cycle;
270 270
271 tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON); 271 tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
272 272
diff --git a/drivers/clocksource/h8300_timer16.c b/drivers/clocksource/h8300_timer16.c
index 07d9d5be9054..5b27fb9997c2 100644
--- a/drivers/clocksource/h8300_timer16.c
+++ b/drivers/clocksource/h8300_timer16.c
@@ -72,7 +72,7 @@ static inline struct timer16_priv *cs_to_priv(struct clocksource *cs)
72 return container_of(cs, struct timer16_priv, cs); 72 return container_of(cs, struct timer16_priv, cs);
73} 73}
74 74
75static cycle_t timer16_clocksource_read(struct clocksource *cs) 75static u64 timer16_clocksource_read(struct clocksource *cs)
76{ 76{
77 struct timer16_priv *p = cs_to_priv(cs); 77 struct timer16_priv *p = cs_to_priv(cs);
78 unsigned long raw, value; 78 unsigned long raw, value;
diff --git a/drivers/clocksource/h8300_tpu.c b/drivers/clocksource/h8300_tpu.c
index 7bdf1991c847..72e1cf2b3096 100644
--- a/drivers/clocksource/h8300_tpu.c
+++ b/drivers/clocksource/h8300_tpu.c
@@ -64,7 +64,7 @@ static inline struct tpu_priv *cs_to_priv(struct clocksource *cs)
64 return container_of(cs, struct tpu_priv, cs); 64 return container_of(cs, struct tpu_priv, cs);
65} 65}
66 66
67static cycle_t tpu_clocksource_read(struct clocksource *cs) 67static u64 tpu_clocksource_read(struct clocksource *cs)
68{ 68{
69 struct tpu_priv *p = cs_to_priv(cs); 69 struct tpu_priv *p = cs_to_priv(cs);
70 unsigned long flags; 70 unsigned long flags;
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 0efd36e483ab..64f6490740d7 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -25,7 +25,7 @@ EXPORT_SYMBOL(i8253_lock);
25 * to just read by itself. So use jiffies to emulate a free 25 * to just read by itself. So use jiffies to emulate a free
26 * running counter: 26 * running counter:
27 */ 27 */
28static cycle_t i8253_read(struct clocksource *cs) 28static u64 i8253_read(struct clocksource *cs)
29{ 29{
30 static int old_count; 30 static int old_count;
31 static u32 old_jifs; 31 static u32 old_jifs;
@@ -83,7 +83,7 @@ static cycle_t i8253_read(struct clocksource *cs)
83 83
84 count = (PIT_LATCH - 1) - count; 84 count = (PIT_LATCH - 1) - count;
85 85
86 return (cycle_t)(jifs * PIT_LATCH) + count; 86 return (u64)(jifs * PIT_LATCH) + count;
87} 87}
88 88
89static struct clocksource i8253_cs = { 89static struct clocksource i8253_cs = {
diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
index 4e4146f69845..7c61226f4359 100644
--- a/drivers/clocksource/jcore-pit.c
+++ b/drivers/clocksource/jcore-pit.c
@@ -57,7 +57,7 @@ static notrace u64 jcore_sched_clock_read(void)
57 return seclo * NSEC_PER_SEC + nsec; 57 return seclo * NSEC_PER_SEC + nsec;
58} 58}
59 59
60static cycle_t jcore_clocksource_read(struct clocksource *cs) 60static u64 jcore_clocksource_read(struct clocksource *cs)
61{ 61{
62 return jcore_sched_clock_read(); 62 return jcore_sched_clock_read();
63} 63}
diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
index 172f43d4bc1a..6fcf96540631 100644
--- a/drivers/clocksource/metag_generic.c
+++ b/drivers/clocksource/metag_generic.c
@@ -56,7 +56,7 @@ static int metag_timer_set_next_event(unsigned long delta,
56 return 0; 56 return 0;
57} 57}
58 58
59static cycle_t metag_clocksource_read(struct clocksource *cs) 59static u64 metag_clocksource_read(struct clocksource *cs)
60{ 60{
61 return __core_reg_get(TXTIMER); 61 return __core_reg_get(TXTIMER);
62} 62}
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index d9278847ffb2..d9ef7a61e093 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -125,7 +125,7 @@ static int gic_clockevent_init(void)
125 return 0; 125 return 0;
126} 126}
127 127
128static cycle_t gic_hpt_read(struct clocksource *cs) 128static u64 gic_hpt_read(struct clocksource *cs)
129{ 129{
130 return gic_read_count(); 130 return gic_read_count();
131} 131}
diff --git a/drivers/clocksource/mmio.c b/drivers/clocksource/mmio.c
index c4f7d7a9b689..4c4df981d8cc 100644
--- a/drivers/clocksource/mmio.c
+++ b/drivers/clocksource/mmio.c
@@ -20,24 +20,24 @@ static inline struct clocksource_mmio *to_mmio_clksrc(struct clocksource *c)
20 return container_of(c, struct clocksource_mmio, clksrc); 20 return container_of(c, struct clocksource_mmio, clksrc);
21} 21}
22 22
23cycle_t clocksource_mmio_readl_up(struct clocksource *c) 23u64 clocksource_mmio_readl_up(struct clocksource *c)
24{ 24{
25 return (cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg); 25 return (u64)readl_relaxed(to_mmio_clksrc(c)->reg);
26} 26}
27 27
28cycle_t clocksource_mmio_readl_down(struct clocksource *c) 28u64 clocksource_mmio_readl_down(struct clocksource *c)
29{ 29{
30 return ~(cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask; 30 return ~(u64)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
31} 31}
32 32
33cycle_t clocksource_mmio_readw_up(struct clocksource *c) 33u64 clocksource_mmio_readw_up(struct clocksource *c)
34{ 34{
35 return (cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg); 35 return (u64)readw_relaxed(to_mmio_clksrc(c)->reg);
36} 36}
37 37
38cycle_t clocksource_mmio_readw_down(struct clocksource *c) 38u64 clocksource_mmio_readw_down(struct clocksource *c)
39{ 39{
40 return ~(cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask; 40 return ~(u64)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
41} 41}
42 42
43/** 43/**
@@ -51,7 +51,7 @@ cycle_t clocksource_mmio_readw_down(struct clocksource *c)
51 */ 51 */
52int __init clocksource_mmio_init(void __iomem *base, const char *name, 52int __init clocksource_mmio_init(void __iomem *base, const char *name,
53 unsigned long hz, int rating, unsigned bits, 53 unsigned long hz, int rating, unsigned bits,
54 cycle_t (*read)(struct clocksource *)) 54 u64 (*read)(struct clocksource *))
55{ 55{
56 struct clocksource_mmio *cs; 56 struct clocksource_mmio *cs;
57 57
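The clocksource_mmio conversion above also changes what callers pass to clocksource_mmio_init(): the read helper they supply is now u64-returning. A minimal, hypothetical registration using the readl_up helper from the hunk:

#include <linux/clocksource.h>
#include <linux/io.h>

/* Hypothetical: base points at a free-running 32-bit up-counter. */
static int __init my_timer_clocksource_init(void __iomem *base, unsigned long rate)
{
	return clocksource_mmio_init(base, "my_timer", rate, 300, 32,
				     clocksource_mmio_readl_up);
}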
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
index 0ba0a913b41d..99b77aff0839 100644
--- a/drivers/clocksource/mxs_timer.c
+++ b/drivers/clocksource/mxs_timer.c
@@ -97,7 +97,7 @@ static void timrot_irq_acknowledge(void)
97 HW_TIMROT_TIMCTRLn(0) + STMP_OFFSET_REG_CLR); 97 HW_TIMROT_TIMCTRLn(0) + STMP_OFFSET_REG_CLR);
98} 98}
99 99
100static cycle_t timrotv1_get_cycles(struct clocksource *cs) 100static u64 timrotv1_get_cycles(struct clocksource *cs)
101{ 101{
102 return ~((__raw_readl(mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1)) 102 return ~((__raw_readl(mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1))
103 & 0xffff0000) >> 16); 103 & 0xffff0000) >> 16);
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c
index 3bf65fff5c08..ee358cdf4a07 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/qcom-timer.c
@@ -89,7 +89,7 @@ static struct clock_event_device __percpu *msm_evt;
89 89
90static void __iomem *source_base; 90static void __iomem *source_base;
91 91
92static notrace cycle_t msm_read_timer_count(struct clocksource *cs) 92static notrace u64 msm_read_timer_count(struct clocksource *cs)
93{ 93{
94 return readl_relaxed(source_base + TIMER_COUNT_VAL); 94 return readl_relaxed(source_base + TIMER_COUNT_VAL);
95} 95}
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index 54565bd0093b..0093ece661fe 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -307,7 +307,7 @@ static void samsung_clocksource_resume(struct clocksource *cs)
307 samsung_time_start(pwm.source_id, true); 307 samsung_time_start(pwm.source_id, true);
308} 308}
309 309
310static cycle_t notrace samsung_clocksource_read(struct clocksource *c) 310static u64 notrace samsung_clocksource_read(struct clocksource *c)
311{ 311{
312 return ~readl_relaxed(pwm.source_reg); 312 return ~readl_relaxed(pwm.source_reg);
313} 313}
diff --git a/drivers/clocksource/scx200_hrt.c b/drivers/clocksource/scx200_hrt.c
index 64f9e8294434..a46660bf6588 100644
--- a/drivers/clocksource/scx200_hrt.c
+++ b/drivers/clocksource/scx200_hrt.c
@@ -43,10 +43,10 @@ MODULE_PARM_DESC(ppm, "+-adjust to actual XO freq (ppm)");
43/* The base timer frequency, * 27 if selected */ 43/* The base timer frequency, * 27 if selected */
44#define HRT_FREQ 1000000 44#define HRT_FREQ 1000000
45 45
46static cycle_t read_hrt(struct clocksource *cs) 46static u64 read_hrt(struct clocksource *cs)
47{ 47{
48 /* Read the timer value */ 48 /* Read the timer value */
49 return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET); 49 return (u64) inl(scx200_cb_base + SCx200_TIMER_OFFSET);
50} 50}
51 51
52static struct clocksource cs_hrt = { 52static struct clocksource cs_hrt = {
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 103c49362c68..28757edf6aca 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -612,7 +612,7 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
612 return container_of(cs, struct sh_cmt_channel, cs); 612 return container_of(cs, struct sh_cmt_channel, cs);
613} 613}
614 614
615static cycle_t sh_cmt_clocksource_read(struct clocksource *cs) 615static u64 sh_cmt_clocksource_read(struct clocksource *cs)
616{ 616{
617 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); 617 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
618 unsigned long flags, raw; 618 unsigned long flags, raw;
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 469e776ec17a..1fbf2aadcfd4 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -255,7 +255,7 @@ static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
255 return container_of(cs, struct sh_tmu_channel, cs); 255 return container_of(cs, struct sh_tmu_channel, cs);
256} 256}
257 257
258static cycle_t sh_tmu_clocksource_read(struct clocksource *cs) 258static u64 sh_tmu_clocksource_read(struct clocksource *cs)
259{ 259{
260 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); 260 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
261 261
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 4da2af9694a2..d4ca9962a759 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -41,7 +41,7 @@
41 41
42static void __iomem *tcaddr; 42static void __iomem *tcaddr;
43 43
44static cycle_t tc_get_cycles(struct clocksource *cs) 44static u64 tc_get_cycles(struct clocksource *cs)
45{ 45{
46 unsigned long flags; 46 unsigned long flags;
47 u32 lower, upper; 47 u32 lower, upper;
@@ -56,7 +56,7 @@ static cycle_t tc_get_cycles(struct clocksource *cs)
56 return (upper << 16) | lower; 56 return (upper << 16) | lower;
57} 57}
58 58
59static cycle_t tc_get_cycles32(struct clocksource *cs) 59static u64 tc_get_cycles32(struct clocksource *cs)
60{ 60{
61 return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV)); 61 return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
62} 62}
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c
index a8e6c7df853d..3710e4d9dcba 100644
--- a/drivers/clocksource/time-pistachio.c
+++ b/drivers/clocksource/time-pistachio.c
@@ -67,7 +67,7 @@ static inline void gpt_writel(void __iomem *base, u32 value, u32 offset,
67 writel(value, base + 0x20 * gpt_id + offset); 67 writel(value, base + 0x20 * gpt_id + offset);
68} 68}
69 69
70static cycle_t notrace 70static u64 notrace
71pistachio_clocksource_read_cycles(struct clocksource *cs) 71pistachio_clocksource_read_cycles(struct clocksource *cs)
72{ 72{
73 struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs); 73 struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
@@ -84,7 +84,7 @@ pistachio_clocksource_read_cycles(struct clocksource *cs)
84 counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0); 84 counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0);
85 raw_spin_unlock_irqrestore(&pcs->lock, flags); 85 raw_spin_unlock_irqrestore(&pcs->lock, flags);
86 86
87 return (cycle_t)~counter; 87 return (u64)~counter;
88} 88}
89 89
90static u64 notrace pistachio_read_sched_clock(void) 90static u64 notrace pistachio_read_sched_clock(void)
diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c
index 3c23e1744f4a..3d8a181f0252 100644
--- a/drivers/clocksource/timer-atlas7.c
+++ b/drivers/clocksource/timer-atlas7.c
@@ -85,7 +85,7 @@ static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)
85} 85}
86 86
87/* read 64-bit timer counter */ 87/* read 64-bit timer counter */
88static cycle_t sirfsoc_timer_read(struct clocksource *cs) 88static u64 sirfsoc_timer_read(struct clocksource *cs)
89{ 89{
90 u64 cycles; 90 u64 cycles;
91 91
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index 6555821bbdae..c0b5df3167a0 100644
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -73,7 +73,7 @@ static inline void pit_write(void __iomem *base, unsigned int reg_offset, unsign
73 * Clocksource: just a monotonic counter of MCK/16 cycles. 73 * Clocksource: just a monotonic counter of MCK/16 cycles.
74 * We don't care whether or not PIT irqs are enabled. 74 * We don't care whether or not PIT irqs are enabled.
75 */ 75 */
76static cycle_t read_pit_clk(struct clocksource *cs) 76static u64 read_pit_clk(struct clocksource *cs)
77{ 77{
78 struct pit_data *data = clksrc_to_pit_data(cs); 78 struct pit_data *data = clksrc_to_pit_data(cs);
79 unsigned long flags; 79 unsigned long flags;
diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c
index e90ab5b63a90..be4ac7604136 100644
--- a/drivers/clocksource/timer-atmel-st.c
+++ b/drivers/clocksource/timer-atmel-st.c
@@ -92,7 +92,7 @@ static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id)
92 return IRQ_NONE; 92 return IRQ_NONE;
93} 93}
94 94
95static cycle_t read_clk32k(struct clocksource *cs) 95static u64 read_clk32k(struct clocksource *cs)
96{ 96{
97 return read_CRTR(); 97 return read_CRTR();
98} 98}
diff --git a/drivers/clocksource/timer-nps.c b/drivers/clocksource/timer-nps.c
index 8da5e93b6810..da1f7986e477 100644
--- a/drivers/clocksource/timer-nps.c
+++ b/drivers/clocksource/timer-nps.c
@@ -77,11 +77,11 @@ static int __init nps_get_timer_clk(struct device_node *node,
77 return 0; 77 return 0;
78} 78}
79 79
80static cycle_t nps_clksrc_read(struct clocksource *clksrc) 80static u64 nps_clksrc_read(struct clocksource *clksrc)
81{ 81{
82 int cluster = raw_smp_processor_id() >> NPS_CLUSTER_OFFSET; 82 int cluster = raw_smp_processor_id() >> NPS_CLUSTER_OFFSET;
83 83
84 return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]); 84 return (u64)ioread32be(nps_msu_reg_low_addr[cluster]);
85} 85}
86 86
87static int __init nps_setup_clocksource(struct device_node *node) 87static int __init nps_setup_clocksource(struct device_node *node)
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
index c32148ec7a38..bfa981ac1eaf 100644
--- a/drivers/clocksource/timer-prima2.c
+++ b/drivers/clocksource/timer-prima2.c
@@ -72,7 +72,7 @@ static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)
72} 72}
73 73
74/* read 64-bit timer counter */ 74/* read 64-bit timer counter */
75static cycle_t notrace sirfsoc_timer_read(struct clocksource *cs) 75static u64 notrace sirfsoc_timer_read(struct clocksource *cs)
76{ 76{
77 u64 cycles; 77 u64 cycles;
78 78
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 4f87f3e76d83..a3e662b15964 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -152,7 +152,7 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
152 return IRQ_HANDLED; 152 return IRQ_HANDLED;
153} 153}
154 154
155static cycle_t sun5i_clksrc_read(struct clocksource *clksrc) 155static u64 sun5i_clksrc_read(struct clocksource *clksrc)
156{ 156{
157 struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc); 157 struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc);
158 158
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index cf5b14e442e4..624067712ef0 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -65,11 +65,11 @@ static inline struct ti_32k *to_ti_32k(struct clocksource *cs)
65 return container_of(cs, struct ti_32k, cs); 65 return container_of(cs, struct ti_32k, cs);
66} 66}
67 67
68static cycle_t notrace ti_32k_read_cycles(struct clocksource *cs) 68static u64 notrace ti_32k_read_cycles(struct clocksource *cs)
69{ 69{
70 struct ti_32k *ti = to_ti_32k(cs); 70 struct ti_32k *ti = to_ti_32k(cs);
71 71
72 return (cycle_t)readl_relaxed(ti->counter); 72 return (u64)readl_relaxed(ti->counter);
73} 73}
74 74
75static struct ti_32k ti_32k_timer = { 75static struct ti_32k ti_32k_timer = {
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
index b15069483fbd..d02b51075ad1 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/vt8500_timer.c
@@ -53,7 +53,7 @@
53 53
54static void __iomem *regbase; 54static void __iomem *regbase;
55 55
56static cycle_t vt8500_timer_read(struct clocksource *cs) 56static u64 vt8500_timer_read(struct clocksource *cs)
57{ 57{
58 int loops = msecs_to_loops(10); 58 int loops = msecs_to_loops(10);
59 writel(3, regbase + TIMER_CTRL_VAL); 59 writel(3, regbase + TIMER_CTRL_VAL);
@@ -75,7 +75,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
75 struct clock_event_device *evt) 75 struct clock_event_device *evt)
76{ 76{
77 int loops = msecs_to_loops(10); 77 int loops = msecs_to_loops(10);
78 cycle_t alarm = clocksource.read(&clocksource) + cycles; 78 u64 alarm = clocksource.read(&clocksource) + cycles;
79 while ((readl(regbase + TIMER_AS_VAL) & TIMER_MATCH_W_ACTIVE) 79 while ((readl(regbase + TIMER_AS_VAL) & TIMER_MATCH_W_ACTIVE)
80 && --loops) 80 && --loops)
81 cpu_relax(); 81 cpu_relax();
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 451f899f74e4..c9297605058c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -429,8 +429,8 @@ static int dmatest_func(void *data)
429 int dst_cnt; 429 int dst_cnt;
430 int i; 430 int i;
431 ktime_t ktime, start, diff; 431 ktime_t ktime, start, diff;
432 ktime_t filltime = ktime_set(0, 0); 432 ktime_t filltime = 0;
433 ktime_t comparetime = ktime_set(0, 0); 433 ktime_t comparetime = 0;
434 s64 runtime = 0; 434 s64 runtime = 0;
435 unsigned long long total_len = 0; 435 unsigned long long total_len = 0;
436 u8 align = 0; 436 u8 align = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index e4a5a5ac0ff3..762f8e82ceb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -752,7 +752,7 @@ static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vbla
752 752
753 drm_handle_vblank(ddev, amdgpu_crtc->crtc_id); 753 drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
754 dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id); 754 dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
755 hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), 755 hrtimer_start(vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD,
756 HRTIMER_MODE_REL); 756 HRTIMER_MODE_REL);
757 757
758 return HRTIMER_NORESTART; 758 return HRTIMER_NORESTART;
@@ -772,11 +772,11 @@ static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *ad
772 hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer, 772 hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer,
773 CLOCK_MONOTONIC, HRTIMER_MODE_REL); 773 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
774 hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer, 774 hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer,
775 ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD)); 775 DCE_VIRTUAL_VBLANK_PERIOD);
776 adev->mode_info.crtcs[crtc]->vblank_timer.function = 776 adev->mode_info.crtcs[crtc]->vblank_timer.function =
777 dce_virtual_vblank_timer_handle; 777 dce_virtual_vblank_timer_handle;
778 hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer, 778 hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer,
779 ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL); 779 DCE_VIRTUAL_VBLANK_PERIOD, HRTIMER_MODE_REL);
780 } else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) { 780 } else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
781 DRM_DEBUG("Disable software vsync timer\n"); 781 DRM_DEBUG("Disable software vsync timer\n");
782 hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer); 782 hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index d7be0d94ba4d..0bffd3f0c15d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -62,7 +62,7 @@ fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
62{ 62{
63 d->wake_count++; 63 d->wake_count++;
64 hrtimer_start_range_ns(&d->timer, 64 hrtimer_start_range_ns(&d->timer,
65 ktime_set(0, NSEC_PER_MSEC), 65 NSEC_PER_MSEC,
66 NSEC_PER_MSEC, 66 NSEC_PER_MSEC,
67 HRTIMER_MODE_REL); 67 HRTIMER_MODE_REL);
68} 68}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index f2f348f0160c..a6126c93f215 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -330,7 +330,7 @@ nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
330 __set_current_state(intr ? TASK_INTERRUPTIBLE : 330 __set_current_state(intr ? TASK_INTERRUPTIBLE :
331 TASK_UNINTERRUPTIBLE); 331 TASK_UNINTERRUPTIBLE);
332 332
333 kt = ktime_set(0, sleep_time); 333 kt = sleep_time;
334 schedule_hrtimeout(&kt, HRTIMER_MODE_REL); 334 schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
335 sleep_time *= 2; 335 sleep_time *= 2;
336 if (sleep_time > NSEC_PER_MSEC) 336 if (sleep_time > NSEC_PER_MSEC)
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 9942b0577d6e..725dffad5640 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -539,7 +539,7 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
539 } 539 }
540 540
541 drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq); 541 drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
542 tilcdc_crtc->last_vblank = ktime_set(0, 0); 542 tilcdc_crtc->last_vblank = 0;
543 543
544 tilcdc_crtc->enabled = false; 544 tilcdc_crtc->enabled = false;
545 mutex_unlock(&tilcdc_crtc->enable_lock); 545 mutex_unlock(&tilcdc_crtc->enable_lock);
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 446802ae8f1b..b44b32f21e61 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -135,9 +135,9 @@ u64 hv_do_hypercall(u64 control, void *input, void *output)
135EXPORT_SYMBOL_GPL(hv_do_hypercall); 135EXPORT_SYMBOL_GPL(hv_do_hypercall);
136 136
137#ifdef CONFIG_X86_64 137#ifdef CONFIG_X86_64
138static cycle_t read_hv_clock_tsc(struct clocksource *arg) 138static u64 read_hv_clock_tsc(struct clocksource *arg)
139{ 139{
140 cycle_t current_tick; 140 u64 current_tick;
141 struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page; 141 struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page;
142 142
143 if (tsc_pg->tsc_sequence != 0) { 143 if (tsc_pg->tsc_sequence != 0) {
@@ -146,7 +146,7 @@ static cycle_t read_hv_clock_tsc(struct clocksource *arg)
146 */ 146 */
147 147
148 while (1) { 148 while (1) {
149 cycle_t tmp; 149 u64 tmp;
150 u32 sequence = tsc_pg->tsc_sequence; 150 u32 sequence = tsc_pg->tsc_sequence;
151 u64 cur_tsc; 151 u64 cur_tsc;
152 u64 scale = tsc_pg->tsc_scale; 152 u64 scale = tsc_pg->tsc_scale;
@@ -350,7 +350,7 @@ int hv_post_message(union hv_connection_id connection_id,
350static int hv_ce_set_next_event(unsigned long delta, 350static int hv_ce_set_next_event(unsigned long delta,
351 struct clock_event_device *evt) 351 struct clock_event_device *evt)
352{ 352{
353 cycle_t current_tick; 353 u64 current_tick;
354 354
355 WARN_ON(!clockevent_state_oneshot(evt)); 355 WARN_ON(!clockevent_state_oneshot(evt));
356 356
diff --git a/drivers/iio/trigger/iio-trig-hrtimer.c b/drivers/iio/trigger/iio-trig-hrtimer.c
index 5e6d451febeb..a1cad6cc2e0f 100644
--- a/drivers/iio/trigger/iio-trig-hrtimer.c
+++ b/drivers/iio/trigger/iio-trig-hrtimer.c
@@ -63,7 +63,7 @@ ssize_t iio_hrtimer_store_sampling_frequency(struct device *dev,
63 return -EINVAL; 63 return -EINVAL;
64 64
65 info->sampling_frequency = val; 65 info->sampling_frequency = val;
66 info->period = ktime_set(0, NSEC_PER_SEC / val); 66 info->period = NSEC_PER_SEC / val;
67 67
68 return len; 68 return len;
69} 69}
@@ -141,8 +141,7 @@ static struct iio_sw_trigger *iio_trig_hrtimer_probe(const char *name)
141 trig_info->timer.function = iio_hrtimer_trig_handler; 141 trig_info->timer.function = iio_hrtimer_trig_handler;
142 142
143 trig_info->sampling_frequency = HRTIMER_DEFAULT_SAMPLING_FREQUENCY; 143 trig_info->sampling_frequency = HRTIMER_DEFAULT_SAMPLING_FREQUENCY;
144 trig_info->period = ktime_set(0, NSEC_PER_SEC / 144 trig_info->period = NSEC_PER_SEC / trig_info->sampling_frequency;
145 trig_info->sampling_frequency);
146 145
147 ret = iio_trigger_register(trig_info->swt.trigger); 146 ret = iio_trigger_register(trig_info->swt.trigger);
148 if (ret) 147 if (ret)
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
index 70a893a17467..36a5b93156ed 100644
--- a/drivers/input/joystick/walkera0701.c
+++ b/drivers/input/joystick/walkera0701.c
@@ -165,7 +165,7 @@ static void walkera0701_irq_handler(void *handler_data)
165 RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. */ 165 RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. */
166 w->counter = 0; 166 w->counter = 0;
167 167
168 hrtimer_start(&w->timer, ktime_set(0, BIN_SAMPLE), HRTIMER_MODE_REL); 168 hrtimer_start(&w->timer, BIN_SAMPLE, HRTIMER_MODE_REL);
169} 169}
170 170
171static enum hrtimer_restart timer_handler(struct hrtimer 171static enum hrtimer_restart timer_handler(struct hrtimer
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index c0178a122940..c01c09e9916d 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -152,12 +152,12 @@ static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
152} 152}
153 153
154#ifdef CONFIG_CLKSRC_MIPS_GIC 154#ifdef CONFIG_CLKSRC_MIPS_GIC
155cycle_t gic_read_count(void) 155u64 gic_read_count(void)
156{ 156{
157 unsigned int hi, hi2, lo; 157 unsigned int hi, hi2, lo;
158 158
159 if (mips_cm_is64) 159 if (mips_cm_is64)
160 return (cycle_t)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER)); 160 return (u64)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));
161 161
162 do { 162 do {
163 hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32)); 163 hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
@@ -165,7 +165,7 @@ cycle_t gic_read_count(void)
165 hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32)); 165 hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
166 } while (hi2 != hi); 166 } while (hi2 != hi);
167 167
168 return (((cycle_t) hi) << 32) + lo; 168 return (((u64) hi) << 32) + lo;
169} 169}
170 170
171unsigned int gic_get_count_width(void) 171unsigned int gic_get_count_width(void)
@@ -179,7 +179,7 @@ unsigned int gic_get_count_width(void)
179 return bits; 179 return bits;
180} 180}
181 181
182void gic_write_compare(cycle_t cnt) 182void gic_write_compare(u64 cnt)
183{ 183{
184 if (mips_cm_is64) { 184 if (mips_cm_is64) {
185 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt); 185 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
@@ -191,7 +191,7 @@ void gic_write_compare(cycle_t cnt)
191 } 191 }
192} 192}
193 193
194void gic_write_cpu_compare(cycle_t cnt, int cpu) 194void gic_write_cpu_compare(u64 cnt, int cpu)
195{ 195{
196 unsigned long flags; 196 unsigned long flags;
197 197
@@ -211,17 +211,17 @@ void gic_write_cpu_compare(cycle_t cnt, int cpu)
211 local_irq_restore(flags); 211 local_irq_restore(flags);
212} 212}
213 213
214cycle_t gic_read_compare(void) 214u64 gic_read_compare(void)
215{ 215{
216 unsigned int hi, lo; 216 unsigned int hi, lo;
217 217
218 if (mips_cm_is64) 218 if (mips_cm_is64)
219 return (cycle_t)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE)); 219 return (u64)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));
220 220
221 hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI)); 221 hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
222 lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO)); 222 lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));
223 223
224 return (((cycle_t) hi) << 32) + lo; 224 return (((u64) hi) << 32) + lo;
225} 225}
226 226
227void gic_start_count(void) 227void gic_start_count(void)
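gic_read_count() above keeps its hi/hi2 re-read loop; only the type of the composed result changes. The same torn-read guard, written generically for any counter exposed as two 32-bit halves (read_hi()/read_lo() are hypothetical accessors):

#include <linux/types.h>

static u32 read_hi(void) { return 0; }	/* hypothetical upper-word read */
static u32 read_lo(void) { return 0; }	/* hypothetical lower-word read */

static u64 read_split_counter(void)
{
	u32 hi, hi2, lo;

	/* Re-read the high word until it is stable, so a low-word rollover
	 * between the reads cannot produce a torn 64-bit value. */
	do {
		hi  = read_hi();
		lo  = read_lo();
		hi2 = read_hi();
	} while (hi2 != hi);

	return (((u64)hi) << 32) | lo;
}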
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 4a36632c236f..4671f8a12872 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -87,8 +87,7 @@ exit:
87 87
88 if (!err && (chan->txdone_method & TXDONE_BY_POLL)) 88 if (!err && (chan->txdone_method & TXDONE_BY_POLL))
89 /* kick start the timer immediately to avoid delays */ 89 /* kick start the timer immediately to avoid delays */
90 hrtimer_start(&chan->mbox->poll_hrt, ktime_set(0, 0), 90 hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
91 HRTIMER_MODE_REL);
92} 91}
93 92
94static void tx_tick(struct mbox_chan *chan, int r) 93static void tx_tick(struct mbox_chan *chan, int r)
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 0c44479b556e..0c16bb213101 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -562,7 +562,7 @@ static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev,
562 struct dmxdev_filter *filter, 562 struct dmxdev_filter *filter,
563 struct dmxdev_feed *feed) 563 struct dmxdev_feed *feed)
564{ 564{
565 ktime_t timeout = ktime_set(0, 0); 565 ktime_t timeout = 0;
566 struct dmx_pes_filter_params *para = &filter->params.pes; 566 struct dmx_pes_filter_params *para = &filter->params.pes;
567 dmx_output_t otype; 567 dmx_output_t otype;
568 int ret; 568 int ret;
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index dcfea3502e42..c7b3cb406499 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -178,8 +178,7 @@ static enum hrtimer_restart cx88_ir_work(struct hrtimer *timer)
178 struct cx88_IR *ir = container_of(timer, struct cx88_IR, timer); 178 struct cx88_IR *ir = container_of(timer, struct cx88_IR, timer);
179 179
180 cx88_ir_handle_key(ir); 180 cx88_ir_handle_key(ir);
181 missed = hrtimer_forward_now(&ir->timer, 181 missed = hrtimer_forward_now(&ir->timer, ir->polling * 1000000);
182 ktime_set(0, ir->polling * 1000000));
183 if (missed > 1) 182 if (missed > 1)
184 ir_dprintk("Missed ticks %ld\n", missed - 1); 183 ir_dprintk("Missed ticks %ld\n", missed - 1);
185 184
@@ -199,8 +198,7 @@ static int __cx88_ir_start(void *priv)
199 if (ir->polling) { 198 if (ir->polling) {
200 hrtimer_init(&ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 199 hrtimer_init(&ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
201 ir->timer.function = cx88_ir_work; 200 ir->timer.function = cx88_ir_work;
202 hrtimer_start(&ir->timer, 201 hrtimer_start(&ir->timer, ir->polling * 1000000,
203 ktime_set(0, ir->polling * 1000000),
204 HRTIMER_MODE_REL); 202 HRTIMER_MODE_REL);
205 } 203 }
206 if (ir->sampling) { 204 if (ir->sampling) {
diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c
index 7fb649e523f4..77f4d15f322b 100644
--- a/drivers/media/pci/pt3/pt3.c
+++ b/drivers/media/pci/pt3/pt3.c
@@ -463,7 +463,7 @@ static int pt3_fetch_thread(void *data)
463 463
464 pt3_proc_dma(adap); 464 pt3_proc_dma(adap);
465 465
466 delay = ktime_set(0, PT3_FETCH_DELAY * NSEC_PER_MSEC); 466 delay = PT3_FETCH_DELAY * NSEC_PER_MSEC;
467 set_current_state(TASK_UNINTERRUPTIBLE); 467 set_current_state(TASK_UNINTERRUPTIBLE);
468 freezable_schedule_hrtimeout_range(&delay, 468 freezable_schedule_hrtimeout_range(&delay,
469 PT3_FETCH_DELAY_DELTA * NSEC_PER_MSEC, 469 PT3_FETCH_DELAY_DELTA * NSEC_PER_MSEC,
diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c
index 82fb6f2ca011..e6efa8c267a0 100644
--- a/drivers/media/rc/ir-rx51.c
+++ b/drivers/media/rc/ir-rx51.c
@@ -109,7 +109,7 @@ static enum hrtimer_restart lirc_rx51_timer_cb(struct hrtimer *timer)
109 109
110 now = timer->base->get_time(); 110 now = timer->base->get_time();
111 111
112 } while (hrtimer_get_expires_tv64(timer) < now.tv64); 112 } while (hrtimer_get_expires_tv64(timer) < now);
113 113
114 return HRTIMER_RESTART; 114 return HRTIMER_RESTART;
115end: 115end:
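The loop condition above compares two plain s64 nanosecond values, so no .tv64 access is needed. A small sketch of such a comparison, again with a simplified ktime_t typedef standing in for the kernel type:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdbool.h>

	typedef int64_t ktime_t;	/* simplified: nanoseconds as s64 */

	/* Returns true while the expiry time lies in the past. */
	static bool expired(ktime_t expires, ktime_t now)
	{
		return expires < now;	/* direct compare, no .tv64 */
	}

	int main(void)
	{
		ktime_t now = 2000;
		ktime_t expires = 1500;

		printf("expired: %d\n", expired(expires, now));	/* prints 1 */
		return 0;
	}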
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index 52fe50725d74..4063215c9b54 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -390,7 +390,7 @@ static void softing_initialize_timestamp(struct softing *card)
390 ovf = 0x100000000ULL * 16; 390 ovf = 0x100000000ULL * 16;
391 do_div(ovf, card->pdat->freq ?: 16); 391 do_div(ovf, card->pdat->freq ?: 16);
392 392
393 card->ts_overflow = ktime_add_us(ktime_set(0, 0), ovf); 393 card->ts_overflow = ktime_add_us(0, ovf);
394} 394}
395 395
396ktime_t softing_raw2ktime(struct softing *card, u32 raw) 396ktime_t softing_raw2ktime(struct softing *card, u32 raw)
@@ -647,7 +647,7 @@ int softing_startstop(struct net_device *dev, int up)
647 open_candev(netdev); 647 open_candev(netdev);
648 if (dev != netdev) { 648 if (dev != netdev) {
649 /* notify other busses on the restart */ 649 /* notify other busses on the restart */
650 softing_netdev_rx(netdev, &msg, ktime_set(0, 0)); 650 softing_netdev_rx(netdev, &msg, 0);
651 ++priv->can.can_stats.restarts; 651 ++priv->can.can_stats.restarts;
652 } 652 }
653 netif_wake_queue(netdev); 653 netif_wake_queue(netdev);
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 7621f91a8a20..5f64deec9f6c 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -192,7 +192,7 @@ static int softing_handle_1(struct softing *card)
192 /* a dead bus has no overflows */ 192 /* a dead bus has no overflows */
193 continue; 193 continue;
194 ++netdev->stats.rx_over_errors; 194 ++netdev->stats.rx_over_errors;
195 softing_netdev_rx(netdev, &msg, ktime_set(0, 0)); 195 softing_netdev_rx(netdev, &msg, 0);
196 } 196 }
197 /* prepare for other use */ 197 /* prepare for other use */
198 memset(&msg, 0, sizeof(msg)); 198 memset(&msg, 0, sizeof(msg));
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index b03e4f58d02e..a533a6cc2d53 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -122,7 +122,7 @@
122#include "xgbe.h" 122#include "xgbe.h"
123#include "xgbe-common.h" 123#include "xgbe-common.h"
124 124
125static cycle_t xgbe_cc_read(const struct cyclecounter *cc) 125static u64 xgbe_cc_read(const struct cyclecounter *cc)
126{ 126{
127 struct xgbe_prv_data *pdata = container_of(cc, 127 struct xgbe_prv_data *pdata = container_of(cc,
128 struct xgbe_prv_data, 128 struct xgbe_prv_data,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 688617ac8c29..d8d06fdfc42b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -15223,7 +15223,7 @@ void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
15223} 15223}
15224 15224
15225/* Read the PHC */ 15225/* Read the PHC */
15226static cycle_t bnx2x_cyclecounter_read(const struct cyclecounter *cc) 15226static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc)
15227{ 15227{
15228 struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter); 15228 struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
15229 int port = BP_PORT(bp); 15229 int port = BP_PORT(bp);
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 57650953ff83..7bf78a0d322c 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -253,7 +253,7 @@ static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
253 if (!netif_running(priv->net_dev)) 253 if (!netif_running(priv->net_dev))
254 return HRTIMER_NORESTART; 254 return HRTIMER_NORESTART;
255 255
256 hrtimer_forward_now(timer, ktime_set(0, polling_frequency)); 256 hrtimer_forward_now(timer, polling_frequency);
257 return HRTIMER_RESTART; 257 return HRTIMER_RESTART;
258} 258}
259 259
@@ -427,8 +427,7 @@ static int ec_bhf_open(struct net_device *net_dev)
427 427
428 hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 428 hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
429 priv->hrtimer.function = ec_bhf_timer_fun; 429 priv->hrtimer.function = ec_bhf_timer_fun;
430 hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency), 430 hrtimer_start(&priv->hrtimer, polling_frequency, HRTIMER_MODE_REL);
431 HRTIMER_MODE_REL);
432 431
433 return 0; 432 return 0;
434 433
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index f9e74461bdc0..6ebad3fac81d 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -230,7 +230,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
230 * cyclecounter structure used to construct a ns counter from the 230 * cyclecounter structure used to construct a ns counter from the
231 * arbitrary fixed point registers 231 * arbitrary fixed point registers
232 */ 232 */
233static cycle_t fec_ptp_read(const struct cyclecounter *cc) 233static u64 fec_ptp_read(const struct cyclecounter *cc)
234{ 234{
235 struct fec_enet_private *fep = 235 struct fec_enet_private *fep =
236 container_of(cc, struct fec_enet_private, cc); 236 container_of(cc, struct fec_enet_private, cc);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index ffcf35af4881..eccf1da9356b 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4305,24 +4305,24 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
4305/** 4305/**
4306 * e1000e_sanitize_systim - sanitize raw cycle counter reads 4306 * e1000e_sanitize_systim - sanitize raw cycle counter reads
4307 * @hw: pointer to the HW structure 4307 * @hw: pointer to the HW structure
4308 * @systim: cycle_t value read, sanitized and returned 4308 * @systim: time value read, sanitized and returned
4309 * 4309 *
4310 * Errata for 82574/82583 possible bad bits read from SYSTIMH/L: 4310 * Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
4311 * check to see that the time is incrementing at a reasonable 4311 * check to see that the time is incrementing at a reasonable
4312 * rate and is a multiple of incvalue. 4312 * rate and is a multiple of incvalue.
4313 **/ 4313 **/
4314static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim) 4314static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim)
4315{ 4315{
4316 u64 time_delta, rem, temp; 4316 u64 time_delta, rem, temp;
4317 cycle_t systim_next; 4317 u64 systim_next;
4318 u32 incvalue; 4318 u32 incvalue;
4319 int i; 4319 int i;
4320 4320
4321 incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; 4321 incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
4322 for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { 4322 for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
4323 /* latch SYSTIMH on read of SYSTIML */ 4323 /* latch SYSTIMH on read of SYSTIML */
4324 systim_next = (cycle_t)er32(SYSTIML); 4324 systim_next = (u64)er32(SYSTIML);
4325 systim_next |= (cycle_t)er32(SYSTIMH) << 32; 4325 systim_next |= (u64)er32(SYSTIMH) << 32;
4326 4326
4327 time_delta = systim_next - systim; 4327 time_delta = systim_next - systim;
4328 temp = time_delta; 4328 temp = time_delta;
@@ -4342,13 +4342,13 @@ static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim)
4342 * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) 4342 * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
4343 * @cc: cyclecounter structure 4343 * @cc: cyclecounter structure
4344 **/ 4344 **/
4345static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) 4345static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
4346{ 4346{
4347 struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, 4347 struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
4348 cc); 4348 cc);
4349 struct e1000_hw *hw = &adapter->hw; 4349 struct e1000_hw *hw = &adapter->hw;
4350 u32 systimel, systimeh; 4350 u32 systimel, systimeh;
4351 cycle_t systim; 4351 u64 systim;
4352 /* SYSTIMH latching upon SYSTIML read does not work well. 4352 /* SYSTIMH latching upon SYSTIML read does not work well.
4353 * This means that if SYSTIML overflows after we read it but before 4353 * This means that if SYSTIML overflows after we read it but before
4354 * we read SYSTIMH, the value of SYSTIMH has been incremented and we 4354 * we read SYSTIMH, the value of SYSTIMH has been incremented and we
@@ -4368,8 +4368,8 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
4368 systimel = systimel_2; 4368 systimel = systimel_2;
4369 } 4369 }
4370 } 4370 }
4371 systim = (cycle_t)systimel; 4371 systim = (u64)systimel;
4372 systim |= (cycle_t)systimeh << 32; 4372 systim |= (u64)systimeh << 32;
4373 4373
4374 if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW) 4374 if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
4375 systim = e1000e_sanitize_systim(hw, systim); 4375 systim = e1000e_sanitize_systim(hw, systim);
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index ad03763e009a..34cc3be0df8e 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -127,8 +127,8 @@ static int e1000e_phc_get_syncdevicetime(ktime_t *device,
127 unsigned long flags; 127 unsigned long flags;
128 int i; 128 int i;
129 u32 tsync_ctrl; 129 u32 tsync_ctrl;
130 cycle_t dev_cycles; 130 u64 dev_cycles;
131 cycle_t sys_cycles; 131 u64 sys_cycles;
132 132
133 tsync_ctrl = er32(TSYNCTXCTL); 133 tsync_ctrl = er32(TSYNCTXCTL);
134 tsync_ctrl |= E1000_TSYNCTXCTL_START_SYNC | 134 tsync_ctrl |= E1000_TSYNCTXCTL_START_SYNC |
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index c30eea8399a7..c4477552ce9e 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -77,7 +77,7 @@
77static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); 77static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
78 78
79/* SYSTIM read access for the 82576 */ 79/* SYSTIM read access for the 82576 */
80static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc) 80static u64 igb_ptp_read_82576(const struct cyclecounter *cc)
81{ 81{
82 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); 82 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
83 struct e1000_hw *hw = &igb->hw; 83 struct e1000_hw *hw = &igb->hw;
@@ -94,7 +94,7 @@ static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
94} 94}
95 95
96/* SYSTIM read access for the 82580 */ 96/* SYSTIM read access for the 82580 */
97static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc) 97static u64 igb_ptp_read_82580(const struct cyclecounter *cc)
98{ 98{
99 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); 99 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
100 struct e1000_hw *hw = &igb->hw; 100 struct e1000_hw *hw = &igb->hw;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index a92277683a64..1efb404431e9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -245,7 +245,7 @@ static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter)
245 * result of SYSTIME is 32bits of "billions of cycles" and 32 bits of 245 * result of SYSTIME is 32bits of "billions of cycles" and 32 bits of
246 * "cycles", rather than seconds and nanoseconds. 246 * "cycles", rather than seconds and nanoseconds.
247 */ 247 */
248static cycle_t ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc) 248static u64 ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc)
249{ 249{
250 struct ixgbe_adapter *adapter = 250 struct ixgbe_adapter *adapter =
251 container_of(hw_cc, struct ixgbe_adapter, hw_cc); 251 container_of(hw_cc, struct ixgbe_adapter, hw_cc);
@@ -282,7 +282,7 @@ static cycle_t ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc)
282 * cyclecounter structure used to construct a ns counter from the 282 * cyclecounter structure used to construct a ns counter from the
283 * arbitrary fixed point registers 283 * arbitrary fixed point registers
284 */ 284 */
285static cycle_t ixgbe_ptp_read_82599(const struct cyclecounter *cc) 285static u64 ixgbe_ptp_read_82599(const struct cyclecounter *cc)
286{ 286{
287 struct ixgbe_adapter *adapter = 287 struct ixgbe_adapter *adapter =
288 container_of(cc, struct ixgbe_adapter, hw_cc); 288 container_of(cc, struct ixgbe_adapter, hw_cc);
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index cda04b3126bc..4fe430ceb194 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -4913,7 +4913,7 @@ static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
4913 4913
4914 if (!port_pcpu->timer_scheduled) { 4914 if (!port_pcpu->timer_scheduled) {
4915 port_pcpu->timer_scheduled = true; 4915 port_pcpu->timer_scheduled = true;
4916 interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS); 4916 interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
4917 hrtimer_start(&port_pcpu->tx_done_timer, interval, 4917 hrtimer_start(&port_pcpu->tx_done_timer, interval,
4918 HRTIMER_MODE_REL_PINNED); 4918 HRTIMER_MODE_REL_PINNED);
4919 } 4919 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index a5fc46bbcbe2..015198c14fa8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -38,7 +38,7 @@
38 38
39/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter) 39/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter)
40 */ 40 */
41static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc) 41static u64 mlx4_en_read_clock(const struct cyclecounter *tc)
42{ 42{
43 struct mlx4_en_dev *mdev = 43 struct mlx4_en_dev *mdev =
44 container_of(tc, struct mlx4_en_dev, cycles); 44 container_of(tc, struct mlx4_en_dev, cycles);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index b2ca8a635b2e..5e7840a7a33b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1823,10 +1823,10 @@ static void unmap_bf_area(struct mlx4_dev *dev)
1823 io_mapping_free(mlx4_priv(dev)->bf_mapping); 1823 io_mapping_free(mlx4_priv(dev)->bf_mapping);
1824} 1824}
1825 1825
1826cycle_t mlx4_read_clock(struct mlx4_dev *dev) 1826u64 mlx4_read_clock(struct mlx4_dev *dev)
1827{ 1827{
1828 u32 clockhi, clocklo, clockhi1; 1828 u32 clockhi, clocklo, clockhi1;
1829 cycle_t cycles; 1829 u64 cycles;
1830 int i; 1830 int i;
1831 struct mlx4_priv *priv = mlx4_priv(dev); 1831 struct mlx4_priv *priv = mlx4_priv(dev);
1832 1832
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 2cd8e56a573b..746a92c13644 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -49,7 +49,7 @@ void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
49 hwts->hwtstamp = ns_to_ktime(nsec); 49 hwts->hwtstamp = ns_to_ktime(nsec);
50} 50}
51 51
52static cycle_t mlx5e_read_internal_timer(const struct cyclecounter *cc) 52static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc)
53{ 53{
54 struct mlx5e_tstamp *tstamp = container_of(cc, struct mlx5e_tstamp, 54 struct mlx5e_tstamp *tstamp = container_of(cc, struct mlx5e_tstamp,
55 cycles); 55 cycles);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 7b4c339a8a9a..54e5a786f191 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -557,7 +557,7 @@ int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
557 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 557 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
558} 558}
559 559
560cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev) 560u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
561{ 561{
562 u32 timer_h, timer_h1, timer_l; 562 u32 timer_h, timer_h1, timer_l;
563 563
@@ -567,7 +567,7 @@ cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev)
567 if (timer_h != timer_h1) /* wrap around */ 567 if (timer_h != timer_h1) /* wrap around */
568 timer_l = ioread32be(&dev->iseg->internal_timer_l); 568 timer_l = ioread32be(&dev->iseg->internal_timer_l);
569 569
570 return (cycle_t)timer_l | (cycle_t)timer_h1 << 32; 570 return (u64)timer_l | (u64)timer_h1 << 32;
571} 571}
572 572
573static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) 573static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
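The read above keeps its rollover handling: the high word is read again after the low word, and a mismatch means the low word wrapped in between. A userspace sketch of that sequence, with a fake register pair standing in for the ioread32be() accesses (the fake_timer_* helpers and values are assumptions for illustration):

	#include <stdint.h>
	#include <stdio.h>

	/* Fake 64-bit counter split into two 32-bit registers; it starts
	 * just below a low-word wrap so the reread path is exercised. */
	static uint64_t fake_counter = 0x00000001fffffffeULL;

	static uint32_t fake_timer_h(void) { return (uint32_t)(fake_counter >> 32); }

	/* Each low-word read advances the counter to emulate time passing. */
	static uint32_t fake_timer_l(void) { fake_counter += 3; return (uint32_t)fake_counter; }

	/* Read high, low, then high again; if the high word moved, the low
	 * word wrapped in between, so re-read it. Return a plain u64. */
	static uint64_t read_internal_timer(void)
	{
		uint32_t timer_h, timer_h1, timer_l;

		timer_h = fake_timer_h();
		timer_l = fake_timer_l();
		timer_h1 = fake_timer_h();
		if (timer_h != timer_h1)	/* wrap around */
			timer_l = fake_timer_l();

		return (uint64_t)timer_l | (uint64_t)timer_h1 << 32;
	}

	int main(void)
	{
		printf("timer = 0x%016llx\n",
		       (unsigned long long)read_internal_timer());
		return 0;
	}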
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index e0a8fbdd1446..d4a99c9757cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -106,7 +106,7 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
106int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, 106int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
107 u32 element_id); 107 u32 element_id);
108int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev); 108int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
109cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev); 109u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev);
110u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx); 110u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
111struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn); 111struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
112void mlx5_cq_tasklet_cb(unsigned long data); 112void mlx5_cq_tasklet_cb(unsigned long data);
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 0c0d48e5bea4..32279d21c836 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -121,7 +121,7 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
121 return type == match ? 0 : -1; 121 return type == match ? 0 : -1;
122} 122}
123 123
124static cycle_t cpts_systim_read(const struct cyclecounter *cc) 124static u64 cpts_systim_read(const struct cyclecounter *cc)
125{ 125{
126 u64 val = 0; 126 u64 val = 0;
127 struct cpts_event *event; 127 struct cpts_event *event;
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 0aaf975bb347..2255f9a6f3bc 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -751,7 +751,7 @@ static void tile_net_schedule_tx_wake_timer(struct net_device *dev,
751 &info->mpipe[instance].tx_wake[priv->echannel]; 751 &info->mpipe[instance].tx_wake[priv->echannel];
752 752
753 hrtimer_start(&tx_wake->timer, 753 hrtimer_start(&tx_wake->timer,
754 ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL), 754 TX_TIMER_DELAY_USEC * 1000UL,
755 HRTIMER_MODE_REL_PINNED); 755 HRTIMER_MODE_REL_PINNED);
756} 756}
757 757
@@ -770,7 +770,7 @@ static void tile_net_schedule_egress_timer(void)
770 770
771 if (!info->egress_timer_scheduled) { 771 if (!info->egress_timer_scheduled) {
772 hrtimer_start(&info->egress_timer, 772 hrtimer_start(&info->egress_timer,
773 ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL), 773 EGRESS_TIMER_DELAY_USEC * 1000UL,
774 HRTIMER_MODE_REL_PINNED); 774 HRTIMER_MODE_REL_PINNED);
775 info->egress_timer_scheduled = true; 775 info->egress_timer_scheduled = true;
776 } 776 }
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 057025722e3d..46d53a6c8cf8 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -510,7 +510,7 @@ at86rf230_async_state_delay(void *context)
510 case STATE_TRX_OFF: 510 case STATE_TRX_OFF:
511 switch (ctx->to_state) { 511 switch (ctx->to_state) {
512 case STATE_RX_AACK_ON: 512 case STATE_RX_AACK_ON:
513 tim = ktime_set(0, c->t_off_to_aack * NSEC_PER_USEC); 513 tim = c->t_off_to_aack * NSEC_PER_USEC;
514 /* state change from TRX_OFF to RX_AACK_ON to do a 514 /* state change from TRX_OFF to RX_AACK_ON to do a
515 * calibration, we need to reset the timeout for the 515 * calibration, we need to reset the timeout for the
516 * next one. 516 * next one.
@@ -519,7 +519,7 @@ at86rf230_async_state_delay(void *context)
519 goto change; 519 goto change;
520 case STATE_TX_ARET_ON: 520 case STATE_TX_ARET_ON:
521 case STATE_TX_ON: 521 case STATE_TX_ON:
522 tim = ktime_set(0, c->t_off_to_tx_on * NSEC_PER_USEC); 522 tim = c->t_off_to_tx_on * NSEC_PER_USEC;
523 /* state change from TRX_OFF to TX_ON or ARET_ON to do 523 /* state change from TRX_OFF to TX_ON or ARET_ON to do
524 * a calibration, we need to reset the timeout for the 524 * a calibration, we need to reset the timeout for the
525 * next one. 525 * next one.
@@ -539,8 +539,7 @@ at86rf230_async_state_delay(void *context)
539 * to TX_ON or TRX_OFF. 539 * to TX_ON or TRX_OFF.
540 */ 540 */
541 if (!force) { 541 if (!force) {
542 tim = ktime_set(0, (c->t_frame + c->t_p_ack) * 542 tim = (c->t_frame + c->t_p_ack) * NSEC_PER_USEC;
543 NSEC_PER_USEC);
544 goto change; 543 goto change;
545 } 544 }
546 break; 545 break;
@@ -552,7 +551,7 @@ at86rf230_async_state_delay(void *context)
552 case STATE_P_ON: 551 case STATE_P_ON:
553 switch (ctx->to_state) { 552 switch (ctx->to_state) {
554 case STATE_TRX_OFF: 553 case STATE_TRX_OFF:
555 tim = ktime_set(0, c->t_reset_to_off * NSEC_PER_USEC); 554 tim = c->t_reset_to_off * NSEC_PER_USEC;
556 goto change; 555 goto change;
557 default: 556 default:
558 break; 557 break;
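The delays above are microsecond constants scaled to nanoseconds and used directly as relative ktime_t timeouts. A quick sketch of the conversion, assuming the simplified scalar ktime_t; the delay value here is made up, not the chip's real timing:

	#include <stdint.h>
	#include <stdio.h>

	typedef int64_t ktime_t;		/* simplified scalar ns */
	#define NSEC_PER_USEC 1000LL

	/* Scale a microsecond delay to a ktime_t relative timeout. */
	static ktime_t usecs_to_ktime(unsigned int usecs)
	{
		return (ktime_t)usecs * NSEC_PER_USEC;
	}

	int main(void)
	{
		unsigned int t_off_to_aack = 80;	/* made-up delay in us */

		printf("timeout = %lld ns\n",
		       (long long)usecs_to_ktime(t_off_to_aack));
		return 0;
	}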
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 2d1a6f2e16ab..f317984f7536 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1282,7 +1282,7 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
1282 /* start timer, if not already started */ 1282 /* start timer, if not already started */
1283 if (!(hrtimer_active(&ctx->tx_timer) || atomic_read(&ctx->stop))) 1283 if (!(hrtimer_active(&ctx->tx_timer) || atomic_read(&ctx->stop)))
1284 hrtimer_start(&ctx->tx_timer, 1284 hrtimer_start(&ctx->tx_timer,
1285 ktime_set(0, ctx->timer_interval), 1285 ctx->timer_interval,
1286 HRTIMER_MODE_REL); 1286 HRTIMER_MODE_REL);
1287} 1287}
1288 1288
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index 9f61293f1a56..f38c44061b5b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -177,7 +177,7 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
177 if (rt2800usb_txstatus_pending(rt2x00dev)) { 177 if (rt2800usb_txstatus_pending(rt2x00dev)) {
178 /* Read register after 1 ms */ 178 /* Read register after 1 ms */
179 hrtimer_start(&rt2x00dev->txstatus_timer, 179 hrtimer_start(&rt2x00dev->txstatus_timer,
180 ktime_set(0, TXSTATUS_READ_INTERVAL), 180 TXSTATUS_READ_INTERVAL,
181 HRTIMER_MODE_REL); 181 HRTIMER_MODE_REL);
182 return false; 182 return false;
183 } 183 }
@@ -204,7 +204,7 @@ static void rt2800usb_async_read_tx_status(struct rt2x00_dev *rt2x00dev)
204 204
205 /* Read TX_STA_FIFO register after 2 ms */ 205 /* Read TX_STA_FIFO register after 2 ms */
206 hrtimer_start(&rt2x00dev->txstatus_timer, 206 hrtimer_start(&rt2x00dev->txstatus_timer,
207 ktime_set(0, 2*TXSTATUS_READ_INTERVAL), 207 2 * TXSTATUS_READ_INTERVAL,
208 HRTIMER_MODE_REL); 208 HRTIMER_MODE_REL);
209} 209}
210 210
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 9236e40ac055..1800befa8b8b 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3044,7 +3044,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
3044static ktime_t fixup_debug_start(struct pci_dev *dev, 3044static ktime_t fixup_debug_start(struct pci_dev *dev,
3045 void (*fn)(struct pci_dev *dev)) 3045 void (*fn)(struct pci_dev *dev))
3046{ 3046{
3047 ktime_t calltime = ktime_set(0, 0); 3047 ktime_t calltime = 0;
3048 3048
3049 dev_dbg(&dev->dev, "calling %pF\n", fn); 3049 dev_dbg(&dev->dev, "calling %pF\n", fn);
3050 if (initcall_debug) { 3050 if (initcall_debug) {
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index 978e6d640572..9a32f8627ecc 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -283,7 +283,7 @@ static int __init msi_wmi_input_setup(void)
283 if (err) 283 if (err)
284 goto err_free_keymap; 284 goto err_free_keymap;
285 285
286 last_pressed = ktime_set(0, 0); 286 last_pressed = 0;
287 287
288 return 0; 288 return 0;
289 289
diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
index 15fed9d8f871..bfcd6fba6363 100644
--- a/drivers/power/reset/ltc2952-poweroff.c
+++ b/drivers/power/reset/ltc2952-poweroff.c
@@ -169,7 +169,7 @@ static void ltc2952_poweroff_kill(void)
169 169
170static void ltc2952_poweroff_default(struct ltc2952_poweroff *data) 170static void ltc2952_poweroff_default(struct ltc2952_poweroff *data)
171{ 171{
172 data->wde_interval = ktime_set(0, 300L*1E6L); 172 data->wde_interval = 300L * 1E6L;
173 data->trigger_delay = ktime_set(2, 500L*1E6L); 173 data->trigger_delay = ktime_set(2, 500L*1E6L);
174 174
175 hrtimer_init(&data->timer_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 175 hrtimer_init(&data->timer_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
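Note that only the sub-second interval loses its constructor; trigger_delay keeps ktime_set() because it combines seconds and nanoseconds. A short sketch of both forms, assuming the simplified scalar ktime_t and a local ktime_set() helper:

	#include <stdint.h>
	#include <stdio.h>

	typedef int64_t ktime_t;		/* simplified scalar ns */
	#define NSEC_PER_SEC  1000000000LL
	#define NSEC_PER_MSEC 1000000LL

	/* Illustrative constructor matching the old ktime_set(sec, nsec) use. */
	static ktime_t ktime_set(long secs, unsigned long nsecs)
	{
		return (ktime_t)secs * NSEC_PER_SEC + (ktime_t)nsecs;
	}

	int main(void)
	{
		/* Sub-second value: a plain nanosecond scalar is enough. */
		ktime_t wde_interval = 300 * NSEC_PER_MSEC;

		/* Seconds plus nanoseconds: ktime_set() still reads best. */
		ktime_t trigger_delay = ktime_set(2, 500 * NSEC_PER_MSEC);

		printf("interval=%lld ns, delay=%lld ns\n",
		       (long long)wde_interval, (long long)trigger_delay);
		return 0;
	}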
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 84a52db9b05f..fc0fa7577636 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -363,7 +363,7 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
363 rtc_timer_remove(rtc, &rtc->aie_timer); 363 rtc_timer_remove(rtc, &rtc->aie_timer);
364 364
365 rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); 365 rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
366 rtc->aie_timer.period = ktime_set(0, 0); 366 rtc->aie_timer.period = 0;
367 if (alarm->enabled) 367 if (alarm->enabled)
368 err = rtc_timer_enqueue(rtc, &rtc->aie_timer); 368 err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
369 369
@@ -391,11 +391,11 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
391 return err; 391 return err;
392 392
393 rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); 393 rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
394 rtc->aie_timer.period = ktime_set(0, 0); 394 rtc->aie_timer.period = 0;
395 395
396 /* Alarm has to be enabled & in the future for us to enqueue it */ 396 /* Alarm has to be enabled & in the future for us to enqueue it */
397 if (alarm->enabled && (rtc_tm_to_ktime(now).tv64 < 397 if (alarm->enabled && (rtc_tm_to_ktime(now) <
398 rtc->aie_timer.node.expires.tv64)) { 398 rtc->aie_timer.node.expires)) {
399 399
400 rtc->aie_timer.enabled = 1; 400 rtc->aie_timer.enabled = 1;
401 timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node); 401 timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
@@ -554,7 +554,7 @@ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
554 int count; 554 int count;
555 rtc = container_of(timer, struct rtc_device, pie_timer); 555 rtc = container_of(timer, struct rtc_device, pie_timer);
556 556
557 period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq); 557 period = NSEC_PER_SEC / rtc->irq_freq;
558 count = hrtimer_forward_now(timer, period); 558 count = hrtimer_forward_now(timer, period);
559 559
560 rtc_handle_legacy_irq(rtc, count, RTC_PF); 560 rtc_handle_legacy_irq(rtc, count, RTC_PF);
@@ -665,7 +665,7 @@ static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled)
665 return -1; 665 return -1;
666 666
667 if (enabled) { 667 if (enabled) {
668 ktime_t period = ktime_set(0, NSEC_PER_SEC / rtc->irq_freq); 668 ktime_t period = NSEC_PER_SEC / rtc->irq_freq;
669 669
670 hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL); 670 hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
671 } 671 }
@@ -766,7 +766,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
766 766
767 /* Skip over expired timers */ 767 /* Skip over expired timers */
768 while (next) { 768 while (next) {
769 if (next->expires.tv64 >= now.tv64) 769 if (next->expires >= now)
770 break; 770 break;
771 next = timerqueue_iterate_next(next); 771 next = timerqueue_iterate_next(next);
772 } 772 }
@@ -858,7 +858,7 @@ again:
858 __rtc_read_time(rtc, &tm); 858 __rtc_read_time(rtc, &tm);
859 now = rtc_tm_to_ktime(tm); 859 now = rtc_tm_to_ktime(tm);
860 while ((next = timerqueue_getnext(&rtc->timerqueue))) { 860 while ((next = timerqueue_getnext(&rtc->timerqueue))) {
861 if (next->expires.tv64 > now.tv64) 861 if (next->expires > now)
862 break; 862 break;
863 863
864 /* expire timer */ 864 /* expire timer */
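The periodic interval above is now plain integer arithmetic: one second in nanoseconds divided by the interrupt frequency. A tiny worked example of that calculation, using the simplified scalar ktime_t and an illustrative frequency:

	#include <stdint.h>
	#include <stdio.h>

	typedef int64_t ktime_t;		/* simplified scalar ns */
	#define NSEC_PER_SEC 1000000000LL

	/* Period of a periodic interrupt at the given frequency, in ns. */
	static ktime_t pie_period(int irq_freq)
	{
		return NSEC_PER_SEC / irq_freq;
	}

	int main(void)
	{
		/* 64 Hz periodic interrupt -> 15625000 ns per tick. */
		printf("period = %lld ns\n", (long long)pie_period(64));
		return 0;
	}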
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 6d75984a3d85..5fa699192864 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -333,7 +333,7 @@ void ap_wait(enum ap_wait wait)
333 case AP_WAIT_TIMEOUT: 333 case AP_WAIT_TIMEOUT:
334 spin_lock_bh(&ap_poll_timer_lock); 334 spin_lock_bh(&ap_poll_timer_lock);
335 if (!hrtimer_is_queued(&ap_poll_timer)) { 335 if (!hrtimer_is_queued(&ap_poll_timer)) {
336 hr_time = ktime_set(0, poll_timeout); 336 hr_time = poll_timeout;
337 hrtimer_forward_now(&ap_poll_timer, hr_time); 337 hrtimer_forward_now(&ap_poll_timer, hr_time);
338 hrtimer_restart(&ap_poll_timer); 338 hrtimer_restart(&ap_poll_timer);
339 } 339 }
@@ -860,7 +860,7 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
860 time > 120000000000ULL) 860 time > 120000000000ULL)
861 return -EINVAL; 861 return -EINVAL;
862 poll_timeout = time; 862 poll_timeout = time;
863 hr_time = ktime_set(0, poll_timeout); 863 hr_time = poll_timeout;
864 864
865 spin_lock_bh(&ap_poll_timer_lock); 865 spin_lock_bh(&ap_poll_timer_lock);
866 hrtimer_cancel(&ap_poll_timer); 866 hrtimer_cancel(&ap_poll_timer);
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 2583e8b50b21..3d3768aaab4f 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1694,7 +1694,7 @@ static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
1694 if (!vscsi->rsp_q_timer.started) { 1694 if (!vscsi->rsp_q_timer.started) {
1695 if (vscsi->rsp_q_timer.timer_pops < 1695 if (vscsi->rsp_q_timer.timer_pops <
1696 MAX_TIMER_POPS) { 1696 MAX_TIMER_POPS) {
1697 kt = ktime_set(0, WAIT_NANO_SECONDS); 1697 kt = WAIT_NANO_SECONDS;
1698 } else { 1698 } else {
1699 /* 1699 /*
1700 * slide the timeslice if the maximum 1700 * slide the timeslice if the maximum
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index cf04a364fd8b..03051e12a072 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -4085,7 +4085,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4085 jiffies_to_timespec(delta_jiff, &ts); 4085 jiffies_to_timespec(delta_jiff, &ts);
4086 kt = ktime_set(ts.tv_sec, ts.tv_nsec); 4086 kt = ktime_set(ts.tv_sec, ts.tv_nsec);
4087 } else 4087 } else
4088 kt = ktime_set(0, sdebug_ndelay); 4088 kt = sdebug_ndelay;
4089 if (NULL == sd_dp) { 4089 if (NULL == sd_dp) {
4090 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC); 4090 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
4091 if (NULL == sd_dp) 4091 if (NULL == sd_dp)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index a2c2817fc566..20e5e5fb048c 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -930,7 +930,7 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
930 if (!hba->outstanding_reqs && scaling->is_busy_started) { 930 if (!hba->outstanding_reqs && scaling->is_busy_started) {
931 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), 931 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
932 scaling->busy_start_t)); 932 scaling->busy_start_t));
933 scaling->busy_start_t = ktime_set(0, 0); 933 scaling->busy_start_t = 0;
934 scaling->is_busy_started = false; 934 scaling->is_busy_started = false;
935 } 935 }
936} 936}
@@ -6661,7 +6661,7 @@ start_window:
6661 scaling->busy_start_t = ktime_get(); 6661 scaling->busy_start_t = ktime_get();
6662 scaling->is_busy_started = true; 6662 scaling->is_busy_started = true;
6663 } else { 6663 } else {
6664 scaling->busy_start_t = ktime_set(0, 0); 6664 scaling->busy_start_t = 0;
6665 scaling->is_busy_started = false; 6665 scaling->is_busy_started = false;
6666 } 6666 }
6667 spin_unlock_irqrestore(hba->host->host_lock, flags); 6667 spin_unlock_irqrestore(hba->host->host_lock, flags);
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index de8e22ec3902..93e24ce61a3a 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -234,8 +234,8 @@ static void ci_otg_add_timer(struct ci_hdrc *ci, enum otg_fsm_timer t)
234 ktime_set(timer_sec, timer_nsec)); 234 ktime_set(timer_sec, timer_nsec));
235 ci->enabled_otg_timer_bits |= (1 << t); 235 ci->enabled_otg_timer_bits |= (1 << t);
236 if ((ci->next_otg_timer == NUM_OTG_FSM_TIMERS) || 236 if ((ci->next_otg_timer == NUM_OTG_FSM_TIMERS) ||
237 (ci->hr_timeouts[ci->next_otg_timer].tv64 > 237 (ci->hr_timeouts[ci->next_otg_timer] >
238 ci->hr_timeouts[t].tv64)) { 238 ci->hr_timeouts[t])) {
239 ci->next_otg_timer = t; 239 ci->next_otg_timer = t;
240 hrtimer_start_range_ns(&ci->otg_fsm_hrtimer, 240 hrtimer_start_range_ns(&ci->otg_fsm_hrtimer,
241 ci->hr_timeouts[t], NSEC_PER_MSEC, 241 ci->hr_timeouts[t], NSEC_PER_MSEC,
@@ -269,8 +269,8 @@ static void ci_otg_del_timer(struct ci_hdrc *ci, enum otg_fsm_timer t)
269 for_each_set_bit(cur_timer, &enabled_timer_bits, 269 for_each_set_bit(cur_timer, &enabled_timer_bits,
270 NUM_OTG_FSM_TIMERS) { 270 NUM_OTG_FSM_TIMERS) {
271 if ((next_timer == NUM_OTG_FSM_TIMERS) || 271 if ((next_timer == NUM_OTG_FSM_TIMERS) ||
272 (ci->hr_timeouts[next_timer].tv64 < 272 (ci->hr_timeouts[next_timer] <
273 ci->hr_timeouts[cur_timer].tv64)) 273 ci->hr_timeouts[cur_timer]))
274 next_timer = cur_timer; 274 next_timer = cur_timer;
275 } 275 }
276 } 276 }
@@ -397,14 +397,14 @@ static enum hrtimer_restart ci_otg_hrtimer_func(struct hrtimer *t)
397 397
398 now = ktime_get(); 398 now = ktime_get();
399 for_each_set_bit(cur_timer, &enabled_timer_bits, NUM_OTG_FSM_TIMERS) { 399 for_each_set_bit(cur_timer, &enabled_timer_bits, NUM_OTG_FSM_TIMERS) {
400 if (now.tv64 >= ci->hr_timeouts[cur_timer].tv64) { 400 if (now >= ci->hr_timeouts[cur_timer]) {
401 ci->enabled_otg_timer_bits &= ~(1 << cur_timer); 401 ci->enabled_otg_timer_bits &= ~(1 << cur_timer);
402 if (otg_timer_handlers[cur_timer]) 402 if (otg_timer_handlers[cur_timer])
403 ret = otg_timer_handlers[cur_timer](ci); 403 ret = otg_timer_handlers[cur_timer](ci);
404 } else { 404 } else {
405 if ((next_timer == NUM_OTG_FSM_TIMERS) || 405 if ((next_timer == NUM_OTG_FSM_TIMERS) ||
406 (ci->hr_timeouts[cur_timer].tv64 < 406 (ci->hr_timeouts[cur_timer] <
407 ci->hr_timeouts[next_timer].tv64)) 407 ci->hr_timeouts[next_timer]))
408 next_timer = cur_timer; 408 next_timer = cur_timer;
409 } 409 }
410 } 410 }
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index e8008fa35e1e..224717e63a53 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1113,8 +1113,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
1113 } 1113 }
1114 1114
1115 /* Delay the timer. */ 1115 /* Delay the timer. */
1116 hrtimer_start(&ncm->task_timer, 1116 hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
1117 ktime_set(0, TX_TIMEOUT_NSECS),
1118 HRTIMER_MODE_REL); 1117 HRTIMER_MODE_REL);
1119 1118
1120 /* Add the datagram position entries */ 1119 /* Add the datagram position entries */
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
index 69f50e6533a6..3893b5bafd87 100644
--- a/drivers/usb/host/ehci-timer.c
+++ b/drivers/usb/host/ehci-timer.c
@@ -88,8 +88,7 @@ static void ehci_enable_event(struct ehci_hcd *ehci, unsigned event,
88 ktime_t *timeout = &ehci->hr_timeouts[event]; 88 ktime_t *timeout = &ehci->hr_timeouts[event];
89 89
90 if (resched) 90 if (resched)
91 *timeout = ktime_add(ktime_get(), 91 *timeout = ktime_add(ktime_get(), event_delays_ns[event]);
92 ktime_set(0, event_delays_ns[event]));
93 ehci->enabled_hrtimer_events |= (1 << event); 92 ehci->enabled_hrtimer_events |= (1 << event);
94 93
95 /* Track only the lowest-numbered pending event */ 94 /* Track only the lowest-numbered pending event */
@@ -425,7 +424,7 @@ static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t)
425 */ 424 */
426 now = ktime_get(); 425 now = ktime_get();
427 for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) { 426 for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) {
428 if (now.tv64 >= ehci->hr_timeouts[e].tv64) 427 if (now >= ehci->hr_timeouts[e])
429 event_handlers[e](ehci); 428 event_handlers[e](ehci);
430 else 429 else
431 ehci_enable_event(ehci, e, false); 430 ehci_enable_event(ehci, e, false);
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 66efa9a67687..9d0b0518290a 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -1080,8 +1080,7 @@ static void fotg210_enable_event(struct fotg210_hcd *fotg210, unsigned event,
1080 ktime_t *timeout = &fotg210->hr_timeouts[event]; 1080 ktime_t *timeout = &fotg210->hr_timeouts[event];
1081 1081
1082 if (resched) 1082 if (resched)
1083 *timeout = ktime_add(ktime_get(), 1083 *timeout = ktime_add(ktime_get(), event_delays_ns[event]);
1084 ktime_set(0, event_delays_ns[event]));
1085 fotg210->enabled_hrtimer_events |= (1 << event); 1084 fotg210->enabled_hrtimer_events |= (1 << event);
1086 1085
1087 /* Track only the lowest-numbered pending event */ 1086 /* Track only the lowest-numbered pending event */
@@ -1381,7 +1380,7 @@ static enum hrtimer_restart fotg210_hrtimer_func(struct hrtimer *t)
1381 */ 1380 */
1382 now = ktime_get(); 1381 now = ktime_get();
1383 for_each_set_bit(e, &events, FOTG210_HRTIMER_NUM_EVENTS) { 1382 for_each_set_bit(e, &events, FOTG210_HRTIMER_NUM_EVENTS) {
1384 if (now.tv64 >= fotg210->hr_timeouts[e].tv64) 1383 if (now >= fotg210->hr_timeouts[e])
1385 event_handlers[e](fotg210); 1384 event_handlers[e](fotg210);
1386 else 1385 else
1387 fotg210_enable_event(fotg210, e, false); 1386 fotg210_enable_event(fotg210, e, false);
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index d4d7c56b48c7..16363852c034 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -197,8 +197,7 @@ static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
197 if (!list_empty(&controller->early_tx_list) && 197 if (!list_empty(&controller->early_tx_list) &&
198 !hrtimer_is_queued(&controller->early_tx)) { 198 !hrtimer_is_queued(&controller->early_tx)) {
199 ret = HRTIMER_RESTART; 199 ret = HRTIMER_RESTART;
200 hrtimer_forward_now(&controller->early_tx, 200 hrtimer_forward_now(&controller->early_tx, 20 * NSEC_PER_USEC);
201 ktime_set(0, 20 * NSEC_PER_USEC));
202 } 201 }
203 202
204 spin_unlock_irqrestore(&musb->lock, flags); 203 spin_unlock_irqrestore(&musb->lock, flags);
@@ -280,9 +279,9 @@ static void cppi41_dma_callback(void *private_data)
280 unsigned long usecs = cppi41_channel->total_len / 10; 279 unsigned long usecs = cppi41_channel->total_len / 10;
281 280
282 hrtimer_start_range_ns(&controller->early_tx, 281 hrtimer_start_range_ns(&controller->early_tx,
283 ktime_set(0, usecs * NSEC_PER_USEC), 282 usecs * NSEC_PER_USEC,
284 20 * NSEC_PER_USEC, 283 20 * NSEC_PER_USEC,
285 HRTIMER_MODE_REL); 284 HRTIMER_MODE_REL);
286 } 285 }
287 286
288out: 287out:
diff --git a/fs/aio.c b/fs/aio.c
index 955c5241a8f2..4ab67e8cb776 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1285,7 +1285,7 @@ static long read_events(struct kioctx *ctx, long min_nr, long nr,
1285 struct io_event __user *event, 1285 struct io_event __user *event,
1286 struct timespec __user *timeout) 1286 struct timespec __user *timeout)
1287{ 1287{
1288 ktime_t until = { .tv64 = KTIME_MAX }; 1288 ktime_t until = KTIME_MAX;
1289 long ret = 0; 1289 long ret = 0;
1290 1290
1291 if (timeout) { 1291 if (timeout) {
@@ -1311,7 +1311,7 @@ static long read_events(struct kioctx *ctx, long min_nr, long nr,
1311 * the ringbuffer empty. So in practice we should be ok, but it's 1311 * the ringbuffer empty. So in practice we should be ok, but it's
1312 * something to be aware of when touching this code. 1312 * something to be aware of when touching this code.
1313 */ 1313 */
1314 if (until.tv64 == 0) 1314 if (until == 0)
1315 aio_read_events(ctx, min_nr, nr, event, &ret); 1315 aio_read_events(ctx, min_nr, nr, event, &ret);
1316 else 1316 else
1317 wait_event_interruptible_hrtimeout(ctx->wait, 1317 wait_event_interruptible_hrtimeout(ctx->wait,
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 35502d4046f5..6df332296c66 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1395,7 +1395,6 @@ static int nodeid_warned(int nodeid, int num_nodes, int *warned)
1395void dlm_scan_waiters(struct dlm_ls *ls) 1395void dlm_scan_waiters(struct dlm_ls *ls)
1396{ 1396{
1397 struct dlm_lkb *lkb; 1397 struct dlm_lkb *lkb;
1398 ktime_t zero = ktime_set(0, 0);
1399 s64 us; 1398 s64 us;
1400 s64 debug_maxus = 0; 1399 s64 debug_maxus = 0;
1401 u32 debug_scanned = 0; 1400 u32 debug_scanned = 0;
@@ -1409,7 +1408,7 @@ void dlm_scan_waiters(struct dlm_ls *ls)
1409 mutex_lock(&ls->ls_waiters_mutex); 1408 mutex_lock(&ls->ls_waiters_mutex);
1410 1409
1411 list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) { 1410 list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
1412 if (ktime_equal(lkb->lkb_wait_time, zero)) 1411 if (!lkb->lkb_wait_time)
1413 continue; 1412 continue;
1414 1413
1415 debug_scanned++; 1414 debug_scanned++;
@@ -1419,7 +1418,7 @@ void dlm_scan_waiters(struct dlm_ls *ls)
1419 if (us < dlm_config.ci_waitwarn_us) 1418 if (us < dlm_config.ci_waitwarn_us)
1420 continue; 1419 continue;
1421 1420
1422 lkb->lkb_wait_time = zero; 1421 lkb->lkb_wait_time = 0;
1423 1422
1424 debug_expired++; 1423 debug_expired++;
1425 if (us > debug_maxus) 1424 if (us > debug_maxus)
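With ktime_equal() gone, a zero timestamp is tested with ordinary integer operators, which is why the local zero variable above could be dropped entirely. A small sketch, using the simplified scalar type:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdbool.h>

	typedef int64_t ktime_t;	/* simplified scalar ns */

	/* A lock that has never waited carries a zero wait_time. */
	static bool has_wait_time(ktime_t wait_time)
	{
		return wait_time != 0;	/* was: !ktime_equal(wait_time, zero) */
	}

	int main(void)
	{
		printf("%d %d\n", has_wait_time(0), has_wait_time(1234));
		return 0;
	}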
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f7b3ba61add5..94f50cac91c6 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -695,7 +695,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
695 gl->gl_target = LM_ST_UNLOCKED; 695 gl->gl_target = LM_ST_UNLOCKED;
696 gl->gl_demote_state = LM_ST_EXCLUSIVE; 696 gl->gl_demote_state = LM_ST_EXCLUSIVE;
697 gl->gl_ops = glops; 697 gl->gl_ops = glops;
698 gl->gl_dstamp = ktime_set(0, 0); 698 gl->gl_dstamp = 0;
699 preempt_disable(); 699 preempt_disable();
700 /* We use the global stats to estimate the initial per-glock stats */ 700 /* We use the global stats to estimate the initial per-glock stats */
701 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type]; 701 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 45962fe5098c..0ca4af8cca5d 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -619,12 +619,11 @@ nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
619 struct nfs4_ff_layoutstat *layoutstat, 619 struct nfs4_ff_layoutstat *layoutstat,
620 ktime_t now) 620 ktime_t now)
621{ 621{
622 static const ktime_t notime = {0};
623 s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL; 622 s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
624 struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout); 623 struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
625 624
626 nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now); 625 nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
627 if (ktime_equal(mirror->start_time, notime)) 626 if (!mirror->start_time)
628 mirror->start_time = now; 627 mirror->start_time = now;
629 if (mirror->report_interval != 0) 628 if (mirror->report_interval != 0)
630 report_interval = (s64)mirror->report_interval * 1000LL; 629 report_interval = (s64)mirror->report_interval * 1000LL;
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 96a155ab5059..f6e871760f8d 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1250,7 +1250,7 @@ static int o2hb_thread(void *data)
1250 1250
1251 mlog(ML_HEARTBEAT, 1251 mlog(ML_HEARTBEAT,
1252 "start = %lld, end = %lld, msec = %u, ret = %d\n", 1252 "start = %lld, end = %lld, msec = %u, ret = %d\n",
1253 before_hb.tv64, after_hb.tv64, elapsed_msec, ret); 1253 before_hb, after_hb, elapsed_msec, ret);
1254 1254
1255 if (!kthread_should_stop() && 1255 if (!kthread_should_stop() &&
1256 elapsed_msec < reg->hr_timeout_ms) { 1256 elapsed_msec < reg->hr_timeout_ms) {
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 9ae4abb4110b..c173cc196175 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -55,7 +55,7 @@ static inline bool isalarm(struct timerfd_ctx *ctx)
55/* 55/*
56 * This gets called when the timer event triggers. We set the "expired" 56 * This gets called when the timer event triggers. We set the "expired"
57 * flag, but we do not re-arm the timer (in case it's necessary, 57 * flag, but we do not re-arm the timer (in case it's necessary,
58 * tintv.tv64 != 0) until the timer is accessed. 58 * tintv != 0) until the timer is accessed.
59 */ 59 */
60static void timerfd_triggered(struct timerfd_ctx *ctx) 60static void timerfd_triggered(struct timerfd_ctx *ctx)
61{ 61{
@@ -93,7 +93,7 @@ static enum alarmtimer_restart timerfd_alarmproc(struct alarm *alarm,
93 */ 93 */
94void timerfd_clock_was_set(void) 94void timerfd_clock_was_set(void)
95{ 95{
96 ktime_t moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 }); 96 ktime_t moffs = ktime_mono_to_real(0);
97 struct timerfd_ctx *ctx; 97 struct timerfd_ctx *ctx;
98 unsigned long flags; 98 unsigned long flags;
99 99
@@ -102,8 +102,8 @@ void timerfd_clock_was_set(void)
102 if (!ctx->might_cancel) 102 if (!ctx->might_cancel)
103 continue; 103 continue;
104 spin_lock_irqsave(&ctx->wqh.lock, flags); 104 spin_lock_irqsave(&ctx->wqh.lock, flags);
105 if (ctx->moffs.tv64 != moffs.tv64) { 105 if (ctx->moffs != moffs) {
106 ctx->moffs.tv64 = KTIME_MAX; 106 ctx->moffs = KTIME_MAX;
107 ctx->ticks++; 107 ctx->ticks++;
108 wake_up_locked(&ctx->wqh); 108 wake_up_locked(&ctx->wqh);
109 } 109 }
@@ -124,9 +124,9 @@ static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
124 124
125static bool timerfd_canceled(struct timerfd_ctx *ctx) 125static bool timerfd_canceled(struct timerfd_ctx *ctx)
126{ 126{
127 if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX) 127 if (!ctx->might_cancel || ctx->moffs != KTIME_MAX)
128 return false; 128 return false;
129 ctx->moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 }); 129 ctx->moffs = ktime_mono_to_real(0);
130 return true; 130 return true;
131} 131}
132 132
@@ -155,7 +155,7 @@ static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
155 else 155 else
156 remaining = hrtimer_expires_remaining_adjusted(&ctx->t.tmr); 156 remaining = hrtimer_expires_remaining_adjusted(&ctx->t.tmr);
157 157
158 return remaining.tv64 < 0 ? ktime_set(0, 0): remaining; 158 return remaining < 0 ? 0: remaining;
159} 159}
160 160
161static int timerfd_setup(struct timerfd_ctx *ctx, int flags, 161static int timerfd_setup(struct timerfd_ctx *ctx, int flags,
@@ -184,7 +184,7 @@ static int timerfd_setup(struct timerfd_ctx *ctx, int flags,
184 ctx->t.tmr.function = timerfd_tmrproc; 184 ctx->t.tmr.function = timerfd_tmrproc;
185 } 185 }
186 186
187 if (texp.tv64 != 0) { 187 if (texp != 0) {
188 if (isalarm(ctx)) { 188 if (isalarm(ctx)) {
189 if (flags & TFD_TIMER_ABSTIME) 189 if (flags & TFD_TIMER_ABSTIME)
190 alarm_start(&ctx->t.alarm, texp); 190 alarm_start(&ctx->t.alarm, texp);
@@ -261,9 +261,9 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
261 if (ctx->ticks) { 261 if (ctx->ticks) {
262 ticks = ctx->ticks; 262 ticks = ctx->ticks;
263 263
264 if (ctx->expired && ctx->tintv.tv64) { 264 if (ctx->expired && ctx->tintv) {
265 /* 265 /*
266 * If tintv.tv64 != 0, this is a periodic timer that 266 * If tintv != 0, this is a periodic timer that
267 * needs to be re-armed. We avoid doing it in the timer 267 * needs to be re-armed. We avoid doing it in the timer
268 * callback to avoid DoS attacks specifying a very 268 * callback to avoid DoS attacks specifying a very
269 * short timer period. 269 * short timer period.
@@ -410,7 +410,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
410 else 410 else
411 hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS); 411 hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS);
412 412
413 ctx->moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 }); 413 ctx->moffs = ktime_mono_to_real(0);
414 414
415 ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx, 415 ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx,
416 O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS)); 416 O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS));
@@ -469,7 +469,7 @@ static int do_timerfd_settime(int ufd, int flags,
469 * We do not update "ticks" and "expired" since the timer will be 469 * We do not update "ticks" and "expired" since the timer will be
470 * re-programmed again in the following timerfd_setup() call. 470 * re-programmed again in the following timerfd_setup() call.
471 */ 471 */
472 if (ctx->expired && ctx->tintv.tv64) { 472 if (ctx->expired && ctx->tintv) {
473 if (isalarm(ctx)) 473 if (isalarm(ctx))
474 alarm_forward_now(&ctx->t.alarm, ctx->tintv); 474 alarm_forward_now(&ctx->t.alarm, ctx->tintv);
475 else 475 else
@@ -499,7 +499,7 @@ static int do_timerfd_gettime(int ufd, struct itimerspec *t)
499 ctx = f.file->private_data; 499 ctx = f.file->private_data;
500 500
501 spin_lock_irq(&ctx->wqh.lock); 501 spin_lock_irq(&ctx->wqh.lock);
502 if (ctx->expired && ctx->tintv.tv64) { 502 if (ctx->expired && ctx->tintv) {
503 ctx->expired = 0; 503 ctx->expired = 0;
504 504
505 if (isalarm(ctx)) { 505 if (isalarm(ctx)) {
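timerfd shows most flavours of the conversion in one place: struct-literal initialisers such as (ktime_t){ .tv64 = 0 } become 0, KTIME_MAX is assigned directly, and an expired remaining time clamps to zero with ordinary operators. A compact sketch of the clamp, assuming the simplified scalar ktime_t:

	#include <stdint.h>
	#include <stdio.h>

	typedef int64_t ktime_t;		/* simplified scalar ns */
	#define KTIME_MAX INT64_MAX

	/* Clamp a (possibly already expired) remaining time at zero. */
	static ktime_t remaining_or_zero(ktime_t remaining)
	{
		return remaining < 0 ? 0 : remaining;	/* was ktime_set(0, 0) */
	}

	int main(void)
	{
		ktime_t moffs = KTIME_MAX;		/* direct assignment now */

		printf("%lld %lld %lld\n",
		       (long long)remaining_or_zero(-5),
		       (long long)remaining_or_zero(42),
		       (long long)moffs);
		return 0;
	}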
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index dda39d8fa189..b717ed9d2b75 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -25,13 +25,13 @@
25 25
26struct arch_timer_kvm { 26struct arch_timer_kvm {
27 /* Virtual offset */ 27 /* Virtual offset */
28 cycle_t cntvoff; 28 u64 cntvoff;
29}; 29};
30 30
31struct arch_timer_cpu { 31struct arch_timer_cpu {
32 /* Registers: control register, timer value */ 32 /* Registers: control register, timer value */
33 u32 cntv_ctl; /* Saved/restored */ 33 u32 cntv_ctl; /* Saved/restored */
34 cycle_t cntv_cval; /* Saved/restored */ 34 u64 cntv_cval; /* Saved/restored */
35 35
36 /* 36 /*
37 * Anything that is not used directly from assembly code goes 37 * Anything that is not used directly from assembly code goes
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 65602d395a52..e315d04a2fd9 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -75,8 +75,8 @@ struct module;
75 * structure. 75 * structure.
76 */ 76 */
77struct clocksource { 77struct clocksource {
78 cycle_t (*read)(struct clocksource *cs); 78 u64 (*read)(struct clocksource *cs);
79 cycle_t mask; 79 u64 mask;
80 u32 mult; 80 u32 mult;
81 u32 shift; 81 u32 shift;
82 u64 max_idle_ns; 82 u64 max_idle_ns;
@@ -98,8 +98,8 @@ struct clocksource {
98#ifdef CONFIG_CLOCKSOURCE_WATCHDOG 98#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
99 /* Watchdog related data, used by the framework */ 99 /* Watchdog related data, used by the framework */
100 struct list_head wd_list; 100 struct list_head wd_list;
101 cycle_t cs_last; 101 u64 cs_last;
102 cycle_t wd_last; 102 u64 wd_last;
103#endif 103#endif
104 struct module *owner; 104 struct module *owner;
105}; 105};
@@ -117,7 +117,7 @@ struct clocksource {
117#define CLOCK_SOURCE_RESELECT 0x100 117#define CLOCK_SOURCE_RESELECT 0x100
118 118
119/* simplify initialization of mask field */ 119/* simplify initialization of mask field */
120#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) 120#define CLOCKSOURCE_MASK(bits) (u64)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
121 121
122static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from) 122static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
123{ 123{
@@ -176,7 +176,7 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
176 * 176 *
177 * XXX - This could use some mult_lxl_ll() asm optimization 177 * XXX - This could use some mult_lxl_ll() asm optimization
178 */ 178 */
179static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift) 179static inline s64 clocksource_cyc2ns(u64 cycles, u32 mult, u32 shift)
180{ 180{
181 return ((u64) cycles * mult) >> shift; 181 return ((u64) cycles * mult) >> shift;
182} 182}
@@ -236,13 +236,13 @@ static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz
236 236
237extern int timekeeping_notify(struct clocksource *clock); 237extern int timekeeping_notify(struct clocksource *clock);
238 238
239extern cycle_t clocksource_mmio_readl_up(struct clocksource *); 239extern u64 clocksource_mmio_readl_up(struct clocksource *);
240extern cycle_t clocksource_mmio_readl_down(struct clocksource *); 240extern u64 clocksource_mmio_readl_down(struct clocksource *);
241extern cycle_t clocksource_mmio_readw_up(struct clocksource *); 241extern u64 clocksource_mmio_readw_up(struct clocksource *);
242extern cycle_t clocksource_mmio_readw_down(struct clocksource *); 242extern u64 clocksource_mmio_readw_down(struct clocksource *);
243 243
244extern int clocksource_mmio_init(void __iomem *, const char *, 244extern int clocksource_mmio_init(void __iomem *, const char *,
245 unsigned long, int, unsigned, cycle_t (*)(struct clocksource *)); 245 unsigned long, int, unsigned, u64 (*)(struct clocksource *));
246 246
247extern int clocksource_i8253_init(void); 247extern int clocksource_i8253_init(void);
248 248
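clocksource_cyc2ns() keeps its fixed-point conversion and only the cycle argument type changes to u64: ns = (cycles * mult) >> shift. A worked example of that arithmetic; the mult/shift pair below is an illustrative assumption, not taken from a real clocksource:

	#include <stdint.h>
	#include <stdio.h>

	/* Fixed-point cycles -> nanoseconds, as in clocksource_cyc2ns(). */
	static int64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
	{
		return (int64_t)((cycles * mult) >> shift);
	}

	int main(void)
	{
		/* Illustrative 1 MHz clock: 1 cycle = 1000 ns, so mult and
		 * shift are chosen such that (mult >> shift) == 1000. */
		uint32_t mult = 1000 << 20;
		uint32_t shift = 20;

		printf("%lld ns\n", (long long)cyc2ns(123, mult, shift));	/* 123000 */
		return 0;
	}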
diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h
index 1f79b20918b1..4334106f44c3 100644
--- a/include/linux/dw_apb_timer.h
+++ b/include/linux/dw_apb_timer.h
@@ -50,6 +50,6 @@ dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base,
50 unsigned long freq); 50 unsigned long freq);
51void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs); 51void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs);
52void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs); 52void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs);
53cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs); 53u64 dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs);
54 54
55#endif /* __DW_APB_TIMER_H__ */ 55#endif /* __DW_APB_TIMER_H__ */
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 6435f46d6e13..7c5b694864cd 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -1,14 +1,14 @@
1#ifndef _LINUX_FUTEX_H 1#ifndef _LINUX_FUTEX_H
2#define _LINUX_FUTEX_H 2#define _LINUX_FUTEX_H
3 3
4#include <linux/ktime.h>
4#include <uapi/linux/futex.h> 5#include <uapi/linux/futex.h>
5 6
6struct inode; 7struct inode;
7struct mm_struct; 8struct mm_struct;
8struct task_struct; 9struct task_struct;
9union ktime;
10 10
11long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout, 11long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
12 u32 __user *uaddr2, u32 val2, u32 val3); 12 u32 __user *uaddr2, u32 val2, u32 val3);
13 13
14extern int 14extern int
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 5e00f80b1535..cdab81ba29f8 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -228,8 +228,8 @@ static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t t
228 228
229static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64) 229static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
230{ 230{
231 timer->node.expires.tv64 = tv64; 231 timer->node.expires = tv64;
232 timer->_softexpires.tv64 = tv64; 232 timer->_softexpires = tv64;
233} 233}
234 234
235static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time) 235static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
@@ -256,11 +256,11 @@ static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
256 256
257static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer) 257static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
258{ 258{
259 return timer->node.expires.tv64; 259 return timer->node.expires;
260} 260}
261static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer) 261static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
262{ 262{
263 return timer->_softexpires.tv64; 263 return timer->_softexpires;
264} 264}
265 265
266static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer) 266static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
@@ -297,7 +297,7 @@ extern void hrtimer_peek_ahead_timers(void);
297 * this resolution values. 297 * this resolution values.
298 */ 298 */
299# define HIGH_RES_NSEC 1 299# define HIGH_RES_NSEC 1
300# define KTIME_HIGH_RES (ktime_t) { .tv64 = HIGH_RES_NSEC } 300# define KTIME_HIGH_RES (HIGH_RES_NSEC)
301# define MONOTONIC_RES_NSEC HIGH_RES_NSEC 301# define MONOTONIC_RES_NSEC HIGH_RES_NSEC
302# define KTIME_MONOTONIC_RES KTIME_HIGH_RES 302# define KTIME_MONOTONIC_RES KTIME_HIGH_RES
303 303
@@ -333,7 +333,7 @@ __hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
333 * hrtimer_start_range_ns() to prevent short timeouts. 333 * hrtimer_start_range_ns() to prevent short timeouts.
334 */ 334 */
335 if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel) 335 if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
336 rem.tv64 -= hrtimer_resolution; 336 rem -= hrtimer_resolution;
337 return rem; 337 return rem;
338} 338}
339 339
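With the union gone, the *_tv64 accessors above reduce to plain s64 loads and stores. A simplified stand-in follows; the struct below is not the real struct hrtimer, it only keeps the two fields the accessors touch.

/*
 * Sketch of the accessor pattern after the union removal; local
 * fake_hrtimer stands in for struct hrtimer / its timerqueue node.
 */
#include <stdint.h>
#include <assert.h>

typedef int64_t s64;
typedef s64 ktime_t;		/* scalar nanoseconds, as in the patch */

struct fake_hrtimer {
	ktime_t expires;	/* stands in for node.expires */
	ktime_t _softexpires;
};

static inline void hrtimer_set_expires_tv64(struct fake_hrtimer *t, s64 tv64)
{
	t->expires = tv64;	/* formerly t->node.expires.tv64 = tv64 */
	t->_softexpires = tv64;
}

static inline s64 hrtimer_get_expires_tv64(const struct fake_hrtimer *t)
{
	return t->expires;	/* plain read, no .tv64 member anymore */
}

int main(void)
{
	struct fake_hrtimer t;

	hrtimer_set_expires_tv64(&t, 1500 * 1000LL);	/* 1.5 ms */
	assert(hrtimer_get_expires_tv64(&t) == 1500000);
	return 0;
}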
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 81f930b0bca9..7b49c71c968b 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -259,11 +259,11 @@ extern void gic_init(unsigned long gic_base_addr,
259 unsigned long gic_addrspace_size, unsigned int cpu_vec, 259 unsigned long gic_addrspace_size, unsigned int cpu_vec,
260 unsigned int irqbase); 260 unsigned int irqbase);
261extern void gic_clocksource_init(unsigned int); 261extern void gic_clocksource_init(unsigned int);
262extern cycle_t gic_read_count(void); 262extern u64 gic_read_count(void);
263extern unsigned int gic_get_count_width(void); 263extern unsigned int gic_get_count_width(void);
264extern cycle_t gic_read_compare(void); 264extern u64 gic_read_compare(void);
265extern void gic_write_compare(cycle_t cnt); 265extern void gic_write_compare(u64 cnt);
266extern void gic_write_cpu_compare(cycle_t cnt, int cpu); 266extern void gic_write_cpu_compare(u64 cnt, int cpu);
267extern void gic_start_count(void); 267extern void gic_start_count(void);
268extern void gic_stop_count(void); 268extern void gic_stop_count(void);
269extern int gic_get_c0_compare_int(void); 269extern int gic_get_c0_compare_int(void);
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 0fb7ffb1775f..0c8bd45c8206 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -24,21 +24,8 @@
24#include <linux/time.h> 24#include <linux/time.h>
25#include <linux/jiffies.h> 25#include <linux/jiffies.h>
26 26
27/* 27/* Nanosecond scalar representation for kernel time values */
28 * ktime_t: 28typedef s64 ktime_t;
29 *
30 * A single 64-bit variable is used to store the hrtimers
31 * internal representation of time values in scalar nanoseconds. The
32 * design plays out best on 64-bit CPUs, where most conversions are
33 * NOPs and most arithmetic ktime_t operations are plain arithmetic
34 * operations.
35 *
36 */
37union ktime {
38 s64 tv64;
39};
40
41typedef union ktime ktime_t; /* Kill this */
42 29
43/** 30/**
44 * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value 31 * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
@@ -50,39 +37,34 @@ typedef union ktime ktime_t; /* Kill this */
50static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs) 37static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
51{ 38{
52 if (unlikely(secs >= KTIME_SEC_MAX)) 39 if (unlikely(secs >= KTIME_SEC_MAX))
53 return (ktime_t){ .tv64 = KTIME_MAX }; 40 return KTIME_MAX;
54 41
55 return (ktime_t) { .tv64 = secs * NSEC_PER_SEC + (s64)nsecs }; 42 return secs * NSEC_PER_SEC + (s64)nsecs;
56} 43}
57 44
58/* Subtract two ktime_t variables. rem = lhs -rhs: */ 45/* Subtract two ktime_t variables. rem = lhs -rhs: */
59#define ktime_sub(lhs, rhs) \ 46#define ktime_sub(lhs, rhs) ((lhs) - (rhs))
60 ({ (ktime_t){ .tv64 = (lhs).tv64 - (rhs).tv64 }; })
61 47
62/* Add two ktime_t variables. res = lhs + rhs: */ 48/* Add two ktime_t variables. res = lhs + rhs: */
63#define ktime_add(lhs, rhs) \ 49#define ktime_add(lhs, rhs) ((lhs) + (rhs))
64 ({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; })
65 50
66/* 51/*
67 * Same as ktime_add(), but avoids undefined behaviour on overflow; however, 52 * Same as ktime_add(), but avoids undefined behaviour on overflow; however,
68 * this means that you must check the result for overflow yourself. 53 * this means that you must check the result for overflow yourself.
69 */ 54 */
70#define ktime_add_unsafe(lhs, rhs) \ 55#define ktime_add_unsafe(lhs, rhs) ((u64) (lhs) + (rhs))
71 ({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; })
72 56
73/* 57/*
74 * Add a ktime_t variable and a scalar nanosecond value. 58 * Add a ktime_t variable and a scalar nanosecond value.
75 * res = kt + nsval: 59 * res = kt + nsval:
76 */ 60 */
77#define ktime_add_ns(kt, nsval) \ 61#define ktime_add_ns(kt, nsval) ((kt) + (nsval))
78 ({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; })
79 62
80/* 63/*
81 * Subtract a scalar nanosecod from a ktime_t variable 64 * Subtract a scalar nanosecod from a ktime_t variable
82 * res = kt - nsval: 65 * res = kt - nsval:
83 */ 66 */
84#define ktime_sub_ns(kt, nsval) \ 67#define ktime_sub_ns(kt, nsval) ((kt) - (nsval))
85 ({ (ktime_t){ .tv64 = (kt).tv64 - (nsval) }; })
86 68
87/* convert a timespec to ktime_t format: */ 69/* convert a timespec to ktime_t format: */
88static inline ktime_t timespec_to_ktime(struct timespec ts) 70static inline ktime_t timespec_to_ktime(struct timespec ts)
@@ -103,31 +85,16 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
103} 85}
104 86
105/* Map the ktime_t to timespec conversion to ns_to_timespec function */ 87/* Map the ktime_t to timespec conversion to ns_to_timespec function */
106#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64) 88#define ktime_to_timespec(kt) ns_to_timespec((kt))
107 89
108/* Map the ktime_t to timespec conversion to ns_to_timespec function */ 90/* Map the ktime_t to timespec conversion to ns_to_timespec function */
109#define ktime_to_timespec64(kt) ns_to_timespec64((kt).tv64) 91#define ktime_to_timespec64(kt) ns_to_timespec64((kt))
110 92
111/* Map the ktime_t to timeval conversion to ns_to_timeval function */ 93/* Map the ktime_t to timeval conversion to ns_to_timeval function */
112#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64) 94#define ktime_to_timeval(kt) ns_to_timeval((kt))
113 95
114/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ 96/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
115#define ktime_to_ns(kt) ((kt).tv64) 97#define ktime_to_ns(kt) (kt)
116
117
118/**
119 * ktime_equal - Compares two ktime_t variables to see if they are equal
120 * @cmp1: comparable1
121 * @cmp2: comparable2
122 *
123 * Compare two ktime_t variables.
124 *
125 * Return: 1 if equal.
126 */
127static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
128{
129 return cmp1.tv64 == cmp2.tv64;
130}
131 98
132/** 99/**
133 * ktime_compare - Compares two ktime_t variables for less, greater or equal 100 * ktime_compare - Compares two ktime_t variables for less, greater or equal
@@ -141,9 +108,9 @@ static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
141 */ 108 */
142static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2) 109static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
143{ 110{
144 if (cmp1.tv64 < cmp2.tv64) 111 if (cmp1 < cmp2)
145 return -1; 112 return -1;
146 if (cmp1.tv64 > cmp2.tv64) 113 if (cmp1 > cmp2)
147 return 1; 114 return 1;
148 return 0; 115 return 0;
149} 116}
@@ -182,7 +149,7 @@ static inline s64 ktime_divns(const ktime_t kt, s64 div)
182 */ 149 */
183 BUG_ON(div < 0); 150 BUG_ON(div < 0);
184 if (__builtin_constant_p(div) && !(div >> 32)) { 151 if (__builtin_constant_p(div) && !(div >> 32)) {
185 s64 ns = kt.tv64; 152 s64 ns = kt;
186 u64 tmp = ns < 0 ? -ns : ns; 153 u64 tmp = ns < 0 ? -ns : ns;
187 154
188 do_div(tmp, div); 155 do_div(tmp, div);
@@ -199,7 +166,7 @@ static inline s64 ktime_divns(const ktime_t kt, s64 div)
199 * so catch them on 64bit as well. 166 * so catch them on 64bit as well.
200 */ 167 */
201 WARN_ON(div < 0); 168 WARN_ON(div < 0);
202 return kt.tv64 / div; 169 return kt / div;
203} 170}
204#endif 171#endif
205 172
@@ -256,7 +223,7 @@ extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
256static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt, 223static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
257 struct timespec *ts) 224 struct timespec *ts)
258{ 225{
259 if (kt.tv64) { 226 if (kt) {
260 *ts = ktime_to_timespec(kt); 227 *ts = ktime_to_timespec(kt);
261 return true; 228 return true;
262 } else { 229 } else {
@@ -275,7 +242,7 @@ static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
275static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt, 242static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
276 struct timespec64 *ts) 243 struct timespec64 *ts)
277{ 244{
278 if (kt.tv64) { 245 if (kt) {
279 *ts = ktime_to_timespec64(kt); 246 *ts = ktime_to_timespec64(kt);
280 return true; 247 return true;
281 } else { 248 } else {
@@ -290,20 +257,16 @@ static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
290 * this resolution values. 257 * this resolution values.
291 */ 258 */
292#define LOW_RES_NSEC TICK_NSEC 259#define LOW_RES_NSEC TICK_NSEC
293#define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC } 260#define KTIME_LOW_RES (LOW_RES_NSEC)
294 261
295static inline ktime_t ns_to_ktime(u64 ns) 262static inline ktime_t ns_to_ktime(u64 ns)
296{ 263{
297 static const ktime_t ktime_zero = { .tv64 = 0 }; 264 return ns;
298
299 return ktime_add_ns(ktime_zero, ns);
300} 265}
301 266
302static inline ktime_t ms_to_ktime(u64 ms) 267static inline ktime_t ms_to_ktime(u64 ms)
303{ 268{
304 static const ktime_t ktime_zero = { .tv64 = 0 }; 269 return ms * NSEC_PER_MSEC;
305
306 return ktime_add_ms(ktime_zero, ms);
307} 270}
308 271
309# include <linux/timekeeping.h> 272# include <linux/timekeeping.h>
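After this hunk every ktime.h helper is ordinary 64-bit arithmetic. The sketch below re-declares a few of the patched helpers in userspace; KTIME_MAX, KTIME_SEC_MAX and the NSEC_PER_* constants are copied in as assumptions rather than pulled from the kernel headers.

/*
 * Userspace sketch of the scalar ktime_t helpers; local re-declarations
 * only, values chosen to match the usual kernel constants.
 */
#include <stdint.h>
#include <stdio.h>

typedef int64_t s64;
typedef s64 ktime_t;

#define NSEC_PER_SEC	1000000000LL
#define NSEC_PER_MSEC	1000000LL
#define KTIME_MAX	((s64)~((uint64_t)1 << 63))
#define KTIME_SEC_MAX	(KTIME_MAX / NSEC_PER_SEC)

static inline ktime_t ktime_set(s64 secs, unsigned long nsecs)
{
	if (secs >= KTIME_SEC_MAX)
		return KTIME_MAX;
	return secs * NSEC_PER_SEC + (s64)nsecs;
}

#define ktime_add(lhs, rhs)	((lhs) + (rhs))
#define ktime_sub(lhs, rhs)	((lhs) - (rhs))
#define ktime_to_ns(kt)		(kt)

static inline int ktime_compare(ktime_t a, ktime_t b)
{
	return (a < b) ? -1 : (a > b) ? 1 : 0;
}

static inline ktime_t ms_to_ktime(uint64_t ms)
{
	return ms * NSEC_PER_MSEC;
}

int main(void)
{
	ktime_t deadline = ktime_add(ktime_set(1, 250), ms_to_ktime(3));
	ktime_t now = ktime_set(1, 0);

	printf("delta=%lld ns cmp=%d\n",
	       (long long)ktime_to_ns(ktime_sub(deadline, now)),
	       ktime_compare(now, deadline));	/* prints 3000250 and -1 */
	return 0;
}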
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index c9f379689dd0..93bdb3485192 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1460,7 +1460,7 @@ int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
1460int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, 1460int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
1461 u32 max_range_qpn); 1461 u32 max_range_qpn);
1462 1462
1463cycle_t mlx4_read_clock(struct mlx4_dev *dev); 1463u64 mlx4_read_clock(struct mlx4_dev *dev);
1464 1464
1465struct mlx4_active_ports { 1465struct mlx4_active_ports {
1466 DECLARE_BITMAP(ports, MLX4_MAX_PORTS); 1466 DECLARE_BITMAP(ports, MLX4_MAX_PORTS);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ac7fa34db8a7..b53c0cfd417e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3227,7 +3227,7 @@ static inline ktime_t net_timedelta(ktime_t t)
3227 3227
3228static inline ktime_t net_invalid_timestamp(void) 3228static inline ktime_t net_invalid_timestamp(void)
3229{ 3229{
3230 return ktime_set(0, 0); 3230 return 0;
3231} 3231}
3232 3232
3233struct sk_buff *skb_clone_sk(struct sk_buff *skb); 3233struct sk_buff *skb_clone_sk(struct sk_buff *skb);
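The networking changes follow the same pattern: a zero ktime_t now simply means "no timestamp". A tiny stand-alone sketch of that idiom, using local stand-ins rather than the real helpers:

/*
 * Sketch of the "zero means no timestamp" idiom on scalar ktime_t;
 * net_invalid_timestamp() is re-declared locally for the example.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

typedef int64_t s64;
typedef s64 ktime_t;

static inline ktime_t net_invalid_timestamp(void)
{
	return 0;		/* was ktime_set(0, 0) on the union type */
}

static bool has_timestamp(ktime_t stamp)
{
	return stamp != 0;	/* was stamp.tv64 != 0 */
}

int main(void)
{
	ktime_t t = net_invalid_timestamp();

	printf("valid=%d\n", has_timestamp(t));		/* prints 0 */
	t = 123456789;					/* some rx time in ns */
	printf("valid=%d\n", has_timestamp(t));		/* prints 1 */
	return 0;
}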
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 62be0786d6d0..a04fea19676f 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -127,9 +127,7 @@ static inline void tick_nohz_idle_exit(void) { }
127 127
128static inline ktime_t tick_nohz_get_sleep_length(void) 128static inline ktime_t tick_nohz_get_sleep_length(void)
129{ 129{
130 ktime_t len = { .tv64 = NSEC_PER_SEC/HZ }; 130 return NSEC_PER_SEC / HZ;
131
132 return len;
133} 131}
134static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } 132static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
135static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } 133static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h
index 4382035a75bb..2496ad4cfc99 100644
--- a/include/linux/timecounter.h
+++ b/include/linux/timecounter.h
@@ -20,7 +20,7 @@
20#include <linux/types.h> 20#include <linux/types.h>
21 21
22/* simplify initialization of mask field */ 22/* simplify initialization of mask field */
23#define CYCLECOUNTER_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) 23#define CYCLECOUNTER_MASK(bits) (u64)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
24 24
25/** 25/**
26 * struct cyclecounter - hardware abstraction for a free running counter 26 * struct cyclecounter - hardware abstraction for a free running counter
@@ -37,8 +37,8 @@
37 * @shift: cycle to nanosecond divisor (power of two) 37 * @shift: cycle to nanosecond divisor (power of two)
38 */ 38 */
39struct cyclecounter { 39struct cyclecounter {
40 cycle_t (*read)(const struct cyclecounter *cc); 40 u64 (*read)(const struct cyclecounter *cc);
41 cycle_t mask; 41 u64 mask;
42 u32 mult; 42 u32 mult;
43 u32 shift; 43 u32 shift;
44}; 44};
@@ -63,7 +63,7 @@ struct cyclecounter {
63 */ 63 */
64struct timecounter { 64struct timecounter {
65 const struct cyclecounter *cc; 65 const struct cyclecounter *cc;
66 cycle_t cycle_last; 66 u64 cycle_last;
67 u64 nsec; 67 u64 nsec;
68 u64 mask; 68 u64 mask;
69 u64 frac; 69 u64 frac;
@@ -77,7 +77,7 @@ struct timecounter {
77 * @frac: pointer to storage for the fractional nanoseconds. 77 * @frac: pointer to storage for the fractional nanoseconds.
78 */ 78 */
79static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc, 79static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
80 cycle_t cycles, u64 mask, u64 *frac) 80 u64 cycles, u64 mask, u64 *frac)
81{ 81{
82 u64 ns = (u64) cycles; 82 u64 ns = (u64) cycles;
83 83
@@ -134,6 +134,6 @@ extern u64 timecounter_read(struct timecounter *tc);
134 * in the past. 134 * in the past.
135 */ 135 */
136extern u64 timecounter_cyc2time(struct timecounter *tc, 136extern u64 timecounter_cyc2time(struct timecounter *tc,
137 cycle_t cycle_tstamp); 137 u64 cycle_tstamp);
138 138
139#endif 139#endif
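A standalone sketch of a cyclecounter using the new u64 signatures follows. The conversion is simplified (no fractional-nanosecond carry, unlike the real cyclecounter_cyc2ns()), and the fake 1 GHz counter is invented for the example.

/*
 * Minimal userspace cyclecounter with the u64 read/mask fields from the
 * patched header; the read callback and its frequency are made up.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint32_t u32;

struct cyclecounter {
	u64 (*read)(const struct cyclecounter *cc);
	u64 mask;
	u32 mult;
	u32 shift;
};

/* Pretend hardware: a 48-bit counter ticking at 1 GHz, so 1 cycle == 1 ns */
static u64 fake_read(const struct cyclecounter *cc)
{
	static u64 ticks;
	return (ticks += 1000) & cc->mask;
}

static u64 cyc2ns(const struct cyclecounter *cc, u64 cycles)
{
	return (cycles * cc->mult) >> cc->shift;	/* simplified */
}

int main(void)
{
	struct cyclecounter cc = {
		.read = fake_read,
		.mask = (1ULL << 48) - 1,	/* CYCLECOUNTER_MASK(48) */
		.mult = 1 << 8,			/* 1 ns per cycle ... */
		.shift = 8,			/* ... once shifted back */
	};

	u64 cycles = cc.read(&cc);
	printf("%llu cycles -> %llu ns\n",
	       (unsigned long long)cycles,
	       (unsigned long long)cyc2ns(&cc, cycles));
	return 0;
}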
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index e88005459035..110f4532188c 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -29,9 +29,9 @@
29 */ 29 */
30struct tk_read_base { 30struct tk_read_base {
31 struct clocksource *clock; 31 struct clocksource *clock;
32 cycle_t (*read)(struct clocksource *cs); 32 u64 (*read)(struct clocksource *cs);
33 cycle_t mask; 33 u64 mask;
34 cycle_t cycle_last; 34 u64 cycle_last;
35 u32 mult; 35 u32 mult;
36 u32 shift; 36 u32 shift;
37 u64 xtime_nsec; 37 u64 xtime_nsec;
@@ -97,7 +97,7 @@ struct timekeeper {
97 struct timespec64 raw_time; 97 struct timespec64 raw_time;
98 98
99 /* The following members are for timekeeping internal use */ 99 /* The following members are for timekeeping internal use */
100 cycle_t cycle_interval; 100 u64 cycle_interval;
101 u64 xtime_interval; 101 u64 xtime_interval;
102 s64 xtime_remainder; 102 s64 xtime_remainder;
103 u32 raw_interval; 103 u32 raw_interval;
@@ -136,7 +136,7 @@ extern void update_vsyscall_tz(void);
136 136
137extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm, 137extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
138 struct clocksource *c, u32 mult, 138 struct clocksource *c, u32 mult,
139 cycle_t cycle_last); 139 u64 cycle_last);
140extern void update_vsyscall_tz(void); 140extern void update_vsyscall_tz(void);
141 141
142#else 142#else
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 361f8bf1429d..d2e804e15c3e 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -293,7 +293,7 @@ extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw,
293 * @cs_was_changed_seq: The sequence number of clocksource change events 293 * @cs_was_changed_seq: The sequence number of clocksource change events
294 */ 294 */
295struct system_time_snapshot { 295struct system_time_snapshot {
296 cycle_t cycles; 296 u64 cycles;
297 ktime_t real; 297 ktime_t real;
298 ktime_t raw; 298 ktime_t raw;
299 unsigned int clock_was_set_seq; 299 unsigned int clock_was_set_seq;
@@ -321,7 +321,7 @@ struct system_device_crosststamp {
321 * timekeeping code to verify comparibility of two cycle values 321 * timekeeping code to verify comparibility of two cycle values
322 */ 322 */
323struct system_counterval_t { 323struct system_counterval_t {
324 cycle_t cycles; 324 u64 cycles;
325 struct clocksource *cs; 325 struct clocksource *cs;
326}; 326};
327 327
diff --git a/include/linux/types.h b/include/linux/types.h
index d501ad3ba247..1e7bd24848fc 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -228,8 +228,5 @@ struct callback_head {
228typedef void (*rcu_callback_t)(struct rcu_head *head); 228typedef void (*rcu_callback_t)(struct rcu_head *head);
229typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func); 229typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
230 230
231/* clocksource cycle base type */
232typedef u64 cycle_t;
233
234#endif /* __ASSEMBLY__ */ 231#endif /* __ASSEMBLY__ */
235#endif /* _LINUX_TYPES_H */ 232#endif /* _LINUX_TYPES_H */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 2408e8d5c05c..1421132e9086 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -510,7 +510,7 @@ do { \
510 hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \ 510 hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
511 HRTIMER_MODE_REL); \ 511 HRTIMER_MODE_REL); \
512 hrtimer_init_sleeper(&__t, current); \ 512 hrtimer_init_sleeper(&__t, current); \
513 if ((timeout).tv64 != KTIME_MAX) \ 513 if ((timeout) != KTIME_MAX) \
514 hrtimer_start_range_ns(&__t.timer, timeout, \ 514 hrtimer_start_range_ns(&__t.timer, timeout, \
515 current->timer_slack_ns, \ 515 current->timer_slack_ns, \
516 HRTIMER_MODE_REL); \ 516 HRTIMER_MODE_REL); \
diff --git a/include/net/red.h b/include/net/red.h
index 76e0b5f922c6..208e718e16b9 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -207,7 +207,7 @@ static inline void red_set_parms(struct red_parms *p,
207 207
208static inline int red_is_idling(const struct red_vars *v) 208static inline int red_is_idling(const struct red_vars *v)
209{ 209{
210 return v->qidlestart.tv64 != 0; 210 return v->qidlestart != 0;
211} 211}
212 212
213static inline void red_start_of_idle_period(struct red_vars *v) 213static inline void red_start_of_idle_period(struct red_vars *v)
@@ -217,7 +217,7 @@ static inline void red_start_of_idle_period(struct red_vars *v)
217 217
218static inline void red_end_of_idle_period(struct red_vars *v) 218static inline void red_end_of_idle_period(struct red_vars *v)
219{ 219{
220 v->qidlestart.tv64 = 0; 220 v->qidlestart = 0;
221} 221}
222 222
223static inline void red_restart(struct red_vars *v) 223static inline void red_restart(struct red_vars *v)
diff --git a/include/net/sock.h b/include/net/sock.h
index 282d065e286b..f0e867f58722 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2193,8 +2193,8 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
2193 */ 2193 */
2194 if (sock_flag(sk, SOCK_RCVTSTAMP) || 2194 if (sock_flag(sk, SOCK_RCVTSTAMP) ||
2195 (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) || 2195 (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
2196 (kt.tv64 && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) || 2196 (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
2197 (hwtstamps->hwtstamp.tv64 && 2197 (hwtstamps->hwtstamp &&
2198 (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE))) 2198 (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
2199 __sock_recv_timestamp(msg, sk, skb); 2199 __sock_recv_timestamp(msg, sk, skb);
2200 else 2200 else
diff --git a/include/trace/events/alarmtimer.h b/include/trace/events/alarmtimer.h
index a1c108c16c9c..ae4f358dd8e9 100644
--- a/include/trace/events/alarmtimer.h
+++ b/include/trace/events/alarmtimer.h
@@ -31,7 +31,7 @@ TRACE_EVENT(alarmtimer_suspend,
31 ), 31 ),
32 32
33 TP_fast_assign( 33 TP_fast_assign(
34 __entry->expires = expires.tv64; 34 __entry->expires = expires;
35 __entry->alarm_type = flag; 35 __entry->alarm_type = flag;
36 ), 36 ),
37 37
@@ -57,8 +57,8 @@ DECLARE_EVENT_CLASS(alarm_class,
57 TP_fast_assign( 57 TP_fast_assign(
58 __entry->alarm = alarm; 58 __entry->alarm = alarm;
59 __entry->alarm_type = alarm->type; 59 __entry->alarm_type = alarm->type;
60 __entry->expires = alarm->node.expires.tv64; 60 __entry->expires = alarm->node.expires;
61 __entry->now = now.tv64; 61 __entry->now = now;
62 ), 62 ),
63 63
64 TP_printk("alarmtimer:%p type:%s expires:%llu now:%llu", 64 TP_printk("alarmtimer:%p type:%s expires:%llu now:%llu",
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 28c5da6fdfac..1448637616d6 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -177,16 +177,14 @@ TRACE_EVENT(hrtimer_start,
177 TP_fast_assign( 177 TP_fast_assign(
178 __entry->hrtimer = hrtimer; 178 __entry->hrtimer = hrtimer;
179 __entry->function = hrtimer->function; 179 __entry->function = hrtimer->function;
180 __entry->expires = hrtimer_get_expires(hrtimer).tv64; 180 __entry->expires = hrtimer_get_expires(hrtimer);
181 __entry->softexpires = hrtimer_get_softexpires(hrtimer).tv64; 181 __entry->softexpires = hrtimer_get_softexpires(hrtimer);
182 ), 182 ),
183 183
184 TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu", 184 TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
185 __entry->hrtimer, __entry->function, 185 __entry->hrtimer, __entry->function,
186 (unsigned long long)ktime_to_ns((ktime_t) { 186 (unsigned long long) __entry->expires,
187 .tv64 = __entry->expires }), 187 (unsigned long long) __entry->softexpires)
188 (unsigned long long)ktime_to_ns((ktime_t) {
189 .tv64 = __entry->softexpires }))
190); 188);
191 189
192/** 190/**
@@ -211,13 +209,13 @@ TRACE_EVENT(hrtimer_expire_entry,
211 209
212 TP_fast_assign( 210 TP_fast_assign(
213 __entry->hrtimer = hrtimer; 211 __entry->hrtimer = hrtimer;
214 __entry->now = now->tv64; 212 __entry->now = *now;
215 __entry->function = hrtimer->function; 213 __entry->function = hrtimer->function;
216 ), 214 ),
217 215
218 TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function, 216 TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function,
219 (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now })) 217 (unsigned long long) __entry->now)
220 ); 218);
221 219
222DECLARE_EVENT_CLASS(hrtimer_class, 220DECLARE_EVENT_CLASS(hrtimer_class,
223 221
diff --git a/kernel/futex.c b/kernel/futex.c
index 9246d9f593d1..0842c8ca534b 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2459,7 +2459,7 @@ retry:
2459 restart->fn = futex_wait_restart; 2459 restart->fn = futex_wait_restart;
2460 restart->futex.uaddr = uaddr; 2460 restart->futex.uaddr = uaddr;
2461 restart->futex.val = val; 2461 restart->futex.val = val;
2462 restart->futex.time = abs_time->tv64; 2462 restart->futex.time = *abs_time;
2463 restart->futex.bitset = bitset; 2463 restart->futex.bitset = bitset;
2464 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT; 2464 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
2465 2465
@@ -2480,7 +2480,7 @@ static long futex_wait_restart(struct restart_block *restart)
2480 ktime_t t, *tp = NULL; 2480 ktime_t t, *tp = NULL;
2481 2481
2482 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) { 2482 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
2483 t.tv64 = restart->futex.time; 2483 t = restart->futex.time;
2484 tp = &t; 2484 tp = &t;
2485 } 2485 }
2486 restart->fn = do_no_restart_syscall; 2486 restart->fn = do_no_restart_syscall;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 966556ebdbb3..c56fb57f2991 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1456,7 +1456,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1456 * yield - it could be a while. 1456 * yield - it could be a while.
1457 */ 1457 */
1458 if (unlikely(queued)) { 1458 if (unlikely(queued)) {
1459 ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ); 1459 ktime_t to = NSEC_PER_SEC / HZ;
1460 1460
1461 set_current_state(TASK_UNINTERRUPTIBLE); 1461 set_current_state(TASK_UNINTERRUPTIBLE);
1462 schedule_hrtimeout(&to, HRTIMER_MODE_REL); 1462 schedule_hrtimeout(&to, HRTIMER_MODE_REL);
diff --git a/kernel/signal.c b/kernel/signal.c
index f5d4e275345e..ff046b73ff2d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -587,7 +587,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
587 struct hrtimer *tmr = &tsk->signal->real_timer; 587 struct hrtimer *tmr = &tsk->signal->real_timer;
588 588
589 if (!hrtimer_is_queued(tmr) && 589 if (!hrtimer_is_queued(tmr) &&
590 tsk->signal->it_real_incr.tv64 != 0) { 590 tsk->signal->it_real_incr != 0) {
591 hrtimer_forward(tmr, tmr->base->get_time(), 591 hrtimer_forward(tmr, tmr->base->get_time(),
592 tsk->signal->it_real_incr); 592 tsk->signal->it_real_incr);
593 hrtimer_restart(tmr); 593 hrtimer_restart(tmr);
@@ -2766,7 +2766,7 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2766int do_sigtimedwait(const sigset_t *which, siginfo_t *info, 2766int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2767 const struct timespec *ts) 2767 const struct timespec *ts)
2768{ 2768{
2769 ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX }; 2769 ktime_t *to = NULL, timeout = KTIME_MAX;
2770 struct task_struct *tsk = current; 2770 struct task_struct *tsk = current;
2771 sigset_t mask = *which; 2771 sigset_t mask = *which;
2772 int sig, ret = 0; 2772 int sig, ret = 0;
@@ -2786,7 +2786,7 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2786 2786
2787 spin_lock_irq(&tsk->sighand->siglock); 2787 spin_lock_irq(&tsk->sighand->siglock);
2788 sig = dequeue_signal(tsk, &mask, info); 2788 sig = dequeue_signal(tsk, &mask, info);
2789 if (!sig && timeout.tv64) { 2789 if (!sig && timeout) {
2790 /* 2790 /*
2791 * None ready, temporarily unblock those we're interested 2791 * None ready, temporarily unblock those we're interested
2792 * while we are sleeping in so that we'll be awakened when 2792 * while we are sleeping in so that we'll be awakened when
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 3921cf7fea8e..e6dc9a538efa 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -234,7 +234,7 @@ static int alarmtimer_suspend(struct device *dev)
234 min = freezer_delta; 234 min = freezer_delta;
235 expires = freezer_expires; 235 expires = freezer_expires;
236 type = freezer_alarmtype; 236 type = freezer_alarmtype;
237 freezer_delta = ktime_set(0, 0); 237 freezer_delta = 0;
238 spin_unlock_irqrestore(&freezer_delta_lock, flags); 238 spin_unlock_irqrestore(&freezer_delta_lock, flags);
239 239
240 rtc = alarmtimer_get_rtcdev(); 240 rtc = alarmtimer_get_rtcdev();
@@ -254,13 +254,13 @@ static int alarmtimer_suspend(struct device *dev)
254 if (!next) 254 if (!next)
255 continue; 255 continue;
256 delta = ktime_sub(next->expires, base->gettime()); 256 delta = ktime_sub(next->expires, base->gettime());
257 if (!min.tv64 || (delta.tv64 < min.tv64)) { 257 if (!min || (delta < min)) {
258 expires = next->expires; 258 expires = next->expires;
259 min = delta; 259 min = delta;
260 type = i; 260 type = i;
261 } 261 }
262 } 262 }
263 if (min.tv64 == 0) 263 if (min == 0)
264 return 0; 264 return 0;
265 265
266 if (ktime_to_ns(min) < 2 * NSEC_PER_SEC) { 266 if (ktime_to_ns(min) < 2 * NSEC_PER_SEC) {
@@ -277,7 +277,7 @@ static int alarmtimer_suspend(struct device *dev)
277 now = ktime_add(now, min); 277 now = ktime_add(now, min);
278 278
279 /* Set alarm, if in the past reject suspend briefly to handle */ 279 /* Set alarm, if in the past reject suspend briefly to handle */
280 ret = rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0)); 280 ret = rtc_timer_start(rtc, &rtctimer, now, 0);
281 if (ret < 0) 281 if (ret < 0)
282 __pm_wakeup_event(ws, MSEC_PER_SEC); 282 __pm_wakeup_event(ws, MSEC_PER_SEC);
283 return ret; 283 return ret;
@@ -328,7 +328,7 @@ static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type)
328 delta = ktime_sub(absexp, base->gettime()); 328 delta = ktime_sub(absexp, base->gettime());
329 329
330 spin_lock_irqsave(&freezer_delta_lock, flags); 330 spin_lock_irqsave(&freezer_delta_lock, flags);
331 if (!freezer_delta.tv64 || (delta.tv64 < freezer_delta.tv64)) { 331 if (!freezer_delta || (delta < freezer_delta)) {
332 freezer_delta = delta; 332 freezer_delta = delta;
333 freezer_expires = absexp; 333 freezer_expires = absexp;
334 freezer_alarmtype = type; 334 freezer_alarmtype = type;
@@ -453,10 +453,10 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
453 453
454 delta = ktime_sub(now, alarm->node.expires); 454 delta = ktime_sub(now, alarm->node.expires);
455 455
456 if (delta.tv64 < 0) 456 if (delta < 0)
457 return 0; 457 return 0;
458 458
459 if (unlikely(delta.tv64 >= interval.tv64)) { 459 if (unlikely(delta >= interval)) {
460 s64 incr = ktime_to_ns(interval); 460 s64 incr = ktime_to_ns(interval);
461 461
462 overrun = ktime_divns(delta, incr); 462 overrun = ktime_divns(delta, incr);
@@ -464,7 +464,7 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
464 alarm->node.expires = ktime_add_ns(alarm->node.expires, 464 alarm->node.expires = ktime_add_ns(alarm->node.expires,
465 incr*overrun); 465 incr*overrun);
466 466
467 if (alarm->node.expires.tv64 > now.tv64) 467 if (alarm->node.expires > now)
468 return overrun; 468 return overrun;
469 /* 469 /*
470 * This (and the ktime_add() below) is the 470 * This (and the ktime_add() below) is the
@@ -522,7 +522,7 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
522 } 522 }
523 523
524 /* Re-add periodic timers */ 524 /* Re-add periodic timers */
525 if (ptr->it.alarm.interval.tv64) { 525 if (ptr->it.alarm.interval) {
526 ptr->it_overrun += alarm_forward(alarm, now, 526 ptr->it_overrun += alarm_forward(alarm, now,
527 ptr->it.alarm.interval); 527 ptr->it.alarm.interval);
528 result = ALARMTIMER_RESTART; 528 result = ALARMTIMER_RESTART;
@@ -730,7 +730,7 @@ static int update_rmtp(ktime_t exp, enum alarmtimer_type type,
730 730
731 rem = ktime_sub(exp, alarm_bases[type].gettime()); 731 rem = ktime_sub(exp, alarm_bases[type].gettime());
732 732
733 if (rem.tv64 <= 0) 733 if (rem <= 0)
734 return 0; 734 return 0;
735 rmt = ktime_to_timespec(rem); 735 rmt = ktime_to_timespec(rem);
736 736
@@ -755,7 +755,7 @@ static long __sched alarm_timer_nsleep_restart(struct restart_block *restart)
755 struct alarm alarm; 755 struct alarm alarm;
756 int ret = 0; 756 int ret = 0;
757 757
758 exp.tv64 = restart->nanosleep.expires; 758 exp = restart->nanosleep.expires;
759 alarm_init(&alarm, type, alarmtimer_nsleep_wakeup); 759 alarm_init(&alarm, type, alarmtimer_nsleep_wakeup);
760 760
761 if (alarmtimer_do_nsleep(&alarm, exp)) 761 if (alarmtimer_do_nsleep(&alarm, exp))
@@ -835,7 +835,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
835 restart = &current->restart_block; 835 restart = &current->restart_block;
836 restart->fn = alarm_timer_nsleep_restart; 836 restart->fn = alarm_timer_nsleep_restart;
837 restart->nanosleep.clockid = type; 837 restart->nanosleep.clockid = type;
838 restart->nanosleep.expires = exp.tv64; 838 restart->nanosleep.expires = exp;
839 restart->nanosleep.rmtp = rmtp; 839 restart->nanosleep.rmtp = rmtp;
840 ret = -ERESTART_RESTARTBLOCK; 840 ret = -ERESTART_RESTARTBLOCK;
841 841
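The alarm_forward() hunk above shows the overrun arithmetic on scalar ktime_t. Below is a standalone sketch of the same "advance the expiry by whole intervals" computation, with local names; the exact-boundary corner case handled by the kernel loop is omitted.

/*
 * Sketch of forwarding a stale expiry past 'now' in whole intervals,
 * written against plain s64 nanoseconds; simplified relative to the
 * kernel's alarm_forward()/hrtimer_forward().
 */
#include <stdint.h>
#include <stdio.h>

typedef int64_t s64;
typedef uint64_t u64;
typedef s64 ktime_t;

static u64 forward(ktime_t *expires, ktime_t now, ktime_t interval)
{
	ktime_t delta = now - *expires;
	u64 overrun;

	if (delta < 0)
		return 0;			/* not expired yet */

	overrun = (u64)delta / (u64)interval;	/* whole missed periods */
	*expires += (s64)(overrun + 1) * interval;
	return overrun + 1;
}

int main(void)
{
	ktime_t expires = 1000000;		/* 1 ms */
	ktime_t interval = 250000;		/* 250 us period */
	u64 n = forward(&expires, 1600000, interval);

	/* advances three interval steps, to 1.75 ms, past now = 1.6 ms */
	printf("overruns=%llu next=%lld\n",
	       (unsigned long long)n, (long long)expires);
	return 0;
}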
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 2c5bc77c0bb0..97ac0951f164 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -179,7 +179,7 @@ void clockevents_switch_state(struct clock_event_device *dev,
179void clockevents_shutdown(struct clock_event_device *dev) 179void clockevents_shutdown(struct clock_event_device *dev)
180{ 180{
181 clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN); 181 clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
182 dev->next_event.tv64 = KTIME_MAX; 182 dev->next_event = KTIME_MAX;
183} 183}
184 184
185/** 185/**
@@ -213,7 +213,7 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
213 if (dev->min_delta_ns >= MIN_DELTA_LIMIT) { 213 if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
214 printk_deferred(KERN_WARNING 214 printk_deferred(KERN_WARNING
215 "CE: Reprogramming failure. Giving up\n"); 215 "CE: Reprogramming failure. Giving up\n");
216 dev->next_event.tv64 = KTIME_MAX; 216 dev->next_event = KTIME_MAX;
217 return -ETIME; 217 return -ETIME;
218 } 218 }
219 219
@@ -310,7 +310,7 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
310 int64_t delta; 310 int64_t delta;
311 int rc; 311 int rc;
312 312
313 if (unlikely(expires.tv64 < 0)) { 313 if (unlikely(expires < 0)) {
314 WARN_ON_ONCE(1); 314 WARN_ON_ONCE(1);
315 return -ETIME; 315 return -ETIME;
316 } 316 }
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 150242ccfcd2..665985b0a89a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -170,7 +170,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
170static void clocksource_watchdog(unsigned long data) 170static void clocksource_watchdog(unsigned long data)
171{ 171{
172 struct clocksource *cs; 172 struct clocksource *cs;
173 cycle_t csnow, wdnow, cslast, wdlast, delta; 173 u64 csnow, wdnow, cslast, wdlast, delta;
174 int64_t wd_nsec, cs_nsec; 174 int64_t wd_nsec, cs_nsec;
175 int next_cpu, reset_pending; 175 int next_cpu, reset_pending;
176 176
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 161e340395d5..c6ecedd3b839 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -171,7 +171,7 @@ hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
171 return 0; 171 return 0;
172 172
173 expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset); 173 expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
174 return expires.tv64 <= new_base->cpu_base->expires_next.tv64; 174 return expires <= new_base->cpu_base->expires_next;
175#else 175#else
176 return 0; 176 return 0;
177#endif 177#endif
@@ -313,7 +313,7 @@ ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
313 * We use KTIME_SEC_MAX here, the maximum timeout which we can 313 * We use KTIME_SEC_MAX here, the maximum timeout which we can
314 * return to user space in a timespec: 314 * return to user space in a timespec:
315 */ 315 */
316 if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64) 316 if (res < 0 || res < lhs || res < rhs)
317 res = ktime_set(KTIME_SEC_MAX, 0); 317 res = ktime_set(KTIME_SEC_MAX, 0);
318 318
319 return res; 319 return res;
@@ -465,8 +465,8 @@ static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
465static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base) 465static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
466{ 466{
467 struct hrtimer_clock_base *base = cpu_base->clock_base; 467 struct hrtimer_clock_base *base = cpu_base->clock_base;
468 ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
469 unsigned int active = cpu_base->active_bases; 468 unsigned int active = cpu_base->active_bases;
469 ktime_t expires, expires_next = KTIME_MAX;
470 470
471 hrtimer_update_next_timer(cpu_base, NULL); 471 hrtimer_update_next_timer(cpu_base, NULL);
472 for (; active; base++, active >>= 1) { 472 for (; active; base++, active >>= 1) {
@@ -479,7 +479,7 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
479 next = timerqueue_getnext(&base->active); 479 next = timerqueue_getnext(&base->active);
480 timer = container_of(next, struct hrtimer, node); 480 timer = container_of(next, struct hrtimer, node);
481 expires = ktime_sub(hrtimer_get_expires(timer), base->offset); 481 expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
482 if (expires.tv64 < expires_next.tv64) { 482 if (expires < expires_next) {
483 expires_next = expires; 483 expires_next = expires;
484 hrtimer_update_next_timer(cpu_base, timer); 484 hrtimer_update_next_timer(cpu_base, timer);
485 } 485 }
@@ -489,8 +489,8 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
489 * the clock bases so the result might be negative. Fix it up 489 * the clock bases so the result might be negative. Fix it up
490 * to prevent a false positive in clockevents_program_event(). 490 * to prevent a false positive in clockevents_program_event().
491 */ 491 */
492 if (expires_next.tv64 < 0) 492 if (expires_next < 0)
493 expires_next.tv64 = 0; 493 expires_next = 0;
494 return expires_next; 494 return expires_next;
495} 495}
496#endif 496#endif
@@ -561,10 +561,10 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
561 561
562 expires_next = __hrtimer_get_next_event(cpu_base); 562 expires_next = __hrtimer_get_next_event(cpu_base);
563 563
564 if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64) 564 if (skip_equal && expires_next == cpu_base->expires_next)
565 return; 565 return;
566 566
567 cpu_base->expires_next.tv64 = expires_next.tv64; 567 cpu_base->expires_next = expires_next;
568 568
569 /* 569 /*
570 * If a hang was detected in the last timer interrupt then we 570 * If a hang was detected in the last timer interrupt then we
@@ -622,10 +622,10 @@ static void hrtimer_reprogram(struct hrtimer *timer,
622 * CLOCK_REALTIME timer might be requested with an absolute 622 * CLOCK_REALTIME timer might be requested with an absolute
623 * expiry time which is less than base->offset. Set it to 0. 623 * expiry time which is less than base->offset. Set it to 0.
624 */ 624 */
625 if (expires.tv64 < 0) 625 if (expires < 0)
626 expires.tv64 = 0; 626 expires = 0;
627 627
628 if (expires.tv64 >= cpu_base->expires_next.tv64) 628 if (expires >= cpu_base->expires_next)
629 return; 629 return;
630 630
631 /* Update the pointer to the next expiring timer */ 631 /* Update the pointer to the next expiring timer */
@@ -653,7 +653,7 @@ static void hrtimer_reprogram(struct hrtimer *timer,
653 */ 653 */
654static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) 654static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
655{ 655{
656 base->expires_next.tv64 = KTIME_MAX; 656 base->expires_next = KTIME_MAX;
657 base->hres_active = 0; 657 base->hres_active = 0;
658} 658}
659 659
@@ -827,21 +827,21 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
827 827
828 delta = ktime_sub(now, hrtimer_get_expires(timer)); 828 delta = ktime_sub(now, hrtimer_get_expires(timer));
829 829
830 if (delta.tv64 < 0) 830 if (delta < 0)
831 return 0; 831 return 0;
832 832
833 if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED)) 833 if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
834 return 0; 834 return 0;
835 835
836 if (interval.tv64 < hrtimer_resolution) 836 if (interval < hrtimer_resolution)
837 interval.tv64 = hrtimer_resolution; 837 interval = hrtimer_resolution;
838 838
839 if (unlikely(delta.tv64 >= interval.tv64)) { 839 if (unlikely(delta >= interval)) {
840 s64 incr = ktime_to_ns(interval); 840 s64 incr = ktime_to_ns(interval);
841 841
842 orun = ktime_divns(delta, incr); 842 orun = ktime_divns(delta, incr);
843 hrtimer_add_expires_ns(timer, incr * orun); 843 hrtimer_add_expires_ns(timer, incr * orun);
844 if (hrtimer_get_expires_tv64(timer) > now.tv64) 844 if (hrtimer_get_expires_tv64(timer) > now)
845 return orun; 845 return orun;
846 /* 846 /*
847 * This (and the ktime_add() below) is the 847 * This (and the ktime_add() below) is the
@@ -955,7 +955,7 @@ static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
955 */ 955 */
956 timer->is_rel = mode & HRTIMER_MODE_REL; 956 timer->is_rel = mode & HRTIMER_MODE_REL;
957 if (timer->is_rel) 957 if (timer->is_rel)
958 tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution)); 958 tim = ktime_add_safe(tim, hrtimer_resolution);
959#endif 959#endif
960 return tim; 960 return tim;
961} 961}
@@ -1104,7 +1104,7 @@ u64 hrtimer_get_next_event(void)
1104 raw_spin_lock_irqsave(&cpu_base->lock, flags); 1104 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1105 1105
1106 if (!__hrtimer_hres_active(cpu_base)) 1106 if (!__hrtimer_hres_active(cpu_base))
1107 expires = __hrtimer_get_next_event(cpu_base).tv64; 1107 expires = __hrtimer_get_next_event(cpu_base);
1108 1108
1109 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); 1109 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1110 1110
@@ -1296,7 +1296,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
1296 * are right-of a not yet expired timer, because that 1296 * are right-of a not yet expired timer, because that
1297 * timer will have to trigger a wakeup anyway. 1297 * timer will have to trigger a wakeup anyway.
1298 */ 1298 */
1299 if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) 1299 if (basenow < hrtimer_get_softexpires_tv64(timer))
1300 break; 1300 break;
1301 1301
1302 __run_hrtimer(cpu_base, base, timer, &basenow); 1302 __run_hrtimer(cpu_base, base, timer, &basenow);
@@ -1318,7 +1318,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
1318 1318
1319 BUG_ON(!cpu_base->hres_active); 1319 BUG_ON(!cpu_base->hres_active);
1320 cpu_base->nr_events++; 1320 cpu_base->nr_events++;
1321 dev->next_event.tv64 = KTIME_MAX; 1321 dev->next_event = KTIME_MAX;
1322 1322
1323 raw_spin_lock(&cpu_base->lock); 1323 raw_spin_lock(&cpu_base->lock);
1324 entry_time = now = hrtimer_update_base(cpu_base); 1324 entry_time = now = hrtimer_update_base(cpu_base);
@@ -1331,7 +1331,7 @@ retry:
1331 * timers which run their callback and need to be requeued on 1331 * timers which run their callback and need to be requeued on
1332 * this CPU. 1332 * this CPU.
1333 */ 1333 */
1334 cpu_base->expires_next.tv64 = KTIME_MAX; 1334 cpu_base->expires_next = KTIME_MAX;
1335 1335
1336 __hrtimer_run_queues(cpu_base, now); 1336 __hrtimer_run_queues(cpu_base, now);
1337 1337
@@ -1379,13 +1379,13 @@ retry:
1379 cpu_base->hang_detected = 1; 1379 cpu_base->hang_detected = 1;
1380 raw_spin_unlock(&cpu_base->lock); 1380 raw_spin_unlock(&cpu_base->lock);
1381 delta = ktime_sub(now, entry_time); 1381 delta = ktime_sub(now, entry_time);
1382 if ((unsigned int)delta.tv64 > cpu_base->max_hang_time) 1382 if ((unsigned int)delta > cpu_base->max_hang_time)
1383 cpu_base->max_hang_time = (unsigned int) delta.tv64; 1383 cpu_base->max_hang_time = (unsigned int) delta;
1384 /* 1384 /*
1385 * Limit it to a sensible value as we enforce a longer 1385 * Limit it to a sensible value as we enforce a longer
1386 * delay. Give the CPU at least 100ms to catch up. 1386 * delay. Give the CPU at least 100ms to catch up.
1387 */ 1387 */
1388 if (delta.tv64 > 100 * NSEC_PER_MSEC) 1388 if (delta > 100 * NSEC_PER_MSEC)
1389 expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC); 1389 expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
1390 else 1390 else
1391 expires_next = ktime_add(now, delta); 1391 expires_next = ktime_add(now, delta);
@@ -1495,7 +1495,7 @@ static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
1495 ktime_t rem; 1495 ktime_t rem;
1496 1496
1497 rem = hrtimer_expires_remaining(timer); 1497 rem = hrtimer_expires_remaining(timer);
1498 if (rem.tv64 <= 0) 1498 if (rem <= 0)
1499 return 0; 1499 return 0;
1500 rmt = ktime_to_timespec(rem); 1500 rmt = ktime_to_timespec(rem);
1501 1501
@@ -1693,7 +1693,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
1693 * Optimize when a zero timeout value is given. It does not 1693 * Optimize when a zero timeout value is given. It does not
1694 * matter whether this is an absolute or a relative time. 1694 * matter whether this is an absolute or a relative time.
1695 */ 1695 */
1696 if (expires && !expires->tv64) { 1696 if (expires && *expires == 0) {
1697 __set_current_state(TASK_RUNNING); 1697 __set_current_state(TASK_RUNNING);
1698 return 0; 1698 return 0;
1699 } 1699 }
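Throughout hrtimer.c, KTIME_MAX now serves directly as the "nothing armed" sentinel and the next-expiry search becomes a plain minimum over s64 values. A sketch of that scan pattern; the array of per-base expiries is made up for the example.

/*
 * Sketch of the next-expiry scan in the style of the
 * __hrtimer_get_next_event() hunk above, on scalar ktime_t.
 */
#include <stdint.h>
#include <stdio.h>

typedef int64_t s64;
typedef s64 ktime_t;

#define KTIME_MAX ((s64)~((uint64_t)1 << 63))

static ktime_t next_event(const ktime_t *expires, int n)
{
	ktime_t expires_next = KTIME_MAX;	/* was { .tv64 = KTIME_MAX } */

	for (int i = 0; i < n; i++)
		if (expires[i] < expires_next)
			expires_next = expires[i];

	/* clamp, as the kernel does, so a negative offset-adjusted value
	 * cannot confuse the clockevent programming */
	return expires_next < 0 ? 0 : expires_next;
}

int main(void)
{
	ktime_t bases[] = { 5000000, 1200000, 9000000 };

	printf("next=%lld\n", (long long)next_event(bases, 3));	/* 1200000 */
	printf("idle=%d\n", next_event(bases, 0) == KTIME_MAX);		/* 1 */
	return 0;
}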
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index a45afb7277c2..8c89143f9ebf 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -34,10 +34,10 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer)
34 * then we return 0 - which is correct. 34 * then we return 0 - which is correct.
35 */ 35 */
36 if (hrtimer_active(timer)) { 36 if (hrtimer_active(timer)) {
37 if (rem.tv64 <= 0) 37 if (rem <= 0)
38 rem.tv64 = NSEC_PER_USEC; 38 rem = NSEC_PER_USEC;
39 } else 39 } else
40 rem.tv64 = 0; 40 rem = 0;
41 41
42 return ktime_to_timeval(rem); 42 return ktime_to_timeval(rem);
43} 43}
@@ -216,12 +216,12 @@ again:
216 goto again; 216 goto again;
217 } 217 }
218 expires = timeval_to_ktime(value->it_value); 218 expires = timeval_to_ktime(value->it_value);
219 if (expires.tv64 != 0) { 219 if (expires != 0) {
220 tsk->signal->it_real_incr = 220 tsk->signal->it_real_incr =
221 timeval_to_ktime(value->it_interval); 221 timeval_to_ktime(value->it_interval);
222 hrtimer_start(timer, expires, HRTIMER_MODE_REL); 222 hrtimer_start(timer, expires, HRTIMER_MODE_REL);
223 } else 223 } else
224 tsk->signal->it_real_incr.tv64 = 0; 224 tsk->signal->it_real_incr = 0;
225 225
226 trace_itimer_state(ITIMER_REAL, value, 0); 226 trace_itimer_state(ITIMER_REAL, value, 0);
227 spin_unlock_irq(&tsk->sighand->siglock); 227 spin_unlock_irq(&tsk->sighand->siglock);
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 555e21f7b966..a4a0e478e44d 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -59,9 +59,9 @@
59#define JIFFIES_SHIFT 8 59#define JIFFIES_SHIFT 8
60#endif 60#endif
61 61
62static cycle_t jiffies_read(struct clocksource *cs) 62static u64 jiffies_read(struct clocksource *cs)
63{ 63{
64 return (cycle_t) jiffies; 64 return (u64) jiffies;
65} 65}
66 66
67static struct clocksource clocksource_jiffies = { 67static struct clocksource clocksource_jiffies = {
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 6df8927c58a5..edf19cc53140 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -381,7 +381,7 @@ ktime_t ntp_get_next_leap(void)
381 381
382 if ((time_state == TIME_INS) && (time_status & STA_INS)) 382 if ((time_state == TIME_INS) && (time_status & STA_INS))
383 return ktime_set(ntp_next_leap_sec, 0); 383 return ktime_set(ntp_next_leap_sec, 0);
384 ret.tv64 = KTIME_MAX; 384 ret = KTIME_MAX;
385 return ret; 385 return ret;
386} 386}
387 387
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 42d7b9558741..1e6623d76750 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -359,7 +359,7 @@ static void schedule_next_timer(struct k_itimer *timr)
359{ 359{
360 struct hrtimer *timer = &timr->it.real.timer; 360 struct hrtimer *timer = &timr->it.real.timer;
361 361
362 if (timr->it.real.interval.tv64 == 0) 362 if (timr->it.real.interval == 0)
363 return; 363 return;
364 364
365 timr->it_overrun += (unsigned int) hrtimer_forward(timer, 365 timr->it_overrun += (unsigned int) hrtimer_forward(timer,
@@ -449,7 +449,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
449 timr = container_of(timer, struct k_itimer, it.real.timer); 449 timr = container_of(timer, struct k_itimer, it.real.timer);
450 spin_lock_irqsave(&timr->it_lock, flags); 450 spin_lock_irqsave(&timr->it_lock, flags);
451 451
452 if (timr->it.real.interval.tv64 != 0) 452 if (timr->it.real.interval != 0)
453 si_private = ++timr->it_requeue_pending; 453 si_private = ++timr->it_requeue_pending;
454 454
455 if (posix_timer_event(timr, si_private)) { 455 if (posix_timer_event(timr, si_private)) {
@@ -458,7 +458,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
458 * we will not get a call back to restart it AND 458 * we will not get a call back to restart it AND
459 * it should be restarted. 459 * it should be restarted.
460 */ 460 */
461 if (timr->it.real.interval.tv64 != 0) { 461 if (timr->it.real.interval != 0) {
462 ktime_t now = hrtimer_cb_get_time(timer); 462 ktime_t now = hrtimer_cb_get_time(timer);
463 463
464 /* 464 /*
@@ -485,9 +485,9 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
485 */ 485 */
486#ifdef CONFIG_HIGH_RES_TIMERS 486#ifdef CONFIG_HIGH_RES_TIMERS
487 { 487 {
488 ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ); 488 ktime_t kj = NSEC_PER_SEC / HZ;
489 489
490 if (timr->it.real.interval.tv64 < kj.tv64) 490 if (timr->it.real.interval < kj)
491 now = ktime_add(now, kj); 491 now = ktime_add(now, kj);
492 } 492 }
493#endif 493#endif
@@ -743,7 +743,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
743 iv = timr->it.real.interval; 743 iv = timr->it.real.interval;
744 744
745 /* interval timer ? */ 745 /* interval timer ? */
746 if (iv.tv64) 746 if (iv)
747 cur_setting->it_interval = ktime_to_timespec(iv); 747 cur_setting->it_interval = ktime_to_timespec(iv);
748 else if (!hrtimer_active(timer) && 748 else if (!hrtimer_active(timer) &&
749 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) 749 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
@@ -756,13 +756,13 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
756 * timer move the expiry time forward by intervals, so 756 * timer move the expiry time forward by intervals, so
757 * expiry is > now. 757 * expiry is > now.
758 */ 758 */
759 if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING || 759 if (iv && (timr->it_requeue_pending & REQUEUE_PENDING ||
760 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) 760 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
761 timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv); 761 timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
762 762
763 remaining = __hrtimer_expires_remaining_adjusted(timer, now); 763 remaining = __hrtimer_expires_remaining_adjusted(timer, now);
764 /* Return 0 only, when the timer is expired and not pending */ 764 /* Return 0 only, when the timer is expired and not pending */
765 if (remaining.tv64 <= 0) { 765 if (remaining <= 0) {
766 /* 766 /*
767 * A single shot SIGEV_NONE timer must return 0, when 767 * A single shot SIGEV_NONE timer must return 0, when
768 * it is expired ! 768 * it is expired !
@@ -839,7 +839,7 @@ common_timer_set(struct k_itimer *timr, int flags,
839 common_timer_get(timr, old_setting); 839 common_timer_get(timr, old_setting);
840 840
841 /* disable the timer */ 841 /* disable the timer */
842 timr->it.real.interval.tv64 = 0; 842 timr->it.real.interval = 0;
843 /* 843 /*
844 * careful here. If smp we could be in the "fire" routine which will 844 * careful here. If smp we could be in the "fire" routine which will
845 * be spinning as we hold the lock. But this is ONLY an SMP issue. 845 * be spinning as we hold the lock. But this is ONLY an SMP issue.
@@ -924,7 +924,7 @@ retry:
924 924
925static int common_timer_del(struct k_itimer *timer) 925static int common_timer_del(struct k_itimer *timer)
926{ 926{
927 timer->it.real.interval.tv64 = 0; 927 timer->it.real.interval = 0;
928 928
929 if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0) 929 if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
930 return TIMER_RETRY; 930 return TIMER_RETRY;
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index 690b797f522e..a7bb8f33ae07 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -97,7 +97,7 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
97 ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer); 97 ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
98 98
99 if (clockevent_state_oneshot(&ce_broadcast_hrtimer)) 99 if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
100 if (ce_broadcast_hrtimer.next_event.tv64 != KTIME_MAX) 100 if (ce_broadcast_hrtimer.next_event != KTIME_MAX)
101 return HRTIMER_RESTART; 101 return HRTIMER_RESTART;
102 102
103 return HRTIMER_NORESTART; 103 return HRTIMER_NORESTART;
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index d2a20e83ebae..3109204c87cc 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -604,14 +604,14 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
604 bool bc_local; 604 bool bc_local;
605 605
606 raw_spin_lock(&tick_broadcast_lock); 606 raw_spin_lock(&tick_broadcast_lock);
607 dev->next_event.tv64 = KTIME_MAX; 607 dev->next_event = KTIME_MAX;
608 next_event.tv64 = KTIME_MAX; 608 next_event = KTIME_MAX;
609 cpumask_clear(tmpmask); 609 cpumask_clear(tmpmask);
610 now = ktime_get(); 610 now = ktime_get();
611 /* Find all expired events */ 611 /* Find all expired events */
612 for_each_cpu(cpu, tick_broadcast_oneshot_mask) { 612 for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
613 td = &per_cpu(tick_cpu_device, cpu); 613 td = &per_cpu(tick_cpu_device, cpu);
614 if (td->evtdev->next_event.tv64 <= now.tv64) { 614 if (td->evtdev->next_event <= now) {
615 cpumask_set_cpu(cpu, tmpmask); 615 cpumask_set_cpu(cpu, tmpmask);
616 /* 616 /*
617 * Mark the remote cpu in the pending mask, so 617 * Mark the remote cpu in the pending mask, so
@@ -619,8 +619,8 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
619 * timer in tick_broadcast_oneshot_control(). 619 * timer in tick_broadcast_oneshot_control().
620 */ 620 */
621 cpumask_set_cpu(cpu, tick_broadcast_pending_mask); 621 cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
622 } else if (td->evtdev->next_event.tv64 < next_event.tv64) { 622 } else if (td->evtdev->next_event < next_event) {
623 next_event.tv64 = td->evtdev->next_event.tv64; 623 next_event = td->evtdev->next_event;
624 next_cpu = cpu; 624 next_cpu = cpu;
625 } 625 }
626 } 626 }
@@ -657,7 +657,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
657 * - There are pending events on sleeping CPUs which were not 657 * - There are pending events on sleeping CPUs which were not
658 * in the event mask 658 * in the event mask
659 */ 659 */
660 if (next_event.tv64 != KTIME_MAX) 660 if (next_event != KTIME_MAX)
661 tick_broadcast_set_event(dev, next_cpu, next_event); 661 tick_broadcast_set_event(dev, next_cpu, next_event);
662 662
663 raw_spin_unlock(&tick_broadcast_lock); 663 raw_spin_unlock(&tick_broadcast_lock);
@@ -672,7 +672,7 @@ static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
672{ 672{
673 if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER)) 673 if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
674 return 0; 674 return 0;
675 if (bc->next_event.tv64 == KTIME_MAX) 675 if (bc->next_event == KTIME_MAX)
676 return 0; 676 return 0;
677 return bc->bound_on == cpu ? -EBUSY : 0; 677 return bc->bound_on == cpu ? -EBUSY : 0;
678} 678}
@@ -688,7 +688,7 @@ static void broadcast_shutdown_local(struct clock_event_device *bc,
688 if (bc->features & CLOCK_EVT_FEAT_HRTIMER) { 688 if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
689 if (broadcast_needs_cpu(bc, smp_processor_id())) 689 if (broadcast_needs_cpu(bc, smp_processor_id()))
690 return; 690 return;
691 if (dev->next_event.tv64 < bc->next_event.tv64) 691 if (dev->next_event < bc->next_event)
692 return; 692 return;
693 } 693 }
694 clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN); 694 clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
@@ -754,7 +754,7 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
754 */ 754 */
755 if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) { 755 if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
756 ret = -EBUSY; 756 ret = -EBUSY;
757 } else if (dev->next_event.tv64 < bc->next_event.tv64) { 757 } else if (dev->next_event < bc->next_event) {
758 tick_broadcast_set_event(bc, cpu, dev->next_event); 758 tick_broadcast_set_event(bc, cpu, dev->next_event);
759 /* 759 /*
760 * In case of hrtimer broadcasts the 760 * In case of hrtimer broadcasts the
@@ -789,7 +789,7 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
789 /* 789 /*
790 * Bail out if there is no next event. 790 * Bail out if there is no next event.
791 */ 791 */
792 if (dev->next_event.tv64 == KTIME_MAX) 792 if (dev->next_event == KTIME_MAX)
793 goto out; 793 goto out;
794 /* 794 /*
795 * If the pending bit is not set, then we are 795 * If the pending bit is not set, then we are
@@ -824,7 +824,7 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
824 * nohz fixups. 824 * nohz fixups.
825 */ 825 */
826 now = ktime_get(); 826 now = ktime_get();
827 if (dev->next_event.tv64 <= now.tv64) { 827 if (dev->next_event <= now) {
828 cpumask_set_cpu(cpu, tick_broadcast_force_mask); 828 cpumask_set_cpu(cpu, tick_broadcast_force_mask);
829 goto out; 829 goto out;
830 } 830 }
@@ -897,7 +897,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
897 tick_next_period); 897 tick_next_period);
898 tick_broadcast_set_event(bc, cpu, tick_next_period); 898 tick_broadcast_set_event(bc, cpu, tick_next_period);
899 } else 899 } else
900 bc->next_event.tv64 = KTIME_MAX; 900 bc->next_event = KTIME_MAX;
901 } else { 901 } else {
902 /* 902 /*
903 * The first cpu which switches to oneshot mode sets 903 * The first cpu which switches to oneshot mode sets
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 4fcd99e12aa0..49edc1c4f3e6 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -178,8 +178,8 @@ static void tick_setup_device(struct tick_device *td,
178 struct clock_event_device *newdev, int cpu, 178 struct clock_event_device *newdev, int cpu,
179 const struct cpumask *cpumask) 179 const struct cpumask *cpumask)
180{ 180{
181 ktime_t next_event;
182 void (*handler)(struct clock_event_device *) = NULL; 181 void (*handler)(struct clock_event_device *) = NULL;
182 ktime_t next_event = 0;
183 183
184 /* 184 /*
185 * First device setup ? 185 * First device setup ?
@@ -195,7 +195,7 @@ static void tick_setup_device(struct tick_device *td,
195 else 195 else
196 tick_do_timer_cpu = TICK_DO_TIMER_NONE; 196 tick_do_timer_cpu = TICK_DO_TIMER_NONE;
197 tick_next_period = ktime_get(); 197 tick_next_period = ktime_get();
198 tick_period = ktime_set(0, NSEC_PER_SEC / HZ); 198 tick_period = NSEC_PER_SEC / HZ;
199 } 199 }
200 200
201 /* 201 /*
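
The tick_period change above shows the second recurring pattern of the cleanup: a ktime_t built purely from nanoseconds no longer goes through ktime_set(0, nsec) and is assigned the scalar directly. The sketch below only mirrors the documented ktime_set(secs, nsecs) arithmetic (secs * NSEC_PER_SEC + nsecs) and leaves out the kernel's overflow clamp; sketch_ktime_set and NSEC_PER_SEC_SKETCH are illustrative names.

#include <assert.h>
#include <stdint.h>

#define NSEC_PER_SEC_SKETCH 1000000000LL

typedef int64_t ktime_sketch_t;

/* Mirrors the documented ktime_set(secs, nsecs) arithmetic
 * (secs * NSEC_PER_SEC + nsecs), without the kernel's overflow clamp. */
static ktime_sketch_t sketch_ktime_set(int64_t secs, uint64_t nsecs)
{
        return secs * NSEC_PER_SEC_SKETCH + (int64_t)nsecs;
}

int main(void)
{
        int hz = 250;   /* illustrative HZ value */

        /* old: tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
         * new: tick_period = NSEC_PER_SEC / HZ;
         * With a zero seconds part both forms produce the same scalar. */
        assert(sketch_ktime_set(0, NSEC_PER_SEC_SKETCH / hz) ==
               NSEC_PER_SEC_SKETCH / hz);
        return 0;
}
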
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index b51344652330..6b009c207671 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -28,7 +28,7 @@ int tick_program_event(ktime_t expires, int force)
28{ 28{
29 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); 29 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
30 30
31 if (unlikely(expires.tv64 == KTIME_MAX)) { 31 if (unlikely(expires == KTIME_MAX)) {
32 /* 32 /*
33 * We don't need the clock event device any more, stop it. 33 * We don't need the clock event device any more, stop it.
34 */ 34 */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 71496a20e670..2c115fdab397 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -58,21 +58,21 @@ static void tick_do_update_jiffies64(ktime_t now)
58 * Do a quick check without holding jiffies_lock: 58 * Do a quick check without holding jiffies_lock:
59 */ 59 */
60 delta = ktime_sub(now, last_jiffies_update); 60 delta = ktime_sub(now, last_jiffies_update);
61 if (delta.tv64 < tick_period.tv64) 61 if (delta < tick_period)
62 return; 62 return;
63 63
64 /* Reevaluate with jiffies_lock held */ 64 /* Reevaluate with jiffies_lock held */
65 write_seqlock(&jiffies_lock); 65 write_seqlock(&jiffies_lock);
66 66
67 delta = ktime_sub(now, last_jiffies_update); 67 delta = ktime_sub(now, last_jiffies_update);
68 if (delta.tv64 >= tick_period.tv64) { 68 if (delta >= tick_period) {
69 69
70 delta = ktime_sub(delta, tick_period); 70 delta = ktime_sub(delta, tick_period);
71 last_jiffies_update = ktime_add(last_jiffies_update, 71 last_jiffies_update = ktime_add(last_jiffies_update,
72 tick_period); 72 tick_period);
73 73
74 /* Slow path for long timeouts */ 74 /* Slow path for long timeouts */
75 if (unlikely(delta.tv64 >= tick_period.tv64)) { 75 if (unlikely(delta >= tick_period)) {
76 s64 incr = ktime_to_ns(tick_period); 76 s64 incr = ktime_to_ns(tick_period);
77 77
78 ticks = ktime_divns(delta, incr); 78 ticks = ktime_divns(delta, incr);
@@ -101,7 +101,7 @@ static ktime_t tick_init_jiffy_update(void)
101 101
102 write_seqlock(&jiffies_lock); 102 write_seqlock(&jiffies_lock);
103 /* Did we start the jiffies update yet ? */ 103 /* Did we start the jiffies update yet ? */
104 if (last_jiffies_update.tv64 == 0) 104 if (last_jiffies_update == 0)
105 last_jiffies_update = tick_next_period; 105 last_jiffies_update = tick_next_period;
106 period = last_jiffies_update; 106 period = last_jiffies_update;
107 write_sequnlock(&jiffies_lock); 107 write_sequnlock(&jiffies_lock);
@@ -669,7 +669,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
669 /* Read jiffies and the time when jiffies were updated last */ 669 /* Read jiffies and the time when jiffies were updated last */
670 do { 670 do {
671 seq = read_seqbegin(&jiffies_lock); 671 seq = read_seqbegin(&jiffies_lock);
672 basemono = last_jiffies_update.tv64; 672 basemono = last_jiffies_update;
673 basejiff = jiffies; 673 basejiff = jiffies;
674 } while (read_seqretry(&jiffies_lock, seq)); 674 } while (read_seqretry(&jiffies_lock, seq));
675 ts->last_jiffies = basejiff; 675 ts->last_jiffies = basejiff;
@@ -697,7 +697,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
697 */ 697 */
698 delta = next_tick - basemono; 698 delta = next_tick - basemono;
699 if (delta <= (u64)TICK_NSEC) { 699 if (delta <= (u64)TICK_NSEC) {
700 tick.tv64 = 0; 700 tick = 0;
701 701
702 /* 702 /*
703 * Tell the timer code that the base is not idle, i.e. undo 703 * Tell the timer code that the base is not idle, i.e. undo
@@ -764,10 +764,10 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
764 expires = KTIME_MAX; 764 expires = KTIME_MAX;
765 765
766 expires = min_t(u64, expires, next_tick); 766 expires = min_t(u64, expires, next_tick);
767 tick.tv64 = expires; 767 tick = expires;
768 768
769 /* Skip reprogram of event if its not changed */ 769 /* Skip reprogram of event if its not changed */
770 if (ts->tick_stopped && (expires == dev->next_event.tv64)) 770 if (ts->tick_stopped && (expires == dev->next_event))
771 goto out; 771 goto out;
772 772
773 /* 773 /*
@@ -864,7 +864,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
864 } 864 }
865 865
866 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) { 866 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
867 ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ }; 867 ts->sleep_length = NSEC_PER_SEC / HZ;
868 return false; 868 return false;
869 } 869 }
870 870
@@ -914,7 +914,7 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
914 ts->idle_calls++; 914 ts->idle_calls++;
915 915
916 expires = tick_nohz_stop_sched_tick(ts, now, cpu); 916 expires = tick_nohz_stop_sched_tick(ts, now, cpu);
917 if (expires.tv64 > 0LL) { 917 if (expires > 0LL) {
918 ts->idle_sleeps++; 918 ts->idle_sleeps++;
919 ts->idle_expires = expires; 919 ts->idle_expires = expires;
920 } 920 }
@@ -1051,7 +1051,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
1051 struct pt_regs *regs = get_irq_regs(); 1051 struct pt_regs *regs = get_irq_regs();
1052 ktime_t now = ktime_get(); 1052 ktime_t now = ktime_get();
1053 1053
1054 dev->next_event.tv64 = KTIME_MAX; 1054 dev->next_event = KTIME_MAX;
1055 1055
1056 tick_sched_do_timer(now); 1056 tick_sched_do_timer(now);
1057 tick_sched_handle(ts, regs); 1057 tick_sched_handle(ts, regs);
diff --git a/kernel/time/timecounter.c b/kernel/time/timecounter.c
index 4687b3104bae..8afd78932bdf 100644
--- a/kernel/time/timecounter.c
+++ b/kernel/time/timecounter.c
@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(timecounter_init);
43 */ 43 */
44static u64 timecounter_read_delta(struct timecounter *tc) 44static u64 timecounter_read_delta(struct timecounter *tc)
45{ 45{
46 cycle_t cycle_now, cycle_delta; 46 u64 cycle_now, cycle_delta;
47 u64 ns_offset; 47 u64 ns_offset;
48 48
49 /* read cycle counter: */ 49 /* read cycle counter: */
@@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(timecounter_read);
80 * time previous to the time stored in the cycle counter. 80 * time previous to the time stored in the cycle counter.
81 */ 81 */
82static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc, 82static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc,
83 cycle_t cycles, u64 mask, u64 frac) 83 u64 cycles, u64 mask, u64 frac)
84{ 84{
85 u64 ns = (u64) cycles; 85 u64 ns = (u64) cycles;
86 86
@@ -90,7 +90,7 @@ static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc,
90} 90}
91 91
92u64 timecounter_cyc2time(struct timecounter *tc, 92u64 timecounter_cyc2time(struct timecounter *tc,
93 cycle_t cycle_tstamp) 93 u64 cycle_tstamp)
94{ 94{
95 u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask; 95 u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
96 u64 nsec = tc->nsec, frac = tc->frac; 96 u64 nsec = tc->nsec, frac = tc->frac;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index da233cdf89b0..db087d7e106d 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -104,7 +104,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
104 */ 104 */
105 set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec, 105 set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
106 -tk->wall_to_monotonic.tv_nsec); 106 -tk->wall_to_monotonic.tv_nsec);
107 WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64); 107 WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
108 tk->wall_to_monotonic = wtm; 108 tk->wall_to_monotonic = wtm;
109 set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec); 109 set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
110 tk->offs_real = timespec64_to_ktime(tmp); 110 tk->offs_real = timespec64_to_ktime(tmp);
@@ -119,10 +119,10 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
119#ifdef CONFIG_DEBUG_TIMEKEEPING 119#ifdef CONFIG_DEBUG_TIMEKEEPING
120#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */ 120#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
121 121
122static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset) 122static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
123{ 123{
124 124
125 cycle_t max_cycles = tk->tkr_mono.clock->max_cycles; 125 u64 max_cycles = tk->tkr_mono.clock->max_cycles;
126 const char *name = tk->tkr_mono.clock->name; 126 const char *name = tk->tkr_mono.clock->name;
127 127
128 if (offset > max_cycles) { 128 if (offset > max_cycles) {
@@ -158,10 +158,10 @@ static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
158 } 158 }
159} 159}
160 160
161static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr) 161static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
162{ 162{
163 struct timekeeper *tk = &tk_core.timekeeper; 163 struct timekeeper *tk = &tk_core.timekeeper;
164 cycle_t now, last, mask, max, delta; 164 u64 now, last, mask, max, delta;
165 unsigned int seq; 165 unsigned int seq;
166 166
167 /* 167 /*
@@ -199,12 +199,12 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
199 return delta; 199 return delta;
200} 200}
201#else 201#else
202static inline void timekeeping_check_update(struct timekeeper *tk, cycle_t offset) 202static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
203{ 203{
204} 204}
205static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr) 205static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
206{ 206{
207 cycle_t cycle_now, delta; 207 u64 cycle_now, delta;
208 208
209 /* read clocksource */ 209 /* read clocksource */
210 cycle_now = tkr->read(tkr->clock); 210 cycle_now = tkr->read(tkr->clock);
@@ -229,7 +229,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
229 */ 229 */
230static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) 230static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
231{ 231{
232 cycle_t interval; 232 u64 interval;
233 u64 tmp, ntpinterval; 233 u64 tmp, ntpinterval;
234 struct clocksource *old_clock; 234 struct clocksource *old_clock;
235 235
@@ -254,7 +254,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
254 if (tmp == 0) 254 if (tmp == 0)
255 tmp = 1; 255 tmp = 1;
256 256
257 interval = (cycle_t) tmp; 257 interval = (u64) tmp;
258 tk->cycle_interval = interval; 258 tk->cycle_interval = interval;
259 259
260 /* Go back from cycles -> shifted ns */ 260 /* Go back from cycles -> shifted ns */
@@ -298,8 +298,7 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
298static inline u32 arch_gettimeoffset(void) { return 0; } 298static inline u32 arch_gettimeoffset(void) { return 0; }
299#endif 299#endif
300 300
301static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, 301static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
302 cycle_t delta)
303{ 302{
304 u64 nsec; 303 u64 nsec;
305 304
@@ -312,16 +311,15 @@ static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
312 311
313static inline u64 timekeeping_get_ns(struct tk_read_base *tkr) 312static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
314{ 313{
315 cycle_t delta; 314 u64 delta;
316 315
317 delta = timekeeping_get_delta(tkr); 316 delta = timekeeping_get_delta(tkr);
318 return timekeeping_delta_to_ns(tkr, delta); 317 return timekeeping_delta_to_ns(tkr, delta);
319} 318}
320 319
321static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, 320static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
322 cycle_t cycles)
323{ 321{
324 cycle_t delta; 322 u64 delta;
325 323
326 /* calculate the delta since the last update_wall_time */ 324 /* calculate the delta since the last update_wall_time */
327 delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask); 325 delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
@@ -454,9 +452,9 @@ u64 notrace ktime_get_boot_fast_ns(void)
454EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns); 452EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
455 453
456/* Suspend-time cycles value for halted fast timekeeper. */ 454/* Suspend-time cycles value for halted fast timekeeper. */
457static cycle_t cycles_at_suspend; 455static u64 cycles_at_suspend;
458 456
459static cycle_t dummy_clock_read(struct clocksource *cs) 457static u64 dummy_clock_read(struct clocksource *cs)
460{ 458{
461 return cycles_at_suspend; 459 return cycles_at_suspend;
462} 460}
@@ -573,7 +571,7 @@ EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
573static inline void tk_update_leap_state(struct timekeeper *tk) 571static inline void tk_update_leap_state(struct timekeeper *tk)
574{ 572{
575 tk->next_leap_ktime = ntp_get_next_leap(); 573 tk->next_leap_ktime = ntp_get_next_leap();
576 if (tk->next_leap_ktime.tv64 != KTIME_MAX) 574 if (tk->next_leap_ktime != KTIME_MAX)
577 /* Convert to monotonic time */ 575 /* Convert to monotonic time */
578 tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real); 576 tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
579} 577}
@@ -650,7 +648,7 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
650static void timekeeping_forward_now(struct timekeeper *tk) 648static void timekeeping_forward_now(struct timekeeper *tk)
651{ 649{
652 struct clocksource *clock = tk->tkr_mono.clock; 650 struct clocksource *clock = tk->tkr_mono.clock;
653 cycle_t cycle_now, delta; 651 u64 cycle_now, delta;
654 u64 nsec; 652 u64 nsec;
655 653
656 cycle_now = tk->tkr_mono.read(clock); 654 cycle_now = tk->tkr_mono.read(clock);
@@ -923,7 +921,7 @@ void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
923 ktime_t base_real; 921 ktime_t base_real;
924 u64 nsec_raw; 922 u64 nsec_raw;
925 u64 nsec_real; 923 u64 nsec_real;
926 cycle_t now; 924 u64 now;
927 925
928 WARN_ON_ONCE(timekeeping_suspended); 926 WARN_ON_ONCE(timekeeping_suspended);
929 927
@@ -982,8 +980,8 @@ static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
982 * interval is partial_history_cycles. 980 * interval is partial_history_cycles.
983 */ 981 */
984static int adjust_historical_crosststamp(struct system_time_snapshot *history, 982static int adjust_historical_crosststamp(struct system_time_snapshot *history,
985 cycle_t partial_history_cycles, 983 u64 partial_history_cycles,
986 cycle_t total_history_cycles, 984 u64 total_history_cycles,
987 bool discontinuity, 985 bool discontinuity,
988 struct system_device_crosststamp *ts) 986 struct system_device_crosststamp *ts)
989{ 987{
@@ -1047,7 +1045,7 @@ static int adjust_historical_crosststamp(struct system_time_snapshot *history,
1047/* 1045/*
1048 * cycle_between - true if test occurs chronologically between before and after 1046 * cycle_between - true if test occurs chronologically between before and after
1049 */ 1047 */
1050static bool cycle_between(cycle_t before, cycle_t test, cycle_t after) 1048static bool cycle_between(u64 before, u64 test, u64 after)
1051{ 1049{
1052 if (test > before && test < after) 1050 if (test > before && test < after)
1053 return true; 1051 return true;
@@ -1077,7 +1075,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
1077{ 1075{
1078 struct system_counterval_t system_counterval; 1076 struct system_counterval_t system_counterval;
1079 struct timekeeper *tk = &tk_core.timekeeper; 1077 struct timekeeper *tk = &tk_core.timekeeper;
1080 cycle_t cycles, now, interval_start; 1078 u64 cycles, now, interval_start;
1081 unsigned int clock_was_set_seq = 0; 1079 unsigned int clock_was_set_seq = 0;
1082 ktime_t base_real, base_raw; 1080 ktime_t base_real, base_raw;
1083 u64 nsec_real, nsec_raw; 1081 u64 nsec_real, nsec_raw;
@@ -1138,7 +1136,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
1138 * current interval 1136 * current interval
1139 */ 1137 */
1140 if (do_interp) { 1138 if (do_interp) {
1141 cycle_t partial_history_cycles, total_history_cycles; 1139 u64 partial_history_cycles, total_history_cycles;
1142 bool discontinuity; 1140 bool discontinuity;
1143 1141
1144 /* 1142 /*
@@ -1644,7 +1642,7 @@ void timekeeping_resume(void)
1644 struct clocksource *clock = tk->tkr_mono.clock; 1642 struct clocksource *clock = tk->tkr_mono.clock;
1645 unsigned long flags; 1643 unsigned long flags;
1646 struct timespec64 ts_new, ts_delta; 1644 struct timespec64 ts_new, ts_delta;
1647 cycle_t cycle_now; 1645 u64 cycle_now;
1648 1646
1649 sleeptime_injected = false; 1647 sleeptime_injected = false;
1650 read_persistent_clock64(&ts_new); 1648 read_persistent_clock64(&ts_new);
@@ -2010,11 +2008,10 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
2010 * 2008 *
2011 * Returns the unconsumed cycles. 2009 * Returns the unconsumed cycles.
2012 */ 2010 */
2013static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, 2011static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
2014 u32 shift, 2012 u32 shift, unsigned int *clock_set)
2015 unsigned int *clock_set)
2016{ 2013{
2017 cycle_t interval = tk->cycle_interval << shift; 2014 u64 interval = tk->cycle_interval << shift;
2018 u64 raw_nsecs; 2015 u64 raw_nsecs;
2019 2016
2020 /* If the offset is smaller than a shifted interval, do nothing */ 2017 /* If the offset is smaller than a shifted interval, do nothing */
@@ -2055,7 +2052,7 @@ void update_wall_time(void)
2055{ 2052{
2056 struct timekeeper *real_tk = &tk_core.timekeeper; 2053 struct timekeeper *real_tk = &tk_core.timekeeper;
2057 struct timekeeper *tk = &shadow_timekeeper; 2054 struct timekeeper *tk = &shadow_timekeeper;
2058 cycle_t offset; 2055 u64 offset;
2059 int shift = 0, maxshift; 2056 int shift = 0, maxshift;
2060 unsigned int clock_set = 0; 2057 unsigned int clock_set = 0;
2061 unsigned long flags; 2058 unsigned long flags;
@@ -2253,7 +2250,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
2253 } 2250 }
2254 2251
2255 /* Handle leapsecond insertion adjustments */ 2252 /* Handle leapsecond insertion adjustments */
2256 if (unlikely(base.tv64 >= tk->next_leap_ktime.tv64)) 2253 if (unlikely(base >= tk->next_leap_ktime))
2257 *offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0)); 2254 *offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
2258 2255
2259 } while (read_seqcount_retry(&tk_core.seq, seq)); 2256 } while (read_seqcount_retry(&tk_core.seq, seq));
diff --git a/kernel/time/timekeeping_internal.h b/kernel/time/timekeeping_internal.h
index 5be76270ec4a..9a18f121f399 100644
--- a/kernel/time/timekeeping_internal.h
+++ b/kernel/time/timekeeping_internal.h
@@ -13,9 +13,9 @@ extern void tk_debug_account_sleep_time(struct timespec64 *t);
13#endif 13#endif
14 14
15#ifdef CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE 15#ifdef CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE
16static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask) 16static inline u64 clocksource_delta(u64 now, u64 last, u64 mask)
17{ 17{
18 cycle_t ret = (now - last) & mask; 18 u64 ret = (now - last) & mask;
19 19
20 /* 20 /*
21 * Prevent time going backwards by checking the MSB of mask in 21 * Prevent time going backwards by checking the MSB of mask in
@@ -24,7 +24,7 @@ static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)
24 return ret & ~(mask >> 1) ? 0 : ret; 24 return ret & ~(mask >> 1) ? 0 : ret;
25} 25}
26#else 26#else
27static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask) 27static inline u64 clocksource_delta(u64 now, u64 last, u64 mask)
28{ 28{
29 return (now - last) & mask; 29 return (now - last) & mask;
30} 30}
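
The timekeeping_internal.h hunk above is the cleanest illustration of the cycle_t -> u64 half of the series: the wraparound-safe delta logic is untouched and only the type name changes. Restated here as a self-contained userspace function, with uint64_t standing in for the kernel's u64 and sketch_clocksource_delta as an illustrative name.

#include <stdint.h>
#include <stdio.h>

/* Same logic as the CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE variant of
 * clocksource_delta() shown above, with uint64_t standing in for u64. */
static uint64_t sketch_clocksource_delta(uint64_t now, uint64_t last,
                                         uint64_t mask)
{
        uint64_t ret = (now - last) & mask;

        /* An apparent move backwards sets the MSB of the masked delta;
         * report it as 0 instead of a huge positive delta. */
        return ret & ~(mask >> 1) ? 0 : ret;
}

int main(void)
{
        uint64_t mask = 0xffffffffULL;          /* a 32-bit wide counter */

        printf("%llu\n", (unsigned long long)
               sketch_clocksource_delta(100, 90, mask));  /* 10            */
        printf("%llu\n", (unsigned long long)
               sketch_clocksource_delta(90, 100, mask));  /* 0 (backwards) */
        return 0;
}

The same masked-subtraction idiom underlies the delta computations in the converted timekeeping hunks above.
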
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1f0f547c54da..eb230f06ba41 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2847,7 +2847,7 @@ static void ftrace_shutdown_sysctl(void)
2847 } 2847 }
2848} 2848}
2849 2849
2850static cycle_t ftrace_update_time; 2850static u64 ftrace_update_time;
2851unsigned long ftrace_update_tot_cnt; 2851unsigned long ftrace_update_tot_cnt;
2852 2852
2853static inline int ops_traces_mod(struct ftrace_ops *ops) 2853static inline int ops_traces_mod(struct ftrace_ops *ops)
@@ -2894,7 +2894,7 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
2894{ 2894{
2895 struct ftrace_page *pg; 2895 struct ftrace_page *pg;
2896 struct dyn_ftrace *p; 2896 struct dyn_ftrace *p;
2897 cycle_t start, stop; 2897 u64 start, stop;
2898 unsigned long update_cnt = 0; 2898 unsigned long update_cnt = 0;
2899 unsigned long rec_flags = 0; 2899 unsigned long rec_flags = 0;
2900 int i; 2900 int i;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 66f829c47bec..d7449783987a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -236,7 +236,7 @@ static int __init set_tracepoint_printk(char *str)
236} 236}
237__setup("tp_printk", set_tracepoint_printk); 237__setup("tp_printk", set_tracepoint_printk);
238 238
239unsigned long long ns2usecs(cycle_t nsec) 239unsigned long long ns2usecs(u64 nsec)
240{ 240{
241 nsec += 500; 241 nsec += 500;
242 do_div(nsec, 1000); 242 do_div(nsec, 1000);
@@ -573,7 +573,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
573 return read; 573 return read;
574} 574}
575 575
576static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu) 576static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
577{ 577{
578 u64 ts; 578 u64 ts;
579 579
@@ -587,7 +587,7 @@ static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
587 return ts; 587 return ts;
588} 588}
589 589
590cycle_t ftrace_now(int cpu) 590u64 ftrace_now(int cpu)
591{ 591{
592 return buffer_ftrace_now(&global_trace.trace_buffer, cpu); 592 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
593} 593}
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index c2234494f40c..1ea51ab53edf 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -159,7 +159,7 @@ struct trace_array_cpu {
159 unsigned long policy; 159 unsigned long policy;
160 unsigned long rt_priority; 160 unsigned long rt_priority;
161 unsigned long skipped_entries; 161 unsigned long skipped_entries;
162 cycle_t preempt_timestamp; 162 u64 preempt_timestamp;
163 pid_t pid; 163 pid_t pid;
164 kuid_t uid; 164 kuid_t uid;
165 char comm[TASK_COMM_LEN]; 165 char comm[TASK_COMM_LEN];
@@ -177,7 +177,7 @@ struct trace_buffer {
177 struct trace_array *tr; 177 struct trace_array *tr;
178 struct ring_buffer *buffer; 178 struct ring_buffer *buffer;
179 struct trace_array_cpu __percpu *data; 179 struct trace_array_cpu __percpu *data;
180 cycle_t time_start; 180 u64 time_start;
181 int cpu; 181 int cpu;
182}; 182};
183 183
@@ -689,7 +689,7 @@ static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
689} 689}
690#endif /* CONFIG_STACKTRACE */ 690#endif /* CONFIG_STACKTRACE */
691 691
692extern cycle_t ftrace_now(int cpu); 692extern u64 ftrace_now(int cpu);
693 693
694extern void trace_find_cmdline(int pid, char comm[]); 694extern void trace_find_cmdline(int pid, char comm[]);
695extern void trace_event_follow_fork(struct trace_array *tr, bool enable); 695extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
@@ -736,7 +736,7 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
736#endif /* CONFIG_FTRACE_STARTUP_TEST */ 736#endif /* CONFIG_FTRACE_STARTUP_TEST */
737 737
738extern void *head_page(struct trace_array_cpu *data); 738extern void *head_page(struct trace_array_cpu *data);
739extern unsigned long long ns2usecs(cycle_t nsec); 739extern unsigned long long ns2usecs(u64 nsec);
740extern int 740extern int
741trace_vbprintk(unsigned long ip, const char *fmt, va_list args); 741trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
742extern int 742extern int
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 86654d7e1afe..7758bc0617cb 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -298,7 +298,7 @@ static void irqsoff_print_header(struct seq_file *s)
298/* 298/*
299 * Should this new latency be reported/recorded? 299 * Should this new latency be reported/recorded?
300 */ 300 */
301static bool report_latency(struct trace_array *tr, cycle_t delta) 301static bool report_latency(struct trace_array *tr, u64 delta)
302{ 302{
303 if (tracing_thresh) { 303 if (tracing_thresh) {
304 if (delta < tracing_thresh) 304 if (delta < tracing_thresh)
@@ -316,7 +316,7 @@ check_critical_timing(struct trace_array *tr,
316 unsigned long parent_ip, 316 unsigned long parent_ip,
317 int cpu) 317 int cpu)
318{ 318{
319 cycle_t T0, T1, delta; 319 u64 T0, T1, delta;
320 unsigned long flags; 320 unsigned long flags;
321 int pc; 321 int pc;
322 322
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 5d0bb025bb21..ddec53b67646 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -358,7 +358,7 @@ static void wakeup_print_header(struct seq_file *s)
358/* 358/*
359 * Should this new latency be reported/recorded? 359 * Should this new latency be reported/recorded?
360 */ 360 */
361static bool report_latency(struct trace_array *tr, cycle_t delta) 361static bool report_latency(struct trace_array *tr, u64 delta)
362{ 362{
363 if (tracing_thresh) { 363 if (tracing_thresh) {
364 if (delta < tracing_thresh) 364 if (delta < tracing_thresh)
@@ -440,7 +440,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
440 struct task_struct *prev, struct task_struct *next) 440 struct task_struct *prev, struct task_struct *next)
441{ 441{
442 struct trace_array_cpu *data; 442 struct trace_array_cpu *data;
443 cycle_t T0, T1, delta; 443 u64 T0, T1, delta;
444 unsigned long flags; 444 unsigned long flags;
445 long disabled; 445 long disabled;
446 int cpu; 446 int cpu;
diff --git a/lib/timerqueue.c b/lib/timerqueue.c
index 782ae8ca2c06..adc6ee0a5126 100644
--- a/lib/timerqueue.c
+++ b/lib/timerqueue.c
@@ -48,7 +48,7 @@ bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
48 while (*p) { 48 while (*p) {
49 parent = *p; 49 parent = *p;
50 ptr = rb_entry(parent, struct timerqueue_node, node); 50 ptr = rb_entry(parent, struct timerqueue_node, node);
51 if (node->expires.tv64 < ptr->expires.tv64) 51 if (node->expires < ptr->expires)
52 p = &(*p)->rb_left; 52 p = &(*p)->rb_left;
53 else 53 else
54 p = &(*p)->rb_right; 54 p = &(*p)->rb_right;
@@ -56,7 +56,7 @@ bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
56 rb_link_node(&node->node, parent, p); 56 rb_link_node(&node->node, parent, p);
57 rb_insert_color(&node->node, &head->head); 57 rb_insert_color(&node->node, &head->head);
58 58
59 if (!head->next || node->expires.tv64 < head->next->expires.tv64) { 59 if (!head->next || node->expires < head->next->expires) {
60 head->next = node; 60 head->next = node;
61 return true; 61 return true;
62 } 62 }
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 436a7537e6a9..21ac75390e3d 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -199,11 +199,11 @@ static int bcm_proc_show(struct seq_file *m, void *v)
199 199
200 seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' '); 200 seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' ');
201 201
202 if (op->kt_ival1.tv64) 202 if (op->kt_ival1)
203 seq_printf(m, "timeo=%lld ", 203 seq_printf(m, "timeo=%lld ",
204 (long long)ktime_to_us(op->kt_ival1)); 204 (long long)ktime_to_us(op->kt_ival1));
205 205
206 if (op->kt_ival2.tv64) 206 if (op->kt_ival2)
207 seq_printf(m, "thr=%lld ", 207 seq_printf(m, "thr=%lld ",
208 (long long)ktime_to_us(op->kt_ival2)); 208 (long long)ktime_to_us(op->kt_ival2));
209 209
@@ -226,11 +226,11 @@ static int bcm_proc_show(struct seq_file *m, void *v)
226 else 226 else
227 seq_printf(m, "[%u] ", op->nframes); 227 seq_printf(m, "[%u] ", op->nframes);
228 228
229 if (op->kt_ival1.tv64) 229 if (op->kt_ival1)
230 seq_printf(m, "t1=%lld ", 230 seq_printf(m, "t1=%lld ",
231 (long long)ktime_to_us(op->kt_ival1)); 231 (long long)ktime_to_us(op->kt_ival1));
232 232
233 if (op->kt_ival2.tv64) 233 if (op->kt_ival2)
234 seq_printf(m, "t2=%lld ", 234 seq_printf(m, "t2=%lld ",
235 (long long)ktime_to_us(op->kt_ival2)); 235 (long long)ktime_to_us(op->kt_ival2));
236 236
@@ -365,11 +365,11 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
365 365
366static void bcm_tx_start_timer(struct bcm_op *op) 366static void bcm_tx_start_timer(struct bcm_op *op)
367{ 367{
368 if (op->kt_ival1.tv64 && op->count) 368 if (op->kt_ival1 && op->count)
369 hrtimer_start(&op->timer, 369 hrtimer_start(&op->timer,
370 ktime_add(ktime_get(), op->kt_ival1), 370 ktime_add(ktime_get(), op->kt_ival1),
371 HRTIMER_MODE_ABS); 371 HRTIMER_MODE_ABS);
372 else if (op->kt_ival2.tv64) 372 else if (op->kt_ival2)
373 hrtimer_start(&op->timer, 373 hrtimer_start(&op->timer,
374 ktime_add(ktime_get(), op->kt_ival2), 374 ktime_add(ktime_get(), op->kt_ival2),
375 HRTIMER_MODE_ABS); 375 HRTIMER_MODE_ABS);
@@ -380,7 +380,7 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
380 struct bcm_op *op = (struct bcm_op *)data; 380 struct bcm_op *op = (struct bcm_op *)data;
381 struct bcm_msg_head msg_head; 381 struct bcm_msg_head msg_head;
382 382
383 if (op->kt_ival1.tv64 && (op->count > 0)) { 383 if (op->kt_ival1 && (op->count > 0)) {
384 384
385 op->count--; 385 op->count--;
386 if (!op->count && (op->flags & TX_COUNTEVT)) { 386 if (!op->count && (op->flags & TX_COUNTEVT)) {
@@ -398,7 +398,7 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
398 } 398 }
399 bcm_can_tx(op); 399 bcm_can_tx(op);
400 400
401 } else if (op->kt_ival2.tv64) 401 } else if (op->kt_ival2)
402 bcm_can_tx(op); 402 bcm_can_tx(op);
403 403
404 bcm_tx_start_timer(op); 404 bcm_tx_start_timer(op);
@@ -459,7 +459,7 @@ static void bcm_rx_update_and_send(struct bcm_op *op,
459 lastdata->flags |= (RX_RECV|RX_THR); 459 lastdata->flags |= (RX_RECV|RX_THR);
460 460
461 /* throttling mode inactive ? */ 461 /* throttling mode inactive ? */
462 if (!op->kt_ival2.tv64) { 462 if (!op->kt_ival2) {
463 /* send RX_CHANGED to the user immediately */ 463 /* send RX_CHANGED to the user immediately */
464 bcm_rx_changed(op, lastdata); 464 bcm_rx_changed(op, lastdata);
465 return; 465 return;
@@ -470,7 +470,7 @@ static void bcm_rx_update_and_send(struct bcm_op *op,
470 return; 470 return;
471 471
472 /* first reception with enabled throttling mode */ 472 /* first reception with enabled throttling mode */
473 if (!op->kt_lastmsg.tv64) 473 if (!op->kt_lastmsg)
474 goto rx_changed_settime; 474 goto rx_changed_settime;
475 475
476 /* got a second frame inside a potential throttle period? */ 476 /* got a second frame inside a potential throttle period? */
@@ -537,7 +537,7 @@ static void bcm_rx_starttimer(struct bcm_op *op)
537 if (op->flags & RX_NO_AUTOTIMER) 537 if (op->flags & RX_NO_AUTOTIMER)
538 return; 538 return;
539 539
540 if (op->kt_ival1.tv64) 540 if (op->kt_ival1)
541 hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL); 541 hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
542} 542}
543 543
@@ -643,7 +643,7 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
643 return HRTIMER_RESTART; 643 return HRTIMER_RESTART;
644 } else { 644 } else {
645 /* rearm throttle handling */ 645 /* rearm throttle handling */
646 op->kt_lastmsg = ktime_set(0, 0); 646 op->kt_lastmsg = 0;
647 return HRTIMER_NORESTART; 647 return HRTIMER_NORESTART;
648 } 648 }
649} 649}
@@ -1005,7 +1005,7 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1005 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2); 1005 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
1006 1006
1007 /* disable an active timer due to zero values? */ 1007 /* disable an active timer due to zero values? */
1008 if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64) 1008 if (!op->kt_ival1 && !op->kt_ival2)
1009 hrtimer_cancel(&op->timer); 1009 hrtimer_cancel(&op->timer);
1010 } 1010 }
1011 1011
@@ -1189,19 +1189,19 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1189 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2); 1189 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
1190 1190
1191 /* disable an active timer due to zero value? */ 1191 /* disable an active timer due to zero value? */
1192 if (!op->kt_ival1.tv64) 1192 if (!op->kt_ival1)
1193 hrtimer_cancel(&op->timer); 1193 hrtimer_cancel(&op->timer);
1194 1194
1195 /* 1195 /*
1196 * In any case cancel the throttle timer, flush 1196 * In any case cancel the throttle timer, flush
1197 * potentially blocked msgs and reset throttle handling 1197 * potentially blocked msgs and reset throttle handling
1198 */ 1198 */
1199 op->kt_lastmsg = ktime_set(0, 0); 1199 op->kt_lastmsg = 0;
1200 hrtimer_cancel(&op->thrtimer); 1200 hrtimer_cancel(&op->thrtimer);
1201 bcm_rx_thr_flush(op, 1); 1201 bcm_rx_thr_flush(op, 1);
1202 } 1202 }
1203 1203
1204 if ((op->flags & STARTTIMER) && op->kt_ival1.tv64) 1204 if ((op->flags & STARTTIMER) && op->kt_ival1)
1205 hrtimer_start(&op->timer, op->kt_ival1, 1205 hrtimer_start(&op->timer, op->kt_ival1,
1206 HRTIMER_MODE_REL); 1206 HRTIMER_MODE_REL);
1207 } 1207 }
diff --git a/net/can/gw.c b/net/can/gw.c
index 455168718c2e..a54ab0c82104 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -429,7 +429,7 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
429 429
430 /* clear the skb timestamp if not configured the other way */ 430 /* clear the skb timestamp if not configured the other way */
431 if (!(gwj->flags & CGW_FLAGS_CAN_SRC_TSTAMP)) 431 if (!(gwj->flags & CGW_FLAGS_CAN_SRC_TSTAMP))
432 nskb->tstamp.tv64 = 0; 432 nskb->tstamp = 0;
433 433
434 /* send to netdevice */ 434 /* send to netdevice */
435 if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO)) 435 if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO))
diff --git a/net/core/dev.c b/net/core/dev.c
index 037ffd27fcc2..8db5a0b4b520 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1731,14 +1731,14 @@ EXPORT_SYMBOL(net_disable_timestamp);
1731 1731
1732static inline void net_timestamp_set(struct sk_buff *skb) 1732static inline void net_timestamp_set(struct sk_buff *skb)
1733{ 1733{
1734 skb->tstamp.tv64 = 0; 1734 skb->tstamp = 0;
1735 if (static_key_false(&netstamp_needed)) 1735 if (static_key_false(&netstamp_needed))
1736 __net_timestamp(skb); 1736 __net_timestamp(skb);
1737} 1737}
1738 1738
1739#define net_timestamp_check(COND, SKB) \ 1739#define net_timestamp_check(COND, SKB) \
1740 if (static_key_false(&netstamp_needed)) { \ 1740 if (static_key_false(&netstamp_needed)) { \
1741 if ((COND) && !(SKB)->tstamp.tv64) \ 1741 if ((COND) && !(SKB)->tstamp) \
1742 __net_timestamp(SKB); \ 1742 __net_timestamp(SKB); \
1743 } \ 1743 } \
1744 1744
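
In the networking hunks the scalar ktime_t appears as skb->tstamp, where the value 0 keeps its role as "not timestamped yet": clearing becomes a plain assignment and the presence test a plain truth check, as in net_timestamp_check() above. The sketch below shows that sentinel convention; sketch_skb and sketch_stamp_if_needed are illustrative names, not kernel structures or helpers.

#include <stdint.h>
#include <stdio.h>

struct sketch_skb {
        int64_t tstamp;         /* nanoseconds; 0 means "not stamped yet" */
};

/* Stamp the buffer only if it has not been stamped already, mirroring
 * the "(COND) && !(SKB)->tstamp" test in net_timestamp_check() above. */
static void sketch_stamp_if_needed(struct sketch_skb *skb, int64_t now_ns)
{
        if (!skb->tstamp)
                skb->tstamp = now_ns;
}

int main(void)
{
        struct sketch_skb skb = { .tstamp = 0 };        /* old: tstamp.tv64 = 0 */

        sketch_stamp_if_needed(&skb, 1234567890LL);
        printf("%lld\n", (long long)skb.tstamp);        /* 1234567890          */

        sketch_stamp_if_needed(&skb, 999LL);            /* already stamped     */
        printf("%lld\n", (long long)skb.tstamp);        /* still 1234567890    */
        return 0;
}
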
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e77f40616fea..5a03730fbc1a 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4368,7 +4368,7 @@ EXPORT_SYMBOL(skb_try_coalesce);
4368 */ 4368 */
4369void skb_scrub_packet(struct sk_buff *skb, bool xnet) 4369void skb_scrub_packet(struct sk_buff *skb, bool xnet)
4370{ 4370{
4371 skb->tstamp.tv64 = 0; 4371 skb->tstamp = 0;
4372 skb->pkt_type = PACKET_HOST; 4372 skb->pkt_type = PACKET_HOST;
4373 skb->skb_iif = 0; 4373 skb->skb_iif = 0;
4374 skb->ignore_df = 0; 4374 skb->ignore_df = 0;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 31a255b555ad..1d5331a1b1dc 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1038,7 +1038,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
1038 skb_shinfo(skb)->gso_size = tcp_skb_mss(skb); 1038 skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
1039 1039
1040 /* Our usage of tstamp should remain private */ 1040 /* Our usage of tstamp should remain private */
1041 skb->tstamp.tv64 = 0; 1041 skb->tstamp = 0;
1042 1042
1043 /* Cleanup our debris for IP stacks */ 1043 /* Cleanup our debris for IP stacks */
1044 memset(skb->cb, 0, max(sizeof(struct inet_skb_parm), 1044 memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
@@ -3203,7 +3203,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
3203#endif 3203#endif
3204 3204
3205 /* Do not fool tcpdump (if any), clean our debris */ 3205 /* Do not fool tcpdump (if any), clean our debris */
3206 skb->tstamp.tv64 = 0; 3206 skb->tstamp = 0;
3207 return skb; 3207 return skb;
3208} 3208}
3209EXPORT_SYMBOL(tcp_make_synack); 3209EXPORT_SYMBOL(tcp_make_synack);
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 926818c331e5..e4198502fd98 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -232,7 +232,7 @@ static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
232 ipv6h->saddr = hao->addr; 232 ipv6h->saddr = hao->addr;
233 hao->addr = tmp_addr; 233 hao->addr = tmp_addr;
234 234
235 if (skb->tstamp.tv64 == 0) 235 if (skb->tstamp == 0)
236 __net_timestamp(skb); 236 __net_timestamp(skb);
237 237
238 return true; 238 return true;
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 60c79a08e14a..64f0f7be9e5e 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -191,7 +191,7 @@ static inline int mip6_report_rl_allow(ktime_t stamp,
191 int allow = 0; 191 int allow = 0;
192 192
193 spin_lock_bh(&mip6_report_rl.lock); 193 spin_lock_bh(&mip6_report_rl.lock);
194 if (!ktime_equal(mip6_report_rl.stamp, stamp) || 194 if (mip6_report_rl.stamp != stamp ||
195 mip6_report_rl.iif != iif || 195 mip6_report_rl.iif != iif ||
196 !ipv6_addr_equal(&mip6_report_rl.src, src) || 196 !ipv6_addr_equal(&mip6_report_rl.src, src) ||
197 !ipv6_addr_equal(&mip6_report_rl.dst, dst)) { 197 !ipv6_addr_equal(&mip6_report_rl.dst, dst)) {
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index e07f22b0c58a..8a9219ff2e77 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1809,7 +1809,7 @@ static int ipx_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1809 rc = skb_copy_datagram_msg(skb, sizeof(struct ipxhdr), msg, copied); 1809 rc = skb_copy_datagram_msg(skb, sizeof(struct ipxhdr), msg, copied);
1810 if (rc) 1810 if (rc)
1811 goto out_free; 1811 goto out_free;
1812 if (skb->tstamp.tv64) 1812 if (skb->tstamp)
1813 sk->sk_stamp = skb->tstamp; 1813 sk->sk_stamp = skb->tstamp;
1814 1814
1815 if (sipx) { 1815 if (sipx) {
diff --git a/net/mac802154/util.c b/net/mac802154/util.c
index f9fd0957ab67..7c03fb0ea34c 100644
--- a/net/mac802154/util.c
+++ b/net/mac802154/util.c
@@ -80,11 +80,11 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
80 80
81 if (skb->len > max_sifs_size) 81 if (skb->len > max_sifs_size)
82 hrtimer_start(&local->ifs_timer, 82 hrtimer_start(&local->ifs_timer,
83 ktime_set(0, hw->phy->lifs_period * NSEC_PER_USEC), 83 hw->phy->lifs_period * NSEC_PER_USEC,
84 HRTIMER_MODE_REL); 84 HRTIMER_MODE_REL);
85 else 85 else
86 hrtimer_start(&local->ifs_timer, 86 hrtimer_start(&local->ifs_timer,
87 ktime_set(0, hw->phy->sifs_period * NSEC_PER_USEC), 87 hw->phy->sifs_period * NSEC_PER_USEC,
88 HRTIMER_MODE_REL); 88 HRTIMER_MODE_REL);
89 } else { 89 } else {
90 ieee802154_wake_queue(hw); 90 ieee802154_wake_queue(hw);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 6a0bbfa8e702..3a073cd9fcf4 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -783,7 +783,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
783 /* set conntrack timestamp, if enabled. */ 783 /* set conntrack timestamp, if enabled. */
784 tstamp = nf_conn_tstamp_find(ct); 784 tstamp = nf_conn_tstamp_find(ct);
785 if (tstamp) { 785 if (tstamp) {
786 if (skb->tstamp.tv64 == 0) 786 if (skb->tstamp == 0)
787 __net_timestamp(skb); 787 __net_timestamp(skb);
788 788
789 tstamp->start = ktime_to_ns(skb->tstamp); 789 tstamp->start = ktime_to_ns(skb->tstamp);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 200922bb2036..08247bf7d7b8 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -538,7 +538,7 @@ __build_packet_message(struct nfnl_log_net *log,
538 goto nla_put_failure; 538 goto nla_put_failure;
539 } 539 }
540 540
541 if (skb->tstamp.tv64) { 541 if (skb->tstamp) {
542 struct nfulnl_msg_packet_timestamp ts; 542 struct nfulnl_msg_packet_timestamp ts;
543 struct timespec64 kts = ktime_to_timespec64(skb->tstamp); 543 struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
544 ts.sec = cpu_to_be64(kts.tv_sec); 544 ts.sec = cpu_to_be64(kts.tv_sec);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index be7627b80400..3ee0b8a000a4 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -384,7 +384,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
384 + nla_total_size(sizeof(u_int32_t)) /* skbinfo */ 384 + nla_total_size(sizeof(u_int32_t)) /* skbinfo */
385 + nla_total_size(sizeof(u_int32_t)); /* cap_len */ 385 + nla_total_size(sizeof(u_int32_t)); /* cap_len */
386 386
387 if (entskb->tstamp.tv64) 387 if (entskb->tstamp)
388 size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)); 388 size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
389 389
390 size += nfqnl_get_bridge_size(entry); 390 size += nfqnl_get_bridge_size(entry);
@@ -555,7 +555,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
555 if (nfqnl_put_bridge(entry, skb) < 0) 555 if (nfqnl_put_bridge(entry, skb) < 0)
556 goto nla_put_failure; 556 goto nla_put_failure;
557 557
558 if (entskb->tstamp.tv64) { 558 if (entskb->tstamp) {
559 struct nfqnl_msg_packet_timestamp ts; 559 struct nfqnl_msg_packet_timestamp ts;
560 struct timespec64 kts = ktime_to_timespec64(entskb->tstamp); 560 struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
561 561
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index 0ae55a36f492..1b01eec1fbda 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -168,7 +168,7 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par)
168 * may happen that the same packet matches both rules if 168 * may happen that the same packet matches both rules if
169 * it arrived at the right moment before 13:00. 169 * it arrived at the right moment before 13:00.
170 */ 170 */
171 if (skb->tstamp.tv64 == 0) 171 if (skb->tstamp == 0)
172 __net_timestamp((struct sk_buff *)skb); 172 __net_timestamp((struct sk_buff *)skb);
173 173
174 stamp = ktime_to_ns(skb->tstamp); 174 stamp = ktime_to_ns(skb->tstamp);
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 9ffe1c220b02..f1207582cbf3 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -509,7 +509,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
509 if (delay) { 509 if (delay) {
510 ktime_t time; 510 ktime_t time;
511 511
512 time = ktime_set(0, 0); 512 time = 0;
513 time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay)); 513 time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
514 hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED); 514 hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
515 } 515 }
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index b7e4097bfdab..bcfadfdea8e0 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -627,7 +627,7 @@ deliver:
627 * from the network (tstamp will be updated). 627 * from the network (tstamp will be updated).
628 */ 628 */
629 if (G_TC_FROM(skb->tc_verd) & AT_INGRESS) 629 if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
630 skb->tstamp.tv64 = 0; 630 skb->tstamp = 0;
631#endif 631#endif
632 632
633 if (q->qdisc) { 633 if (q->qdisc) {
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index ce54dce13ddb..a1652ab63918 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -72,7 +72,7 @@ static struct sctp_transport *sctp_transport_init(struct net *net,
72 */ 72 */
73 peer->rto = msecs_to_jiffies(net->sctp.rto_initial); 73 peer->rto = msecs_to_jiffies(net->sctp.rto_initial);
74 74
75 peer->last_time_heard = ktime_set(0, 0); 75 peer->last_time_heard = 0;
76 peer->last_time_ecne_reduced = jiffies; 76 peer->last_time_ecne_reduced = jiffies;
77 77
78 peer->param_flags = SPP_HB_DISABLE | 78 peer->param_flags = SPP_HB_DISABLE |
diff --git a/net/socket.c b/net/socket.c
index 5ff26c44db33..8487bf136e5c 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -668,7 +668,7 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
668 668
669 /* Race occurred between timestamp enabling and packet 669 /* Race occurred between timestamp enabling and packet
670 receiving. Fill in the current time for now. */ 670 receiving. Fill in the current time for now. */
671 if (need_software_tstamp && skb->tstamp.tv64 == 0) 671 if (need_software_tstamp && skb->tstamp == 0)
672 __net_timestamp(skb); 672 __net_timestamp(skb);
673 673
674 if (need_software_tstamp) { 674 if (need_software_tstamp) {
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a3e85ee28b5a..de066acdb34e 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -574,7 +574,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
574 } 574 }
575 len = svc_addr_len(svc_addr(rqstp)); 575 len = svc_addr_len(svc_addr(rqstp));
576 rqstp->rq_addrlen = len; 576 rqstp->rq_addrlen = len;
577 if (skb->tstamp.tv64 == 0) { 577 if (skb->tstamp == 0) {
578 skb->tstamp = ktime_get_real(); 578 skb->tstamp = ktime_get_real();
579 /* Don't enable netstamp, sunrpc doesn't 579 /* Don't enable netstamp, sunrpc doesn't
580 need that much accuracy */ 580 need that much accuracy */
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index d4ab9a7f3d94..64e3c82eedf6 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1404,7 +1404,7 @@ int xfrm_state_check_expire(struct xfrm_state *x)
1404 if (x->curlft.bytes >= x->lft.hard_byte_limit || 1404 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1405 x->curlft.packets >= x->lft.hard_packet_limit) { 1405 x->curlft.packets >= x->lft.hard_packet_limit) {
1406 x->km.state = XFRM_STATE_EXPIRED; 1406 x->km.state = XFRM_STATE_EXPIRED;
1407 tasklet_hrtimer_start(&x->mtimer, ktime_set(0, 0), HRTIMER_MODE_REL); 1407 tasklet_hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL);
1408 return -EINVAL; 1408 return -EINVAL;
1409 } 1409 }
1410 1410
diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
index e2f27022b363..1ac0c423903e 100644
--- a/sound/core/hrtimer.c
+++ b/sound/core/hrtimer.c
@@ -58,7 +58,7 @@ static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
58 58
59 /* calculate the drift */ 59 /* calculate the drift */
60 delta = ktime_sub(hrt->base->get_time(), hrtimer_get_expires(hrt)); 60 delta = ktime_sub(hrt->base->get_time(), hrtimer_get_expires(hrt));
61 if (delta.tv64 > 0) 61 if (delta > 0)
62 ticks += ktime_divns(delta, ticks * resolution); 62 ticks += ktime_divns(delta, ticks * resolution);
63 63
64 snd_timer_interrupt(stime->timer, ticks); 64 snd_timer_interrupt(stime->timer, ticks);
diff --git a/sound/drivers/pcsp/pcsp_lib.c b/sound/drivers/pcsp/pcsp_lib.c
index 3689f5f6be64..aca2d7d5f059 100644
--- a/sound/drivers/pcsp/pcsp_lib.c
+++ b/sound/drivers/pcsp/pcsp_lib.c
@@ -166,7 +166,7 @@ static int pcsp_start_playing(struct snd_pcsp *chip)
166 atomic_set(&chip->timer_active, 1); 166 atomic_set(&chip->timer_active, 1);
167 chip->thalf = 0; 167 chip->thalf = 0;
168 168
169 hrtimer_start(&pcsp_chip.timer, ktime_set(0, 0), HRTIMER_MODE_REL); 169 hrtimer_start(&pcsp_chip.timer, 0, HRTIMER_MODE_REL);
170 return 0; 170 return 0;
171} 171}
172 172
diff --git a/sound/firewire/lib.c b/sound/firewire/lib.c
index ca4dfcf43175..7683238283b6 100644
--- a/sound/firewire/lib.c
+++ b/sound/firewire/lib.c
@@ -114,7 +114,7 @@ static void async_midi_port_callback(struct fw_card *card, int rcode,
114 snd_rawmidi_transmit_ack(substream, port->consume_bytes); 114 snd_rawmidi_transmit_ack(substream, port->consume_bytes);
115 else if (!rcode_is_permanent_error(rcode)) 115 else if (!rcode_is_permanent_error(rcode))
116 /* To start next transaction immediately for recovery. */ 116 /* To start next transaction immediately for recovery. */
117 port->next_ktime = ktime_set(0, 0); 117 port->next_ktime = 0;
118 else 118 else
119 /* Don't continue processing. */ 119 /* Don't continue processing. */
120 port->error = true; 120 port->error = true;
@@ -156,7 +156,7 @@ static void midi_port_work(struct work_struct *work)
156 if (port->consume_bytes <= 0) { 156 if (port->consume_bytes <= 0) {
157 /* Do it in next chance, immediately. */ 157 /* Do it in next chance, immediately. */
158 if (port->consume_bytes == 0) { 158 if (port->consume_bytes == 0) {
159 port->next_ktime = ktime_set(0, 0); 159 port->next_ktime = 0;
160 schedule_work(&port->work); 160 schedule_work(&port->work);
161 } else { 161 } else {
162 /* Fatal error. */ 162 /* Fatal error. */
@@ -219,7 +219,7 @@ int snd_fw_async_midi_port_init(struct snd_fw_async_midi_port *port,
219 port->addr = addr; 219 port->addr = addr;
220 port->fill = fill; 220 port->fill = fill;
221 port->idling = true; 221 port->idling = true;
222 port->next_ktime = ktime_set(0, 0); 222 port->next_ktime = 0;
223 port->error = false; 223 port->error = false;
224 224
225 INIT_WORK(&port->work, midi_port_work); 225 INIT_WORK(&port->work, midi_port_work);
diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
index 38990a77d7b7..c6994ebb4567 100644
--- a/sound/hda/hdac_stream.c
+++ b/sound/hda/hdac_stream.c
@@ -465,7 +465,7 @@ int snd_hdac_stream_set_params(struct hdac_stream *azx_dev,
465} 465}
466EXPORT_SYMBOL_GPL(snd_hdac_stream_set_params); 466EXPORT_SYMBOL_GPL(snd_hdac_stream_set_params);
467 467
468static cycle_t azx_cc_read(const struct cyclecounter *cc) 468static u64 azx_cc_read(const struct cyclecounter *cc)
469{ 469{
470 struct hdac_stream *azx_dev = container_of(cc, struct hdac_stream, cc); 470 struct hdac_stream *azx_dev = container_of(cc, struct hdac_stream, cc);
471 471
@@ -473,7 +473,7 @@ static cycle_t azx_cc_read(const struct cyclecounter *cc)
473} 473}
474 474
475static void azx_timecounter_init(struct hdac_stream *azx_dev, 475static void azx_timecounter_init(struct hdac_stream *azx_dev,
476 bool force, cycle_t last) 476 bool force, u64 last)
477{ 477{
478 struct timecounter *tc = &azx_dev->tc; 478 struct timecounter *tc = &azx_dev->tc;
479 struct cyclecounter *cc = &azx_dev->cc; 479 struct cyclecounter *cc = &azx_dev->cc;
@@ -523,7 +523,7 @@ void snd_hdac_stream_timecounter_init(struct hdac_stream *azx_dev,
523 struct snd_pcm_runtime *runtime = azx_dev->substream->runtime; 523 struct snd_pcm_runtime *runtime = azx_dev->substream->runtime;
524 struct hdac_stream *s; 524 struct hdac_stream *s;
525 bool inited = false; 525 bool inited = false;
526 cycle_t cycle_last = 0; 526 u64 cycle_last = 0;
527 int i = 0; 527 int i = 0;
528 528
529 list_for_each_entry(s, &bus->stream_list, list) { 529 list_for_each_entry(s, &bus->stream_list, list) {
diff --git a/sound/sh/sh_dac_audio.c b/sound/sh/sh_dac_audio.c
index abf9c0cab1e2..461b310c7872 100644
--- a/sound/sh/sh_dac_audio.c
+++ b/sound/sh/sh_dac_audio.c
@@ -87,7 +87,7 @@ static void dac_audio_reset(struct snd_sh_dac *chip)
87 87
88static void dac_audio_set_rate(struct snd_sh_dac *chip) 88static void dac_audio_set_rate(struct snd_sh_dac *chip)
89{ 89{
90 chip->wakeups_per_second = ktime_set(0, 1000000000 / chip->rate); 90 chip->wakeups_per_second = 1000000000 / chip->rate;
91} 91}
92 92
93 93
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 5b4f60d43314..a2dbbccbb6a3 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -39,7 +39,7 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
39 vcpu->arch.timer_cpu.active_cleared_last = false; 39 vcpu->arch.timer_cpu.active_cleared_last = false;
40} 40}
41 41
42static cycle_t kvm_phys_timer_read(void) 42static u64 kvm_phys_timer_read(void)
43{ 43{
44 return timecounter->cc->read(timecounter->cc); 44 return timecounter->cc->read(timecounter->cc);
45} 45}
@@ -102,7 +102,7 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
102 102
103static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu) 103static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
104{ 104{
105 cycle_t cval, now; 105 u64 cval, now;
106 106
107 cval = vcpu->arch.timer_cpu.cntv_cval; 107 cval = vcpu->arch.timer_cpu.cntv_cval;
108 now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff; 108 now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
@@ -155,7 +155,7 @@ static bool kvm_timer_irq_can_fire(struct kvm_vcpu *vcpu)
155bool kvm_timer_should_fire(struct kvm_vcpu *vcpu) 155bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
156{ 156{
157 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 157 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
158 cycle_t cval, now; 158 u64 cval, now;
159 159
160 if (!kvm_timer_irq_can_fire(vcpu)) 160 if (!kvm_timer_irq_can_fire(vcpu))
161 return false; 161 return false;