aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-08-05 20:46:42 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-08-05 20:46:42 -0400
commite7fda6c4c3c1a7d6996dd75fd84670fa0b5d448f (patch)
treedaa51c16462c318b890acf7f01fba5827275dd74
parent08d69a25714429850cf9ef71f22d8cdc9189d93f (diff)
parent953dec21aed4038464fec02f96a2f1b8701a5bce (diff)
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer and time updates from Thomas Gleixner: "A rather large update of timers, timekeeping & co - Core timekeeping code is year-2038 safe now for 32bit machines. Now we just need to fix all in kernel users and the gazillion of user space interfaces which rely on timespec/timeval :) - Better cache layout for the timekeeping internal data structures. - Proper nanosecond based interfaces for in kernel users. - Tree wide cleanup of code which wants nanoseconds but does hoops and loops to convert back and forth from timespecs. Some of it definitely belongs into the ugly code museum. - Consolidation of the timekeeping interface zoo. - A fast NMI safe accessor to clock monotonic for tracing. This is a long standing request to support correlated user/kernel space traces. With proper NTP frequency correction it's also suitable for correlation of traces across separate machines. - Checkpoint/restart support for timerfd. - A few NOHZ[_FULL] improvements in the [hr]timer code. - Code move from kernel to kernel/time of all time* related code. - New clocksource/event drivers from the ARM universe. I'm really impressed that despite an architected timer in the newer chips SoC manufacturers insist on inventing new and differently broken SoC specific timers. [ Ed. "Impressed"? I don't think that word means what you think it means ] - Another round of code move from arch to drivers. Looks like most of the legacy mess in ARM regarding timers is sorted out except for a few obnoxious strongholds. 
- The usual updates and fixlets all over the place" * 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (114 commits) timekeeping: Fixup typo in update_vsyscall_old definition clocksource: document some basic timekeeping concepts timekeeping: Use cached ntp_tick_length when accumulating error timekeeping: Rework frequency adjustments to work better w/ nohz timekeeping: Minor fixup for timespec64->timespec assignment ftrace: Provide trace clocks monotonic timekeeping: Provide fast and NMI safe access to CLOCK_MONOTONIC seqcount: Add raw_write_seqcount_latch() seqcount: Provide raw_read_seqcount() timekeeping: Use tk_read_base as argument for timekeeping_get_ns() timekeeping: Create struct tk_read_base and use it in struct timekeeper timekeeping: Restructure the timekeeper some more clocksource: Get rid of cycle_last clocksource: Move cycle_last validation to core code clocksource: Make delta calculation a function wireless: ath9k: Get rid of timespec conversions drm: vmwgfx: Use nsec based interfaces drm: i915: Use nsec based interfaces timekeeping: Provide ktime_get_raw() hangcheck-timer: Use ktime_get_ns() ...
-rw-r--r--Documentation/DocBook/device-drivers.tmpl4
-rw-r--r--Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.txt29
-rw-r--r--Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt17
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,cmt.txt47
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,mtu2.txt39
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,tmu.txt39
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/filesystems/proc.txt19
-rw-r--r--Documentation/timers/00-INDEX2
-rw-r--r--Documentation/timers/timekeeping.txt179
-rw-r--r--MAINTAINERS6
-rw-r--r--arch/arm/Kconfig2
-rw-r--r--arch/arm/common/bL_switcher.c16
-rw-r--r--arch/arm/mach-pxa/Makefile2
-rw-r--r--arch/arm/mach-pxa/generic.c11
-rw-r--r--arch/arm/mach-pxa/time.c162
-rw-r--r--arch/arm64/kernel/vdso.c10
-rw-r--r--arch/hexagon/Kconfig1
-rw-r--r--arch/ia64/kernel/time.c4
-rw-r--r--arch/powerpc/kernel/time.c4
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c11
-rw-r--r--arch/powerpc/platforms/cell/spufs/context.c4
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c4
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c4
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/kernel/time.c16
-rw-r--r--arch/tile/kernel/time.c13
-rw-r--r--arch/tile/kernel/vdso/vgettimeofday.c7
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/kernel/tsc.c21
-rw-r--r--arch/x86/kernel/vsyscall_gtod.c23
-rw-r--r--arch/x86/kvm/x86.c62
-rw-r--r--drivers/char/hangcheck-timer.c33
-rw-r--r--drivers/clocksource/Kconfig14
-rw-r--r--drivers/clocksource/Makefile3
-rw-r--r--drivers/clocksource/clps711x-timer.c131
-rw-r--r--drivers/clocksource/exynos_mct.c63
-rw-r--r--drivers/clocksource/mtk_timer.c261
-rw-r--r--drivers/clocksource/pxa_timer.c227
-rw-r--r--drivers/clocksource/sh_cmt.c233
-rw-r--r--drivers/clocksource/sh_mtu2.c146
-rw-r--r--drivers/clocksource/sh_tmu.c127
-rw-r--r--drivers/clocksource/timer-marco.c3
-rw-r--r--drivers/clocksource/timer-prima2.c3
-rw-r--r--drivers/connector/cn_proc.c36
-rw-r--r--drivers/firewire/core-cdev.c6
-rw-r--r--drivers/gpu/drm/drm_irq.c16
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c33
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_marker.c44
-rw-r--r--drivers/hwmon/ibmaem.c6
-rw-r--r--drivers/input/evdev.c7
-rw-r--r--drivers/mfd/cros_ec_spi.c8
-rw-r--r--drivers/misc/ioc4.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c7
-rw-r--r--drivers/of/address.c36
-rw-r--r--fs/lockd/mon.c4
-rw-r--r--fs/proc/array.c7
-rw-r--r--fs/timerfd.c77
-rw-r--r--include/clocksource/pxa.h18
-rw-r--r--include/linux/clocksource.h2
-rw-r--r--include/linux/hrtimer.h16
-rw-r--r--include/linux/iio/iio.h9
-rw-r--r--include/linux/io.h2
-rw-r--r--include/linux/ktime.h228
-rw-r--r--include/linux/mlx5/driver.h4
-rw-r--r--include/linux/of_address.h11
-rw-r--r--include/linux/sched.h8
-rw-r--r--include/linux/seqlock.h27
-rw-r--r--include/linux/sh_timer.h5
-rw-r--r--include/linux/time.h65
-rw-r--r--include/linux/time64.h190
-rw-r--r--include/linux/timekeeper_internal.h150
-rw-r--r--include/linux/timekeeping.h209
-rw-r--r--include/linux/timerfd.h5
-rw-r--r--kernel/Makefile25
-rw-r--r--kernel/acct.c10
-rw-r--r--kernel/debug/kdb/kdb_main.c2
-rw-r--r--kernel/delayacct.c62
-rw-r--r--kernel/fork.c5
-rw-r--r--kernel/time/Kconfig9
-rw-r--r--kernel/time/Makefile19
-rw-r--r--kernel/time/clocksource.c12
-rw-r--r--kernel/time/hrtimer.c (renamed from kernel/hrtimer.c)125
-rw-r--r--kernel/time/itimer.c (renamed from kernel/itimer.c)0
-rw-r--r--kernel/time/ntp.c15
-rw-r--r--kernel/time/ntp_internal.h2
-rw-r--r--kernel/time/posix-cpu-timers.c (renamed from kernel/posix-cpu-timers.c)0
-rw-r--r--kernel/time/posix-timers.c (renamed from kernel/posix-timers.c)2
-rw-r--r--kernel/time/tick-internal.h2
-rw-r--r--kernel/time/time.c (renamed from kernel/time.c)64
-rw-r--r--kernel/time/timeconst.bc (renamed from kernel/timeconst.bc)0
-rw-r--r--kernel/time/timekeeping.c1147
-rw-r--r--kernel/time/timekeeping.h20
-rw-r--r--kernel/time/timekeeping_debug.c2
-rw-r--r--kernel/time/timekeeping_internal.h17
-rw-r--r--kernel/time/timer.c (renamed from kernel/timer.c)34
-rw-r--r--kernel/time/udelay_test.c168
-rw-r--r--kernel/trace/trace.c11
-rw-r--r--kernel/tsacct.c19
-rw-r--r--lib/Kconfig.debug9
-rw-r--r--lib/devres.c2
-rw-r--r--security/tomoyo/audit.c8
-rw-r--r--security/tomoyo/common.c4
-rwxr-xr-xtools/time/udelay_test.sh66
108 files changed, 3221 insertions, 1923 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index dd3f278faa8a..f2130586ef5d 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -54,7 +54,7 @@
54!Ikernel/sched/cpupri.c 54!Ikernel/sched/cpupri.c
55!Ikernel/sched/fair.c 55!Ikernel/sched/fair.c
56!Iinclude/linux/completion.h 56!Iinclude/linux/completion.h
57!Ekernel/timer.c 57!Ekernel/time/timer.c
58 </sect1> 58 </sect1>
59 <sect1><title>Wait queues and Wake events</title> 59 <sect1><title>Wait queues and Wake events</title>
60!Iinclude/linux/wait.h 60!Iinclude/linux/wait.h
@@ -63,7 +63,7 @@
63 <sect1><title>High-resolution timers</title> 63 <sect1><title>High-resolution timers</title>
64!Iinclude/linux/ktime.h 64!Iinclude/linux/ktime.h
65!Iinclude/linux/hrtimer.h 65!Iinclude/linux/hrtimer.h
66!Ekernel/hrtimer.c 66!Ekernel/time/hrtimer.c
67 </sect1> 67 </sect1>
68 <sect1><title>Workqueues and Kevents</title> 68 <sect1><title>Workqueues and Kevents</title>
69!Ekernel/workqueue.c 69!Ekernel/workqueue.c
diff --git a/Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.txt b/Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.txt
new file mode 100644
index 000000000000..cd55b52548e4
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.txt
@@ -0,0 +1,29 @@
1* Cirrus Logic CLPS711X Timer Counter
2
3Required properties:
4- compatible: Shall contain "cirrus,clps711x-timer".
5- reg : Address and length of the register set.
6- interrupts: The interrupt number of the timer.
7- clocks : phandle of timer reference clock.
8
9Note: Each timer should have an alias correctly numbered in "aliases" node.
10
11Example:
12 aliases {
13 timer0 = &timer1;
14 timer1 = &timer2;
15 };
16
17 timer1: timer@80000300 {
18 compatible = "cirrus,ep7312-timer", "cirrus,clps711x-timer";
19 reg = <0x80000300 0x4>;
20 interrupts = <8>;
21 clocks = <&clks 5>;
22 };
23
24 timer2: timer@80000340 {
25 compatible = "cirrus,ep7312-timer", "cirrus,clps711x-timer";
26 reg = <0x80000340 0x4>;
27 interrupts = <9>;
28 clocks = <&clks 6>;
29 };
diff --git a/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
new file mode 100644
index 000000000000..7c4408ff4b83
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
@@ -0,0 +1,17 @@
1Mediatek MT6577, MT6572 and MT6589 Timers
2---------------------------------------
3
4Required properties:
5- compatible: Should be "mediatek,mt6577-timer"
6- reg: Should contain location and length for timers register.
7- clocks: Clocks driving the timer hardware. This list should include two
8 clocks. The order is system clock and as second clock the RTC clock.
9
10Examples:
11
12 timer@10008000 {
13 compatible = "mediatek,mt6577-timer";
14 reg = <0x10008000 0x80>;
15 interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_LOW>;
16 clocks = <&system_clk>, <&rtc_clk>;
17 };
diff --git a/Documentation/devicetree/bindings/timer/renesas,cmt.txt b/Documentation/devicetree/bindings/timer/renesas,cmt.txt
new file mode 100644
index 000000000000..a17418b0ece3
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/renesas,cmt.txt
@@ -0,0 +1,47 @@
1* Renesas R-Car Compare Match Timer (CMT)
2
3The CMT is a multi-channel 16/32/48-bit timer/counter with configurable clock
4inputs and programmable compare match.
5
6Channels share hardware resources but their counter and compare match value
7are independent. A particular CMT instance can implement only a subset of the
8channels supported by the CMT model. Channel indices represent the hardware
9position of the channel in the CMT and don't match the channel numbers in the
10datasheets.
11
12Required Properties:
13
14 - compatible: must contain one of the following.
15 - "renesas,cmt-32" for the 32-bit CMT
16 (CMT0 on sh7372, sh73a0 and r8a7740)
17 - "renesas,cmt-32-fast" for the 32-bit CMT with fast clock support
18 (CMT[234] on sh7372, sh73a0 and r8a7740)
19 - "renesas,cmt-48" for the 48-bit CMT
20 (CMT1 on sh7372, sh73a0 and r8a7740)
21 - "renesas,cmt-48-gen2" for the second generation 48-bit CMT
22 (CMT[01] on r8a73a4, r8a7790 and r8a7791)
23
24 - reg: base address and length of the registers block for the timer module.
25 - interrupts: interrupt-specifier for the timer, one per channel.
26 - clocks: a list of phandle + clock-specifier pairs, one for each entry
27 in clock-names.
28 - clock-names: must contain "fck" for the functional clock.
29
30 - renesas,channels-mask: bitmask of the available channels.
31
32
33Example: R8A7790 (R-Car H2) CMT0 node
34
35 CMT0 on R8A7790 implements hardware channels 5 and 6 only and names
36 them channels 0 and 1 in the documentation.
37
38 cmt0: timer@ffca0000 {
39 compatible = "renesas,cmt-48-gen2";
40 reg = <0 0xffca0000 0 0x1004>;
41 interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>,
42 <0 142 IRQ_TYPE_LEVEL_HIGH>;
43 clocks = <&mstp1_clks R8A7790_CLK_CMT0>;
44 clock-names = "fck";
45
46 renesas,channels-mask = <0x60>;
47 };
diff --git a/Documentation/devicetree/bindings/timer/renesas,mtu2.txt b/Documentation/devicetree/bindings/timer/renesas,mtu2.txt
new file mode 100644
index 000000000000..917453f826bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/renesas,mtu2.txt
@@ -0,0 +1,39 @@
1* Renesas R-Car Multi-Function Timer Pulse Unit 2 (MTU2)
2
3The MTU2 is a multi-purpose, multi-channel timer/counter with configurable
4clock inputs and programmable compare match.
5
6Channels share hardware resources but their counter and compare match value
7are independent. The MTU2 hardware supports five channels indexed from 0 to 4.
8
9Required Properties:
10
11 - compatible: must contain "renesas,mtu2"
12
13 - reg: base address and length of the registers block for the timer module.
14
15 - interrupts: interrupt specifiers for the timer, one for each entry in
16 interrupt-names.
17 - interrupt-names: must contain one entry named "tgi?a" for each enabled
18 channel, where "?" is the channel index expressed as one digit from "0" to
19 "4".
20
21 - clocks: a list of phandle + clock-specifier pairs, one for each entry
22 in clock-names.
23 - clock-names: must contain "fck" for the functional clock.
24
25
26Example: R7S72100 (RZ/A1H) MTU2 node
27
28 mtu2: timer@fcff0000 {
29 compatible = "renesas,mtu2";
30 reg = <0xfcff0000 0x400>;
31 interrupts = <0 139 IRQ_TYPE_LEVEL_HIGH>,
32 <0 146 IRQ_TYPE_LEVEL_HIGH>,
33 <0 150 IRQ_TYPE_LEVEL_HIGH>,
34 <0 154 IRQ_TYPE_LEVEL_HIGH>,
35 <0 159 IRQ_TYPE_LEVEL_HIGH>;
36 interrupt-names = "tgi0a", "tgi1a", "tgi2a", "tgi3a", "tgi4a";
37 clocks = <&mstp3_clks R7S72100_CLK_MTU2>;
38 clock-names = "fck";
39 };
diff --git a/Documentation/devicetree/bindings/timer/renesas,tmu.txt b/Documentation/devicetree/bindings/timer/renesas,tmu.txt
new file mode 100644
index 000000000000..425d0c5f4aee
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/renesas,tmu.txt
@@ -0,0 +1,39 @@
1* Renesas R-Car Timer Unit (TMU)
2
3The TMU is a 32-bit timer/counter with configurable clock inputs and
4programmable compare match.
5
6Channels share hardware resources but their counter and compare match value
7are independent. The TMU hardware supports up to three channels.
8
9Required Properties:
10
11 - compatible: must contain "renesas,tmu"
12
13 - reg: base address and length of the registers block for the timer module.
14
15 - interrupts: interrupt-specifier for the timer, one per channel.
16
17 - clocks: a list of phandle + clock-specifier pairs, one for each entry
18 in clock-names.
19 - clock-names: must contain "fck" for the functional clock.
20
21Optional Properties:
22
23 - #renesas,channels: number of channels implemented by the timer, must be 2
24 or 3 (if not specified the value defaults to 3).
25
26
27Example: R8A7779 (R-Car H1) TMU0 node
28
29 tmu0: timer@ffd80000 {
30 compatible = "renesas,tmu";
31 reg = <0xffd80000 0x30>;
32 interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>,
33 <0 33 IRQ_TYPE_LEVEL_HIGH>,
34 <0 34 IRQ_TYPE_LEVEL_HIGH>;
35 clocks = <&mstp0_clks R8A7779_CLK_TMU0>;
36 clock-names = "fck";
37
38 #renesas,channels = <3>;
39 };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 97c9c06132c4..d415b38ec8ca 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -78,6 +78,7 @@ lsi LSI Corp. (LSI Logic)
78lltc Linear Technology Corporation 78lltc Linear Technology Corporation
79marvell Marvell Technology Group Ltd. 79marvell Marvell Technology Group Ltd.
80maxim Maxim Integrated Products 80maxim Maxim Integrated Products
81mediatek MediaTek Inc.
81micrel Micrel Inc. 82micrel Micrel Inc.
82microchip Microchip Technology Inc. 83microchip Microchip Technology Inc.
83mosaixtech Mosaix Technologies, Inc. 84mosaixtech Mosaix Technologies, Inc.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index ddc531a74d04..eb8a10e22f7c 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1743,6 +1743,25 @@ pair provide additional information particular to the objects they represent.
1743 While the first three lines are mandatory and always printed, the rest is 1743 While the first three lines are mandatory and always printed, the rest is
1744 optional and may be omitted if no marks created yet. 1744 optional and may be omitted if no marks created yet.
1745 1745
1746 Timerfd files
1747 ~~~~~~~~~~~~~
1748
1749 pos: 0
1750 flags: 02
1751 mnt_id: 9
1752 clockid: 0
1753 ticks: 0
1754 settime flags: 01
1755 it_value: (0, 49406829)
1756 it_interval: (1, 0)
1757
1758 where 'clockid' is the clock type and 'ticks' is the number of the timer expirations
1759 that have occurred [see timerfd_create(2) for details]. 'settime flags' are
1760 flags in octal form been used to setup the timer [see timerfd_settime(2) for
1761 details]. 'it_value' is remaining time until the timer exiration.
1762 'it_interval' is the interval for the timer. Note the timer might be set up
1763 with TIMER_ABSTIME option which will be shown in 'settime flags', but 'it_value'
1764 still exhibits timer's remaining time.
1746 1765
1747------------------------------------------------------------------------------ 1766------------------------------------------------------------------------------
1748Configuring procfs 1767Configuring procfs
diff --git a/Documentation/timers/00-INDEX b/Documentation/timers/00-INDEX
index 6d042dc1cce0..ee212a27772f 100644
--- a/Documentation/timers/00-INDEX
+++ b/Documentation/timers/00-INDEX
@@ -12,6 +12,8 @@ Makefile
12 - Build and link hpet_example 12 - Build and link hpet_example
13NO_HZ.txt 13NO_HZ.txt
14 - Summary of the different methods for the scheduler clock-interrupts management. 14 - Summary of the different methods for the scheduler clock-interrupts management.
15timekeeping.txt
16 - Clock sources, clock events, sched_clock() and delay timer notes
15timers-howto.txt 17timers-howto.txt
16 - how to insert delays in the kernel the right (tm) way. 18 - how to insert delays in the kernel the right (tm) way.
17timer_stats.txt 19timer_stats.txt
diff --git a/Documentation/timers/timekeeping.txt b/Documentation/timers/timekeeping.txt
new file mode 100644
index 000000000000..f3a8cf28f802
--- /dev/null
+++ b/Documentation/timers/timekeeping.txt
@@ -0,0 +1,179 @@
1Clock sources, Clock events, sched_clock() and delay timers
2-----------------------------------------------------------
3
4This document tries to briefly explain some basic kernel timekeeping
5abstractions. It partly pertains to the drivers usually found in
6drivers/clocksource in the kernel tree, but the code may be spread out
7across the kernel.
8
9If you grep through the kernel source you will find a number of architecture-
10specific implementations of clock sources, clockevents and several likewise
11architecture-specific overrides of the sched_clock() function and some
12delay timers.
13
14To provide timekeeping for your platform, the clock source provides
15the basic timeline, whereas clock events shoot interrupts on certain points
16on this timeline, providing facilities such as high-resolution timers.
17sched_clock() is used for scheduling and timestamping, and delay timers
18provide an accurate delay source using hardware counters.
19
20
21Clock sources
22-------------
23
24The purpose of the clock source is to provide a timeline for the system that
25tells you where you are in time. For example issuing the command 'date' on
26a Linux system will eventually read the clock source to determine exactly
27what time it is.
28
29Typically the clock source is a monotonic, atomic counter which will provide
30n bits which count from 0 to 2^(n-1) and then wraps around to 0 and starts over.
31It will ideally NEVER stop ticking as long as the system is running. It
32may stop during system suspend.
33
34The clock source shall have as high resolution as possible, and the frequency
35shall be as stable and correct as possible as compared to a real-world wall
36clock. It should not move unpredictably back and forth in time or miss a few
37cycles here and there.
38
39It must be immune to the kind of effects that occur in hardware where e.g.
40the counter register is read in two phases on the bus lowest 16 bits first
41and the higher 16 bits in a second bus cycle with the counter bits
42potentially being updated in between leading to the risk of very strange
43values from the counter.
44
45When the wall-clock accuracy of the clock source isn't satisfactory, there
46are various quirks and layers in the timekeeping code for e.g. synchronizing
47the user-visible time to RTC clocks in the system or against networked time
48servers using NTP, but all they do basically is update an offset against
49the clock source, which provides the fundamental timeline for the system.
50These measures do not affect the clock source per se, they only adapt the
51system to the shortcomings of it.
52
53The clock source struct shall provide means to translate the provided counter
54into a nanosecond value as an unsigned long long (unsigned 64 bit) number.
55Since this operation may be invoked very often, doing this in a strict
56mathematical sense is not desirable: instead the number is taken as close as
57possible to a nanosecond value using only the arithmetic operations
58multiply and shift, so in clocksource_cyc2ns() you find:
59
60 ns ~= (clocksource * mult) >> shift
61
62You will find a number of helper functions in the clock source code intended
63to aid in providing these mult and shift values, such as
64clocksource_khz2mult(), clocksource_hz2mult() that help determine the
65mult factor from a fixed shift, and clocksource_register_hz() and
66clocksource_register_khz() which will help out assigning both shift and mult
67factors using the frequency of the clock source as the only input.
68
69For real simple clock sources accessed from a single I/O memory location
70there is nowadays even clocksource_mmio_init() which will take a memory
71location, bit width, a parameter telling whether the counter in the
72register counts up or down, and the timer clock rate, and then conjure all
73necessary parameters.
74
75Since a 32-bit counter at say 100 MHz will wrap around to zero after some 43
76seconds, the code handling the clock source will have to compensate for this.
77That is the reason why the clock source struct also contains a 'mask'
78member telling how many bits of the source are valid. This way the timekeeping
79code knows when the counter will wrap around and can insert the necessary
80compensation code on both sides of the wrap point so that the system timeline
81remains monotonic.
82
83
84Clock events
85------------
86
87Clock events are the conceptual reverse of clock sources: they take a
88desired time specification value and calculate the values to poke into
89hardware timer registers.
90
91Clock events are orthogonal to clock sources. The same hardware
92and register range may be used for the clock event, but it is essentially
93a different thing. The hardware driving clock events has to be able to
94fire interrupts, so as to trigger events on the system timeline. On an SMP
95system, it is ideal (and customary) to have one such event driving timer per
96CPU core, so that each core can trigger events independently of any other
97core.
98
99You will notice that the clock event device code is based on the same basic
100idea about translating counters to nanoseconds using mult and shift
101arithmetic, and you find the same family of helper functions again for
102assigning these values. The clock event driver does not need a 'mask'
103attribute however: the system will not try to plan events beyond the time
104horizon of the clock event.
105
106
107sched_clock()
108-------------
109
110In addition to the clock sources and clock events there is a special weak
111function in the kernel called sched_clock(). This function shall return the
112number of nanoseconds since the system was started. An architecture may or
113may not provide an implementation of sched_clock() on its own. If a local
114implementation is not provided, the system jiffy counter will be used as
115sched_clock().
116
117As the name suggests, sched_clock() is used for scheduling the system,
118determining the absolute timeslice for a certain process in the CFS scheduler
119for example. It is also used for printk timestamps when you have selected to
120include time information in printk for things like bootcharts.
121
122Compared to clock sources, sched_clock() has to be very fast: it is called
123much more often, especially by the scheduler. If you have to do trade-offs
124between accuracy compared to the clock source, you may sacrifice accuracy
125for speed in sched_clock(). It however requires some of the same basic
126characteristics as the clock source, i.e. it should be monotonic.
127
128The sched_clock() function may wrap only on unsigned long long boundaries,
129i.e. after 64 bits. Since this is a nanosecond value this will mean it wraps
130after circa 585 years. (For most practical systems this means "never".)
131
132If an architecture does not provide its own implementation of this function,
133it will fall back to using jiffies, making its maximum resolution 1/HZ of the
134jiffy frequency for the architecture. This will affect scheduling accuracy
135and will likely show up in system benchmarks.
136
137The clock driving sched_clock() may stop or reset to zero during system
138suspend/sleep. This does not matter to the function it serves of scheduling
139events on the system. However it may result in interesting timestamps in
140printk().
141
142The sched_clock() function should be callable in any context, IRQ- and
143NMI-safe and return a sane value in any context.
144
145Some architectures may have a limited set of time sources and lack a nice
146counter to derive a 64-bit nanosecond value, so for example on the ARM
147architecture, special helper functions have been created to provide a
148sched_clock() nanosecond base from a 16- or 32-bit counter. Sometimes the
149same counter that is also used as clock source is used for this purpose.
150
151On SMP systems, it is crucial for performance that sched_clock() can be called
152independently on each CPU without any synchronization performance hits.
153Some hardware (such as the x86 TSC) will cause the sched_clock() function to
154drift between the CPUs on the system. The kernel can work around this by
155enabling the CONFIG_HAVE_UNSTABLE_SCHED_CLOCK option. This is another aspect
156that makes sched_clock() different from the ordinary clock source.
157
158
159Delay timers (some architectures only)
160--------------------------------------
161
162On systems with variable CPU frequency, the various kernel delay() functions
163will sometimes behave strangely. Basically these delays usually use a hard
164loop to delay a certain number of jiffy fractions using a "lpj" (loops per
165jiffy) value, calibrated on boot.
166
167Let's hope that your system is running on maximum frequency when this value
168is calibrated: as an effect when the frequency is geared down to half the
169full frequency, any delay() will be twice as long. Usually this does not
170hurt, as you're commonly requesting that amount of delay *or more*. But
171basically the semantics are quite unpredictable on such systems.
172
173Enter timer-based delays. Using these, a timer read may be used instead of
174a hard-coded loop for providing the desired delay.
175
176This is done by declaring a struct delay_timer and assigning the appropriate
177function pointers and rate settings for this delay timer.
178
179This is available on some architectures like OpenRISC or ARM.
diff --git a/MAINTAINERS b/MAINTAINERS
index d5a78f24d14e..f77776304406 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4237,7 +4237,7 @@ L: linux-kernel@vger.kernel.org
4237T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core 4237T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
4238S: Maintained 4238S: Maintained
4239F: Documentation/timers/ 4239F: Documentation/timers/
4240F: kernel/hrtimer.c 4240F: kernel/time/hrtimer.c
4241F: kernel/time/clockevents.c 4241F: kernel/time/clockevents.c
4242F: kernel/time/tick*.* 4242F: kernel/time/tick*.*
4243F: kernel/time/timer_*.c 4243F: kernel/time/timer_*.c
@@ -7053,10 +7053,10 @@ POSIX CLOCKS and TIMERS
7053M: Thomas Gleixner <tglx@linutronix.de> 7053M: Thomas Gleixner <tglx@linutronix.de>
7054L: linux-kernel@vger.kernel.org 7054L: linux-kernel@vger.kernel.org
7055T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core 7055T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
7056S: Supported 7056S: Maintained
7057F: fs/timerfd.c 7057F: fs/timerfd.c
7058F: include/linux/timer* 7058F: include/linux/timer*
7059F: kernel/*timer* 7059F: kernel/time/*timer*
7060 7060
7061POWER SUPPLY CLASS/SUBSYSTEM and DRIVERS 7061POWER SUPPLY CLASS/SUBSYSTEM and DRIVERS
7062M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> 7062M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1e14b9068a39..d31c500653a2 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -65,7 +65,6 @@ config ARM
65 select HAVE_UID16 65 select HAVE_UID16
66 select HAVE_VIRT_CPU_ACCOUNTING_GEN 66 select HAVE_VIRT_CPU_ACCOUNTING_GEN
67 select IRQ_FORCED_THREADING 67 select IRQ_FORCED_THREADING
68 select KTIME_SCALAR
69 select MODULES_USE_ELF_REL 68 select MODULES_USE_ELF_REL
70 select NO_BOOTMEM 69 select NO_BOOTMEM
71 select OLD_SIGACTION 70 select OLD_SIGACTION
@@ -648,6 +647,7 @@ config ARCH_PXA
648 select AUTO_ZRELADDR 647 select AUTO_ZRELADDR
649 select CLKDEV_LOOKUP 648 select CLKDEV_LOOKUP
650 select CLKSRC_MMIO 649 select CLKSRC_MMIO
650 select CLKSRC_OF
651 select GENERIC_CLOCKEVENTS 651 select GENERIC_CLOCKEVENTS
652 select GPIO_PXA 652 select GPIO_PXA
653 select HAVE_IDE 653 select HAVE_IDE
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
index 490f3dced749..6eaddc47c43d 100644
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -58,16 +58,6 @@ static int read_mpidr(void)
58} 58}
59 59
60/* 60/*
61 * Get a global nanosecond time stamp for tracing.
62 */
63static s64 get_ns(void)
64{
65 struct timespec ts;
66 getnstimeofday(&ts);
67 return timespec_to_ns(&ts);
68}
69
70/*
71 * bL switcher core code. 61 * bL switcher core code.
72 */ 62 */
73 63
@@ -224,7 +214,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
224 */ 214 */
225 local_irq_disable(); 215 local_irq_disable();
226 local_fiq_disable(); 216 local_fiq_disable();
227 trace_cpu_migrate_begin(get_ns(), ob_mpidr); 217 trace_cpu_migrate_begin(ktime_get_real_ns(), ob_mpidr);
228 218
229 /* redirect GIC's SGIs to our counterpart */ 219 /* redirect GIC's SGIs to our counterpart */
230 gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]); 220 gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
@@ -267,7 +257,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
267 tdev->evtdev->next_event, 1); 257 tdev->evtdev->next_event, 1);
268 } 258 }
269 259
270 trace_cpu_migrate_finish(get_ns(), ib_mpidr); 260 trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr);
271 local_fiq_enable(); 261 local_fiq_enable();
272 local_irq_enable(); 262 local_irq_enable();
273 263
@@ -558,7 +548,7 @@ int bL_switcher_get_logical_index(u32 mpidr)
558 548
559static void bL_switcher_trace_trigger_cpu(void *__always_unused info) 549static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
560{ 550{
561 trace_cpu_migrate_current(get_ns(), read_mpidr()); 551 trace_cpu_migrate_current(ktime_get_real_ns(), read_mpidr());
562} 552}
563 553
564int bL_switcher_trace_trigger(void) 554int bL_switcher_trace_trigger(void)
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index 648867a8caa8..2fe1824c6dcb 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -4,7 +4,7 @@
4 4
5# Common support (must be linked before board specific support) 5# Common support (must be linked before board specific support)
6obj-y += clock.o devices.o generic.o irq.o \ 6obj-y += clock.o devices.o generic.o irq.o \
7 time.o reset.o 7 reset.o
8obj-$(CONFIG_PM) += pm.o sleep.o standby.o 8obj-$(CONFIG_PM) += pm.o sleep.o standby.o
9 9
10# Generic drivers that other drivers may depend upon 10# Generic drivers that other drivers may depend upon
diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c
index 42254175fcf4..6f38e1af45af 100644
--- a/arch/arm/mach-pxa/generic.c
+++ b/arch/arm/mach-pxa/generic.c
@@ -25,11 +25,13 @@
25#include <asm/mach/map.h> 25#include <asm/mach/map.h>
26#include <asm/mach-types.h> 26#include <asm/mach-types.h>
27 27
28#include <mach/irqs.h>
28#include <mach/reset.h> 29#include <mach/reset.h>
29#include <mach/smemc.h> 30#include <mach/smemc.h>
30#include <mach/pxa3xx-regs.h> 31#include <mach/pxa3xx-regs.h>
31 32
32#include "generic.h" 33#include "generic.h"
34#include <clocksource/pxa.h>
33 35
34void clear_reset_status(unsigned int mask) 36void clear_reset_status(unsigned int mask)
35{ 37{
@@ -57,6 +59,15 @@ unsigned long get_clock_tick_rate(void)
57EXPORT_SYMBOL(get_clock_tick_rate); 59EXPORT_SYMBOL(get_clock_tick_rate);
58 60
59/* 61/*
62 * For non device-tree builds, keep legacy timer init
63 */
64void pxa_timer_init(void)
65{
66 pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x40a00000),
67 get_clock_tick_rate());
68}
69
70/*
60 * Get the clock frequency as reflected by CCCR and the turbo flag. 71 * Get the clock frequency as reflected by CCCR and the turbo flag.
61 * We assume these values have been applied via a fcs. 72 * We assume these values have been applied via a fcs.
62 * If info is not 0 we also display the current settings. 73 * If info is not 0 we also display the current settings.
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c
deleted file mode 100644
index fca174e3865d..000000000000
--- a/arch/arm/mach-pxa/time.c
+++ /dev/null
@@ -1,162 +0,0 @@
1/*
2 * arch/arm/mach-pxa/time.c
3 *
4 * PXA clocksource, clockevents, and OST interrupt handlers.
5 * Copyright (c) 2007 by Bill Gatliff <bgat@billgatliff.com>.
6 *
7 * Derived from Nicolas Pitre's PXA timer handler Copyright (c) 2001
8 * by MontaVista Software, Inc. (Nico, your code rocks!)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/interrupt.h>
18#include <linux/clockchips.h>
19#include <linux/sched_clock.h>
20
21#include <asm/div64.h>
22#include <asm/mach/irq.h>
23#include <asm/mach/time.h>
24#include <mach/regs-ost.h>
25#include <mach/irqs.h>
26
27/*
28 * This is PXA's sched_clock implementation. This has a resolution
29 * of at least 308 ns and a maximum value of 208 days.
30 *
31 * The return value is guaranteed to be monotonic in that range as
32 * long as there is always less than 582 seconds between successive
33 * calls to sched_clock() which should always be the case in practice.
34 */
35
36static u64 notrace pxa_read_sched_clock(void)
37{
38 return readl_relaxed(OSCR);
39}
40
41
42#define MIN_OSCR_DELTA 16
43
44static irqreturn_t
45pxa_ost0_interrupt(int irq, void *dev_id)
46{
47 struct clock_event_device *c = dev_id;
48
49 /* Disarm the compare/match, signal the event. */
50 writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER);
51 writel_relaxed(OSSR_M0, OSSR);
52 c->event_handler(c);
53
54 return IRQ_HANDLED;
55}
56
57static int
58pxa_osmr0_set_next_event(unsigned long delta, struct clock_event_device *dev)
59{
60 unsigned long next, oscr;
61
62 writel_relaxed(readl_relaxed(OIER) | OIER_E0, OIER);
63 next = readl_relaxed(OSCR) + delta;
64 writel_relaxed(next, OSMR0);
65 oscr = readl_relaxed(OSCR);
66
67 return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0;
68}
69
70static void
71pxa_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *dev)
72{
73 switch (mode) {
74 case CLOCK_EVT_MODE_ONESHOT:
75 writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER);
76 writel_relaxed(OSSR_M0, OSSR);
77 break;
78
79 case CLOCK_EVT_MODE_UNUSED:
80 case CLOCK_EVT_MODE_SHUTDOWN:
81 /* initializing, released, or preparing for suspend */
82 writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER);
83 writel_relaxed(OSSR_M0, OSSR);
84 break;
85
86 case CLOCK_EVT_MODE_RESUME:
87 case CLOCK_EVT_MODE_PERIODIC:
88 break;
89 }
90}
91
92#ifdef CONFIG_PM
93static unsigned long osmr[4], oier, oscr;
94
95static void pxa_timer_suspend(struct clock_event_device *cedev)
96{
97 osmr[0] = readl_relaxed(OSMR0);
98 osmr[1] = readl_relaxed(OSMR1);
99 osmr[2] = readl_relaxed(OSMR2);
100 osmr[3] = readl_relaxed(OSMR3);
101 oier = readl_relaxed(OIER);
102 oscr = readl_relaxed(OSCR);
103}
104
105static void pxa_timer_resume(struct clock_event_device *cedev)
106{
107 /*
108 * Ensure that we have at least MIN_OSCR_DELTA between match
109 * register 0 and the OSCR, to guarantee that we will receive
110 * the one-shot timer interrupt. We adjust OSMR0 in preference
111 * to OSCR to guarantee that OSCR is monotonically incrementing.
112 */
113 if (osmr[0] - oscr < MIN_OSCR_DELTA)
114 osmr[0] += MIN_OSCR_DELTA;
115
116 writel_relaxed(osmr[0], OSMR0);
117 writel_relaxed(osmr[1], OSMR1);
118 writel_relaxed(osmr[2], OSMR2);
119 writel_relaxed(osmr[3], OSMR3);
120 writel_relaxed(oier, OIER);
121 writel_relaxed(oscr, OSCR);
122}
123#else
124#define pxa_timer_suspend NULL
125#define pxa_timer_resume NULL
126#endif
127
128static struct clock_event_device ckevt_pxa_osmr0 = {
129 .name = "osmr0",
130 .features = CLOCK_EVT_FEAT_ONESHOT,
131 .rating = 200,
132 .set_next_event = pxa_osmr0_set_next_event,
133 .set_mode = pxa_osmr0_set_mode,
134 .suspend = pxa_timer_suspend,
135 .resume = pxa_timer_resume,
136};
137
138static struct irqaction pxa_ost0_irq = {
139 .name = "ost0",
140 .flags = IRQF_TIMER | IRQF_IRQPOLL,
141 .handler = pxa_ost0_interrupt,
142 .dev_id = &ckevt_pxa_osmr0,
143};
144
145void __init pxa_timer_init(void)
146{
147 unsigned long clock_tick_rate = get_clock_tick_rate();
148
149 writel_relaxed(0, OIER);
150 writel_relaxed(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
151
152 sched_clock_register(pxa_read_sched_clock, 32, clock_tick_rate);
153
154 ckevt_pxa_osmr0.cpumask = cpumask_of(0);
155
156 setup_irq(IRQ_OST0, &pxa_ost0_irq);
157
158 clocksource_mmio_init(OSCR, "oscr0", clock_tick_rate, 200, 32,
159 clocksource_mmio_readl_up);
160 clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate,
161 MIN_OSCR_DELTA * 2, 0x7fffffff);
162}
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 24f2e8c62479..a81a446a5786 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -219,7 +219,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
219void update_vsyscall(struct timekeeper *tk) 219void update_vsyscall(struct timekeeper *tk)
220{ 220{
221 struct timespec xtime_coarse; 221 struct timespec xtime_coarse;
222 u32 use_syscall = strcmp(tk->clock->name, "arch_sys_counter"); 222 u32 use_syscall = strcmp(tk->tkr.clock->name, "arch_sys_counter");
223 223
224 ++vdso_data->tb_seq_count; 224 ++vdso_data->tb_seq_count;
225 smp_wmb(); 225 smp_wmb();
@@ -232,11 +232,11 @@ void update_vsyscall(struct timekeeper *tk)
232 vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; 232 vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
233 233
234 if (!use_syscall) { 234 if (!use_syscall) {
235 vdso_data->cs_cycle_last = tk->clock->cycle_last; 235 vdso_data->cs_cycle_last = tk->tkr.cycle_last;
236 vdso_data->xtime_clock_sec = tk->xtime_sec; 236 vdso_data->xtime_clock_sec = tk->xtime_sec;
237 vdso_data->xtime_clock_nsec = tk->xtime_nsec; 237 vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
238 vdso_data->cs_mult = tk->mult; 238 vdso_data->cs_mult = tk->tkr.mult;
239 vdso_data->cs_shift = tk->shift; 239 vdso_data->cs_shift = tk->tkr.shift;
240 } 240 }
241 241
242 smp_wmb(); 242 smp_wmb();
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 0fd6138f6203..4dc89d1f9c48 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -23,7 +23,6 @@ config HEXAGON
23 select GENERIC_IOMAP 23 select GENERIC_IOMAP
24 select GENERIC_SMP_IDLE_THREAD 24 select GENERIC_SMP_IDLE_THREAD
25 select STACKTRACE_SUPPORT 25 select STACKTRACE_SUPPORT
26 select KTIME_SCALAR
27 select GENERIC_CLOCKEVENTS 26 select GENERIC_CLOCKEVENTS
28 select GENERIC_CLOCKEVENTS_BROADCAST 27 select GENERIC_CLOCKEVENTS_BROADCAST
29 select MODULES_USE_ELF_RELA 28 select MODULES_USE_ELF_RELA
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 71c52bc7c28d..3e71ef85e439 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -441,7 +441,7 @@ void update_vsyscall_tz(void)
441} 441}
442 442
443void update_vsyscall_old(struct timespec *wall, struct timespec *wtm, 443void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
444 struct clocksource *c, u32 mult) 444 struct clocksource *c, u32 mult, cycle_t cycle_last)
445{ 445{
446 write_seqcount_begin(&fsyscall_gtod_data.seq); 446 write_seqcount_begin(&fsyscall_gtod_data.seq);
447 447
@@ -450,7 +450,7 @@ void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
450 fsyscall_gtod_data.clk_mult = mult; 450 fsyscall_gtod_data.clk_mult = mult;
451 fsyscall_gtod_data.clk_shift = c->shift; 451 fsyscall_gtod_data.clk_shift = c->shift;
452 fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio; 452 fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
453 fsyscall_gtod_data.clk_cycle_last = c->cycle_last; 453 fsyscall_gtod_data.clk_cycle_last = cycle_last;
454 454
455 /* copy kernel time structures */ 455 /* copy kernel time structures */
456 fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec; 456 fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 9fff9cdcc519..368ab374d33c 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -741,7 +741,7 @@ static cycle_t timebase_read(struct clocksource *cs)
741} 741}
742 742
743void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm, 743void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
744 struct clocksource *clock, u32 mult) 744 struct clocksource *clock, u32 mult, cycle_t cycle_last)
745{ 745{
746 u64 new_tb_to_xs, new_stamp_xsec; 746 u64 new_tb_to_xs, new_stamp_xsec;
747 u32 frac_sec; 747 u32 frac_sec;
@@ -774,7 +774,7 @@ void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
774 * We expect the caller to have done the first increment of 774 * We expect the caller to have done the first increment of
775 * vdso_data->tb_update_count already. 775 * vdso_data->tb_update_count already.
776 */ 776 */
777 vdso_data->tb_orig_stamp = clock->cycle_last; 777 vdso_data->tb_orig_stamp = cycle_last;
778 vdso_data->stamp_xsec = new_stamp_xsec; 778 vdso_data->stamp_xsec = new_stamp_xsec;
779 vdso_data->tb_to_xs = new_tb_to_xs; 779 vdso_data->tb_to_xs = new_tb_to_xs;
780 vdso_data->wtom_clock_sec = wtm->tv_sec; 780 vdso_data->wtom_clock_sec = wtm->tv_sec;
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index f85db3a69b4a..2930d1e81a05 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -611,7 +611,6 @@ static int __init create_spu(void *data)
611 int ret; 611 int ret;
612 static int number; 612 static int number;
613 unsigned long flags; 613 unsigned long flags;
614 struct timespec ts;
615 614
616 ret = -ENOMEM; 615 ret = -ENOMEM;
617 spu = kzalloc(sizeof (*spu), GFP_KERNEL); 616 spu = kzalloc(sizeof (*spu), GFP_KERNEL);
@@ -652,8 +651,7 @@ static int __init create_spu(void *data)
652 mutex_unlock(&spu_full_list_mutex); 651 mutex_unlock(&spu_full_list_mutex);
653 652
654 spu->stats.util_state = SPU_UTIL_IDLE_LOADED; 653 spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
655 ktime_get_ts(&ts); 654 spu->stats.tstamp = ktime_get_ns();
656 spu->stats.tstamp = timespec_to_ns(&ts);
657 655
658 INIT_LIST_HEAD(&spu->aff_list); 656 INIT_LIST_HEAD(&spu->aff_list);
659 657
@@ -676,7 +674,6 @@ static const char *spu_state_names[] = {
676static unsigned long long spu_acct_time(struct spu *spu, 674static unsigned long long spu_acct_time(struct spu *spu,
677 enum spu_utilization_state state) 675 enum spu_utilization_state state)
678{ 676{
679 struct timespec ts;
680 unsigned long long time = spu->stats.times[state]; 677 unsigned long long time = spu->stats.times[state];
681 678
682 /* 679 /*
@@ -684,10 +681,8 @@ static unsigned long long spu_acct_time(struct spu *spu,
684 * statistics are not updated. Apply the time delta from the 681 * statistics are not updated. Apply the time delta from the
685 * last recorded state of the spu. 682 * last recorded state of the spu.
686 */ 683 */
687 if (spu->stats.util_state == state) { 684 if (spu->stats.util_state == state)
688 ktime_get_ts(&ts); 685 time += ktime_get_ns() - spu->stats.tstamp;
689 time += timespec_to_ns(&ts) - spu->stats.tstamp;
690 }
691 686
692 return time / NSEC_PER_MSEC; 687 return time / NSEC_PER_MSEC;
693} 688}
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 9c6790d17eda..3b4152faeb1f 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -36,7 +36,6 @@ atomic_t nr_spu_contexts = ATOMIC_INIT(0);
36struct spu_context *alloc_spu_context(struct spu_gang *gang) 36struct spu_context *alloc_spu_context(struct spu_gang *gang)
37{ 37{
38 struct spu_context *ctx; 38 struct spu_context *ctx;
39 struct timespec ts;
40 39
41 ctx = kzalloc(sizeof *ctx, GFP_KERNEL); 40 ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
42 if (!ctx) 41 if (!ctx)
@@ -67,8 +66,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
67 __spu_update_sched_info(ctx); 66 __spu_update_sched_info(ctx);
68 spu_set_timeslice(ctx); 67 spu_set_timeslice(ctx);
69 ctx->stats.util_state = SPU_UTIL_IDLE_LOADED; 68 ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;
70 ktime_get_ts(&ts); 69 ctx->stats.tstamp = ktime_get_ns();
71 ctx->stats.tstamp = timespec_to_ns(&ts);
72 70
73 atomic_inc(&nr_spu_contexts); 71 atomic_inc(&nr_spu_contexts);
74 goto out; 72 goto out;
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 90986923a53a..d966bbe58b8f 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -2338,7 +2338,6 @@ static const char *ctx_state_names[] = {
2338static unsigned long long spufs_acct_time(struct spu_context *ctx, 2338static unsigned long long spufs_acct_time(struct spu_context *ctx,
2339 enum spu_utilization_state state) 2339 enum spu_utilization_state state)
2340{ 2340{
2341 struct timespec ts;
2342 unsigned long long time = ctx->stats.times[state]; 2341 unsigned long long time = ctx->stats.times[state];
2343 2342
2344 /* 2343 /*
@@ -2351,8 +2350,7 @@ static unsigned long long spufs_acct_time(struct spu_context *ctx,
2351 * of the spu context. 2350 * of the spu context.
2352 */ 2351 */
2353 if (ctx->spu && ctx->stats.util_state == state) { 2352 if (ctx->spu && ctx->stats.util_state == state) {
2354 ktime_get_ts(&ts); 2353 time += ktime_get_ns() - ctx->stats.tstamp;
2355 time += timespec_to_ns(&ts) - ctx->stats.tstamp;
2356 } 2354 }
2357 2355
2358 return time / NSEC_PER_MSEC; 2356 return time / NSEC_PER_MSEC;
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 4a0a64fe25df..998f632e7cce 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -1039,13 +1039,11 @@ void spuctx_switch_state(struct spu_context *ctx,
1039{ 1039{
1040 unsigned long long curtime; 1040 unsigned long long curtime;
1041 signed long long delta; 1041 signed long long delta;
1042 struct timespec ts;
1043 struct spu *spu; 1042 struct spu *spu;
1044 enum spu_utilization_state old_state; 1043 enum spu_utilization_state old_state;
1045 int node; 1044 int node;
1046 1045
1047 ktime_get_ts(&ts); 1046 curtime = ktime_get_ns();
1048 curtime = timespec_to_ns(&ts);
1049 delta = curtime - ctx->stats.tstamp; 1047 delta = curtime - ctx->stats.tstamp;
1050 1048
1051 WARN_ON(!mutex_is_locked(&ctx->state_mutex)); 1049 WARN_ON(!mutex_is_locked(&ctx->state_mutex));
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f5af5f6ef0f4..720a11d339eb 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -136,7 +136,6 @@ config S390
136 select HAVE_SYSCALL_TRACEPOINTS 136 select HAVE_SYSCALL_TRACEPOINTS
137 select HAVE_UID16 if 32BIT 137 select HAVE_UID16 if 32BIT
138 select HAVE_VIRT_CPU_ACCOUNTING 138 select HAVE_VIRT_CPU_ACCOUNTING
139 select KTIME_SCALAR if 32BIT
140 select MODULES_USE_ELF_RELA 139 select MODULES_USE_ELF_RELA
141 select NO_BOOTMEM 140 select NO_BOOTMEM
142 select OLD_SIGACTION 141 select OLD_SIGACTION
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 0931b110c826..4cef607f3711 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -214,26 +214,26 @@ void update_vsyscall(struct timekeeper *tk)
214{ 214{
215 u64 nsecps; 215 u64 nsecps;
216 216
217 if (tk->clock != &clocksource_tod) 217 if (tk->tkr.clock != &clocksource_tod)
218 return; 218 return;
219 219
220 /* Make userspace gettimeofday spin until we're done. */ 220 /* Make userspace gettimeofday spin until we're done. */
221 ++vdso_data->tb_update_count; 221 ++vdso_data->tb_update_count;
222 smp_wmb(); 222 smp_wmb();
223 vdso_data->xtime_tod_stamp = tk->clock->cycle_last; 223 vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
224 vdso_data->xtime_clock_sec = tk->xtime_sec; 224 vdso_data->xtime_clock_sec = tk->xtime_sec;
225 vdso_data->xtime_clock_nsec = tk->xtime_nsec; 225 vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
226 vdso_data->wtom_clock_sec = 226 vdso_data->wtom_clock_sec =
227 tk->xtime_sec + tk->wall_to_monotonic.tv_sec; 227 tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
228 vdso_data->wtom_clock_nsec = tk->xtime_nsec + 228 vdso_data->wtom_clock_nsec = tk->tkr.xtime_nsec +
229 + ((u64) tk->wall_to_monotonic.tv_nsec << tk->shift); 229 + ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr.shift);
230 nsecps = (u64) NSEC_PER_SEC << tk->shift; 230 nsecps = (u64) NSEC_PER_SEC << tk->tkr.shift;
231 while (vdso_data->wtom_clock_nsec >= nsecps) { 231 while (vdso_data->wtom_clock_nsec >= nsecps) {
232 vdso_data->wtom_clock_nsec -= nsecps; 232 vdso_data->wtom_clock_nsec -= nsecps;
233 vdso_data->wtom_clock_sec++; 233 vdso_data->wtom_clock_sec++;
234 } 234 }
235 vdso_data->tk_mult = tk->mult; 235 vdso_data->tk_mult = tk->tkr.mult;
236 vdso_data->tk_shift = tk->shift; 236 vdso_data->tk_shift = tk->tkr.shift;
237 smp_wmb(); 237 smp_wmb();
238 ++vdso_data->tb_update_count; 238 ++vdso_data->tb_update_count;
239} 239}
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 462dcd0c1700..d8fbc289e680 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -260,9 +260,8 @@ void update_vsyscall_tz(void)
260 260
261void update_vsyscall(struct timekeeper *tk) 261void update_vsyscall(struct timekeeper *tk)
262{ 262{
263 struct timespec wall_time = tk_xtime(tk);
264 struct timespec *wtm = &tk->wall_to_monotonic; 263 struct timespec *wtm = &tk->wall_to_monotonic;
265 struct clocksource *clock = tk->clock; 264 struct clocksource *clock = tk->tkr.clock;
266 265
267 if (clock != &cycle_counter_cs) 266 if (clock != &cycle_counter_cs)
268 return; 267 return;
@@ -270,13 +269,13 @@ void update_vsyscall(struct timekeeper *tk)
270 /* Userspace gettimeofday will spin while this value is odd. */ 269 /* Userspace gettimeofday will spin while this value is odd. */
271 ++vdso_data->tb_update_count; 270 ++vdso_data->tb_update_count;
272 smp_wmb(); 271 smp_wmb();
273 vdso_data->xtime_tod_stamp = clock->cycle_last; 272 vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
274 vdso_data->xtime_clock_sec = wall_time.tv_sec; 273 vdso_data->xtime_clock_sec = tk->xtime_sec;
275 vdso_data->xtime_clock_nsec = wall_time.tv_nsec; 274 vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
276 vdso_data->wtom_clock_sec = wtm->tv_sec; 275 vdso_data->wtom_clock_sec = wtm->tv_sec;
277 vdso_data->wtom_clock_nsec = wtm->tv_nsec; 276 vdso_data->wtom_clock_nsec = wtm->tv_nsec;
278 vdso_data->mult = clock->mult; 277 vdso_data->mult = tk->tkr.mult;
279 vdso_data->shift = clock->shift; 278 vdso_data->shift = tk->tkr.shift;
280 smp_wmb(); 279 smp_wmb();
281 ++vdso_data->tb_update_count; 280 ++vdso_data->tb_update_count;
282} 281}
diff --git a/arch/tile/kernel/vdso/vgettimeofday.c b/arch/tile/kernel/vdso/vgettimeofday.c
index 51ec8e46f5f9..e933fb9fbf5c 100644
--- a/arch/tile/kernel/vdso/vgettimeofday.c
+++ b/arch/tile/kernel/vdso/vgettimeofday.c
@@ -83,10 +83,11 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
83 if (count & 1) 83 if (count & 1)
84 continue; 84 continue;
85 85
86 cycles = (get_cycles() - vdso_data->xtime_tod_stamp);
87 ns = (cycles * vdso_data->mult) >> vdso_data->shift;
88 sec = vdso_data->xtime_clock_sec; 86 sec = vdso_data->xtime_clock_sec;
89 ns += vdso_data->xtime_clock_nsec; 87 cycles = get_cycles() - vdso_data->xtime_tod_stamp;
88 ns = (cycles * vdso_data->mult) + vdso_data->xtime_clock_nsec;
89 ns >>= vdso_data->shift;
90
90 if (ns >= NSEC_PER_SEC) { 91 if (ns >= NSEC_PER_SEC) {
91 ns -= NSEC_PER_SEC; 92 ns -= NSEC_PER_SEC;
92 sec += 1; 93 sec += 1;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6b71f0417293..6cfeb082a422 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -108,9 +108,9 @@ config X86
108 select CLOCKSOURCE_WATCHDOG 108 select CLOCKSOURCE_WATCHDOG
109 select GENERIC_CLOCKEVENTS 109 select GENERIC_CLOCKEVENTS
110 select ARCH_CLOCKSOURCE_DATA 110 select ARCH_CLOCKSOURCE_DATA
111 select CLOCKSOURCE_VALIDATE_LAST_CYCLE
111 select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC) 112 select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
112 select GENERIC_TIME_VSYSCALL 113 select GENERIC_TIME_VSYSCALL
113 select KTIME_SCALAR if X86_32
114 select GENERIC_STRNCPY_FROM_USER 114 select GENERIC_STRNCPY_FROM_USER
115 select GENERIC_STRNLEN_USER 115 select GENERIC_STRNLEN_USER
116 select HAVE_CONTEXT_TRACKING if X86_64 116 select HAVE_CONTEXT_TRACKING if X86_64
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 56b0c338061e..b6025f9e36c6 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -950,7 +950,7 @@ core_initcall(cpufreq_tsc);
950static struct clocksource clocksource_tsc; 950static struct clocksource clocksource_tsc;
951 951
952/* 952/*
953 * We compare the TSC to the cycle_last value in the clocksource 953 * We used to compare the TSC to the cycle_last value in the clocksource
954 * structure to avoid a nasty time-warp. This can be observed in a 954 * structure to avoid a nasty time-warp. This can be observed in a
955 * very small window right after one CPU updated cycle_last under 955 * very small window right after one CPU updated cycle_last under
956 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which 956 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
@@ -960,26 +960,23 @@ static struct clocksource clocksource_tsc;
960 * due to the unsigned delta calculation of the time keeping core 960 * due to the unsigned delta calculation of the time keeping core
961 * code, which is necessary to support wrapping clocksources like pm 961 * code, which is necessary to support wrapping clocksources like pm
962 * timer. 962 * timer.
963 *
964 * This sanity check is now done in the core timekeeping code.
965 * checking the result of read_tsc() - cycle_last for being negative.
966 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
963 */ 967 */
964static cycle_t read_tsc(struct clocksource *cs) 968static cycle_t read_tsc(struct clocksource *cs)
965{ 969{
966 cycle_t ret = (cycle_t)get_cycles(); 970 return (cycle_t)get_cycles();
967
968 return ret >= clocksource_tsc.cycle_last ?
969 ret : clocksource_tsc.cycle_last;
970}
971
972static void resume_tsc(struct clocksource *cs)
973{
974 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
975 clocksource_tsc.cycle_last = 0;
976} 971}
977 972
973/*
974 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
975 */
978static struct clocksource clocksource_tsc = { 976static struct clocksource clocksource_tsc = {
979 .name = "tsc", 977 .name = "tsc",
980 .rating = 300, 978 .rating = 300,
981 .read = read_tsc, 979 .read = read_tsc,
982 .resume = resume_tsc,
983 .mask = CLOCKSOURCE_MASK(64), 980 .mask = CLOCKSOURCE_MASK(64),
984 .flags = CLOCK_SOURCE_IS_CONTINUOUS | 981 .flags = CLOCK_SOURCE_IS_CONTINUOUS |
985 CLOCK_SOURCE_MUST_VERIFY, 982 CLOCK_SOURCE_MUST_VERIFY,
diff --git a/arch/x86/kernel/vsyscall_gtod.c b/arch/x86/kernel/vsyscall_gtod.c
index 9531fbb123ba..c7d791f32b98 100644
--- a/arch/x86/kernel/vsyscall_gtod.c
+++ b/arch/x86/kernel/vsyscall_gtod.c
@@ -31,29 +31,30 @@ void update_vsyscall(struct timekeeper *tk)
31 gtod_write_begin(vdata); 31 gtod_write_begin(vdata);
32 32
33 /* copy vsyscall data */ 33 /* copy vsyscall data */
34 vdata->vclock_mode = tk->clock->archdata.vclock_mode; 34 vdata->vclock_mode = tk->tkr.clock->archdata.vclock_mode;
35 vdata->cycle_last = tk->clock->cycle_last; 35 vdata->cycle_last = tk->tkr.cycle_last;
36 vdata->mask = tk->clock->mask; 36 vdata->mask = tk->tkr.mask;
37 vdata->mult = tk->mult; 37 vdata->mult = tk->tkr.mult;
38 vdata->shift = tk->shift; 38 vdata->shift = tk->tkr.shift;
39 39
40 vdata->wall_time_sec = tk->xtime_sec; 40 vdata->wall_time_sec = tk->xtime_sec;
41 vdata->wall_time_snsec = tk->xtime_nsec; 41 vdata->wall_time_snsec = tk->tkr.xtime_nsec;
42 42
43 vdata->monotonic_time_sec = tk->xtime_sec 43 vdata->monotonic_time_sec = tk->xtime_sec
44 + tk->wall_to_monotonic.tv_sec; 44 + tk->wall_to_monotonic.tv_sec;
45 vdata->monotonic_time_snsec = tk->xtime_nsec 45 vdata->monotonic_time_snsec = tk->tkr.xtime_nsec
46 + ((u64)tk->wall_to_monotonic.tv_nsec 46 + ((u64)tk->wall_to_monotonic.tv_nsec
47 << tk->shift); 47 << tk->tkr.shift);
48 while (vdata->monotonic_time_snsec >= 48 while (vdata->monotonic_time_snsec >=
49 (((u64)NSEC_PER_SEC) << tk->shift)) { 49 (((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
50 vdata->monotonic_time_snsec -= 50 vdata->monotonic_time_snsec -=
51 ((u64)NSEC_PER_SEC) << tk->shift; 51 ((u64)NSEC_PER_SEC) << tk->tkr.shift;
52 vdata->monotonic_time_sec++; 52 vdata->monotonic_time_sec++;
53 } 53 }
54 54
55 vdata->wall_time_coarse_sec = tk->xtime_sec; 55 vdata->wall_time_coarse_sec = tk->xtime_sec;
56 vdata->wall_time_coarse_nsec = (long)(tk->xtime_nsec >> tk->shift); 56 vdata->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
57 tk->tkr.shift);
57 58
58 vdata->monotonic_time_coarse_sec = 59 vdata->monotonic_time_coarse_sec =
59 vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec; 60 vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b86d329b953a..ca3d760dd581 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1020,9 +1020,8 @@ struct pvclock_gtod_data {
1020 u32 shift; 1020 u32 shift;
1021 } clock; 1021 } clock;
1022 1022
1023 /* open coded 'struct timespec' */ 1023 u64 boot_ns;
1024 u64 monotonic_time_snsec; 1024 u64 nsec_base;
1025 time_t monotonic_time_sec;
1026}; 1025};
1027 1026
1028static struct pvclock_gtod_data pvclock_gtod_data; 1027static struct pvclock_gtod_data pvclock_gtod_data;
@@ -1030,27 +1029,21 @@ static struct pvclock_gtod_data pvclock_gtod_data;
1030static void update_pvclock_gtod(struct timekeeper *tk) 1029static void update_pvclock_gtod(struct timekeeper *tk)
1031{ 1030{
1032 struct pvclock_gtod_data *vdata = &pvclock_gtod_data; 1031 struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
1032 u64 boot_ns;
1033
1034 boot_ns = ktime_to_ns(ktime_add(tk->tkr.base_mono, tk->offs_boot));
1033 1035
1034 write_seqcount_begin(&vdata->seq); 1036 write_seqcount_begin(&vdata->seq);
1035 1037
1036 /* copy pvclock gtod data */ 1038 /* copy pvclock gtod data */
1037 vdata->clock.vclock_mode = tk->clock->archdata.vclock_mode; 1039 vdata->clock.vclock_mode = tk->tkr.clock->archdata.vclock_mode;
1038 vdata->clock.cycle_last = tk->clock->cycle_last; 1040 vdata->clock.cycle_last = tk->tkr.cycle_last;
1039 vdata->clock.mask = tk->clock->mask; 1041 vdata->clock.mask = tk->tkr.mask;
1040 vdata->clock.mult = tk->mult; 1042 vdata->clock.mult = tk->tkr.mult;
1041 vdata->clock.shift = tk->shift; 1043 vdata->clock.shift = tk->tkr.shift;
1042 1044
1043 vdata->monotonic_time_sec = tk->xtime_sec 1045 vdata->boot_ns = boot_ns;
1044 + tk->wall_to_monotonic.tv_sec; 1046 vdata->nsec_base = tk->tkr.xtime_nsec;
1045 vdata->monotonic_time_snsec = tk->xtime_nsec
1046 + (tk->wall_to_monotonic.tv_nsec
1047 << tk->shift);
1048 while (vdata->monotonic_time_snsec >=
1049 (((u64)NSEC_PER_SEC) << tk->shift)) {
1050 vdata->monotonic_time_snsec -=
1051 ((u64)NSEC_PER_SEC) << tk->shift;
1052 vdata->monotonic_time_sec++;
1053 }
1054 1047
1055 write_seqcount_end(&vdata->seq); 1048 write_seqcount_end(&vdata->seq);
1056} 1049}
@@ -1145,11 +1138,7 @@ static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
1145 1138
1146static inline u64 get_kernel_ns(void) 1139static inline u64 get_kernel_ns(void)
1147{ 1140{
1148 struct timespec ts; 1141 return ktime_get_boot_ns();
1149
1150 ktime_get_ts(&ts);
1151 monotonic_to_bootbased(&ts);
1152 return timespec_to_ns(&ts);
1153} 1142}
1154 1143
1155#ifdef CONFIG_X86_64 1144#ifdef CONFIG_X86_64
@@ -1414,23 +1403,22 @@ static inline u64 vgettsc(cycle_t *cycle_now)
1414 return v * gtod->clock.mult; 1403 return v * gtod->clock.mult;
1415} 1404}
1416 1405
1417static int do_monotonic(struct timespec *ts, cycle_t *cycle_now) 1406static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
1418{ 1407{
1408 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1419 unsigned long seq; 1409 unsigned long seq;
1420 u64 ns;
1421 int mode; 1410 int mode;
1422 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 1411 u64 ns;
1423 1412
1424 ts->tv_nsec = 0;
1425 do { 1413 do {
1426 seq = read_seqcount_begin(&gtod->seq); 1414 seq = read_seqcount_begin(&gtod->seq);
1427 mode = gtod->clock.vclock_mode; 1415 mode = gtod->clock.vclock_mode;
1428 ts->tv_sec = gtod->monotonic_time_sec; 1416 ns = gtod->nsec_base;
1429 ns = gtod->monotonic_time_snsec;
1430 ns += vgettsc(cycle_now); 1417 ns += vgettsc(cycle_now);
1431 ns >>= gtod->clock.shift; 1418 ns >>= gtod->clock.shift;
1419 ns += gtod->boot_ns;
1432 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); 1420 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
1433 timespec_add_ns(ts, ns); 1421 *t = ns;
1434 1422
1435 return mode; 1423 return mode;
1436} 1424}
@@ -1438,19 +1426,11 @@ static int do_monotonic(struct timespec *ts, cycle_t *cycle_now)
1438/* returns true if host is using tsc clocksource */ 1426/* returns true if host is using tsc clocksource */
1439static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now) 1427static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
1440{ 1428{
1441 struct timespec ts;
1442
1443 /* checked again under seqlock below */ 1429 /* checked again under seqlock below */
1444 if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC) 1430 if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
1445 return false; 1431 return false;
1446 1432
1447 if (do_monotonic(&ts, cycle_now) != VCLOCK_TSC) 1433 return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC;
1448 return false;
1449
1450 monotonic_to_bootbased(&ts);
1451 *kernel_ns = timespec_to_ns(&ts);
1452
1453 return true;
1454} 1434}
1455#endif 1435#endif
1456 1436
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index f953c96efc86..ebc4c73d8ca4 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -49,7 +49,7 @@
49#include <asm/uaccess.h> 49#include <asm/uaccess.h>
50#include <linux/sysrq.h> 50#include <linux/sysrq.h>
51#include <linux/timer.h> 51#include <linux/timer.h>
52#include <linux/time.h> 52#include <linux/hrtimer.h>
53 53
54#define VERSION_STR "0.9.1" 54#define VERSION_STR "0.9.1"
55 55
@@ -117,24 +117,7 @@ __setup("hcheck_reboot", hangcheck_parse_reboot);
117__setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks); 117__setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
118#endif /* not MODULE */ 118#endif /* not MODULE */
119 119
120#if defined(CONFIG_S390) 120#define TIMER_FREQ 1000000000ULL
121# define HAVE_MONOTONIC
122# define TIMER_FREQ 1000000000ULL
123#else
124# define TIMER_FREQ 1000000000ULL
125#endif
126
127#ifdef HAVE_MONOTONIC
128extern unsigned long long monotonic_clock(void);
129#else
130static inline unsigned long long monotonic_clock(void)
131{
132 struct timespec ts;
133 getrawmonotonic(&ts);
134 return timespec_to_ns(&ts);
135}
136#endif /* HAVE_MONOTONIC */
137
138 121
139/* Last time scheduled */ 122/* Last time scheduled */
140static unsigned long long hangcheck_tsc, hangcheck_tsc_margin; 123static unsigned long long hangcheck_tsc, hangcheck_tsc_margin;
@@ -143,12 +126,11 @@ static void hangcheck_fire(unsigned long);
143 126
144static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire, 0, 0); 127static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire, 0, 0);
145 128
146
147static void hangcheck_fire(unsigned long data) 129static void hangcheck_fire(unsigned long data)
148{ 130{
149 unsigned long long cur_tsc, tsc_diff; 131 unsigned long long cur_tsc, tsc_diff;
150 132
151 cur_tsc = monotonic_clock(); 133 cur_tsc = ktime_get_ns();
152 134
153 if (cur_tsc > hangcheck_tsc) 135 if (cur_tsc > hangcheck_tsc)
154 tsc_diff = cur_tsc - hangcheck_tsc; 136 tsc_diff = cur_tsc - hangcheck_tsc;
@@ -177,7 +159,7 @@ static void hangcheck_fire(unsigned long data)
177 tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ); 159 tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ);
178#endif 160#endif
179 mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ)); 161 mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
180 hangcheck_tsc = monotonic_clock(); 162 hangcheck_tsc = ktime_get_ns();
181} 163}
182 164
183 165
@@ -185,16 +167,11 @@ static int __init hangcheck_init(void)
185{ 167{
186 printk("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n", 168 printk("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n",
187 VERSION_STR, hangcheck_tick, hangcheck_margin); 169 VERSION_STR, hangcheck_tick, hangcheck_margin);
188#if defined (HAVE_MONOTONIC)
189 printk("Hangcheck: Using monotonic_clock().\n");
190#else
191 printk("Hangcheck: Using getrawmonotonic().\n");
192#endif /* HAVE_MONOTONIC */
193 hangcheck_tsc_margin = 170 hangcheck_tsc_margin =
194 (unsigned long long)(hangcheck_margin + hangcheck_tick); 171 (unsigned long long)(hangcheck_margin + hangcheck_tick);
195 hangcheck_tsc_margin *= (unsigned long long)TIMER_FREQ; 172 hangcheck_tsc_margin *= (unsigned long long)TIMER_FREQ;
196 173
197 hangcheck_tsc = monotonic_clock(); 174 hangcheck_tsc = ktime_get_ns();
198 mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ)); 175 mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
199 176
200 return 0; 177 return 0;
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 065131cbfcc0..cfd6519df661 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -1,3 +1,5 @@
1menu "Clock Source drivers"
2
1config CLKSRC_OF 3config CLKSRC_OF
2 bool 4 bool
3 5
@@ -125,6 +127,7 @@ config CLKSRC_METAG_GENERIC
125 127
126config CLKSRC_EXYNOS_MCT 128config CLKSRC_EXYNOS_MCT
127 def_bool y if ARCH_EXYNOS 129 def_bool y if ARCH_EXYNOS
130 depends on !ARM64
128 help 131 help
129 Support for Multi Core Timer controller on Exynos SoCs. 132 Support for Multi Core Timer controller on Exynos SoCs.
130 133
@@ -149,6 +152,11 @@ config VF_PIT_TIMER
149config SYS_SUPPORTS_SH_CMT 152config SYS_SUPPORTS_SH_CMT
150 bool 153 bool
151 154
155config MTK_TIMER
156 select CLKSRC_OF
157 select CLKSRC_MMIO
158 bool
159
152config SYS_SUPPORTS_SH_MTU2 160config SYS_SUPPORTS_SH_MTU2
153 bool 161 bool
154 162
@@ -173,7 +181,7 @@ config SH_TIMER_MTU2
173 default SYS_SUPPORTS_SH_MTU2 181 default SYS_SUPPORTS_SH_MTU2
174 help 182 help
175 This enables build of a clockevent driver for the Multi-Function 183 This enables build of a clockevent driver for the Multi-Function
176 Timer Pulse Unit 2 (TMU2) hardware available on SoCs from Renesas. 184 Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas.
177 This hardware comes with 16 bit-timer registers. 185 This hardware comes with 16 bit-timer registers.
178 186
179config SH_TIMER_TMU 187config SH_TIMER_TMU
@@ -187,7 +195,7 @@ config SH_TIMER_TMU
187 195
188config EM_TIMER_STI 196config EM_TIMER_STI
189 bool "Renesas STI timer driver" if COMPILE_TEST 197 bool "Renesas STI timer driver" if COMPILE_TEST
190 depends on GENERIC_CLOCKEVENTS 198 depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
191 default SYS_SUPPORTS_EM_STI 199 default SYS_SUPPORTS_EM_STI
192 help 200 help
193 This enables build of a clocksource and clockevent driver for 201 This enables build of a clocksource and clockevent driver for
@@ -207,3 +215,5 @@ config CLKSRC_VERSATILE
207 counter available in the "System Registers" block of 215 counter available in the "System Registers" block of
208 ARM Versatile, RealView and Versatile Express reference 216 ARM Versatile, RealView and Versatile Express reference
209 platforms. 217 platforms.
218
219endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 800b1303c236..7fd9fd1dff42 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -16,9 +16,11 @@ obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
16obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o 16obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
17obj-$(CONFIG_ORION_TIMER) += time-orion.o 17obj-$(CONFIG_ORION_TIMER) += time-orion.o
18obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o 18obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o
19obj-$(CONFIG_ARCH_CLPS711X) += clps711x-timer.o
19obj-$(CONFIG_ARCH_MARCO) += timer-marco.o 20obj-$(CONFIG_ARCH_MARCO) += timer-marco.o
20obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o 21obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o
21obj-$(CONFIG_ARCH_MXS) += mxs_timer.o 22obj-$(CONFIG_ARCH_MXS) += mxs_timer.o
23obj-$(CONFIG_ARCH_PXA) += pxa_timer.o
22obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o 24obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o
23obj-$(CONFIG_ARCH_U300) += timer-u300.o 25obj-$(CONFIG_ARCH_U300) += timer-u300.o
24obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o 26obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
@@ -34,6 +36,7 @@ obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
34obj-$(CONFIG_FSL_FTM_TIMER) += fsl_ftm_timer.o 36obj-$(CONFIG_FSL_FTM_TIMER) += fsl_ftm_timer.o
35obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o 37obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o
36obj-$(CONFIG_CLKSRC_QCOM) += qcom-timer.o 38obj-$(CONFIG_CLKSRC_QCOM) += qcom-timer.o
39obj-$(CONFIG_MTK_TIMER) += mtk_timer.o
37 40
38obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o 41obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
39obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o 42obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
new file mode 100644
index 000000000000..d83ec1f2fddc
--- /dev/null
+++ b/drivers/clocksource/clps711x-timer.c
@@ -0,0 +1,131 @@
1/*
2 * Cirrus Logic CLPS711X clocksource driver
3 *
4 * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/clk.h>
13#include <linux/clockchips.h>
14#include <linux/clocksource.h>
15#include <linux/interrupt.h>
16#include <linux/io.h>
17#include <linux/of_address.h>
18#include <linux/of_irq.h>
19#include <linux/sched_clock.h>
20#include <linux/slab.h>
21
22enum {
23 CLPS711X_CLKSRC_CLOCKSOURCE,
24 CLPS711X_CLKSRC_CLOCKEVENT,
25};
26
27static void __iomem *tcd;
28
29static u64 notrace clps711x_sched_clock_read(void)
30{
31 return ~readw(tcd);
32}
33
34static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base)
35{
36 unsigned long rate;
37
38 if (!base)
39 return -ENOMEM;
40 if (IS_ERR(clock))
41 return PTR_ERR(clock);
42
43 rate = clk_get_rate(clock);
44
45 tcd = base;
46
47 clocksource_mmio_init(tcd, "clps711x-clocksource", rate, 300, 16,
48 clocksource_mmio_readw_down);
49
50 sched_clock_register(clps711x_sched_clock_read, 16, rate);
51
52 return 0;
53}
54
55static irqreturn_t clps711x_timer_interrupt(int irq, void *dev_id)
56{
57 struct clock_event_device *evt = dev_id;
58
59 evt->event_handler(evt);
60
61 return IRQ_HANDLED;
62}
63
64static void clps711x_clockevent_set_mode(enum clock_event_mode mode,
65 struct clock_event_device *evt)
66{
67}
68
69static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
70 unsigned int irq)
71{
72 struct clock_event_device *clkevt;
73 unsigned long rate;
74
75 if (!irq)
76 return -EINVAL;
77 if (!base)
78 return -ENOMEM;
79 if (IS_ERR(clock))
80 return PTR_ERR(clock);
81
82 clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
83 if (!clkevt)
84 return -ENOMEM;
85
86 rate = clk_get_rate(clock);
87
88 /* Set Timer prescaler */
89 writew(DIV_ROUND_CLOSEST(rate, HZ), base);
90
91 clkevt->name = "clps711x-clockevent";
92 clkevt->rating = 300;
93 clkevt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_C3STOP;
94 clkevt->set_mode = clps711x_clockevent_set_mode;
95 clkevt->cpumask = cpumask_of(0);
96 clockevents_config_and_register(clkevt, HZ, 0, 0);
97
98 return request_irq(irq, clps711x_timer_interrupt, IRQF_TIMER,
99 "clps711x-timer", clkevt);
100}
101
102void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base,
103 unsigned int irq)
104{
105 struct clk *tc1 = clk_get_sys("clps711x-timer.0", NULL);
106 struct clk *tc2 = clk_get_sys("clps711x-timer.1", NULL);
107
108 BUG_ON(_clps711x_clksrc_init(tc1, tc1_base));
109 BUG_ON(_clps711x_clkevt_init(tc2, tc2_base, irq));
110}
111
112#ifdef CONFIG_CLKSRC_OF
113static void __init clps711x_timer_init(struct device_node *np)
114{
115 unsigned int irq = irq_of_parse_and_map(np, 0);
116 struct clk *clock = of_clk_get(np, 0);
117 void __iomem *base = of_iomap(np, 0);
118
119 switch (of_alias_get_id(np, "timer")) {
120 case CLPS711X_CLKSRC_CLOCKSOURCE:
121 BUG_ON(_clps711x_clksrc_init(clock, base));
122 break;
123 case CLPS711X_CLKSRC_CLOCKEVENT:
124 BUG_ON(_clps711x_clkevt_init(clock, base, irq));
125 break;
126 default:
127 break;
128 }
129}
130CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,clps711x-timer", clps711x_timer_init);
131#endif
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index ab51bf20a3ed..9403061a2acc 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -94,7 +94,7 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
94 u32 mask; 94 u32 mask;
95 u32 i; 95 u32 i;
96 96
97 __raw_writel(value, reg_base + offset); 97 writel_relaxed(value, reg_base + offset);
98 98
99 if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) { 99 if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
100 stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET; 100 stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
@@ -144,8 +144,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
144 144
145 /* Wait maximum 1 ms until written values are applied */ 145 /* Wait maximum 1 ms until written values are applied */
146 for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++) 146 for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
147 if (__raw_readl(reg_base + stat_addr) & mask) { 147 if (readl_relaxed(reg_base + stat_addr) & mask) {
148 __raw_writel(mask, reg_base + stat_addr); 148 writel_relaxed(mask, reg_base + stat_addr);
149 return; 149 return;
150 } 150 }
151 151
@@ -157,28 +157,51 @@ static void exynos4_mct_frc_start(void)
157{ 157{
158 u32 reg; 158 u32 reg;
159 159
160 reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON); 160 reg = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
161 reg |= MCT_G_TCON_START; 161 reg |= MCT_G_TCON_START;
162 exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON); 162 exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
163} 163}
164 164
165static cycle_t notrace _exynos4_frc_read(void) 165/**
166 * exynos4_read_count_64 - Read all 64-bits of the global counter
167 *
168 * This will read all 64-bits of the global counter taking care to make sure
169 * that the upper and lower half match. Note that reading the MCT can be quite
170 * slow (hundreds of nanoseconds) so you should use the 32-bit (lower half
171 * only) version when possible.
172 *
173 * Returns the number of cycles in the global counter.
174 */
175static u64 exynos4_read_count_64(void)
166{ 176{
167 unsigned int lo, hi; 177 unsigned int lo, hi;
168 u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U); 178 u32 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
169 179
170 do { 180 do {
171 hi = hi2; 181 hi = hi2;
172 lo = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_L); 182 lo = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
173 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U); 183 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
174 } while (hi != hi2); 184 } while (hi != hi2);
175 185
176 return ((cycle_t)hi << 32) | lo; 186 return ((cycle_t)hi << 32) | lo;
177} 187}
178 188
189/**
190 * exynos4_read_count_32 - Read the lower 32-bits of the global counter
191 *
192 * This will read just the lower 32-bits of the global counter. This is marked
193 * as notrace so it can be used by the scheduler clock.
194 *
195 * Returns the number of cycles in the global counter (lower 32 bits).
196 */
197static u32 notrace exynos4_read_count_32(void)
198{
199 return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
200}
201
179static cycle_t exynos4_frc_read(struct clocksource *cs) 202static cycle_t exynos4_frc_read(struct clocksource *cs)
180{ 203{
181 return _exynos4_frc_read(); 204 return exynos4_read_count_32();
182} 205}
183 206
184static void exynos4_frc_resume(struct clocksource *cs) 207static void exynos4_frc_resume(struct clocksource *cs)
@@ -190,21 +213,23 @@ struct clocksource mct_frc = {
190 .name = "mct-frc", 213 .name = "mct-frc",
191 .rating = 400, 214 .rating = 400,
192 .read = exynos4_frc_read, 215 .read = exynos4_frc_read,
193 .mask = CLOCKSOURCE_MASK(64), 216 .mask = CLOCKSOURCE_MASK(32),
194 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 217 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
195 .resume = exynos4_frc_resume, 218 .resume = exynos4_frc_resume,
196}; 219};
197 220
198static u64 notrace exynos4_read_sched_clock(void) 221static u64 notrace exynos4_read_sched_clock(void)
199{ 222{
200 return _exynos4_frc_read(); 223 return exynos4_read_count_32();
201} 224}
202 225
203static struct delay_timer exynos4_delay_timer; 226static struct delay_timer exynos4_delay_timer;
204 227
205static cycles_t exynos4_read_current_timer(void) 228static cycles_t exynos4_read_current_timer(void)
206{ 229{
207 return _exynos4_frc_read(); 230 BUILD_BUG_ON_MSG(sizeof(cycles_t) != sizeof(u32),
231 "cycles_t needs to move to 32-bit for ARM64 usage");
232 return exynos4_read_count_32();
208} 233}
209 234
210static void __init exynos4_clocksource_init(void) 235static void __init exynos4_clocksource_init(void)
@@ -218,14 +243,14 @@ static void __init exynos4_clocksource_init(void)
218 if (clocksource_register_hz(&mct_frc, clk_rate)) 243 if (clocksource_register_hz(&mct_frc, clk_rate))
219 panic("%s: can't register clocksource\n", mct_frc.name); 244 panic("%s: can't register clocksource\n", mct_frc.name);
220 245
221 sched_clock_register(exynos4_read_sched_clock, 64, clk_rate); 246 sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);
222} 247}
223 248
224static void exynos4_mct_comp0_stop(void) 249static void exynos4_mct_comp0_stop(void)
225{ 250{
226 unsigned int tcon; 251 unsigned int tcon;
227 252
228 tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON); 253 tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
229 tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC); 254 tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);
230 255
231 exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON); 256 exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
@@ -238,14 +263,14 @@ static void exynos4_mct_comp0_start(enum clock_event_mode mode,
238 unsigned int tcon; 263 unsigned int tcon;
239 cycle_t comp_cycle; 264 cycle_t comp_cycle;
240 265
241 tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON); 266 tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
242 267
243 if (mode == CLOCK_EVT_MODE_PERIODIC) { 268 if (mode == CLOCK_EVT_MODE_PERIODIC) {
244 tcon |= MCT_G_TCON_COMP0_AUTO_INC; 269 tcon |= MCT_G_TCON_COMP0_AUTO_INC;
245 exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR); 270 exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
246 } 271 }
247 272
248 comp_cycle = exynos4_frc_read(&mct_frc) + cycles; 273 comp_cycle = exynos4_read_count_64() + cycles;
249 exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L); 274 exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
250 exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U); 275 exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);
251 276
@@ -327,7 +352,7 @@ static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
327 unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START; 352 unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
328 unsigned long offset = mevt->base + MCT_L_TCON_OFFSET; 353 unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;
329 354
330 tmp = __raw_readl(reg_base + offset); 355 tmp = readl_relaxed(reg_base + offset);
331 if (tmp & mask) { 356 if (tmp & mask) {
332 tmp &= ~mask; 357 tmp &= ~mask;
333 exynos4_mct_write(tmp, offset); 358 exynos4_mct_write(tmp, offset);
@@ -349,7 +374,7 @@ static void exynos4_mct_tick_start(unsigned long cycles,
349 /* enable MCT tick interrupt */ 374 /* enable MCT tick interrupt */
350 exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET); 375 exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);
351 376
352 tmp = __raw_readl(reg_base + mevt->base + MCT_L_TCON_OFFSET); 377 tmp = readl_relaxed(reg_base + mevt->base + MCT_L_TCON_OFFSET);
353 tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START | 378 tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
354 MCT_L_TCON_INTERVAL_MODE; 379 MCT_L_TCON_INTERVAL_MODE;
355 exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET); 380 exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
@@ -401,7 +426,7 @@ static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
401 exynos4_mct_tick_stop(mevt); 426 exynos4_mct_tick_stop(mevt);
402 427
403 /* Clear the MCT tick interrupt */ 428 /* Clear the MCT tick interrupt */
404 if (__raw_readl(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) { 429 if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
405 exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); 430 exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
406 return 1; 431 return 1;
407 } else { 432 } else {
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
new file mode 100644
index 000000000000..32a3d25795d3
--- /dev/null
+++ b/drivers/clocksource/mtk_timer.c
@@ -0,0 +1,261 @@
1/*
2 * Mediatek SoCs General-Purpose Timer handling.
3 *
4 * Copyright (C) 2014 Matthias Brugger
5 *
6 * Matthias Brugger <matthias.bgg@gmail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#include <linux/clk.h>
20#include <linux/clockchips.h>
21#include <linux/interrupt.h>
22#include <linux/irq.h>
23#include <linux/irqreturn.h>
24#include <linux/of.h>
25#include <linux/of_address.h>
26#include <linux/of_irq.h>
27#include <linux/slab.h>
28
29#define GPT_IRQ_EN_REG 0x00
30#define GPT_IRQ_ENABLE(val) BIT((val) - 1)
31#define GPT_IRQ_ACK_REG 0x08
32#define GPT_IRQ_ACK(val) BIT((val) - 1)
33
34#define TIMER_CTRL_REG(val) (0x10 * (val))
35#define TIMER_CTRL_OP(val) (((val) & 0x3) << 4)
36#define TIMER_CTRL_OP_ONESHOT (0)
37#define TIMER_CTRL_OP_REPEAT (1)
38#define TIMER_CTRL_OP_FREERUN (3)
39#define TIMER_CTRL_CLEAR (2)
40#define TIMER_CTRL_ENABLE (1)
41#define TIMER_CTRL_DISABLE (0)
42
43#define TIMER_CLK_REG(val) (0x04 + (0x10 * (val)))
44#define TIMER_CLK_SRC(val) (((val) & 0x1) << 4)
45#define TIMER_CLK_SRC_SYS13M (0)
46#define TIMER_CLK_SRC_RTC32K (1)
47#define TIMER_CLK_DIV1 (0x0)
48#define TIMER_CLK_DIV2 (0x1)
49
50#define TIMER_CNT_REG(val) (0x08 + (0x10 * (val)))
51#define TIMER_CMP_REG(val) (0x0C + (0x10 * (val)))
52
53#define GPT_CLK_EVT 1
54#define GPT_CLK_SRC 2
55
56struct mtk_clock_event_device {
57 void __iomem *gpt_base;
58 u32 ticks_per_jiffy;
59 struct clock_event_device dev;
60};
61
62static inline struct mtk_clock_event_device *to_mtk_clk(
63 struct clock_event_device *c)
64{
65 return container_of(c, struct mtk_clock_event_device, dev);
66}
67
68static void mtk_clkevt_time_stop(struct mtk_clock_event_device *evt, u8 timer)
69{
70 u32 val;
71
72 val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
73 writel(val & ~TIMER_CTRL_ENABLE, evt->gpt_base +
74 TIMER_CTRL_REG(timer));
75}
76
77static void mtk_clkevt_time_setup(struct mtk_clock_event_device *evt,
78 unsigned long delay, u8 timer)
79{
80 writel(delay, evt->gpt_base + TIMER_CMP_REG(timer));
81}
82
83static void mtk_clkevt_time_start(struct mtk_clock_event_device *evt,
84 bool periodic, u8 timer)
85{
86 u32 val;
87
88 /* Acknowledge interrupt */
89 writel(GPT_IRQ_ACK(timer), evt->gpt_base + GPT_IRQ_ACK_REG);
90
91 val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
92
93 /* Clear 2 bit timer operation mode field */
94 val &= ~TIMER_CTRL_OP(0x3);
95
96 if (periodic)
97 val |= TIMER_CTRL_OP(TIMER_CTRL_OP_REPEAT);
98 else
99 val |= TIMER_CTRL_OP(TIMER_CTRL_OP_ONESHOT);
100
101 writel(val | TIMER_CTRL_ENABLE | TIMER_CTRL_CLEAR,
102 evt->gpt_base + TIMER_CTRL_REG(timer));
103}
104
105static void mtk_clkevt_mode(enum clock_event_mode mode,
106 struct clock_event_device *clk)
107{
108 struct mtk_clock_event_device *evt = to_mtk_clk(clk);
109
110 mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
111
112 switch (mode) {
113 case CLOCK_EVT_MODE_PERIODIC:
114 mtk_clkevt_time_setup(evt, evt->ticks_per_jiffy, GPT_CLK_EVT);
115 mtk_clkevt_time_start(evt, true, GPT_CLK_EVT);
116 break;
117 case CLOCK_EVT_MODE_ONESHOT:
118 /* Timer is enabled in set_next_event */
119 break;
120 case CLOCK_EVT_MODE_UNUSED:
121 case CLOCK_EVT_MODE_SHUTDOWN:
122 default:
123 /* No more interrupts will occur as source is disabled */
124 break;
125 }
126}
127
128static int mtk_clkevt_next_event(unsigned long event,
129 struct clock_event_device *clk)
130{
131 struct mtk_clock_event_device *evt = to_mtk_clk(clk);
132
133 mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
134 mtk_clkevt_time_setup(evt, event, GPT_CLK_EVT);
135 mtk_clkevt_time_start(evt, false, GPT_CLK_EVT);
136
137 return 0;
138}
139
140static irqreturn_t mtk_timer_interrupt(int irq, void *dev_id)
141{
142 struct mtk_clock_event_device *evt = dev_id;
143
144 /* Acknowledge timer0 irq */
145 writel(GPT_IRQ_ACK(GPT_CLK_EVT), evt->gpt_base + GPT_IRQ_ACK_REG);
146 evt->dev.event_handler(&evt->dev);
147
148 return IRQ_HANDLED;
149}
150
151static void mtk_timer_global_reset(struct mtk_clock_event_device *evt)
152{
153 /* Disable all interrupts */
154 writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG);
155 /* Acknowledge all interrupts */
156 writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG);
157}
158
159static void
160mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
161{
162 writel(TIMER_CTRL_CLEAR | TIMER_CTRL_DISABLE,
163 evt->gpt_base + TIMER_CTRL_REG(timer));
164
165 writel(TIMER_CLK_SRC(TIMER_CLK_SRC_SYS13M) | TIMER_CLK_DIV1,
166 evt->gpt_base + TIMER_CLK_REG(timer));
167
168 writel(0x0, evt->gpt_base + TIMER_CMP_REG(timer));
169
170 writel(TIMER_CTRL_OP(option) | TIMER_CTRL_ENABLE,
171 evt->gpt_base + TIMER_CTRL_REG(timer));
172}
173
174static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
175{
176 u32 val;
177
178 val = readl(evt->gpt_base + GPT_IRQ_EN_REG);
179 writel(val | GPT_IRQ_ENABLE(timer),
180 evt->gpt_base + GPT_IRQ_EN_REG);
181}
182
183static void __init mtk_timer_init(struct device_node *node)
184{
185 struct mtk_clock_event_device *evt;
186 struct resource res;
187 unsigned long rate = 0;
188 struct clk *clk;
189
190 evt = kzalloc(sizeof(*evt), GFP_KERNEL);
191 if (!evt) {
192 pr_warn("Can't allocate mtk clock event driver struct");
193 return;
194 }
195
196 evt->dev.name = "mtk_tick";
197 evt->dev.rating = 300;
198 evt->dev.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
199 evt->dev.set_mode = mtk_clkevt_mode;
200 evt->dev.set_next_event = mtk_clkevt_next_event;
201 evt->dev.cpumask = cpu_possible_mask;
202
203 evt->gpt_base = of_io_request_and_map(node, 0, "mtk-timer");
204 if (IS_ERR(evt->gpt_base)) {
205 pr_warn("Can't get resource\n");
206 return;
207 }
208
209 evt->dev.irq = irq_of_parse_and_map(node, 0);
210 if (evt->dev.irq <= 0) {
211 pr_warn("Can't parse IRQ");
212 goto err_mem;
213 }
214
215 clk = of_clk_get(node, 0);
216 if (IS_ERR(clk)) {
217 pr_warn("Can't get timer clock");
218 goto err_irq;
219 }
220
221 if (clk_prepare_enable(clk)) {
222 pr_warn("Can't prepare clock");
223 goto err_clk_put;
224 }
225 rate = clk_get_rate(clk);
226
227 if (request_irq(evt->dev.irq, mtk_timer_interrupt,
228 IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
229 pr_warn("failed to setup irq %d\n", evt->dev.irq);
230 goto err_clk_disable;
231 }
232
233 evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
234
235 mtk_timer_global_reset(evt);
236
237 /* Configure clock source */
238 mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
239 clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
240 node->name, rate, 300, 32, clocksource_mmio_readl_up);
241
242 /* Configure clock event */
243 mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
244 mtk_timer_enable_irq(evt, GPT_CLK_EVT);
245
246 clockevents_config_and_register(&evt->dev, rate, 0x3,
247 0xffffffff);
248 return;
249
250err_clk_disable:
251 clk_disable_unprepare(clk);
252err_clk_put:
253 clk_put(clk);
254err_irq:
255 irq_dispose_mapping(evt->dev.irq);
256err_mem:
257 iounmap(evt->gpt_base);
258 of_address_to_resource(node, 0, &res);
259 release_mem_region(res.start, resource_size(&res));
260}
261CLOCKSOURCE_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
new file mode 100644
index 000000000000..941f3f344e08
--- /dev/null
+++ b/drivers/clocksource/pxa_timer.c
@@ -0,0 +1,227 @@
1/*
2 * arch/arm/mach-pxa/time.c
3 *
4 * PXA clocksource, clockevents, and OST interrupt handlers.
5 * Copyright (c) 2007 by Bill Gatliff <bgat@billgatliff.com>.
6 *
7 * Derived from Nicolas Pitre's PXA timer handler Copyright (c) 2001
8 * by MontaVista Software, Inc. (Nico, your code rocks!)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/interrupt.h>
18#include <linux/clk.h>
19#include <linux/clockchips.h>
20#include <linux/of_address.h>
21#include <linux/of_irq.h>
22#include <linux/sched_clock.h>
23
24#include <asm/div64.h>
25
26#define OSMR0 0x00 /* OS Timer 0 Match Register */
27#define OSMR1 0x04 /* OS Timer 1 Match Register */
28#define OSMR2 0x08 /* OS Timer 2 Match Register */
29#define OSMR3 0x0C /* OS Timer 3 Match Register */
30
31#define OSCR 0x10 /* OS Timer Counter Register */
32#define OSSR 0x14 /* OS Timer Status Register */
33#define OWER 0x18 /* OS Timer Watchdog Enable Register */
34#define OIER 0x1C /* OS Timer Interrupt Enable Register */
35
36#define OSSR_M3 (1 << 3) /* Match status channel 3 */
37#define OSSR_M2 (1 << 2) /* Match status channel 2 */
38#define OSSR_M1 (1 << 1) /* Match status channel 1 */
39#define OSSR_M0 (1 << 0) /* Match status channel 0 */
40
41#define OIER_E0 (1 << 0) /* Interrupt enable channel 0 */
42
43/*
44 * This is PXA's sched_clock implementation. This has a resolution
45 * of at least 308 ns and a maximum value of 208 days.
46 *
47 * The return value is guaranteed to be monotonic in that range as
48 * long as there is always less than 582 seconds between successive
49 * calls to sched_clock() which should always be the case in practice.
50 */
51
52#define timer_readl(reg) readl_relaxed(timer_base + (reg))
53#define timer_writel(val, reg) writel_relaxed((val), timer_base + (reg))
54
55static void __iomem *timer_base;
56
57static u64 notrace pxa_read_sched_clock(void)
58{
59 return timer_readl(OSCR);
60}
61
62
63#define MIN_OSCR_DELTA 16
64
65static irqreturn_t
66pxa_ost0_interrupt(int irq, void *dev_id)
67{
68 struct clock_event_device *c = dev_id;
69
70 /* Disarm the compare/match, signal the event. */
71 timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
72 timer_writel(OSSR_M0, OSSR);
73 c->event_handler(c);
74
75 return IRQ_HANDLED;
76}
77
78static int
79pxa_osmr0_set_next_event(unsigned long delta, struct clock_event_device *dev)
80{
81 unsigned long next, oscr;
82
83 timer_writel(timer_readl(OIER) | OIER_E0, OIER);
84 next = timer_readl(OSCR) + delta;
85 timer_writel(next, OSMR0);
86 oscr = timer_readl(OSCR);
87
88 return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0;
89}
90
91static void
92pxa_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *dev)
93{
94 switch (mode) {
95 case CLOCK_EVT_MODE_ONESHOT:
96 timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
97 timer_writel(OSSR_M0, OSSR);
98 break;
99
100 case CLOCK_EVT_MODE_UNUSED:
101 case CLOCK_EVT_MODE_SHUTDOWN:
102 /* initializing, released, or preparing for suspend */
103 timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
104 timer_writel(OSSR_M0, OSSR);
105 break;
106
107 case CLOCK_EVT_MODE_RESUME:
108 case CLOCK_EVT_MODE_PERIODIC:
109 break;
110 }
111}
112
113#ifdef CONFIG_PM
114static unsigned long osmr[4], oier, oscr;
115
116static void pxa_timer_suspend(struct clock_event_device *cedev)
117{
118 osmr[0] = timer_readl(OSMR0);
119 osmr[1] = timer_readl(OSMR1);
120 osmr[2] = timer_readl(OSMR2);
121 osmr[3] = timer_readl(OSMR3);
122 oier = timer_readl(OIER);
123 oscr = timer_readl(OSCR);
124}
125
126static void pxa_timer_resume(struct clock_event_device *cedev)
127{
128 /*
129 * Ensure that we have at least MIN_OSCR_DELTA between match
130 * register 0 and the OSCR, to guarantee that we will receive
131 * the one-shot timer interrupt. We adjust OSMR0 in preference
132 * to OSCR to guarantee that OSCR is monotonically incrementing.
133 */
134 if (osmr[0] - oscr < MIN_OSCR_DELTA)
135 osmr[0] += MIN_OSCR_DELTA;
136
137 timer_writel(osmr[0], OSMR0);
138 timer_writel(osmr[1], OSMR1);
139 timer_writel(osmr[2], OSMR2);
140 timer_writel(osmr[3], OSMR3);
141 timer_writel(oier, OIER);
142 timer_writel(oscr, OSCR);
143}
144#else
145#define pxa_timer_suspend NULL
146#define pxa_timer_resume NULL
147#endif
148
149static struct clock_event_device ckevt_pxa_osmr0 = {
150 .name = "osmr0",
151 .features = CLOCK_EVT_FEAT_ONESHOT,
152 .rating = 200,
153 .set_next_event = pxa_osmr0_set_next_event,
154 .set_mode = pxa_osmr0_set_mode,
155 .suspend = pxa_timer_suspend,
156 .resume = pxa_timer_resume,
157};
158
159static struct irqaction pxa_ost0_irq = {
160 .name = "ost0",
161 .flags = IRQF_TIMER | IRQF_IRQPOLL,
162 .handler = pxa_ost0_interrupt,
163 .dev_id = &ckevt_pxa_osmr0,
164};
165
166static void pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
167{
168 timer_writel(0, OIER);
169 timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
170
171 sched_clock_register(pxa_read_sched_clock, 32, clock_tick_rate);
172
173 ckevt_pxa_osmr0.cpumask = cpumask_of(0);
174
175 setup_irq(irq, &pxa_ost0_irq);
176
177 clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200,
178 32, clocksource_mmio_readl_up);
179 clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate,
180 MIN_OSCR_DELTA * 2, 0x7fffffff);
181}
182
183static void __init pxa_timer_dt_init(struct device_node *np)
184{
185 struct clk *clk;
186 int irq;
187
188 /* timer registers are shared with watchdog timer */
189 timer_base = of_iomap(np, 0);
190 if (!timer_base)
191 panic("%s: unable to map resource\n", np->name);
192
193 clk = of_clk_get(np, 0);
194 if (IS_ERR(clk)) {
195 pr_crit("%s: unable to get clk\n", np->name);
196 return;
197 }
198 clk_prepare_enable(clk);
199
200 /* we are only interested in OS-timer0 irq */
201 irq = irq_of_parse_and_map(np, 0);
202 if (irq <= 0) {
203 pr_crit("%s: unable to parse OS-timer0 irq\n", np->name);
204 return;
205 }
206
207 pxa_timer_common_init(irq, clk_get_rate(clk));
208}
209CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init);
210
211/*
212 * Legacy timer init for non device-tree boards.
213 */
214void __init pxa_timer_nodt_init(int irq, void __iomem *base,
215 unsigned long clock_tick_rate)
216{
217 struct clk *clk;
218
219 timer_base = base;
220 clk = clk_get(NULL, "OSTIMER0");
221 if (clk && !IS_ERR(clk))
222 clk_prepare_enable(clk);
223 else
224 pr_crit("%s: unable to get clk\n", __func__);
225
226 pxa_timer_common_init(irq, clock_tick_rate);
227}
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index dfa780396b91..2bd13b53b727 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -24,6 +24,7 @@
24#include <linux/ioport.h> 24#include <linux/ioport.h>
25#include <linux/irq.h> 25#include <linux/irq.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/of.h>
27#include <linux/platform_device.h> 28#include <linux/platform_device.h>
28#include <linux/pm_domain.h> 29#include <linux/pm_domain.h>
29#include <linux/pm_runtime.h> 30#include <linux/pm_runtime.h>
@@ -114,14 +115,15 @@ struct sh_cmt_device {
114 struct platform_device *pdev; 115 struct platform_device *pdev;
115 116
116 const struct sh_cmt_info *info; 117 const struct sh_cmt_info *info;
117 bool legacy;
118 118
119 void __iomem *mapbase_ch;
120 void __iomem *mapbase; 119 void __iomem *mapbase;
121 struct clk *clk; 120 struct clk *clk;
122 121
122 raw_spinlock_t lock; /* Protect the shared start/stop register */
123
123 struct sh_cmt_channel *channels; 124 struct sh_cmt_channel *channels;
124 unsigned int num_channels; 125 unsigned int num_channels;
126 unsigned int hw_channels;
125 127
126 bool has_clockevent; 128 bool has_clockevent;
127 bool has_clocksource; 129 bool has_clocksource;
@@ -301,14 +303,12 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
301 return v2; 303 return v2;
302} 304}
303 305
304static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
305
306static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start) 306static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
307{ 307{
308 unsigned long flags, value; 308 unsigned long flags, value;
309 309
310 /* start stop register shared by multiple timer channels */ 310 /* start stop register shared by multiple timer channels */
311 raw_spin_lock_irqsave(&sh_cmt_lock, flags); 311 raw_spin_lock_irqsave(&ch->cmt->lock, flags);
312 value = sh_cmt_read_cmstr(ch); 312 value = sh_cmt_read_cmstr(ch);
313 313
314 if (start) 314 if (start)
@@ -317,7 +317,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
317 value &= ~(1 << ch->timer_bit); 317 value &= ~(1 << ch->timer_bit);
318 318
319 sh_cmt_write_cmstr(ch, value); 319 sh_cmt_write_cmstr(ch, value);
320 raw_spin_unlock_irqrestore(&sh_cmt_lock, flags); 320 raw_spin_unlock_irqrestore(&ch->cmt->lock, flags);
321} 321}
322 322
323static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate) 323static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate)
@@ -792,7 +792,7 @@ static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
792 int irq; 792 int irq;
793 int ret; 793 int ret;
794 794
795 irq = platform_get_irq(ch->cmt->pdev, ch->cmt->legacy ? 0 : ch->index); 795 irq = platform_get_irq(ch->cmt->pdev, ch->index);
796 if (irq < 0) { 796 if (irq < 0) {
797 dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n", 797 dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n",
798 ch->index); 798 ch->index);
@@ -863,33 +863,26 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
863 * Compute the address of the channel control register block. For the 863 * Compute the address of the channel control register block. For the
864 * timers with a per-channel start/stop register, compute its address 864 * timers with a per-channel start/stop register, compute its address
865 * as well. 865 * as well.
866 *
867 * For legacy configuration the address has been mapped explicitly.
868 */ 866 */
869 if (cmt->legacy) { 867 switch (cmt->info->model) {
870 ch->ioctrl = cmt->mapbase_ch; 868 case SH_CMT_16BIT:
871 } else { 869 ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
872 switch (cmt->info->model) { 870 break;
873 case SH_CMT_16BIT: 871 case SH_CMT_32BIT:
874 ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6; 872 case SH_CMT_48BIT:
875 break; 873 ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
876 case SH_CMT_32BIT: 874 break;
877 case SH_CMT_48BIT: 875 case SH_CMT_32BIT_FAST:
878 ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10; 876 /*
879 break; 877 * The 32-bit "fast" timer has a single channel at hwidx 5 but
880 case SH_CMT_32BIT_FAST: 878 * is located at offset 0x40 instead of 0x60 for some reason.
881 /* 879 */
882 * The 32-bit "fast" timer has a single channel at hwidx 880 ch->ioctrl = cmt->mapbase + 0x40;
883 * 5 but is located at offset 0x40 instead of 0x60 for 881 break;
884 * some reason. 882 case SH_CMT_48BIT_GEN2:
885 */ 883 ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
886 ch->ioctrl = cmt->mapbase + 0x40; 884 ch->ioctrl = ch->iostart + 0x10;
887 break; 885 break;
888 case SH_CMT_48BIT_GEN2:
889 ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
890 ch->ioctrl = ch->iostart + 0x10;
891 break;
892 }
893 } 886 }
894 887
895 if (cmt->info->width == (sizeof(ch->max_match_value) * 8)) 888 if (cmt->info->width == (sizeof(ch->max_match_value) * 8))
@@ -900,12 +893,7 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
900 ch->match_value = ch->max_match_value; 893 ch->match_value = ch->max_match_value;
901 raw_spin_lock_init(&ch->lock); 894 raw_spin_lock_init(&ch->lock);
902 895
903 if (cmt->legacy) { 896 ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2 ? 0 : ch->hwidx;
904 ch->timer_bit = ch->hwidx;
905 } else {
906 ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2
907 ? 0 : ch->hwidx;
908 }
909 897
910 ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev), 898 ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
911 clockevent, clocksource); 899 clockevent, clocksource);
@@ -938,75 +926,65 @@ static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
938 return 0; 926 return 0;
939} 927}
940 928
941static int sh_cmt_map_memory_legacy(struct sh_cmt_device *cmt) 929static const struct platform_device_id sh_cmt_id_table[] = {
942{ 930 { "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
943 struct sh_timer_config *cfg = cmt->pdev->dev.platform_data; 931 { "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
944 struct resource *res, *res2; 932 { "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] },
945 933 { "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] },
946 /* map memory, let mapbase_ch point to our channel */ 934 { "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] },
947 res = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0); 935 { }
948 if (!res) { 936};
949 dev_err(&cmt->pdev->dev, "failed to get I/O memory\n"); 937MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
950 return -ENXIO;
951 }
952
953 cmt->mapbase_ch = ioremap_nocache(res->start, resource_size(res));
954 if (cmt->mapbase_ch == NULL) {
955 dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
956 return -ENXIO;
957 }
958
959 /* optional resource for the shared timer start/stop register */
960 res2 = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 1);
961
962 /* map second resource for CMSTR */
963 cmt->mapbase = ioremap_nocache(res2 ? res2->start :
964 res->start - cfg->channel_offset,
965 res2 ? resource_size(res2) : 2);
966 if (cmt->mapbase == NULL) {
967 dev_err(&cmt->pdev->dev, "failed to remap I/O second memory\n");
968 iounmap(cmt->mapbase_ch);
969 return -ENXIO;
970 }
971
972 /* identify the model based on the resources */
973 if (resource_size(res) == 6)
974 cmt->info = &sh_cmt_info[SH_CMT_16BIT];
975 else if (res2 && (resource_size(res2) == 4))
976 cmt->info = &sh_cmt_info[SH_CMT_48BIT_GEN2];
977 else
978 cmt->info = &sh_cmt_info[SH_CMT_32BIT];
979 938
980 return 0; 939static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
981} 940 { .compatible = "renesas,cmt-32", .data = &sh_cmt_info[SH_CMT_32BIT] },
941 { .compatible = "renesas,cmt-32-fast", .data = &sh_cmt_info[SH_CMT_32BIT_FAST] },
942 { .compatible = "renesas,cmt-48", .data = &sh_cmt_info[SH_CMT_48BIT] },
943 { .compatible = "renesas,cmt-48-gen2", .data = &sh_cmt_info[SH_CMT_48BIT_GEN2] },
944 { }
945};
946MODULE_DEVICE_TABLE(of, sh_cmt_of_table);
982 947
983static void sh_cmt_unmap_memory(struct sh_cmt_device *cmt) 948static int sh_cmt_parse_dt(struct sh_cmt_device *cmt)
984{ 949{
985 iounmap(cmt->mapbase); 950 struct device_node *np = cmt->pdev->dev.of_node;
986 if (cmt->mapbase_ch) 951
987 iounmap(cmt->mapbase_ch); 952 return of_property_read_u32(np, "renesas,channels-mask",
953 &cmt->hw_channels);
988} 954}
989 955
990static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) 956static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
991{ 957{
992 struct sh_timer_config *cfg = pdev->dev.platform_data; 958 unsigned int mask;
993 const struct platform_device_id *id = pdev->id_entry; 959 unsigned int i;
994 unsigned int hw_channels;
995 int ret; 960 int ret;
996 961
997 memset(cmt, 0, sizeof(*cmt)); 962 memset(cmt, 0, sizeof(*cmt));
998 cmt->pdev = pdev; 963 cmt->pdev = pdev;
964 raw_spin_lock_init(&cmt->lock);
965
966 if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
967 const struct of_device_id *id;
968
969 id = of_match_node(sh_cmt_of_table, pdev->dev.of_node);
970 cmt->info = id->data;
999 971
1000 if (!cfg) { 972 ret = sh_cmt_parse_dt(cmt);
973 if (ret < 0)
974 return ret;
975 } else if (pdev->dev.platform_data) {
976 struct sh_timer_config *cfg = pdev->dev.platform_data;
977 const struct platform_device_id *id = pdev->id_entry;
978
979 cmt->info = (const struct sh_cmt_info *)id->driver_data;
980 cmt->hw_channels = cfg->channels_mask;
981 } else {
1001 dev_err(&cmt->pdev->dev, "missing platform data\n"); 982 dev_err(&cmt->pdev->dev, "missing platform data\n");
1002 return -ENXIO; 983 return -ENXIO;
1003 } 984 }
1004 985
1005 cmt->info = (const struct sh_cmt_info *)id->driver_data;
1006 cmt->legacy = cmt->info ? false : true;
1007
1008 /* Get hold of clock. */ 986 /* Get hold of clock. */
1009 cmt->clk = clk_get(&cmt->pdev->dev, cmt->legacy ? "cmt_fck" : "fck"); 987 cmt->clk = clk_get(&cmt->pdev->dev, "fck");
1010 if (IS_ERR(cmt->clk)) { 988 if (IS_ERR(cmt->clk)) {
1011 dev_err(&cmt->pdev->dev, "cannot get clock\n"); 989 dev_err(&cmt->pdev->dev, "cannot get clock\n");
1012 return PTR_ERR(cmt->clk); 990 return PTR_ERR(cmt->clk);
@@ -1016,28 +994,13 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
1016 if (ret < 0) 994 if (ret < 0)
1017 goto err_clk_put; 995 goto err_clk_put;
1018 996
1019 /* 997 /* Map the memory resource(s). */
1020 * Map the memory resource(s). We need to support both the legacy 998 ret = sh_cmt_map_memory(cmt);
1021 * platform device configuration (with one device per channel) and the
1022 * new version (with multiple channels per device).
1023 */
1024 if (cmt->legacy)
1025 ret = sh_cmt_map_memory_legacy(cmt);
1026 else
1027 ret = sh_cmt_map_memory(cmt);
1028
1029 if (ret < 0) 999 if (ret < 0)
1030 goto err_clk_unprepare; 1000 goto err_clk_unprepare;
1031 1001
1032 /* Allocate and setup the channels. */ 1002 /* Allocate and setup the channels. */
1033 if (cmt->legacy) { 1003 cmt->num_channels = hweight8(cmt->hw_channels);
1034 cmt->num_channels = 1;
1035 hw_channels = 0;
1036 } else {
1037 cmt->num_channels = hweight8(cfg->channels_mask);
1038 hw_channels = cfg->channels_mask;
1039 }
1040
1041 cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels), 1004 cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels),
1042 GFP_KERNEL); 1005 GFP_KERNEL);
1043 if (cmt->channels == NULL) { 1006 if (cmt->channels == NULL) {
@@ -1045,35 +1008,21 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
1045 goto err_unmap; 1008 goto err_unmap;
1046 } 1009 }
1047 1010
1048 if (cmt->legacy) { 1011 /*
1049 ret = sh_cmt_setup_channel(&cmt->channels[0], 1012 * Use the first channel as a clock event device and the second channel
1050 cfg->timer_bit, cfg->timer_bit, 1013 * as a clock source. If only one channel is available use it for both.
1051 cfg->clockevent_rating != 0, 1014 */
1052 cfg->clocksource_rating != 0, cmt); 1015 for (i = 0, mask = cmt->hw_channels; i < cmt->num_channels; ++i) {
1016 unsigned int hwidx = ffs(mask) - 1;
1017 bool clocksource = i == 1 || cmt->num_channels == 1;
1018 bool clockevent = i == 0;
1019
1020 ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
1021 clockevent, clocksource, cmt);
1053 if (ret < 0) 1022 if (ret < 0)
1054 goto err_unmap; 1023 goto err_unmap;
1055 } else {
1056 unsigned int mask = hw_channels;
1057 unsigned int i;
1058 1024
1059 /* 1025 mask &= ~(1 << hwidx);
1060 * Use the first channel as a clock event device and the second
1061 * channel as a clock source. If only one channel is available
1062 * use it for both.
1063 */
1064 for (i = 0; i < cmt->num_channels; ++i) {
1065 unsigned int hwidx = ffs(mask) - 1;
1066 bool clocksource = i == 1 || cmt->num_channels == 1;
1067 bool clockevent = i == 0;
1068
1069 ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
1070 clockevent, clocksource,
1071 cmt);
1072 if (ret < 0)
1073 goto err_unmap;
1074
1075 mask &= ~(1 << hwidx);
1076 }
1077 } 1026 }
1078 1027
1079 platform_set_drvdata(pdev, cmt); 1028 platform_set_drvdata(pdev, cmt);
@@ -1082,7 +1031,7 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
1082 1031
1083err_unmap: 1032err_unmap:
1084 kfree(cmt->channels); 1033 kfree(cmt->channels);
1085 sh_cmt_unmap_memory(cmt); 1034 iounmap(cmt->mapbase);
1086err_clk_unprepare: 1035err_clk_unprepare:
1087 clk_unprepare(cmt->clk); 1036 clk_unprepare(cmt->clk);
1088err_clk_put: 1037err_clk_put:
@@ -1132,22 +1081,12 @@ static int sh_cmt_remove(struct platform_device *pdev)
1132 return -EBUSY; /* cannot unregister clockevent and clocksource */ 1081 return -EBUSY; /* cannot unregister clockevent and clocksource */
1133} 1082}
1134 1083
1135static const struct platform_device_id sh_cmt_id_table[] = {
1136 { "sh_cmt", 0 },
1137 { "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
1138 { "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
1139 { "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] },
1140 { "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] },
1141 { "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] },
1142 { }
1143};
1144MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
1145
1146static struct platform_driver sh_cmt_device_driver = { 1084static struct platform_driver sh_cmt_device_driver = {
1147 .probe = sh_cmt_probe, 1085 .probe = sh_cmt_probe,
1148 .remove = sh_cmt_remove, 1086 .remove = sh_cmt_remove,
1149 .driver = { 1087 .driver = {
1150 .name = "sh_cmt", 1088 .name = "sh_cmt",
1089 .of_match_table = of_match_ptr(sh_cmt_of_table),
1151 }, 1090 },
1152 .id_table = sh_cmt_id_table, 1091 .id_table = sh_cmt_id_table,
1153}; 1092};
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 188d4e092efc..3d88698cf2b8 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -23,6 +23,7 @@
23#include <linux/ioport.h> 23#include <linux/ioport.h>
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/of.h>
26#include <linux/platform_device.h> 27#include <linux/platform_device.h>
27#include <linux/pm_domain.h> 28#include <linux/pm_domain.h>
28#include <linux/pm_runtime.h> 29#include <linux/pm_runtime.h>
@@ -37,7 +38,6 @@ struct sh_mtu2_channel {
37 unsigned int index; 38 unsigned int index;
38 39
39 void __iomem *base; 40 void __iomem *base;
40 int irq;
41 41
42 struct clock_event_device ced; 42 struct clock_event_device ced;
43}; 43};
@@ -48,15 +48,14 @@ struct sh_mtu2_device {
48 void __iomem *mapbase; 48 void __iomem *mapbase;
49 struct clk *clk; 49 struct clk *clk;
50 50
51 raw_spinlock_t lock; /* Protect the shared registers */
52
51 struct sh_mtu2_channel *channels; 53 struct sh_mtu2_channel *channels;
52 unsigned int num_channels; 54 unsigned int num_channels;
53 55
54 bool legacy;
55 bool has_clockevent; 56 bool has_clockevent;
56}; 57};
57 58
58static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);
59
60#define TSTR -1 /* shared register */ 59#define TSTR -1 /* shared register */
61#define TCR 0 /* channel register */ 60#define TCR 0 /* channel register */
62#define TMDR 1 /* channel register */ 61#define TMDR 1 /* channel register */
@@ -162,12 +161,8 @@ static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr)
162{ 161{
163 unsigned long offs; 162 unsigned long offs;
164 163
165 if (reg_nr == TSTR) { 164 if (reg_nr == TSTR)
166 if (ch->mtu->legacy) 165 return ioread8(ch->mtu->mapbase + 0x280);
167 return ioread8(ch->mtu->mapbase);
168 else
169 return ioread8(ch->mtu->mapbase + 0x280);
170 }
171 166
172 offs = mtu2_reg_offs[reg_nr]; 167 offs = mtu2_reg_offs[reg_nr];
173 168
@@ -182,12 +177,8 @@ static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr,
182{ 177{
183 unsigned long offs; 178 unsigned long offs;
184 179
185 if (reg_nr == TSTR) { 180 if (reg_nr == TSTR)
186 if (ch->mtu->legacy) 181 return iowrite8(value, ch->mtu->mapbase + 0x280);
187 return iowrite8(value, ch->mtu->mapbase);
188 else
189 return iowrite8(value, ch->mtu->mapbase + 0x280);
190 }
191 182
192 offs = mtu2_reg_offs[reg_nr]; 183 offs = mtu2_reg_offs[reg_nr];
193 184
@@ -202,7 +193,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)
202 unsigned long flags, value; 193 unsigned long flags, value;
203 194
204 /* start stop register shared by multiple timer channels */ 195 /* start stop register shared by multiple timer channels */
205 raw_spin_lock_irqsave(&sh_mtu2_lock, flags); 196 raw_spin_lock_irqsave(&ch->mtu->lock, flags);
206 value = sh_mtu2_read(ch, TSTR); 197 value = sh_mtu2_read(ch, TSTR);
207 198
208 if (start) 199 if (start)
@@ -211,7 +202,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)
211 value &= ~(1 << ch->index); 202 value &= ~(1 << ch->index);
212 203
213 sh_mtu2_write(ch, TSTR, value); 204 sh_mtu2_write(ch, TSTR, value);
214 raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags); 205 raw_spin_unlock_irqrestore(&ch->mtu->lock, flags);
215} 206}
216 207
217static int sh_mtu2_enable(struct sh_mtu2_channel *ch) 208static int sh_mtu2_enable(struct sh_mtu2_channel *ch)
@@ -331,7 +322,6 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
331 const char *name) 322 const char *name)
332{ 323{
333 struct clock_event_device *ced = &ch->ced; 324 struct clock_event_device *ced = &ch->ced;
334 int ret;
335 325
336 ced->name = name; 326 ced->name = name;
337 ced->features = CLOCK_EVT_FEAT_PERIODIC; 327 ced->features = CLOCK_EVT_FEAT_PERIODIC;
@@ -344,24 +334,12 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
344 dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n", 334 dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n",
345 ch->index); 335 ch->index);
346 clockevents_register_device(ced); 336 clockevents_register_device(ced);
347
348 ret = request_irq(ch->irq, sh_mtu2_interrupt,
349 IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
350 dev_name(&ch->mtu->pdev->dev), ch);
351 if (ret) {
352 dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n",
353 ch->index, ch->irq);
354 return;
355 }
356} 337}
357 338
358static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name, 339static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name)
359 bool clockevent)
360{ 340{
361 if (clockevent) { 341 ch->mtu->has_clockevent = true;
362 ch->mtu->has_clockevent = true; 342 sh_mtu2_register_clockevent(ch, name);
363 sh_mtu2_register_clockevent(ch, name);
364 }
365 343
366 return 0; 344 return 0;
367} 345}
@@ -372,40 +350,32 @@ static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
372 static const unsigned int channel_offsets[] = { 350 static const unsigned int channel_offsets[] = {
373 0x300, 0x380, 0x000, 351 0x300, 0x380, 0x000,
374 }; 352 };
375 bool clockevent; 353 char name[6];
354 int irq;
355 int ret;
376 356
377 ch->mtu = mtu; 357 ch->mtu = mtu;
378 358
379 if (mtu->legacy) { 359 sprintf(name, "tgi%ua", index);
380 struct sh_timer_config *cfg = mtu->pdev->dev.platform_data; 360 irq = platform_get_irq_byname(mtu->pdev, name);
381 361 if (irq < 0) {
382 clockevent = cfg->clockevent_rating != 0;
383
384 ch->irq = platform_get_irq(mtu->pdev, 0);
385 ch->base = mtu->mapbase - cfg->channel_offset;
386 ch->index = cfg->timer_bit;
387 } else {
388 char name[6];
389
390 clockevent = true;
391
392 sprintf(name, "tgi%ua", index);
393 ch->irq = platform_get_irq_byname(mtu->pdev, name);
394 ch->base = mtu->mapbase + channel_offsets[index];
395 ch->index = index;
396 }
397
398 if (ch->irq < 0) {
399 /* Skip channels with no declared interrupt. */ 362 /* Skip channels with no declared interrupt. */
400 if (!mtu->legacy) 363 return 0;
401 return 0; 364 }
402 365
403 dev_err(&mtu->pdev->dev, "ch%u: failed to get irq\n", 366 ret = request_irq(irq, sh_mtu2_interrupt,
404 ch->index); 367 IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
405 return ch->irq; 368 dev_name(&ch->mtu->pdev->dev), ch);
369 if (ret) {
370 dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n",
371 index, irq);
372 return ret;
406 } 373 }
407 374
408 return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev), clockevent); 375 ch->base = mtu->mapbase + channel_offsets[index];
376 ch->index = index;
377
378 return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev));
409} 379}
410 380
411static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu) 381static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
@@ -422,46 +392,21 @@ static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
422 if (mtu->mapbase == NULL) 392 if (mtu->mapbase == NULL)
423 return -ENXIO; 393 return -ENXIO;
424 394
425 /*
426 * In legacy platform device configuration (with one device per channel)
427 * the resource points to the channel base address.
428 */
429 if (mtu->legacy) {
430 struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
431 mtu->mapbase += cfg->channel_offset;
432 }
433
434 return 0; 395 return 0;
435} 396}
436 397
437static void sh_mtu2_unmap_memory(struct sh_mtu2_device *mtu)
438{
439 if (mtu->legacy) {
440 struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
441 mtu->mapbase -= cfg->channel_offset;
442 }
443
444 iounmap(mtu->mapbase);
445}
446
447static int sh_mtu2_setup(struct sh_mtu2_device *mtu, 398static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
448 struct platform_device *pdev) 399 struct platform_device *pdev)
449{ 400{
450 struct sh_timer_config *cfg = pdev->dev.platform_data;
451 const struct platform_device_id *id = pdev->id_entry;
452 unsigned int i; 401 unsigned int i;
453 int ret; 402 int ret;
454 403
455 mtu->pdev = pdev; 404 mtu->pdev = pdev;
456 mtu->legacy = id->driver_data;
457 405
458 if (mtu->legacy && !cfg) { 406 raw_spin_lock_init(&mtu->lock);
459 dev_err(&mtu->pdev->dev, "missing platform data\n");
460 return -ENXIO;
461 }
462 407
463 /* Get hold of clock. */ 408 /* Get hold of clock. */
464 mtu->clk = clk_get(&mtu->pdev->dev, mtu->legacy ? "mtu2_fck" : "fck"); 409 mtu->clk = clk_get(&mtu->pdev->dev, "fck");
465 if (IS_ERR(mtu->clk)) { 410 if (IS_ERR(mtu->clk)) {
466 dev_err(&mtu->pdev->dev, "cannot get clock\n"); 411 dev_err(&mtu->pdev->dev, "cannot get clock\n");
467 return PTR_ERR(mtu->clk); 412 return PTR_ERR(mtu->clk);
@@ -479,10 +424,7 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
479 } 424 }
480 425
481 /* Allocate and setup the channels. */ 426 /* Allocate and setup the channels. */
482 if (mtu->legacy) 427 mtu->num_channels = 3;
483 mtu->num_channels = 1;
484 else
485 mtu->num_channels = 3;
486 428
487 mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels, 429 mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels,
488 GFP_KERNEL); 430 GFP_KERNEL);
@@ -491,16 +433,10 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
491 goto err_unmap; 433 goto err_unmap;
492 } 434 }
493 435
494 if (mtu->legacy) { 436 for (i = 0; i < mtu->num_channels; ++i) {
495 ret = sh_mtu2_setup_channel(&mtu->channels[0], 0, mtu); 437 ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu);
496 if (ret < 0) 438 if (ret < 0)
497 goto err_unmap; 439 goto err_unmap;
498 } else {
499 for (i = 0; i < mtu->num_channels; ++i) {
500 ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu);
501 if (ret < 0)
502 goto err_unmap;
503 }
504 } 440 }
505 441
506 platform_set_drvdata(pdev, mtu); 442 platform_set_drvdata(pdev, mtu);
@@ -509,7 +445,7 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
509 445
510err_unmap: 446err_unmap:
511 kfree(mtu->channels); 447 kfree(mtu->channels);
512 sh_mtu2_unmap_memory(mtu); 448 iounmap(mtu->mapbase);
513err_clk_unprepare: 449err_clk_unprepare:
514 clk_unprepare(mtu->clk); 450 clk_unprepare(mtu->clk);
515err_clk_put: 451err_clk_put:
@@ -560,17 +496,23 @@ static int sh_mtu2_remove(struct platform_device *pdev)
560} 496}
561 497
562static const struct platform_device_id sh_mtu2_id_table[] = { 498static const struct platform_device_id sh_mtu2_id_table[] = {
563 { "sh_mtu2", 1 },
564 { "sh-mtu2", 0 }, 499 { "sh-mtu2", 0 },
565 { }, 500 { },
566}; 501};
567MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table); 502MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table);
568 503
504static const struct of_device_id sh_mtu2_of_table[] __maybe_unused = {
505 { .compatible = "renesas,mtu2" },
506 { }
507};
508MODULE_DEVICE_TABLE(of, sh_mtu2_of_table);
509
569static struct platform_driver sh_mtu2_device_driver = { 510static struct platform_driver sh_mtu2_device_driver = {
570 .probe = sh_mtu2_probe, 511 .probe = sh_mtu2_probe,
571 .remove = sh_mtu2_remove, 512 .remove = sh_mtu2_remove,
572 .driver = { 513 .driver = {
573 .name = "sh_mtu2", 514 .name = "sh_mtu2",
515 .of_match_table = of_match_ptr(sh_mtu2_of_table),
574 }, 516 },
575 .id_table = sh_mtu2_id_table, 517 .id_table = sh_mtu2_id_table,
576}; 518};
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 6bd17a8f3dd4..0f665b8f2461 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -24,6 +24,7 @@
24#include <linux/ioport.h> 24#include <linux/ioport.h>
25#include <linux/irq.h> 25#include <linux/irq.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/of.h>
27#include <linux/platform_device.h> 28#include <linux/platform_device.h>
28#include <linux/pm_domain.h> 29#include <linux/pm_domain.h>
29#include <linux/pm_runtime.h> 30#include <linux/pm_runtime.h>
@@ -32,7 +33,6 @@
32#include <linux/spinlock.h> 33#include <linux/spinlock.h>
33 34
34enum sh_tmu_model { 35enum sh_tmu_model {
35 SH_TMU_LEGACY,
36 SH_TMU, 36 SH_TMU,
37 SH_TMU_SH3, 37 SH_TMU_SH3,
38}; 38};
@@ -62,6 +62,8 @@ struct sh_tmu_device {
62 62
63 enum sh_tmu_model model; 63 enum sh_tmu_model model;
64 64
65 raw_spinlock_t lock; /* Protect the shared start/stop register */
66
65 struct sh_tmu_channel *channels; 67 struct sh_tmu_channel *channels;
66 unsigned int num_channels; 68 unsigned int num_channels;
67 69
@@ -69,8 +71,6 @@ struct sh_tmu_device {
69 bool has_clocksource; 71 bool has_clocksource;
70}; 72};
71 73
72static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
73
74#define TSTR -1 /* shared register */ 74#define TSTR -1 /* shared register */
75#define TCOR 0 /* channel register */ 75#define TCOR 0 /* channel register */
76#define TCNT 1 /* channel register */ 76#define TCNT 1 /* channel register */
@@ -91,8 +91,6 @@ static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
91 91
92 if (reg_nr == TSTR) { 92 if (reg_nr == TSTR) {
93 switch (ch->tmu->model) { 93 switch (ch->tmu->model) {
94 case SH_TMU_LEGACY:
95 return ioread8(ch->tmu->mapbase);
96 case SH_TMU_SH3: 94 case SH_TMU_SH3:
97 return ioread8(ch->tmu->mapbase + 2); 95 return ioread8(ch->tmu->mapbase + 2);
98 case SH_TMU: 96 case SH_TMU:
@@ -115,8 +113,6 @@ static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
115 113
116 if (reg_nr == TSTR) { 114 if (reg_nr == TSTR) {
117 switch (ch->tmu->model) { 115 switch (ch->tmu->model) {
118 case SH_TMU_LEGACY:
119 return iowrite8(value, ch->tmu->mapbase);
120 case SH_TMU_SH3: 116 case SH_TMU_SH3:
121 return iowrite8(value, ch->tmu->mapbase + 2); 117 return iowrite8(value, ch->tmu->mapbase + 2);
122 case SH_TMU: 118 case SH_TMU:
@@ -137,7 +133,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
137 unsigned long flags, value; 133 unsigned long flags, value;
138 134
139 /* start stop register shared by multiple timer channels */ 135 /* start stop register shared by multiple timer channels */
140 raw_spin_lock_irqsave(&sh_tmu_lock, flags); 136 raw_spin_lock_irqsave(&ch->tmu->lock, flags);
141 value = sh_tmu_read(ch, TSTR); 137 value = sh_tmu_read(ch, TSTR);
142 138
143 if (start) 139 if (start)
@@ -146,7 +142,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
146 value &= ~(1 << ch->index); 142 value &= ~(1 << ch->index);
147 143
148 sh_tmu_write(ch, TSTR, value); 144 sh_tmu_write(ch, TSTR, value);
149 raw_spin_unlock_irqrestore(&sh_tmu_lock, flags); 145 raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
150} 146}
151 147
152static int __sh_tmu_enable(struct sh_tmu_channel *ch) 148static int __sh_tmu_enable(struct sh_tmu_channel *ch)
@@ -476,27 +472,12 @@ static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
476 return 0; 472 return 0;
477 473
478 ch->tmu = tmu; 474 ch->tmu = tmu;
475 ch->index = index;
479 476
480 if (tmu->model == SH_TMU_LEGACY) { 477 if (tmu->model == SH_TMU_SH3)
481 struct sh_timer_config *cfg = tmu->pdev->dev.platform_data; 478 ch->base = tmu->mapbase + 4 + ch->index * 12;
482 479 else
483 /* 480 ch->base = tmu->mapbase + 8 + ch->index * 12;
484 * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps
485 * channel registers blocks at base + 2 + 12 * index, while all
486 * other variants map them at base + 4 + 12 * index. We can
487 * compute the index by just dividing by 12, the 2 bytes or 4
488 * bytes offset being hidden by the integer division.
489 */
490 ch->index = cfg->channel_offset / 12;
491 ch->base = tmu->mapbase + cfg->channel_offset;
492 } else {
493 ch->index = index;
494
495 if (tmu->model == SH_TMU_SH3)
496 ch->base = tmu->mapbase + 4 + ch->index * 12;
497 else
498 ch->base = tmu->mapbase + 8 + ch->index * 12;
499 }
500 481
501 ch->irq = platform_get_irq(tmu->pdev, index); 482 ch->irq = platform_get_irq(tmu->pdev, index);
502 if (ch->irq < 0) { 483 if (ch->irq < 0) {
@@ -526,46 +507,53 @@ static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
526 if (tmu->mapbase == NULL) 507 if (tmu->mapbase == NULL)
527 return -ENXIO; 508 return -ENXIO;
528 509
529 /*
530 * In legacy platform device configuration (with one device per channel)
531 * the resource points to the channel base address.
532 */
533 if (tmu->model == SH_TMU_LEGACY) {
534 struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
535 tmu->mapbase -= cfg->channel_offset;
536 }
537
538 return 0; 510 return 0;
539} 511}
540 512
541static void sh_tmu_unmap_memory(struct sh_tmu_device *tmu) 513static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
542{ 514{
543 if (tmu->model == SH_TMU_LEGACY) { 515 struct device_node *np = tmu->pdev->dev.of_node;
544 struct sh_timer_config *cfg = tmu->pdev->dev.platform_data; 516
545 tmu->mapbase += cfg->channel_offset; 517 tmu->model = SH_TMU;
518 tmu->num_channels = 3;
519
520 of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);
521
522 if (tmu->num_channels != 2 && tmu->num_channels != 3) {
523 dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
524 tmu->num_channels);
525 return -EINVAL;
546 } 526 }
547 527
548 iounmap(tmu->mapbase); 528 return 0;
549} 529}
550 530
551static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev) 531static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
552{ 532{
553 struct sh_timer_config *cfg = pdev->dev.platform_data;
554 const struct platform_device_id *id = pdev->id_entry;
555 unsigned int i; 533 unsigned int i;
556 int ret; 534 int ret;
557 535
558 if (!cfg) { 536 tmu->pdev = pdev;
537
538 raw_spin_lock_init(&tmu->lock);
539
540 if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
541 ret = sh_tmu_parse_dt(tmu);
542 if (ret < 0)
543 return ret;
544 } else if (pdev->dev.platform_data) {
545 const struct platform_device_id *id = pdev->id_entry;
546 struct sh_timer_config *cfg = pdev->dev.platform_data;
547
548 tmu->model = id->driver_data;
549 tmu->num_channels = hweight8(cfg->channels_mask);
550 } else {
559 dev_err(&tmu->pdev->dev, "missing platform data\n"); 551 dev_err(&tmu->pdev->dev, "missing platform data\n");
560 return -ENXIO; 552 return -ENXIO;
561 } 553 }
562 554
563 tmu->pdev = pdev;
564 tmu->model = id->driver_data;
565
566 /* Get hold of clock. */ 555 /* Get hold of clock. */
567 tmu->clk = clk_get(&tmu->pdev->dev, 556 tmu->clk = clk_get(&tmu->pdev->dev, "fck");
568 tmu->model == SH_TMU_LEGACY ? "tmu_fck" : "fck");
569 if (IS_ERR(tmu->clk)) { 557 if (IS_ERR(tmu->clk)) {
570 dev_err(&tmu->pdev->dev, "cannot get clock\n"); 558 dev_err(&tmu->pdev->dev, "cannot get clock\n");
571 return PTR_ERR(tmu->clk); 559 return PTR_ERR(tmu->clk);
@@ -583,11 +571,6 @@ static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
583 } 571 }
584 572
585 /* Allocate and setup the channels. */ 573 /* Allocate and setup the channels. */
586 if (tmu->model == SH_TMU_LEGACY)
587 tmu->num_channels = 1;
588 else
589 tmu->num_channels = hweight8(cfg->channels_mask);
590
591 tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels, 574 tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
592 GFP_KERNEL); 575 GFP_KERNEL);
593 if (tmu->channels == NULL) { 576 if (tmu->channels == NULL) {
@@ -595,23 +578,15 @@ static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
595 goto err_unmap; 578 goto err_unmap;
596 } 579 }
597 580
598 if (tmu->model == SH_TMU_LEGACY) { 581 /*
599 ret = sh_tmu_channel_setup(&tmu->channels[0], 0, 582 * Use the first channel as a clock event device and the second channel
600 cfg->clockevent_rating != 0, 583 * as a clock source.
601 cfg->clocksource_rating != 0, tmu); 584 */
585 for (i = 0; i < tmu->num_channels; ++i) {
586 ret = sh_tmu_channel_setup(&tmu->channels[i], i,
587 i == 0, i == 1, tmu);
602 if (ret < 0) 588 if (ret < 0)
603 goto err_unmap; 589 goto err_unmap;
604 } else {
605 /*
606 * Use the first channel as a clock event device and the second
607 * channel as a clock source.
608 */
609 for (i = 0; i < tmu->num_channels; ++i) {
610 ret = sh_tmu_channel_setup(&tmu->channels[i], i,
611 i == 0, i == 1, tmu);
612 if (ret < 0)
613 goto err_unmap;
614 }
615 } 590 }
616 591
617 platform_set_drvdata(pdev, tmu); 592 platform_set_drvdata(pdev, tmu);
@@ -620,7 +595,7 @@ static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
620 595
621err_unmap: 596err_unmap:
622 kfree(tmu->channels); 597 kfree(tmu->channels);
623 sh_tmu_unmap_memory(tmu); 598 iounmap(tmu->mapbase);
624err_clk_unprepare: 599err_clk_unprepare:
625 clk_unprepare(tmu->clk); 600 clk_unprepare(tmu->clk);
626err_clk_put: 601err_clk_put:
@@ -671,18 +646,24 @@ static int sh_tmu_remove(struct platform_device *pdev)
671} 646}
672 647
673static const struct platform_device_id sh_tmu_id_table[] = { 648static const struct platform_device_id sh_tmu_id_table[] = {
674 { "sh_tmu", SH_TMU_LEGACY },
675 { "sh-tmu", SH_TMU }, 649 { "sh-tmu", SH_TMU },
676 { "sh-tmu-sh3", SH_TMU_SH3 }, 650 { "sh-tmu-sh3", SH_TMU_SH3 },
677 { } 651 { }
678}; 652};
679MODULE_DEVICE_TABLE(platform, sh_tmu_id_table); 653MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);
680 654
655static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
656 { .compatible = "renesas,tmu" },
657 { }
658};
659MODULE_DEVICE_TABLE(of, sh_tmu_of_table);
660
681static struct platform_driver sh_tmu_device_driver = { 661static struct platform_driver sh_tmu_device_driver = {
682 .probe = sh_tmu_probe, 662 .probe = sh_tmu_probe,
683 .remove = sh_tmu_remove, 663 .remove = sh_tmu_remove,
684 .driver = { 664 .driver = {
685 .name = "sh_tmu", 665 .name = "sh_tmu",
666 .of_match_table = of_match_ptr(sh_tmu_of_table),
686 }, 667 },
687 .id_table = sh_tmu_id_table, 668 .id_table = sh_tmu_id_table,
688}; 669};
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c
index dbd30398222a..330e93064692 100644
--- a/drivers/clocksource/timer-marco.c
+++ b/drivers/clocksource/timer-marco.c
@@ -260,6 +260,9 @@ static void __init sirfsoc_marco_timer_init(struct device_node *np)
260 260
261 clk = of_clk_get(np, 0); 261 clk = of_clk_get(np, 0);
262 BUG_ON(IS_ERR(clk)); 262 BUG_ON(IS_ERR(clk));
263
264 BUG_ON(clk_prepare_enable(clk));
265
263 rate = clk_get_rate(clk); 266 rate = clk_get_rate(clk);
264 267
265 BUG_ON(rate < MARCO_CLOCK_FREQ); 268 BUG_ON(rate < MARCO_CLOCK_FREQ);
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
index a722aac7ac02..ce18d570e1cd 100644
--- a/drivers/clocksource/timer-prima2.c
+++ b/drivers/clocksource/timer-prima2.c
@@ -200,6 +200,9 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)
200 200
201 clk = of_clk_get(np, 0); 201 clk = of_clk_get(np, 0);
202 BUG_ON(IS_ERR(clk)); 202 BUG_ON(IS_ERR(clk));
203
204 BUG_ON(clk_prepare_enable(clk));
205
203 rate = clk_get_rate(clk); 206 rate = clk_get_rate(clk);
204 207
205 BUG_ON(rate < PRIMA2_CLOCK_FREQ); 208 BUG_ON(rate < PRIMA2_CLOCK_FREQ);
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index ccdd4c7e748b..15d06fcf0b50 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -69,7 +69,6 @@ void proc_fork_connector(struct task_struct *task)
69 struct cn_msg *msg; 69 struct cn_msg *msg;
70 struct proc_event *ev; 70 struct proc_event *ev;
71 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); 71 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
72 struct timespec ts;
73 struct task_struct *parent; 72 struct task_struct *parent;
74 73
75 if (atomic_read(&proc_event_num_listeners) < 1) 74 if (atomic_read(&proc_event_num_listeners) < 1)
@@ -79,8 +78,7 @@ void proc_fork_connector(struct task_struct *task)
79 ev = (struct proc_event *)msg->data; 78 ev = (struct proc_event *)msg->data;
80 memset(&ev->event_data, 0, sizeof(ev->event_data)); 79 memset(&ev->event_data, 0, sizeof(ev->event_data));
81 get_seq(&msg->seq, &ev->cpu); 80 get_seq(&msg->seq, &ev->cpu);
82 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 81 ev->timestamp_ns = ktime_get_ns();
83 ev->timestamp_ns = timespec_to_ns(&ts);
84 ev->what = PROC_EVENT_FORK; 82 ev->what = PROC_EVENT_FORK;
85 rcu_read_lock(); 83 rcu_read_lock();
86 parent = rcu_dereference(task->real_parent); 84 parent = rcu_dereference(task->real_parent);
@@ -102,7 +100,6 @@ void proc_exec_connector(struct task_struct *task)
102{ 100{
103 struct cn_msg *msg; 101 struct cn_msg *msg;
104 struct proc_event *ev; 102 struct proc_event *ev;
105 struct timespec ts;
106 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); 103 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
107 104
108 if (atomic_read(&proc_event_num_listeners) < 1) 105 if (atomic_read(&proc_event_num_listeners) < 1)
@@ -112,8 +109,7 @@ void proc_exec_connector(struct task_struct *task)
112 ev = (struct proc_event *)msg->data; 109 ev = (struct proc_event *)msg->data;
113 memset(&ev->event_data, 0, sizeof(ev->event_data)); 110 memset(&ev->event_data, 0, sizeof(ev->event_data));
114 get_seq(&msg->seq, &ev->cpu); 111 get_seq(&msg->seq, &ev->cpu);
115 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 112 ev->timestamp_ns = ktime_get_ns();
116 ev->timestamp_ns = timespec_to_ns(&ts);
117 ev->what = PROC_EVENT_EXEC; 113 ev->what = PROC_EVENT_EXEC;
118 ev->event_data.exec.process_pid = task->pid; 114 ev->event_data.exec.process_pid = task->pid;
119 ev->event_data.exec.process_tgid = task->tgid; 115 ev->event_data.exec.process_tgid = task->tgid;
@@ -130,7 +126,6 @@ void proc_id_connector(struct task_struct *task, int which_id)
130 struct cn_msg *msg; 126 struct cn_msg *msg;
131 struct proc_event *ev; 127 struct proc_event *ev;
132 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); 128 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
133 struct timespec ts;
134 const struct cred *cred; 129 const struct cred *cred;
135 130
136 if (atomic_read(&proc_event_num_listeners) < 1) 131 if (atomic_read(&proc_event_num_listeners) < 1)
@@ -156,8 +151,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
156 } 151 }
157 rcu_read_unlock(); 152 rcu_read_unlock();
158 get_seq(&msg->seq, &ev->cpu); 153 get_seq(&msg->seq, &ev->cpu);
159 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 154 ev->timestamp_ns = ktime_get_ns();
160 ev->timestamp_ns = timespec_to_ns(&ts);
161 155
162 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 156 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
163 msg->ack = 0; /* not used */ 157 msg->ack = 0; /* not used */
@@ -170,7 +164,6 @@ void proc_sid_connector(struct task_struct *task)
170{ 164{
171 struct cn_msg *msg; 165 struct cn_msg *msg;
172 struct proc_event *ev; 166 struct proc_event *ev;
173 struct timespec ts;
174 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); 167 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
175 168
176 if (atomic_read(&proc_event_num_listeners) < 1) 169 if (atomic_read(&proc_event_num_listeners) < 1)
@@ -180,8 +173,7 @@ void proc_sid_connector(struct task_struct *task)
180 ev = (struct proc_event *)msg->data; 173 ev = (struct proc_event *)msg->data;
181 memset(&ev->event_data, 0, sizeof(ev->event_data)); 174 memset(&ev->event_data, 0, sizeof(ev->event_data));
182 get_seq(&msg->seq, &ev->cpu); 175 get_seq(&msg->seq, &ev->cpu);
183 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 176 ev->timestamp_ns = ktime_get_ns();
184 ev->timestamp_ns = timespec_to_ns(&ts);
185 ev->what = PROC_EVENT_SID; 177 ev->what = PROC_EVENT_SID;
186 ev->event_data.sid.process_pid = task->pid; 178 ev->event_data.sid.process_pid = task->pid;
187 ev->event_data.sid.process_tgid = task->tgid; 179 ev->event_data.sid.process_tgid = task->tgid;
@@ -197,7 +189,6 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
197{ 189{
198 struct cn_msg *msg; 190 struct cn_msg *msg;
199 struct proc_event *ev; 191 struct proc_event *ev;
200 struct timespec ts;
201 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); 192 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
202 193
203 if (atomic_read(&proc_event_num_listeners) < 1) 194 if (atomic_read(&proc_event_num_listeners) < 1)
@@ -207,8 +198,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
207 ev = (struct proc_event *)msg->data; 198 ev = (struct proc_event *)msg->data;
208 memset(&ev->event_data, 0, sizeof(ev->event_data)); 199 memset(&ev->event_data, 0, sizeof(ev->event_data));
209 get_seq(&msg->seq, &ev->cpu); 200 get_seq(&msg->seq, &ev->cpu);
210 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 201 ev->timestamp_ns = ktime_get_ns();
211 ev->timestamp_ns = timespec_to_ns(&ts);
212 ev->what = PROC_EVENT_PTRACE; 202 ev->what = PROC_EVENT_PTRACE;
213 ev->event_data.ptrace.process_pid = task->pid; 203 ev->event_data.ptrace.process_pid = task->pid;
214 ev->event_data.ptrace.process_tgid = task->tgid; 204 ev->event_data.ptrace.process_tgid = task->tgid;
@@ -232,7 +222,6 @@ void proc_comm_connector(struct task_struct *task)
232{ 222{
233 struct cn_msg *msg; 223 struct cn_msg *msg;
234 struct proc_event *ev; 224 struct proc_event *ev;
235 struct timespec ts;
236 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); 225 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
237 226
238 if (atomic_read(&proc_event_num_listeners) < 1) 227 if (atomic_read(&proc_event_num_listeners) < 1)
@@ -242,8 +231,7 @@ void proc_comm_connector(struct task_struct *task)
242 ev = (struct proc_event *)msg->data; 231 ev = (struct proc_event *)msg->data;
243 memset(&ev->event_data, 0, sizeof(ev->event_data)); 232 memset(&ev->event_data, 0, sizeof(ev->event_data));
244 get_seq(&msg->seq, &ev->cpu); 233 get_seq(&msg->seq, &ev->cpu);
245 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 234 ev->timestamp_ns = ktime_get_ns();
246 ev->timestamp_ns = timespec_to_ns(&ts);
247 ev->what = PROC_EVENT_COMM; 235 ev->what = PROC_EVENT_COMM;
248 ev->event_data.comm.process_pid = task->pid; 236 ev->event_data.comm.process_pid = task->pid;
249 ev->event_data.comm.process_tgid = task->tgid; 237 ev->event_data.comm.process_tgid = task->tgid;
@@ -261,7 +249,6 @@ void proc_coredump_connector(struct task_struct *task)
261 struct cn_msg *msg; 249 struct cn_msg *msg;
262 struct proc_event *ev; 250 struct proc_event *ev;
263 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); 251 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
264 struct timespec ts;
265 252
266 if (atomic_read(&proc_event_num_listeners) < 1) 253 if (atomic_read(&proc_event_num_listeners) < 1)
267 return; 254 return;
@@ -270,8 +257,7 @@ void proc_coredump_connector(struct task_struct *task)
270 ev = (struct proc_event *)msg->data; 257 ev = (struct proc_event *)msg->data;
271 memset(&ev->event_data, 0, sizeof(ev->event_data)); 258 memset(&ev->event_data, 0, sizeof(ev->event_data));
272 get_seq(&msg->seq, &ev->cpu); 259 get_seq(&msg->seq, &ev->cpu);
273 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 260 ev->timestamp_ns = ktime_get_ns();
274 ev->timestamp_ns = timespec_to_ns(&ts);
275 ev->what = PROC_EVENT_COREDUMP; 261 ev->what = PROC_EVENT_COREDUMP;
276 ev->event_data.coredump.process_pid = task->pid; 262 ev->event_data.coredump.process_pid = task->pid;
277 ev->event_data.coredump.process_tgid = task->tgid; 263 ev->event_data.coredump.process_tgid = task->tgid;
@@ -288,7 +274,6 @@ void proc_exit_connector(struct task_struct *task)
288 struct cn_msg *msg; 274 struct cn_msg *msg;
289 struct proc_event *ev; 275 struct proc_event *ev;
290 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); 276 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
291 struct timespec ts;
292 277
293 if (atomic_read(&proc_event_num_listeners) < 1) 278 if (atomic_read(&proc_event_num_listeners) < 1)
294 return; 279 return;
@@ -297,8 +282,7 @@ void proc_exit_connector(struct task_struct *task)
297 ev = (struct proc_event *)msg->data; 282 ev = (struct proc_event *)msg->data;
298 memset(&ev->event_data, 0, sizeof(ev->event_data)); 283 memset(&ev->event_data, 0, sizeof(ev->event_data));
299 get_seq(&msg->seq, &ev->cpu); 284 get_seq(&msg->seq, &ev->cpu);
300 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 285 ev->timestamp_ns = ktime_get_ns();
301 ev->timestamp_ns = timespec_to_ns(&ts);
302 ev->what = PROC_EVENT_EXIT; 286 ev->what = PROC_EVENT_EXIT;
303 ev->event_data.exit.process_pid = task->pid; 287 ev->event_data.exit.process_pid = task->pid;
304 ev->event_data.exit.process_tgid = task->tgid; 288 ev->event_data.exit.process_tgid = task->tgid;
@@ -325,7 +309,6 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
325 struct cn_msg *msg; 309 struct cn_msg *msg;
326 struct proc_event *ev; 310 struct proc_event *ev;
327 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); 311 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
328 struct timespec ts;
329 312
330 if (atomic_read(&proc_event_num_listeners) < 1) 313 if (atomic_read(&proc_event_num_listeners) < 1)
331 return; 314 return;
@@ -334,8 +317,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
334 ev = (struct proc_event *)msg->data; 317 ev = (struct proc_event *)msg->data;
335 memset(&ev->event_data, 0, sizeof(ev->event_data)); 318 memset(&ev->event_data, 0, sizeof(ev->event_data));
336 msg->seq = rcvd_seq; 319 msg->seq = rcvd_seq;
337 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 320 ev->timestamp_ns = ktime_get_ns();
338 ev->timestamp_ns = timespec_to_ns(&ts);
339 ev->cpu = -1; 321 ev->cpu = -1;
340 ev->what = PROC_EVENT_NONE; 322 ev->what = PROC_EVENT_NONE;
341 ev->event_data.ack.err = err; 323 ev->event_data.ack.err = err;
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index d7d5c8af92b9..5d997a33907e 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1214,9 +1214,9 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
1214 cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME); 1214 cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);
1215 1215
1216 switch (a->clk_id) { 1216 switch (a->clk_id) {
1217 case CLOCK_REALTIME: getnstimeofday(&ts); break; 1217 case CLOCK_REALTIME: getnstimeofday(&ts); break;
1218 case CLOCK_MONOTONIC: do_posix_clock_monotonic_gettime(&ts); break; 1218 case CLOCK_MONOTONIC: ktime_get_ts(&ts); break;
1219 case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts); break; 1219 case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts); break;
1220 default: 1220 default:
1221 ret = -EINVAL; 1221 ret = -EINVAL;
1222 } 1222 }
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0de123afdb34..08ba1209228e 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -542,8 +542,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
542 const struct drm_crtc *refcrtc, 542 const struct drm_crtc *refcrtc,
543 const struct drm_display_mode *mode) 543 const struct drm_display_mode *mode)
544{ 544{
545 ktime_t stime, etime, mono_time_offset;
546 struct timeval tv_etime; 545 struct timeval tv_etime;
546 ktime_t stime, etime;
547 int vbl_status; 547 int vbl_status;
548 int vpos, hpos, i; 548 int vpos, hpos, i;
549 int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns; 549 int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
@@ -588,13 +588,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
588 vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos, 588 vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos,
589 &hpos, &stime, &etime); 589 &hpos, &stime, &etime);
590 590
591 /*
592 * Get correction for CLOCK_MONOTONIC -> CLOCK_REALTIME if
593 * CLOCK_REALTIME is requested.
594 */
595 if (!drm_timestamp_monotonic)
596 mono_time_offset = ktime_get_monotonic_offset();
597
598 /* Return as no-op if scanout query unsupported or failed. */ 591 /* Return as no-op if scanout query unsupported or failed. */
599 if (!(vbl_status & DRM_SCANOUTPOS_VALID)) { 592 if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
600 DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n", 593 DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
@@ -633,7 +626,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
633 delta_ns = vpos * linedur_ns + hpos * pixeldur_ns; 626 delta_ns = vpos * linedur_ns + hpos * pixeldur_ns;
634 627
635 if (!drm_timestamp_monotonic) 628 if (!drm_timestamp_monotonic)
636 etime = ktime_sub(etime, mono_time_offset); 629 etime = ktime_mono_to_real(etime);
637 630
638 /* save this only for debugging purposes */ 631 /* save this only for debugging purposes */
639 tv_etime = ktime_to_timeval(etime); 632 tv_etime = ktime_to_timeval(etime);
@@ -664,10 +657,7 @@ static struct timeval get_drm_timestamp(void)
664{ 657{
665 ktime_t now; 658 ktime_t now;
666 659
667 now = ktime_get(); 660 now = drm_timestamp_monotonic ? ktime_get() : ktime_get_real();
668 if (!drm_timestamp_monotonic)
669 now = ktime_sub(now, ktime_get_monotonic_offset());
670
671 return ktime_to_timeval(now); 661 return ktime_to_timeval(now);
672} 662}
673 663
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 374f964323ad..1f7700897dfc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -931,7 +931,7 @@ struct intel_ilk_power_mgmt {
931 unsigned long last_time1; 931 unsigned long last_time1;
932 unsigned long chipset_power; 932 unsigned long chipset_power;
933 u64 last_count2; 933 u64 last_count2;
934 struct timespec last_time2; 934 u64 last_time2;
935 unsigned long gfx_power; 935 unsigned long gfx_power;
936 u8 corr; 936 u8 corr;
937 937
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d893e4da5dce..f247d922e44a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1149,16 +1149,16 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
1149static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno, 1149static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
1150 unsigned reset_counter, 1150 unsigned reset_counter,
1151 bool interruptible, 1151 bool interruptible,
1152 struct timespec *timeout, 1152 s64 *timeout,
1153 struct drm_i915_file_private *file_priv) 1153 struct drm_i915_file_private *file_priv)
1154{ 1154{
1155 struct drm_device *dev = ring->dev; 1155 struct drm_device *dev = ring->dev;
1156 struct drm_i915_private *dev_priv = dev->dev_private; 1156 struct drm_i915_private *dev_priv = dev->dev_private;
1157 const bool irq_test_in_progress = 1157 const bool irq_test_in_progress =
1158 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring); 1158 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
1159 struct timespec before, now;
1160 DEFINE_WAIT(wait); 1159 DEFINE_WAIT(wait);
1161 unsigned long timeout_expire; 1160 unsigned long timeout_expire;
1161 s64 before, now;
1162 int ret; 1162 int ret;
1163 1163
1164 WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n"); 1164 WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
@@ -1166,7 +1166,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
1166 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) 1166 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1167 return 0; 1167 return 0;
1168 1168
1169 timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0; 1169 timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
1170 1170
1171 if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) { 1171 if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
1172 gen6_rps_boost(dev_priv); 1172 gen6_rps_boost(dev_priv);
@@ -1181,7 +1181,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
1181 1181
1182 /* Record current time in case interrupted by signal, or wedged */ 1182 /* Record current time in case interrupted by signal, or wedged */
1183 trace_i915_gem_request_wait_begin(ring, seqno); 1183 trace_i915_gem_request_wait_begin(ring, seqno);
1184 getrawmonotonic(&before); 1184 before = ktime_get_raw_ns();
1185 for (;;) { 1185 for (;;) {
1186 struct timer_list timer; 1186 struct timer_list timer;
1187 1187
@@ -1230,7 +1230,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
1230 destroy_timer_on_stack(&timer); 1230 destroy_timer_on_stack(&timer);
1231 } 1231 }
1232 } 1232 }
1233 getrawmonotonic(&now); 1233 now = ktime_get_raw_ns();
1234 trace_i915_gem_request_wait_end(ring, seqno); 1234 trace_i915_gem_request_wait_end(ring, seqno);
1235 1235
1236 if (!irq_test_in_progress) 1236 if (!irq_test_in_progress)
@@ -1239,10 +1239,9 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
1239 finish_wait(&ring->irq_queue, &wait); 1239 finish_wait(&ring->irq_queue, &wait);
1240 1240
1241 if (timeout) { 1241 if (timeout) {
1242 struct timespec sleep_time = timespec_sub(now, before); 1242 s64 tres = *timeout - (now - before);
1243 *timeout = timespec_sub(*timeout, sleep_time); 1243
1244 if (!timespec_valid(timeout)) /* i.e. negative time remains */ 1244 *timeout = tres < 0 ? 0 : tres;
1245 set_normalized_timespec(timeout, 0, 0);
1246 } 1245 }
1247 1246
1248 return ret; 1247 return ret;
@@ -2746,16 +2745,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2746 struct drm_i915_gem_wait *args = data; 2745 struct drm_i915_gem_wait *args = data;
2747 struct drm_i915_gem_object *obj; 2746 struct drm_i915_gem_object *obj;
2748 struct intel_engine_cs *ring = NULL; 2747 struct intel_engine_cs *ring = NULL;
2749 struct timespec timeout_stack, *timeout = NULL;
2750 unsigned reset_counter; 2748 unsigned reset_counter;
2751 u32 seqno = 0; 2749 u32 seqno = 0;
2752 int ret = 0; 2750 int ret = 0;
2753 2751
2754 if (args->timeout_ns >= 0) {
2755 timeout_stack = ns_to_timespec(args->timeout_ns);
2756 timeout = &timeout_stack;
2757 }
2758
2759 ret = i915_mutex_lock_interruptible(dev); 2752 ret = i915_mutex_lock_interruptible(dev);
2760 if (ret) 2753 if (ret)
2761 return ret; 2754 return ret;
@@ -2780,9 +2773,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2780 goto out; 2773 goto out;
2781 2774
2782 /* Do this after OLR check to make sure we make forward progress polling 2775 /* Do this after OLR check to make sure we make forward progress polling
2783 * on this IOCTL with a 0 timeout (like busy ioctl) 2776 * on this IOCTL with a timeout <=0 (like busy ioctl)
2784 */ 2777 */
2785 if (!args->timeout_ns) { 2778 if (args->timeout_ns <= 0) {
2786 ret = -ETIME; 2779 ret = -ETIME;
2787 goto out; 2780 goto out;
2788 } 2781 }
@@ -2791,10 +2784,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2791 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 2784 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2792 mutex_unlock(&dev->struct_mutex); 2785 mutex_unlock(&dev->struct_mutex);
2793 2786
2794 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv); 2787 return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
2795 if (timeout) 2788 file->driver_priv);
2796 args->timeout_ns = timespec_to_ns(timeout);
2797 return ret;
2798 2789
2799out: 2790out:
2800 drm_gem_object_unreference(&obj->base); 2791 drm_gem_object_unreference(&obj->base);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ee72807069e4..f1233f544f3e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2993,7 +2993,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
2993 I915_READ(0x112e0); 2993 I915_READ(0x112e0);
2994 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies); 2994 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
2995 dev_priv->ips.last_count2 = I915_READ(0x112f4); 2995 dev_priv->ips.last_count2 = I915_READ(0x112f4);
2996 getrawmonotonic(&dev_priv->ips.last_time2); 2996 dev_priv->ips.last_time2 = ktime_get_raw_ns();
2997 2997
2998 spin_unlock_irq(&mchdev_lock); 2998 spin_unlock_irq(&mchdev_lock);
2999} 2999}
@@ -4314,18 +4314,16 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
4314 4314
4315static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) 4315static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
4316{ 4316{
4317 struct timespec now, diff1; 4317 u64 now, diff, diffms;
4318 u64 diff;
4319 unsigned long diffms;
4320 u32 count; 4318 u32 count;
4321 4319
4322 assert_spin_locked(&mchdev_lock); 4320 assert_spin_locked(&mchdev_lock);
4323 4321
4324 getrawmonotonic(&now); 4322 now = ktime_get_raw_ns();
4325 diff1 = timespec_sub(now, dev_priv->ips.last_time2); 4323 diffms = now - dev_priv->ips.last_time2;
4324 do_div(diffms, NSEC_PER_MSEC);
4326 4325
4327 /* Don't divide by 0 */ 4326 /* Don't divide by 0 */
4328 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
4329 if (!diffms) 4327 if (!diffms)
4330 return; 4328 return;
4331 4329
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 6b252a887ae2..c886c024c637 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -159,8 +159,8 @@ struct vmw_surface {
159 159
160struct vmw_marker_queue { 160struct vmw_marker_queue {
161 struct list_head head; 161 struct list_head head;
162 struct timespec lag; 162 u64 lag;
163 struct timespec lag_time; 163 u64 lag_time;
164 spinlock_t lock; 164 spinlock_t lock;
165}; 165};
166 166
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
index 8a8725c2716c..efd1ffd68185 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
@@ -31,14 +31,14 @@
31struct vmw_marker { 31struct vmw_marker {
32 struct list_head head; 32 struct list_head head;
33 uint32_t seqno; 33 uint32_t seqno;
34 struct timespec submitted; 34 u64 submitted;
35}; 35};
36 36
37void vmw_marker_queue_init(struct vmw_marker_queue *queue) 37void vmw_marker_queue_init(struct vmw_marker_queue *queue)
38{ 38{
39 INIT_LIST_HEAD(&queue->head); 39 INIT_LIST_HEAD(&queue->head);
40 queue->lag = ns_to_timespec(0); 40 queue->lag = 0;
41 getrawmonotonic(&queue->lag_time); 41 queue->lag_time = ktime_get_raw_ns();
42 spin_lock_init(&queue->lock); 42 spin_lock_init(&queue->lock);
43} 43}
44 44
@@ -62,7 +62,7 @@ int vmw_marker_push(struct vmw_marker_queue *queue,
62 return -ENOMEM; 62 return -ENOMEM;
63 63
64 marker->seqno = seqno; 64 marker->seqno = seqno;
65 getrawmonotonic(&marker->submitted); 65 marker->submitted = ktime_get_raw_ns();
66 spin_lock(&queue->lock); 66 spin_lock(&queue->lock);
67 list_add_tail(&marker->head, &queue->head); 67 list_add_tail(&marker->head, &queue->head);
68 spin_unlock(&queue->lock); 68 spin_unlock(&queue->lock);
@@ -74,14 +74,14 @@ int vmw_marker_pull(struct vmw_marker_queue *queue,
74 uint32_t signaled_seqno) 74 uint32_t signaled_seqno)
75{ 75{
76 struct vmw_marker *marker, *next; 76 struct vmw_marker *marker, *next;
77 struct timespec now;
78 bool updated = false; 77 bool updated = false;
78 u64 now;
79 79
80 spin_lock(&queue->lock); 80 spin_lock(&queue->lock);
81 getrawmonotonic(&now); 81 now = ktime_get_raw_ns();
82 82
83 if (list_empty(&queue->head)) { 83 if (list_empty(&queue->head)) {
84 queue->lag = ns_to_timespec(0); 84 queue->lag = 0;
85 queue->lag_time = now; 85 queue->lag_time = now;
86 updated = true; 86 updated = true;
87 goto out_unlock; 87 goto out_unlock;
@@ -91,7 +91,7 @@ int vmw_marker_pull(struct vmw_marker_queue *queue,
91 if (signaled_seqno - marker->seqno > (1 << 30)) 91 if (signaled_seqno - marker->seqno > (1 << 30))
92 continue; 92 continue;
93 93
94 queue->lag = timespec_sub(now, marker->submitted); 94 queue->lag = now - marker->submitted;
95 queue->lag_time = now; 95 queue->lag_time = now;
96 updated = true; 96 updated = true;
97 list_del(&marker->head); 97 list_del(&marker->head);
@@ -104,27 +104,13 @@ out_unlock:
104 return (updated) ? 0 : -EBUSY; 104 return (updated) ? 0 : -EBUSY;
105} 105}
106 106
107static struct timespec vmw_timespec_add(struct timespec t1, 107static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
108 struct timespec t2)
109{ 108{
110 t1.tv_sec += t2.tv_sec; 109 u64 now;
111 t1.tv_nsec += t2.tv_nsec;
112 if (t1.tv_nsec >= 1000000000L) {
113 t1.tv_sec += 1;
114 t1.tv_nsec -= 1000000000L;
115 }
116
117 return t1;
118}
119
120static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
121{
122 struct timespec now;
123 110
124 spin_lock(&queue->lock); 111 spin_lock(&queue->lock);
125 getrawmonotonic(&now); 112 now = ktime_get_raw_ns();
126 queue->lag = vmw_timespec_add(queue->lag, 113 queue->lag += now - queue->lag_time;
127 timespec_sub(now, queue->lag_time));
128 queue->lag_time = now; 114 queue->lag_time = now;
129 spin_unlock(&queue->lock); 115 spin_unlock(&queue->lock);
130 return queue->lag; 116 return queue->lag;
@@ -134,11 +120,9 @@ static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
134static bool vmw_lag_lt(struct vmw_marker_queue *queue, 120static bool vmw_lag_lt(struct vmw_marker_queue *queue,
135 uint32_t us) 121 uint32_t us)
136{ 122{
137 struct timespec lag, cond; 123 u64 cond = (u64) us * NSEC_PER_USEC;
138 124
139 cond = ns_to_timespec((s64) us * 1000); 125 return vmw_fifo_lag(queue) <= cond;
140 lag = vmw_fifo_lag(queue);
141 return (timespec_compare(&lag, &cond) < 1);
142} 126}
143 127
144int vmw_wait_lag(struct vmw_private *dev_priv, 128int vmw_wait_lag(struct vmw_private *dev_priv,
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 632f1dc0fe1f..7a8a6fbf11ff 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -842,11 +842,10 @@ static ssize_t aem_show_power(struct device *dev,
842 struct aem_data *data = dev_get_drvdata(dev); 842 struct aem_data *data = dev_get_drvdata(dev);
843 u64 before, after, delta, time; 843 u64 before, after, delta, time;
844 signed long leftover; 844 signed long leftover;
845 struct timespec b, a;
846 845
847 mutex_lock(&data->lock); 846 mutex_lock(&data->lock);
848 update_aem_energy_one(data, attr->index); 847 update_aem_energy_one(data, attr->index);
849 getnstimeofday(&b); 848 time = ktime_get_ns();
850 before = data->energy[attr->index]; 849 before = data->energy[attr->index];
851 850
852 leftover = schedule_timeout_interruptible( 851 leftover = schedule_timeout_interruptible(
@@ -858,11 +857,10 @@ static ssize_t aem_show_power(struct device *dev,
858 } 857 }
859 858
860 update_aem_energy_one(data, attr->index); 859 update_aem_energy_one(data, attr->index);
861 getnstimeofday(&a); 860 time = ktime_get_ns() - time;
862 after = data->energy[attr->index]; 861 after = data->energy[attr->index];
863 mutex_unlock(&data->lock); 862 mutex_unlock(&data->lock);
864 863
865 time = timespec_to_ns(&a) - timespec_to_ns(&b);
866 delta = (after - before) * UJ_PER_MJ; 864 delta = (after - before) * UJ_PER_MJ;
867 865
868 return sprintf(buf, "%llu\n", 866 return sprintf(buf, "%llu\n",
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index fd325ec9f064..de055451d1af 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -108,9 +108,8 @@ static void evdev_queue_syn_dropped(struct evdev_client *client)
108 struct input_event ev; 108 struct input_event ev;
109 ktime_t time; 109 ktime_t time;
110 110
111 time = ktime_get(); 111 time = (client->clkid == CLOCK_MONOTONIC) ?
112 if (client->clkid != CLOCK_MONOTONIC) 112 ktime_get() : ktime_get_real();
113 time = ktime_sub(time, ktime_get_monotonic_offset());
114 113
115 ev.time = ktime_to_timeval(time); 114 ev.time = ktime_to_timeval(time);
116 ev.type = EV_SYN; 115 ev.type = EV_SYN;
@@ -202,7 +201,7 @@ static void evdev_events(struct input_handle *handle,
202 ktime_t time_mono, time_real; 201 ktime_t time_mono, time_real;
203 202
204 time_mono = ktime_get(); 203 time_mono = ktime_get();
205 time_real = ktime_sub(time_mono, ktime_get_monotonic_offset()); 204 time_real = ktime_mono_to_real(time_mono);
206 205
207 rcu_read_lock(); 206 rcu_read_lock();
208 207
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index 0b8d32829166..8c1c7cc373f8 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -225,7 +225,6 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev,
225 u8 *ptr; 225 u8 *ptr;
226 int sum; 226 int sum;
227 int ret = 0, final_ret; 227 int ret = 0, final_ret;
228 struct timespec ts;
229 228
230 /* 229 /*
231 * We have the shared ec_dev buffer plus we do lots of separate spi_sync 230 * We have the shared ec_dev buffer plus we do lots of separate spi_sync
@@ -239,11 +238,9 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev,
239 238
240 /* If it's too soon to do another transaction, wait */ 239 /* If it's too soon to do another transaction, wait */
241 if (ec_spi->last_transfer_ns) { 240 if (ec_spi->last_transfer_ns) {
242 struct timespec ts;
243 unsigned long delay; /* The delay completed so far */ 241 unsigned long delay; /* The delay completed so far */
244 242
245 ktime_get_ts(&ts); 243 delay = ktime_get_ns() - ec_spi->last_transfer_ns;
246 delay = timespec_to_ns(&ts) - ec_spi->last_transfer_ns;
247 if (delay < EC_SPI_RECOVERY_TIME_NS) 244 if (delay < EC_SPI_RECOVERY_TIME_NS)
248 ndelay(EC_SPI_RECOVERY_TIME_NS - delay); 245 ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
249 } 246 }
@@ -280,8 +277,7 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev,
280 } 277 }
281 278
282 final_ret = spi_sync(ec_spi->spi, &msg); 279 final_ret = spi_sync(ec_spi->spi, &msg);
283 ktime_get_ts(&ts); 280 ec_spi->last_transfer_ns = ktime_get_ns();
284 ec_spi->last_transfer_ns = timespec_to_ns(&ts);
285 if (!ret) 281 if (!ret)
286 ret = final_ret; 282 ret = final_ret;
287 if (ret < 0) { 283 if (ret < 0) {
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index 06f6ad29ceff..3336ddca45ac 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -145,7 +145,6 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
145 union ioc4_int_out int_out; 145 union ioc4_int_out int_out;
146 union ioc4_gpcr gpcr; 146 union ioc4_gpcr gpcr;
147 unsigned int state, last_state = 1; 147 unsigned int state, last_state = 1;
148 struct timespec start_ts, end_ts;
149 uint64_t start, end, period; 148 uint64_t start, end, period;
150 unsigned int count = 0; 149 unsigned int count = 0;
151 150
@@ -174,10 +173,10 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
174 if (!last_state && state) { 173 if (!last_state && state) {
175 count++; 174 count++;
176 if (count == IOC4_CALIBRATE_END) { 175 if (count == IOC4_CALIBRATE_END) {
177 ktime_get_ts(&end_ts); 176 end = ktime_get_ns();
178 break; 177 break;
179 } else if (count == IOC4_CALIBRATE_DISCARD) 178 } else if (count == IOC4_CALIBRATE_DISCARD)
180 ktime_get_ts(&start_ts); 179 start = ktime_get_ns();
181 } 180 }
182 last_state = state; 181 last_state = state;
183 } while (1); 182 } while (1);
@@ -192,8 +191,6 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
192 * by which the IOC4 generates the square wave, to get the 191 * by which the IOC4 generates the square wave, to get the
193 * period of an IOC4 INT_OUT count. 192 * period of an IOC4 INT_OUT count.
194 */ 193 */
195 end = end_ts.tv_sec * NSEC_PER_SEC + end_ts.tv_nsec;
196 start = start_ts.tv_sec * NSEC_PER_SEC + start_ts.tv_nsec;
197 period = (end - start) / 194 period = (end - start) /
198 (IOC4_CALIBRATE_CYCLES * 2 * (IOC4_CALIBRATE_COUNT + 1)); 195 (IOC4_CALIBRATE_CYCLES * 2 * (IOC4_CALIBRATE_COUNT + 1));
199 196
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 87d1b018a9c3..67f8f5a1dc86 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -548,7 +548,7 @@ static void cmd_work_handler(struct work_struct *work)
548 lay->status_own = CMD_OWNER_HW; 548 lay->status_own = CMD_OWNER_HW;
549 set_signature(ent, !cmd->checksum_disabled); 549 set_signature(ent, !cmd->checksum_disabled);
550 dump_command(dev, ent, 1); 550 dump_command(dev, ent, 1);
551 ktime_get_ts(&ent->ts1); 551 ent->ts1 = ktime_get_ns();
552 552
553 /* ring doorbell after the descriptor is valid */ 553 /* ring doorbell after the descriptor is valid */
554 wmb(); 554 wmb();
@@ -637,7 +637,6 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
637{ 637{
638 struct mlx5_cmd *cmd = &dev->cmd; 638 struct mlx5_cmd *cmd = &dev->cmd;
639 struct mlx5_cmd_work_ent *ent; 639 struct mlx5_cmd_work_ent *ent;
640 ktime_t t1, t2, delta;
641 struct mlx5_cmd_stats *stats; 640 struct mlx5_cmd_stats *stats;
642 int err = 0; 641 int err = 0;
643 s64 ds; 642 s64 ds;
@@ -668,10 +667,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
668 if (err == -ETIMEDOUT) 667 if (err == -ETIMEDOUT)
669 goto out; 668 goto out;
670 669
671 t1 = timespec_to_ktime(ent->ts1); 670 ds = ent->ts2 - ent->ts1;
672 t2 = timespec_to_ktime(ent->ts2);
673 delta = ktime_sub(t2, t1);
674 ds = ktime_to_ns(delta);
675 op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); 671 op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
676 if (op < ARRAY_SIZE(cmd->stats)) { 672 if (op < ARRAY_SIZE(cmd->stats)) {
677 stats = &cmd->stats[op]; 673 stats = &cmd->stats[op];
@@ -1135,7 +1131,6 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
1135 void *context; 1131 void *context;
1136 int err; 1132 int err;
1137 int i; 1133 int i;
1138 ktime_t t1, t2, delta;
1139 s64 ds; 1134 s64 ds;
1140 struct mlx5_cmd_stats *stats; 1135 struct mlx5_cmd_stats *stats;
1141 unsigned long flags; 1136 unsigned long flags;
@@ -1149,7 +1144,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
1149 sem = &cmd->pages_sem; 1144 sem = &cmd->pages_sem;
1150 else 1145 else
1151 sem = &cmd->sem; 1146 sem = &cmd->sem;
1152 ktime_get_ts(&ent->ts2); 1147 ent->ts2 = ktime_get_ns();
1153 memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); 1148 memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
1154 dump_command(dev, ent, 0); 1149 dump_command(dev, ent, 0);
1155 if (!ent->ret) { 1150 if (!ent->ret) {
@@ -1163,10 +1158,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
1163 } 1158 }
1164 free_ent(cmd, ent->idx); 1159 free_ent(cmd, ent->idx);
1165 if (ent->callback) { 1160 if (ent->callback) {
1166 t1 = timespec_to_ktime(ent->ts1); 1161 ds = ent->ts2 - ent->ts1;
1167 t2 = timespec_to_ktime(ent->ts2);
1168 delta = ktime_sub(t2, t1);
1169 ds = ktime_to_ns(delta);
1170 if (ent->op < ARRAY_SIZE(cmd->stats)) { 1162 if (ent->op < ARRAY_SIZE(cmd->stats)) {
1171 stats = &cmd->stats[ent->op]; 1163 stats = &cmd->stats[ent->op];
1172 spin_lock_irqsave(&stats->lock, flags); 1164 spin_lock_irqsave(&stats->lock, flags);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 2a8ed8375ec0..14b80b1b450c 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1734,7 +1734,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1734 struct ath9k_hw_cal_data *caldata, bool fastcc) 1734 struct ath9k_hw_cal_data *caldata, bool fastcc)
1735{ 1735{
1736 struct ath_common *common = ath9k_hw_common(ah); 1736 struct ath_common *common = ath9k_hw_common(ah);
1737 struct timespec ts;
1738 u32 saveLedState; 1737 u32 saveLedState;
1739 u32 saveDefAntenna; 1738 u32 saveDefAntenna;
1740 u32 macStaId1; 1739 u32 macStaId1;
@@ -1784,8 +1783,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1784 1783
1785 /* Save TSF before chip reset, a cold reset clears it */ 1784 /* Save TSF before chip reset, a cold reset clears it */
1786 tsf = ath9k_hw_gettsf64(ah); 1785 tsf = ath9k_hw_gettsf64(ah);
1787 getrawmonotonic(&ts); 1786 usec = ktime_to_us(ktime_get_raw());
1788 usec = ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
1789 1787
1790 saveLedState = REG_READ(ah, AR_CFG_LED) & 1788 saveLedState = REG_READ(ah, AR_CFG_LED) &
1791 (AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL | 1789 (AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
@@ -1818,8 +1816,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1818 } 1816 }
1819 1817
1820 /* Restore TSF */ 1818 /* Restore TSF */
1821 getrawmonotonic(&ts); 1819 usec = ktime_to_us(ktime_get_raw()) - usec;
1822 usec = ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000 - usec;
1823 ath9k_hw_settsf64(ah, tsf + usec); 1820 ath9k_hw_settsf64(ah, tsf + usec);
1824 1821
1825 if (AR_SREV_9280_20_OR_LATER(ah)) 1822 if (AR_SREV_9280_20_OR_LATER(ah))
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 5edfcb0da37d..e3718250d66e 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -702,6 +702,42 @@ void __iomem *of_iomap(struct device_node *np, int index)
702} 702}
703EXPORT_SYMBOL(of_iomap); 703EXPORT_SYMBOL(of_iomap);
704 704
705/*
706 * of_io_request_and_map - Requests a resource and maps the memory mapped IO
707 * for a given device_node
708 * @device: the device whose io range will be mapped
709 * @index: index of the io range
710 * @name: name of the resource
711 *
712 * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
713 * error code on failure. Usage example:
714 *
715 * base = of_io_request_and_map(node, 0, "foo");
716 * if (IS_ERR(base))
717 * return PTR_ERR(base);
718 */
719void __iomem *of_io_request_and_map(struct device_node *np, int index,
720 char *name)
721{
722 struct resource res;
723 void __iomem *mem;
724
725 if (of_address_to_resource(np, index, &res))
726 return IOMEM_ERR_PTR(-EINVAL);
727
728 if (!request_mem_region(res.start, resource_size(&res), name))
729 return IOMEM_ERR_PTR(-EBUSY);
730
731 mem = ioremap(res.start, resource_size(&res));
732 if (!mem) {
733 release_mem_region(res.start, resource_size(&res));
734 return IOMEM_ERR_PTR(-ENOMEM);
735 }
736
737 return mem;
738}
739EXPORT_SYMBOL(of_io_request_and_map);
740
705/** 741/**
706 * of_dma_get_range - Get DMA range info 742 * of_dma_get_range - Get DMA range info
707 * @np: device node to get DMA range info 743 * @np: device node to get DMA range info
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 1812f026960c..daa8e7514eae 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -306,11 +306,9 @@ static struct nsm_handle *nsm_lookup_priv(const struct nsm_private *priv)
306static void nsm_init_private(struct nsm_handle *nsm) 306static void nsm_init_private(struct nsm_handle *nsm)
307{ 307{
308 u64 *p = (u64 *)&nsm->sm_priv.data; 308 u64 *p = (u64 *)&nsm->sm_priv.data;
309 struct timespec ts;
310 s64 ns; 309 s64 ns;
311 310
312 ktime_get_ts(&ts); 311 ns = ktime_get_ns();
313 ns = timespec_to_ns(&ts);
314 put_unaligned(ns, p); 312 put_unaligned(ns, p);
315 put_unaligned((unsigned long)nsm, p + 1); 313 put_unaligned((unsigned long)nsm, p + 1);
316} 314}
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 64db2bceac59..d7f9199217bb 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -473,13 +473,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
473 priority = task_prio(task); 473 priority = task_prio(task);
474 nice = task_nice(task); 474 nice = task_nice(task);
475 475
476 /* Temporary variable needed for gcc-2.96 */
477 /* convert timespec -> nsec*/
478 start_time =
479 (unsigned long long)task->real_start_time.tv_sec * NSEC_PER_SEC
480 + task->real_start_time.tv_nsec;
481 /* convert nsec -> ticks */ 476 /* convert nsec -> ticks */
482 start_time = nsec_to_clock_t(start_time); 477 start_time = nsec_to_clock_t(task->real_start_time);
483 478
484 seq_printf(m, "%d (%s) %c", pid_nr_ns(pid, ns), tcomm, state); 479 seq_printf(m, "%d (%s) %c", pid_nr_ns(pid, ns), tcomm, state);
485 seq_put_decimal_ll(m, ' ', ppid); 480 seq_put_decimal_ll(m, ' ', ppid);
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 0013142c0475..80c350216ea8 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -35,8 +35,9 @@ struct timerfd_ctx {
35 ktime_t moffs; 35 ktime_t moffs;
36 wait_queue_head_t wqh; 36 wait_queue_head_t wqh;
37 u64 ticks; 37 u64 ticks;
38 int expired;
39 int clockid; 38 int clockid;
39 short unsigned expired;
40 short unsigned settime_flags; /* to show in fdinfo */
40 struct rcu_head rcu; 41 struct rcu_head rcu;
41 struct list_head clist; 42 struct list_head clist;
42 bool might_cancel; 43 bool might_cancel;
@@ -92,7 +93,7 @@ static enum alarmtimer_restart timerfd_alarmproc(struct alarm *alarm,
92 */ 93 */
93void timerfd_clock_was_set(void) 94void timerfd_clock_was_set(void)
94{ 95{
95 ktime_t moffs = ktime_get_monotonic_offset(); 96 ktime_t moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 });
96 struct timerfd_ctx *ctx; 97 struct timerfd_ctx *ctx;
97 unsigned long flags; 98 unsigned long flags;
98 99
@@ -125,7 +126,7 @@ static bool timerfd_canceled(struct timerfd_ctx *ctx)
125{ 126{
126 if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX) 127 if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX)
127 return false; 128 return false;
128 ctx->moffs = ktime_get_monotonic_offset(); 129 ctx->moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 });
129 return true; 130 return true;
130} 131}
131 132
@@ -196,6 +197,8 @@ static int timerfd_setup(struct timerfd_ctx *ctx, int flags,
196 if (timerfd_canceled(ctx)) 197 if (timerfd_canceled(ctx))
197 return -ECANCELED; 198 return -ECANCELED;
198 } 199 }
200
201 ctx->settime_flags = flags & TFD_SETTIME_FLAGS;
199 return 0; 202 return 0;
200} 203}
201 204
@@ -284,11 +287,77 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
284 return res; 287 return res;
285} 288}
286 289
290#ifdef CONFIG_PROC_FS
291static int timerfd_show(struct seq_file *m, struct file *file)
292{
293 struct timerfd_ctx *ctx = file->private_data;
294 struct itimerspec t;
295
296 spin_lock_irq(&ctx->wqh.lock);
297 t.it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
298 t.it_interval = ktime_to_timespec(ctx->tintv);
299 spin_unlock_irq(&ctx->wqh.lock);
300
301 return seq_printf(m,
302 "clockid: %d\n"
303 "ticks: %llu\n"
304 "settime flags: 0%o\n"
305 "it_value: (%llu, %llu)\n"
306 "it_interval: (%llu, %llu)\n",
307 ctx->clockid, (unsigned long long)ctx->ticks,
308 ctx->settime_flags,
309 (unsigned long long)t.it_value.tv_sec,
310 (unsigned long long)t.it_value.tv_nsec,
311 (unsigned long long)t.it_interval.tv_sec,
312 (unsigned long long)t.it_interval.tv_nsec);
313}
314#else
315#define timerfd_show NULL
316#endif
317
318#ifdef CONFIG_CHECKPOINT_RESTORE
319static long timerfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
320{
321 struct timerfd_ctx *ctx = file->private_data;
322 int ret = 0;
323
324 switch (cmd) {
325 case TFD_IOC_SET_TICKS: {
326 u64 ticks;
327
328 if (copy_from_user(&ticks, (u64 __user *)arg, sizeof(ticks)))
329 return -EFAULT;
330 if (!ticks)
331 return -EINVAL;
332
333 spin_lock_irq(&ctx->wqh.lock);
334 if (!timerfd_canceled(ctx)) {
335 ctx->ticks = ticks;
336 if (ticks)
337 wake_up_locked(&ctx->wqh);
338 } else
339 ret = -ECANCELED;
340 spin_unlock_irq(&ctx->wqh.lock);
341 break;
342 }
343 default:
344 ret = -ENOTTY;
345 break;
346 }
347
348 return ret;
349}
350#else
351#define timerfd_ioctl NULL
352#endif
353
287static const struct file_operations timerfd_fops = { 354static const struct file_operations timerfd_fops = {
288 .release = timerfd_release, 355 .release = timerfd_release,
289 .poll = timerfd_poll, 356 .poll = timerfd_poll,
290 .read = timerfd_read, 357 .read = timerfd_read,
291 .llseek = noop_llseek, 358 .llseek = noop_llseek,
359 .show_fdinfo = timerfd_show,
360 .unlocked_ioctl = timerfd_ioctl,
292}; 361};
293 362
294static int timerfd_fget(int fd, struct fd *p) 363static int timerfd_fget(int fd, struct fd *p)
@@ -336,7 +405,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
336 else 405 else
337 hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS); 406 hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS);
338 407
339 ctx->moffs = ktime_get_monotonic_offset(); 408 ctx->moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 });
340 409
341 ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx, 410 ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx,
342 O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS)); 411 O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS));
diff --git a/include/clocksource/pxa.h b/include/clocksource/pxa.h
new file mode 100644
index 000000000000..1efbe5a66958
--- /dev/null
+++ b/include/clocksource/pxa.h
@@ -0,0 +1,18 @@
1/*
2 * PXA clocksource, clockevents, and OST interrupt handlers.
3 *
4 * Copyright (C) 2014 Robert Jarzmik
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 */
11
12#ifndef _CLOCKSOURCE_PXA_H
13#define _CLOCKSOURCE_PXA_H
14
15extern void pxa_timer_nodt_init(int irq, void __iomem *base,
16 unsigned long clock_tick_rate);
17
18#endif
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index a16b497d5159..653f0e2b6ca9 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -162,7 +162,6 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
162 * @archdata: arch-specific data 162 * @archdata: arch-specific data
163 * @suspend: suspend function for the clocksource, if necessary 163 * @suspend: suspend function for the clocksource, if necessary
164 * @resume: resume function for the clocksource, if necessary 164 * @resume: resume function for the clocksource, if necessary
165 * @cycle_last: most recent cycle counter value seen by ::read()
166 * @owner: module reference, must be set by clocksource in modules 165 * @owner: module reference, must be set by clocksource in modules
167 */ 166 */
168struct clocksource { 167struct clocksource {
@@ -171,7 +170,6 @@ struct clocksource {
171 * clocksource itself is cacheline aligned. 170 * clocksource itself is cacheline aligned.
172 */ 171 */
173 cycle_t (*read)(struct clocksource *cs); 172 cycle_t (*read)(struct clocksource *cs);
174 cycle_t cycle_last;
175 cycle_t mask; 173 cycle_t mask;
176 u32 mult; 174 u32 mult;
177 u32 shift; 175 u32 shift;
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index e7a8d3fa91d5..a036d058a249 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -165,6 +165,7 @@ enum hrtimer_base_type {
165 * struct hrtimer_cpu_base - the per cpu clock bases 165 * struct hrtimer_cpu_base - the per cpu clock bases
166 * @lock: lock protecting the base and associated clock bases 166 * @lock: lock protecting the base and associated clock bases
167 * and timers 167 * and timers
168 * @cpu: cpu number
168 * @active_bases: Bitfield to mark bases with active timers 169 * @active_bases: Bitfield to mark bases with active timers
169 * @clock_was_set: Indicates that clock was set from irq context. 170 * @clock_was_set: Indicates that clock was set from irq context.
170 * @expires_next: absolute time of the next event which was scheduled 171 * @expires_next: absolute time of the next event which was scheduled
@@ -179,6 +180,7 @@ enum hrtimer_base_type {
179 */ 180 */
180struct hrtimer_cpu_base { 181struct hrtimer_cpu_base {
181 raw_spinlock_t lock; 182 raw_spinlock_t lock;
183 unsigned int cpu;
182 unsigned int active_bases; 184 unsigned int active_bases;
183 unsigned int clock_was_set; 185 unsigned int clock_was_set;
184#ifdef CONFIG_HIGH_RES_TIMERS 186#ifdef CONFIG_HIGH_RES_TIMERS
@@ -324,14 +326,6 @@ static inline void timerfd_clock_was_set(void) { }
324#endif 326#endif
325extern void hrtimers_resume(void); 327extern void hrtimers_resume(void);
326 328
327extern ktime_t ktime_get(void);
328extern ktime_t ktime_get_real(void);
329extern ktime_t ktime_get_boottime(void);
330extern ktime_t ktime_get_monotonic_offset(void);
331extern ktime_t ktime_get_clocktai(void);
332extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
333 ktime_t *offs_tai);
334
335DECLARE_PER_CPU(struct tick_device, tick_cpu_device); 329DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
336 330
337 331
@@ -452,12 +446,6 @@ extern void hrtimer_run_pending(void);
452/* Bootup initialization: */ 446/* Bootup initialization: */
453extern void __init hrtimers_init(void); 447extern void __init hrtimers_init(void);
454 448
455#if BITS_PER_LONG < 64
456extern u64 ktime_divns(const ktime_t kt, s64 div);
457#else /* BITS_PER_LONG < 64 */
458# define ktime_divns(kt, div) (u64)((kt).tv64 / (div))
459#endif
460
461/* Show pending timers: */ 449/* Show pending timers: */
462extern void sysrq_timer_list_show(void); 450extern void sysrq_timer_list_show(void);
463 451
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index ccde91725f98..15dc6bc2bdd2 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -277,14 +277,7 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
277 **/ 277 **/
278static inline s64 iio_get_time_ns(void) 278static inline s64 iio_get_time_ns(void)
279{ 279{
280 struct timespec ts; 280 return ktime_get_real_ns();
281 /*
282 * calls getnstimeofday.
283 * If hrtimers then up to ns accurate, if not microsecond.
284 */
285 ktime_get_real_ts(&ts);
286
287 return timespec_to_ns(&ts);
288} 281}
289 282
290/* Device operating modes */ 283/* Device operating modes */
diff --git a/include/linux/io.h b/include/linux/io.h
index b76e6e545806..d5fc9b8d8b03 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -58,6 +58,8 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
58} 58}
59#endif 59#endif
60 60
61#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
62
61void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, 63void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
62 unsigned long size); 64 unsigned long size);
63void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, 65void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index de9e46e6bcc9..c9d645ad98ff 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -27,43 +27,19 @@
27/* 27/*
28 * ktime_t: 28 * ktime_t:
29 * 29 *
30 * On 64-bit CPUs a single 64-bit variable is used to store the hrtimers 30 * A single 64-bit variable is used to store the hrtimers
31 * internal representation of time values in scalar nanoseconds. The 31 * internal representation of time values in scalar nanoseconds. The
32 * design plays out best on 64-bit CPUs, where most conversions are 32 * design plays out best on 64-bit CPUs, where most conversions are
33 * NOPs and most arithmetic ktime_t operations are plain arithmetic 33 * NOPs and most arithmetic ktime_t operations are plain arithmetic
34 * operations. 34 * operations.
35 * 35 *
36 * On 32-bit CPUs an optimized representation of the timespec structure
37 * is used to avoid expensive conversions from and to timespecs. The
38 * endian-aware order of the tv struct members is chosen to allow
39 * mathematical operations on the tv64 member of the union too, which
40 * for certain operations produces better code.
41 *
42 * For architectures with efficient support for 64/32-bit conversions the
43 * plain scalar nanosecond based representation can be selected by the
44 * config switch CONFIG_KTIME_SCALAR.
45 */ 36 */
46union ktime { 37union ktime {
47 s64 tv64; 38 s64 tv64;
48#if BITS_PER_LONG != 64 && !defined(CONFIG_KTIME_SCALAR)
49 struct {
50# ifdef __BIG_ENDIAN
51 s32 sec, nsec;
52# else
53 s32 nsec, sec;
54# endif
55 } tv;
56#endif
57}; 39};
58 40
59typedef union ktime ktime_t; /* Kill this */ 41typedef union ktime ktime_t; /* Kill this */
60 42
61/*
62 * ktime_t definitions when using the 64-bit scalar representation:
63 */
64
65#if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)
66
67/** 43/**
68 * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value 44 * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
69 * @secs: seconds to set 45 * @secs: seconds to set
@@ -71,13 +47,12 @@ typedef union ktime ktime_t; /* Kill this */
71 * 47 *
72 * Return: The ktime_t representation of the value. 48 * Return: The ktime_t representation of the value.
73 */ 49 */
74static inline ktime_t ktime_set(const long secs, const unsigned long nsecs) 50static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
75{ 51{
76#if (BITS_PER_LONG == 64)
77 if (unlikely(secs >= KTIME_SEC_MAX)) 52 if (unlikely(secs >= KTIME_SEC_MAX))
78 return (ktime_t){ .tv64 = KTIME_MAX }; 53 return (ktime_t){ .tv64 = KTIME_MAX };
79#endif 54
80 return (ktime_t) { .tv64 = (s64)secs * NSEC_PER_SEC + (s64)nsecs }; 55 return (ktime_t) { .tv64 = secs * NSEC_PER_SEC + (s64)nsecs };
81} 56}
82 57
83/* Subtract two ktime_t variables. rem = lhs -rhs: */ 58/* Subtract two ktime_t variables. rem = lhs -rhs: */
@@ -108,6 +83,12 @@ static inline ktime_t timespec_to_ktime(struct timespec ts)
108 return ktime_set(ts.tv_sec, ts.tv_nsec); 83 return ktime_set(ts.tv_sec, ts.tv_nsec);
109} 84}
110 85
86/* convert a timespec64 to ktime_t format: */
87static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
88{
89 return ktime_set(ts.tv_sec, ts.tv_nsec);
90}
91
111/* convert a timeval to ktime_t format: */ 92/* convert a timeval to ktime_t format: */
112static inline ktime_t timeval_to_ktime(struct timeval tv) 93static inline ktime_t timeval_to_ktime(struct timeval tv)
113{ 94{
@@ -117,159 +98,15 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
117/* Map the ktime_t to timespec conversion to ns_to_timespec function */ 98/* Map the ktime_t to timespec conversion to ns_to_timespec function */
118#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64) 99#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64)
119 100
101/* Map the ktime_t to timespec conversion to ns_to_timespec function */
102#define ktime_to_timespec64(kt) ns_to_timespec64((kt).tv64)
103
120/* Map the ktime_t to timeval conversion to ns_to_timeval function */ 104/* Map the ktime_t to timeval conversion to ns_to_timeval function */
121#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64) 105#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64)
122 106
123/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ 107/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
124#define ktime_to_ns(kt) ((kt).tv64) 108#define ktime_to_ns(kt) ((kt).tv64)
125 109
126#else /* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */
127
128/*
129 * Helper macros/inlines to get the ktime_t math right in the timespec
130 * representation. The macros are sometimes ugly - their actual use is
131 * pretty okay-ish, given the circumstances. We do all this for
132 * performance reasons. The pure scalar nsec_t based code was nice and
133 * simple, but created too many 64-bit / 32-bit conversions and divisions.
134 *
135 * Be especially aware that negative values are represented in a way
136 * that the tv.sec field is negative and the tv.nsec field is greater
137 * or equal to zero but less than nanoseconds per second. This is the
138 * same representation which is used by timespecs.
139 *
140 * tv.sec < 0 and 0 >= tv.nsec < NSEC_PER_SEC
141 */
142
143/* Set a ktime_t variable to a value in sec/nsec representation: */
144static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
145{
146 return (ktime_t) { .tv = { .sec = secs, .nsec = nsecs } };
147}
148
149/**
150 * ktime_sub - subtract two ktime_t variables
151 * @lhs: minuend
152 * @rhs: subtrahend
153 *
154 * Return: The remainder of the subtraction.
155 */
156static inline ktime_t ktime_sub(const ktime_t lhs, const ktime_t rhs)
157{
158 ktime_t res;
159
160 res.tv64 = lhs.tv64 - rhs.tv64;
161 if (res.tv.nsec < 0)
162 res.tv.nsec += NSEC_PER_SEC;
163
164 return res;
165}
166
167/**
168 * ktime_add - add two ktime_t variables
169 * @add1: addend1
170 * @add2: addend2
171 *
172 * Return: The sum of @add1 and @add2.
173 */
174static inline ktime_t ktime_add(const ktime_t add1, const ktime_t add2)
175{
176 ktime_t res;
177
178 res.tv64 = add1.tv64 + add2.tv64;
179 /*
180 * performance trick: the (u32) -NSEC gives 0x00000000Fxxxxxxx
181 * so we subtract NSEC_PER_SEC and add 1 to the upper 32 bit.
182 *
183 * it's equivalent to:
184 * tv.nsec -= NSEC_PER_SEC
185 * tv.sec ++;
186 */
187 if (res.tv.nsec >= NSEC_PER_SEC)
188 res.tv64 += (u32)-NSEC_PER_SEC;
189
190 return res;
191}
192
193/**
194 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
195 * @kt: addend
196 * @nsec: the scalar nsec value to add
197 *
198 * Return: The sum of @kt and @nsec in ktime_t format.
199 */
200extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec);
201
202/**
203 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
204 * @kt: minuend
205 * @nsec: the scalar nsec value to subtract
206 *
207 * Return: The subtraction of @nsec from @kt in ktime_t format.
208 */
209extern ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec);
210
211/**
212 * timespec_to_ktime - convert a timespec to ktime_t format
213 * @ts: the timespec variable to convert
214 *
215 * Return: A ktime_t variable with the converted timespec value.
216 */
217static inline ktime_t timespec_to_ktime(const struct timespec ts)
218{
219 return (ktime_t) { .tv = { .sec = (s32)ts.tv_sec,
220 .nsec = (s32)ts.tv_nsec } };
221}
222
223/**
224 * timeval_to_ktime - convert a timeval to ktime_t format
225 * @tv: the timeval variable to convert
226 *
227 * Return: A ktime_t variable with the converted timeval value.
228 */
229static inline ktime_t timeval_to_ktime(const struct timeval tv)
230{
231 return (ktime_t) { .tv = { .sec = (s32)tv.tv_sec,
232 .nsec = (s32)(tv.tv_usec *
233 NSEC_PER_USEC) } };
234}
235
236/**
237 * ktime_to_timespec - convert a ktime_t variable to timespec format
238 * @kt: the ktime_t variable to convert
239 *
240 * Return: The timespec representation of the ktime value.
241 */
242static inline struct timespec ktime_to_timespec(const ktime_t kt)
243{
244 return (struct timespec) { .tv_sec = (time_t) kt.tv.sec,
245 .tv_nsec = (long) kt.tv.nsec };
246}
247
248/**
249 * ktime_to_timeval - convert a ktime_t variable to timeval format
250 * @kt: the ktime_t variable to convert
251 *
252 * Return: The timeval representation of the ktime value.
253 */
254static inline struct timeval ktime_to_timeval(const ktime_t kt)
255{
256 return (struct timeval) {
257 .tv_sec = (time_t) kt.tv.sec,
258 .tv_usec = (suseconds_t) (kt.tv.nsec / NSEC_PER_USEC) };
259}
260
261/**
262 * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds
263 * @kt: the ktime_t variable to convert
264 *
265 * Return: The scalar nanoseconds representation of @kt.
266 */
267static inline s64 ktime_to_ns(const ktime_t kt)
268{
269 return (s64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec;
270}
271
272#endif /* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */
273 110
274/** 111/**
275 * ktime_equal - Compares two ktime_t variables to see if they are equal 112 * ktime_equal - Compares two ktime_t variables to see if they are equal
@@ -328,16 +165,20 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
328 return ktime_compare(cmp1, cmp2) < 0; 165 return ktime_compare(cmp1, cmp2) < 0;
329} 166}
330 167
168#if BITS_PER_LONG < 64
169extern u64 ktime_divns(const ktime_t kt, s64 div);
170#else /* BITS_PER_LONG < 64 */
171# define ktime_divns(kt, div) (u64)((kt).tv64 / (div))
172#endif
173
331static inline s64 ktime_to_us(const ktime_t kt) 174static inline s64 ktime_to_us(const ktime_t kt)
332{ 175{
333 struct timeval tv = ktime_to_timeval(kt); 176 return ktime_divns(kt, NSEC_PER_USEC);
334 return (s64) tv.tv_sec * USEC_PER_SEC + tv.tv_usec;
335} 177}
336 178
337static inline s64 ktime_to_ms(const ktime_t kt) 179static inline s64 ktime_to_ms(const ktime_t kt)
338{ 180{
339 struct timeval tv = ktime_to_timeval(kt); 181 return ktime_divns(kt, NSEC_PER_MSEC);
340 return (s64) tv.tv_sec * MSEC_PER_SEC + tv.tv_usec / USEC_PER_MSEC;
341} 182}
342 183
343static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier) 184static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
@@ -381,6 +222,25 @@ static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
381 } 222 }
382} 223}
383 224
225/**
226 * ktime_to_timespec64_cond - convert a ktime_t variable to timespec64
227 * format only if the variable contains data
228 * @kt: the ktime_t variable to convert
229 * @ts: the timespec variable to store the result in
230 *
231 * Return: %true if there was a successful conversion, %false if kt was 0.
232 */
233static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
234 struct timespec64 *ts)
235{
236 if (kt.tv64) {
237 *ts = ktime_to_timespec64(kt);
238 return true;
239 } else {
240 return false;
241 }
242}
243
384/* 244/*
385 * The resolution of the clocks. The resolution value is returned in 245 * The resolution of the clocks. The resolution value is returned in
386 * the clock_getres() system call to give application programmers an 246 * the clock_getres() system call to give application programmers an
@@ -390,12 +250,6 @@ static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
390#define LOW_RES_NSEC TICK_NSEC 250#define LOW_RES_NSEC TICK_NSEC
391#define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC } 251#define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC }
392 252
393/* Get the monotonic time in timespec format: */
394extern void ktime_get_ts(struct timespec *ts);
395
396/* Get the real (wall-) time in timespec format: */
397#define ktime_get_real_ts(ts) getnstimeofday(ts)
398
399static inline ktime_t ns_to_ktime(u64 ns) 253static inline ktime_t ns_to_ktime(u64 ns)
400{ 254{
401 static const ktime_t ktime_zero = { .tv64 = 0 }; 255 static const ktime_t ktime_zero = { .tv64 = 0 };
@@ -410,4 +264,6 @@ static inline ktime_t ms_to_ktime(u64 ms)
410 return ktime_add_ms(ktime_zero, ms); 264 return ktime_add_ms(ktime_zero, ms);
411} 265}
412 266
267# include <linux/timekeeping.h>
268
413#endif 269#endif
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 2bce4aad2570..52d631ca32cf 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -604,8 +604,8 @@ struct mlx5_cmd_work_ent {
604 int page_queue; 604 int page_queue;
605 u8 status; 605 u8 status;
606 u8 token; 606 u8 token;
607 struct timespec ts1; 607 u64 ts1;
608 struct timespec ts2; 608 u64 ts2;
609 u16 op; 609 u16 op;
610}; 610};
611 611
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index c13b8782a4eb..fb7b7221e063 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -109,7 +109,12 @@ static inline bool of_dma_is_coherent(struct device_node *np)
109extern int of_address_to_resource(struct device_node *dev, int index, 109extern int of_address_to_resource(struct device_node *dev, int index,
110 struct resource *r); 110 struct resource *r);
111void __iomem *of_iomap(struct device_node *node, int index); 111void __iomem *of_iomap(struct device_node *node, int index);
112void __iomem *of_io_request_and_map(struct device_node *device,
113 int index, char *name);
112#else 114#else
115
116#include <linux/io.h>
117
113static inline int of_address_to_resource(struct device_node *dev, int index, 118static inline int of_address_to_resource(struct device_node *dev, int index,
114 struct resource *r) 119 struct resource *r)
115{ 120{
@@ -120,6 +125,12 @@ static inline void __iomem *of_iomap(struct device_node *device, int index)
120{ 125{
121 return NULL; 126 return NULL;
122} 127}
128
129static inline void __iomem *of_io_request_and_map(struct device_node *device,
130 int index, char *name)
131{
132 return IOMEM_ERR_PTR(-EINVAL);
133}
123#endif 134#endif
124 135
125#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI) 136#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 42cac4dc2157..66124d63371a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -813,7 +813,7 @@ struct task_delay_info {
813 * associated with the operation is added to XXX_delay. 813 * associated with the operation is added to XXX_delay.
814 * XXX_delay contains the accumulated delay time in nanoseconds. 814 * XXX_delay contains the accumulated delay time in nanoseconds.
815 */ 815 */
816 struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */ 816 u64 blkio_start; /* Shared by blkio, swapin */
817 u64 blkio_delay; /* wait for sync block io completion */ 817 u64 blkio_delay; /* wait for sync block io completion */
818 u64 swapin_delay; /* wait for swapin block io completion */ 818 u64 swapin_delay; /* wait for swapin block io completion */
819 u32 blkio_count; /* total count of the number of sync block */ 819 u32 blkio_count; /* total count of the number of sync block */
@@ -821,7 +821,7 @@ struct task_delay_info {
821 u32 swapin_count; /* total count of the number of swapin block */ 821 u32 swapin_count; /* total count of the number of swapin block */
822 /* io operations performed */ 822 /* io operations performed */
823 823
824 struct timespec freepages_start, freepages_end; 824 u64 freepages_start;
825 u64 freepages_delay; /* wait for memory reclaim */ 825 u64 freepages_delay; /* wait for memory reclaim */
826 u32 freepages_count; /* total count of memory reclaim */ 826 u32 freepages_count; /* total count of memory reclaim */
827}; 827};
@@ -1364,8 +1364,8 @@ struct task_struct {
1364 } vtime_snap_whence; 1364 } vtime_snap_whence;
1365#endif 1365#endif
1366 unsigned long nvcsw, nivcsw; /* context switch counts */ 1366 unsigned long nvcsw, nivcsw; /* context switch counts */
1367 struct timespec start_time; /* monotonic time */ 1367 u64 start_time; /* monotonic time in nsec */
1368 struct timespec real_start_time; /* boot based time */ 1368 u64 real_start_time; /* boot based time in nsec */
1369/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ 1369/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1370 unsigned long min_flt, maj_flt; 1370 unsigned long min_flt, maj_flt;
1371 1371
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 8cf350325dc6..cc359636cfa3 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -117,6 +117,22 @@ repeat:
117} 117}
118 118
119/** 119/**
120 * raw_read_seqcount - Read the raw seqcount
121 * @s: pointer to seqcount_t
122 * Returns: count to be passed to read_seqcount_retry
123 *
124 * raw_read_seqcount opens a read critical section of the given
125 * seqcount without any lockdep checking and without checking or
126 * masking the LSB. Calling code is responsible for handling that.
127 */
128static inline unsigned raw_read_seqcount(const seqcount_t *s)
129{
130 unsigned ret = ACCESS_ONCE(s->sequence);
131 smp_rmb();
132 return ret;
133}
134
135/**
120 * raw_read_seqcount_begin - start seq-read critical section w/o lockdep 136 * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
121 * @s: pointer to seqcount_t 137 * @s: pointer to seqcount_t
122 * Returns: count to be passed to read_seqcount_retry 138 * Returns: count to be passed to read_seqcount_retry
@@ -218,6 +234,17 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
218} 234}
219 235
220/* 236/*
237 * raw_write_seqcount_latch - redirect readers to even/odd copy
238 * @s: pointer to seqcount_t
239 */
240static inline void raw_write_seqcount_latch(seqcount_t *s)
241{
242 smp_wmb(); /* prior stores before incrementing "sequence" */
243 s->sequence++;
244 smp_wmb(); /* increment "sequence" before following stores */
245}
246
247/*
221 * Sequence counter only version assumes that callers are using their 248 * Sequence counter only version assumes that callers are using their
222 * own mutexing. 249 * own mutexing.
223 */ 250 */
diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h
index 8e1e036d6d45..64638b058076 100644
--- a/include/linux/sh_timer.h
+++ b/include/linux/sh_timer.h
@@ -2,11 +2,6 @@
2#define __SH_TIMER_H__ 2#define __SH_TIMER_H__
3 3
4struct sh_timer_config { 4struct sh_timer_config {
5 char *name;
6 long channel_offset;
7 int timer_bit;
8 unsigned long clockevent_rating;
9 unsigned long clocksource_rating;
10 unsigned int channels_mask; 5 unsigned int channels_mask;
11}; 6};
12 7
diff --git a/include/linux/time.h b/include/linux/time.h
index d5d229b2e5af..8c42cf8d2444 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -4,19 +4,10 @@
4# include <linux/cache.h> 4# include <linux/cache.h>
5# include <linux/seqlock.h> 5# include <linux/seqlock.h>
6# include <linux/math64.h> 6# include <linux/math64.h>
7#include <uapi/linux/time.h> 7# include <linux/time64.h>
8 8
9extern struct timezone sys_tz; 9extern struct timezone sys_tz;
10 10
11/* Parameters used to convert the timespec values: */
12#define MSEC_PER_SEC 1000L
13#define USEC_PER_MSEC 1000L
14#define NSEC_PER_USEC 1000L
15#define NSEC_PER_MSEC 1000000L
16#define USEC_PER_SEC 1000000L
17#define NSEC_PER_SEC 1000000000L
18#define FSEC_PER_SEC 1000000000000000LL
19
20#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) 11#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
21 12
22static inline int timespec_equal(const struct timespec *a, 13static inline int timespec_equal(const struct timespec *a,
@@ -84,13 +75,6 @@ static inline struct timespec timespec_sub(struct timespec lhs,
84 return ts_delta; 75 return ts_delta;
85} 76}
86 77
87#define KTIME_MAX ((s64)~((u64)1 << 63))
88#if (BITS_PER_LONG == 64)
89# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
90#else
91# define KTIME_SEC_MAX LONG_MAX
92#endif
93
94/* 78/*
95 * Returns true if the timespec is norm, false if denorm: 79 * Returns true if the timespec is norm, false if denorm:
96 */ 80 */
@@ -115,27 +99,7 @@ static inline bool timespec_valid_strict(const struct timespec *ts)
115 return true; 99 return true;
116} 100}
117 101
118extern bool persistent_clock_exist; 102extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
119
120static inline bool has_persistent_clock(void)
121{
122 return persistent_clock_exist;
123}
124
125extern void read_persistent_clock(struct timespec *ts);
126extern void read_boot_clock(struct timespec *ts);
127extern int persistent_clock_is_local;
128extern int update_persistent_clock(struct timespec now);
129void timekeeping_init(void);
130extern int timekeeping_suspended;
131
132unsigned long get_seconds(void);
133struct timespec current_kernel_time(void);
134struct timespec __current_kernel_time(void); /* does not take xtime_lock */
135struct timespec get_monotonic_coarse(void);
136void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
137 struct timespec *wtom, struct timespec *sleep);
138void timekeeping_inject_sleeptime(struct timespec *delta);
139 103
140#define CURRENT_TIME (current_kernel_time()) 104#define CURRENT_TIME (current_kernel_time())
141#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) 105#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })
@@ -153,33 +117,14 @@ void timekeeping_inject_sleeptime(struct timespec *delta);
153extern u32 (*arch_gettimeoffset)(void); 117extern u32 (*arch_gettimeoffset)(void);
154#endif 118#endif
155 119
156extern void do_gettimeofday(struct timeval *tv);
157extern int do_settimeofday(const struct timespec *tv);
158extern int do_sys_settimeofday(const struct timespec *tv,
159 const struct timezone *tz);
160#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
161extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags);
162struct itimerval; 120struct itimerval;
163extern int do_setitimer(int which, struct itimerval *value, 121extern int do_setitimer(int which, struct itimerval *value,
164 struct itimerval *ovalue); 122 struct itimerval *ovalue);
165extern unsigned int alarm_setitimer(unsigned int seconds);
166extern int do_getitimer(int which, struct itimerval *value); 123extern int do_getitimer(int which, struct itimerval *value);
167extern int __getnstimeofday(struct timespec *tv);
168extern void getnstimeofday(struct timespec *tv);
169extern void getrawmonotonic(struct timespec *ts);
170extern void getnstime_raw_and_real(struct timespec *ts_raw,
171 struct timespec *ts_real);
172extern void getboottime(struct timespec *ts);
173extern void monotonic_to_bootbased(struct timespec *ts);
174extern void get_monotonic_boottime(struct timespec *ts);
175 124
176extern struct timespec timespec_trunc(struct timespec t, unsigned gran); 125extern unsigned int alarm_setitimer(unsigned int seconds);
177extern int timekeeping_valid_for_hres(void); 126
178extern u64 timekeeping_max_deferment(void); 127extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags);
179extern int timekeeping_inject_offset(struct timespec *ts);
180extern s32 timekeeping_get_tai_offset(void);
181extern void timekeeping_set_tai_offset(s32 tai_offset);
182extern void timekeeping_clocktai(struct timespec *ts);
183 128
184struct tms; 129struct tms;
185extern void do_sys_times(struct tms *); 130extern void do_sys_times(struct tms *);
diff --git a/include/linux/time64.h b/include/linux/time64.h
new file mode 100644
index 000000000000..a3831478d9cf
--- /dev/null
+++ b/include/linux/time64.h
@@ -0,0 +1,190 @@
1#ifndef _LINUX_TIME64_H
2#define _LINUX_TIME64_H
3
4#include <uapi/linux/time.h>
5
6typedef __s64 time64_t;
7
8/*
9 * This wants to go into uapi/linux/time.h once we agreed about the
10 * userspace interfaces.
11 */
12#if __BITS_PER_LONG == 64
13# define timespec64 timespec
14#else
15struct timespec64 {
16 time64_t tv_sec; /* seconds */
17 long tv_nsec; /* nanoseconds */
18};
19#endif
20
21/* Parameters used to convert the timespec values: */
22#define MSEC_PER_SEC 1000L
23#define USEC_PER_MSEC 1000L
24#define NSEC_PER_USEC 1000L
25#define NSEC_PER_MSEC 1000000L
26#define USEC_PER_SEC 1000000L
27#define NSEC_PER_SEC 1000000000L
28#define FSEC_PER_SEC 1000000000000000LL
29
30/* Located here for timespec[64]_valid_strict */
31#define KTIME_MAX ((s64)~((u64)1 << 63))
32#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
33
34#if __BITS_PER_LONG == 64
35
36static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
37{
38 return ts64;
39}
40
41static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
42{
43 return ts;
44}
45
46# define timespec64_equal timespec_equal
47# define timespec64_compare timespec_compare
48# define set_normalized_timespec64 set_normalized_timespec
49# define timespec64_add_safe timespec_add_safe
50# define timespec64_add timespec_add
51# define timespec64_sub timespec_sub
52# define timespec64_valid timespec_valid
53# define timespec64_valid_strict timespec_valid_strict
54# define timespec64_to_ns timespec_to_ns
55# define ns_to_timespec64 ns_to_timespec
56# define timespec64_add_ns timespec_add_ns
57
58#else
59
60static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
61{
62 struct timespec ret;
63
64 ret.tv_sec = (time_t)ts64.tv_sec;
65 ret.tv_nsec = ts64.tv_nsec;
66 return ret;
67}
68
69static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
70{
71 struct timespec64 ret;
72
73 ret.tv_sec = ts.tv_sec;
74 ret.tv_nsec = ts.tv_nsec;
75 return ret;
76}
77
78static inline int timespec64_equal(const struct timespec64 *a,
79 const struct timespec64 *b)
80{
81 return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
82}
83
84/*
85 * lhs < rhs: return <0
86 * lhs == rhs: return 0
87 * lhs > rhs: return >0
88 */
89static inline int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs)
90{
91 if (lhs->tv_sec < rhs->tv_sec)
92 return -1;
93 if (lhs->tv_sec > rhs->tv_sec)
94 return 1;
95 return lhs->tv_nsec - rhs->tv_nsec;
96}
97
98extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec);
99
100/*
101 * timespec64_add_safe assumes both values are positive and checks for
102 * overflow. It will return TIME_T_MAX if the returned value would be
103 * smaller then either of the arguments.
104 */
105extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
106 const struct timespec64 rhs);
107
108
109static inline struct timespec64 timespec64_add(struct timespec64 lhs,
110 struct timespec64 rhs)
111{
112 struct timespec64 ts_delta;
113 set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec,
114 lhs.tv_nsec + rhs.tv_nsec);
115 return ts_delta;
116}
117
118/*
119 * sub = lhs - rhs, in normalized form
120 */
121static inline struct timespec64 timespec64_sub(struct timespec64 lhs,
122 struct timespec64 rhs)
123{
124 struct timespec64 ts_delta;
125 set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec,
126 lhs.tv_nsec - rhs.tv_nsec);
127 return ts_delta;
128}
129
130/*
131 * Returns true if the timespec64 is norm, false if denorm:
132 */
133static inline bool timespec64_valid(const struct timespec64 *ts)
134{
135 /* Dates before 1970 are bogus */
136 if (ts->tv_sec < 0)
137 return false;
138 /* Can't have more nanoseconds then a second */
139 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
140 return false;
141 return true;
142}
143
144static inline bool timespec64_valid_strict(const struct timespec64 *ts)
145{
146 if (!timespec64_valid(ts))
147 return false;
148 /* Disallow values that could overflow ktime_t */
149 if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
150 return false;
151 return true;
152}
153
154/**
155 * timespec64_to_ns - Convert timespec64 to nanoseconds
156 * @ts: pointer to the timespec64 variable to be converted
157 *
158 * Returns the scalar nanosecond representation of the timespec64
159 * parameter.
160 */
161static inline s64 timespec64_to_ns(const struct timespec64 *ts)
162{
163 return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
164}
165
166/**
167 * ns_to_timespec64 - Convert nanoseconds to timespec64
168 * @nsec: the nanoseconds value to be converted
169 *
170 * Returns the timespec64 representation of the nsec parameter.
171 */
172extern struct timespec64 ns_to_timespec64(const s64 nsec);
173
174/**
175 * timespec64_add_ns - Adds nanoseconds to a timespec64
176 * @a: pointer to timespec64 to be incremented
177 * @ns: unsigned nanoseconds value to be added
178 *
179 * This must always be inlined because its used from the x86-64 vdso,
180 * which cannot call other kernel functions.
181 */
182static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns)
183{
184 a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
185 a->tv_nsec = ns;
186}
187
188#endif
189
190#endif /* _LINUX_TIME64_H */
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index c1825eb436ed..95640dcd1899 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -10,77 +10,100 @@
10#include <linux/jiffies.h> 10#include <linux/jiffies.h>
11#include <linux/time.h> 11#include <linux/time.h>
12 12
13/* Structure holding internal timekeeping values. */ 13/**
14struct timekeeper { 14 * struct tk_read_base - base structure for timekeeping readout
15 /* Current clocksource used for timekeeping. */ 15 * @clock: Current clocksource used for timekeeping.
16 * @read: Read function of @clock
17 * @mask: Bitmask for two's complement subtraction of non 64bit clocks
18 * @cycle_last: @clock cycle value at last update
19 * @mult: NTP adjusted multiplier for scaled math conversion
20 * @shift: Shift value for scaled math conversion
21 * @xtime_nsec: Shifted (fractional) nano seconds offset for readout
22 * @base_mono: ktime_t (nanoseconds) base time for readout
23 *
24 * This struct has size 56 byte on 64 bit. Together with a seqcount it
25 * occupies a single 64byte cache line.
26 *
27 * The struct is separate from struct timekeeper as it is also used
28 * for a fast NMI safe accessor to clock monotonic.
29 */
30struct tk_read_base {
16 struct clocksource *clock; 31 struct clocksource *clock;
17 /* NTP adjusted clock multiplier */ 32 cycle_t (*read)(struct clocksource *cs);
33 cycle_t mask;
34 cycle_t cycle_last;
18 u32 mult; 35 u32 mult;
19 /* The shift value of the current clocksource. */
20 u32 shift; 36 u32 shift;
21 /* Number of clock cycles in one NTP interval. */ 37 u64 xtime_nsec;
38 ktime_t base_mono;
39};
40
41/**
42 * struct timekeeper - Structure holding internal timekeeping values.
43 * @tkr: The readout base structure
44 * @xtime_sec: Current CLOCK_REALTIME time in seconds
45 * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset
46 * @offs_real: Offset clock monotonic -> clock realtime
47 * @offs_boot: Offset clock monotonic -> clock boottime
48 * @offs_tai: Offset clock monotonic -> clock tai
49 * @tai_offset: The current UTC to TAI offset in seconds
50 * @base_raw: Monotonic raw base time in ktime_t format
51 * @raw_time: Monotonic raw base time in timespec64 format
52 * @cycle_interval: Number of clock cycles in one NTP interval
53 * @xtime_interval: Number of clock shifted nano seconds in one NTP
54 * interval.
55 * @xtime_remainder: Shifted nano seconds left over when rounding
56 * @cycle_interval
57 * @raw_interval: Raw nano seconds accumulated per NTP interval.
58 * @ntp_error: Difference between accumulated time and NTP time in ntp
59 * shifted nano seconds.
60 * @ntp_error_shift: Shift conversion between clock shifted nano seconds and
61 * ntp shifted nano seconds.
62 *
63 * Note: For timespec(64) based interfaces wall_to_monotonic is what
64 * we need to add to xtime (or xtime corrected for sub jiffie times)
65 * to get to monotonic time. Monotonic is pegged at zero at system
66 * boot time, so wall_to_monotonic will be negative, however, we will
67 * ALWAYS keep the tv_nsec part positive so we can use the usual
68 * normalization.
69 *
70 * wall_to_monotonic is moved after resume from suspend for the
71 * monotonic time not to jump. We need to add total_sleep_time to
72 * wall_to_monotonic to get the real boot based time offset.
73 *
74 * wall_to_monotonic is no longer the boot time, getboottime must be
75 * used instead.
76 */
77struct timekeeper {
78 struct tk_read_base tkr;
79 u64 xtime_sec;
80 struct timespec64 wall_to_monotonic;
81 ktime_t offs_real;
82 ktime_t offs_boot;
83 ktime_t offs_tai;
84 s32 tai_offset;
85 ktime_t base_raw;
86 struct timespec64 raw_time;
87
88 /* The following members are for timekeeping internal use */
22 cycle_t cycle_interval; 89 cycle_t cycle_interval;
23 /* Last cycle value (also stored in clock->cycle_last) */
24 cycle_t cycle_last;
25 /* Number of clock shifted nano seconds in one NTP interval. */
26 u64 xtime_interval; 90 u64 xtime_interval;
27 /* shifted nano seconds left over when rounding cycle_interval */
28 s64 xtime_remainder; 91 s64 xtime_remainder;
29 /* Raw nano seconds accumulated per NTP interval. */
30 u32 raw_interval; 92 u32 raw_interval;
31 93 /* The ntp_tick_length() value currently being used.
32 /* Current CLOCK_REALTIME time in seconds */ 94 * This cached copy ensures we consistently apply the tick
33 u64 xtime_sec; 95 * length for an entire tick, as ntp_tick_length may change
34 /* Clock shifted nano seconds */ 96 * mid-tick, and we don't want to apply that new value to
35 u64 xtime_nsec; 97 * the tick in progress.
36 98 */
99 u64 ntp_tick;
37 /* Difference between accumulated time and NTP time in ntp 100 /* Difference between accumulated time and NTP time in ntp
38 * shifted nano seconds. */ 101 * shifted nano seconds. */
39 s64 ntp_error; 102 s64 ntp_error;
40 /* Shift conversion between clock shifted nano seconds and
41 * ntp shifted nano seconds. */
42 u32 ntp_error_shift; 103 u32 ntp_error_shift;
43 104 u32 ntp_err_mult;
44 /*
45 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
46 * for sub jiffie times) to get to monotonic time. Monotonic is pegged
47 * at zero at system boot time, so wall_to_monotonic will be negative,
48 * however, we will ALWAYS keep the tv_nsec part positive so we can use
49 * the usual normalization.
50 *
51 * wall_to_monotonic is moved after resume from suspend for the
52 * monotonic time not to jump. We need to add total_sleep_time to
53 * wall_to_monotonic to get the real boot based time offset.
54 *
55 * - wall_to_monotonic is no longer the boot time, getboottime must be
56 * used instead.
57 */
58 struct timespec wall_to_monotonic;
59 /* Offset clock monotonic -> clock realtime */
60 ktime_t offs_real;
61 /* time spent in suspend */
62 struct timespec total_sleep_time;
63 /* Offset clock monotonic -> clock boottime */
64 ktime_t offs_boot;
65 /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
66 struct timespec raw_time;
67 /* The current UTC to TAI offset in seconds */
68 s32 tai_offset;
69 /* Offset clock monotonic -> clock tai */
70 ktime_t offs_tai;
71
72}; 105};
73 106
74static inline struct timespec tk_xtime(struct timekeeper *tk)
75{
76 struct timespec ts;
77
78 ts.tv_sec = tk->xtime_sec;
79 ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
80 return ts;
81}
82
83
84#ifdef CONFIG_GENERIC_TIME_VSYSCALL 107#ifdef CONFIG_GENERIC_TIME_VSYSCALL
85 108
86extern void update_vsyscall(struct timekeeper *tk); 109extern void update_vsyscall(struct timekeeper *tk);
@@ -89,17 +112,10 @@ extern void update_vsyscall_tz(void);
89#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD) 112#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)
90 113
91extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm, 114extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
92 struct clocksource *c, u32 mult); 115 struct clocksource *c, u32 mult,
116 cycle_t cycle_last);
93extern void update_vsyscall_tz(void); 117extern void update_vsyscall_tz(void);
94 118
95static inline void update_vsyscall(struct timekeeper *tk)
96{
97 struct timespec xt;
98
99 xt = tk_xtime(tk);
100 update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
101}
102
103#else 119#else
104 120
105static inline void update_vsyscall(struct timekeeper *tk) 121static inline void update_vsyscall(struct timekeeper *tk)
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
new file mode 100644
index 000000000000..1caa6b04fdc5
--- /dev/null
+++ b/include/linux/timekeeping.h
@@ -0,0 +1,209 @@
1#ifndef _LINUX_TIMEKEEPING_H
2#define _LINUX_TIMEKEEPING_H
3
4/* Included from linux/ktime.h */
5
6void timekeeping_init(void);
7extern int timekeeping_suspended;
8
9/*
10 * Get and set timeofday
11 */
12extern void do_gettimeofday(struct timeval *tv);
13extern int do_settimeofday(const struct timespec *tv);
14extern int do_sys_settimeofday(const struct timespec *tv,
15 const struct timezone *tz);
16
17/*
18 * Kernel time accessors
19 */
20unsigned long get_seconds(void);
21struct timespec current_kernel_time(void);
22/* does not take xtime_lock */
23struct timespec __current_kernel_time(void);
24
25/*
26 * timespec based interfaces
27 */
28struct timespec get_monotonic_coarse(void);
29extern void getrawmonotonic(struct timespec *ts);
30extern void ktime_get_ts64(struct timespec64 *ts);
31
32extern int __getnstimeofday64(struct timespec64 *tv);
33extern void getnstimeofday64(struct timespec64 *tv);
34
35#if BITS_PER_LONG == 64
36static inline int __getnstimeofday(struct timespec *ts)
37{
38 return __getnstimeofday64(ts);
39}
40
41static inline void getnstimeofday(struct timespec *ts)
42{
43 getnstimeofday64(ts);
44}
45
46static inline void ktime_get_ts(struct timespec *ts)
47{
48 ktime_get_ts64(ts);
49}
50
51static inline void ktime_get_real_ts(struct timespec *ts)
52{
53 getnstimeofday64(ts);
54}
55
56#else
57static inline int __getnstimeofday(struct timespec *ts)
58{
59 struct timespec64 ts64;
60 int ret = __getnstimeofday64(&ts64);
61
62 *ts = timespec64_to_timespec(ts64);
63 return ret;
64}
65
66static inline void getnstimeofday(struct timespec *ts)
67{
68 struct timespec64 ts64;
69
70 getnstimeofday64(&ts64);
71 *ts = timespec64_to_timespec(ts64);
72}
73
74static inline void ktime_get_ts(struct timespec *ts)
75{
76 struct timespec64 ts64;
77
78 ktime_get_ts64(&ts64);
79 *ts = timespec64_to_timespec(ts64);
80}
81
82static inline void ktime_get_real_ts(struct timespec *ts)
83{
84 struct timespec64 ts64;
85
86 getnstimeofday64(&ts64);
87 *ts = timespec64_to_timespec(ts64);
88}
89#endif
90
91extern void getboottime(struct timespec *ts);
92
93#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
94#define ktime_get_real_ts64(ts) getnstimeofday64(ts)
95
96/*
97 * ktime_t based interfaces
98 */
99
100enum tk_offsets {
101 TK_OFFS_REAL,
102 TK_OFFS_BOOT,
103 TK_OFFS_TAI,
104 TK_OFFS_MAX,
105};
106
107extern ktime_t ktime_get(void);
108extern ktime_t ktime_get_with_offset(enum tk_offsets offs);
109extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
110extern ktime_t ktime_get_raw(void);
111
112/**
113 * ktime_get_real - get the real (wall-) time in ktime_t format
114 */
115static inline ktime_t ktime_get_real(void)
116{
117 return ktime_get_with_offset(TK_OFFS_REAL);
118}
119
120/**
121 * ktime_get_boottime - Returns monotonic time since boot in ktime_t format
122 *
123 * This is similar to CLOCK_MONTONIC/ktime_get, but also includes the
124 * time spent in suspend.
125 */
126static inline ktime_t ktime_get_boottime(void)
127{
128 return ktime_get_with_offset(TK_OFFS_BOOT);
129}
130
131/**
132 * ktime_get_clocktai - Returns the TAI time of day in ktime_t format
133 */
134static inline ktime_t ktime_get_clocktai(void)
135{
136 return ktime_get_with_offset(TK_OFFS_TAI);
137}
138
139/**
140 * ktime_mono_to_real - Convert monotonic time to clock realtime
141 */
142static inline ktime_t ktime_mono_to_real(ktime_t mono)
143{
144 return ktime_mono_to_any(mono, TK_OFFS_REAL);
145}
146
147static inline u64 ktime_get_ns(void)
148{
149 return ktime_to_ns(ktime_get());
150}
151
152static inline u64 ktime_get_real_ns(void)
153{
154 return ktime_to_ns(ktime_get_real());
155}
156
157static inline u64 ktime_get_boot_ns(void)
158{
159 return ktime_to_ns(ktime_get_boottime());
160}
161
162static inline u64 ktime_get_raw_ns(void)
163{
164 return ktime_to_ns(ktime_get_raw());
165}
166
167extern u64 ktime_get_mono_fast_ns(void);
168
169/*
170 * Timespec interfaces utilizing the ktime based ones
171 */
172static inline void get_monotonic_boottime(struct timespec *ts)
173{
174 *ts = ktime_to_timespec(ktime_get_boottime());
175}
176
177static inline void timekeeping_clocktai(struct timespec *ts)
178{
179 *ts = ktime_to_timespec(ktime_get_clocktai());
180}
181
182/*
183 * RTC specific
184 */
185extern void timekeeping_inject_sleeptime(struct timespec *delta);
186
187/*
188 * PPS accessor
189 */
190extern void getnstime_raw_and_real(struct timespec *ts_raw,
191 struct timespec *ts_real);
192
193/*
194 * Persistent clock related interfaces
195 */
196extern bool persistent_clock_exist;
197extern int persistent_clock_is_local;
198
199static inline bool has_persistent_clock(void)
200{
201 return persistent_clock_exist;
202}
203
204extern void read_persistent_clock(struct timespec *ts);
205extern void read_boot_clock(struct timespec *ts);
206extern int update_persistent_clock(struct timespec now);
207
208
209#endif
diff --git a/include/linux/timerfd.h b/include/linux/timerfd.h
index d3b57fa12225..bd36ce431e32 100644
--- a/include/linux/timerfd.h
+++ b/include/linux/timerfd.h
@@ -11,6 +11,9 @@
11/* For O_CLOEXEC and O_NONBLOCK */ 11/* For O_CLOEXEC and O_NONBLOCK */
12#include <linux/fcntl.h> 12#include <linux/fcntl.h>
13 13
14/* For _IO helpers */
15#include <linux/ioctl.h>
16
14/* 17/*
15 * CAREFUL: Check include/asm-generic/fcntl.h when defining 18 * CAREFUL: Check include/asm-generic/fcntl.h when defining
16 * new flags, since they might collide with O_* ones. We want 19 * new flags, since they might collide with O_* ones. We want
@@ -29,4 +32,6 @@
29/* Flags for timerfd_settime. */ 32/* Flags for timerfd_settime. */
30#define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET) 33#define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET)
31 34
35#define TFD_IOC_SET_TICKS _IOW('T', 0, u64)
36
32#endif /* _LINUX_TIMERFD_H */ 37#endif /* _LINUX_TIMERFD_H */
diff --git a/kernel/Makefile b/kernel/Makefile
index f2a8b6246ce9..973a40cf8068 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -3,12 +3,11 @@
3# 3#
4 4
5obj-y = fork.o exec_domain.o panic.o \ 5obj-y = fork.o exec_domain.o panic.o \
6 cpu.o exit.o itimer.o time.o softirq.o resource.o \ 6 cpu.o exit.o softirq.o resource.o \
7 sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \ 7 sysctl.o sysctl_binary.o capability.o ptrace.o user.o \
8 signal.o sys.o kmod.o workqueue.o pid.o task_work.o \ 8 signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
9 extable.o params.o posix-timers.o \ 9 extable.o params.o \
10 kthread.o sys_ni.o posix-cpu-timers.o \ 10 kthread.o sys_ni.o nsproxy.o \
11 hrtimer.o nsproxy.o \
12 notifier.o ksysfs.o cred.o reboot.o \ 11 notifier.o ksysfs.o cred.o reboot.o \
13 async.o range.o groups.o smpboot.o 12 async.o range.o groups.o smpboot.o
14 13
@@ -110,22 +109,6 @@ targets += config_data.h
110$(obj)/config_data.h: $(obj)/config_data.gz FORCE 109$(obj)/config_data.h: $(obj)/config_data.gz FORCE
111 $(call filechk,ikconfiggz) 110 $(call filechk,ikconfiggz)
112 111
113$(obj)/time.o: $(obj)/timeconst.h
114
115quiet_cmd_hzfile = HZFILE $@
116 cmd_hzfile = echo "hz=$(CONFIG_HZ)" > $@
117
118targets += hz.bc
119$(obj)/hz.bc: $(objtree)/include/config/hz.h FORCE
120 $(call if_changed,hzfile)
121
122quiet_cmd_bc = BC $@
123 cmd_bc = bc -q $(filter-out FORCE,$^) > $@
124
125targets += timeconst.h
126$(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
127 $(call if_changed,bc)
128
129############################################################################### 112###############################################################################
130# 113#
131# Roll all the X.509 certificates that we can find together and pull them into 114# Roll all the X.509 certificates that we can find together and pull them into
diff --git a/kernel/acct.c b/kernel/acct.c
index 808a86ff229d..a1844f14c6d6 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -458,9 +458,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
458 acct_t ac; 458 acct_t ac;
459 mm_segment_t fs; 459 mm_segment_t fs;
460 unsigned long flim; 460 unsigned long flim;
461 u64 elapsed; 461 u64 elapsed, run_time;
462 u64 run_time;
463 struct timespec uptime;
464 struct tty_struct *tty; 462 struct tty_struct *tty;
465 const struct cred *orig_cred; 463 const struct cred *orig_cred;
466 464
@@ -484,10 +482,8 @@ static void do_acct_process(struct bsd_acct_struct *acct,
484 strlcpy(ac.ac_comm, current->comm, sizeof(ac.ac_comm)); 482 strlcpy(ac.ac_comm, current->comm, sizeof(ac.ac_comm));
485 483
486 /* calculate run_time in nsec*/ 484 /* calculate run_time in nsec*/
487 do_posix_clock_monotonic_gettime(&uptime); 485 run_time = ktime_get_ns();
488 run_time = (u64)uptime.tv_sec*NSEC_PER_SEC + uptime.tv_nsec; 486 run_time -= current->group_leader->start_time;
489 run_time -= (u64)current->group_leader->start_time.tv_sec * NSEC_PER_SEC
490 + current->group_leader->start_time.tv_nsec;
491 /* convert nsec -> AHZ */ 487 /* convert nsec -> AHZ */
492 elapsed = nsec_to_AHZ(run_time); 488 elapsed = nsec_to_AHZ(run_time);
493#if ACCT_VERSION==3 489#if ACCT_VERSION==3
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 2f7c760305ca..379650b984f8 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -2472,7 +2472,7 @@ static void kdb_gmtime(struct timespec *tv, struct kdb_tm *tm)
2472static void kdb_sysinfo(struct sysinfo *val) 2472static void kdb_sysinfo(struct sysinfo *val)
2473{ 2473{
2474 struct timespec uptime; 2474 struct timespec uptime;
2475 do_posix_clock_monotonic_gettime(&uptime); 2475 ktime_get_ts(&uptime);
2476 memset(val, 0, sizeof(*val)); 2476 memset(val, 0, sizeof(*val));
2477 val->uptime = uptime.tv_sec; 2477 val->uptime = uptime.tv_sec;
2478 val->loads[0] = avenrun[0]; 2478 val->loads[0] = avenrun[0];
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 54996b71e66d..ef90b04d783f 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -46,42 +46,25 @@ void __delayacct_tsk_init(struct task_struct *tsk)
46} 46}
47 47
48/* 48/*
49 * Start accounting for a delay statistic using 49 * Finish delay accounting for a statistic using its timestamps (@start),
50 * its starting timestamp (@start) 50 * accumalator (@total) and @count
51 */ 51 */
52 52static void delayacct_end(u64 *start, u64 *total, u32 *count)
53static inline void delayacct_start(struct timespec *start)
54{ 53{
55 do_posix_clock_monotonic_gettime(start); 54 s64 ns = ktime_get_ns() - *start;
56}
57
58/*
59 * Finish delay accounting for a statistic using
60 * its timestamps (@start, @end), accumalator (@total) and @count
61 */
62
63static void delayacct_end(struct timespec *start, struct timespec *end,
64 u64 *total, u32 *count)
65{
66 struct timespec ts;
67 s64 ns;
68 unsigned long flags; 55 unsigned long flags;
69 56
70 do_posix_clock_monotonic_gettime(end); 57 if (ns > 0) {
71 ts = timespec_sub(*end, *start); 58 spin_lock_irqsave(&current->delays->lock, flags);
72 ns = timespec_to_ns(&ts); 59 *total += ns;
73 if (ns < 0) 60 (*count)++;
74 return; 61 spin_unlock_irqrestore(&current->delays->lock, flags);
75 62 }
76 spin_lock_irqsave(&current->delays->lock, flags);
77 *total += ns;
78 (*count)++;
79 spin_unlock_irqrestore(&current->delays->lock, flags);
80} 63}
81 64
82void __delayacct_blkio_start(void) 65void __delayacct_blkio_start(void)
83{ 66{
84 delayacct_start(&current->delays->blkio_start); 67 current->delays->blkio_start = ktime_get_ns();
85} 68}
86 69
87void __delayacct_blkio_end(void) 70void __delayacct_blkio_end(void)
@@ -89,35 +72,29 @@ void __delayacct_blkio_end(void)
89 if (current->delays->flags & DELAYACCT_PF_SWAPIN) 72 if (current->delays->flags & DELAYACCT_PF_SWAPIN)
90 /* Swapin block I/O */ 73 /* Swapin block I/O */
91 delayacct_end(&current->delays->blkio_start, 74 delayacct_end(&current->delays->blkio_start,
92 &current->delays->blkio_end,
93 &current->delays->swapin_delay, 75 &current->delays->swapin_delay,
94 &current->delays->swapin_count); 76 &current->delays->swapin_count);
95 else /* Other block I/O */ 77 else /* Other block I/O */
96 delayacct_end(&current->delays->blkio_start, 78 delayacct_end(&current->delays->blkio_start,
97 &current->delays->blkio_end,
98 &current->delays->blkio_delay, 79 &current->delays->blkio_delay,
99 &current->delays->blkio_count); 80 &current->delays->blkio_count);
100} 81}
101 82
102int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) 83int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
103{ 84{
104 s64 tmp;
105 unsigned long t1;
106 unsigned long long t2, t3;
107 unsigned long flags;
108 struct timespec ts;
109 cputime_t utime, stime, stimescaled, utimescaled; 85 cputime_t utime, stime, stimescaled, utimescaled;
86 unsigned long long t2, t3;
87 unsigned long flags, t1;
88 s64 tmp;
110 89
111 tmp = (s64)d->cpu_run_real_total;
112 task_cputime(tsk, &utime, &stime); 90 task_cputime(tsk, &utime, &stime);
113 cputime_to_timespec(utime + stime, &ts); 91 tmp = (s64)d->cpu_run_real_total;
114 tmp += timespec_to_ns(&ts); 92 tmp += cputime_to_nsecs(utime + stime);
115 d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp; 93 d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;
116 94
117 tmp = (s64)d->cpu_scaled_run_real_total;
118 task_cputime_scaled(tsk, &utimescaled, &stimescaled); 95 task_cputime_scaled(tsk, &utimescaled, &stimescaled);
119 cputime_to_timespec(utimescaled + stimescaled, &ts); 96 tmp = (s64)d->cpu_scaled_run_real_total;
120 tmp += timespec_to_ns(&ts); 97 tmp += cputime_to_nsecs(utimescaled + stimescaled);
121 d->cpu_scaled_run_real_total = 98 d->cpu_scaled_run_real_total =
122 (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp; 99 (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;
123 100
@@ -169,13 +146,12 @@ __u64 __delayacct_blkio_ticks(struct task_struct *tsk)
169 146
170void __delayacct_freepages_start(void) 147void __delayacct_freepages_start(void)
171{ 148{
172 delayacct_start(&current->delays->freepages_start); 149 current->delays->freepages_start = ktime_get_ns();
173} 150}
174 151
175void __delayacct_freepages_end(void) 152void __delayacct_freepages_end(void)
176{ 153{
177 delayacct_end(&current->delays->freepages_start, 154 delayacct_end(&current->delays->freepages_start,
178 &current->delays->freepages_end,
179 &current->delays->freepages_delay, 155 &current->delays->freepages_delay,
180 &current->delays->freepages_count); 156 &current->delays->freepages_count);
181} 157}
diff --git a/kernel/fork.c b/kernel/fork.c
index 962885edbe53..5f1bf3bebb4f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1261,9 +1261,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1261 1261
1262 posix_cpu_timers_init(p); 1262 posix_cpu_timers_init(p);
1263 1263
1264 do_posix_clock_monotonic_gettime(&p->start_time); 1264 p->start_time = ktime_get_ns();
1265 p->real_start_time = p->start_time; 1265 p->real_start_time = ktime_get_boot_ns();
1266 monotonic_to_bootbased(&p->real_start_time);
1267 p->io_context = NULL; 1266 p->io_context = NULL;
1268 p->audit_context = NULL; 1267 p->audit_context = NULL;
1269 if (clone_flags & CLONE_THREAD) 1268 if (clone_flags & CLONE_THREAD)
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index f448513a45ed..d626dc98e8df 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -12,6 +12,11 @@ config CLOCKSOURCE_WATCHDOG
12config ARCH_CLOCKSOURCE_DATA 12config ARCH_CLOCKSOURCE_DATA
13 bool 13 bool
14 14
15# Clocksources require validation of the clocksource against the last
16# cycle update - x86/TSC misfeature
17config CLOCKSOURCE_VALIDATE_LAST_CYCLE
18 bool
19
15# Timekeeping vsyscall support 20# Timekeeping vsyscall support
16config GENERIC_TIME_VSYSCALL 21config GENERIC_TIME_VSYSCALL
17 bool 22 bool
@@ -20,10 +25,6 @@ config GENERIC_TIME_VSYSCALL
20config GENERIC_TIME_VSYSCALL_OLD 25config GENERIC_TIME_VSYSCALL_OLD
21 bool 26 bool
22 27
23# ktime_t scalar 64bit nsec representation
24config KTIME_SCALAR
25 bool
26
27# Old style timekeeping 28# Old style timekeeping
28config ARCH_USES_GETTIMEOFFSET 29config ARCH_USES_GETTIMEOFFSET
29 bool 30 bool
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 57a413fd0ebf..7347426fa68d 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,3 +1,4 @@
1obj-y += time.o timer.o hrtimer.o itimer.o posix-timers.o posix-cpu-timers.o
1obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o 2obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o
2obj-y += timeconv.o posix-clock.o alarmtimer.o 3obj-y += timeconv.o posix-clock.o alarmtimer.o
3 4
@@ -12,3 +13,21 @@ obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o
12obj-$(CONFIG_TICK_ONESHOT) += tick-sched.o 13obj-$(CONFIG_TICK_ONESHOT) += tick-sched.o
13obj-$(CONFIG_TIMER_STATS) += timer_stats.o 14obj-$(CONFIG_TIMER_STATS) += timer_stats.o
14obj-$(CONFIG_DEBUG_FS) += timekeeping_debug.o 15obj-$(CONFIG_DEBUG_FS) += timekeeping_debug.o
16obj-$(CONFIG_TEST_UDELAY) += udelay_test.o
17
18$(obj)/time.o: $(obj)/timeconst.h
19
20quiet_cmd_hzfile = HZFILE $@
21 cmd_hzfile = echo "hz=$(CONFIG_HZ)" > $@
22
23targets += hz.bc
24$(obj)/hz.bc: $(objtree)/include/config/hz.h FORCE
25 $(call if_changed,hzfile)
26
27quiet_cmd_bc = BC $@
28 cmd_bc = bc -q $(filter-out FORCE,$^) > $@
29
30targets += timeconst.h
31$(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
32 $(call if_changed,bc)
33
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index ba3e502c955a..2e949cc9c9f1 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -32,6 +32,7 @@
32#include <linux/kthread.h> 32#include <linux/kthread.h>
33 33
34#include "tick-internal.h" 34#include "tick-internal.h"
35#include "timekeeping_internal.h"
35 36
36void timecounter_init(struct timecounter *tc, 37void timecounter_init(struct timecounter *tc,
37 const struct cyclecounter *cc, 38 const struct cyclecounter *cc,
@@ -249,7 +250,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
249static void clocksource_watchdog(unsigned long data) 250static void clocksource_watchdog(unsigned long data)
250{ 251{
251 struct clocksource *cs; 252 struct clocksource *cs;
252 cycle_t csnow, wdnow; 253 cycle_t csnow, wdnow, delta;
253 int64_t wd_nsec, cs_nsec; 254 int64_t wd_nsec, cs_nsec;
254 int next_cpu, reset_pending; 255 int next_cpu, reset_pending;
255 256
@@ -282,11 +283,12 @@ static void clocksource_watchdog(unsigned long data)
282 continue; 283 continue;
283 } 284 }
284 285
285 wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask, 286 delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
286 watchdog->mult, watchdog->shift); 287 wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
288 watchdog->shift);
287 289
288 cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) & 290 delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
289 cs->mask, cs->mult, cs->shift); 291 cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
290 cs->cs_last = csnow; 292 cs->cs_last = csnow;
291 cs->wd_last = wdnow; 293 cs->wd_last = wdnow;
292 294
diff --git a/kernel/hrtimer.c b/kernel/time/hrtimer.c
index 3ab28993f6e0..1c2fe7de2842 100644
--- a/kernel/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -54,6 +54,8 @@
54 54
55#include <trace/events/timer.h> 55#include <trace/events/timer.h>
56 56
57#include "timekeeping.h"
58
57/* 59/*
58 * The timer bases: 60 * The timer bases:
59 * 61 *
@@ -114,21 +116,18 @@ static inline int hrtimer_clockid_to_base(clockid_t clock_id)
114 */ 116 */
115static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base) 117static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
116{ 118{
117 ktime_t xtim, mono, boot; 119 ktime_t xtim, mono, boot, tai;
118 struct timespec xts, tom, slp; 120 ktime_t off_real, off_boot, off_tai;
119 s32 tai_offset;
120 121
121 get_xtime_and_monotonic_and_sleep_offset(&xts, &tom, &slp); 122 mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai);
122 tai_offset = timekeeping_get_tai_offset(); 123 boot = ktime_add(mono, off_boot);
124 xtim = ktime_add(mono, off_real);
125 tai = ktime_add(xtim, off_tai);
123 126
124 xtim = timespec_to_ktime(xts);
125 mono = ktime_add(xtim, timespec_to_ktime(tom));
126 boot = ktime_add(mono, timespec_to_ktime(slp));
127 base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim; 127 base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
128 base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono; 128 base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
129 base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot; 129 base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot;
130 base->clock_base[HRTIMER_BASE_TAI].softirq_time = 130 base->clock_base[HRTIMER_BASE_TAI].softirq_time = tai;
131 ktime_add(xtim, ktime_set(tai_offset, 0));
132} 131}
133 132
134/* 133/*
@@ -264,60 +263,6 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
264 * too large for inlining: 263 * too large for inlining:
265 */ 264 */
266#if BITS_PER_LONG < 64 265#if BITS_PER_LONG < 64
267# ifndef CONFIG_KTIME_SCALAR
268/**
269 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
270 * @kt: addend
271 * @nsec: the scalar nsec value to add
272 *
273 * Returns the sum of kt and nsec in ktime_t format
274 */
275ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
276{
277 ktime_t tmp;
278
279 if (likely(nsec < NSEC_PER_SEC)) {
280 tmp.tv64 = nsec;
281 } else {
282 unsigned long rem = do_div(nsec, NSEC_PER_SEC);
283
284 /* Make sure nsec fits into long */
285 if (unlikely(nsec > KTIME_SEC_MAX))
286 return (ktime_t){ .tv64 = KTIME_MAX };
287
288 tmp = ktime_set((long)nsec, rem);
289 }
290
291 return ktime_add(kt, tmp);
292}
293
294EXPORT_SYMBOL_GPL(ktime_add_ns);
295
296/**
297 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
298 * @kt: minuend
299 * @nsec: the scalar nsec value to subtract
300 *
301 * Returns the subtraction of @nsec from @kt in ktime_t format
302 */
303ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
304{
305 ktime_t tmp;
306
307 if (likely(nsec < NSEC_PER_SEC)) {
308 tmp.tv64 = nsec;
309 } else {
310 unsigned long rem = do_div(nsec, NSEC_PER_SEC);
311
312 tmp = ktime_set((long)nsec, rem);
313 }
314
315 return ktime_sub(kt, tmp);
316}
317
318EXPORT_SYMBOL_GPL(ktime_sub_ns);
319# endif /* !CONFIG_KTIME_SCALAR */
320
321/* 266/*
322 * Divide a ktime value by a nanosecond value 267 * Divide a ktime value by a nanosecond value
323 */ 268 */
@@ -337,6 +282,7 @@ u64 ktime_divns(const ktime_t kt, s64 div)
337 282
338 return dclc; 283 return dclc;
339} 284}
285EXPORT_SYMBOL_GPL(ktime_divns);
340#endif /* BITS_PER_LONG >= 64 */ 286#endif /* BITS_PER_LONG >= 64 */
341 287
342/* 288/*
@@ -602,6 +548,11 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
602 * timers, we have to check, whether it expires earlier than the timer for 548 * timers, we have to check, whether it expires earlier than the timer for
603 * which the clock event device was armed. 549 * which the clock event device was armed.
604 * 550 *
551 * Note, that in case the state has HRTIMER_STATE_CALLBACK set, no reprogramming
552 * and no expiry check happens. The timer gets enqueued into the rbtree. The
553 * reprogramming and expiry check is done in the hrtimer_interrupt or in the
554 * softirq.
555 *
605 * Called with interrupts disabled and base->cpu_base.lock held 556 * Called with interrupts disabled and base->cpu_base.lock held
606 */ 557 */
607static int hrtimer_reprogram(struct hrtimer *timer, 558static int hrtimer_reprogram(struct hrtimer *timer,
@@ -662,25 +613,13 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
662 base->hres_active = 0; 613 base->hres_active = 0;
663} 614}
664 615
665/*
666 * When High resolution timers are active, try to reprogram. Note, that in case
667 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
668 * check happens. The timer gets enqueued into the rbtree. The reprogramming
669 * and expiry check is done in the hrtimer_interrupt or in the softirq.
670 */
671static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
672 struct hrtimer_clock_base *base)
673{
674 return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
675}
676
677static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) 616static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
678{ 617{
679 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset; 618 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
680 ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset; 619 ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
681 ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset; 620 ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
682 621
683 return ktime_get_update_offsets(offs_real, offs_boot, offs_tai); 622 return ktime_get_update_offsets_now(offs_real, offs_boot, offs_tai);
684} 623}
685 624
686/* 625/*
@@ -755,8 +694,8 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; }
755static inline int hrtimer_switch_to_hres(void) { return 0; } 694static inline int hrtimer_switch_to_hres(void) { return 0; }
756static inline void 695static inline void
757hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { } 696hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
758static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, 697static inline int hrtimer_reprogram(struct hrtimer *timer,
759 struct hrtimer_clock_base *base) 698 struct hrtimer_clock_base *base)
760{ 699{
761 return 0; 700 return 0;
762} 701}
@@ -1013,14 +952,25 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
1013 952
1014 leftmost = enqueue_hrtimer(timer, new_base); 953 leftmost = enqueue_hrtimer(timer, new_base);
1015 954
1016 /* 955 if (!leftmost) {
1017 * Only allow reprogramming if the new base is on this CPU. 956 unlock_hrtimer_base(timer, &flags);
1018 * (it might still be on another CPU if the timer was pending) 957 return ret;
1019 * 958 }
1020 * XXX send_remote_softirq() ? 959
1021 */ 960 if (!hrtimer_is_hres_active(timer)) {
1022 if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases) 961 /*
1023 && hrtimer_enqueue_reprogram(timer, new_base)) { 962 * Kick to reschedule the next tick to handle the new timer
963 * on dynticks target.
964 */
965 wake_up_nohz_cpu(new_base->cpu_base->cpu);
966 } else if (new_base->cpu_base == &__get_cpu_var(hrtimer_bases) &&
967 hrtimer_reprogram(timer, new_base)) {
968 /*
969 * Only allow reprogramming if the new base is on this CPU.
970 * (it might still be on another CPU if the timer was pending)
971 *
972 * XXX send_remote_softirq() ?
973 */
1024 if (wakeup) { 974 if (wakeup) {
1025 /* 975 /*
1026 * We need to drop cpu_base->lock to avoid a 976 * We need to drop cpu_base->lock to avoid a
@@ -1680,6 +1630,7 @@ static void init_hrtimers_cpu(int cpu)
1680 timerqueue_init_head(&cpu_base->clock_base[i].active); 1630 timerqueue_init_head(&cpu_base->clock_base[i].active);
1681 } 1631 }
1682 1632
1633 cpu_base->cpu = cpu;
1683 hrtimer_init_hres(cpu_base); 1634 hrtimer_init_hres(cpu_base);
1684} 1635}
1685 1636
diff --git a/kernel/itimer.c b/kernel/time/itimer.c
index 8d262b467573..8d262b467573 100644
--- a/kernel/itimer.c
+++ b/kernel/time/itimer.c
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 33db43a39515..87a346fd6d61 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -466,7 +466,8 @@ static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
466 466
467static void sync_cmos_clock(struct work_struct *work) 467static void sync_cmos_clock(struct work_struct *work)
468{ 468{
469 struct timespec now, next; 469 struct timespec64 now;
470 struct timespec next;
470 int fail = 1; 471 int fail = 1;
471 472
472 /* 473 /*
@@ -485,9 +486,9 @@ static void sync_cmos_clock(struct work_struct *work)
485 return; 486 return;
486 } 487 }
487 488
488 getnstimeofday(&now); 489 getnstimeofday64(&now);
489 if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) { 490 if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) {
490 struct timespec adjust = now; 491 struct timespec adjust = timespec64_to_timespec(now);
491 492
492 fail = -ENODEV; 493 fail = -ENODEV;
493 if (persistent_clock_is_local) 494 if (persistent_clock_is_local)
@@ -531,7 +532,7 @@ void ntp_notify_cmos_timer(void) { }
531/* 532/*
532 * Propagate a new txc->status value into the NTP state: 533 * Propagate a new txc->status value into the NTP state:
533 */ 534 */
534static inline void process_adj_status(struct timex *txc, struct timespec *ts) 535static inline void process_adj_status(struct timex *txc, struct timespec64 *ts)
535{ 536{
536 if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) { 537 if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
537 time_state = TIME_OK; 538 time_state = TIME_OK;
@@ -554,7 +555,7 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts)
554 555
555 556
556static inline void process_adjtimex_modes(struct timex *txc, 557static inline void process_adjtimex_modes(struct timex *txc,
557 struct timespec *ts, 558 struct timespec64 *ts,
558 s32 *time_tai) 559 s32 *time_tai)
559{ 560{
560 if (txc->modes & ADJ_STATUS) 561 if (txc->modes & ADJ_STATUS)
@@ -640,7 +641,7 @@ int ntp_validate_timex(struct timex *txc)
640 * adjtimex mainly allows reading (and writing, if superuser) of 641 * adjtimex mainly allows reading (and writing, if superuser) of
641 * kernel time-keeping variables. used by xntpd. 642 * kernel time-keeping variables. used by xntpd.
642 */ 643 */
643int __do_adjtimex(struct timex *txc, struct timespec *ts, s32 *time_tai) 644int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai)
644{ 645{
645 int result; 646 int result;
646 647
@@ -684,7 +685,7 @@ int __do_adjtimex(struct timex *txc, struct timespec *ts, s32 *time_tai)
684 /* fill PPS status fields */ 685 /* fill PPS status fields */
685 pps_fill_timex(txc); 686 pps_fill_timex(txc);
686 687
687 txc->time.tv_sec = ts->tv_sec; 688 txc->time.tv_sec = (time_t)ts->tv_sec;
688 txc->time.tv_usec = ts->tv_nsec; 689 txc->time.tv_usec = ts->tv_nsec;
689 if (!(time_status & STA_NANO)) 690 if (!(time_status & STA_NANO))
690 txc->time.tv_usec /= NSEC_PER_USEC; 691 txc->time.tv_usec /= NSEC_PER_USEC;
diff --git a/kernel/time/ntp_internal.h b/kernel/time/ntp_internal.h
index 1950cb4ca2a4..bbd102ad9df7 100644
--- a/kernel/time/ntp_internal.h
+++ b/kernel/time/ntp_internal.h
@@ -7,6 +7,6 @@ extern void ntp_clear(void);
7extern u64 ntp_tick_length(void); 7extern u64 ntp_tick_length(void);
8extern int second_overflow(unsigned long secs); 8extern int second_overflow(unsigned long secs);
9extern int ntp_validate_timex(struct timex *); 9extern int ntp_validate_timex(struct timex *);
10extern int __do_adjtimex(struct timex *, struct timespec *, s32 *); 10extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *);
11extern void __hardpps(const struct timespec *, const struct timespec *); 11extern void __hardpps(const struct timespec *, const struct timespec *);
12#endif /* _LINUX_NTP_INTERNAL_H */ 12#endif /* _LINUX_NTP_INTERNAL_H */
diff --git a/kernel/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 3b8946416a5f..3b8946416a5f 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
diff --git a/kernel/posix-timers.c b/kernel/time/posix-timers.c
index 424c2d4265c9..42b463ad90f2 100644
--- a/kernel/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -49,6 +49,8 @@
49#include <linux/export.h> 49#include <linux/export.h>
50#include <linux/hashtable.h> 50#include <linux/hashtable.h>
51 51
52#include "timekeeping.h"
53
52/* 54/*
53 * Management arrays for POSIX timers. Timers are now kept in static hash table 55 * Management arrays for POSIX timers. Timers are now kept in static hash table
54 * with 512 entries. 56 * with 512 entries.
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 7ab92b19965a..c19c1d84b6f3 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -4,6 +4,8 @@
4#include <linux/hrtimer.h> 4#include <linux/hrtimer.h>
5#include <linux/tick.h> 5#include <linux/tick.h>
6 6
7#include "timekeeping.h"
8
7extern seqlock_t jiffies_lock; 9extern seqlock_t jiffies_lock;
8 10
9#define CS_NAME_LEN 32 11#define CS_NAME_LEN 32
diff --git a/kernel/time.c b/kernel/time/time.c
index 7c7964c33ae7..f0294ba14634 100644
--- a/kernel/time.c
+++ b/kernel/time/time.c
@@ -42,6 +42,7 @@
42#include <asm/unistd.h> 42#include <asm/unistd.h>
43 43
44#include "timeconst.h" 44#include "timeconst.h"
45#include "timekeeping.h"
45 46
46/* 47/*
47 * The timezone where the local system is located. Used as a default by some 48 * The timezone where the local system is located. Used as a default by some
@@ -420,6 +421,68 @@ struct timeval ns_to_timeval(const s64 nsec)
420} 421}
421EXPORT_SYMBOL(ns_to_timeval); 422EXPORT_SYMBOL(ns_to_timeval);
422 423
424#if BITS_PER_LONG == 32
425/**
426 * set_normalized_timespec - set timespec sec and nsec parts and normalize
427 *
428 * @ts: pointer to timespec variable to be set
429 * @sec: seconds to set
430 * @nsec: nanoseconds to set
431 *
432 * Set seconds and nanoseconds field of a timespec variable and
433 * normalize to the timespec storage format
434 *
435 * Note: The tv_nsec part is always in the range of
436 * 0 <= tv_nsec < NSEC_PER_SEC
437 * For negative values only the tv_sec field is negative !
438 */
439void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec)
440{
441 while (nsec >= NSEC_PER_SEC) {
442 /*
443 * The following asm() prevents the compiler from
444 * optimising this loop into a modulo operation. See
445 * also __iter_div_u64_rem() in include/linux/time.h
446 */
447 asm("" : "+rm"(nsec));
448 nsec -= NSEC_PER_SEC;
449 ++sec;
450 }
451 while (nsec < 0) {
452 asm("" : "+rm"(nsec));
453 nsec += NSEC_PER_SEC;
454 --sec;
455 }
456 ts->tv_sec = sec;
457 ts->tv_nsec = nsec;
458}
459EXPORT_SYMBOL(set_normalized_timespec64);
460
461/**
462 * ns_to_timespec64 - Convert nanoseconds to timespec64
463 * @nsec: the nanoseconds value to be converted
464 *
465 * Returns the timespec64 representation of the nsec parameter.
466 */
467struct timespec64 ns_to_timespec64(const s64 nsec)
468{
469 struct timespec64 ts;
470 s32 rem;
471
472 if (!nsec)
473 return (struct timespec64) {0, 0};
474
475 ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
476 if (unlikely(rem < 0)) {
477 ts.tv_sec--;
478 rem += NSEC_PER_SEC;
479 }
480 ts.tv_nsec = rem;
481
482 return ts;
483}
484EXPORT_SYMBOL(ns_to_timespec64);
485#endif
423/* 486/*
424 * When we convert to jiffies then we interpret incoming values 487 * When we convert to jiffies then we interpret incoming values
425 * the following way: 488 * the following way:
@@ -694,6 +757,7 @@ unsigned long nsecs_to_jiffies(u64 n)
694{ 757{
695 return (unsigned long)nsecs_to_jiffies64(n); 758 return (unsigned long)nsecs_to_jiffies64(n);
696} 759}
760EXPORT_SYMBOL_GPL(nsecs_to_jiffies);
697 761
698/* 762/*
699 * Add two timespec values and do a safety check for overflow. 763 * Add two timespec values and do a safety check for overflow.
diff --git a/kernel/timeconst.bc b/kernel/time/timeconst.bc
index 511bdf2cafda..511bdf2cafda 100644
--- a/kernel/timeconst.bc
+++ b/kernel/time/timeconst.bc
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 32d8d6aaedb8..f36b02838a47 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -32,11 +32,34 @@
32#define TK_MIRROR (1 << 1) 32#define TK_MIRROR (1 << 1)
33#define TK_CLOCK_WAS_SET (1 << 2) 33#define TK_CLOCK_WAS_SET (1 << 2)
34 34
35static struct timekeeper timekeeper; 35/*
36 * The most important data for readout fits into a single 64 byte
37 * cache line.
38 */
39static struct {
40 seqcount_t seq;
41 struct timekeeper timekeeper;
42} tk_core ____cacheline_aligned;
43
36static DEFINE_RAW_SPINLOCK(timekeeper_lock); 44static DEFINE_RAW_SPINLOCK(timekeeper_lock);
37static seqcount_t timekeeper_seq;
38static struct timekeeper shadow_timekeeper; 45static struct timekeeper shadow_timekeeper;
39 46
47/**
48 * struct tk_fast - NMI safe timekeeper
49 * @seq: Sequence counter for protecting updates. The lowest bit
50 * is the index for the tk_read_base array
51 * @base: tk_read_base array. Access is indexed by the lowest bit of
52 * @seq.
53 *
54 * See @update_fast_timekeeper() below.
55 */
56struct tk_fast {
57 seqcount_t seq;
58 struct tk_read_base base[2];
59};
60
61static struct tk_fast tk_fast_mono ____cacheline_aligned;
62
40/* flag for if timekeeping is suspended */ 63/* flag for if timekeeping is suspended */
41int __read_mostly timekeeping_suspended; 64int __read_mostly timekeeping_suspended;
42 65
@@ -45,49 +68,54 @@ bool __read_mostly persistent_clock_exist = false;
45 68
46static inline void tk_normalize_xtime(struct timekeeper *tk) 69static inline void tk_normalize_xtime(struct timekeeper *tk)
47{ 70{
48 while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) { 71 while (tk->tkr.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr.shift)) {
49 tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift; 72 tk->tkr.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr.shift;
50 tk->xtime_sec++; 73 tk->xtime_sec++;
51 } 74 }
52} 75}
53 76
54static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts) 77static inline struct timespec64 tk_xtime(struct timekeeper *tk)
78{
79 struct timespec64 ts;
80
81 ts.tv_sec = tk->xtime_sec;
82 ts.tv_nsec = (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
83 return ts;
84}
85
86static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
55{ 87{
56 tk->xtime_sec = ts->tv_sec; 88 tk->xtime_sec = ts->tv_sec;
57 tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift; 89 tk->tkr.xtime_nsec = (u64)ts->tv_nsec << tk->tkr.shift;
58} 90}
59 91
60static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts) 92static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
61{ 93{
62 tk->xtime_sec += ts->tv_sec; 94 tk->xtime_sec += ts->tv_sec;
63 tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift; 95 tk->tkr.xtime_nsec += (u64)ts->tv_nsec << tk->tkr.shift;
64 tk_normalize_xtime(tk); 96 tk_normalize_xtime(tk);
65} 97}
66 98
67static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm) 99static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
68{ 100{
69 struct timespec tmp; 101 struct timespec64 tmp;
70 102
71 /* 103 /*
72 * Verify consistency of: offset_real = -wall_to_monotonic 104 * Verify consistency of: offset_real = -wall_to_monotonic
73 * before modifying anything 105 * before modifying anything
74 */ 106 */
75 set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec, 107 set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
76 -tk->wall_to_monotonic.tv_nsec); 108 -tk->wall_to_monotonic.tv_nsec);
77 WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64); 109 WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64);
78 tk->wall_to_monotonic = wtm; 110 tk->wall_to_monotonic = wtm;
79 set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec); 111 set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
80 tk->offs_real = timespec_to_ktime(tmp); 112 tk->offs_real = timespec64_to_ktime(tmp);
81 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0)); 113 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
82} 114}
83 115
84static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t) 116static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
85{ 117{
86 /* Verify consistency before modifying */ 118 tk->offs_boot = ktime_add(tk->offs_boot, delta);
87 WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);
88
89 tk->total_sleep_time = t;
90 tk->offs_boot = timespec_to_ktime(t);
91} 119}
92 120
93/** 121/**
@@ -107,9 +135,11 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
107 u64 tmp, ntpinterval; 135 u64 tmp, ntpinterval;
108 struct clocksource *old_clock; 136 struct clocksource *old_clock;
109 137
110 old_clock = tk->clock; 138 old_clock = tk->tkr.clock;
111 tk->clock = clock; 139 tk->tkr.clock = clock;
112 tk->cycle_last = clock->cycle_last = clock->read(clock); 140 tk->tkr.read = clock->read;
141 tk->tkr.mask = clock->mask;
142 tk->tkr.cycle_last = tk->tkr.read(clock);
113 143
114 /* Do the ns -> cycle conversion first, using original mult */ 144 /* Do the ns -> cycle conversion first, using original mult */
115 tmp = NTP_INTERVAL_LENGTH; 145 tmp = NTP_INTERVAL_LENGTH;
@@ -133,78 +163,212 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
133 if (old_clock) { 163 if (old_clock) {
134 int shift_change = clock->shift - old_clock->shift; 164 int shift_change = clock->shift - old_clock->shift;
135 if (shift_change < 0) 165 if (shift_change < 0)
136 tk->xtime_nsec >>= -shift_change; 166 tk->tkr.xtime_nsec >>= -shift_change;
137 else 167 else
138 tk->xtime_nsec <<= shift_change; 168 tk->tkr.xtime_nsec <<= shift_change;
139 } 169 }
140 tk->shift = clock->shift; 170 tk->tkr.shift = clock->shift;
141 171
142 tk->ntp_error = 0; 172 tk->ntp_error = 0;
143 tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift; 173 tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
174 tk->ntp_tick = ntpinterval << tk->ntp_error_shift;
144 175
145 /* 176 /*
146 * The timekeeper keeps its own mult values for the currently 177 * The timekeeper keeps its own mult values for the currently
147 * active clocksource. These value will be adjusted via NTP 178 * active clocksource. These value will be adjusted via NTP
148 * to counteract clock drifting. 179 * to counteract clock drifting.
149 */ 180 */
150 tk->mult = clock->mult; 181 tk->tkr.mult = clock->mult;
182 tk->ntp_err_mult = 0;
151} 183}
152 184
153/* Timekeeper helper functions. */ 185/* Timekeeper helper functions. */
154 186
155#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET 187#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
156u32 (*arch_gettimeoffset)(void); 188static u32 default_arch_gettimeoffset(void) { return 0; }
157 189u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
158u32 get_arch_timeoffset(void)
159{
160 if (likely(arch_gettimeoffset))
161 return arch_gettimeoffset();
162 return 0;
163}
164#else 190#else
165static inline u32 get_arch_timeoffset(void) { return 0; } 191static inline u32 arch_gettimeoffset(void) { return 0; }
166#endif 192#endif
167 193
168static inline s64 timekeeping_get_ns(struct timekeeper *tk) 194static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
169{ 195{
170 cycle_t cycle_now, cycle_delta; 196 cycle_t cycle_now, delta;
171 struct clocksource *clock;
172 s64 nsec; 197 s64 nsec;
173 198
174 /* read clocksource: */ 199 /* read clocksource: */
175 clock = tk->clock; 200 cycle_now = tkr->read(tkr->clock);
176 cycle_now = clock->read(clock);
177 201
178 /* calculate the delta since the last update_wall_time: */ 202 /* calculate the delta since the last update_wall_time: */
179 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; 203 delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
180 204
181 nsec = cycle_delta * tk->mult + tk->xtime_nsec; 205 nsec = delta * tkr->mult + tkr->xtime_nsec;
182 nsec >>= tk->shift; 206 nsec >>= tkr->shift;
183 207
184 /* If arch requires, add in get_arch_timeoffset() */ 208 /* If arch requires, add in get_arch_timeoffset() */
185 return nsec + get_arch_timeoffset(); 209 return nsec + arch_gettimeoffset();
186} 210}
187 211
188static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk) 212static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
189{ 213{
190 cycle_t cycle_now, cycle_delta; 214 struct clocksource *clock = tk->tkr.clock;
191 struct clocksource *clock; 215 cycle_t cycle_now, delta;
192 s64 nsec; 216 s64 nsec;
193 217
194 /* read clocksource: */ 218 /* read clocksource: */
195 clock = tk->clock; 219 cycle_now = tk->tkr.read(clock);
196 cycle_now = clock->read(clock);
197 220
198 /* calculate the delta since the last update_wall_time: */ 221 /* calculate the delta since the last update_wall_time: */
199 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; 222 delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
200 223
201 /* convert delta to nanoseconds. */ 224 /* convert delta to nanoseconds. */
202 nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); 225 nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
203 226
204 /* If arch requires, add in get_arch_timeoffset() */ 227 /* If arch requires, add in get_arch_timeoffset() */
205 return nsec + get_arch_timeoffset(); 228 return nsec + arch_gettimeoffset();
229}
230
231/**
232 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
233 * @tk: The timekeeper from which we take the update
234 * @tkf: The fast timekeeper to update
235 * @tbase: The time base for the fast timekeeper (mono/raw)
236 *
237 * We want to use this from any context including NMI and tracing /
238 * instrumenting the timekeeping code itself.
239 *
240 * So we handle this differently than the other timekeeping accessor
241 * functions which retry when the sequence count has changed. The
242 * update side does:
243 *
244 * smp_wmb(); <- Ensure that the last base[1] update is visible
245 * tkf->seq++;
246 * smp_wmb(); <- Ensure that the seqcount update is visible
247 * update(tkf->base[0], tk);
248 * smp_wmb(); <- Ensure that the base[0] update is visible
249 * tkf->seq++;
250 * smp_wmb(); <- Ensure that the seqcount update is visible
251 * update(tkf->base[1], tk);
252 *
253 * The reader side does:
254 *
255 * do {
256 * seq = tkf->seq;
257 * smp_rmb();
258 * idx = seq & 0x01;
259 * now = now(tkf->base[idx]);
260 * smp_rmb();
261 * } while (seq != tkf->seq)
262 *
263 * As long as we update base[0] readers are forced off to
264 * base[1]. Once base[0] is updated readers are redirected to base[0]
265 * and the base[1] update takes place.
266 *
267 * So if a NMI hits the update of base[0] then it will use base[1]
268 * which is still consistent. In the worst case this can result is a
269 * slightly wrong timestamp (a few nanoseconds). See
270 * @ktime_get_mono_fast_ns.
271 */
272static void update_fast_timekeeper(struct timekeeper *tk)
273{
274 struct tk_read_base *base = tk_fast_mono.base;
275
276 /* Force readers off to base[1] */
277 raw_write_seqcount_latch(&tk_fast_mono.seq);
278
279 /* Update base[0] */
280 memcpy(base, &tk->tkr, sizeof(*base));
281
282 /* Force readers back to base[0] */
283 raw_write_seqcount_latch(&tk_fast_mono.seq);
284
285 /* Update base[1] */
286 memcpy(base + 1, base, sizeof(*base));
206} 287}
207 288
289/**
290 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
291 *
292 * This timestamp is not guaranteed to be monotonic across an update.
293 * The timestamp is calculated by:
294 *
295 * now = base_mono + clock_delta * slope
296 *
297 * So if the update lowers the slope, readers who are forced to the
298 * not yet updated second array are still using the old steeper slope.
299 *
300 * tmono
301 * ^
302 * | o n
303 * | o n
304 * | u
305 * | o
306 * |o
307 * |12345678---> reader order
308 *
309 * o = old slope
310 * u = update
311 * n = new slope
312 *
313 * So reader 6 will observe time going backwards versus reader 5.
314 *
315 * While other CPUs are likely to be able observe that, the only way
316 * for a CPU local observation is when an NMI hits in the middle of
317 * the update. Timestamps taken from that NMI context might be ahead
318 * of the following timestamps. Callers need to be aware of that and
319 * deal with it.
320 */
321u64 notrace ktime_get_mono_fast_ns(void)
322{
323 struct tk_read_base *tkr;
324 unsigned int seq;
325 u64 now;
326
327 do {
328 seq = raw_read_seqcount(&tk_fast_mono.seq);
329 tkr = tk_fast_mono.base + (seq & 0x01);
330 now = ktime_to_ns(tkr->base_mono) + timekeeping_get_ns(tkr);
331
332 } while (read_seqcount_retry(&tk_fast_mono.seq, seq));
333 return now;
334}
335EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
336
337#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
338
339static inline void update_vsyscall(struct timekeeper *tk)
340{
341 struct timespec xt;
342
343 xt = timespec64_to_timespec(tk_xtime(tk));
344 update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->tkr.clock, tk->tkr.mult,
345 tk->tkr.cycle_last);
346}
347
348static inline void old_vsyscall_fixup(struct timekeeper *tk)
349{
350 s64 remainder;
351
352 /*
353 * Store only full nanoseconds into xtime_nsec after rounding
354 * it up and add the remainder to the error difference.
355 * XXX - This is necessary to avoid small 1ns inconsistnecies caused
356 * by truncating the remainder in vsyscalls. However, it causes
357 * additional work to be done in timekeeping_adjust(). Once
358 * the vsyscall implementations are converted to use xtime_nsec
359 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
360 * users are removed, this can be killed.
361 */
362 remainder = tk->tkr.xtime_nsec & ((1ULL << tk->tkr.shift) - 1);
363 tk->tkr.xtime_nsec -= remainder;
364 tk->tkr.xtime_nsec += 1ULL << tk->tkr.shift;
365 tk->ntp_error += remainder << tk->ntp_error_shift;
366 tk->ntp_error -= (1ULL << tk->tkr.shift) << tk->ntp_error_shift;
367}
368#else
369#define old_vsyscall_fixup(tk)
370#endif
371
208static RAW_NOTIFIER_HEAD(pvclock_gtod_chain); 372static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
209 373
210static void update_pvclock_gtod(struct timekeeper *tk, bool was_set) 374static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
@@ -217,7 +381,7 @@ static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
217 */ 381 */
218int pvclock_gtod_register_notifier(struct notifier_block *nb) 382int pvclock_gtod_register_notifier(struct notifier_block *nb)
219{ 383{
220 struct timekeeper *tk = &timekeeper; 384 struct timekeeper *tk = &tk_core.timekeeper;
221 unsigned long flags; 385 unsigned long flags;
222 int ret; 386 int ret;
223 387
@@ -247,6 +411,29 @@ int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
247} 411}
248EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier); 412EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
249 413
414/*
415 * Update the ktime_t based scalar nsec members of the timekeeper
416 */
417static inline void tk_update_ktime_data(struct timekeeper *tk)
418{
419 s64 nsec;
420
421 /*
422 * The xtime based monotonic readout is:
423 * nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
424 * The ktime based monotonic readout is:
425 * nsec = base_mono + now();
426 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
427 */
428 nsec = (s64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
429 nsec *= NSEC_PER_SEC;
430 nsec += tk->wall_to_monotonic.tv_nsec;
431 tk->tkr.base_mono = ns_to_ktime(nsec);
432
433 /* Update the monotonic raw base */
434 tk->base_raw = timespec64_to_ktime(tk->raw_time);
435}
436
250/* must hold timekeeper_lock */ 437/* must hold timekeeper_lock */
251static void timekeeping_update(struct timekeeper *tk, unsigned int action) 438static void timekeeping_update(struct timekeeper *tk, unsigned int action)
252{ 439{
@@ -257,8 +444,13 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
257 update_vsyscall(tk); 444 update_vsyscall(tk);
258 update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET); 445 update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
259 446
447 tk_update_ktime_data(tk);
448
260 if (action & TK_MIRROR) 449 if (action & TK_MIRROR)
261 memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper)); 450 memcpy(&shadow_timekeeper, &tk_core.timekeeper,
451 sizeof(tk_core.timekeeper));
452
453 update_fast_timekeeper(tk);
262} 454}
263 455
264/** 456/**
@@ -270,49 +462,48 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
270 */ 462 */
271static void timekeeping_forward_now(struct timekeeper *tk) 463static void timekeeping_forward_now(struct timekeeper *tk)
272{ 464{
273 cycle_t cycle_now, cycle_delta; 465 struct clocksource *clock = tk->tkr.clock;
274 struct clocksource *clock; 466 cycle_t cycle_now, delta;
275 s64 nsec; 467 s64 nsec;
276 468
277 clock = tk->clock; 469 cycle_now = tk->tkr.read(clock);
278 cycle_now = clock->read(clock); 470 delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
279 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; 471 tk->tkr.cycle_last = cycle_now;
280 tk->cycle_last = clock->cycle_last = cycle_now;
281 472
282 tk->xtime_nsec += cycle_delta * tk->mult; 473 tk->tkr.xtime_nsec += delta * tk->tkr.mult;
283 474
284 /* If arch requires, add in get_arch_timeoffset() */ 475 /* If arch requires, add in get_arch_timeoffset() */
285 tk->xtime_nsec += (u64)get_arch_timeoffset() << tk->shift; 476 tk->tkr.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr.shift;
286 477
287 tk_normalize_xtime(tk); 478 tk_normalize_xtime(tk);
288 479
289 nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); 480 nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
290 timespec_add_ns(&tk->raw_time, nsec); 481 timespec64_add_ns(&tk->raw_time, nsec);
291} 482}
292 483
293/** 484/**
294 * __getnstimeofday - Returns the time of day in a timespec. 485 * __getnstimeofday64 - Returns the time of day in a timespec64.
295 * @ts: pointer to the timespec to be set 486 * @ts: pointer to the timespec to be set
296 * 487 *
297 * Updates the time of day in the timespec. 488 * Updates the time of day in the timespec.
298 * Returns 0 on success, or -ve when suspended (timespec will be undefined). 489 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
299 */ 490 */
300int __getnstimeofday(struct timespec *ts) 491int __getnstimeofday64(struct timespec64 *ts)
301{ 492{
302 struct timekeeper *tk = &timekeeper; 493 struct timekeeper *tk = &tk_core.timekeeper;
303 unsigned long seq; 494 unsigned long seq;
304 s64 nsecs = 0; 495 s64 nsecs = 0;
305 496
306 do { 497 do {
307 seq = read_seqcount_begin(&timekeeper_seq); 498 seq = read_seqcount_begin(&tk_core.seq);
308 499
309 ts->tv_sec = tk->xtime_sec; 500 ts->tv_sec = tk->xtime_sec;
310 nsecs = timekeeping_get_ns(tk); 501 nsecs = timekeeping_get_ns(&tk->tkr);
311 502
312 } while (read_seqcount_retry(&timekeeper_seq, seq)); 503 } while (read_seqcount_retry(&tk_core.seq, seq));
313 504
314 ts->tv_nsec = 0; 505 ts->tv_nsec = 0;
315 timespec_add_ns(ts, nsecs); 506 timespec64_add_ns(ts, nsecs);
316 507
317 /* 508 /*
318 * Do not bail out early, in case there were callers still using 509 * Do not bail out early, in case there were callers still using
@@ -322,116 +513,138 @@ int __getnstimeofday(struct timespec *ts)
322 return -EAGAIN; 513 return -EAGAIN;
323 return 0; 514 return 0;
324} 515}
325EXPORT_SYMBOL(__getnstimeofday); 516EXPORT_SYMBOL(__getnstimeofday64);
326 517
327/** 518/**
328 * getnstimeofday - Returns the time of day in a timespec. 519 * getnstimeofday64 - Returns the time of day in a timespec64.
329 * @ts: pointer to the timespec to be set 520 * @ts: pointer to the timespec to be set
330 * 521 *
331 * Returns the time of day in a timespec (WARN if suspended). 522 * Returns the time of day in a timespec (WARN if suspended).
332 */ 523 */
333void getnstimeofday(struct timespec *ts) 524void getnstimeofday64(struct timespec64 *ts)
334{ 525{
335 WARN_ON(__getnstimeofday(ts)); 526 WARN_ON(__getnstimeofday64(ts));
336} 527}
337EXPORT_SYMBOL(getnstimeofday); 528EXPORT_SYMBOL(getnstimeofday64);
338 529
339ktime_t ktime_get(void) 530ktime_t ktime_get(void)
340{ 531{
341 struct timekeeper *tk = &timekeeper; 532 struct timekeeper *tk = &tk_core.timekeeper;
342 unsigned int seq; 533 unsigned int seq;
343 s64 secs, nsecs; 534 ktime_t base;
535 s64 nsecs;
344 536
345 WARN_ON(timekeeping_suspended); 537 WARN_ON(timekeeping_suspended);
346 538
347 do { 539 do {
348 seq = read_seqcount_begin(&timekeeper_seq); 540 seq = read_seqcount_begin(&tk_core.seq);
349 secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; 541 base = tk->tkr.base_mono;
350 nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec; 542 nsecs = timekeeping_get_ns(&tk->tkr);
351 543
352 } while (read_seqcount_retry(&timekeeper_seq, seq)); 544 } while (read_seqcount_retry(&tk_core.seq, seq));
353 /* 545
354 * Use ktime_set/ktime_add_ns to create a proper ktime on 546 return ktime_add_ns(base, nsecs);
355 * 32-bit architectures without CONFIG_KTIME_SCALAR.
356 */
357 return ktime_add_ns(ktime_set(secs, 0), nsecs);
358} 547}
359EXPORT_SYMBOL_GPL(ktime_get); 548EXPORT_SYMBOL_GPL(ktime_get);
360 549
361/** 550static ktime_t *offsets[TK_OFFS_MAX] = {
362 * ktime_get_ts - get the monotonic clock in timespec format 551 [TK_OFFS_REAL] = &tk_core.timekeeper.offs_real,
363 * @ts: pointer to timespec variable 552 [TK_OFFS_BOOT] = &tk_core.timekeeper.offs_boot,
364 * 553 [TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai,
365 * The function calculates the monotonic clock from the realtime 554};
366 * clock and the wall_to_monotonic offset and stores the result 555
367 * in normalized timespec format in the variable pointed to by @ts. 556ktime_t ktime_get_with_offset(enum tk_offsets offs)
368 */
369void ktime_get_ts(struct timespec *ts)
370{ 557{
371 struct timekeeper *tk = &timekeeper; 558 struct timekeeper *tk = &tk_core.timekeeper;
372 struct timespec tomono;
373 s64 nsec;
374 unsigned int seq; 559 unsigned int seq;
560 ktime_t base, *offset = offsets[offs];
561 s64 nsecs;
375 562
376 WARN_ON(timekeeping_suspended); 563 WARN_ON(timekeeping_suspended);
377 564
378 do { 565 do {
379 seq = read_seqcount_begin(&timekeeper_seq); 566 seq = read_seqcount_begin(&tk_core.seq);
380 ts->tv_sec = tk->xtime_sec; 567 base = ktime_add(tk->tkr.base_mono, *offset);
381 nsec = timekeeping_get_ns(tk); 568 nsecs = timekeeping_get_ns(&tk->tkr);
382 tomono = tk->wall_to_monotonic;
383 569
384 } while (read_seqcount_retry(&timekeeper_seq, seq)); 570 } while (read_seqcount_retry(&tk_core.seq, seq));
385 571
386 ts->tv_sec += tomono.tv_sec; 572 return ktime_add_ns(base, nsecs);
387 ts->tv_nsec = 0;
388 timespec_add_ns(ts, nsec + tomono.tv_nsec);
389}
390EXPORT_SYMBOL_GPL(ktime_get_ts);
391 573
574}
575EXPORT_SYMBOL_GPL(ktime_get_with_offset);
392 576
393/** 577/**
394 * timekeeping_clocktai - Returns the TAI time of day in a timespec 578 * ktime_mono_to_any() - convert mononotic time to any other time
395 * @ts: pointer to the timespec to be set 579 * @tmono: time to convert.
396 * 580 * @offs: which offset to use
397 * Returns the time of day in a timespec.
398 */ 581 */
399void timekeeping_clocktai(struct timespec *ts) 582ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
400{ 583{
401 struct timekeeper *tk = &timekeeper; 584 ktime_t *offset = offsets[offs];
402 unsigned long seq; 585 unsigned long seq;
403 u64 nsecs; 586 ktime_t tconv;
404
405 WARN_ON(timekeeping_suspended);
406 587
407 do { 588 do {
408 seq = read_seqcount_begin(&timekeeper_seq); 589 seq = read_seqcount_begin(&tk_core.seq);
590 tconv = ktime_add(tmono, *offset);
591 } while (read_seqcount_retry(&tk_core.seq, seq));
409 592
410 ts->tv_sec = tk->xtime_sec + tk->tai_offset; 593 return tconv;
411 nsecs = timekeeping_get_ns(tk); 594}
595EXPORT_SYMBOL_GPL(ktime_mono_to_any);
412 596
413 } while (read_seqcount_retry(&timekeeper_seq, seq)); 597/**
598 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
599 */
600ktime_t ktime_get_raw(void)
601{
602 struct timekeeper *tk = &tk_core.timekeeper;
603 unsigned int seq;
604 ktime_t base;
605 s64 nsecs;
414 606
415 ts->tv_nsec = 0; 607 do {
416 timespec_add_ns(ts, nsecs); 608 seq = read_seqcount_begin(&tk_core.seq);
609 base = tk->base_raw;
610 nsecs = timekeeping_get_ns_raw(tk);
417 611
418} 612 } while (read_seqcount_retry(&tk_core.seq, seq));
419EXPORT_SYMBOL(timekeeping_clocktai);
420 613
614 return ktime_add_ns(base, nsecs);
615}
616EXPORT_SYMBOL_GPL(ktime_get_raw);
421 617
422/** 618/**
423 * ktime_get_clocktai - Returns the TAI time of day in a ktime 619 * ktime_get_ts64 - get the monotonic clock in timespec64 format
620 * @ts: pointer to timespec variable
424 * 621 *
425 * Returns the time of day in a ktime. 622 * The function calculates the monotonic clock from the realtime
623 * clock and the wall_to_monotonic offset and stores the result
624 * in normalized timespec format in the variable pointed to by @ts.
426 */ 625 */
427ktime_t ktime_get_clocktai(void) 626void ktime_get_ts64(struct timespec64 *ts)
428{ 627{
429 struct timespec ts; 628 struct timekeeper *tk = &tk_core.timekeeper;
629 struct timespec64 tomono;
630 s64 nsec;
631 unsigned int seq;
632
633 WARN_ON(timekeeping_suspended);
430 634
431 timekeeping_clocktai(&ts); 635 do {
432 return timespec_to_ktime(ts); 636 seq = read_seqcount_begin(&tk_core.seq);
637 ts->tv_sec = tk->xtime_sec;
638 nsec = timekeeping_get_ns(&tk->tkr);
639 tomono = tk->wall_to_monotonic;
640
641 } while (read_seqcount_retry(&tk_core.seq, seq));
642
643 ts->tv_sec += tomono.tv_sec;
644 ts->tv_nsec = 0;
645 timespec64_add_ns(ts, nsec + tomono.tv_nsec);
433} 646}
434EXPORT_SYMBOL(ktime_get_clocktai); 647EXPORT_SYMBOL_GPL(ktime_get_ts64);
435 648
436#ifdef CONFIG_NTP_PPS 649#ifdef CONFIG_NTP_PPS
437 650
@@ -446,23 +659,23 @@ EXPORT_SYMBOL(ktime_get_clocktai);
446 */ 659 */
447void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) 660void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
448{ 661{
449 struct timekeeper *tk = &timekeeper; 662 struct timekeeper *tk = &tk_core.timekeeper;
450 unsigned long seq; 663 unsigned long seq;
451 s64 nsecs_raw, nsecs_real; 664 s64 nsecs_raw, nsecs_real;
452 665
453 WARN_ON_ONCE(timekeeping_suspended); 666 WARN_ON_ONCE(timekeeping_suspended);
454 667
455 do { 668 do {
456 seq = read_seqcount_begin(&timekeeper_seq); 669 seq = read_seqcount_begin(&tk_core.seq);
457 670
458 *ts_raw = tk->raw_time; 671 *ts_raw = timespec64_to_timespec(tk->raw_time);
459 ts_real->tv_sec = tk->xtime_sec; 672 ts_real->tv_sec = tk->xtime_sec;
460 ts_real->tv_nsec = 0; 673 ts_real->tv_nsec = 0;
461 674
462 nsecs_raw = timekeeping_get_ns_raw(tk); 675 nsecs_raw = timekeeping_get_ns_raw(tk);
463 nsecs_real = timekeeping_get_ns(tk); 676 nsecs_real = timekeeping_get_ns(&tk->tkr);
464 677
465 } while (read_seqcount_retry(&timekeeper_seq, seq)); 678 } while (read_seqcount_retry(&tk_core.seq, seq));
466 679
467 timespec_add_ns(ts_raw, nsecs_raw); 680 timespec_add_ns(ts_raw, nsecs_raw);
468 timespec_add_ns(ts_real, nsecs_real); 681 timespec_add_ns(ts_real, nsecs_real);
@@ -479,9 +692,9 @@ EXPORT_SYMBOL(getnstime_raw_and_real);
479 */ 692 */
480void do_gettimeofday(struct timeval *tv) 693void do_gettimeofday(struct timeval *tv)
481{ 694{
482 struct timespec now; 695 struct timespec64 now;
483 696
484 getnstimeofday(&now); 697 getnstimeofday64(&now);
485 tv->tv_sec = now.tv_sec; 698 tv->tv_sec = now.tv_sec;
486 tv->tv_usec = now.tv_nsec/1000; 699 tv->tv_usec = now.tv_nsec/1000;
487} 700}
@@ -495,15 +708,15 @@ EXPORT_SYMBOL(do_gettimeofday);
495 */ 708 */
496int do_settimeofday(const struct timespec *tv) 709int do_settimeofday(const struct timespec *tv)
497{ 710{
498 struct timekeeper *tk = &timekeeper; 711 struct timekeeper *tk = &tk_core.timekeeper;
499 struct timespec ts_delta, xt; 712 struct timespec64 ts_delta, xt, tmp;
500 unsigned long flags; 713 unsigned long flags;
501 714
502 if (!timespec_valid_strict(tv)) 715 if (!timespec_valid_strict(tv))
503 return -EINVAL; 716 return -EINVAL;
504 717
505 raw_spin_lock_irqsave(&timekeeper_lock, flags); 718 raw_spin_lock_irqsave(&timekeeper_lock, flags);
506 write_seqcount_begin(&timekeeper_seq); 719 write_seqcount_begin(&tk_core.seq);
507 720
508 timekeeping_forward_now(tk); 721 timekeeping_forward_now(tk);
509 722
@@ -511,13 +724,14 @@ int do_settimeofday(const struct timespec *tv)
511 ts_delta.tv_sec = tv->tv_sec - xt.tv_sec; 724 ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
512 ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec; 725 ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;
513 726
514 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta)); 727 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
515 728
516 tk_set_xtime(tk, tv); 729 tmp = timespec_to_timespec64(*tv);
730 tk_set_xtime(tk, &tmp);
517 731
518 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); 732 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
519 733
520 write_seqcount_end(&timekeeper_seq); 734 write_seqcount_end(&tk_core.seq);
521 raw_spin_unlock_irqrestore(&timekeeper_lock, flags); 735 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
522 736
523 /* signal hrtimers about time change */ 737 /* signal hrtimers about time change */
@@ -535,33 +749,35 @@ EXPORT_SYMBOL(do_settimeofday);
535 */ 749 */
536int timekeeping_inject_offset(struct timespec *ts) 750int timekeeping_inject_offset(struct timespec *ts)
537{ 751{
538 struct timekeeper *tk = &timekeeper; 752 struct timekeeper *tk = &tk_core.timekeeper;
539 unsigned long flags; 753 unsigned long flags;
540 struct timespec tmp; 754 struct timespec64 ts64, tmp;
541 int ret = 0; 755 int ret = 0;
542 756
543 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) 757 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
544 return -EINVAL; 758 return -EINVAL;
545 759
760 ts64 = timespec_to_timespec64(*ts);
761
546 raw_spin_lock_irqsave(&timekeeper_lock, flags); 762 raw_spin_lock_irqsave(&timekeeper_lock, flags);
547 write_seqcount_begin(&timekeeper_seq); 763 write_seqcount_begin(&tk_core.seq);
548 764
549 timekeeping_forward_now(tk); 765 timekeeping_forward_now(tk);
550 766
551 /* Make sure the proposed value is valid */ 767 /* Make sure the proposed value is valid */
552 tmp = timespec_add(tk_xtime(tk), *ts); 768 tmp = timespec64_add(tk_xtime(tk), ts64);
553 if (!timespec_valid_strict(&tmp)) { 769 if (!timespec64_valid_strict(&tmp)) {
554 ret = -EINVAL; 770 ret = -EINVAL;
555 goto error; 771 goto error;
556 } 772 }
557 773
558 tk_xtime_add(tk, ts); 774 tk_xtime_add(tk, &ts64);
559 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts)); 775 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));
560 776
561error: /* even if we error out, we forwarded the time, so call update */ 777error: /* even if we error out, we forwarded the time, so call update */
562 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); 778 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
563 779
564 write_seqcount_end(&timekeeper_seq); 780 write_seqcount_end(&tk_core.seq);
565 raw_spin_unlock_irqrestore(&timekeeper_lock, flags); 781 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
566 782
567 /* signal hrtimers about time change */ 783 /* signal hrtimers about time change */
@@ -578,14 +794,14 @@ EXPORT_SYMBOL(timekeeping_inject_offset);
578 */ 794 */
579s32 timekeeping_get_tai_offset(void) 795s32 timekeeping_get_tai_offset(void)
580{ 796{
581 struct timekeeper *tk = &timekeeper; 797 struct timekeeper *tk = &tk_core.timekeeper;
582 unsigned int seq; 798 unsigned int seq;
583 s32 ret; 799 s32 ret;
584 800
585 do { 801 do {
586 seq = read_seqcount_begin(&timekeeper_seq); 802 seq = read_seqcount_begin(&tk_core.seq);
587 ret = tk->tai_offset; 803 ret = tk->tai_offset;
588 } while (read_seqcount_retry(&timekeeper_seq, seq)); 804 } while (read_seqcount_retry(&tk_core.seq, seq));
589 805
590 return ret; 806 return ret;
591} 807}
@@ -606,14 +822,14 @@ static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
606 */ 822 */
607void timekeeping_set_tai_offset(s32 tai_offset) 823void timekeeping_set_tai_offset(s32 tai_offset)
608{ 824{
609 struct timekeeper *tk = &timekeeper; 825 struct timekeeper *tk = &tk_core.timekeeper;
610 unsigned long flags; 826 unsigned long flags;
611 827
612 raw_spin_lock_irqsave(&timekeeper_lock, flags); 828 raw_spin_lock_irqsave(&timekeeper_lock, flags);
613 write_seqcount_begin(&timekeeper_seq); 829 write_seqcount_begin(&tk_core.seq);
614 __timekeeping_set_tai_offset(tk, tai_offset); 830 __timekeeping_set_tai_offset(tk, tai_offset);
615 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); 831 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
616 write_seqcount_end(&timekeeper_seq); 832 write_seqcount_end(&tk_core.seq);
617 raw_spin_unlock_irqrestore(&timekeeper_lock, flags); 833 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
618 clock_was_set(); 834 clock_was_set();
619} 835}
@@ -625,14 +841,14 @@ void timekeeping_set_tai_offset(s32 tai_offset)
625 */ 841 */
626static int change_clocksource(void *data) 842static int change_clocksource(void *data)
627{ 843{
628 struct timekeeper *tk = &timekeeper; 844 struct timekeeper *tk = &tk_core.timekeeper;
629 struct clocksource *new, *old; 845 struct clocksource *new, *old;
630 unsigned long flags; 846 unsigned long flags;
631 847
632 new = (struct clocksource *) data; 848 new = (struct clocksource *) data;
633 849
634 raw_spin_lock_irqsave(&timekeeper_lock, flags); 850 raw_spin_lock_irqsave(&timekeeper_lock, flags);
635 write_seqcount_begin(&timekeeper_seq); 851 write_seqcount_begin(&tk_core.seq);
636 852
637 timekeeping_forward_now(tk); 853 timekeeping_forward_now(tk);
638 /* 854 /*
@@ -641,7 +857,7 @@ static int change_clocksource(void *data)
641 */ 857 */
642 if (try_module_get(new->owner)) { 858 if (try_module_get(new->owner)) {
643 if (!new->enable || new->enable(new) == 0) { 859 if (!new->enable || new->enable(new) == 0) {
644 old = tk->clock; 860 old = tk->tkr.clock;
645 tk_setup_internals(tk, new); 861 tk_setup_internals(tk, new);
646 if (old->disable) 862 if (old->disable)
647 old->disable(old); 863 old->disable(old);
@@ -652,7 +868,7 @@ static int change_clocksource(void *data)
652 } 868 }
653 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); 869 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
654 870
655 write_seqcount_end(&timekeeper_seq); 871 write_seqcount_end(&tk_core.seq);
656 raw_spin_unlock_irqrestore(&timekeeper_lock, flags); 872 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
657 873
658 return 0; 874 return 0;
@@ -667,29 +883,14 @@ static int change_clocksource(void *data)
667 */ 883 */
668int timekeeping_notify(struct clocksource *clock) 884int timekeeping_notify(struct clocksource *clock)
669{ 885{
670 struct timekeeper *tk = &timekeeper; 886 struct timekeeper *tk = &tk_core.timekeeper;
671 887
672 if (tk->clock == clock) 888 if (tk->tkr.clock == clock)
673 return 0; 889 return 0;
674 stop_machine(change_clocksource, clock, NULL); 890 stop_machine(change_clocksource, clock, NULL);
675 tick_clock_notify(); 891 tick_clock_notify();
676 return tk->clock == clock ? 0 : -1; 892 return tk->tkr.clock == clock ? 0 : -1;
677}
678
679/**
680 * ktime_get_real - get the real (wall-) time in ktime_t format
681 *
682 * returns the time in ktime_t format
683 */
684ktime_t ktime_get_real(void)
685{
686 struct timespec now;
687
688 getnstimeofday(&now);
689
690 return timespec_to_ktime(now);
691} 893}
692EXPORT_SYMBOL_GPL(ktime_get_real);
693 894
694/** 895/**
695 * getrawmonotonic - Returns the raw monotonic time in a timespec 896 * getrawmonotonic - Returns the raw monotonic time in a timespec
@@ -699,18 +900,20 @@ EXPORT_SYMBOL_GPL(ktime_get_real);
699 */ 900 */
700void getrawmonotonic(struct timespec *ts) 901void getrawmonotonic(struct timespec *ts)
701{ 902{
702 struct timekeeper *tk = &timekeeper; 903 struct timekeeper *tk = &tk_core.timekeeper;
904 struct timespec64 ts64;
703 unsigned long seq; 905 unsigned long seq;
704 s64 nsecs; 906 s64 nsecs;
705 907
706 do { 908 do {
707 seq = read_seqcount_begin(&timekeeper_seq); 909 seq = read_seqcount_begin(&tk_core.seq);
708 nsecs = timekeeping_get_ns_raw(tk); 910 nsecs = timekeeping_get_ns_raw(tk);
709 *ts = tk->raw_time; 911 ts64 = tk->raw_time;
710 912
711 } while (read_seqcount_retry(&timekeeper_seq, seq)); 913 } while (read_seqcount_retry(&tk_core.seq, seq));
712 914
713 timespec_add_ns(ts, nsecs); 915 timespec64_add_ns(&ts64, nsecs);
916 *ts = timespec64_to_timespec(ts64);
714} 917}
715EXPORT_SYMBOL(getrawmonotonic); 918EXPORT_SYMBOL(getrawmonotonic);
716 919
@@ -719,16 +922,16 @@ EXPORT_SYMBOL(getrawmonotonic);
719 */ 922 */
720int timekeeping_valid_for_hres(void) 923int timekeeping_valid_for_hres(void)
721{ 924{
722 struct timekeeper *tk = &timekeeper; 925 struct timekeeper *tk = &tk_core.timekeeper;
723 unsigned long seq; 926 unsigned long seq;
724 int ret; 927 int ret;
725 928
726 do { 929 do {
727 seq = read_seqcount_begin(&timekeeper_seq); 930 seq = read_seqcount_begin(&tk_core.seq);
728 931
729 ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; 932 ret = tk->tkr.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
730 933
731 } while (read_seqcount_retry(&timekeeper_seq, seq)); 934 } while (read_seqcount_retry(&tk_core.seq, seq));
732 935
733 return ret; 936 return ret;
734} 937}
@@ -738,16 +941,16 @@ int timekeeping_valid_for_hres(void)
738 */ 941 */
739u64 timekeeping_max_deferment(void) 942u64 timekeeping_max_deferment(void)
740{ 943{
741 struct timekeeper *tk = &timekeeper; 944 struct timekeeper *tk = &tk_core.timekeeper;
742 unsigned long seq; 945 unsigned long seq;
743 u64 ret; 946 u64 ret;
744 947
745 do { 948 do {
746 seq = read_seqcount_begin(&timekeeper_seq); 949 seq = read_seqcount_begin(&tk_core.seq);
747 950
748 ret = tk->clock->max_idle_ns; 951 ret = tk->tkr.clock->max_idle_ns;
749 952
750 } while (read_seqcount_retry(&timekeeper_seq, seq)); 953 } while (read_seqcount_retry(&tk_core.seq, seq));
751 954
752 return ret; 955 return ret;
753} 956}
@@ -787,14 +990,15 @@ void __weak read_boot_clock(struct timespec *ts)
787 */ 990 */
788void __init timekeeping_init(void) 991void __init timekeeping_init(void)
789{ 992{
790 struct timekeeper *tk = &timekeeper; 993 struct timekeeper *tk = &tk_core.timekeeper;
791 struct clocksource *clock; 994 struct clocksource *clock;
792 unsigned long flags; 995 unsigned long flags;
793 struct timespec now, boot, tmp; 996 struct timespec64 now, boot, tmp;
794 997 struct timespec ts;
795 read_persistent_clock(&now);
796 998
797 if (!timespec_valid_strict(&now)) { 999 read_persistent_clock(&ts);
1000 now = timespec_to_timespec64(ts);
1001 if (!timespec64_valid_strict(&now)) {
798 pr_warn("WARNING: Persistent clock returned invalid value!\n" 1002 pr_warn("WARNING: Persistent clock returned invalid value!\n"
799 " Check your CMOS/BIOS settings.\n"); 1003 " Check your CMOS/BIOS settings.\n");
800 now.tv_sec = 0; 1004 now.tv_sec = 0;
@@ -802,8 +1006,9 @@ void __init timekeeping_init(void)
802 } else if (now.tv_sec || now.tv_nsec) 1006 } else if (now.tv_sec || now.tv_nsec)
803 persistent_clock_exist = true; 1007 persistent_clock_exist = true;
804 1008
805 read_boot_clock(&boot); 1009 read_boot_clock(&ts);
806 if (!timespec_valid_strict(&boot)) { 1010 boot = timespec_to_timespec64(ts);
1011 if (!timespec64_valid_strict(&boot)) {
807 pr_warn("WARNING: Boot clock returned invalid value!\n" 1012 pr_warn("WARNING: Boot clock returned invalid value!\n"
808 " Check your CMOS/BIOS settings.\n"); 1013 " Check your CMOS/BIOS settings.\n");
809 boot.tv_sec = 0; 1014 boot.tv_sec = 0;
@@ -811,7 +1016,7 @@ void __init timekeeping_init(void)
811 } 1016 }
812 1017
813 raw_spin_lock_irqsave(&timekeeper_lock, flags); 1018 raw_spin_lock_irqsave(&timekeeper_lock, flags);
814 write_seqcount_begin(&timekeeper_seq); 1019 write_seqcount_begin(&tk_core.seq);
815 ntp_init(); 1020 ntp_init();
816 1021
817 clock = clocksource_default_clock(); 1022 clock = clocksource_default_clock();
@@ -822,24 +1027,21 @@ void __init timekeeping_init(void)
822 tk_set_xtime(tk, &now); 1027 tk_set_xtime(tk, &now);
823 tk->raw_time.tv_sec = 0; 1028 tk->raw_time.tv_sec = 0;
824 tk->raw_time.tv_nsec = 0; 1029 tk->raw_time.tv_nsec = 0;
1030 tk->base_raw.tv64 = 0;
825 if (boot.tv_sec == 0 && boot.tv_nsec == 0) 1031 if (boot.tv_sec == 0 && boot.tv_nsec == 0)
826 boot = tk_xtime(tk); 1032 boot = tk_xtime(tk);
827 1033
828 set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec); 1034 set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
829 tk_set_wall_to_mono(tk, tmp); 1035 tk_set_wall_to_mono(tk, tmp);
830 1036
831 tmp.tv_sec = 0; 1037 timekeeping_update(tk, TK_MIRROR);
832 tmp.tv_nsec = 0;
833 tk_set_sleep_time(tk, tmp);
834
835 memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));
836 1038
837 write_seqcount_end(&timekeeper_seq); 1039 write_seqcount_end(&tk_core.seq);
838 raw_spin_unlock_irqrestore(&timekeeper_lock, flags); 1040 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
839} 1041}
840 1042
841/* time in seconds when suspend began */ 1043/* time in seconds when suspend began */
842static struct timespec timekeeping_suspend_time; 1044static struct timespec64 timekeeping_suspend_time;
843 1045
844/** 1046/**
845 * __timekeeping_inject_sleeptime - Internal function to add sleep interval 1047 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
@@ -849,17 +1051,17 @@ static struct timespec timekeeping_suspend_time;
849 * adds the sleep offset to the timekeeping variables. 1051 * adds the sleep offset to the timekeeping variables.
850 */ 1052 */
851static void __timekeeping_inject_sleeptime(struct timekeeper *tk, 1053static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
852 struct timespec *delta) 1054 struct timespec64 *delta)
853{ 1055{
854 if (!timespec_valid_strict(delta)) { 1056 if (!timespec64_valid_strict(delta)) {
855 printk_deferred(KERN_WARNING 1057 printk_deferred(KERN_WARNING
856 "__timekeeping_inject_sleeptime: Invalid " 1058 "__timekeeping_inject_sleeptime: Invalid "
857 "sleep delta value!\n"); 1059 "sleep delta value!\n");
858 return; 1060 return;
859 } 1061 }
860 tk_xtime_add(tk, delta); 1062 tk_xtime_add(tk, delta);
861 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta)); 1063 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
862 tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta)); 1064 tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
863 tk_debug_account_sleep_time(delta); 1065 tk_debug_account_sleep_time(delta);
864} 1066}
865 1067
@@ -875,7 +1077,8 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
875 */ 1077 */
876void timekeeping_inject_sleeptime(struct timespec *delta) 1078void timekeeping_inject_sleeptime(struct timespec *delta)
877{ 1079{
878 struct timekeeper *tk = &timekeeper; 1080 struct timekeeper *tk = &tk_core.timekeeper;
1081 struct timespec64 tmp;
879 unsigned long flags; 1082 unsigned long flags;
880 1083
881 /* 1084 /*
@@ -886,15 +1089,16 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
886 return; 1089 return;
887 1090
888 raw_spin_lock_irqsave(&timekeeper_lock, flags); 1091 raw_spin_lock_irqsave(&timekeeper_lock, flags);
889 write_seqcount_begin(&timekeeper_seq); 1092 write_seqcount_begin(&tk_core.seq);
890 1093
891 timekeeping_forward_now(tk); 1094 timekeeping_forward_now(tk);
892 1095
893 __timekeeping_inject_sleeptime(tk, delta); 1096 tmp = timespec_to_timespec64(*delta);
1097 __timekeeping_inject_sleeptime(tk, &tmp);
894 1098
895 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); 1099 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
896 1100
897 write_seqcount_end(&timekeeper_seq); 1101 write_seqcount_end(&tk_core.seq);
898 raw_spin_unlock_irqrestore(&timekeeper_lock, flags); 1102 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
899 1103
900 /* signal hrtimers about time change */ 1104 /* signal hrtimers about time change */
@@ -910,20 +1114,22 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
910 */ 1114 */
911static void timekeeping_resume(void) 1115static void timekeeping_resume(void)
912{ 1116{
913 struct timekeeper *tk = &timekeeper; 1117 struct timekeeper *tk = &tk_core.timekeeper;
914 struct clocksource *clock = tk->clock; 1118 struct clocksource *clock = tk->tkr.clock;
915 unsigned long flags; 1119 unsigned long flags;
916 struct timespec ts_new, ts_delta; 1120 struct timespec64 ts_new, ts_delta;
1121 struct timespec tmp;
917 cycle_t cycle_now, cycle_delta; 1122 cycle_t cycle_now, cycle_delta;
918 bool suspendtime_found = false; 1123 bool suspendtime_found = false;
919 1124
920 read_persistent_clock(&ts_new); 1125 read_persistent_clock(&tmp);
1126 ts_new = timespec_to_timespec64(tmp);
921 1127
922 clockevents_resume(); 1128 clockevents_resume();
923 clocksource_resume(); 1129 clocksource_resume();
924 1130
925 raw_spin_lock_irqsave(&timekeeper_lock, flags); 1131 raw_spin_lock_irqsave(&timekeeper_lock, flags);
926 write_seqcount_begin(&timekeeper_seq); 1132 write_seqcount_begin(&tk_core.seq);
927 1133
928 /* 1134 /*
929 * After system resumes, we need to calculate the suspended time and 1135 * After system resumes, we need to calculate the suspended time and
@@ -937,15 +1143,16 @@ static void timekeeping_resume(void)
937 * The less preferred source will only be tried if there is no better 1143 * The less preferred source will only be tried if there is no better
938 * usable source. The rtc part is handled separately in rtc core code. 1144 * usable source. The rtc part is handled separately in rtc core code.
939 */ 1145 */
940 cycle_now = clock->read(clock); 1146 cycle_now = tk->tkr.read(clock);
941 if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) && 1147 if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
942 cycle_now > clock->cycle_last) { 1148 cycle_now > tk->tkr.cycle_last) {
943 u64 num, max = ULLONG_MAX; 1149 u64 num, max = ULLONG_MAX;
944 u32 mult = clock->mult; 1150 u32 mult = clock->mult;
945 u32 shift = clock->shift; 1151 u32 shift = clock->shift;
946 s64 nsec = 0; 1152 s64 nsec = 0;
947 1153
948 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; 1154 cycle_delta = clocksource_delta(cycle_now, tk->tkr.cycle_last,
1155 tk->tkr.mask);
949 1156
950 /* 1157 /*
951 * "cycle_delta * mutl" may cause 64 bits overflow, if the 1158 * "cycle_delta * mutl" may cause 64 bits overflow, if the
@@ -960,10 +1167,10 @@ static void timekeeping_resume(void)
960 } 1167 }
961 nsec += ((u64) cycle_delta * mult) >> shift; 1168 nsec += ((u64) cycle_delta * mult) >> shift;
962 1169
963 ts_delta = ns_to_timespec(nsec); 1170 ts_delta = ns_to_timespec64(nsec);
964 suspendtime_found = true; 1171 suspendtime_found = true;
965 } else if (timespec_compare(&ts_new, &timekeeping_suspend_time) > 0) { 1172 } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
966 ts_delta = timespec_sub(ts_new, timekeeping_suspend_time); 1173 ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
967 suspendtime_found = true; 1174 suspendtime_found = true;
968 } 1175 }
969 1176
@@ -971,11 +1178,11 @@ static void timekeeping_resume(void)
971 __timekeeping_inject_sleeptime(tk, &ts_delta); 1178 __timekeeping_inject_sleeptime(tk, &ts_delta);
972 1179
973 /* Re-base the last cycle value */ 1180 /* Re-base the last cycle value */
974 tk->cycle_last = clock->cycle_last = cycle_now; 1181 tk->tkr.cycle_last = cycle_now;
975 tk->ntp_error = 0; 1182 tk->ntp_error = 0;
976 timekeeping_suspended = 0; 1183 timekeeping_suspended = 0;
977 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); 1184 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
978 write_seqcount_end(&timekeeper_seq); 1185 write_seqcount_end(&tk_core.seq);
979 raw_spin_unlock_irqrestore(&timekeeper_lock, flags); 1186 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
980 1187
981 touch_softlockup_watchdog(); 1188 touch_softlockup_watchdog();
@@ -988,12 +1195,14 @@ static void timekeeping_resume(void)
988 1195
989static int timekeeping_suspend(void) 1196static int timekeeping_suspend(void)
990{ 1197{
991 struct timekeeper *tk = &timekeeper; 1198 struct timekeeper *tk = &tk_core.timekeeper;
992 unsigned long flags; 1199 unsigned long flags;
993 struct timespec delta, delta_delta; 1200 struct timespec64 delta, delta_delta;
994 static struct timespec old_delta; 1201 static struct timespec64 old_delta;
1202 struct timespec tmp;
995 1203
996 read_persistent_clock(&timekeeping_suspend_time); 1204 read_persistent_clock(&tmp);
1205 timekeeping_suspend_time = timespec_to_timespec64(tmp);
997 1206
998 /* 1207 /*
999 * On some systems the persistent_clock can not be detected at 1208 * On some systems the persistent_clock can not be detected at
@@ -1004,7 +1213,7 @@ static int timekeeping_suspend(void)
1004 persistent_clock_exist = true; 1213 persistent_clock_exist = true;
1005 1214
1006 raw_spin_lock_irqsave(&timekeeper_lock, flags); 1215 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1007 write_seqcount_begin(&timekeeper_seq); 1216 write_seqcount_begin(&tk_core.seq);
1008 timekeeping_forward_now(tk); 1217 timekeeping_forward_now(tk);
1009 timekeeping_suspended = 1; 1218 timekeeping_suspended = 1;
1010 1219
@@ -1014,8 +1223,8 @@ static int timekeeping_suspend(void)
1014 * try to compensate so the difference in system time 1223 * try to compensate so the difference in system time
1015 * and persistent_clock time stays close to constant. 1224 * and persistent_clock time stays close to constant.
1016 */ 1225 */
1017 delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time); 1226 delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
1018 delta_delta = timespec_sub(delta, old_delta); 1227 delta_delta = timespec64_sub(delta, old_delta);
1019 if (abs(delta_delta.tv_sec) >= 2) { 1228 if (abs(delta_delta.tv_sec) >= 2) {
1020 /* 1229 /*
1021 * if delta_delta is too large, assume time correction 1230 * if delta_delta is too large, assume time correction
@@ -1025,11 +1234,11 @@ static int timekeeping_suspend(void)
1025 } else { 1234 } else {
1026 /* Otherwise try to adjust old_system to compensate */ 1235 /* Otherwise try to adjust old_system to compensate */
1027 timekeeping_suspend_time = 1236 timekeeping_suspend_time =
1028 timespec_add(timekeeping_suspend_time, delta_delta); 1237 timespec64_add(timekeeping_suspend_time, delta_delta);
1029 } 1238 }
1030 1239
1031 timekeeping_update(tk, TK_MIRROR); 1240 timekeeping_update(tk, TK_MIRROR);
1032 write_seqcount_end(&timekeeper_seq); 1241 write_seqcount_end(&tk_core.seq);
1033 raw_spin_unlock_irqrestore(&timekeeper_lock, flags); 1242 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1034 1243
1035 clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); 1244 clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
@@ -1050,125 +1259,34 @@ static int __init timekeeping_init_ops(void)
1050 register_syscore_ops(&timekeeping_syscore_ops); 1259 register_syscore_ops(&timekeeping_syscore_ops);
1051 return 0; 1260 return 0;
1052} 1261}
1053
1054device_initcall(timekeeping_init_ops); 1262device_initcall(timekeeping_init_ops);
1055 1263
1056/* 1264/*
1057 * If the error is already larger, we look ahead even further 1265 * Apply a multiplier adjustment to the timekeeper
1058 * to compensate for late or lost adjustments.
1059 */ 1266 */
1060static __always_inline int timekeeping_bigadjust(struct timekeeper *tk, 1267static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
1061 s64 error, s64 *interval, 1268 s64 offset,
1062 s64 *offset) 1269 bool negative,
1270 int adj_scale)
1063{ 1271{
1064 s64 tick_error, i; 1272 s64 interval = tk->cycle_interval;
1065 u32 look_ahead, adj; 1273 s32 mult_adj = 1;
1066 s32 error2, mult;
1067
1068 /*
1069 * Use the current error value to determine how much to look ahead.
1070 * The larger the error the slower we adjust for it to avoid problems
1071 * with losing too many ticks, otherwise we would overadjust and
1072 * produce an even larger error. The smaller the adjustment the
1073 * faster we try to adjust for it, as lost ticks can do less harm
1074 * here. This is tuned so that an error of about 1 msec is adjusted
1075 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
1076 */
1077 error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
1078 error2 = abs(error2);
1079 for (look_ahead = 0; error2 > 0; look_ahead++)
1080 error2 >>= 2;
1081 1274
1082 /* 1275 if (negative) {
1083 * Now calculate the error in (1 << look_ahead) ticks, but first 1276 mult_adj = -mult_adj;
1084 * remove the single look ahead already included in the error. 1277 interval = -interval;
1085 */ 1278 offset = -offset;
1086 tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
1087 tick_error -= tk->xtime_interval >> 1;
1088 error = ((error - tick_error) >> look_ahead) + tick_error;
1089
1090 /* Finally calculate the adjustment shift value. */
1091 i = *interval;
1092 mult = 1;
1093 if (error < 0) {
1094 error = -error;
1095 *interval = -*interval;
1096 *offset = -*offset;
1097 mult = -1;
1098 } 1279 }
1099 for (adj = 0; error > i; adj++) 1280 mult_adj <<= adj_scale;
1100 error >>= 1; 1281 interval <<= adj_scale;
1101 1282 offset <<= adj_scale;
1102 *interval <<= adj;
1103 *offset <<= adj;
1104 return mult << adj;
1105}
1106
1107/*
1108 * Adjust the multiplier to reduce the error value,
1109 * this is optimized for the most common adjustments of -1,0,1,
1110 * for other values we can do a bit more work.
1111 */
1112static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
1113{
1114 s64 error, interval = tk->cycle_interval;
1115 int adj;
1116 1283
1117 /* 1284 /*
1118 * The point of this is to check if the error is greater than half
1119 * an interval.
1120 *
1121 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
1122 *
1123 * Note we subtract one in the shift, so that error is really error*2.
1124 * This "saves" dividing(shifting) interval twice, but keeps the
1125 * (error > interval) comparison as still measuring if error is
1126 * larger than half an interval.
1127 *
1128 * Note: It does not "save" on aggravation when reading the code.
1129 */
1130 error = tk->ntp_error >> (tk->ntp_error_shift - 1);
1131 if (error > interval) {
1132 /*
1133 * We now divide error by 4(via shift), which checks if
1134 * the error is greater than twice the interval.
1135 * If it is greater, we need a bigadjust, if its smaller,
1136 * we can adjust by 1.
1137 */
1138 error >>= 2;
1139 if (likely(error <= interval))
1140 adj = 1;
1141 else
1142 adj = timekeeping_bigadjust(tk, error, &interval, &offset);
1143 } else {
1144 if (error < -interval) {
1145 /* See comment above, this is just switched for the negative */
1146 error >>= 2;
1147 if (likely(error >= -interval)) {
1148 adj = -1;
1149 interval = -interval;
1150 offset = -offset;
1151 } else {
1152 adj = timekeeping_bigadjust(tk, error, &interval, &offset);
1153 }
1154 } else {
1155 goto out_adjust;
1156 }
1157 }
1158
1159 if (unlikely(tk->clock->maxadj &&
1160 (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
1161 printk_deferred_once(KERN_WARNING
1162 "Adjusting %s more than 11%% (%ld vs %ld)\n",
1163 tk->clock->name, (long)tk->mult + adj,
1164 (long)tk->clock->mult + tk->clock->maxadj);
1165 }
1166 /*
1167 * So the following can be confusing. 1285 * So the following can be confusing.
1168 * 1286 *
1169 * To keep things simple, lets assume adj == 1 for now. 1287 * To keep things simple, lets assume mult_adj == 1 for now.
1170 * 1288 *
1171 * When adj != 1, remember that the interval and offset values 1289 * When mult_adj != 1, remember that the interval and offset values
1172 * have been appropriately scaled so the math is the same. 1290 * have been appropriately scaled so the math is the same.
1173 * 1291 *
1174 * The basic idea here is that we're increasing the multiplier 1292 * The basic idea here is that we're increasing the multiplier
@@ -1212,12 +1330,78 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
1212 * 1330 *
1213 * XXX - TODO: Doc ntp_error calculation. 1331 * XXX - TODO: Doc ntp_error calculation.
1214 */ 1332 */
1215 tk->mult += adj; 1333 tk->tkr.mult += mult_adj;
1216 tk->xtime_interval += interval; 1334 tk->xtime_interval += interval;
1217 tk->xtime_nsec -= offset; 1335 tk->tkr.xtime_nsec -= offset;
1218 tk->ntp_error -= (interval - offset) << tk->ntp_error_shift; 1336 tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
1337}
1338
1339/*
1340 * Calculate the multiplier adjustment needed to match the frequency
1341 * specified by NTP
1342 */
1343static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
1344 s64 offset)
1345{
1346 s64 interval = tk->cycle_interval;
1347 s64 xinterval = tk->xtime_interval;
1348 s64 tick_error;
1349 bool negative;
1350 u32 adj;
1351
1352 /* Remove any current error adj from freq calculation */
1353 if (tk->ntp_err_mult)
1354 xinterval -= tk->cycle_interval;
1355
1356 tk->ntp_tick = ntp_tick_length();
1357
1358 /* Calculate current error per tick */
1359 tick_error = ntp_tick_length() >> tk->ntp_error_shift;
1360 tick_error -= (xinterval + tk->xtime_remainder);
1361
1362 /* Don't worry about correcting it if its small */
1363 if (likely((tick_error >= 0) && (tick_error <= interval)))
1364 return;
1365
1366 /* preserve the direction of correction */
1367 negative = (tick_error < 0);
1368
1369 /* Sort out the magnitude of the correction */
1370 tick_error = abs(tick_error);
1371 for (adj = 0; tick_error > interval; adj++)
1372 tick_error >>= 1;
1373
1374 /* scale the corrections */
1375 timekeeping_apply_adjustment(tk, offset, negative, adj);
1376}
1377
1378/*
1379 * Adjust the timekeeper's multiplier to the correct frequency
1380 * and also to reduce the accumulated error value.
1381 */
1382static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
1383{
1384 /* Correct for the current frequency error */
1385 timekeeping_freqadjust(tk, offset);
1386
1387 /* Next make a small adjustment to fix any cumulative error */
1388 if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
1389 tk->ntp_err_mult = 1;
1390 timekeeping_apply_adjustment(tk, offset, 0, 0);
1391 } else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
1392 /* Undo any existing error adjustment */
1393 timekeeping_apply_adjustment(tk, offset, 1, 0);
1394 tk->ntp_err_mult = 0;
1395 }
1396
1397 if (unlikely(tk->tkr.clock->maxadj &&
1398 (tk->tkr.mult > tk->tkr.clock->mult + tk->tkr.clock->maxadj))) {
1399 printk_once(KERN_WARNING
1400 "Adjusting %s more than 11%% (%ld vs %ld)\n",
1401 tk->tkr.clock->name, (long)tk->tkr.mult,
1402 (long)tk->tkr.clock->mult + tk->tkr.clock->maxadj);
1403 }
1219 1404
1220out_adjust:
1221 /* 1405 /*
1222 * It may be possible that when we entered this function, xtime_nsec 1406 * It may be possible that when we entered this function, xtime_nsec
1223 * was very small. Further, if we're slightly speeding the clocksource 1407 * was very small. Further, if we're slightly speeding the clocksource
@@ -1232,12 +1416,11 @@ out_adjust:
1232 * We'll correct this error next time through this function, when 1416 * We'll correct this error next time through this function, when
1233 * xtime_nsec is not as small. 1417 * xtime_nsec is not as small.
1234 */ 1418 */
1235 if (unlikely((s64)tk->xtime_nsec < 0)) { 1419 if (unlikely((s64)tk->tkr.xtime_nsec < 0)) {
1236 s64 neg = -(s64)tk->xtime_nsec; 1420 s64 neg = -(s64)tk->tkr.xtime_nsec;
1237 tk->xtime_nsec = 0; 1421 tk->tkr.xtime_nsec = 0;
1238 tk->ntp_error += neg << tk->ntp_error_shift; 1422 tk->ntp_error += neg << tk->ntp_error_shift;
1239 } 1423 }
1240
1241} 1424}
1242 1425
1243/** 1426/**
@@ -1250,26 +1433,26 @@ out_adjust:
1250 */ 1433 */
1251static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk) 1434static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
1252{ 1435{
1253 u64 nsecps = (u64)NSEC_PER_SEC << tk->shift; 1436 u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr.shift;
1254 unsigned int clock_set = 0; 1437 unsigned int clock_set = 0;
1255 1438
1256 while (tk->xtime_nsec >= nsecps) { 1439 while (tk->tkr.xtime_nsec >= nsecps) {
1257 int leap; 1440 int leap;
1258 1441
1259 tk->xtime_nsec -= nsecps; 1442 tk->tkr.xtime_nsec -= nsecps;
1260 tk->xtime_sec++; 1443 tk->xtime_sec++;
1261 1444
1262 /* Figure out if its a leap sec and apply if needed */ 1445 /* Figure out if its a leap sec and apply if needed */
1263 leap = second_overflow(tk->xtime_sec); 1446 leap = second_overflow(tk->xtime_sec);
1264 if (unlikely(leap)) { 1447 if (unlikely(leap)) {
1265 struct timespec ts; 1448 struct timespec64 ts;
1266 1449
1267 tk->xtime_sec += leap; 1450 tk->xtime_sec += leap;
1268 1451
1269 ts.tv_sec = leap; 1452 ts.tv_sec = leap;
1270 ts.tv_nsec = 0; 1453 ts.tv_nsec = 0;
1271 tk_set_wall_to_mono(tk, 1454 tk_set_wall_to_mono(tk,
1272 timespec_sub(tk->wall_to_monotonic, ts)); 1455 timespec64_sub(tk->wall_to_monotonic, ts));
1273 1456
1274 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap); 1457 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
1275 1458
@@ -1301,9 +1484,9 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
1301 1484
1302 /* Accumulate one shifted interval */ 1485 /* Accumulate one shifted interval */
1303 offset -= interval; 1486 offset -= interval;
1304 tk->cycle_last += interval; 1487 tk->tkr.cycle_last += interval;
1305 1488
1306 tk->xtime_nsec += tk->xtime_interval << shift; 1489 tk->tkr.xtime_nsec += tk->xtime_interval << shift;
1307 *clock_set |= accumulate_nsecs_to_secs(tk); 1490 *clock_set |= accumulate_nsecs_to_secs(tk);
1308 1491
1309 /* Accumulate raw time */ 1492 /* Accumulate raw time */
@@ -1317,48 +1500,20 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
1317 tk->raw_time.tv_nsec = raw_nsecs; 1500 tk->raw_time.tv_nsec = raw_nsecs;
1318 1501
1319 /* Accumulate error between NTP and clock interval */ 1502 /* Accumulate error between NTP and clock interval */
1320 tk->ntp_error += ntp_tick_length() << shift; 1503 tk->ntp_error += tk->ntp_tick << shift;
1321 tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) << 1504 tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
1322 (tk->ntp_error_shift + shift); 1505 (tk->ntp_error_shift + shift);
1323 1506
1324 return offset; 1507 return offset;
1325} 1508}
1326 1509
1327#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
1328static inline void old_vsyscall_fixup(struct timekeeper *tk)
1329{
1330 s64 remainder;
1331
1332 /*
1333 * Store only full nanoseconds into xtime_nsec after rounding
1334 * it up and add the remainder to the error difference.
1335 * XXX - This is necessary to avoid small 1ns inconsistnecies caused
1336 * by truncating the remainder in vsyscalls. However, it causes
1337 * additional work to be done in timekeeping_adjust(). Once
1338 * the vsyscall implementations are converted to use xtime_nsec
1339 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
1340 * users are removed, this can be killed.
1341 */
1342 remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
1343 tk->xtime_nsec -= remainder;
1344 tk->xtime_nsec += 1ULL << tk->shift;
1345 tk->ntp_error += remainder << tk->ntp_error_shift;
1346 tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
1347}
1348#else
1349#define old_vsyscall_fixup(tk)
1350#endif
1351
1352
1353
1354/** 1510/**
1355 * update_wall_time - Uses the current clocksource to increment the wall time 1511 * update_wall_time - Uses the current clocksource to increment the wall time
1356 * 1512 *
1357 */ 1513 */
1358void update_wall_time(void) 1514void update_wall_time(void)
1359{ 1515{
1360 struct clocksource *clock; 1516 struct timekeeper *real_tk = &tk_core.timekeeper;
1361 struct timekeeper *real_tk = &timekeeper;
1362 struct timekeeper *tk = &shadow_timekeeper; 1517 struct timekeeper *tk = &shadow_timekeeper;
1363 cycle_t offset; 1518 cycle_t offset;
1364 int shift = 0, maxshift; 1519 int shift = 0, maxshift;
@@ -1371,12 +1526,11 @@ void update_wall_time(void)
1371 if (unlikely(timekeeping_suspended)) 1526 if (unlikely(timekeeping_suspended))
1372 goto out; 1527 goto out;
1373 1528
1374 clock = real_tk->clock;
1375
1376#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET 1529#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
1377 offset = real_tk->cycle_interval; 1530 offset = real_tk->cycle_interval;
1378#else 1531#else
1379 offset = (clock->read(clock) - clock->cycle_last) & clock->mask; 1532 offset = clocksource_delta(tk->tkr.read(tk->tkr.clock),
1533 tk->tkr.cycle_last, tk->tkr.mask);
1380#endif 1534#endif
1381 1535
1382 /* Check if there's really nothing to do */ 1536 /* Check if there's really nothing to do */
@@ -1418,9 +1572,7 @@ void update_wall_time(void)
1418 */ 1572 */
1419 clock_set |= accumulate_nsecs_to_secs(tk); 1573 clock_set |= accumulate_nsecs_to_secs(tk);
1420 1574
1421 write_seqcount_begin(&timekeeper_seq); 1575 write_seqcount_begin(&tk_core.seq);
1422 /* Update clock->cycle_last with the new value */
1423 clock->cycle_last = tk->cycle_last;
1424 /* 1576 /*
1425 * Update the real timekeeper. 1577 * Update the real timekeeper.
1426 * 1578 *
@@ -1428,12 +1580,12 @@ void update_wall_time(void)
1428 * requires changes to all other timekeeper usage sites as 1580 * requires changes to all other timekeeper usage sites as
1429 * well, i.e. move the timekeeper pointer getter into the 1581 * well, i.e. move the timekeeper pointer getter into the
1430 * spinlocked/seqcount protected sections. And we trade this 1582 * spinlocked/seqcount protected sections. And we trade this
1431 * memcpy under the timekeeper_seq against one before we start 1583 * memcpy under the tk_core.seq against one before we start
1432 * updating. 1584 * updating.
1433 */ 1585 */
1434 memcpy(real_tk, tk, sizeof(*tk)); 1586 memcpy(real_tk, tk, sizeof(*tk));
1435 timekeeping_update(real_tk, clock_set); 1587 timekeeping_update(real_tk, clock_set);
1436 write_seqcount_end(&timekeeper_seq); 1588 write_seqcount_end(&tk_core.seq);
1437out: 1589out:
1438 raw_spin_unlock_irqrestore(&timekeeper_lock, flags); 1590 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1439 if (clock_set) 1591 if (clock_set)
@@ -1454,83 +1606,16 @@ out:
1454 */ 1606 */
1455void getboottime(struct timespec *ts) 1607void getboottime(struct timespec *ts)
1456{ 1608{
1457 struct timekeeper *tk = &timekeeper; 1609 struct timekeeper *tk = &tk_core.timekeeper;
1458 struct timespec boottime = { 1610 ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
1459 .tv_sec = tk->wall_to_monotonic.tv_sec +
1460 tk->total_sleep_time.tv_sec,
1461 .tv_nsec = tk->wall_to_monotonic.tv_nsec +
1462 tk->total_sleep_time.tv_nsec
1463 };
1464
1465 set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
1466}
1467EXPORT_SYMBOL_GPL(getboottime);
1468
1469/**
1470 * get_monotonic_boottime - Returns monotonic time since boot
1471 * @ts: pointer to the timespec to be set
1472 *
1473 * Returns the monotonic time since boot in a timespec.
1474 *
1475 * This is similar to CLOCK_MONTONIC/ktime_get_ts, but also
1476 * includes the time spent in suspend.
1477 */
1478void get_monotonic_boottime(struct timespec *ts)
1479{
1480 struct timekeeper *tk = &timekeeper;
1481 struct timespec tomono, sleep;
1482 s64 nsec;
1483 unsigned int seq;
1484
1485 WARN_ON(timekeeping_suspended);
1486
1487 do {
1488 seq = read_seqcount_begin(&timekeeper_seq);
1489 ts->tv_sec = tk->xtime_sec;
1490 nsec = timekeeping_get_ns(tk);
1491 tomono = tk->wall_to_monotonic;
1492 sleep = tk->total_sleep_time;
1493
1494 } while (read_seqcount_retry(&timekeeper_seq, seq));
1495
1496 ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
1497 ts->tv_nsec = 0;
1498 timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec);
1499}
1500EXPORT_SYMBOL_GPL(get_monotonic_boottime);
1501
1502/**
1503 * ktime_get_boottime - Returns monotonic time since boot in a ktime
1504 *
1505 * Returns the monotonic time since boot in a ktime
1506 *
1507 * This is similar to CLOCK_MONTONIC/ktime_get, but also
1508 * includes the time spent in suspend.
1509 */
1510ktime_t ktime_get_boottime(void)
1511{
1512 struct timespec ts;
1513
1514 get_monotonic_boottime(&ts);
1515 return timespec_to_ktime(ts);
1516}
1517EXPORT_SYMBOL_GPL(ktime_get_boottime);
1518
1519/**
1520 * monotonic_to_bootbased - Convert the monotonic time to boot based.
1521 * @ts: pointer to the timespec to be converted
1522 */
1523void monotonic_to_bootbased(struct timespec *ts)
1524{
1525 struct timekeeper *tk = &timekeeper;
1526 1611
1527 *ts = timespec_add(*ts, tk->total_sleep_time); 1612 *ts = ktime_to_timespec(t);
1528} 1613}
1529EXPORT_SYMBOL_GPL(monotonic_to_bootbased); 1614EXPORT_SYMBOL_GPL(getboottime);
1530 1615
1531unsigned long get_seconds(void) 1616unsigned long get_seconds(void)
1532{ 1617{
1533 struct timekeeper *tk = &timekeeper; 1618 struct timekeeper *tk = &tk_core.timekeeper;
1534 1619
1535 return tk->xtime_sec; 1620 return tk->xtime_sec;
1536} 1621}
@@ -1538,43 +1623,44 @@ EXPORT_SYMBOL(get_seconds);
1538 1623
1539struct timespec __current_kernel_time(void) 1624struct timespec __current_kernel_time(void)
1540{ 1625{
1541 struct timekeeper *tk = &timekeeper; 1626 struct timekeeper *tk = &tk_core.timekeeper;
1542 1627
1543 return tk_xtime(tk); 1628 return timespec64_to_timespec(tk_xtime(tk));
1544} 1629}
1545 1630
1546struct timespec current_kernel_time(void) 1631struct timespec current_kernel_time(void)
1547{ 1632{
1548 struct timekeeper *tk = &timekeeper; 1633 struct timekeeper *tk = &tk_core.timekeeper;
1549 struct timespec now; 1634 struct timespec64 now;
1550 unsigned long seq; 1635 unsigned long seq;
1551 1636
1552 do { 1637 do {
1553 seq = read_seqcount_begin(&timekeeper_seq); 1638 seq = read_seqcount_begin(&tk_core.seq);
1554 1639
1555 now = tk_xtime(tk); 1640 now = tk_xtime(tk);
1556 } while (read_seqcount_retry(&timekeeper_seq, seq)); 1641 } while (read_seqcount_retry(&tk_core.seq, seq));
1557 1642
1558 return now; 1643 return timespec64_to_timespec(now);
1559} 1644}
1560EXPORT_SYMBOL(current_kernel_time); 1645EXPORT_SYMBOL(current_kernel_time);
1561 1646
1562struct timespec get_monotonic_coarse(void) 1647struct timespec get_monotonic_coarse(void)
1563{ 1648{
1564 struct timekeeper *tk = &timekeeper; 1649 struct timekeeper *tk = &tk_core.timekeeper;
1565 struct timespec now, mono; 1650 struct timespec64 now, mono;
1566 unsigned long seq; 1651 unsigned long seq;
1567 1652
1568 do { 1653 do {
1569 seq = read_seqcount_begin(&timekeeper_seq); 1654 seq = read_seqcount_begin(&tk_core.seq);
1570 1655
1571 now = tk_xtime(tk); 1656 now = tk_xtime(tk);
1572 mono = tk->wall_to_monotonic; 1657 mono = tk->wall_to_monotonic;
1573 } while (read_seqcount_retry(&timekeeper_seq, seq)); 1658 } while (read_seqcount_retry(&tk_core.seq, seq));
1574 1659
1575 set_normalized_timespec(&now, now.tv_sec + mono.tv_sec, 1660 set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
1576 now.tv_nsec + mono.tv_nsec); 1661 now.tv_nsec + mono.tv_nsec);
1577 return now; 1662
1663 return timespec64_to_timespec(now);
1578} 1664}
1579 1665
1580/* 1666/*
@@ -1587,29 +1673,38 @@ void do_timer(unsigned long ticks)
1587} 1673}
1588 1674
1589/** 1675/**
1590 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic, 1676 * ktime_get_update_offsets_tick - hrtimer helper
1591 * and sleep offsets. 1677 * @offs_real: pointer to storage for monotonic -> realtime offset
1592 * @xtim: pointer to timespec to be set with xtime 1678 * @offs_boot: pointer to storage for monotonic -> boottime offset
1593 * @wtom: pointer to timespec to be set with wall_to_monotonic 1679 * @offs_tai: pointer to storage for monotonic -> clock tai offset
1594 * @sleep: pointer to timespec to be set with time in suspend 1680 *
1681 * Returns monotonic time at last tick and various offsets
1595 */ 1682 */
1596void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, 1683ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot,
1597 struct timespec *wtom, struct timespec *sleep) 1684 ktime_t *offs_tai)
1598{ 1685{
1599 struct timekeeper *tk = &timekeeper; 1686 struct timekeeper *tk = &tk_core.timekeeper;
1600 unsigned long seq; 1687 unsigned int seq;
1688 ktime_t base;
1689 u64 nsecs;
1601 1690
1602 do { 1691 do {
1603 seq = read_seqcount_begin(&timekeeper_seq); 1692 seq = read_seqcount_begin(&tk_core.seq);
1604 *xtim = tk_xtime(tk); 1693
1605 *wtom = tk->wall_to_monotonic; 1694 base = tk->tkr.base_mono;
1606 *sleep = tk->total_sleep_time; 1695 nsecs = tk->tkr.xtime_nsec >> tk->tkr.shift;
1607 } while (read_seqcount_retry(&timekeeper_seq, seq)); 1696
1697 *offs_real = tk->offs_real;
1698 *offs_boot = tk->offs_boot;
1699 *offs_tai = tk->offs_tai;
1700 } while (read_seqcount_retry(&tk_core.seq, seq));
1701
1702 return ktime_add_ns(base, nsecs);
1608} 1703}
1609 1704
1610#ifdef CONFIG_HIGH_RES_TIMERS 1705#ifdef CONFIG_HIGH_RES_TIMERS
1611/** 1706/**
1612 * ktime_get_update_offsets - hrtimer helper 1707 * ktime_get_update_offsets_now - hrtimer helper
1613 * @offs_real: pointer to storage for monotonic -> realtime offset 1708 * @offs_real: pointer to storage for monotonic -> realtime offset
1614 * @offs_boot: pointer to storage for monotonic -> boottime offset 1709 * @offs_boot: pointer to storage for monotonic -> boottime offset
1615 * @offs_tai: pointer to storage for monotonic -> clock tai offset 1710 * @offs_tai: pointer to storage for monotonic -> clock tai offset
@@ -1617,57 +1712,37 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
1617 * Returns current monotonic time and updates the offsets 1712 * Returns current monotonic time and updates the offsets
1618 * Called from hrtimer_interrupt() or retrigger_next_event() 1713 * Called from hrtimer_interrupt() or retrigger_next_event()
1619 */ 1714 */
1620ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot, 1715ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
1621 ktime_t *offs_tai) 1716 ktime_t *offs_tai)
1622{ 1717{
1623 struct timekeeper *tk = &timekeeper; 1718 struct timekeeper *tk = &tk_core.timekeeper;
1624 ktime_t now;
1625 unsigned int seq; 1719 unsigned int seq;
1626 u64 secs, nsecs; 1720 ktime_t base;
1721 u64 nsecs;
1627 1722
1628 do { 1723 do {
1629 seq = read_seqcount_begin(&timekeeper_seq); 1724 seq = read_seqcount_begin(&tk_core.seq);
1630 1725
1631 secs = tk->xtime_sec; 1726 base = tk->tkr.base_mono;
1632 nsecs = timekeeping_get_ns(tk); 1727 nsecs = timekeeping_get_ns(&tk->tkr);
1633 1728
1634 *offs_real = tk->offs_real; 1729 *offs_real = tk->offs_real;
1635 *offs_boot = tk->offs_boot; 1730 *offs_boot = tk->offs_boot;
1636 *offs_tai = tk->offs_tai; 1731 *offs_tai = tk->offs_tai;
1637 } while (read_seqcount_retry(&timekeeper_seq, seq)); 1732 } while (read_seqcount_retry(&tk_core.seq, seq));
1638 1733
1639 now = ktime_add_ns(ktime_set(secs, 0), nsecs); 1734 return ktime_add_ns(base, nsecs);
1640 now = ktime_sub(now, *offs_real);
1641 return now;
1642} 1735}
1643#endif 1736#endif
1644 1737
1645/** 1738/**
1646 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
1647 */
1648ktime_t ktime_get_monotonic_offset(void)
1649{
1650 struct timekeeper *tk = &timekeeper;
1651 unsigned long seq;
1652 struct timespec wtom;
1653
1654 do {
1655 seq = read_seqcount_begin(&timekeeper_seq);
1656 wtom = tk->wall_to_monotonic;
1657 } while (read_seqcount_retry(&timekeeper_seq, seq));
1658
1659 return timespec_to_ktime(wtom);
1660}
1661EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
1662
1663/**
1664 * do_adjtimex() - Accessor function to NTP __do_adjtimex function 1739 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
1665 */ 1740 */
1666int do_adjtimex(struct timex *txc) 1741int do_adjtimex(struct timex *txc)
1667{ 1742{
1668 struct timekeeper *tk = &timekeeper; 1743 struct timekeeper *tk = &tk_core.timekeeper;
1669 unsigned long flags; 1744 unsigned long flags;
1670 struct timespec ts; 1745 struct timespec64 ts;
1671 s32 orig_tai, tai; 1746 s32 orig_tai, tai;
1672 int ret; 1747 int ret;
1673 1748
@@ -1687,10 +1762,10 @@ int do_adjtimex(struct timex *txc)
1687 return ret; 1762 return ret;
1688 } 1763 }
1689 1764
1690 getnstimeofday(&ts); 1765 getnstimeofday64(&ts);
1691 1766
1692 raw_spin_lock_irqsave(&timekeeper_lock, flags); 1767 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1693 write_seqcount_begin(&timekeeper_seq); 1768 write_seqcount_begin(&tk_core.seq);
1694 1769
1695 orig_tai = tai = tk->tai_offset; 1770 orig_tai = tai = tk->tai_offset;
1696 ret = __do_adjtimex(txc, &ts, &tai); 1771 ret = __do_adjtimex(txc, &ts, &tai);
@@ -1699,7 +1774,7 @@ int do_adjtimex(struct timex *txc)
1699 __timekeeping_set_tai_offset(tk, tai); 1774 __timekeeping_set_tai_offset(tk, tai);
1700 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); 1775 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1701 } 1776 }
1702 write_seqcount_end(&timekeeper_seq); 1777 write_seqcount_end(&tk_core.seq);
1703 raw_spin_unlock_irqrestore(&timekeeper_lock, flags); 1778 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1704 1779
1705 if (tai != orig_tai) 1780 if (tai != orig_tai)
@@ -1719,11 +1794,11 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
1719 unsigned long flags; 1794 unsigned long flags;
1720 1795
1721 raw_spin_lock_irqsave(&timekeeper_lock, flags); 1796 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1722 write_seqcount_begin(&timekeeper_seq); 1797 write_seqcount_begin(&tk_core.seq);
1723 1798
1724 __hardpps(phase_ts, raw_ts); 1799 __hardpps(phase_ts, raw_ts);
1725 1800
1726 write_seqcount_end(&timekeeper_seq); 1801 write_seqcount_end(&tk_core.seq);
1727 raw_spin_unlock_irqrestore(&timekeeper_lock, flags); 1802 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1728} 1803}
1729EXPORT_SYMBOL(hardpps); 1804EXPORT_SYMBOL(hardpps);
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
new file mode 100644
index 000000000000..adc1fc98bde3
--- /dev/null
+++ b/kernel/time/timekeeping.h
@@ -0,0 +1,20 @@
1#ifndef _KERNEL_TIME_TIMEKEEPING_H
2#define _KERNEL_TIME_TIMEKEEPING_H
3/*
4 * Internal interfaces for kernel/time/
5 */
6extern ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real,
7 ktime_t *offs_boot,
8 ktime_t *offs_tai);
9extern ktime_t ktime_get_update_offsets_now(ktime_t *offs_real,
10 ktime_t *offs_boot,
11 ktime_t *offs_tai);
12
13extern int timekeeping_valid_for_hres(void);
14extern u64 timekeeping_max_deferment(void);
15extern int timekeeping_inject_offset(struct timespec *ts);
16extern s32 timekeeping_get_tai_offset(void);
17extern void timekeeping_set_tai_offset(s32 tai_offset);
18extern void timekeeping_clocktai(struct timespec *ts);
19
20#endif
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c
index 4d54f97558df..f6bd65236712 100644
--- a/kernel/time/timekeeping_debug.c
+++ b/kernel/time/timekeeping_debug.c
@@ -67,7 +67,7 @@ static int __init tk_debug_sleep_time_init(void)
67} 67}
68late_initcall(tk_debug_sleep_time_init); 68late_initcall(tk_debug_sleep_time_init);
69 69
70void tk_debug_account_sleep_time(struct timespec *t) 70void tk_debug_account_sleep_time(struct timespec64 *t)
71{ 71{
72 sleep_time_bin[fls(t->tv_sec)]++; 72 sleep_time_bin[fls(t->tv_sec)]++;
73} 73}
diff --git a/kernel/time/timekeeping_internal.h b/kernel/time/timekeeping_internal.h
index 13323ea08ffa..4ea005a7f9da 100644
--- a/kernel/time/timekeeping_internal.h
+++ b/kernel/time/timekeeping_internal.h
@@ -3,12 +3,27 @@
3/* 3/*
4 * timekeeping debug functions 4 * timekeeping debug functions
5 */ 5 */
6#include <linux/clocksource.h>
6#include <linux/time.h> 7#include <linux/time.h>
7 8
8#ifdef CONFIG_DEBUG_FS 9#ifdef CONFIG_DEBUG_FS
9extern void tk_debug_account_sleep_time(struct timespec *t); 10extern void tk_debug_account_sleep_time(struct timespec64 *t);
10#else 11#else
11#define tk_debug_account_sleep_time(x) 12#define tk_debug_account_sleep_time(x)
12#endif 13#endif
13 14
15#ifdef CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE
16static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)
17{
18 cycle_t ret = (now - last) & mask;
19
20 return (s64) ret > 0 ? ret : 0;
21}
22#else
23static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)
24{
25 return (now - last) & mask;
26}
27#endif
28
14#endif /* _TIMEKEEPING_INTERNAL_H */ 29#endif /* _TIMEKEEPING_INTERNAL_H */
diff --git a/kernel/timer.c b/kernel/time/timer.c
index 3bb01a323b2a..aca5dfe2fa3d 100644
--- a/kernel/timer.c
+++ b/kernel/time/timer.c
@@ -82,6 +82,7 @@ struct tvec_base {
82 unsigned long next_timer; 82 unsigned long next_timer;
83 unsigned long active_timers; 83 unsigned long active_timers;
84 unsigned long all_timers; 84 unsigned long all_timers;
85 int cpu;
85 struct tvec_root tv1; 86 struct tvec_root tv1;
86 struct tvec tv2; 87 struct tvec tv2;
87 struct tvec tv3; 88 struct tvec tv3;
@@ -409,6 +410,22 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
409 base->next_timer = timer->expires; 410 base->next_timer = timer->expires;
410 } 411 }
411 base->all_timers++; 412 base->all_timers++;
413
414 /*
415 * Check whether the other CPU is in dynticks mode and needs
416 * to be triggered to reevaluate the timer wheel.
417 * We are protected against the other CPU fiddling
418 * with the timer by holding the timer base lock. This also
419 * makes sure that a CPU on the way to stop its tick can not
420 * evaluate the timer wheel.
421 *
422 * Spare the IPI for deferrable timers on idle targets though.
423 * The next busy ticks will take care of it. Except full dynticks
424 * require special care against races with idle_cpu(), lets deal
425 * with that later.
426 */
427 if (!tbase_get_deferrable(base) || tick_nohz_full_cpu(base->cpu))
428 wake_up_nohz_cpu(base->cpu);
412} 429}
413 430
414#ifdef CONFIG_TIMER_STATS 431#ifdef CONFIG_TIMER_STATS
@@ -948,22 +965,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
948 timer_set_base(timer, base); 965 timer_set_base(timer, base);
949 debug_activate(timer, timer->expires); 966 debug_activate(timer, timer->expires);
950 internal_add_timer(base, timer); 967 internal_add_timer(base, timer);
951 /*
952 * Check whether the other CPU is in dynticks mode and needs
953 * to be triggered to reevaluate the timer wheel.
954 * We are protected against the other CPU fiddling
955 * with the timer by holding the timer base lock. This also
956 * makes sure that a CPU on the way to stop its tick can not
957 * evaluate the timer wheel.
958 *
959 * Spare the IPI for deferrable timers on idle targets though.
960 * The next busy ticks will take care of it. Except full dynticks
961 * require special care against races with idle_cpu(), lets deal
962 * with that later.
963 */
964 if (!tbase_get_deferrable(timer->base) || tick_nohz_full_cpu(cpu))
965 wake_up_nohz_cpu(cpu);
966
967 spin_unlock_irqrestore(&base->lock, flags); 968 spin_unlock_irqrestore(&base->lock, flags);
968} 969}
969EXPORT_SYMBOL_GPL(add_timer_on); 970EXPORT_SYMBOL_GPL(add_timer_on);
@@ -1568,6 +1569,7 @@ static int init_timers_cpu(int cpu)
1568 } 1569 }
1569 spin_lock_init(&base->lock); 1570 spin_lock_init(&base->lock);
1570 tvec_base_done[cpu] = 1; 1571 tvec_base_done[cpu] = 1;
1572 base->cpu = cpu;
1571 } else { 1573 } else {
1572 base = per_cpu(tvec_bases, cpu); 1574 base = per_cpu(tvec_bases, cpu);
1573 } 1575 }
diff --git a/kernel/time/udelay_test.c b/kernel/time/udelay_test.c
new file mode 100644
index 000000000000..e622ba365a13
--- /dev/null
+++ b/kernel/time/udelay_test.c
@@ -0,0 +1,168 @@
1/*
2 * udelay() test kernel module
3 *
4 * Test is executed by writing and reading to /sys/kernel/debug/udelay_test
5 * Tests are configured by writing: USECS ITERATIONS
6 * Tests are executed by reading from the same file.
7 * Specifying usecs of 0 or negative values will run multiple tests.
8 *
9 * Copyright (C) 2014 Google, Inc.
10 *
11 * This software is licensed under the terms of the GNU General Public
12 * License version 2, as published by the Free Software Foundation, and
13 * may be copied, distributed, and modified under those terms.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */
20
21#include <linux/debugfs.h>
22#include <linux/delay.h>
23#include <linux/ktime.h>
24#include <linux/module.h>
25#include <linux/uaccess.h>
26
27#define DEFAULT_ITERATIONS 100
28
29#define DEBUGFS_FILENAME "udelay_test"
30
31static DEFINE_MUTEX(udelay_test_lock);
32static struct dentry *udelay_test_debugfs_file;
33static int udelay_test_usecs;
34static int udelay_test_iterations = DEFAULT_ITERATIONS;
35
36static int udelay_test_single(struct seq_file *s, int usecs, uint32_t iters)
37{
38 int min = 0, max = 0, fail_count = 0;
39 uint64_t sum = 0;
40 uint64_t avg;
41 int i;
42 /* Allow udelay to be up to 0.5% fast */
43 int allowed_error_ns = usecs * 5;
44
45 for (i = 0; i < iters; ++i) {
46 struct timespec ts1, ts2;
47 int time_passed;
48
49 ktime_get_ts(&ts1);
50 udelay(usecs);
51 ktime_get_ts(&ts2);
52 time_passed = timespec_to_ns(&ts2) - timespec_to_ns(&ts1);
53
54 if (i == 0 || time_passed < min)
55 min = time_passed;
56 if (i == 0 || time_passed > max)
57 max = time_passed;
58 if ((time_passed + allowed_error_ns) / 1000 < usecs)
59 ++fail_count;
60 WARN_ON(time_passed < 0);
61 sum += time_passed;
62 }
63
64 avg = sum;
65 do_div(avg, iters);
66 seq_printf(s, "%d usecs x %d: exp=%d allowed=%d min=%d avg=%lld max=%d",
67 usecs, iters, usecs * 1000,
68 (usecs * 1000) - allowed_error_ns, min, avg, max);
69 if (fail_count)
70 seq_printf(s, " FAIL=%d", fail_count);
71 seq_puts(s, "\n");
72
73 return 0;
74}
75
76static int udelay_test_show(struct seq_file *s, void *v)
77{
78 int usecs;
79 int iters;
80 int ret = 0;
81
82 mutex_lock(&udelay_test_lock);
83 usecs = udelay_test_usecs;
84 iters = udelay_test_iterations;
85 mutex_unlock(&udelay_test_lock);
86
87 if (usecs > 0 && iters > 0) {
88 return udelay_test_single(s, usecs, iters);
89 } else if (usecs == 0) {
90 struct timespec ts;
91
92 ktime_get_ts(&ts);
93 seq_printf(s, "udelay() test (lpj=%ld kt=%ld.%09ld)\n",
94 loops_per_jiffy, ts.tv_sec, ts.tv_nsec);
95 seq_puts(s, "usage:\n");
96 seq_puts(s, "echo USECS [ITERS] > " DEBUGFS_FILENAME "\n");
97 seq_puts(s, "cat " DEBUGFS_FILENAME "\n");
98 }
99
100 return ret;
101}
102
/* debugfs open: bind the single-record seq_file show handler. */
103static int udelay_test_open(struct inode *inode, struct file *file)
104{
105	return single_open(file, udelay_test_show, inode->i_private);
106}
107
108static ssize_t udelay_test_write(struct file *file, const char __user *buf,
109 size_t count, loff_t *pos)
110{
111 char lbuf[32];
112 int ret;
113 int usecs;
114 int iters;
115
116 if (count >= sizeof(lbuf))
117 return -EINVAL;
118
119 if (copy_from_user(lbuf, buf, count))
120 return -EFAULT;
121 lbuf[count] = '\0';
122
123 ret = sscanf(lbuf, "%d %d", &usecs, &iters);
124 if (ret < 1)
125 return -EINVAL;
126 else if (ret < 2)
127 iters = DEFAULT_ITERATIONS;
128
129 mutex_lock(&udelay_test_lock);
130 udelay_test_usecs = usecs;
131 udelay_test_iterations = iters;
132 mutex_unlock(&udelay_test_lock);
133
134 return count;
135}
136
/* debugfs file_operations: standard seq_file read path plus the
 * custom write handler that reconfigures the test parameters. */
137static const struct file_operations udelay_test_debugfs_ops = {
138	.owner = THIS_MODULE,
139	.open = udelay_test_open,
140	.read = seq_read,
141	.write = udelay_test_write,
142	.llseek = seq_lseek,
143	.release = single_release,
144};
145
/*
 * Module init: create /sys/kernel/debug/udelay_test.
 * NOTE(review): the debugfs_create_file() return value is not checked
 * for NULL/error — confirm this is intentional (the remove path copes
 * with NULL, but init always reports success).
 * NOTE(review): mode is S_IRUSR (0400) although the file has a write
 * handler — presumably relying on root bypassing the mode; verify.
 */
146static int __init udelay_test_init(void)
147{
148	mutex_lock(&udelay_test_lock);
149	udelay_test_debugfs_file = debugfs_create_file(DEBUGFS_FILENAME,
150			S_IRUSR, NULL, NULL, &udelay_test_debugfs_ops);
151	mutex_unlock(&udelay_test_lock);
152
153	return 0;
154}
155
156module_init(udelay_test_init);
157
/* Module exit: tear down the debugfs file created at init time
 * (debugfs_remove() is a no-op for a NULL dentry). */
158static void __exit udelay_test_exit(void)
159{
160	mutex_lock(&udelay_test_lock);
161	debugfs_remove(udelay_test_debugfs_file);
162	mutex_unlock(&udelay_test_lock);
163}
164
165module_exit(udelay_test_exit);
166
167MODULE_AUTHOR("David Riley <davidriley@chromium.org>");
168MODULE_LICENSE("GPL");
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8bb80fe08767..8a528392b1f4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -820,11 +820,12 @@ static struct {
820 const char *name; 820 const char *name;
821 int in_ns; /* is this clock in nanoseconds? */ 821 int in_ns; /* is this clock in nanoseconds? */
822} trace_clocks[] = { 822} trace_clocks[] = {
823 { trace_clock_local, "local", 1 }, 823 { trace_clock_local, "local", 1 },
824 { trace_clock_global, "global", 1 }, 824 { trace_clock_global, "global", 1 },
825 { trace_clock_counter, "counter", 0 }, 825 { trace_clock_counter, "counter", 0 },
826 { trace_clock_jiffies, "uptime", 0 }, 826 { trace_clock_jiffies, "uptime", 0 },
827 { trace_clock, "perf", 1 }, 827 { trace_clock, "perf", 1 },
828 { ktime_get_mono_fast_ns, "mono", 1 },
828 ARCH_TRACE_CLOCKS 829 ARCH_TRACE_CLOCKS
829}; 830};
830 831
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index a1dd9a1b1327..975cb49e32bf 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -31,20 +31,19 @@ void bacct_add_tsk(struct user_namespace *user_ns,
31 struct taskstats *stats, struct task_struct *tsk) 31 struct taskstats *stats, struct task_struct *tsk)
32{ 32{
33 const struct cred *tcred; 33 const struct cred *tcred;
34 struct timespec uptime, ts;
35 cputime_t utime, stime, utimescaled, stimescaled; 34 cputime_t utime, stime, utimescaled, stimescaled;
36 u64 ac_etime; 35 u64 delta;
37 36
38 BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN); 37 BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN);
39 38
40 /* calculate task elapsed time in timespec */ 39 /* calculate task elapsed time in nsec */
41 do_posix_clock_monotonic_gettime(&uptime); 40 delta = ktime_get_ns() - tsk->start_time;
42 ts = timespec_sub(uptime, tsk->start_time); 41 /* Convert to micro seconds */
43 /* rebase elapsed time to usec (should never be negative) */ 42 do_div(delta, NSEC_PER_USEC);
44 ac_etime = timespec_to_ns(&ts); 43 stats->ac_etime = delta;
45 do_div(ac_etime, NSEC_PER_USEC); 44 /* Convert to seconds for btime */
46 stats->ac_etime = ac_etime; 45 do_div(delta, USEC_PER_SEC);
47 stats->ac_btime = get_seconds() - ts.tv_sec; 46 stats->ac_btime = get_seconds() - delta;
48 if (thread_group_leader(tsk)) { 47 if (thread_group_leader(tsk)) {
49 stats->ac_exitcode = tsk->exit_code; 48 stats->ac_exitcode = tsk->exit_code;
50 if (tsk->flags & PF_FORKNOEXEC) 49 if (tsk->flags & PF_FORKNOEXEC)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index f8f45ec0ed46..1f630ad31fc2 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1648,6 +1648,15 @@ config TEST_FIRMWARE
1648 1648
1649 If unsure, say N. 1649 If unsure, say N.
1650 1650
1651config TEST_UDELAY
1652 tristate "udelay test driver"
1653 default n
1654 help
1655 This builds the "udelay_test" module that helps to make sure
1656 that udelay() is working properly.
1657
1658 If unsure, say N.
1659
1651source "samples/Kconfig" 1660source "samples/Kconfig"
1652 1661
1653source "lib/Kconfig.kgdb" 1662source "lib/Kconfig.kgdb"
diff --git a/lib/devres.c b/lib/devres.c
index 6a4aee8a3a7e..f4a195a6efe4 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -86,8 +86,6 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
86} 86}
87EXPORT_SYMBOL(devm_iounmap); 87EXPORT_SYMBOL(devm_iounmap);
88 88
89#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
90
91/** 89/**
92 * devm_ioremap_resource() - check, request region, and ioremap resource 90 * devm_ioremap_resource() - check, request region, and ioremap resource
93 * @dev: generic device to handle the resource for 91 * @dev: generic device to handle the resource for
diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c
index c1b00375c9ad..3ffa4f5509d8 100644
--- a/security/tomoyo/audit.c
+++ b/security/tomoyo/audit.c
@@ -155,11 +155,9 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
155 u8 i; 155 u8 i;
156 if (!buffer) 156 if (!buffer)
157 return NULL; 157 return NULL;
158 { 158
159 struct timeval tv; 159 tomoyo_convert_time(get_seconds(), &stamp);
160 do_gettimeofday(&tv); 160
161 tomoyo_convert_time(tv.tv_sec, &stamp);
162 }
163 pos = snprintf(buffer, tomoyo_buffer_len - 1, 161 pos = snprintf(buffer, tomoyo_buffer_len - 1,
164 "#%04u/%02u/%02u %02u:%02u:%02u# profile=%u mode=%s " 162 "#%04u/%02u/%02u %02u:%02u:%02u# profile=%u mode=%s "
165 "granted=%s (global-pid=%u) task={ pid=%u ppid=%u " 163 "granted=%s (global-pid=%u) task={ pid=%u ppid=%u "
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 283862aebdc8..e0fb75052550 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -2267,13 +2267,11 @@ static unsigned int tomoyo_stat_modified[TOMOYO_MAX_POLICY_STAT];
2267 */ 2267 */
2268void tomoyo_update_stat(const u8 index) 2268void tomoyo_update_stat(const u8 index)
2269{ 2269{
2270 struct timeval tv;
2271 do_gettimeofday(&tv);
2272 /* 2270 /*
2273 * I don't use atomic operations because race condition is not fatal. 2271 * I don't use atomic operations because race condition is not fatal.
2274 */ 2272 */
2275 tomoyo_stat_updated[index]++; 2273 tomoyo_stat_updated[index]++;
2276 tomoyo_stat_modified[index] = tv.tv_sec; 2274 tomoyo_stat_modified[index] = get_seconds();
2277} 2275}
2278 2276
2279/** 2277/**
diff --git a/tools/time/udelay_test.sh b/tools/time/udelay_test.sh
new file mode 100755
index 000000000000..12d46b926917
--- /dev/null
+++ b/tools/time/udelay_test.sh
@@ -0,0 +1,66 @@
1#!/bin/bash
2
3# udelay() test script
4#
5# Test is executed by writing and reading to /sys/kernel/debug/udelay_test
6# and exercises a variety of delays to ensure that udelay() is delaying
7# at least as long as requested (as compared to ktime).
8#
9# Copyright (C) 2014 Google, Inc.
10#
11# This software is licensed under the terms of the GNU General Public
12# License version 2, as published by the Free Software Foundation, and
13# may be copied, distributed, and modified under those terms.
14#
15# This program is distributed in the hope that it will be useful,
16# but WITHOUT ANY WARRANTY; without even the implied warranty of
17# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18# GNU General Public License for more details.
19
# Kernel module under test and the debugfs file it exposes.
readonly MODULE_NAME=udelay_test
readonly UDELAY_PATH=/sys/kernel/debug/udelay_test
22
# Load the test module and create a scratch file for collected output.
# Sets the global $tmp_file consumed by test_one() and cleanup().
setup()
{
	/sbin/modprobe -q "$MODULE_NAME"
	tmp_file=$(mktemp)
}
28
# Run one test pass: configure the requested delay ($1, in usecs) via
# the debugfs file, then read the result, echoing it to stdout while
# appending it to $tmp_file for the final failure scan.
test_one()
{
	local delay=$1
	echo "$delay" > "$UDELAY_PATH"
	tee -a "$tmp_file" < "$UDELAY_PATH"
}
35
# Remove the scratch file and unload the test module.  Quoting matters
# here: the original unquoted `[ -f $tmp_file ]` evaluates true when
# $tmp_file is empty/unset (one-argument test), triggering a bogus rm.
cleanup()
{
	if [ -f "$tmp_file" ]; then
		rm -- "$tmp_file"
	fi
	/sbin/modprobe -q -r "$MODULE_NAME"
}
43
# Cleanup runs on every exit path (normal, error, or signal).
trap cleanup EXIT
setup

# Delay for a variety of times.
# 1..200, 200..500 (by 10), 500..2000 (by 100)
for (( delay = 1; delay < 200; delay += 1 )); do
	test_one "$delay"
done
for (( delay = 200; delay < 500; delay += 10 )); do
	test_one "$delay"
done
for (( delay = 500; delay <= 2000; delay += 100 )); do
	test_one "$delay"
done

# Search for failures.  retcode must be initialized: the original left
# it unset on success, so `exit $retcode` expanded to a bare `exit`.
retcode=0
count=$(grep -c FAIL "$tmp_file")
if [ "${count:-0}" -gt 0 ]; then
	echo "ERROR: $count delays failed to delay long enough" >&2
	retcode=1
fi

exit "$retcode"