author     Al Viro <viro@zeniv.linux.org.uk>    2014-08-07 14:07:57 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>    2014-08-07 14:07:57 -0400
commit     82df9c8bebeff610705f30315f3903cbcb3aa58b (patch)
tree       0d54eac4e7b46876111256caf169b22627441453
parent     19583ca584d6f574384e17fe7613dfaeadcdc4a6 (diff)
parent     ccbf62d8a284cf181ac28c8e8407dd077d90dd4b (diff)
Merge commit 'ccbf62d8a284cf181ac28c8e8407dd077d90dd4b' into for-next
backmerge to avoid kernel/acct.c conflict
75 files changed, 2507 insertions, 1355 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl index cc63f30de166..6e06ebdbe0c7 100644 --- a/Documentation/DocBook/device-drivers.tmpl +++ b/Documentation/DocBook/device-drivers.tmpl | |||
@@ -54,7 +54,7 @@ | |||
54 | !Ikernel/sched/cpupri.c | 54 | !Ikernel/sched/cpupri.c |
55 | !Ikernel/sched/fair.c | 55 | !Ikernel/sched/fair.c |
56 | !Iinclude/linux/completion.h | 56 | !Iinclude/linux/completion.h |
57 | !Ekernel/timer.c | 57 | !Ekernel/time/timer.c |
58 | </sect1> | 58 | </sect1> |
59 | <sect1><title>Wait queues and Wake events</title> | 59 | <sect1><title>Wait queues and Wake events</title> |
60 | !Iinclude/linux/wait.h | 60 | !Iinclude/linux/wait.h |
@@ -63,7 +63,7 @@ | |||
63 | <sect1><title>High-resolution timers</title> | 63 | <sect1><title>High-resolution timers</title> |
64 | !Iinclude/linux/ktime.h | 64 | !Iinclude/linux/ktime.h |
65 | !Iinclude/linux/hrtimer.h | 65 | !Iinclude/linux/hrtimer.h |
66 | !Ekernel/hrtimer.c | 66 | !Ekernel/time/hrtimer.c |
67 | </sect1> | 67 | </sect1> |
68 | <sect1><title>Workqueues and Kevents</title> | 68 | <sect1><title>Workqueues and Kevents</title> |
69 | !Ekernel/workqueue.c | 69 | !Ekernel/workqueue.c |
diff --git a/Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.txt b/Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.txt new file mode 100644 index 000000000000..cd55b52548e4 --- /dev/null +++ b/Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.txt | |||
@@ -0,0 +1,29 @@ | |||
1 | * Cirrus Logic CLPS711X Timer Counter | ||
2 | |||
3 | Required properties: | ||
4 | - compatible: Shall contain "cirrus,clps711x-timer". | ||
5 | - reg : Address and length of the register set. | ||
6 | - interrupts: The interrupt number of the timer. | ||
7 | - clocks : phandle of timer reference clock. | ||
8 | |||
9 | Note: Each timer should have an alias correctly numbered in the "aliases" node. | ||
10 | |||
11 | Example: | ||
12 | aliases { | ||
13 | timer0 = &timer1; | ||
14 | timer1 = &timer2; | ||
15 | }; | ||
16 | |||
17 | timer1: timer@80000300 { | ||
18 | compatible = "cirrus,ep7312-timer", "cirrus,clps711x-timer"; | ||
19 | reg = <0x80000300 0x4>; | ||
20 | interrupts = <8>; | ||
21 | clocks = <&clks 5>; | ||
22 | }; | ||
23 | |||
24 | timer2: timer@80000340 { | ||
25 | compatible = "cirrus,ep7312-timer", "cirrus,clps711x-timer"; | ||
26 | reg = <0x80000340 0x4>; | ||
27 | interrupts = <9>; | ||
28 | clocks = <&clks 6>; | ||
29 | }; | ||
diff --git a/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt new file mode 100644 index 000000000000..7c4408ff4b83 --- /dev/null +++ b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt | |||
@@ -0,0 +1,17 @@ | |||
1 | Mediatek MT6577, MT6572 and MT6589 Timers | ||
2 | --------------------------------------- | ||
3 | |||
4 | Required properties: | ||
5 | - compatible: Should be "mediatek,mt6577-timer" | ||
6 | - reg: Should contain the location and length of the timer registers. | ||
7 | - clocks: Clocks driving the timer hardware. This list should include two | ||
8 | clocks: the system clock first, followed by the RTC clock. | ||
9 | |||
10 | Examples: | ||
11 | |||
12 | timer@10008000 { | ||
13 | compatible = "mediatek,mt6577-timer"; | ||
14 | reg = <0x10008000 0x80>; | ||
15 | interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_LOW>; | ||
16 | clocks = <&system_clk>, <&rtc_clk>; | ||
17 | }; | ||
diff --git a/Documentation/devicetree/bindings/timer/renesas,cmt.txt b/Documentation/devicetree/bindings/timer/renesas,cmt.txt new file mode 100644 index 000000000000..a17418b0ece3 --- /dev/null +++ b/Documentation/devicetree/bindings/timer/renesas,cmt.txt | |||
@@ -0,0 +1,47 @@ | |||
1 | * Renesas R-Car Compare Match Timer (CMT) | ||
2 | |||
3 | The CMT is a multi-channel 16/32/48-bit timer/counter with configurable clock | ||
4 | inputs and programmable compare match. | ||
5 | |||
6 | Channels share hardware resources but their counter and compare match value | ||
7 | are independent. A particular CMT instance can implement only a subset of the | ||
8 | channels supported by the CMT model. Channel indices represent the hardware | ||
9 | position of the channel in the CMT and don't match the channel numbers in the | ||
10 | datasheets. | ||
11 | |||
12 | Required Properties: | ||
13 | |||
14 | - compatible: must contain one of the following. | ||
15 | - "renesas,cmt-32" for the 32-bit CMT | ||
16 | (CMT0 on sh7372, sh73a0 and r8a7740) | ||
17 | - "renesas,cmt-32-fast" for the 32-bit CMT with fast clock support | ||
18 | (CMT[234] on sh7372, sh73a0 and r8a7740) | ||
19 | - "renesas,cmt-48" for the 48-bit CMT | ||
20 | (CMT1 on sh7372, sh73a0 and r8a7740) | ||
21 | - "renesas,cmt-48-gen2" for the second generation 48-bit CMT | ||
22 | (CMT[01] on r8a73a4, r8a7790 and r8a7791) | ||
23 | |||
24 | - reg: base address and length of the registers block for the timer module. | ||
25 | - interrupts: interrupt-specifier for the timer, one per channel. | ||
26 | - clocks: a list of phandle + clock-specifier pairs, one for each entry | ||
27 | in clock-names. | ||
28 | - clock-names: must contain "fck" for the functional clock. | ||
29 | |||
30 | - renesas,channels-mask: bitmask of the available channels. | ||
31 | |||
32 | |||
33 | Example: R8A7790 (R-Car H2) CMT0 node | ||
34 | |||
35 | CMT0 on R8A7790 implements hardware channels 5 and 6 only and names | ||
36 | them channels 0 and 1 in the documentation. | ||
37 | |||
38 | cmt0: timer@ffca0000 { | ||
39 | compatible = "renesas,cmt-48-gen2"; | ||
40 | reg = <0 0xffca0000 0 0x1004>; | ||
41 | interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>, | ||
42 | <0 142 IRQ_TYPE_LEVEL_HIGH>; | ||
43 | clocks = <&mstp1_clks R8A7790_CLK_CMT0>; | ||
44 | clock-names = "fck"; | ||
45 | |||
46 | renesas,channels-mask = <0x60>; | ||
47 | }; | ||
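
A note on the example above: renesas,channels-mask is a plain bitmask of the implemented hardware channel indices, so channels 5 and 6 give 0x60. A minimal sketch of that arithmetic (illustrative only, not part of the binding or the patch):

```c
#include <stdio.h>

int main(void)
{
	/* CMT0 on R8A7790 implements hardware channels 5 and 6 only. */
	unsigned int channels_mask = (1u << 5) | (1u << 6);

	printf("renesas,channels-mask = <0x%x>\n", channels_mask); /* prints 0x60 */
	return 0;
}
```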
diff --git a/Documentation/devicetree/bindings/timer/renesas,mtu2.txt b/Documentation/devicetree/bindings/timer/renesas,mtu2.txt new file mode 100644 index 000000000000..917453f826bc --- /dev/null +++ b/Documentation/devicetree/bindings/timer/renesas,mtu2.txt | |||
@@ -0,0 +1,39 @@ | |||
1 | * Renesas R-Car Multi-Function Timer Pulse Unit 2 (MTU2) | ||
2 | |||
3 | The MTU2 is a multi-purpose, multi-channel timer/counter with configurable | ||
4 | clock inputs and programmable compare match. | ||
5 | |||
6 | Channels share hardware resources but their counter and compare match value | ||
7 | are independent. The MTU2 hardware supports five channels indexed from 0 to 4. | ||
8 | |||
9 | Required Properties: | ||
10 | |||
11 | - compatible: must contain "renesas,mtu2" | ||
12 | |||
13 | - reg: base address and length of the registers block for the timer module. | ||
14 | |||
15 | - interrupts: interrupt specifiers for the timer, one for each entry in | ||
16 | interrupt-names. | ||
17 | - interrupt-names: must contain one entry named "tgi?a" for each enabled | ||
18 | channel, where "?" is the channel index expressed as one digit from "0" to | ||
19 | "4". | ||
20 | |||
21 | - clocks: a list of phandle + clock-specifier pairs, one for each entry | ||
22 | in clock-names. | ||
23 | - clock-names: must contain "fck" for the functional clock. | ||
24 | |||
25 | |||
26 | Example: R7S72100 (RZ/A1H) MTU2 node | ||
27 | |||
28 | mtu2: timer@fcff0000 { | ||
29 | compatible = "renesas,mtu2"; | ||
30 | reg = <0xfcff0000 0x400>; | ||
31 | interrupts = <0 139 IRQ_TYPE_LEVEL_HIGH>, | ||
32 | <0 146 IRQ_TYPE_LEVEL_HIGH>, | ||
33 | <0 150 IRQ_TYPE_LEVEL_HIGH>, | ||
34 | <0 154 IRQ_TYPE_LEVEL_HIGH>, | ||
35 | <0 159 IRQ_TYPE_LEVEL_HIGH>; | ||
36 | interrupt-names = "tgi0a", "tgi1a", "tgi2a", "tgi3a", "tgi4a"; | ||
37 | clocks = <&mstp3_clks R7S72100_CLK_MTU2>; | ||
38 | clock-names = "fck"; | ||
39 | }; | ||
diff --git a/Documentation/devicetree/bindings/timer/renesas,tmu.txt b/Documentation/devicetree/bindings/timer/renesas,tmu.txt new file mode 100644 index 000000000000..425d0c5f4aee --- /dev/null +++ b/Documentation/devicetree/bindings/timer/renesas,tmu.txt | |||
@@ -0,0 +1,39 @@ | |||
1 | * Renesas R-Car Timer Unit (TMU) | ||
2 | |||
3 | The TMU is a 32-bit timer/counter with configurable clock inputs and | ||
4 | programmable compare match. | ||
5 | |||
6 | Channels share hardware resources but their counter and compare match value | ||
7 | are independent. The TMU hardware supports up to three channels. | ||
8 | |||
9 | Required Properties: | ||
10 | |||
11 | - compatible: must contain "renesas,tmu" | ||
12 | |||
13 | - reg: base address and length of the registers block for the timer module. | ||
14 | |||
15 | - interrupts: interrupt-specifier for the timer, one per channel. | ||
16 | |||
17 | - clocks: a list of phandle + clock-specifier pairs, one for each entry | ||
18 | in clock-names. | ||
19 | - clock-names: must contain "fck" for the functional clock. | ||
20 | |||
21 | Optional Properties: | ||
22 | |||
23 | - #renesas,channels: number of channels implemented by the timer, must be 2 | ||
24 | or 3 (if not specified the value defaults to 3). | ||
25 | |||
26 | |||
27 | Example: R8A7779 (R-Car H1) TMU0 node | ||
28 | |||
29 | tmu0: timer@ffd80000 { | ||
30 | compatible = "renesas,tmu"; | ||
31 | reg = <0xffd80000 0x30>; | ||
32 | interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>, | ||
33 | <0 33 IRQ_TYPE_LEVEL_HIGH>, | ||
34 | <0 34 IRQ_TYPE_LEVEL_HIGH>; | ||
35 | clocks = <&mstp0_clks R8A7779_CLK_TMU0>; | ||
36 | clock-names = "fck"; | ||
37 | |||
38 | #renesas,channels = <3>; | ||
39 | }; | ||
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 46a311e728a8..dd5bce848cef 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt | |||
@@ -77,6 +77,7 @@ lsi LSI Corp. (LSI Logic) | |||
77 | lltc Linear Technology Corporation | 77 | lltc Linear Technology Corporation |
78 | marvell Marvell Technology Group Ltd. | 78 | marvell Marvell Technology Group Ltd. |
79 | maxim Maxim Integrated Products | 79 | maxim Maxim Integrated Products |
80 | mediatek MediaTek Inc. | ||
80 | micrel Micrel Inc. | 81 | micrel Micrel Inc. |
81 | microchip Microchip Technology Inc. | 82 | microchip Microchip Technology Inc. |
82 | mosaixtech Mosaix Technologies, Inc. | 83 | mosaixtech Mosaix Technologies, Inc. |
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index ddc531a74d04..eb8a10e22f7c 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt | |||
@@ -1743,6 +1743,25 @@ pair provide additional information particular to the objects they represent. | |||
1743 | While the first three lines are mandatory and always printed, the rest is | 1743 | While the first three lines are mandatory and always printed, the rest is |
1744 | optional and may be omitted if no marks created yet. | 1744 | optional and may be omitted if no marks created yet. |
1745 | 1745 | ||
1746 | Timerfd files | ||
1747 | ~~~~~~~~~~~~~ | ||
1748 | |||
1749 | pos: 0 | ||
1750 | flags: 02 | ||
1751 | mnt_id: 9 | ||
1752 | clockid: 0 | ||
1753 | ticks: 0 | ||
1754 | settime flags: 01 | ||
1755 | it_value: (0, 49406829) | ||
1756 | it_interval: (1, 0) | ||
1757 | |||
1758 | where 'clockid' is the clock type and 'ticks' is the number of timer expirations | ||
1759 | that have occurred [see timerfd_create(2) for details]. 'settime flags' are the | ||
1760 | flags, in octal form, used to set up the timer [see timerfd_settime(2) for | ||
1761 | details]. 'it_value' is the remaining time until the timer expires. | ||
1762 | 'it_interval' is the interval for the timer. Note that the timer might be set up | ||
1763 | with the TIMER_ABSTIME option, which will be shown in 'settime flags', but | ||
1764 | 'it_value' still shows the timer's remaining time. | ||
1746 | 1765 | ||
1747 | ------------------------------------------------------------------------------ | 1766 | ------------------------------------------------------------------------------ |
1748 | Configuring procfs | 1767 | Configuring procfs |
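
For reference, a minimal user-space sketch (not part of this patch) that creates and arms a timerfd whose fdinfo entry would resemble the sample added above, using CLOCK_REALTIME (clockid 0) and a one-second interval:

```c
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/timerfd.h>

int main(void)
{
	struct itimerspec its = {
		.it_value    = { .tv_sec = 0, .tv_nsec = 100000000 }, /* first expiry in 100 ms */
		.it_interval = { .tv_sec = 1, .tv_nsec = 0 },         /* then every second */
	};
	int fd = timerfd_create(CLOCK_REALTIME, 0);

	if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0) {
		perror("timerfd");
		return 1;
	}

	printf("cat /proc/%d/fdinfo/%d\n", (int)getpid(), fd);
	pause(); /* keep the descriptor open so fdinfo can be inspected */
	return 0;
}
```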
diff --git a/MAINTAINERS b/MAINTAINERS index c2066f4c3286..117945edf26c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -4204,7 +4204,7 @@ L: linux-kernel@vger.kernel.org | |||
4204 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core | 4204 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core |
4205 | S: Maintained | 4205 | S: Maintained |
4206 | F: Documentation/timers/ | 4206 | F: Documentation/timers/ |
4207 | F: kernel/hrtimer.c | 4207 | F: kernel/time/hrtimer.c |
4208 | F: kernel/time/clockevents.c | 4208 | F: kernel/time/clockevents.c |
4209 | F: kernel/time/tick*.* | 4209 | F: kernel/time/tick*.* |
4210 | F: kernel/time/timer_*.c | 4210 | F: kernel/time/timer_*.c |
@@ -7026,10 +7026,10 @@ POSIX CLOCKS and TIMERS | |||
7026 | M: Thomas Gleixner <tglx@linutronix.de> | 7026 | M: Thomas Gleixner <tglx@linutronix.de> |
7027 | L: linux-kernel@vger.kernel.org | 7027 | L: linux-kernel@vger.kernel.org |
7028 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core | 7028 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core |
7029 | S: Supported | 7029 | S: Maintained |
7030 | F: fs/timerfd.c | 7030 | F: fs/timerfd.c |
7031 | F: include/linux/timer* | 7031 | F: include/linux/timer* |
7032 | F: kernel/*timer* | 7032 | F: kernel/time/*timer* |
7033 | 7033 | ||
7034 | POWER SUPPLY CLASS/SUBSYSTEM and DRIVERS | 7034 | POWER SUPPLY CLASS/SUBSYSTEM and DRIVERS |
7035 | M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> | 7035 | M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 290f02ee0157..5f38033f7a9f 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -65,7 +65,6 @@ config ARM | |||
65 | select HAVE_UID16 | 65 | select HAVE_UID16 |
66 | select HAVE_VIRT_CPU_ACCOUNTING_GEN | 66 | select HAVE_VIRT_CPU_ACCOUNTING_GEN |
67 | select IRQ_FORCED_THREADING | 67 | select IRQ_FORCED_THREADING |
68 | select KTIME_SCALAR | ||
69 | select MODULES_USE_ELF_REL | 68 | select MODULES_USE_ELF_REL |
70 | select NO_BOOTMEM | 69 | select NO_BOOTMEM |
71 | select OLD_SIGACTION | 70 | select OLD_SIGACTION |
@@ -635,6 +634,7 @@ config ARCH_PXA | |||
635 | select AUTO_ZRELADDR | 634 | select AUTO_ZRELADDR |
636 | select CLKDEV_LOOKUP | 635 | select CLKDEV_LOOKUP |
637 | select CLKSRC_MMIO | 636 | select CLKSRC_MMIO |
637 | select CLKSRC_OF | ||
638 | select GENERIC_CLOCKEVENTS | 638 | select GENERIC_CLOCKEVENTS |
639 | select GPIO_PXA | 639 | select GPIO_PXA |
640 | select HAVE_IDE | 640 | select HAVE_IDE |
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile index 648867a8caa8..2fe1824c6dcb 100644 --- a/arch/arm/mach-pxa/Makefile +++ b/arch/arm/mach-pxa/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | # Common support (must be linked before board specific support) | 5 | # Common support (must be linked before board specific support) |
6 | obj-y += clock.o devices.o generic.o irq.o \ | 6 | obj-y += clock.o devices.o generic.o irq.o \ |
7 | time.o reset.o | 7 | reset.o |
8 | obj-$(CONFIG_PM) += pm.o sleep.o standby.o | 8 | obj-$(CONFIG_PM) += pm.o sleep.o standby.o |
9 | 9 | ||
10 | # Generic drivers that other drivers may depend upon | 10 | # Generic drivers that other drivers may depend upon |
diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c index 42254175fcf4..6f38e1af45af 100644 --- a/arch/arm/mach-pxa/generic.c +++ b/arch/arm/mach-pxa/generic.c | |||
@@ -25,11 +25,13 @@ | |||
25 | #include <asm/mach/map.h> | 25 | #include <asm/mach/map.h> |
26 | #include <asm/mach-types.h> | 26 | #include <asm/mach-types.h> |
27 | 27 | ||
28 | #include <mach/irqs.h> | ||
28 | #include <mach/reset.h> | 29 | #include <mach/reset.h> |
29 | #include <mach/smemc.h> | 30 | #include <mach/smemc.h> |
30 | #include <mach/pxa3xx-regs.h> | 31 | #include <mach/pxa3xx-regs.h> |
31 | 32 | ||
32 | #include "generic.h" | 33 | #include "generic.h" |
34 | #include <clocksource/pxa.h> | ||
33 | 35 | ||
34 | void clear_reset_status(unsigned int mask) | 36 | void clear_reset_status(unsigned int mask) |
35 | { | 37 | { |
@@ -57,6 +59,15 @@ unsigned long get_clock_tick_rate(void) | |||
57 | EXPORT_SYMBOL(get_clock_tick_rate); | 59 | EXPORT_SYMBOL(get_clock_tick_rate); |
58 | 60 | ||
59 | /* | 61 | /* |
62 | * For non device-tree builds, keep legacy timer init | ||
63 | */ | ||
64 | void pxa_timer_init(void) | ||
65 | { | ||
66 | pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x40a00000), | ||
67 | get_clock_tick_rate()); | ||
68 | } | ||
69 | |||
70 | /* | ||
60 | * Get the clock frequency as reflected by CCCR and the turbo flag. | 71 | * Get the clock frequency as reflected by CCCR and the turbo flag. |
61 | * We assume these values have been applied via a fcs. | 72 | * We assume these values have been applied via a fcs. |
62 | * If info is not 0 we also display the current settings. | 73 | * If info is not 0 we also display the current settings. |
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c deleted file mode 100644 index fca174e3865d..000000000000 --- a/arch/arm/mach-pxa/time.c +++ /dev/null | |||
@@ -1,162 +0,0 @@ | |||
1 | /* | ||
2 | * arch/arm/mach-pxa/time.c | ||
3 | * | ||
4 | * PXA clocksource, clockevents, and OST interrupt handlers. | ||
5 | * Copyright (c) 2007 by Bill Gatliff <bgat@billgatliff.com>. | ||
6 | * | ||
7 | * Derived from Nicolas Pitre's PXA timer handler Copyright (c) 2001 | ||
8 | * by MontaVista Software, Inc. (Nico, your code rocks!) | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/clockchips.h> | ||
19 | #include <linux/sched_clock.h> | ||
20 | |||
21 | #include <asm/div64.h> | ||
22 | #include <asm/mach/irq.h> | ||
23 | #include <asm/mach/time.h> | ||
24 | #include <mach/regs-ost.h> | ||
25 | #include <mach/irqs.h> | ||
26 | |||
27 | /* | ||
28 | * This is PXA's sched_clock implementation. This has a resolution | ||
29 | * of at least 308 ns and a maximum value of 208 days. | ||
30 | * | ||
31 | * The return value is guaranteed to be monotonic in that range as | ||
32 | * long as there is always less than 582 seconds between successive | ||
33 | * calls to sched_clock() which should always be the case in practice. | ||
34 | */ | ||
35 | |||
36 | static u64 notrace pxa_read_sched_clock(void) | ||
37 | { | ||
38 | return readl_relaxed(OSCR); | ||
39 | } | ||
40 | |||
41 | |||
42 | #define MIN_OSCR_DELTA 16 | ||
43 | |||
44 | static irqreturn_t | ||
45 | pxa_ost0_interrupt(int irq, void *dev_id) | ||
46 | { | ||
47 | struct clock_event_device *c = dev_id; | ||
48 | |||
49 | /* Disarm the compare/match, signal the event. */ | ||
50 | writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER); | ||
51 | writel_relaxed(OSSR_M0, OSSR); | ||
52 | c->event_handler(c); | ||
53 | |||
54 | return IRQ_HANDLED; | ||
55 | } | ||
56 | |||
57 | static int | ||
58 | pxa_osmr0_set_next_event(unsigned long delta, struct clock_event_device *dev) | ||
59 | { | ||
60 | unsigned long next, oscr; | ||
61 | |||
62 | writel_relaxed(readl_relaxed(OIER) | OIER_E0, OIER); | ||
63 | next = readl_relaxed(OSCR) + delta; | ||
64 | writel_relaxed(next, OSMR0); | ||
65 | oscr = readl_relaxed(OSCR); | ||
66 | |||
67 | return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0; | ||
68 | } | ||
69 | |||
70 | static void | ||
71 | pxa_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *dev) | ||
72 | { | ||
73 | switch (mode) { | ||
74 | case CLOCK_EVT_MODE_ONESHOT: | ||
75 | writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER); | ||
76 | writel_relaxed(OSSR_M0, OSSR); | ||
77 | break; | ||
78 | |||
79 | case CLOCK_EVT_MODE_UNUSED: | ||
80 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
81 | /* initializing, released, or preparing for suspend */ | ||
82 | writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER); | ||
83 | writel_relaxed(OSSR_M0, OSSR); | ||
84 | break; | ||
85 | |||
86 | case CLOCK_EVT_MODE_RESUME: | ||
87 | case CLOCK_EVT_MODE_PERIODIC: | ||
88 | break; | ||
89 | } | ||
90 | } | ||
91 | |||
92 | #ifdef CONFIG_PM | ||
93 | static unsigned long osmr[4], oier, oscr; | ||
94 | |||
95 | static void pxa_timer_suspend(struct clock_event_device *cedev) | ||
96 | { | ||
97 | osmr[0] = readl_relaxed(OSMR0); | ||
98 | osmr[1] = readl_relaxed(OSMR1); | ||
99 | osmr[2] = readl_relaxed(OSMR2); | ||
100 | osmr[3] = readl_relaxed(OSMR3); | ||
101 | oier = readl_relaxed(OIER); | ||
102 | oscr = readl_relaxed(OSCR); | ||
103 | } | ||
104 | |||
105 | static void pxa_timer_resume(struct clock_event_device *cedev) | ||
106 | { | ||
107 | /* | ||
108 | * Ensure that we have at least MIN_OSCR_DELTA between match | ||
109 | * register 0 and the OSCR, to guarantee that we will receive | ||
110 | * the one-shot timer interrupt. We adjust OSMR0 in preference | ||
111 | * to OSCR to guarantee that OSCR is monotonically incrementing. | ||
112 | */ | ||
113 | if (osmr[0] - oscr < MIN_OSCR_DELTA) | ||
114 | osmr[0] += MIN_OSCR_DELTA; | ||
115 | |||
116 | writel_relaxed(osmr[0], OSMR0); | ||
117 | writel_relaxed(osmr[1], OSMR1); | ||
118 | writel_relaxed(osmr[2], OSMR2); | ||
119 | writel_relaxed(osmr[3], OSMR3); | ||
120 | writel_relaxed(oier, OIER); | ||
121 | writel_relaxed(oscr, OSCR); | ||
122 | } | ||
123 | #else | ||
124 | #define pxa_timer_suspend NULL | ||
125 | #define pxa_timer_resume NULL | ||
126 | #endif | ||
127 | |||
128 | static struct clock_event_device ckevt_pxa_osmr0 = { | ||
129 | .name = "osmr0", | ||
130 | .features = CLOCK_EVT_FEAT_ONESHOT, | ||
131 | .rating = 200, | ||
132 | .set_next_event = pxa_osmr0_set_next_event, | ||
133 | .set_mode = pxa_osmr0_set_mode, | ||
134 | .suspend = pxa_timer_suspend, | ||
135 | .resume = pxa_timer_resume, | ||
136 | }; | ||
137 | |||
138 | static struct irqaction pxa_ost0_irq = { | ||
139 | .name = "ost0", | ||
140 | .flags = IRQF_TIMER | IRQF_IRQPOLL, | ||
141 | .handler = pxa_ost0_interrupt, | ||
142 | .dev_id = &ckevt_pxa_osmr0, | ||
143 | }; | ||
144 | |||
145 | void __init pxa_timer_init(void) | ||
146 | { | ||
147 | unsigned long clock_tick_rate = get_clock_tick_rate(); | ||
148 | |||
149 | writel_relaxed(0, OIER); | ||
150 | writel_relaxed(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR); | ||
151 | |||
152 | sched_clock_register(pxa_read_sched_clock, 32, clock_tick_rate); | ||
153 | |||
154 | ckevt_pxa_osmr0.cpumask = cpumask_of(0); | ||
155 | |||
156 | setup_irq(IRQ_OST0, &pxa_ost0_irq); | ||
157 | |||
158 | clocksource_mmio_init(OSCR, "oscr0", clock_tick_rate, 200, 32, | ||
159 | clocksource_mmio_readl_up); | ||
160 | clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate, | ||
161 | MIN_OSCR_DELTA * 2, 0x7fffffff); | ||
162 | } | ||
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 0fd6138f6203..4dc89d1f9c48 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig | |||
@@ -23,7 +23,6 @@ config HEXAGON | |||
23 | select GENERIC_IOMAP | 23 | select GENERIC_IOMAP |
24 | select GENERIC_SMP_IDLE_THREAD | 24 | select GENERIC_SMP_IDLE_THREAD |
25 | select STACKTRACE_SUPPORT | 25 | select STACKTRACE_SUPPORT |
26 | select KTIME_SCALAR | ||
27 | select GENERIC_CLOCKEVENTS | 26 | select GENERIC_CLOCKEVENTS |
28 | select GENERIC_CLOCKEVENTS_BROADCAST | 27 | select GENERIC_CLOCKEVENTS_BROADCAST |
29 | select MODULES_USE_ELF_RELA | 28 | select MODULES_USE_ELF_RELA |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index bb63499fc5d3..1afc7a686702 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -137,7 +137,6 @@ config S390 | |||
137 | select HAVE_SYSCALL_TRACEPOINTS | 137 | select HAVE_SYSCALL_TRACEPOINTS |
138 | select HAVE_UID16 if 32BIT | 138 | select HAVE_UID16 if 32BIT |
139 | select HAVE_VIRT_CPU_ACCOUNTING | 139 | select HAVE_VIRT_CPU_ACCOUNTING |
140 | select KTIME_SCALAR if 32BIT | ||
141 | select MODULES_USE_ELF_RELA | 140 | select MODULES_USE_ELF_RELA |
142 | select NO_BOOTMEM | 141 | select NO_BOOTMEM |
143 | select OLD_SIGACTION | 142 | select OLD_SIGACTION |
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c index 462dcd0c1700..ae70155c2f16 100644 --- a/arch/tile/kernel/time.c +++ b/arch/tile/kernel/time.c | |||
@@ -260,7 +260,6 @@ void update_vsyscall_tz(void) | |||
260 | 260 | ||
261 | void update_vsyscall(struct timekeeper *tk) | 261 | void update_vsyscall(struct timekeeper *tk) |
262 | { | 262 | { |
263 | struct timespec wall_time = tk_xtime(tk); | ||
264 | struct timespec *wtm = &tk->wall_to_monotonic; | 263 | struct timespec *wtm = &tk->wall_to_monotonic; |
265 | struct clocksource *clock = tk->clock; | 264 | struct clocksource *clock = tk->clock; |
266 | 265 | ||
@@ -271,12 +270,12 @@ void update_vsyscall(struct timekeeper *tk) | |||
271 | ++vdso_data->tb_update_count; | 270 | ++vdso_data->tb_update_count; |
272 | smp_wmb(); | 271 | smp_wmb(); |
273 | vdso_data->xtime_tod_stamp = clock->cycle_last; | 272 | vdso_data->xtime_tod_stamp = clock->cycle_last; |
274 | vdso_data->xtime_clock_sec = wall_time.tv_sec; | 273 | vdso_data->xtime_clock_sec = tk->xtime_sec; |
275 | vdso_data->xtime_clock_nsec = wall_time.tv_nsec; | 274 | vdso_data->xtime_clock_nsec = tk->xtime_nsec; |
276 | vdso_data->wtom_clock_sec = wtm->tv_sec; | 275 | vdso_data->wtom_clock_sec = wtm->tv_sec; |
277 | vdso_data->wtom_clock_nsec = wtm->tv_nsec; | 276 | vdso_data->wtom_clock_nsec = wtm->tv_nsec; |
278 | vdso_data->mult = clock->mult; | 277 | vdso_data->mult = tk->mult; |
279 | vdso_data->shift = clock->shift; | 278 | vdso_data->shift = tk->shift; |
280 | smp_wmb(); | 279 | smp_wmb(); |
281 | ++vdso_data->tb_update_count; | 280 | ++vdso_data->tb_update_count; |
282 | } | 281 | } |
diff --git a/arch/tile/kernel/vdso/vgettimeofday.c b/arch/tile/kernel/vdso/vgettimeofday.c index 51ec8e46f5f9..e933fb9fbf5c 100644 --- a/arch/tile/kernel/vdso/vgettimeofday.c +++ b/arch/tile/kernel/vdso/vgettimeofday.c | |||
@@ -83,10 +83,11 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) | |||
83 | if (count & 1) | 83 | if (count & 1) |
84 | continue; | 84 | continue; |
85 | 85 | ||
86 | cycles = (get_cycles() - vdso_data->xtime_tod_stamp); | ||
87 | ns = (cycles * vdso_data->mult) >> vdso_data->shift; | ||
88 | sec = vdso_data->xtime_clock_sec; | 86 | sec = vdso_data->xtime_clock_sec; |
89 | ns += vdso_data->xtime_clock_nsec; | 87 | cycles = get_cycles() - vdso_data->xtime_tod_stamp; |
88 | ns = (cycles * vdso_data->mult) + vdso_data->xtime_clock_nsec; | ||
89 | ns >>= vdso_data->shift; | ||
90 | |||
90 | if (ns >= NSEC_PER_SEC) { | 91 | if (ns >= NSEC_PER_SEC) { |
91 | ns -= NSEC_PER_SEC; | 92 | ns -= NSEC_PER_SEC; |
92 | sec += 1; | 93 | sec += 1; |
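
The vgettimeofday change above mirrors the update_vsyscall change: the timekeeper exports xtime_nsec in shifted units (nanoseconds << shift), so the base value must be added before the final shift rather than after. A minimal sketch of the corrected arithmetic, with illustrative names (the shifted-units representation is an assumption drawn from the generic timekeeping code, not code from this patch):

```c
#include <stdint.h>

/* ns_base is assumed to be in shifted-nanosecond units (ns << shift),
 * matching how the generic timekeeper stores tk->xtime_nsec. */
static uint64_t vdso_nsec(uint64_t cycles, uint32_t mult, uint32_t shift,
			  uint64_t ns_base)
{
	uint64_t ns = cycles * mult; /* elapsed time, shifted-ns units */

	ns += ns_base;               /* add the base before shifting */
	return ns >> shift;          /* convert to plain nanoseconds */
}
```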
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d24887b645dc..5b2acad59622 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -111,7 +111,6 @@ config X86 | |||
111 | select ARCH_CLOCKSOURCE_DATA | 111 | select ARCH_CLOCKSOURCE_DATA |
112 | select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC) | 112 | select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC) |
113 | select GENERIC_TIME_VSYSCALL | 113 | select GENERIC_TIME_VSYSCALL |
114 | select KTIME_SCALAR if X86_32 | ||
115 | select GENERIC_STRNCPY_FROM_USER | 114 | select GENERIC_STRNCPY_FROM_USER |
116 | select GENERIC_STRNLEN_USER | 115 | select GENERIC_STRNLEN_USER |
117 | select HAVE_CONTEXT_TRACKING if X86_64 | 116 | select HAVE_CONTEXT_TRACKING if X86_64 |
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 065131cbfcc0..cfd6519df661 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
@@ -1,3 +1,5 @@ | |||
1 | menu "Clock Source drivers" | ||
2 | |||
1 | config CLKSRC_OF | 3 | config CLKSRC_OF |
2 | bool | 4 | bool |
3 | 5 | ||
@@ -125,6 +127,7 @@ config CLKSRC_METAG_GENERIC | |||
125 | 127 | ||
126 | config CLKSRC_EXYNOS_MCT | 128 | config CLKSRC_EXYNOS_MCT |
127 | def_bool y if ARCH_EXYNOS | 129 | def_bool y if ARCH_EXYNOS |
130 | depends on !ARM64 | ||
128 | help | 131 | help |
129 | Support for Multi Core Timer controller on Exynos SoCs. | 132 | Support for Multi Core Timer controller on Exynos SoCs. |
130 | 133 | ||
@@ -149,6 +152,11 @@ config VF_PIT_TIMER | |||
149 | config SYS_SUPPORTS_SH_CMT | 152 | config SYS_SUPPORTS_SH_CMT |
150 | bool | 153 | bool |
151 | 154 | ||
155 | config MTK_TIMER | ||
156 | select CLKSRC_OF | ||
157 | select CLKSRC_MMIO | ||
158 | bool | ||
159 | |||
152 | config SYS_SUPPORTS_SH_MTU2 | 160 | config SYS_SUPPORTS_SH_MTU2 |
153 | bool | 161 | bool |
154 | 162 | ||
@@ -173,7 +181,7 @@ config SH_TIMER_MTU2 | |||
173 | default SYS_SUPPORTS_SH_MTU2 | 181 | default SYS_SUPPORTS_SH_MTU2 |
174 | help | 182 | help |
175 | This enables build of a clockevent driver for the Multi-Function | 183 | This enables build of a clockevent driver for the Multi-Function |
176 | Timer Pulse Unit 2 (TMU2) hardware available on SoCs from Renesas. | 184 | Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas. |
177 | This hardware comes with 16 bit-timer registers. | 185 | This hardware comes with 16 bit-timer registers. |
178 | 186 | ||
179 | config SH_TIMER_TMU | 187 | config SH_TIMER_TMU |
@@ -187,7 +195,7 @@ config SH_TIMER_TMU | |||
187 | 195 | ||
188 | config EM_TIMER_STI | 196 | config EM_TIMER_STI |
189 | bool "Renesas STI timer driver" if COMPILE_TEST | 197 | bool "Renesas STI timer driver" if COMPILE_TEST |
190 | depends on GENERIC_CLOCKEVENTS | 198 | depends on GENERIC_CLOCKEVENTS && HAS_IOMEM |
191 | default SYS_SUPPORTS_EM_STI | 199 | default SYS_SUPPORTS_EM_STI |
192 | help | 200 | help |
193 | This enables build of a clocksource and clockevent driver for | 201 | This enables build of a clocksource and clockevent driver for |
@@ -207,3 +215,5 @@ config CLKSRC_VERSATILE | |||
207 | counter available in the "System Registers" block of | 215 | counter available in the "System Registers" block of |
208 | ARM Versatile, RealView and Versatile Express reference | 216 | ARM Versatile, RealView and Versatile Express reference |
209 | platforms. | 217 | platforms. |
218 | |||
219 | endmenu | ||
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 800b1303c236..7fd9fd1dff42 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile | |||
@@ -16,9 +16,11 @@ obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o | |||
16 | obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o | 16 | obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o |
17 | obj-$(CONFIG_ORION_TIMER) += time-orion.o | 17 | obj-$(CONFIG_ORION_TIMER) += time-orion.o |
18 | obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o | 18 | obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o |
19 | obj-$(CONFIG_ARCH_CLPS711X) += clps711x-timer.o | ||
19 | obj-$(CONFIG_ARCH_MARCO) += timer-marco.o | 20 | obj-$(CONFIG_ARCH_MARCO) += timer-marco.o |
20 | obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o | 21 | obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o |
21 | obj-$(CONFIG_ARCH_MXS) += mxs_timer.o | 22 | obj-$(CONFIG_ARCH_MXS) += mxs_timer.o |
23 | obj-$(CONFIG_ARCH_PXA) += pxa_timer.o | ||
22 | obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o | 24 | obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o |
23 | obj-$(CONFIG_ARCH_U300) += timer-u300.o | 25 | obj-$(CONFIG_ARCH_U300) += timer-u300.o |
24 | obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o | 26 | obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o |
@@ -34,6 +36,7 @@ obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o | |||
34 | obj-$(CONFIG_FSL_FTM_TIMER) += fsl_ftm_timer.o | 36 | obj-$(CONFIG_FSL_FTM_TIMER) += fsl_ftm_timer.o |
35 | obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o | 37 | obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o |
36 | obj-$(CONFIG_CLKSRC_QCOM) += qcom-timer.o | 38 | obj-$(CONFIG_CLKSRC_QCOM) += qcom-timer.o |
39 | obj-$(CONFIG_MTK_TIMER) += mtk_timer.o | ||
37 | 40 | ||
38 | obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o | 41 | obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o |
39 | obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o | 42 | obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o |
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c new file mode 100644 index 000000000000..d83ec1f2fddc --- /dev/null +++ b/drivers/clocksource/clps711x-timer.c | |||
@@ -0,0 +1,131 @@ | |||
1 | /* | ||
2 | * Cirrus Logic CLPS711X clocksource driver | ||
3 | * | ||
4 | * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/clk.h> | ||
13 | #include <linux/clockchips.h> | ||
14 | #include <linux/clocksource.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/io.h> | ||
17 | #include <linux/of_address.h> | ||
18 | #include <linux/of_irq.h> | ||
19 | #include <linux/sched_clock.h> | ||
20 | #include <linux/slab.h> | ||
21 | |||
22 | enum { | ||
23 | CLPS711X_CLKSRC_CLOCKSOURCE, | ||
24 | CLPS711X_CLKSRC_CLOCKEVENT, | ||
25 | }; | ||
26 | |||
27 | static void __iomem *tcd; | ||
28 | |||
29 | static u64 notrace clps711x_sched_clock_read(void) | ||
30 | { | ||
31 | return ~readw(tcd); | ||
32 | } | ||
33 | |||
34 | static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base) | ||
35 | { | ||
36 | unsigned long rate; | ||
37 | |||
38 | if (!base) | ||
39 | return -ENOMEM; | ||
40 | if (IS_ERR(clock)) | ||
41 | return PTR_ERR(clock); | ||
42 | |||
43 | rate = clk_get_rate(clock); | ||
44 | |||
45 | tcd = base; | ||
46 | |||
47 | clocksource_mmio_init(tcd, "clps711x-clocksource", rate, 300, 16, | ||
48 | clocksource_mmio_readw_down); | ||
49 | |||
50 | sched_clock_register(clps711x_sched_clock_read, 16, rate); | ||
51 | |||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | static irqreturn_t clps711x_timer_interrupt(int irq, void *dev_id) | ||
56 | { | ||
57 | struct clock_event_device *evt = dev_id; | ||
58 | |||
59 | evt->event_handler(evt); | ||
60 | |||
61 | return IRQ_HANDLED; | ||
62 | } | ||
63 | |||
64 | static void clps711x_clockevent_set_mode(enum clock_event_mode mode, | ||
65 | struct clock_event_device *evt) | ||
66 | { | ||
67 | } | ||
68 | |||
69 | static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base, | ||
70 | unsigned int irq) | ||
71 | { | ||
72 | struct clock_event_device *clkevt; | ||
73 | unsigned long rate; | ||
74 | |||
75 | if (!irq) | ||
76 | return -EINVAL; | ||
77 | if (!base) | ||
78 | return -ENOMEM; | ||
79 | if (IS_ERR(clock)) | ||
80 | return PTR_ERR(clock); | ||
81 | |||
82 | clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL); | ||
83 | if (!clkevt) | ||
84 | return -ENOMEM; | ||
85 | |||
86 | rate = clk_get_rate(clock); | ||
87 | |||
88 | /* Set Timer prescaler */ | ||
89 | writew(DIV_ROUND_CLOSEST(rate, HZ), base); | ||
90 | |||
91 | clkevt->name = "clps711x-clockevent"; | ||
92 | clkevt->rating = 300; | ||
93 | clkevt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_C3STOP; | ||
94 | clkevt->set_mode = clps711x_clockevent_set_mode; | ||
95 | clkevt->cpumask = cpumask_of(0); | ||
96 | clockevents_config_and_register(clkevt, HZ, 0, 0); | ||
97 | |||
98 | return request_irq(irq, clps711x_timer_interrupt, IRQF_TIMER, | ||
99 | "clps711x-timer", clkevt); | ||
100 | } | ||
101 | |||
102 | void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base, | ||
103 | unsigned int irq) | ||
104 | { | ||
105 | struct clk *tc1 = clk_get_sys("clps711x-timer.0", NULL); | ||
106 | struct clk *tc2 = clk_get_sys("clps711x-timer.1", NULL); | ||
107 | |||
108 | BUG_ON(_clps711x_clksrc_init(tc1, tc1_base)); | ||
109 | BUG_ON(_clps711x_clkevt_init(tc2, tc2_base, irq)); | ||
110 | } | ||
111 | |||
112 | #ifdef CONFIG_CLKSRC_OF | ||
113 | static void __init clps711x_timer_init(struct device_node *np) | ||
114 | { | ||
115 | unsigned int irq = irq_of_parse_and_map(np, 0); | ||
116 | struct clk *clock = of_clk_get(np, 0); | ||
117 | void __iomem *base = of_iomap(np, 0); | ||
118 | |||
119 | switch (of_alias_get_id(np, "timer")) { | ||
120 | case CLPS711X_CLKSRC_CLOCKSOURCE: | ||
121 | BUG_ON(_clps711x_clksrc_init(clock, base)); | ||
122 | break; | ||
123 | case CLPS711X_CLKSRC_CLOCKEVENT: | ||
124 | BUG_ON(_clps711x_clkevt_init(clock, base, irq)); | ||
125 | break; | ||
126 | default: | ||
127 | break; | ||
128 | } | ||
129 | } | ||
130 | CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,clps711x-timer", clps711x_timer_init); | ||
131 | #endif | ||
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index ab51bf20a3ed..9403061a2acc 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c | |||
@@ -94,7 +94,7 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset) | |||
94 | u32 mask; | 94 | u32 mask; |
95 | u32 i; | 95 | u32 i; |
96 | 96 | ||
97 | __raw_writel(value, reg_base + offset); | 97 | writel_relaxed(value, reg_base + offset); |
98 | 98 | ||
99 | if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) { | 99 | if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) { |
100 | stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET; | 100 | stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET; |
@@ -144,8 +144,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset) | |||
144 | 144 | ||
145 | /* Wait maximum 1 ms until written values are applied */ | 145 | /* Wait maximum 1 ms until written values are applied */ |
146 | for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++) | 146 | for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++) |
147 | if (__raw_readl(reg_base + stat_addr) & mask) { | 147 | if (readl_relaxed(reg_base + stat_addr) & mask) { |
148 | __raw_writel(mask, reg_base + stat_addr); | 148 | writel_relaxed(mask, reg_base + stat_addr); |
149 | return; | 149 | return; |
150 | } | 150 | } |
151 | 151 | ||
@@ -157,28 +157,51 @@ static void exynos4_mct_frc_start(void) | |||
157 | { | 157 | { |
158 | u32 reg; | 158 | u32 reg; |
159 | 159 | ||
160 | reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON); | 160 | reg = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON); |
161 | reg |= MCT_G_TCON_START; | 161 | reg |= MCT_G_TCON_START; |
162 | exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON); | 162 | exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON); |
163 | } | 163 | } |
164 | 164 | ||
165 | static cycle_t notrace _exynos4_frc_read(void) | 165 | /** |
166 | * exynos4_read_count_64 - Read all 64-bits of the global counter | ||
167 | * | ||
168 | * This will read all 64-bits of the global counter taking care to make sure | ||
169 | * that the upper and lower half match. Note that reading the MCT can be quite | ||
170 | * slow (hundreds of nanoseconds) so you should use the 32-bit (lower half | ||
171 | * only) version when possible. | ||
172 | * | ||
173 | * Returns the number of cycles in the global counter. | ||
174 | */ | ||
175 | static u64 exynos4_read_count_64(void) | ||
166 | { | 176 | { |
167 | unsigned int lo, hi; | 177 | unsigned int lo, hi; |
168 | u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U); | 178 | u32 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U); |
169 | 179 | ||
170 | do { | 180 | do { |
171 | hi = hi2; | 181 | hi = hi2; |
172 | lo = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_L); | 182 | lo = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L); |
173 | hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U); | 183 | hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U); |
174 | } while (hi != hi2); | 184 | } while (hi != hi2); |
175 | 185 | ||
176 | return ((cycle_t)hi << 32) | lo; | 186 | return ((cycle_t)hi << 32) | lo; |
177 | } | 187 | } |
178 | 188 | ||
189 | /** | ||
190 | * exynos4_read_count_32 - Read the lower 32-bits of the global counter | ||
191 | * | ||
192 | * This will read just the lower 32-bits of the global counter. This is marked | ||
193 | * as notrace so it can be used by the scheduler clock. | ||
194 | * | ||
195 | * Returns the number of cycles in the global counter (lower 32 bits). | ||
196 | */ | ||
197 | static u32 notrace exynos4_read_count_32(void) | ||
198 | { | ||
199 | return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L); | ||
200 | } | ||
201 | |||
179 | static cycle_t exynos4_frc_read(struct clocksource *cs) | 202 | static cycle_t exynos4_frc_read(struct clocksource *cs) |
180 | { | 203 | { |
181 | return _exynos4_frc_read(); | 204 | return exynos4_read_count_32(); |
182 | } | 205 | } |
183 | 206 | ||
184 | static void exynos4_frc_resume(struct clocksource *cs) | 207 | static void exynos4_frc_resume(struct clocksource *cs) |
@@ -190,21 +213,23 @@ struct clocksource mct_frc = { | |||
190 | .name = "mct-frc", | 213 | .name = "mct-frc", |
191 | .rating = 400, | 214 | .rating = 400, |
192 | .read = exynos4_frc_read, | 215 | .read = exynos4_frc_read, |
193 | .mask = CLOCKSOURCE_MASK(64), | 216 | .mask = CLOCKSOURCE_MASK(32), |
194 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 217 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
195 | .resume = exynos4_frc_resume, | 218 | .resume = exynos4_frc_resume, |
196 | }; | 219 | }; |
197 | 220 | ||
198 | static u64 notrace exynos4_read_sched_clock(void) | 221 | static u64 notrace exynos4_read_sched_clock(void) |
199 | { | 222 | { |
200 | return _exynos4_frc_read(); | 223 | return exynos4_read_count_32(); |
201 | } | 224 | } |
202 | 225 | ||
203 | static struct delay_timer exynos4_delay_timer; | 226 | static struct delay_timer exynos4_delay_timer; |
204 | 227 | ||
205 | static cycles_t exynos4_read_current_timer(void) | 228 | static cycles_t exynos4_read_current_timer(void) |
206 | { | 229 | { |
207 | return _exynos4_frc_read(); | 230 | BUILD_BUG_ON_MSG(sizeof(cycles_t) != sizeof(u32), |
231 | "cycles_t needs to move to 32-bit for ARM64 usage"); | ||
232 | return exynos4_read_count_32(); | ||
208 | } | 233 | } |
209 | 234 | ||
210 | static void __init exynos4_clocksource_init(void) | 235 | static void __init exynos4_clocksource_init(void) |
@@ -218,14 +243,14 @@ static void __init exynos4_clocksource_init(void) | |||
218 | if (clocksource_register_hz(&mct_frc, clk_rate)) | 243 | if (clocksource_register_hz(&mct_frc, clk_rate)) |
219 | panic("%s: can't register clocksource\n", mct_frc.name); | 244 | panic("%s: can't register clocksource\n", mct_frc.name); |
220 | 245 | ||
221 | sched_clock_register(exynos4_read_sched_clock, 64, clk_rate); | 246 | sched_clock_register(exynos4_read_sched_clock, 32, clk_rate); |
222 | } | 247 | } |
223 | 248 | ||
224 | static void exynos4_mct_comp0_stop(void) | 249 | static void exynos4_mct_comp0_stop(void) |
225 | { | 250 | { |
226 | unsigned int tcon; | 251 | unsigned int tcon; |
227 | 252 | ||
228 | tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON); | 253 | tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON); |
229 | tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC); | 254 | tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC); |
230 | 255 | ||
231 | exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON); | 256 | exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON); |
@@ -238,14 +263,14 @@ static void exynos4_mct_comp0_start(enum clock_event_mode mode, | |||
238 | unsigned int tcon; | 263 | unsigned int tcon; |
239 | cycle_t comp_cycle; | 264 | cycle_t comp_cycle; |
240 | 265 | ||
241 | tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON); | 266 | tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON); |
242 | 267 | ||
243 | if (mode == CLOCK_EVT_MODE_PERIODIC) { | 268 | if (mode == CLOCK_EVT_MODE_PERIODIC) { |
244 | tcon |= MCT_G_TCON_COMP0_AUTO_INC; | 269 | tcon |= MCT_G_TCON_COMP0_AUTO_INC; |
245 | exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR); | 270 | exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR); |
246 | } | 271 | } |
247 | 272 | ||
248 | comp_cycle = exynos4_frc_read(&mct_frc) + cycles; | 273 | comp_cycle = exynos4_read_count_64() + cycles; |
249 | exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L); | 274 | exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L); |
250 | exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U); | 275 | exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U); |
251 | 276 | ||
@@ -327,7 +352,7 @@ static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt) | |||
327 | unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START; | 352 | unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START; |
328 | unsigned long offset = mevt->base + MCT_L_TCON_OFFSET; | 353 | unsigned long offset = mevt->base + MCT_L_TCON_OFFSET; |
329 | 354 | ||
330 | tmp = __raw_readl(reg_base + offset); | 355 | tmp = readl_relaxed(reg_base + offset); |
331 | if (tmp & mask) { | 356 | if (tmp & mask) { |
332 | tmp &= ~mask; | 357 | tmp &= ~mask; |
333 | exynos4_mct_write(tmp, offset); | 358 | exynos4_mct_write(tmp, offset); |
@@ -349,7 +374,7 @@ static void exynos4_mct_tick_start(unsigned long cycles, | |||
349 | /* enable MCT tick interrupt */ | 374 | /* enable MCT tick interrupt */ |
350 | exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET); | 375 | exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET); |
351 | 376 | ||
352 | tmp = __raw_readl(reg_base + mevt->base + MCT_L_TCON_OFFSET); | 377 | tmp = readl_relaxed(reg_base + mevt->base + MCT_L_TCON_OFFSET); |
353 | tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START | | 378 | tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START | |
354 | MCT_L_TCON_INTERVAL_MODE; | 379 | MCT_L_TCON_INTERVAL_MODE; |
355 | exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET); | 380 | exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET); |
@@ -401,7 +426,7 @@ static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) | |||
401 | exynos4_mct_tick_stop(mevt); | 426 | exynos4_mct_tick_stop(mevt); |
402 | 427 | ||
403 | /* Clear the MCT tick interrupt */ | 428 | /* Clear the MCT tick interrupt */ |
404 | if (__raw_readl(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) { | 429 | if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) { |
405 | exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); | 430 | exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); |
406 | return 1; | 431 | return 1; |
407 | } else { | 432 | } else { |
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c new file mode 100644 index 000000000000..32a3d25795d3 --- /dev/null +++ b/drivers/clocksource/mtk_timer.c | |||
@@ -0,0 +1,261 @@ | |||
1 | /* | ||
2 | * Mediatek SoCs General-Purpose Timer handling. | ||
3 | * | ||
4 | * Copyright (C) 2014 Matthias Brugger | ||
5 | * | ||
6 | * Matthias Brugger <matthias.bgg@gmail.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | */ | ||
18 | |||
19 | #include <linux/clk.h> | ||
20 | #include <linux/clockchips.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/irq.h> | ||
23 | #include <linux/irqreturn.h> | ||
24 | #include <linux/of.h> | ||
25 | #include <linux/of_address.h> | ||
26 | #include <linux/of_irq.h> | ||
27 | #include <linux/slab.h> | ||
28 | |||
29 | #define GPT_IRQ_EN_REG 0x00 | ||
30 | #define GPT_IRQ_ENABLE(val) BIT((val) - 1) | ||
31 | #define GPT_IRQ_ACK_REG 0x08 | ||
32 | #define GPT_IRQ_ACK(val) BIT((val) - 1) | ||
33 | |||
34 | #define TIMER_CTRL_REG(val) (0x10 * (val)) | ||
35 | #define TIMER_CTRL_OP(val) (((val) & 0x3) << 4) | ||
36 | #define TIMER_CTRL_OP_ONESHOT (0) | ||
37 | #define TIMER_CTRL_OP_REPEAT (1) | ||
38 | #define TIMER_CTRL_OP_FREERUN (3) | ||
39 | #define TIMER_CTRL_CLEAR (2) | ||
40 | #define TIMER_CTRL_ENABLE (1) | ||
41 | #define TIMER_CTRL_DISABLE (0) | ||
42 | |||
43 | #define TIMER_CLK_REG(val) (0x04 + (0x10 * (val))) | ||
44 | #define TIMER_CLK_SRC(val) (((val) & 0x1) << 4) | ||
45 | #define TIMER_CLK_SRC_SYS13M (0) | ||
46 | #define TIMER_CLK_SRC_RTC32K (1) | ||
47 | #define TIMER_CLK_DIV1 (0x0) | ||
48 | #define TIMER_CLK_DIV2 (0x1) | ||
49 | |||
50 | #define TIMER_CNT_REG(val) (0x08 + (0x10 * (val))) | ||
51 | #define TIMER_CMP_REG(val) (0x0C + (0x10 * (val))) | ||
52 | |||
53 | #define GPT_CLK_EVT 1 | ||
54 | #define GPT_CLK_SRC 2 | ||
55 | |||
56 | struct mtk_clock_event_device { | ||
57 | void __iomem *gpt_base; | ||
58 | u32 ticks_per_jiffy; | ||
59 | struct clock_event_device dev; | ||
60 | }; | ||
61 | |||
62 | static inline struct mtk_clock_event_device *to_mtk_clk( | ||
63 | struct clock_event_device *c) | ||
64 | { | ||
65 | return container_of(c, struct mtk_clock_event_device, dev); | ||
66 | } | ||
67 | |||
68 | static void mtk_clkevt_time_stop(struct mtk_clock_event_device *evt, u8 timer) | ||
69 | { | ||
70 | u32 val; | ||
71 | |||
72 | val = readl(evt->gpt_base + TIMER_CTRL_REG(timer)); | ||
73 | writel(val & ~TIMER_CTRL_ENABLE, evt->gpt_base + | ||
74 | TIMER_CTRL_REG(timer)); | ||
75 | } | ||
76 | |||
77 | static void mtk_clkevt_time_setup(struct mtk_clock_event_device *evt, | ||
78 | unsigned long delay, u8 timer) | ||
79 | { | ||
80 | writel(delay, evt->gpt_base + TIMER_CMP_REG(timer)); | ||
81 | } | ||
82 | |||
83 | static void mtk_clkevt_time_start(struct mtk_clock_event_device *evt, | ||
84 | bool periodic, u8 timer) | ||
85 | { | ||
86 | u32 val; | ||
87 | |||
88 | /* Acknowledge interrupt */ | ||
89 | writel(GPT_IRQ_ACK(timer), evt->gpt_base + GPT_IRQ_ACK_REG); | ||
90 | |||
91 | val = readl(evt->gpt_base + TIMER_CTRL_REG(timer)); | ||
92 | |||
93 | /* Clear 2 bit timer operation mode field */ | ||
94 | val &= ~TIMER_CTRL_OP(0x3); | ||
95 | |||
96 | if (periodic) | ||
97 | val |= TIMER_CTRL_OP(TIMER_CTRL_OP_REPEAT); | ||
98 | else | ||
99 | val |= TIMER_CTRL_OP(TIMER_CTRL_OP_ONESHOT); | ||
100 | |||
101 | writel(val | TIMER_CTRL_ENABLE | TIMER_CTRL_CLEAR, | ||
102 | evt->gpt_base + TIMER_CTRL_REG(timer)); | ||
103 | } | ||
104 | |||
105 | static void mtk_clkevt_mode(enum clock_event_mode mode, | ||
106 | struct clock_event_device *clk) | ||
107 | { | ||
108 | struct mtk_clock_event_device *evt = to_mtk_clk(clk); | ||
109 | |||
110 | mtk_clkevt_time_stop(evt, GPT_CLK_EVT); | ||
111 | |||
112 | switch (mode) { | ||
113 | case CLOCK_EVT_MODE_PERIODIC: | ||
114 | mtk_clkevt_time_setup(evt, evt->ticks_per_jiffy, GPT_CLK_EVT); | ||
115 | mtk_clkevt_time_start(evt, true, GPT_CLK_EVT); | ||
116 | break; | ||
117 | case CLOCK_EVT_MODE_ONESHOT: | ||
118 | /* Timer is enabled in set_next_event */ | ||
119 | break; | ||
120 | case CLOCK_EVT_MODE_UNUSED: | ||
121 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
122 | default: | ||
123 | /* No more interrupts will occur as source is disabled */ | ||
124 | break; | ||
125 | } | ||
126 | } | ||
127 | |||
128 | static int mtk_clkevt_next_event(unsigned long event, | ||
129 | struct clock_event_device *clk) | ||
130 | { | ||
131 | struct mtk_clock_event_device *evt = to_mtk_clk(clk); | ||
132 | |||
133 | mtk_clkevt_time_stop(evt, GPT_CLK_EVT); | ||
134 | mtk_clkevt_time_setup(evt, event, GPT_CLK_EVT); | ||
135 | mtk_clkevt_time_start(evt, false, GPT_CLK_EVT); | ||
136 | |||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | static irqreturn_t mtk_timer_interrupt(int irq, void *dev_id) | ||
141 | { | ||
142 | struct mtk_clock_event_device *evt = dev_id; | ||
143 | |||
144 | /* Acknowledge timer0 irq */ | ||
145 | writel(GPT_IRQ_ACK(GPT_CLK_EVT), evt->gpt_base + GPT_IRQ_ACK_REG); | ||
146 | evt->dev.event_handler(&evt->dev); | ||
147 | |||
148 | return IRQ_HANDLED; | ||
149 | } | ||
150 | |||
151 | static void mtk_timer_global_reset(struct mtk_clock_event_device *evt) | ||
152 | { | ||
153 | /* Disable all interrupts */ | ||
154 | writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG); | ||
155 | /* Acknowledge all interrupts */ | ||
156 | writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG); | ||
157 | } | ||
158 | |||
159 | static void | ||
160 | mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option) | ||
161 | { | ||
162 | writel(TIMER_CTRL_CLEAR | TIMER_CTRL_DISABLE, | ||
163 | evt->gpt_base + TIMER_CTRL_REG(timer)); | ||
164 | |||
165 | writel(TIMER_CLK_SRC(TIMER_CLK_SRC_SYS13M) | TIMER_CLK_DIV1, | ||
166 | evt->gpt_base + TIMER_CLK_REG(timer)); | ||
167 | |||
168 | writel(0x0, evt->gpt_base + TIMER_CMP_REG(timer)); | ||
169 | |||
170 | writel(TIMER_CTRL_OP(option) | TIMER_CTRL_ENABLE, | ||
171 | evt->gpt_base + TIMER_CTRL_REG(timer)); | ||
172 | } | ||
173 | |||
174 | static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer) | ||
175 | { | ||
176 | u32 val; | ||
177 | |||
178 | val = readl(evt->gpt_base + GPT_IRQ_EN_REG); | ||
179 | writel(val | GPT_IRQ_ENABLE(timer), | ||
180 | evt->gpt_base + GPT_IRQ_EN_REG); | ||
181 | } | ||
182 | |||
183 | static void __init mtk_timer_init(struct device_node *node) | ||
184 | { | ||
185 | struct mtk_clock_event_device *evt; | ||
186 | struct resource res; | ||
187 | unsigned long rate = 0; | ||
188 | struct clk *clk; | ||
189 | |||
190 | evt = kzalloc(sizeof(*evt), GFP_KERNEL); | ||
191 | if (!evt) { | ||
192 | pr_warn("Can't allocate mtk clock event driver struct"); | ||
193 | return; | ||
194 | } | ||
195 | |||
196 | evt->dev.name = "mtk_tick"; | ||
197 | evt->dev.rating = 300; | ||
198 | evt->dev.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; | ||
199 | evt->dev.set_mode = mtk_clkevt_mode; | ||
200 | evt->dev.set_next_event = mtk_clkevt_next_event; | ||
201 | evt->dev.cpumask = cpu_possible_mask; | ||
202 | |||
203 | evt->gpt_base = of_io_request_and_map(node, 0, "mtk-timer"); | ||
204 | if (IS_ERR(evt->gpt_base)) { | ||
205 | pr_warn("Can't get resource\n"); | ||
206 | return; | ||
207 | } | ||
208 | |||
209 | evt->dev.irq = irq_of_parse_and_map(node, 0); | ||
210 | if (evt->dev.irq <= 0) { | ||
211 | pr_warn("Can't parse IRQ"); | ||
212 | goto err_mem; | ||
213 | } | ||
214 | |||
215 | clk = of_clk_get(node, 0); | ||
216 | if (IS_ERR(clk)) { | ||
217 | pr_warn("Can't get timer clock"); | ||
218 | goto err_irq; | ||
219 | } | ||
220 | |||
221 | if (clk_prepare_enable(clk)) { | ||
222 | pr_warn("Can't prepare clock"); | ||
223 | goto err_clk_put; | ||
224 | } | ||
225 | rate = clk_get_rate(clk); | ||
226 | |||
227 | if (request_irq(evt->dev.irq, mtk_timer_interrupt, | ||
228 | IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) { | ||
229 | pr_warn("failed to setup irq %d\n", evt->dev.irq); | ||
230 | goto err_clk_disable; | ||
231 | } | ||
232 | |||
233 | evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); | ||
234 | |||
235 | mtk_timer_global_reset(evt); | ||
236 | |||
237 | /* Configure clock source */ | ||
238 | mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN); | ||
239 | clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC), | ||
240 | node->name, rate, 300, 32, clocksource_mmio_readl_up); | ||
241 | |||
242 | /* Configure clock event */ | ||
243 | mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT); | ||
244 | mtk_timer_enable_irq(evt, GPT_CLK_EVT); | ||
245 | |||
246 | clockevents_config_and_register(&evt->dev, rate, 0x3, | ||
247 | 0xffffffff); | ||
248 | return; | ||
249 | |||
250 | err_clk_disable: | ||
251 | clk_disable_unprepare(clk); | ||
252 | err_clk_put: | ||
253 | clk_put(clk); | ||
254 | err_irq: | ||
255 | irq_dispose_mapping(evt->dev.irq); | ||
256 | err_mem: | ||
257 | iounmap(evt->gpt_base); | ||
258 | of_address_to_resource(node, 0, &res); | ||
259 | release_mem_region(res.start, resource_size(&res)); | ||
260 | } | ||
261 | CLOCKSOURCE_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init); | ||
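A quick sanity check on the numbers used above, assuming the source behind TIMER_CLK_SRC_SYS13M really is 13 MHz and that HZ is 100 (both assumptions; the rate actually registered comes from clk_get_rate()): ticks_per_jiffy works out to 130000, and the 32-bit comparator passed as the maximum delta to clockevents_config_and_register() caps a one-shot interval at roughly 330 seconds. A standalone sketch of that arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    #define GPT_RATE    13000000UL  /* assumed SYS13M source rate */
    #define HZ_ASSUMED  100UL       /* assumed kernel HZ */

    int main(void)
    {
            /* DIV_ROUND_UP(rate, HZ), as in mtk_timer_init() */
            unsigned long ticks_per_jiffy =
                    (GPT_RATE + HZ_ASSUMED - 1) / HZ_ASSUMED;
            /* longest programmable one-shot delay with a 32-bit comparator */
            double max_oneshot = (double)0xffffffffUL / GPT_RATE;

            printf("ticks per jiffy: %lu\n", ticks_per_jiffy);   /* 130000 */
            printf("max one-shot:    %.0f s\n", max_oneshot);    /* ~330 s */
            return 0;
    }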
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c new file mode 100644 index 000000000000..941f3f344e08 --- /dev/null +++ b/drivers/clocksource/pxa_timer.c | |||
@@ -0,0 +1,227 @@ | |||
1 | /* | ||
2 | * arch/arm/mach-pxa/time.c | ||
3 | * | ||
4 | * PXA clocksource, clockevents, and OST interrupt handlers. | ||
5 | * Copyright (c) 2007 by Bill Gatliff <bgat@billgatliff.com>. | ||
6 | * | ||
7 | * Derived from Nicolas Pitre's PXA timer handler Copyright (c) 2001 | ||
8 | * by MontaVista Software, Inc. (Nico, your code rocks!) | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/clk.h> | ||
19 | #include <linux/clockchips.h> | ||
20 | #include <linux/of_address.h> | ||
21 | #include <linux/of_irq.h> | ||
22 | #include <linux/sched_clock.h> | ||
23 | |||
24 | #include <asm/div64.h> | ||
25 | |||
26 | #define OSMR0 0x00 /* OS Timer 0 Match Register */ | ||
27 | #define OSMR1 0x04 /* OS Timer 1 Match Register */ | ||
28 | #define OSMR2 0x08 /* OS Timer 2 Match Register */ | ||
29 | #define OSMR3 0x0C /* OS Timer 3 Match Register */ | ||
30 | |||
31 | #define OSCR 0x10 /* OS Timer Counter Register */ | ||
32 | #define OSSR 0x14 /* OS Timer Status Register */ | ||
33 | #define OWER 0x18 /* OS Timer Watchdog Enable Register */ | ||
34 | #define OIER 0x1C /* OS Timer Interrupt Enable Register */ | ||
35 | |||
36 | #define OSSR_M3 (1 << 3) /* Match status channel 3 */ | ||
37 | #define OSSR_M2 (1 << 2) /* Match status channel 2 */ | ||
38 | #define OSSR_M1 (1 << 1) /* Match status channel 1 */ | ||
39 | #define OSSR_M0 (1 << 0) /* Match status channel 0 */ | ||
40 | |||
41 | #define OIER_E0 (1 << 0) /* Interrupt enable channel 0 */ | ||
42 | |||
43 | /* | ||
44 | * This is PXA's sched_clock implementation. This has a resolution | ||
45 | * of at least 308 ns and a maximum value of 208 days. | ||
46 | * | ||
47 | * The return value is guaranteed to be monotonic in that range as | ||
48 | * long as there is always less than 582 seconds between successive | ||
49 | * calls to sched_clock() which should always be the case in practice. | ||
50 | */ | ||
51 | |||
52 | #define timer_readl(reg) readl_relaxed(timer_base + (reg)) | ||
53 | #define timer_writel(val, reg) writel_relaxed((val), timer_base + (reg)) | ||
54 | |||
55 | static void __iomem *timer_base; | ||
56 | |||
57 | static u64 notrace pxa_read_sched_clock(void) | ||
58 | { | ||
59 | return timer_readl(OSCR); | ||
60 | } | ||
61 | |||
62 | |||
63 | #define MIN_OSCR_DELTA 16 | ||
64 | |||
65 | static irqreturn_t | ||
66 | pxa_ost0_interrupt(int irq, void *dev_id) | ||
67 | { | ||
68 | struct clock_event_device *c = dev_id; | ||
69 | |||
70 | /* Disarm the compare/match, signal the event. */ | ||
71 | timer_writel(timer_readl(OIER) & ~OIER_E0, OIER); | ||
72 | timer_writel(OSSR_M0, OSSR); | ||
73 | c->event_handler(c); | ||
74 | |||
75 | return IRQ_HANDLED; | ||
76 | } | ||
77 | |||
78 | static int | ||
79 | pxa_osmr0_set_next_event(unsigned long delta, struct clock_event_device *dev) | ||
80 | { | ||
81 | unsigned long next, oscr; | ||
82 | |||
83 | timer_writel(timer_readl(OIER) | OIER_E0, OIER); | ||
84 | next = timer_readl(OSCR) + delta; | ||
85 | timer_writel(next, OSMR0); | ||
86 | oscr = timer_readl(OSCR); | ||
87 | |||
88 | return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0; | ||
89 | } | ||
90 | |||
91 | static void | ||
92 | pxa_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *dev) | ||
93 | { | ||
94 | switch (mode) { | ||
95 | case CLOCK_EVT_MODE_ONESHOT: | ||
96 | timer_writel(timer_readl(OIER) & ~OIER_E0, OIER); | ||
97 | timer_writel(OSSR_M0, OSSR); | ||
98 | break; | ||
99 | |||
100 | case CLOCK_EVT_MODE_UNUSED: | ||
101 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
102 | /* initializing, released, or preparing for suspend */ | ||
103 | timer_writel(timer_readl(OIER) & ~OIER_E0, OIER); | ||
104 | timer_writel(OSSR_M0, OSSR); | ||
105 | break; | ||
106 | |||
107 | case CLOCK_EVT_MODE_RESUME: | ||
108 | case CLOCK_EVT_MODE_PERIODIC: | ||
109 | break; | ||
110 | } | ||
111 | } | ||
112 | |||
113 | #ifdef CONFIG_PM | ||
114 | static unsigned long osmr[4], oier, oscr; | ||
115 | |||
116 | static void pxa_timer_suspend(struct clock_event_device *cedev) | ||
117 | { | ||
118 | osmr[0] = timer_readl(OSMR0); | ||
119 | osmr[1] = timer_readl(OSMR1); | ||
120 | osmr[2] = timer_readl(OSMR2); | ||
121 | osmr[3] = timer_readl(OSMR3); | ||
122 | oier = timer_readl(OIER); | ||
123 | oscr = timer_readl(OSCR); | ||
124 | } | ||
125 | |||
126 | static void pxa_timer_resume(struct clock_event_device *cedev) | ||
127 | { | ||
128 | /* | ||
129 | * Ensure that we have at least MIN_OSCR_DELTA between match | ||
130 | * register 0 and the OSCR, to guarantee that we will receive | ||
131 | * the one-shot timer interrupt. We adjust OSMR0 in preference | ||
132 | * to OSCR to guarantee that OSCR is monotonically incrementing. | ||
133 | */ | ||
134 | if (osmr[0] - oscr < MIN_OSCR_DELTA) | ||
135 | osmr[0] += MIN_OSCR_DELTA; | ||
136 | |||
137 | timer_writel(osmr[0], OSMR0); | ||
138 | timer_writel(osmr[1], OSMR1); | ||
139 | timer_writel(osmr[2], OSMR2); | ||
140 | timer_writel(osmr[3], OSMR3); | ||
141 | timer_writel(oier, OIER); | ||
142 | timer_writel(oscr, OSCR); | ||
143 | } | ||
144 | #else | ||
145 | #define pxa_timer_suspend NULL | ||
146 | #define pxa_timer_resume NULL | ||
147 | #endif | ||
148 | |||
149 | static struct clock_event_device ckevt_pxa_osmr0 = { | ||
150 | .name = "osmr0", | ||
151 | .features = CLOCK_EVT_FEAT_ONESHOT, | ||
152 | .rating = 200, | ||
153 | .set_next_event = pxa_osmr0_set_next_event, | ||
154 | .set_mode = pxa_osmr0_set_mode, | ||
155 | .suspend = pxa_timer_suspend, | ||
156 | .resume = pxa_timer_resume, | ||
157 | }; | ||
158 | |||
159 | static struct irqaction pxa_ost0_irq = { | ||
160 | .name = "ost0", | ||
161 | .flags = IRQF_TIMER | IRQF_IRQPOLL, | ||
162 | .handler = pxa_ost0_interrupt, | ||
163 | .dev_id = &ckevt_pxa_osmr0, | ||
164 | }; | ||
165 | |||
166 | static void pxa_timer_common_init(int irq, unsigned long clock_tick_rate) | ||
167 | { | ||
168 | timer_writel(0, OIER); | ||
169 | timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR); | ||
170 | |||
171 | sched_clock_register(pxa_read_sched_clock, 32, clock_tick_rate); | ||
172 | |||
173 | ckevt_pxa_osmr0.cpumask = cpumask_of(0); | ||
174 | |||
175 | setup_irq(irq, &pxa_ost0_irq); | ||
176 | |||
177 | clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200, | ||
178 | 32, clocksource_mmio_readl_up); | ||
179 | clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate, | ||
180 | MIN_OSCR_DELTA * 2, 0x7fffffff); | ||
181 | } | ||
182 | |||
183 | static void __init pxa_timer_dt_init(struct device_node *np) | ||
184 | { | ||
185 | struct clk *clk; | ||
186 | int irq; | ||
187 | |||
188 | /* timer registers are shared with watchdog timer */ | ||
189 | timer_base = of_iomap(np, 0); | ||
190 | if (!timer_base) | ||
191 | panic("%s: unable to map resource\n", np->name); | ||
192 | |||
193 | clk = of_clk_get(np, 0); | ||
194 | if (IS_ERR(clk)) { | ||
195 | pr_crit("%s: unable to get clk\n", np->name); | ||
196 | return; | ||
197 | } | ||
198 | clk_prepare_enable(clk); | ||
199 | |||
200 | /* we are only interested in OS-timer0 irq */ | ||
201 | irq = irq_of_parse_and_map(np, 0); | ||
202 | if (irq <= 0) { | ||
203 | pr_crit("%s: unable to parse OS-timer0 irq\n", np->name); | ||
204 | return; | ||
205 | } | ||
206 | |||
207 | pxa_timer_common_init(irq, clk_get_rate(clk)); | ||
208 | } | ||
209 | CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init); | ||
210 | |||
211 | /* | ||
212 | * Legacy timer init for non device-tree boards. | ||
213 | */ | ||
214 | void __init pxa_timer_nodt_init(int irq, void __iomem *base, | ||
215 | unsigned long clock_tick_rate) | ||
216 | { | ||
217 | struct clk *clk; | ||
218 | |||
219 | timer_base = base; | ||
220 | clk = clk_get(NULL, "OSTIMER0"); | ||
221 | if (clk && !IS_ERR(clk)) | ||
222 | clk_prepare_enable(clk); | ||
223 | else | ||
224 | pr_crit("%s: unable to get clk\n", __func__); | ||
225 | |||
226 | pxa_timer_common_init(irq, clock_tick_rate); | ||
227 | } | ||
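The one subtle piece of the new PXA driver is pxa_osmr0_set_next_event(): the match register is programmed relative to the free-running counter, then the counter is re-read so that a delta the hardware has already run past is reported as -ETIME rather than silently never firing. A small userspace sketch of that pattern, with the OSCR/OSMR0 accesses replaced by a simulated counter (illustrative only, not kernel code):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MIN_DELTA 16    /* mirrors MIN_OSCR_DELTA above */

    static uint32_t fake_counter;   /* stand-in for timer_readl(OSCR) */

    static int program_match(uint32_t delta, uint32_t *match)
    {
            uint32_t next = fake_counter + delta;

            *match = next;  /* would be timer_writel(next, OSMR0) */
            /*
             * If the counter has already caught up with the match value the
             * interrupt may never fire; return -ETIME so the clockevents
             * core retries with a larger delta.
             */
            return (int32_t)(next - fake_counter) <= MIN_DELTA ? -ETIME : 0;
    }

    int main(void)
    {
            uint32_t m;

            fake_counter = 1000;
            printf("delta 100: %d\n", program_match(100, &m));  /* 0 */
            printf("delta   8: %d\n", program_match(8, &m));    /* -ETIME */
            return 0;
    }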
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index dfa780396b91..2bd13b53b727 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/ioport.h> | 24 | #include <linux/ioport.h> |
25 | #include <linux/irq.h> | 25 | #include <linux/irq.h> |
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/of.h> | ||
27 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
28 | #include <linux/pm_domain.h> | 29 | #include <linux/pm_domain.h> |
29 | #include <linux/pm_runtime.h> | 30 | #include <linux/pm_runtime.h> |
@@ -114,14 +115,15 @@ struct sh_cmt_device { | |||
114 | struct platform_device *pdev; | 115 | struct platform_device *pdev; |
115 | 116 | ||
116 | const struct sh_cmt_info *info; | 117 | const struct sh_cmt_info *info; |
117 | bool legacy; | ||
118 | 118 | ||
119 | void __iomem *mapbase_ch; | ||
120 | void __iomem *mapbase; | 119 | void __iomem *mapbase; |
121 | struct clk *clk; | 120 | struct clk *clk; |
122 | 121 | ||
122 | raw_spinlock_t lock; /* Protect the shared start/stop register */ | ||
123 | |||
123 | struct sh_cmt_channel *channels; | 124 | struct sh_cmt_channel *channels; |
124 | unsigned int num_channels; | 125 | unsigned int num_channels; |
126 | unsigned int hw_channels; | ||
125 | 127 | ||
126 | bool has_clockevent; | 128 | bool has_clockevent; |
127 | bool has_clocksource; | 129 | bool has_clocksource; |
@@ -301,14 +303,12 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch, | |||
301 | return v2; | 303 | return v2; |
302 | } | 304 | } |
303 | 305 | ||
304 | static DEFINE_RAW_SPINLOCK(sh_cmt_lock); | ||
305 | |||
306 | static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start) | 306 | static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start) |
307 | { | 307 | { |
308 | unsigned long flags, value; | 308 | unsigned long flags, value; |
309 | 309 | ||
310 | /* start stop register shared by multiple timer channels */ | 310 | /* start stop register shared by multiple timer channels */ |
311 | raw_spin_lock_irqsave(&sh_cmt_lock, flags); | 311 | raw_spin_lock_irqsave(&ch->cmt->lock, flags); |
312 | value = sh_cmt_read_cmstr(ch); | 312 | value = sh_cmt_read_cmstr(ch); |
313 | 313 | ||
314 | if (start) | 314 | if (start) |
@@ -317,7 +317,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start) | |||
317 | value &= ~(1 << ch->timer_bit); | 317 | value &= ~(1 << ch->timer_bit); |
318 | 318 | ||
319 | sh_cmt_write_cmstr(ch, value); | 319 | sh_cmt_write_cmstr(ch, value); |
320 | raw_spin_unlock_irqrestore(&sh_cmt_lock, flags); | 320 | raw_spin_unlock_irqrestore(&ch->cmt->lock, flags); |
321 | } | 321 | } |
322 | 322 | ||
323 | static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate) | 323 | static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate) |
@@ -792,7 +792,7 @@ static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch, | |||
792 | int irq; | 792 | int irq; |
793 | int ret; | 793 | int ret; |
794 | 794 | ||
795 | irq = platform_get_irq(ch->cmt->pdev, ch->cmt->legacy ? 0 : ch->index); | 795 | irq = platform_get_irq(ch->cmt->pdev, ch->index); |
796 | if (irq < 0) { | 796 | if (irq < 0) { |
797 | dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n", | 797 | dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n", |
798 | ch->index); | 798 | ch->index); |
@@ -863,33 +863,26 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index, | |||
863 | * Compute the address of the channel control register block. For the | 863 | * Compute the address of the channel control register block. For the |
864 | * timers with a per-channel start/stop register, compute its address | 864 | * timers with a per-channel start/stop register, compute its address |
865 | * as well. | 865 | * as well. |
866 | * | ||
867 | * For legacy configuration the address has been mapped explicitly. | ||
868 | */ | 866 | */ |
869 | if (cmt->legacy) { | 867 | switch (cmt->info->model) { |
870 | ch->ioctrl = cmt->mapbase_ch; | 868 | case SH_CMT_16BIT: |
871 | } else { | 869 | ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6; |
872 | switch (cmt->info->model) { | 870 | break; |
873 | case SH_CMT_16BIT: | 871 | case SH_CMT_32BIT: |
874 | ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6; | 872 | case SH_CMT_48BIT: |
875 | break; | 873 | ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10; |
876 | case SH_CMT_32BIT: | 874 | break; |
877 | case SH_CMT_48BIT: | 875 | case SH_CMT_32BIT_FAST: |
878 | ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10; | 876 | /* |
879 | break; | 877 | * The 32-bit "fast" timer has a single channel at hwidx 5 but |
880 | case SH_CMT_32BIT_FAST: | 878 | * is located at offset 0x40 instead of 0x60 for some reason. |
881 | /* | 879 | */ |
882 | * The 32-bit "fast" timer has a single channel at hwidx | 880 | ch->ioctrl = cmt->mapbase + 0x40; |
883 | * 5 but is located at offset 0x40 instead of 0x60 for | 881 | break; |
884 | * some reason. | 882 | case SH_CMT_48BIT_GEN2: |
885 | */ | 883 | ch->iostart = cmt->mapbase + ch->hwidx * 0x100; |
886 | ch->ioctrl = cmt->mapbase + 0x40; | 884 | ch->ioctrl = ch->iostart + 0x10; |
887 | break; | 885 | break; |
888 | case SH_CMT_48BIT_GEN2: | ||
889 | ch->iostart = cmt->mapbase + ch->hwidx * 0x100; | ||
890 | ch->ioctrl = ch->iostart + 0x10; | ||
891 | break; | ||
892 | } | ||
893 | } | 886 | } |
894 | 887 | ||
895 | if (cmt->info->width == (sizeof(ch->max_match_value) * 8)) | 888 | if (cmt->info->width == (sizeof(ch->max_match_value) * 8)) |
@@ -900,12 +893,7 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index, | |||
900 | ch->match_value = ch->max_match_value; | 893 | ch->match_value = ch->max_match_value; |
901 | raw_spin_lock_init(&ch->lock); | 894 | raw_spin_lock_init(&ch->lock); |
902 | 895 | ||
903 | if (cmt->legacy) { | 896 | ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2 ? 0 : ch->hwidx; |
904 | ch->timer_bit = ch->hwidx; | ||
905 | } else { | ||
906 | ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2 | ||
907 | ? 0 : ch->hwidx; | ||
908 | } | ||
909 | 897 | ||
910 | ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev), | 898 | ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev), |
911 | clockevent, clocksource); | 899 | clockevent, clocksource); |
@@ -938,75 +926,65 @@ static int sh_cmt_map_memory(struct sh_cmt_device *cmt) | |||
938 | return 0; | 926 | return 0; |
939 | } | 927 | } |
940 | 928 | ||
941 | static int sh_cmt_map_memory_legacy(struct sh_cmt_device *cmt) | 929 | static const struct platform_device_id sh_cmt_id_table[] = { |
942 | { | 930 | { "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] }, |
943 | struct sh_timer_config *cfg = cmt->pdev->dev.platform_data; | 931 | { "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] }, |
944 | struct resource *res, *res2; | 932 | { "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] }, |
945 | 933 | { "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] }, | |
946 | /* map memory, let mapbase_ch point to our channel */ | 934 | { "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] }, |
947 | res = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0); | 935 | { } |
948 | if (!res) { | 936 | }; |
949 | dev_err(&cmt->pdev->dev, "failed to get I/O memory\n"); | 937 | MODULE_DEVICE_TABLE(platform, sh_cmt_id_table); |
950 | return -ENXIO; | ||
951 | } | ||
952 | |||
953 | cmt->mapbase_ch = ioremap_nocache(res->start, resource_size(res)); | ||
954 | if (cmt->mapbase_ch == NULL) { | ||
955 | dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n"); | ||
956 | return -ENXIO; | ||
957 | } | ||
958 | |||
959 | /* optional resource for the shared timer start/stop register */ | ||
960 | res2 = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 1); | ||
961 | |||
962 | /* map second resource for CMSTR */ | ||
963 | cmt->mapbase = ioremap_nocache(res2 ? res2->start : | ||
964 | res->start - cfg->channel_offset, | ||
965 | res2 ? resource_size(res2) : 2); | ||
966 | if (cmt->mapbase == NULL) { | ||
967 | dev_err(&cmt->pdev->dev, "failed to remap I/O second memory\n"); | ||
968 | iounmap(cmt->mapbase_ch); | ||
969 | return -ENXIO; | ||
970 | } | ||
971 | |||
972 | /* identify the model based on the resources */ | ||
973 | if (resource_size(res) == 6) | ||
974 | cmt->info = &sh_cmt_info[SH_CMT_16BIT]; | ||
975 | else if (res2 && (resource_size(res2) == 4)) | ||
976 | cmt->info = &sh_cmt_info[SH_CMT_48BIT_GEN2]; | ||
977 | else | ||
978 | cmt->info = &sh_cmt_info[SH_CMT_32BIT]; | ||
979 | 938 | ||
980 | return 0; | 939 | static const struct of_device_id sh_cmt_of_table[] __maybe_unused = { |
981 | } | 940 | { .compatible = "renesas,cmt-32", .data = &sh_cmt_info[SH_CMT_32BIT] }, |
941 | { .compatible = "renesas,cmt-32-fast", .data = &sh_cmt_info[SH_CMT_32BIT_FAST] }, | ||
942 | { .compatible = "renesas,cmt-48", .data = &sh_cmt_info[SH_CMT_48BIT] }, | ||
943 | { .compatible = "renesas,cmt-48-gen2", .data = &sh_cmt_info[SH_CMT_48BIT_GEN2] }, | ||
944 | { } | ||
945 | }; | ||
946 | MODULE_DEVICE_TABLE(of, sh_cmt_of_table); | ||
982 | 947 | ||
983 | static void sh_cmt_unmap_memory(struct sh_cmt_device *cmt) | 948 | static int sh_cmt_parse_dt(struct sh_cmt_device *cmt) |
984 | { | 949 | { |
985 | iounmap(cmt->mapbase); | 950 | struct device_node *np = cmt->pdev->dev.of_node; |
986 | if (cmt->mapbase_ch) | 951 | |
987 | iounmap(cmt->mapbase_ch); | 952 | return of_property_read_u32(np, "renesas,channels-mask", |
953 | &cmt->hw_channels); | ||
988 | } | 954 | } |
989 | 955 | ||
990 | static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) | 956 | static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) |
991 | { | 957 | { |
992 | struct sh_timer_config *cfg = pdev->dev.platform_data; | 958 | unsigned int mask; |
993 | const struct platform_device_id *id = pdev->id_entry; | 959 | unsigned int i; |
994 | unsigned int hw_channels; | ||
995 | int ret; | 960 | int ret; |
996 | 961 | ||
997 | memset(cmt, 0, sizeof(*cmt)); | 962 | memset(cmt, 0, sizeof(*cmt)); |
998 | cmt->pdev = pdev; | 963 | cmt->pdev = pdev; |
964 | raw_spin_lock_init(&cmt->lock); | ||
965 | |||
966 | if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) { | ||
967 | const struct of_device_id *id; | ||
968 | |||
969 | id = of_match_node(sh_cmt_of_table, pdev->dev.of_node); | ||
970 | cmt->info = id->data; | ||
999 | 971 | ||
1000 | if (!cfg) { | 972 | ret = sh_cmt_parse_dt(cmt); |
973 | if (ret < 0) | ||
974 | return ret; | ||
975 | } else if (pdev->dev.platform_data) { | ||
976 | struct sh_timer_config *cfg = pdev->dev.platform_data; | ||
977 | const struct platform_device_id *id = pdev->id_entry; | ||
978 | |||
979 | cmt->info = (const struct sh_cmt_info *)id->driver_data; | ||
980 | cmt->hw_channels = cfg->channels_mask; | ||
981 | } else { | ||
1001 | dev_err(&cmt->pdev->dev, "missing platform data\n"); | 982 | dev_err(&cmt->pdev->dev, "missing platform data\n"); |
1002 | return -ENXIO; | 983 | return -ENXIO; |
1003 | } | 984 | } |
1004 | 985 | ||
1005 | cmt->info = (const struct sh_cmt_info *)id->driver_data; | ||
1006 | cmt->legacy = cmt->info ? false : true; | ||
1007 | |||
1008 | /* Get hold of clock. */ | 986 | /* Get hold of clock. */ |
1009 | cmt->clk = clk_get(&cmt->pdev->dev, cmt->legacy ? "cmt_fck" : "fck"); | 987 | cmt->clk = clk_get(&cmt->pdev->dev, "fck"); |
1010 | if (IS_ERR(cmt->clk)) { | 988 | if (IS_ERR(cmt->clk)) { |
1011 | dev_err(&cmt->pdev->dev, "cannot get clock\n"); | 989 | dev_err(&cmt->pdev->dev, "cannot get clock\n"); |
1012 | return PTR_ERR(cmt->clk); | 990 | return PTR_ERR(cmt->clk); |
@@ -1016,28 +994,13 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) | |||
1016 | if (ret < 0) | 994 | if (ret < 0) |
1017 | goto err_clk_put; | 995 | goto err_clk_put; |
1018 | 996 | ||
1019 | /* | 997 | /* Map the memory resource(s). */ |
1020 | * Map the memory resource(s). We need to support both the legacy | 998 | ret = sh_cmt_map_memory(cmt); |
1021 | * platform device configuration (with one device per channel) and the | ||
1022 | * new version (with multiple channels per device). | ||
1023 | */ | ||
1024 | if (cmt->legacy) | ||
1025 | ret = sh_cmt_map_memory_legacy(cmt); | ||
1026 | else | ||
1027 | ret = sh_cmt_map_memory(cmt); | ||
1028 | |||
1029 | if (ret < 0) | 999 | if (ret < 0) |
1030 | goto err_clk_unprepare; | 1000 | goto err_clk_unprepare; |
1031 | 1001 | ||
1032 | /* Allocate and setup the channels. */ | 1002 | /* Allocate and setup the channels. */ |
1033 | if (cmt->legacy) { | 1003 | cmt->num_channels = hweight8(cmt->hw_channels); |
1034 | cmt->num_channels = 1; | ||
1035 | hw_channels = 0; | ||
1036 | } else { | ||
1037 | cmt->num_channels = hweight8(cfg->channels_mask); | ||
1038 | hw_channels = cfg->channels_mask; | ||
1039 | } | ||
1040 | |||
1041 | cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels), | 1004 | cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels), |
1042 | GFP_KERNEL); | 1005 | GFP_KERNEL); |
1043 | if (cmt->channels == NULL) { | 1006 | if (cmt->channels == NULL) { |
@@ -1045,35 +1008,21 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) | |||
1045 | goto err_unmap; | 1008 | goto err_unmap; |
1046 | } | 1009 | } |
1047 | 1010 | ||
1048 | if (cmt->legacy) { | 1011 | /* |
1049 | ret = sh_cmt_setup_channel(&cmt->channels[0], | 1012 | * Use the first channel as a clock event device and the second channel |
1050 | cfg->timer_bit, cfg->timer_bit, | 1013 | * as a clock source. If only one channel is available use it for both. |
1051 | cfg->clockevent_rating != 0, | 1014 | */ |
1052 | cfg->clocksource_rating != 0, cmt); | 1015 | for (i = 0, mask = cmt->hw_channels; i < cmt->num_channels; ++i) { |
1016 | unsigned int hwidx = ffs(mask) - 1; | ||
1017 | bool clocksource = i == 1 || cmt->num_channels == 1; | ||
1018 | bool clockevent = i == 0; | ||
1019 | |||
1020 | ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx, | ||
1021 | clockevent, clocksource, cmt); | ||
1053 | if (ret < 0) | 1022 | if (ret < 0) |
1054 | goto err_unmap; | 1023 | goto err_unmap; |
1055 | } else { | ||
1056 | unsigned int mask = hw_channels; | ||
1057 | unsigned int i; | ||
1058 | 1024 | ||
1059 | /* | 1025 | mask &= ~(1 << hwidx); |
1060 | * Use the first channel as a clock event device and the second | ||
1061 | * channel as a clock source. If only one channel is available | ||
1062 | * use it for both. | ||
1063 | */ | ||
1064 | for (i = 0; i < cmt->num_channels; ++i) { | ||
1065 | unsigned int hwidx = ffs(mask) - 1; | ||
1066 | bool clocksource = i == 1 || cmt->num_channels == 1; | ||
1067 | bool clockevent = i == 0; | ||
1068 | |||
1069 | ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx, | ||
1070 | clockevent, clocksource, | ||
1071 | cmt); | ||
1072 | if (ret < 0) | ||
1073 | goto err_unmap; | ||
1074 | |||
1075 | mask &= ~(1 << hwidx); | ||
1076 | } | ||
1077 | } | 1026 | } |
1078 | 1027 | ||
1079 | platform_set_drvdata(pdev, cmt); | 1028 | platform_set_drvdata(pdev, cmt); |
@@ -1082,7 +1031,7 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) | |||
1082 | 1031 | ||
1083 | err_unmap: | 1032 | err_unmap: |
1084 | kfree(cmt->channels); | 1033 | kfree(cmt->channels); |
1085 | sh_cmt_unmap_memory(cmt); | 1034 | iounmap(cmt->mapbase); |
1086 | err_clk_unprepare: | 1035 | err_clk_unprepare: |
1087 | clk_unprepare(cmt->clk); | 1036 | clk_unprepare(cmt->clk); |
1088 | err_clk_put: | 1037 | err_clk_put: |
@@ -1132,22 +1081,12 @@ static int sh_cmt_remove(struct platform_device *pdev) | |||
1132 | return -EBUSY; /* cannot unregister clockevent and clocksource */ | 1081 | return -EBUSY; /* cannot unregister clockevent and clocksource */ |
1133 | } | 1082 | } |
1134 | 1083 | ||
1135 | static const struct platform_device_id sh_cmt_id_table[] = { | ||
1136 | { "sh_cmt", 0 }, | ||
1137 | { "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] }, | ||
1138 | { "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] }, | ||
1139 | { "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] }, | ||
1140 | { "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] }, | ||
1141 | { "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] }, | ||
1142 | { } | ||
1143 | }; | ||
1144 | MODULE_DEVICE_TABLE(platform, sh_cmt_id_table); | ||
1145 | |||
1146 | static struct platform_driver sh_cmt_device_driver = { | 1084 | static struct platform_driver sh_cmt_device_driver = { |
1147 | .probe = sh_cmt_probe, | 1085 | .probe = sh_cmt_probe, |
1148 | .remove = sh_cmt_remove, | 1086 | .remove = sh_cmt_remove, |
1149 | .driver = { | 1087 | .driver = { |
1150 | .name = "sh_cmt", | 1088 | .name = "sh_cmt", |
1089 | .of_match_table = of_match_ptr(sh_cmt_of_table), | ||
1151 | }, | 1090 | }, |
1152 | .id_table = sh_cmt_id_table, | 1091 | .id_table = sh_cmt_id_table, |
1153 | }; | 1092 | }; |
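With the legacy one-device-per-channel configuration gone, sh_cmt_setup() walks a channel mask (from platform data or from the renesas,channels-mask DT property) and pairs the i-th set bit with logical channel i, using the first channel as the clock event device and the second as the clock source (or a single channel for both). A standalone sketch of that walk, using an example mask rather than a value from any real board:

    #include <stdio.h>
    #include <strings.h>            /* ffs() */

    int main(void)
    {
            unsigned int mask = 0x60;       /* example: hw channels 5 and 6 */
            unsigned int num = __builtin_popcount(mask);    /* hweight8() */
            unsigned int i;

            for (i = 0; i < num; i++) {
                    unsigned int hwidx = ffs(mask) - 1;

                    printf("logical %u -> hw channel %u (%s)\n", i, hwidx,
                           i == 0 ? "clockevent" : "clocksource");
                    mask &= ~(1u << hwidx);
            }
            return 0;
    }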
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index 188d4e092efc..3d88698cf2b8 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/ioport.h> | 23 | #include <linux/ioport.h> |
24 | #include <linux/irq.h> | 24 | #include <linux/irq.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/of.h> | ||
26 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
27 | #include <linux/pm_domain.h> | 28 | #include <linux/pm_domain.h> |
28 | #include <linux/pm_runtime.h> | 29 | #include <linux/pm_runtime.h> |
@@ -37,7 +38,6 @@ struct sh_mtu2_channel { | |||
37 | unsigned int index; | 38 | unsigned int index; |
38 | 39 | ||
39 | void __iomem *base; | 40 | void __iomem *base; |
40 | int irq; | ||
41 | 41 | ||
42 | struct clock_event_device ced; | 42 | struct clock_event_device ced; |
43 | }; | 43 | }; |
@@ -48,15 +48,14 @@ struct sh_mtu2_device { | |||
48 | void __iomem *mapbase; | 48 | void __iomem *mapbase; |
49 | struct clk *clk; | 49 | struct clk *clk; |
50 | 50 | ||
51 | raw_spinlock_t lock; /* Protect the shared registers */ | ||
52 | |||
51 | struct sh_mtu2_channel *channels; | 53 | struct sh_mtu2_channel *channels; |
52 | unsigned int num_channels; | 54 | unsigned int num_channels; |
53 | 55 | ||
54 | bool legacy; | ||
55 | bool has_clockevent; | 56 | bool has_clockevent; |
56 | }; | 57 | }; |
57 | 58 | ||
58 | static DEFINE_RAW_SPINLOCK(sh_mtu2_lock); | ||
59 | |||
60 | #define TSTR -1 /* shared register */ | 59 | #define TSTR -1 /* shared register */ |
61 | #define TCR 0 /* channel register */ | 60 | #define TCR 0 /* channel register */ |
62 | #define TMDR 1 /* channel register */ | 61 | #define TMDR 1 /* channel register */ |
@@ -162,12 +161,8 @@ static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr) | |||
162 | { | 161 | { |
163 | unsigned long offs; | 162 | unsigned long offs; |
164 | 163 | ||
165 | if (reg_nr == TSTR) { | 164 | if (reg_nr == TSTR) |
166 | if (ch->mtu->legacy) | 165 | return ioread8(ch->mtu->mapbase + 0x280); |
167 | return ioread8(ch->mtu->mapbase); | ||
168 | else | ||
169 | return ioread8(ch->mtu->mapbase + 0x280); | ||
170 | } | ||
171 | 166 | ||
172 | offs = mtu2_reg_offs[reg_nr]; | 167 | offs = mtu2_reg_offs[reg_nr]; |
173 | 168 | ||
@@ -182,12 +177,8 @@ static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr, | |||
182 | { | 177 | { |
183 | unsigned long offs; | 178 | unsigned long offs; |
184 | 179 | ||
185 | if (reg_nr == TSTR) { | 180 | if (reg_nr == TSTR) |
186 | if (ch->mtu->legacy) | 181 | return iowrite8(value, ch->mtu->mapbase + 0x280); |
187 | return iowrite8(value, ch->mtu->mapbase); | ||
188 | else | ||
189 | return iowrite8(value, ch->mtu->mapbase + 0x280); | ||
190 | } | ||
191 | 182 | ||
192 | offs = mtu2_reg_offs[reg_nr]; | 183 | offs = mtu2_reg_offs[reg_nr]; |
193 | 184 | ||
@@ -202,7 +193,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start) | |||
202 | unsigned long flags, value; | 193 | unsigned long flags, value; |
203 | 194 | ||
204 | /* start stop register shared by multiple timer channels */ | 195 | /* start stop register shared by multiple timer channels */ |
205 | raw_spin_lock_irqsave(&sh_mtu2_lock, flags); | 196 | raw_spin_lock_irqsave(&ch->mtu->lock, flags); |
206 | value = sh_mtu2_read(ch, TSTR); | 197 | value = sh_mtu2_read(ch, TSTR); |
207 | 198 | ||
208 | if (start) | 199 | if (start) |
@@ -211,7 +202,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start) | |||
211 | value &= ~(1 << ch->index); | 202 | value &= ~(1 << ch->index); |
212 | 203 | ||
213 | sh_mtu2_write(ch, TSTR, value); | 204 | sh_mtu2_write(ch, TSTR, value); |
214 | raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags); | 205 | raw_spin_unlock_irqrestore(&ch->mtu->lock, flags); |
215 | } | 206 | } |
216 | 207 | ||
217 | static int sh_mtu2_enable(struct sh_mtu2_channel *ch) | 208 | static int sh_mtu2_enable(struct sh_mtu2_channel *ch) |
@@ -331,7 +322,6 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch, | |||
331 | const char *name) | 322 | const char *name) |
332 | { | 323 | { |
333 | struct clock_event_device *ced = &ch->ced; | 324 | struct clock_event_device *ced = &ch->ced; |
334 | int ret; | ||
335 | 325 | ||
336 | ced->name = name; | 326 | ced->name = name; |
337 | ced->features = CLOCK_EVT_FEAT_PERIODIC; | 327 | ced->features = CLOCK_EVT_FEAT_PERIODIC; |
@@ -344,24 +334,12 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch, | |||
344 | dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n", | 334 | dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n", |
345 | ch->index); | 335 | ch->index); |
346 | clockevents_register_device(ced); | 336 | clockevents_register_device(ced); |
347 | |||
348 | ret = request_irq(ch->irq, sh_mtu2_interrupt, | ||
349 | IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, | ||
350 | dev_name(&ch->mtu->pdev->dev), ch); | ||
351 | if (ret) { | ||
352 | dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n", | ||
353 | ch->index, ch->irq); | ||
354 | return; | ||
355 | } | ||
356 | } | 337 | } |
357 | 338 | ||
358 | static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name, | 339 | static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name) |
359 | bool clockevent) | ||
360 | { | 340 | { |
361 | if (clockevent) { | 341 | ch->mtu->has_clockevent = true; |
362 | ch->mtu->has_clockevent = true; | 342 | sh_mtu2_register_clockevent(ch, name); |
363 | sh_mtu2_register_clockevent(ch, name); | ||
364 | } | ||
365 | 343 | ||
366 | return 0; | 344 | return 0; |
367 | } | 345 | } |
@@ -372,40 +350,32 @@ static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index, | |||
372 | static const unsigned int channel_offsets[] = { | 350 | static const unsigned int channel_offsets[] = { |
373 | 0x300, 0x380, 0x000, | 351 | 0x300, 0x380, 0x000, |
374 | }; | 352 | }; |
375 | bool clockevent; | 353 | char name[6]; |
354 | int irq; | ||
355 | int ret; | ||
376 | 356 | ||
377 | ch->mtu = mtu; | 357 | ch->mtu = mtu; |
378 | 358 | ||
379 | if (mtu->legacy) { | 359 | sprintf(name, "tgi%ua", index); |
380 | struct sh_timer_config *cfg = mtu->pdev->dev.platform_data; | 360 | irq = platform_get_irq_byname(mtu->pdev, name); |
381 | 361 | if (irq < 0) { | |
382 | clockevent = cfg->clockevent_rating != 0; | ||
383 | |||
384 | ch->irq = platform_get_irq(mtu->pdev, 0); | ||
385 | ch->base = mtu->mapbase - cfg->channel_offset; | ||
386 | ch->index = cfg->timer_bit; | ||
387 | } else { | ||
388 | char name[6]; | ||
389 | |||
390 | clockevent = true; | ||
391 | |||
392 | sprintf(name, "tgi%ua", index); | ||
393 | ch->irq = platform_get_irq_byname(mtu->pdev, name); | ||
394 | ch->base = mtu->mapbase + channel_offsets[index]; | ||
395 | ch->index = index; | ||
396 | } | ||
397 | |||
398 | if (ch->irq < 0) { | ||
399 | /* Skip channels with no declared interrupt. */ | 362 | /* Skip channels with no declared interrupt. */ |
400 | if (!mtu->legacy) | 363 | return 0; |
401 | return 0; | 364 | } |
402 | 365 | ||
403 | dev_err(&mtu->pdev->dev, "ch%u: failed to get irq\n", | 366 | ret = request_irq(irq, sh_mtu2_interrupt, |
404 | ch->index); | 367 | IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, |
405 | return ch->irq; | 368 | dev_name(&ch->mtu->pdev->dev), ch); |
369 | if (ret) { | ||
370 | dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n", | ||
371 | index, irq); | ||
372 | return ret; | ||
406 | } | 373 | } |
407 | 374 | ||
408 | return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev), clockevent); | 375 | ch->base = mtu->mapbase + channel_offsets[index]; |
376 | ch->index = index; | ||
377 | |||
378 | return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev)); | ||
409 | } | 379 | } |
410 | 380 | ||
411 | static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu) | 381 | static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu) |
@@ -422,46 +392,21 @@ static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu) | |||
422 | if (mtu->mapbase == NULL) | 392 | if (mtu->mapbase == NULL) |
423 | return -ENXIO; | 393 | return -ENXIO; |
424 | 394 | ||
425 | /* | ||
426 | * In legacy platform device configuration (with one device per channel) | ||
427 | * the resource points to the channel base address. | ||
428 | */ | ||
429 | if (mtu->legacy) { | ||
430 | struct sh_timer_config *cfg = mtu->pdev->dev.platform_data; | ||
431 | mtu->mapbase += cfg->channel_offset; | ||
432 | } | ||
433 | |||
434 | return 0; | 395 | return 0; |
435 | } | 396 | } |
436 | 397 | ||
437 | static void sh_mtu2_unmap_memory(struct sh_mtu2_device *mtu) | ||
438 | { | ||
439 | if (mtu->legacy) { | ||
440 | struct sh_timer_config *cfg = mtu->pdev->dev.platform_data; | ||
441 | mtu->mapbase -= cfg->channel_offset; | ||
442 | } | ||
443 | |||
444 | iounmap(mtu->mapbase); | ||
445 | } | ||
446 | |||
447 | static int sh_mtu2_setup(struct sh_mtu2_device *mtu, | 398 | static int sh_mtu2_setup(struct sh_mtu2_device *mtu, |
448 | struct platform_device *pdev) | 399 | struct platform_device *pdev) |
449 | { | 400 | { |
450 | struct sh_timer_config *cfg = pdev->dev.platform_data; | ||
451 | const struct platform_device_id *id = pdev->id_entry; | ||
452 | unsigned int i; | 401 | unsigned int i; |
453 | int ret; | 402 | int ret; |
454 | 403 | ||
455 | mtu->pdev = pdev; | 404 | mtu->pdev = pdev; |
456 | mtu->legacy = id->driver_data; | ||
457 | 405 | ||
458 | if (mtu->legacy && !cfg) { | 406 | raw_spin_lock_init(&mtu->lock); |
459 | dev_err(&mtu->pdev->dev, "missing platform data\n"); | ||
460 | return -ENXIO; | ||
461 | } | ||
462 | 407 | ||
463 | /* Get hold of clock. */ | 408 | /* Get hold of clock. */ |
464 | mtu->clk = clk_get(&mtu->pdev->dev, mtu->legacy ? "mtu2_fck" : "fck"); | 409 | mtu->clk = clk_get(&mtu->pdev->dev, "fck"); |
465 | if (IS_ERR(mtu->clk)) { | 410 | if (IS_ERR(mtu->clk)) { |
466 | dev_err(&mtu->pdev->dev, "cannot get clock\n"); | 411 | dev_err(&mtu->pdev->dev, "cannot get clock\n"); |
467 | return PTR_ERR(mtu->clk); | 412 | return PTR_ERR(mtu->clk); |
@@ -479,10 +424,7 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu, | |||
479 | } | 424 | } |
480 | 425 | ||
481 | /* Allocate and setup the channels. */ | 426 | /* Allocate and setup the channels. */ |
482 | if (mtu->legacy) | 427 | mtu->num_channels = 3; |
483 | mtu->num_channels = 1; | ||
484 | else | ||
485 | mtu->num_channels = 3; | ||
486 | 428 | ||
487 | mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels, | 429 | mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels, |
488 | GFP_KERNEL); | 430 | GFP_KERNEL); |
@@ -491,16 +433,10 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu, | |||
491 | goto err_unmap; | 433 | goto err_unmap; |
492 | } | 434 | } |
493 | 435 | ||
494 | if (mtu->legacy) { | 436 | for (i = 0; i < mtu->num_channels; ++i) { |
495 | ret = sh_mtu2_setup_channel(&mtu->channels[0], 0, mtu); | 437 | ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu); |
496 | if (ret < 0) | 438 | if (ret < 0) |
497 | goto err_unmap; | 439 | goto err_unmap; |
498 | } else { | ||
499 | for (i = 0; i < mtu->num_channels; ++i) { | ||
500 | ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu); | ||
501 | if (ret < 0) | ||
502 | goto err_unmap; | ||
503 | } | ||
504 | } | 440 | } |
505 | 441 | ||
506 | platform_set_drvdata(pdev, mtu); | 442 | platform_set_drvdata(pdev, mtu); |
@@ -509,7 +445,7 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu, | |||
509 | 445 | ||
510 | err_unmap: | 446 | err_unmap: |
511 | kfree(mtu->channels); | 447 | kfree(mtu->channels); |
512 | sh_mtu2_unmap_memory(mtu); | 448 | iounmap(mtu->mapbase); |
513 | err_clk_unprepare: | 449 | err_clk_unprepare: |
514 | clk_unprepare(mtu->clk); | 450 | clk_unprepare(mtu->clk); |
515 | err_clk_put: | 451 | err_clk_put: |
@@ -560,17 +496,23 @@ static int sh_mtu2_remove(struct platform_device *pdev) | |||
560 | } | 496 | } |
561 | 497 | ||
562 | static const struct platform_device_id sh_mtu2_id_table[] = { | 498 | static const struct platform_device_id sh_mtu2_id_table[] = { |
563 | { "sh_mtu2", 1 }, | ||
564 | { "sh-mtu2", 0 }, | 499 | { "sh-mtu2", 0 }, |
565 | { }, | 500 | { }, |
566 | }; | 501 | }; |
567 | MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table); | 502 | MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table); |
568 | 503 | ||
504 | static const struct of_device_id sh_mtu2_of_table[] __maybe_unused = { | ||
505 | { .compatible = "renesas,mtu2" }, | ||
506 | { } | ||
507 | }; | ||
508 | MODULE_DEVICE_TABLE(of, sh_mtu2_of_table); | ||
509 | |||
569 | static struct platform_driver sh_mtu2_device_driver = { | 510 | static struct platform_driver sh_mtu2_device_driver = { |
570 | .probe = sh_mtu2_probe, | 511 | .probe = sh_mtu2_probe, |
571 | .remove = sh_mtu2_remove, | 512 | .remove = sh_mtu2_remove, |
572 | .driver = { | 513 | .driver = { |
573 | .name = "sh_mtu2", | 514 | .name = "sh_mtu2", |
515 | .of_match_table = of_match_ptr(sh_mtu2_of_table), | ||
574 | }, | 516 | }, |
575 | .id_table = sh_mtu2_id_table, | 517 | .id_table = sh_mtu2_id_table, |
576 | }; | 518 | }; |
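All three Renesas conversions use the same idiom for optional DT support: the of_device_id table is marked __maybe_unused and is only referenced through of_match_ptr(), so !CONFIG_OF builds end up with a NULL .of_match_table and no unused-variable warning for the table itself. Reduced to its moving parts, this mirrors the stock <linux/of.h> definition (reproduced here for context; it is not part of this diff):

    #ifdef CONFIG_OF
    #define of_match_ptr(ptr)       (ptr)
    #else
    #define of_match_ptr(ptr)       NULL    /* table compiles but is unreferenced */
    #endif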
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 6bd17a8f3dd4..0f665b8f2461 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/ioport.h> | 24 | #include <linux/ioport.h> |
25 | #include <linux/irq.h> | 25 | #include <linux/irq.h> |
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/of.h> | ||
27 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
28 | #include <linux/pm_domain.h> | 29 | #include <linux/pm_domain.h> |
29 | #include <linux/pm_runtime.h> | 30 | #include <linux/pm_runtime.h> |
@@ -32,7 +33,6 @@ | |||
32 | #include <linux/spinlock.h> | 33 | #include <linux/spinlock.h> |
33 | 34 | ||
34 | enum sh_tmu_model { | 35 | enum sh_tmu_model { |
35 | SH_TMU_LEGACY, | ||
36 | SH_TMU, | 36 | SH_TMU, |
37 | SH_TMU_SH3, | 37 | SH_TMU_SH3, |
38 | }; | 38 | }; |
@@ -62,6 +62,8 @@ struct sh_tmu_device { | |||
62 | 62 | ||
63 | enum sh_tmu_model model; | 63 | enum sh_tmu_model model; |
64 | 64 | ||
65 | raw_spinlock_t lock; /* Protect the shared start/stop register */ | ||
66 | |||
65 | struct sh_tmu_channel *channels; | 67 | struct sh_tmu_channel *channels; |
66 | unsigned int num_channels; | 68 | unsigned int num_channels; |
67 | 69 | ||
@@ -69,8 +71,6 @@ struct sh_tmu_device { | |||
69 | bool has_clocksource; | 71 | bool has_clocksource; |
70 | }; | 72 | }; |
71 | 73 | ||
72 | static DEFINE_RAW_SPINLOCK(sh_tmu_lock); | ||
73 | |||
74 | #define TSTR -1 /* shared register */ | 74 | #define TSTR -1 /* shared register */ |
75 | #define TCOR 0 /* channel register */ | 75 | #define TCOR 0 /* channel register */ |
76 | #define TCNT 1 /* channel register */ | 76 | #define TCNT 1 /* channel register */ |
@@ -91,8 +91,6 @@ static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr) | |||
91 | 91 | ||
92 | if (reg_nr == TSTR) { | 92 | if (reg_nr == TSTR) { |
93 | switch (ch->tmu->model) { | 93 | switch (ch->tmu->model) { |
94 | case SH_TMU_LEGACY: | ||
95 | return ioread8(ch->tmu->mapbase); | ||
96 | case SH_TMU_SH3: | 94 | case SH_TMU_SH3: |
97 | return ioread8(ch->tmu->mapbase + 2); | 95 | return ioread8(ch->tmu->mapbase + 2); |
98 | case SH_TMU: | 96 | case SH_TMU: |
@@ -115,8 +113,6 @@ static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr, | |||
115 | 113 | ||
116 | if (reg_nr == TSTR) { | 114 | if (reg_nr == TSTR) { |
117 | switch (ch->tmu->model) { | 115 | switch (ch->tmu->model) { |
118 | case SH_TMU_LEGACY: | ||
119 | return iowrite8(value, ch->tmu->mapbase); | ||
120 | case SH_TMU_SH3: | 116 | case SH_TMU_SH3: |
121 | return iowrite8(value, ch->tmu->mapbase + 2); | 117 | return iowrite8(value, ch->tmu->mapbase + 2); |
122 | case SH_TMU: | 118 | case SH_TMU: |
@@ -137,7 +133,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start) | |||
137 | unsigned long flags, value; | 133 | unsigned long flags, value; |
138 | 134 | ||
139 | /* start stop register shared by multiple timer channels */ | 135 | /* start stop register shared by multiple timer channels */ |
140 | raw_spin_lock_irqsave(&sh_tmu_lock, flags); | 136 | raw_spin_lock_irqsave(&ch->tmu->lock, flags); |
141 | value = sh_tmu_read(ch, TSTR); | 137 | value = sh_tmu_read(ch, TSTR); |
142 | 138 | ||
143 | if (start) | 139 | if (start) |
@@ -146,7 +142,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start) | |||
146 | value &= ~(1 << ch->index); | 142 | value &= ~(1 << ch->index); |
147 | 143 | ||
148 | sh_tmu_write(ch, TSTR, value); | 144 | sh_tmu_write(ch, TSTR, value); |
149 | raw_spin_unlock_irqrestore(&sh_tmu_lock, flags); | 145 | raw_spin_unlock_irqrestore(&ch->tmu->lock, flags); |
150 | } | 146 | } |
151 | 147 | ||
152 | static int __sh_tmu_enable(struct sh_tmu_channel *ch) | 148 | static int __sh_tmu_enable(struct sh_tmu_channel *ch) |
@@ -476,27 +472,12 @@ static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index, | |||
476 | return 0; | 472 | return 0; |
477 | 473 | ||
478 | ch->tmu = tmu; | 474 | ch->tmu = tmu; |
475 | ch->index = index; | ||
479 | 476 | ||
480 | if (tmu->model == SH_TMU_LEGACY) { | 477 | if (tmu->model == SH_TMU_SH3) |
481 | struct sh_timer_config *cfg = tmu->pdev->dev.platform_data; | 478 | ch->base = tmu->mapbase + 4 + ch->index * 12; |
482 | 479 | else | |
483 | /* | 480 | ch->base = tmu->mapbase + 8 + ch->index * 12; |
484 | * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps | ||
485 | * channel registers blocks at base + 2 + 12 * index, while all | ||
486 | * other variants map them at base + 4 + 12 * index. We can | ||
487 | * compute the index by just dividing by 12, the 2 bytes or 4 | ||
488 | * bytes offset being hidden by the integer division. | ||
489 | */ | ||
490 | ch->index = cfg->channel_offset / 12; | ||
491 | ch->base = tmu->mapbase + cfg->channel_offset; | ||
492 | } else { | ||
493 | ch->index = index; | ||
494 | |||
495 | if (tmu->model == SH_TMU_SH3) | ||
496 | ch->base = tmu->mapbase + 4 + ch->index * 12; | ||
497 | else | ||
498 | ch->base = tmu->mapbase + 8 + ch->index * 12; | ||
499 | } | ||
500 | 481 | ||
501 | ch->irq = platform_get_irq(tmu->pdev, index); | 482 | ch->irq = platform_get_irq(tmu->pdev, index); |
502 | if (ch->irq < 0) { | 483 | if (ch->irq < 0) { |
@@ -526,46 +507,53 @@ static int sh_tmu_map_memory(struct sh_tmu_device *tmu) | |||
526 | if (tmu->mapbase == NULL) | 507 | if (tmu->mapbase == NULL) |
527 | return -ENXIO; | 508 | return -ENXIO; |
528 | 509 | ||
529 | /* | ||
530 | * In legacy platform device configuration (with one device per channel) | ||
531 | * the resource points to the channel base address. | ||
532 | */ | ||
533 | if (tmu->model == SH_TMU_LEGACY) { | ||
534 | struct sh_timer_config *cfg = tmu->pdev->dev.platform_data; | ||
535 | tmu->mapbase -= cfg->channel_offset; | ||
536 | } | ||
537 | |||
538 | return 0; | 510 | return 0; |
539 | } | 511 | } |
540 | 512 | ||
541 | static void sh_tmu_unmap_memory(struct sh_tmu_device *tmu) | 513 | static int sh_tmu_parse_dt(struct sh_tmu_device *tmu) |
542 | { | 514 | { |
543 | if (tmu->model == SH_TMU_LEGACY) { | 515 | struct device_node *np = tmu->pdev->dev.of_node; |
544 | struct sh_timer_config *cfg = tmu->pdev->dev.platform_data; | 516 | |
545 | tmu->mapbase += cfg->channel_offset; | 517 | tmu->model = SH_TMU; |
518 | tmu->num_channels = 3; | ||
519 | |||
520 | of_property_read_u32(np, "#renesas,channels", &tmu->num_channels); | ||
521 | |||
522 | if (tmu->num_channels != 2 && tmu->num_channels != 3) { | ||
523 | dev_err(&tmu->pdev->dev, "invalid number of channels %u\n", | ||
524 | tmu->num_channels); | ||
525 | return -EINVAL; | ||
546 | } | 526 | } |
547 | 527 | ||
548 | iounmap(tmu->mapbase); | 528 | return 0; |
549 | } | 529 | } |
550 | 530 | ||
551 | static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev) | 531 | static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev) |
552 | { | 532 | { |
553 | struct sh_timer_config *cfg = pdev->dev.platform_data; | ||
554 | const struct platform_device_id *id = pdev->id_entry; | ||
555 | unsigned int i; | 533 | unsigned int i; |
556 | int ret; | 534 | int ret; |
557 | 535 | ||
558 | if (!cfg) { | 536 | tmu->pdev = pdev; |
537 | |||
538 | raw_spin_lock_init(&tmu->lock); | ||
539 | |||
540 | if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) { | ||
541 | ret = sh_tmu_parse_dt(tmu); | ||
542 | if (ret < 0) | ||
543 | return ret; | ||
544 | } else if (pdev->dev.platform_data) { | ||
545 | const struct platform_device_id *id = pdev->id_entry; | ||
546 | struct sh_timer_config *cfg = pdev->dev.platform_data; | ||
547 | |||
548 | tmu->model = id->driver_data; | ||
549 | tmu->num_channels = hweight8(cfg->channels_mask); | ||
550 | } else { | ||
559 | dev_err(&tmu->pdev->dev, "missing platform data\n"); | 551 | dev_err(&tmu->pdev->dev, "missing platform data\n"); |
560 | return -ENXIO; | 552 | return -ENXIO; |
561 | } | 553 | } |
562 | 554 | ||
563 | tmu->pdev = pdev; | ||
564 | tmu->model = id->driver_data; | ||
565 | |||
566 | /* Get hold of clock. */ | 555 | /* Get hold of clock. */ |
567 | tmu->clk = clk_get(&tmu->pdev->dev, | 556 | tmu->clk = clk_get(&tmu->pdev->dev, "fck"); |
568 | tmu->model == SH_TMU_LEGACY ? "tmu_fck" : "fck"); | ||
569 | if (IS_ERR(tmu->clk)) { | 557 | if (IS_ERR(tmu->clk)) { |
570 | dev_err(&tmu->pdev->dev, "cannot get clock\n"); | 558 | dev_err(&tmu->pdev->dev, "cannot get clock\n"); |
571 | return PTR_ERR(tmu->clk); | 559 | return PTR_ERR(tmu->clk); |
@@ -583,11 +571,6 @@ static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev) | |||
583 | } | 571 | } |
584 | 572 | ||
585 | /* Allocate and setup the channels. */ | 573 | /* Allocate and setup the channels. */ |
586 | if (tmu->model == SH_TMU_LEGACY) | ||
587 | tmu->num_channels = 1; | ||
588 | else | ||
589 | tmu->num_channels = hweight8(cfg->channels_mask); | ||
590 | |||
591 | tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels, | 574 | tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels, |
592 | GFP_KERNEL); | 575 | GFP_KERNEL); |
593 | if (tmu->channels == NULL) { | 576 | if (tmu->channels == NULL) { |
@@ -595,23 +578,15 @@ static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev) | |||
595 | goto err_unmap; | 578 | goto err_unmap; |
596 | } | 579 | } |
597 | 580 | ||
598 | if (tmu->model == SH_TMU_LEGACY) { | 581 | /* |
599 | ret = sh_tmu_channel_setup(&tmu->channels[0], 0, | 582 | * Use the first channel as a clock event device and the second channel |
600 | cfg->clockevent_rating != 0, | 583 | * as a clock source. |
601 | cfg->clocksource_rating != 0, tmu); | 584 | */ |
585 | for (i = 0; i < tmu->num_channels; ++i) { | ||
586 | ret = sh_tmu_channel_setup(&tmu->channels[i], i, | ||
587 | i == 0, i == 1, tmu); | ||
602 | if (ret < 0) | 588 | if (ret < 0) |
603 | goto err_unmap; | 589 | goto err_unmap; |
604 | } else { | ||
605 | /* | ||
606 | * Use the first channel as a clock event device and the second | ||
607 | * channel as a clock source. | ||
608 | */ | ||
609 | for (i = 0; i < tmu->num_channels; ++i) { | ||
610 | ret = sh_tmu_channel_setup(&tmu->channels[i], i, | ||
611 | i == 0, i == 1, tmu); | ||
612 | if (ret < 0) | ||
613 | goto err_unmap; | ||
614 | } | ||
615 | } | 590 | } |
616 | 591 | ||
617 | platform_set_drvdata(pdev, tmu); | 592 | platform_set_drvdata(pdev, tmu); |
@@ -620,7 +595,7 @@ static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev) | |||
620 | 595 | ||
621 | err_unmap: | 596 | err_unmap: |
622 | kfree(tmu->channels); | 597 | kfree(tmu->channels); |
623 | sh_tmu_unmap_memory(tmu); | 598 | iounmap(tmu->mapbase); |
624 | err_clk_unprepare: | 599 | err_clk_unprepare: |
625 | clk_unprepare(tmu->clk); | 600 | clk_unprepare(tmu->clk); |
626 | err_clk_put: | 601 | err_clk_put: |
@@ -671,18 +646,24 @@ static int sh_tmu_remove(struct platform_device *pdev) | |||
671 | } | 646 | } |
672 | 647 | ||
673 | static const struct platform_device_id sh_tmu_id_table[] = { | 648 | static const struct platform_device_id sh_tmu_id_table[] = { |
674 | { "sh_tmu", SH_TMU_LEGACY }, | ||
675 | { "sh-tmu", SH_TMU }, | 649 | { "sh-tmu", SH_TMU }, |
676 | { "sh-tmu-sh3", SH_TMU_SH3 }, | 650 | { "sh-tmu-sh3", SH_TMU_SH3 }, |
677 | { } | 651 | { } |
678 | }; | 652 | }; |
679 | MODULE_DEVICE_TABLE(platform, sh_tmu_id_table); | 653 | MODULE_DEVICE_TABLE(platform, sh_tmu_id_table); |
680 | 654 | ||
655 | static const struct of_device_id sh_tmu_of_table[] __maybe_unused = { | ||
656 | { .compatible = "renesas,tmu" }, | ||
657 | { } | ||
658 | }; | ||
659 | MODULE_DEVICE_TABLE(of, sh_tmu_of_table); | ||
660 | |||
681 | static struct platform_driver sh_tmu_device_driver = { | 661 | static struct platform_driver sh_tmu_device_driver = { |
682 | .probe = sh_tmu_probe, | 662 | .probe = sh_tmu_probe, |
683 | .remove = sh_tmu_remove, | 663 | .remove = sh_tmu_remove, |
684 | .driver = { | 664 | .driver = { |
685 | .name = "sh_tmu", | 665 | .name = "sh_tmu", |
666 | .of_match_table = of_match_ptr(sh_tmu_of_table), | ||
686 | }, | 667 | }, |
687 | .id_table = sh_tmu_id_table, | 668 | .id_table = sh_tmu_id_table, |
688 | }; | 669 | }; |
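The other pattern repeated across sh_cmt, sh_mtu2 and sh_tmu is replacing the file-scope DEFINE_RAW_SPINLOCK with a raw_spinlock_t embedded in the per-device structure and initialised during setup, so channels only serialise against siblings that share the same start/stop register instead of against every timer unit in the system. In outline:

    /* before: one global lock for all instances */
    static DEFINE_RAW_SPINLOCK(sh_tmu_lock);

    /* after: one lock per unit, covering only its own TSTR register */
    struct sh_tmu_device {
            /* ... */
            raw_spinlock_t lock;    /* set up with raw_spin_lock_init() */
    };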
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c index dbd30398222a..330e93064692 100644 --- a/drivers/clocksource/timer-marco.c +++ b/drivers/clocksource/timer-marco.c | |||
@@ -260,6 +260,9 @@ static void __init sirfsoc_marco_timer_init(struct device_node *np) | |||
260 | 260 | ||
261 | clk = of_clk_get(np, 0); | 261 | clk = of_clk_get(np, 0); |
262 | BUG_ON(IS_ERR(clk)); | 262 | BUG_ON(IS_ERR(clk)); |
263 | |||
264 | BUG_ON(clk_prepare_enable(clk)); | ||
265 | |||
263 | rate = clk_get_rate(clk); | 266 | rate = clk_get_rate(clk); |
264 | 267 | ||
265 | BUG_ON(rate < MARCO_CLOCK_FREQ); | 268 | BUG_ON(rate < MARCO_CLOCK_FREQ); |
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c index a722aac7ac02..ce18d570e1cd 100644 --- a/drivers/clocksource/timer-prima2.c +++ b/drivers/clocksource/timer-prima2.c | |||
@@ -200,6 +200,9 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np) | |||
200 | 200 | ||
201 | clk = of_clk_get(np, 0); | 201 | clk = of_clk_get(np, 0); |
202 | BUG_ON(IS_ERR(clk)); | 202 | BUG_ON(IS_ERR(clk)); |
203 | |||
204 | BUG_ON(clk_prepare_enable(clk)); | ||
205 | |||
203 | rate = clk_get_rate(clk); | 206 | rate = clk_get_rate(clk); |
204 | 207 | ||
205 | BUG_ON(rate < PRIMA2_CLOCK_FREQ); | 208 | BUG_ON(rate < PRIMA2_CLOCK_FREQ); |
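Both SiRF fixes are the same one-liner: the init code consumed the timer clock's rate without ever preparing and enabling the clock, which relies on it already running when the kernel takes over. clk_prepare_enable() returns 0 on success, so wrapping it in BUG_ON() matches the surrounding style, and a SoC whose tick source cannot be enabled has no reasonable way to continue booting anyway. The resulting shape, condensed from the two hunks:

    clk = of_clk_get(np, 0);
    BUG_ON(IS_ERR(clk));
    BUG_ON(clk_prepare_enable(clk));        /* 0 on success */
    rate = clk_get_rate(clk);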
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index d7d5c8af92b9..5d997a33907e 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c | |||
@@ -1214,9 +1214,9 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg) | |||
1214 | cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME); | 1214 | cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME); |
1215 | 1215 | ||
1216 | switch (a->clk_id) { | 1216 | switch (a->clk_id) { |
1217 | case CLOCK_REALTIME: getnstimeofday(&ts); break; | 1217 | case CLOCK_REALTIME: getnstimeofday(&ts); break; |
1218 | case CLOCK_MONOTONIC: do_posix_clock_monotonic_gettime(&ts); break; | 1218 | case CLOCK_MONOTONIC: ktime_get_ts(&ts); break; |
1219 | case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts); break; | 1219 | case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts); break; |
1220 | default: | 1220 | default: |
1221 | ret = -EINVAL; | 1221 | ret = -EINVAL; |
1222 | } | 1222 | } |
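The firewire hunk swaps do_posix_clock_monotonic_gettime() for ktime_get_ts(). At this point in the series the removed helper appears to be nothing more than a macro wrapping ktime_get_ts() (its definition is not part of this diff), so the CLOCK_MONOTONIC case of the ioctl behaves exactly as before and the legacy name can eventually be retired:

    /* contemporary definition, shown for context only */
    #define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)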
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 0de123afdb34..08ba1209228e 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
@@ -542,8 +542,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, | |||
542 | const struct drm_crtc *refcrtc, | 542 | const struct drm_crtc *refcrtc, |
543 | const struct drm_display_mode *mode) | 543 | const struct drm_display_mode *mode) |
544 | { | 544 | { |
545 | ktime_t stime, etime, mono_time_offset; | ||
546 | struct timeval tv_etime; | 545 | struct timeval tv_etime; |
546 | ktime_t stime, etime; | ||
547 | int vbl_status; | 547 | int vbl_status; |
548 | int vpos, hpos, i; | 548 | int vpos, hpos, i; |
549 | int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns; | 549 | int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns; |
@@ -588,13 +588,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, | |||
588 | vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos, | 588 | vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos, |
589 | &hpos, &stime, &etime); | 589 | &hpos, &stime, &etime); |
590 | 590 | ||
591 | /* | ||
592 | * Get correction for CLOCK_MONOTONIC -> CLOCK_REALTIME if | ||
593 | * CLOCK_REALTIME is requested. | ||
594 | */ | ||
595 | if (!drm_timestamp_monotonic) | ||
596 | mono_time_offset = ktime_get_monotonic_offset(); | ||
597 | |||
598 | /* Return as no-op if scanout query unsupported or failed. */ | 591 | /* Return as no-op if scanout query unsupported or failed. */ |
599 | if (!(vbl_status & DRM_SCANOUTPOS_VALID)) { | 592 | if (!(vbl_status & DRM_SCANOUTPOS_VALID)) { |
600 | DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n", | 593 | DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n", |
@@ -633,7 +626,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, | |||
633 | delta_ns = vpos * linedur_ns + hpos * pixeldur_ns; | 626 | delta_ns = vpos * linedur_ns + hpos * pixeldur_ns; |
634 | 627 | ||
635 | if (!drm_timestamp_monotonic) | 628 | if (!drm_timestamp_monotonic) |
636 | etime = ktime_sub(etime, mono_time_offset); | 629 | etime = ktime_mono_to_real(etime); |
637 | 630 | ||
638 | /* save this only for debugging purposes */ | 631 | /* save this only for debugging purposes */ |
639 | tv_etime = ktime_to_timeval(etime); | 632 | tv_etime = ktime_to_timeval(etime); |
@@ -664,10 +657,7 @@ static struct timeval get_drm_timestamp(void) | |||
664 | { | 657 | { |
665 | ktime_t now; | 658 | ktime_t now; |
666 | 659 | ||
667 | now = ktime_get(); | 660 | now = drm_timestamp_monotonic ? ktime_get() : ktime_get_real(); |
668 | if (!drm_timestamp_monotonic) | ||
669 | now = ktime_sub(now, ktime_get_monotonic_offset()); | ||
670 | |||
671 | return ktime_to_timeval(now); | 661 | return ktime_to_timeval(now); |
672 | } | 662 | } |
673 | 663 | ||
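Both DRM hunks (and the evdev change below) replace open-coded CLOCK_MONOTONIC to CLOCK_REALTIME conversion with dedicated helpers. The old code subtracted ktime_get_monotonic_offset(), i.e. the timekeeper's wall_to_monotonic value, which is defined so that adding it to wall time yields monotonic time; ktime_mono_to_real() and ktime_get_real() express the same conversion without the caller snapshotting the offset by hand. As an identity (a sketch of the relationship, not code from this series):

    /*
     * wall_to_monotonic is defined by:   real + wall_to_monotonic == mono
     * so for a monotonic timestamp t:
     *
     *   ktime_mono_to_real(t) == ktime_sub(t, wall_to_monotonic)
     *                         == ktime_add(t, offs_real)
     */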
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index fd325ec9f064..de055451d1af 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c | |||
@@ -108,9 +108,8 @@ static void evdev_queue_syn_dropped(struct evdev_client *client) | |||
108 | struct input_event ev; | 108 | struct input_event ev; |
109 | ktime_t time; | 109 | ktime_t time; |
110 | 110 | ||
111 | time = ktime_get(); | 111 | time = (client->clkid == CLOCK_MONOTONIC) ? |
112 | if (client->clkid != CLOCK_MONOTONIC) | 112 | ktime_get() : ktime_get_real(); |
113 | time = ktime_sub(time, ktime_get_monotonic_offset()); | ||
114 | 113 | ||
115 | ev.time = ktime_to_timeval(time); | 114 | ev.time = ktime_to_timeval(time); |
116 | ev.type = EV_SYN; | 115 | ev.type = EV_SYN; |
@@ -202,7 +201,7 @@ static void evdev_events(struct input_handle *handle, | |||
202 | ktime_t time_mono, time_real; | 201 | ktime_t time_mono, time_real; |
203 | 202 | ||
204 | time_mono = ktime_get(); | 203 | time_mono = ktime_get(); |
205 | time_real = ktime_sub(time_mono, ktime_get_monotonic_offset()); | 204 | time_real = ktime_mono_to_real(time_mono); |
206 | 205 | ||
207 | rcu_read_lock(); | 206 | rcu_read_lock(); |
208 | 207 | ||
diff --git a/drivers/of/address.c b/drivers/of/address.c index 5edfcb0da37d..e3718250d66e 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c | |||
@@ -702,6 +702,42 @@ void __iomem *of_iomap(struct device_node *np, int index) | |||
702 | } | 702 | } |
703 | EXPORT_SYMBOL(of_iomap); | 703 | EXPORT_SYMBOL(of_iomap); |
704 | 704 | ||
705 | /* | ||
706 | * of_io_request_and_map - Requests a resource and maps the memory mapped IO | ||
707 | * for a given device_node | ||
708 | * @device: the device whose io range will be mapped | ||
709 | * @index: index of the io range | ||
710 | * @name: name of the resource | ||
711 | * | ||
712 | * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded | ||
713 | * error code on failure. Usage example: | ||
714 | * | ||
715 | * base = of_io_request_and_map(node, 0, "foo"); | ||
716 | * if (IS_ERR(base)) | ||
717 | * return PTR_ERR(base); | ||
718 | */ | ||
719 | void __iomem *of_io_request_and_map(struct device_node *np, int index, | ||
720 | char *name) | ||
721 | { | ||
722 | struct resource res; | ||
723 | void __iomem *mem; | ||
724 | |||
725 | if (of_address_to_resource(np, index, &res)) | ||
726 | return IOMEM_ERR_PTR(-EINVAL); | ||
727 | |||
728 | if (!request_mem_region(res.start, resource_size(&res), name)) | ||
729 | return IOMEM_ERR_PTR(-EBUSY); | ||
730 | |||
731 | mem = ioremap(res.start, resource_size(&res)); | ||
732 | if (!mem) { | ||
733 | release_mem_region(res.start, resource_size(&res)); | ||
734 | return IOMEM_ERR_PTR(-ENOMEM); | ||
735 | } | ||
736 | |||
737 | return mem; | ||
738 | } | ||
739 | EXPORT_SYMBOL(of_io_request_and_map); | ||
740 | |||
705 | /** | 741 | /** |
706 | * of_dma_get_range - Get DMA range info | 742 | * of_dma_get_range - Get DMA range info |
707 | * @np: device node to get DMA range info | 743 | * @np: device node to get DMA range info |
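
The new of_io_request_and_map() combines of_address_to_resource(), request_mem_region() and ioremap() and unwinds on failure. A hedged sketch of a caller, assuming a hypothetical example_probe() and resource name:

    #include <linux/of_address.h>
    #include <linux/err.h>

    static int example_probe(struct device_node *np)
    {
            void __iomem *base;

            /* request + map in one call; failures come back as ERR_PTR() */
            base = of_io_request_and_map(np, 0, "example-regs");
            if (IS_ERR(base))
                    return PTR_ERR(base);

            /* ... program the hardware through base ... */
            return 0;
    }
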
diff --git a/fs/proc/array.c b/fs/proc/array.c index 64db2bceac59..d7f9199217bb 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
@@ -473,13 +473,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, | |||
473 | priority = task_prio(task); | 473 | priority = task_prio(task); |
474 | nice = task_nice(task); | 474 | nice = task_nice(task); |
475 | 475 | ||
476 | /* Temporary variable needed for gcc-2.96 */ | ||
477 | /* convert timespec -> nsec*/ | ||
478 | start_time = | ||
479 | (unsigned long long)task->real_start_time.tv_sec * NSEC_PER_SEC | ||
480 | + task->real_start_time.tv_nsec; | ||
481 | /* convert nsec -> ticks */ | 476 | /* convert nsec -> ticks */ |
482 | start_time = nsec_to_clock_t(start_time); | 477 | start_time = nsec_to_clock_t(task->real_start_time); |
483 | 478 | ||
484 | seq_printf(m, "%d (%s) %c", pid_nr_ns(pid, ns), tcomm, state); | 479 | seq_printf(m, "%d (%s) %c", pid_nr_ns(pid, ns), tcomm, state); |
485 | seq_put_decimal_ll(m, ' ', ppid); | 480 | seq_put_decimal_ll(m, ' ', ppid); |
diff --git a/fs/timerfd.c b/fs/timerfd.c index 0013142c0475..80c350216ea8 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c | |||
@@ -35,8 +35,9 @@ struct timerfd_ctx { | |||
35 | ktime_t moffs; | 35 | ktime_t moffs; |
36 | wait_queue_head_t wqh; | 36 | wait_queue_head_t wqh; |
37 | u64 ticks; | 37 | u64 ticks; |
38 | int expired; | ||
39 | int clockid; | 38 | int clockid; |
39 | short unsigned expired; | ||
40 | short unsigned settime_flags; /* to show in fdinfo */ | ||
40 | struct rcu_head rcu; | 41 | struct rcu_head rcu; |
41 | struct list_head clist; | 42 | struct list_head clist; |
42 | bool might_cancel; | 43 | bool might_cancel; |
@@ -92,7 +93,7 @@ static enum alarmtimer_restart timerfd_alarmproc(struct alarm *alarm, | |||
92 | */ | 93 | */ |
93 | void timerfd_clock_was_set(void) | 94 | void timerfd_clock_was_set(void) |
94 | { | 95 | { |
95 | ktime_t moffs = ktime_get_monotonic_offset(); | 96 | ktime_t moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 }); |
96 | struct timerfd_ctx *ctx; | 97 | struct timerfd_ctx *ctx; |
97 | unsigned long flags; | 98 | unsigned long flags; |
98 | 99 | ||
@@ -125,7 +126,7 @@ static bool timerfd_canceled(struct timerfd_ctx *ctx) | |||
125 | { | 126 | { |
126 | if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX) | 127 | if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX) |
127 | return false; | 128 | return false; |
128 | ctx->moffs = ktime_get_monotonic_offset(); | 129 | ctx->moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 }); |
129 | return true; | 130 | return true; |
130 | } | 131 | } |
131 | 132 | ||
@@ -196,6 +197,8 @@ static int timerfd_setup(struct timerfd_ctx *ctx, int flags, | |||
196 | if (timerfd_canceled(ctx)) | 197 | if (timerfd_canceled(ctx)) |
197 | return -ECANCELED; | 198 | return -ECANCELED; |
198 | } | 199 | } |
200 | |||
201 | ctx->settime_flags = flags & TFD_SETTIME_FLAGS; | ||
199 | return 0; | 202 | return 0; |
200 | } | 203 | } |
201 | 204 | ||
@@ -284,11 +287,77 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count, | |||
284 | return res; | 287 | return res; |
285 | } | 288 | } |
286 | 289 | ||
290 | #ifdef CONFIG_PROC_FS | ||
291 | static int timerfd_show(struct seq_file *m, struct file *file) | ||
292 | { | ||
293 | struct timerfd_ctx *ctx = file->private_data; | ||
294 | struct itimerspec t; | ||
295 | |||
296 | spin_lock_irq(&ctx->wqh.lock); | ||
297 | t.it_value = ktime_to_timespec(timerfd_get_remaining(ctx)); | ||
298 | t.it_interval = ktime_to_timespec(ctx->tintv); | ||
299 | spin_unlock_irq(&ctx->wqh.lock); | ||
300 | |||
301 | return seq_printf(m, | ||
302 | "clockid: %d\n" | ||
303 | "ticks: %llu\n" | ||
304 | "settime flags: 0%o\n" | ||
305 | "it_value: (%llu, %llu)\n" | ||
306 | "it_interval: (%llu, %llu)\n", | ||
307 | ctx->clockid, (unsigned long long)ctx->ticks, | ||
308 | ctx->settime_flags, | ||
309 | (unsigned long long)t.it_value.tv_sec, | ||
310 | (unsigned long long)t.it_value.tv_nsec, | ||
311 | (unsigned long long)t.it_interval.tv_sec, | ||
312 | (unsigned long long)t.it_interval.tv_nsec); | ||
313 | } | ||
314 | #else | ||
315 | #define timerfd_show NULL | ||
316 | #endif | ||
317 | |||
318 | #ifdef CONFIG_CHECKPOINT_RESTORE | ||
319 | static long timerfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
320 | { | ||
321 | struct timerfd_ctx *ctx = file->private_data; | ||
322 | int ret = 0; | ||
323 | |||
324 | switch (cmd) { | ||
325 | case TFD_IOC_SET_TICKS: { | ||
326 | u64 ticks; | ||
327 | |||
328 | if (copy_from_user(&ticks, (u64 __user *)arg, sizeof(ticks))) | ||
329 | return -EFAULT; | ||
330 | if (!ticks) | ||
331 | return -EINVAL; | ||
332 | |||
333 | spin_lock_irq(&ctx->wqh.lock); | ||
334 | if (!timerfd_canceled(ctx)) { | ||
335 | ctx->ticks = ticks; | ||
336 | if (ticks) | ||
337 | wake_up_locked(&ctx->wqh); | ||
338 | } else | ||
339 | ret = -ECANCELED; | ||
340 | spin_unlock_irq(&ctx->wqh.lock); | ||
341 | break; | ||
342 | } | ||
343 | default: | ||
344 | ret = -ENOTTY; | ||
345 | break; | ||
346 | } | ||
347 | |||
348 | return ret; | ||
349 | } | ||
350 | #else | ||
351 | #define timerfd_ioctl NULL | ||
352 | #endif | ||
353 | |||
287 | static const struct file_operations timerfd_fops = { | 354 | static const struct file_operations timerfd_fops = { |
288 | .release = timerfd_release, | 355 | .release = timerfd_release, |
289 | .poll = timerfd_poll, | 356 | .poll = timerfd_poll, |
290 | .read = timerfd_read, | 357 | .read = timerfd_read, |
291 | .llseek = noop_llseek, | 358 | .llseek = noop_llseek, |
359 | .show_fdinfo = timerfd_show, | ||
360 | .unlocked_ioctl = timerfd_ioctl, | ||
292 | }; | 361 | }; |
293 | 362 | ||
294 | static int timerfd_fget(int fd, struct fd *p) | 363 | static int timerfd_fget(int fd, struct fd *p) |
@@ -336,7 +405,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags) | |||
336 | else | 405 | else |
337 | hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS); | 406 | hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS); |
338 | 407 | ||
339 | ctx->moffs = ktime_get_monotonic_offset(); | 408 | ctx->moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 }); |
340 | 409 | ||
341 | ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx, | 410 | ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx, |
342 | O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS)); | 411 | O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS)); |
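
With the show_fdinfo hook wired up above, the timerfd state (clockid, accumulated ticks, settime flags, remaining value and interval) becomes visible under /proc/<pid>/fdinfo/<fd>. A small userspace sketch that dumps those lines; dump_timerfd_fdinfo() is a hypothetical name:

    #include <stdio.h>

    static void dump_timerfd_fdinfo(int fd)
    {
            char path[64], line[128];
            FILE *f;

            snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
            f = fopen(path, "r");
            if (!f)
                    return;
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);   /* clockid:, ticks:, settime flags:, ... */
            fclose(f);
    }
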
diff --git a/include/clocksource/pxa.h b/include/clocksource/pxa.h new file mode 100644 index 000000000000..1efbe5a66958 --- /dev/null +++ b/include/clocksource/pxa.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * PXA clocksource, clockevents, and OST interrupt handlers. | ||
3 | * | ||
4 | * Copyright (C) 2014 Robert Jarzmik | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; version 2 of the License. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #ifndef _CLOCKSOURCE_PXA_H | ||
13 | #define _CLOCKSOURCE_PXA_H | ||
14 | |||
15 | extern void pxa_timer_nodt_init(int irq, void __iomem *base, | ||
16 | unsigned long clock_tick_rate); | ||
17 | |||
18 | #endif | ||
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index e7a8d3fa91d5..a036d058a249 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
@@ -165,6 +165,7 @@ enum hrtimer_base_type { | |||
165 | * struct hrtimer_cpu_base - the per cpu clock bases | 165 | * struct hrtimer_cpu_base - the per cpu clock bases |
166 | * @lock: lock protecting the base and associated clock bases | 166 | * @lock: lock protecting the base and associated clock bases |
167 | * and timers | 167 | * and timers |
168 | * @cpu: cpu number | ||
168 | * @active_bases: Bitfield to mark bases with active timers | 169 | * @active_bases: Bitfield to mark bases with active timers |
169 | * @clock_was_set: Indicates that clock was set from irq context. | 170 | * @clock_was_set: Indicates that clock was set from irq context. |
170 | * @expires_next: absolute time of the next event which was scheduled | 171 | * @expires_next: absolute time of the next event which was scheduled |
@@ -179,6 +180,7 @@ enum hrtimer_base_type { | |||
179 | */ | 180 | */ |
180 | struct hrtimer_cpu_base { | 181 | struct hrtimer_cpu_base { |
181 | raw_spinlock_t lock; | 182 | raw_spinlock_t lock; |
183 | unsigned int cpu; | ||
182 | unsigned int active_bases; | 184 | unsigned int active_bases; |
183 | unsigned int clock_was_set; | 185 | unsigned int clock_was_set; |
184 | #ifdef CONFIG_HIGH_RES_TIMERS | 186 | #ifdef CONFIG_HIGH_RES_TIMERS |
@@ -324,14 +326,6 @@ static inline void timerfd_clock_was_set(void) { } | |||
324 | #endif | 326 | #endif |
325 | extern void hrtimers_resume(void); | 327 | extern void hrtimers_resume(void); |
326 | 328 | ||
327 | extern ktime_t ktime_get(void); | ||
328 | extern ktime_t ktime_get_real(void); | ||
329 | extern ktime_t ktime_get_boottime(void); | ||
330 | extern ktime_t ktime_get_monotonic_offset(void); | ||
331 | extern ktime_t ktime_get_clocktai(void); | ||
332 | extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot, | ||
333 | ktime_t *offs_tai); | ||
334 | |||
335 | DECLARE_PER_CPU(struct tick_device, tick_cpu_device); | 329 | DECLARE_PER_CPU(struct tick_device, tick_cpu_device); |
336 | 330 | ||
337 | 331 | ||
@@ -452,12 +446,6 @@ extern void hrtimer_run_pending(void); | |||
452 | /* Bootup initialization: */ | 446 | /* Bootup initialization: */ |
453 | extern void __init hrtimers_init(void); | 447 | extern void __init hrtimers_init(void); |
454 | 448 | ||
455 | #if BITS_PER_LONG < 64 | ||
456 | extern u64 ktime_divns(const ktime_t kt, s64 div); | ||
457 | #else /* BITS_PER_LONG < 64 */ | ||
458 | # define ktime_divns(kt, div) (u64)((kt).tv64 / (div)) | ||
459 | #endif | ||
460 | |||
461 | /* Show pending timers: */ | 449 | /* Show pending timers: */ |
462 | extern void sysrq_timer_list_show(void); | 450 | extern void sysrq_timer_list_show(void); |
463 | 451 | ||
diff --git a/include/linux/io.h b/include/linux/io.h index b76e6e545806..d5fc9b8d8b03 100644 --- a/include/linux/io.h +++ b/include/linux/io.h | |||
@@ -58,6 +58,8 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr) | |||
58 | } | 58 | } |
59 | #endif | 59 | #endif |
60 | 60 | ||
61 | #define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err) | ||
62 | |||
61 | void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, | 63 | void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, |
62 | unsigned long size); | 64 | unsigned long size); |
63 | void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, | 65 | void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, |
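
IOMEM_ERR_PTR() lets ioremap-style helpers return error codes in the __iomem address space so callers can test them with IS_ERR()/PTR_ERR(). A sketch of the convention with a hypothetical example_map():

    static void __iomem *example_map(struct resource *res)
    {
            void __iomem *base = ioremap(res->start, resource_size(res));

            if (!base)
                    return IOMEM_ERR_PTR(-ENOMEM);  /* not NULL: an encoded errno */
            return base;
    }
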
diff --git a/include/linux/ktime.h b/include/linux/ktime.h index de9e46e6bcc9..c9d645ad98ff 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h | |||
@@ -27,43 +27,19 @@ | |||
27 | /* | 27 | /* |
28 | * ktime_t: | 28 | * ktime_t: |
29 | * | 29 | * |
30 | * On 64-bit CPUs a single 64-bit variable is used to store the hrtimers | 30 | * A single 64-bit variable is used to store the hrtimers |
31 | * internal representation of time values in scalar nanoseconds. The | 31 | * internal representation of time values in scalar nanoseconds. The |
32 | * design plays out best on 64-bit CPUs, where most conversions are | 32 | * design plays out best on 64-bit CPUs, where most conversions are |
33 | * NOPs and most arithmetic ktime_t operations are plain arithmetic | 33 | * NOPs and most arithmetic ktime_t operations are plain arithmetic |
34 | * operations. | 34 | * operations. |
35 | * | 35 | * |
36 | * On 32-bit CPUs an optimized representation of the timespec structure | ||
37 | * is used to avoid expensive conversions from and to timespecs. The | ||
38 | * endian-aware order of the tv struct members is chosen to allow | ||
39 | * mathematical operations on the tv64 member of the union too, which | ||
40 | * for certain operations produces better code. | ||
41 | * | ||
42 | * For architectures with efficient support for 64/32-bit conversions the | ||
43 | * plain scalar nanosecond based representation can be selected by the | ||
44 | * config switch CONFIG_KTIME_SCALAR. | ||
45 | */ | 36 | */ |
46 | union ktime { | 37 | union ktime { |
47 | s64 tv64; | 38 | s64 tv64; |
48 | #if BITS_PER_LONG != 64 && !defined(CONFIG_KTIME_SCALAR) | ||
49 | struct { | ||
50 | # ifdef __BIG_ENDIAN | ||
51 | s32 sec, nsec; | ||
52 | # else | ||
53 | s32 nsec, sec; | ||
54 | # endif | ||
55 | } tv; | ||
56 | #endif | ||
57 | }; | 39 | }; |
58 | 40 | ||
59 | typedef union ktime ktime_t; /* Kill this */ | 41 | typedef union ktime ktime_t; /* Kill this */ |
60 | 42 | ||
61 | /* | ||
62 | * ktime_t definitions when using the 64-bit scalar representation: | ||
63 | */ | ||
64 | |||
65 | #if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR) | ||
66 | |||
67 | /** | 43 | /** |
68 | * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value | 44 | * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value |
69 | * @secs: seconds to set | 45 | * @secs: seconds to set |
@@ -71,13 +47,12 @@ typedef union ktime ktime_t; /* Kill this */ | |||
71 | * | 47 | * |
72 | * Return: The ktime_t representation of the value. | 48 | * Return: The ktime_t representation of the value. |
73 | */ | 49 | */ |
74 | static inline ktime_t ktime_set(const long secs, const unsigned long nsecs) | 50 | static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs) |
75 | { | 51 | { |
76 | #if (BITS_PER_LONG == 64) | ||
77 | if (unlikely(secs >= KTIME_SEC_MAX)) | 52 | if (unlikely(secs >= KTIME_SEC_MAX)) |
78 | return (ktime_t){ .tv64 = KTIME_MAX }; | 53 | return (ktime_t){ .tv64 = KTIME_MAX }; |
79 | #endif | 54 | |
80 | return (ktime_t) { .tv64 = (s64)secs * NSEC_PER_SEC + (s64)nsecs }; | 55 | return (ktime_t) { .tv64 = secs * NSEC_PER_SEC + (s64)nsecs }; |
81 | } | 56 | } |
82 | 57 | ||
83 | /* Subtract two ktime_t variables. rem = lhs -rhs: */ | 58 | /* Subtract two ktime_t variables. rem = lhs -rhs: */ |
@@ -108,6 +83,12 @@ static inline ktime_t timespec_to_ktime(struct timespec ts) | |||
108 | return ktime_set(ts.tv_sec, ts.tv_nsec); | 83 | return ktime_set(ts.tv_sec, ts.tv_nsec); |
109 | } | 84 | } |
110 | 85 | ||
86 | /* convert a timespec64 to ktime_t format: */ | ||
87 | static inline ktime_t timespec64_to_ktime(struct timespec64 ts) | ||
88 | { | ||
89 | return ktime_set(ts.tv_sec, ts.tv_nsec); | ||
90 | } | ||
91 | |||
111 | /* convert a timeval to ktime_t format: */ | 92 | /* convert a timeval to ktime_t format: */ |
112 | static inline ktime_t timeval_to_ktime(struct timeval tv) | 93 | static inline ktime_t timeval_to_ktime(struct timeval tv) |
113 | { | 94 | { |
@@ -117,159 +98,15 @@ static inline ktime_t timeval_to_ktime(struct timeval tv) | |||
117 | /* Map the ktime_t to timespec conversion to ns_to_timespec function */ | 98 | /* Map the ktime_t to timespec conversion to ns_to_timespec function */ |
118 | #define ktime_to_timespec(kt) ns_to_timespec((kt).tv64) | 99 | #define ktime_to_timespec(kt) ns_to_timespec((kt).tv64) |
119 | 100 | ||
101 | /* Map the ktime_t to timespec conversion to ns_to_timespec function */ | ||
102 | #define ktime_to_timespec64(kt) ns_to_timespec64((kt).tv64) | ||
103 | |||
120 | /* Map the ktime_t to timeval conversion to ns_to_timeval function */ | 104 | /* Map the ktime_t to timeval conversion to ns_to_timeval function */ |
121 | #define ktime_to_timeval(kt) ns_to_timeval((kt).tv64) | 105 | #define ktime_to_timeval(kt) ns_to_timeval((kt).tv64) |
122 | 106 | ||
123 | /* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ | 107 | /* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ |
124 | #define ktime_to_ns(kt) ((kt).tv64) | 108 | #define ktime_to_ns(kt) ((kt).tv64) |
125 | 109 | ||
126 | #else /* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */ | ||
127 | |||
128 | /* | ||
129 | * Helper macros/inlines to get the ktime_t math right in the timespec | ||
130 | * representation. The macros are sometimes ugly - their actual use is | ||
131 | * pretty okay-ish, given the circumstances. We do all this for | ||
132 | * performance reasons. The pure scalar nsec_t based code was nice and | ||
133 | * simple, but created too many 64-bit / 32-bit conversions and divisions. | ||
134 | * | ||
135 | * Be especially aware that negative values are represented in a way | ||
136 | * that the tv.sec field is negative and the tv.nsec field is greater | ||
137 | * or equal to zero but less than nanoseconds per second. This is the | ||
138 | * same representation which is used by timespecs. | ||
139 | * | ||
140 | * tv.sec < 0 and 0 >= tv.nsec < NSEC_PER_SEC | ||
141 | */ | ||
142 | |||
143 | /* Set a ktime_t variable to a value in sec/nsec representation: */ | ||
144 | static inline ktime_t ktime_set(const long secs, const unsigned long nsecs) | ||
145 | { | ||
146 | return (ktime_t) { .tv = { .sec = secs, .nsec = nsecs } }; | ||
147 | } | ||
148 | |||
149 | /** | ||
150 | * ktime_sub - subtract two ktime_t variables | ||
151 | * @lhs: minuend | ||
152 | * @rhs: subtrahend | ||
153 | * | ||
154 | * Return: The remainder of the subtraction. | ||
155 | */ | ||
156 | static inline ktime_t ktime_sub(const ktime_t lhs, const ktime_t rhs) | ||
157 | { | ||
158 | ktime_t res; | ||
159 | |||
160 | res.tv64 = lhs.tv64 - rhs.tv64; | ||
161 | if (res.tv.nsec < 0) | ||
162 | res.tv.nsec += NSEC_PER_SEC; | ||
163 | |||
164 | return res; | ||
165 | } | ||
166 | |||
167 | /** | ||
168 | * ktime_add - add two ktime_t variables | ||
169 | * @add1: addend1 | ||
170 | * @add2: addend2 | ||
171 | * | ||
172 | * Return: The sum of @add1 and @add2. | ||
173 | */ | ||
174 | static inline ktime_t ktime_add(const ktime_t add1, const ktime_t add2) | ||
175 | { | ||
176 | ktime_t res; | ||
177 | |||
178 | res.tv64 = add1.tv64 + add2.tv64; | ||
179 | /* | ||
180 | * performance trick: the (u32) -NSEC gives 0x00000000Fxxxxxxx | ||
181 | * so we subtract NSEC_PER_SEC and add 1 to the upper 32 bit. | ||
182 | * | ||
183 | * it's equivalent to: | ||
184 | * tv.nsec -= NSEC_PER_SEC | ||
185 | * tv.sec ++; | ||
186 | */ | ||
187 | if (res.tv.nsec >= NSEC_PER_SEC) | ||
188 | res.tv64 += (u32)-NSEC_PER_SEC; | ||
189 | |||
190 | return res; | ||
191 | } | ||
192 | |||
193 | /** | ||
194 | * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable | ||
195 | * @kt: addend | ||
196 | * @nsec: the scalar nsec value to add | ||
197 | * | ||
198 | * Return: The sum of @kt and @nsec in ktime_t format. | ||
199 | */ | ||
200 | extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec); | ||
201 | |||
202 | /** | ||
203 | * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable | ||
204 | * @kt: minuend | ||
205 | * @nsec: the scalar nsec value to subtract | ||
206 | * | ||
207 | * Return: The subtraction of @nsec from @kt in ktime_t format. | ||
208 | */ | ||
209 | extern ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec); | ||
210 | |||
211 | /** | ||
212 | * timespec_to_ktime - convert a timespec to ktime_t format | ||
213 | * @ts: the timespec variable to convert | ||
214 | * | ||
215 | * Return: A ktime_t variable with the converted timespec value. | ||
216 | */ | ||
217 | static inline ktime_t timespec_to_ktime(const struct timespec ts) | ||
218 | { | ||
219 | return (ktime_t) { .tv = { .sec = (s32)ts.tv_sec, | ||
220 | .nsec = (s32)ts.tv_nsec } }; | ||
221 | } | ||
222 | |||
223 | /** | ||
224 | * timeval_to_ktime - convert a timeval to ktime_t format | ||
225 | * @tv: the timeval variable to convert | ||
226 | * | ||
227 | * Return: A ktime_t variable with the converted timeval value. | ||
228 | */ | ||
229 | static inline ktime_t timeval_to_ktime(const struct timeval tv) | ||
230 | { | ||
231 | return (ktime_t) { .tv = { .sec = (s32)tv.tv_sec, | ||
232 | .nsec = (s32)(tv.tv_usec * | ||
233 | NSEC_PER_USEC) } }; | ||
234 | } | ||
235 | |||
236 | /** | ||
237 | * ktime_to_timespec - convert a ktime_t variable to timespec format | ||
238 | * @kt: the ktime_t variable to convert | ||
239 | * | ||
240 | * Return: The timespec representation of the ktime value. | ||
241 | */ | ||
242 | static inline struct timespec ktime_to_timespec(const ktime_t kt) | ||
243 | { | ||
244 | return (struct timespec) { .tv_sec = (time_t) kt.tv.sec, | ||
245 | .tv_nsec = (long) kt.tv.nsec }; | ||
246 | } | ||
247 | |||
248 | /** | ||
249 | * ktime_to_timeval - convert a ktime_t variable to timeval format | ||
250 | * @kt: the ktime_t variable to convert | ||
251 | * | ||
252 | * Return: The timeval representation of the ktime value. | ||
253 | */ | ||
254 | static inline struct timeval ktime_to_timeval(const ktime_t kt) | ||
255 | { | ||
256 | return (struct timeval) { | ||
257 | .tv_sec = (time_t) kt.tv.sec, | ||
258 | .tv_usec = (suseconds_t) (kt.tv.nsec / NSEC_PER_USEC) }; | ||
259 | } | ||
260 | |||
261 | /** | ||
262 | * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds | ||
263 | * @kt: the ktime_t variable to convert | ||
264 | * | ||
265 | * Return: The scalar nanoseconds representation of @kt. | ||
266 | */ | ||
267 | static inline s64 ktime_to_ns(const ktime_t kt) | ||
268 | { | ||
269 | return (s64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec; | ||
270 | } | ||
271 | |||
272 | #endif /* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */ | ||
273 | 110 | ||
274 | /** | 111 | /** |
275 | * ktime_equal - Compares two ktime_t variables to see if they are equal | 112 | * ktime_equal - Compares two ktime_t variables to see if they are equal |
@@ -328,16 +165,20 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2) | |||
328 | return ktime_compare(cmp1, cmp2) < 0; | 165 | return ktime_compare(cmp1, cmp2) < 0; |
329 | } | 166 | } |
330 | 167 | ||
168 | #if BITS_PER_LONG < 64 | ||
169 | extern u64 ktime_divns(const ktime_t kt, s64 div); | ||
170 | #else /* BITS_PER_LONG < 64 */ | ||
171 | # define ktime_divns(kt, div) (u64)((kt).tv64 / (div)) | ||
172 | #endif | ||
173 | |||
331 | static inline s64 ktime_to_us(const ktime_t kt) | 174 | static inline s64 ktime_to_us(const ktime_t kt) |
332 | { | 175 | { |
333 | struct timeval tv = ktime_to_timeval(kt); | 176 | return ktime_divns(kt, NSEC_PER_USEC); |
334 | return (s64) tv.tv_sec * USEC_PER_SEC + tv.tv_usec; | ||
335 | } | 177 | } |
336 | 178 | ||
337 | static inline s64 ktime_to_ms(const ktime_t kt) | 179 | static inline s64 ktime_to_ms(const ktime_t kt) |
338 | { | 180 | { |
339 | struct timeval tv = ktime_to_timeval(kt); | 181 | return ktime_divns(kt, NSEC_PER_MSEC); |
340 | return (s64) tv.tv_sec * MSEC_PER_SEC + tv.tv_usec / USEC_PER_MSEC; | ||
341 | } | 182 | } |
342 | 183 | ||
343 | static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier) | 184 | static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier) |
@@ -381,6 +222,25 @@ static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt, | |||
381 | } | 222 | } |
382 | } | 223 | } |
383 | 224 | ||
225 | /** | ||
226 | * ktime_to_timespec64_cond - convert a ktime_t variable to timespec64 | ||
227 | * format only if the variable contains data | ||
228 | * @kt: the ktime_t variable to convert | ||
229 | * @ts: the timespec variable to store the result in | ||
230 | * | ||
231 | * Return: %true if there was a successful conversion, %false if kt was 0. | ||
232 | */ | ||
233 | static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt, | ||
234 | struct timespec64 *ts) | ||
235 | { | ||
236 | if (kt.tv64) { | ||
237 | *ts = ktime_to_timespec64(kt); | ||
238 | return true; | ||
239 | } else { | ||
240 | return false; | ||
241 | } | ||
242 | } | ||
243 | |||
384 | /* | 244 | /* |
385 | * The resolution of the clocks. The resolution value is returned in | 245 | * The resolution of the clocks. The resolution value is returned in |
386 | * the clock_getres() system call to give application programmers an | 246 | * the clock_getres() system call to give application programmers an |
@@ -390,12 +250,6 @@ static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt, | |||
390 | #define LOW_RES_NSEC TICK_NSEC | 250 | #define LOW_RES_NSEC TICK_NSEC |
391 | #define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC } | 251 | #define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC } |
392 | 252 | ||
393 | /* Get the monotonic time in timespec format: */ | ||
394 | extern void ktime_get_ts(struct timespec *ts); | ||
395 | |||
396 | /* Get the real (wall-) time in timespec format: */ | ||
397 | #define ktime_get_real_ts(ts) getnstimeofday(ts) | ||
398 | |||
399 | static inline ktime_t ns_to_ktime(u64 ns) | 253 | static inline ktime_t ns_to_ktime(u64 ns) |
400 | { | 254 | { |
401 | static const ktime_t ktime_zero = { .tv64 = 0 }; | 255 | static const ktime_t ktime_zero = { .tv64 = 0 }; |
@@ -410,4 +264,6 @@ static inline ktime_t ms_to_ktime(u64 ms) | |||
410 | return ktime_add_ms(ktime_zero, ms); | 264 | return ktime_add_ms(ktime_zero, ms); |
411 | } | 265 | } |
412 | 266 | ||
267 | # include <linux/timekeeping.h> | ||
268 | |||
413 | #endif | 269 | #endif |
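
With the dual 32-bit representation gone, ktime_t math is plain 64-bit arithmetic and ktime_to_us()/ktime_to_ms() reduce to ktime_divns(). A sketch of typical elapsed-time use, variable names illustrative:

    ktime_t start = ktime_get();
    /* ... work being timed ... */
    ktime_t delta = ktime_sub(ktime_get(), start);

    s64 us = ktime_to_us(delta);        /* ktime_divns(delta, NSEC_PER_USEC) */
    s64 ms = ktime_to_ms(delta);        /* ktime_divns(delta, NSEC_PER_MSEC) */
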
diff --git a/include/linux/of_address.h b/include/linux/of_address.h index c13b8782a4eb..fb7b7221e063 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h | |||
@@ -109,7 +109,12 @@ static inline bool of_dma_is_coherent(struct device_node *np) | |||
109 | extern int of_address_to_resource(struct device_node *dev, int index, | 109 | extern int of_address_to_resource(struct device_node *dev, int index, |
110 | struct resource *r); | 110 | struct resource *r); |
111 | void __iomem *of_iomap(struct device_node *node, int index); | 111 | void __iomem *of_iomap(struct device_node *node, int index); |
112 | void __iomem *of_io_request_and_map(struct device_node *device, | ||
113 | int index, char *name); | ||
112 | #else | 114 | #else |
115 | |||
116 | #include <linux/io.h> | ||
117 | |||
113 | static inline int of_address_to_resource(struct device_node *dev, int index, | 118 | static inline int of_address_to_resource(struct device_node *dev, int index, |
114 | struct resource *r) | 119 | struct resource *r) |
115 | { | 120 | { |
@@ -120,6 +125,12 @@ static inline void __iomem *of_iomap(struct device_node *device, int index) | |||
120 | { | 125 | { |
121 | return NULL; | 126 | return NULL; |
122 | } | 127 | } |
128 | |||
129 | static inline void __iomem *of_io_request_and_map(struct device_node *device, | ||
130 | int index, char *name) | ||
131 | { | ||
132 | return IOMEM_ERR_PTR(-EINVAL); | ||
133 | } | ||
123 | #endif | 134 | #endif |
124 | 135 | ||
125 | #if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI) | 136 | #if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI) |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 0376b054a0d0..b1ce2aceaf49 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1367,8 +1367,8 @@ struct task_struct { | |||
1367 | } vtime_snap_whence; | 1367 | } vtime_snap_whence; |
1368 | #endif | 1368 | #endif |
1369 | unsigned long nvcsw, nivcsw; /* context switch counts */ | 1369 | unsigned long nvcsw, nivcsw; /* context switch counts */ |
1370 | struct timespec start_time; /* monotonic time */ | 1370 | u64 start_time; /* monotonic time in nsec */ |
1371 | struct timespec real_start_time; /* boot based time */ | 1371 | u64 real_start_time; /* boot based time in nsec */ |
1372 | /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ | 1372 | /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ |
1373 | unsigned long min_flt, maj_flt; | 1373 | unsigned long min_flt, maj_flt; |
1374 | 1374 | ||
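
start_time and real_start_time are now plain nanosecond counters, so producers and consumers can use the scalar helpers directly. An illustrative sketch, not a quote of fork.c or the proc code; p stands for a task_struct pointer:

    p->start_time      = ktime_get_ns();       /* CLOCK_MONOTONIC, nsec  */
    p->real_start_time = ktime_get_boot_ns();  /* boot-based clock, nsec */

    /* later, e.g. when emitting /proc/<pid>/stat: */
    unsigned long long start_ticks = nsec_to_clock_t(p->real_start_time);
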
diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h index 8e1e036d6d45..64638b058076 100644 --- a/include/linux/sh_timer.h +++ b/include/linux/sh_timer.h | |||
@@ -2,11 +2,6 @@ | |||
2 | #define __SH_TIMER_H__ | 2 | #define __SH_TIMER_H__ |
3 | 3 | ||
4 | struct sh_timer_config { | 4 | struct sh_timer_config { |
5 | char *name; | ||
6 | long channel_offset; | ||
7 | int timer_bit; | ||
8 | unsigned long clockevent_rating; | ||
9 | unsigned long clocksource_rating; | ||
10 | unsigned int channels_mask; | 5 | unsigned int channels_mask; |
11 | }; | 6 | }; |
12 | 7 | ||
diff --git a/include/linux/time.h b/include/linux/time.h index d5d229b2e5af..8c42cf8d2444 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
@@ -4,19 +4,10 @@ | |||
4 | # include <linux/cache.h> | 4 | # include <linux/cache.h> |
5 | # include <linux/seqlock.h> | 5 | # include <linux/seqlock.h> |
6 | # include <linux/math64.h> | 6 | # include <linux/math64.h> |
7 | #include <uapi/linux/time.h> | 7 | # include <linux/time64.h> |
8 | 8 | ||
9 | extern struct timezone sys_tz; | 9 | extern struct timezone sys_tz; |
10 | 10 | ||
11 | /* Parameters used to convert the timespec values: */ | ||
12 | #define MSEC_PER_SEC 1000L | ||
13 | #define USEC_PER_MSEC 1000L | ||
14 | #define NSEC_PER_USEC 1000L | ||
15 | #define NSEC_PER_MSEC 1000000L | ||
16 | #define USEC_PER_SEC 1000000L | ||
17 | #define NSEC_PER_SEC 1000000000L | ||
18 | #define FSEC_PER_SEC 1000000000000000LL | ||
19 | |||
20 | #define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) | 11 | #define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) |
21 | 12 | ||
22 | static inline int timespec_equal(const struct timespec *a, | 13 | static inline int timespec_equal(const struct timespec *a, |
@@ -84,13 +75,6 @@ static inline struct timespec timespec_sub(struct timespec lhs, | |||
84 | return ts_delta; | 75 | return ts_delta; |
85 | } | 76 | } |
86 | 77 | ||
87 | #define KTIME_MAX ((s64)~((u64)1 << 63)) | ||
88 | #if (BITS_PER_LONG == 64) | ||
89 | # define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) | ||
90 | #else | ||
91 | # define KTIME_SEC_MAX LONG_MAX | ||
92 | #endif | ||
93 | |||
94 | /* | 78 | /* |
95 | * Returns true if the timespec is norm, false if denorm: | 79 | * Returns true if the timespec is norm, false if denorm: |
96 | */ | 80 | */ |
@@ -115,27 +99,7 @@ static inline bool timespec_valid_strict(const struct timespec *ts) | |||
115 | return true; | 99 | return true; |
116 | } | 100 | } |
117 | 101 | ||
118 | extern bool persistent_clock_exist; | 102 | extern struct timespec timespec_trunc(struct timespec t, unsigned gran); |
119 | |||
120 | static inline bool has_persistent_clock(void) | ||
121 | { | ||
122 | return persistent_clock_exist; | ||
123 | } | ||
124 | |||
125 | extern void read_persistent_clock(struct timespec *ts); | ||
126 | extern void read_boot_clock(struct timespec *ts); | ||
127 | extern int persistent_clock_is_local; | ||
128 | extern int update_persistent_clock(struct timespec now); | ||
129 | void timekeeping_init(void); | ||
130 | extern int timekeeping_suspended; | ||
131 | |||
132 | unsigned long get_seconds(void); | ||
133 | struct timespec current_kernel_time(void); | ||
134 | struct timespec __current_kernel_time(void); /* does not take xtime_lock */ | ||
135 | struct timespec get_monotonic_coarse(void); | ||
136 | void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, | ||
137 | struct timespec *wtom, struct timespec *sleep); | ||
138 | void timekeeping_inject_sleeptime(struct timespec *delta); | ||
139 | 103 | ||
140 | #define CURRENT_TIME (current_kernel_time()) | 104 | #define CURRENT_TIME (current_kernel_time()) |
141 | #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) | 105 | #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) |
@@ -153,33 +117,14 @@ void timekeeping_inject_sleeptime(struct timespec *delta); | |||
153 | extern u32 (*arch_gettimeoffset)(void); | 117 | extern u32 (*arch_gettimeoffset)(void); |
154 | #endif | 118 | #endif |
155 | 119 | ||
156 | extern void do_gettimeofday(struct timeval *tv); | ||
157 | extern int do_settimeofday(const struct timespec *tv); | ||
158 | extern int do_sys_settimeofday(const struct timespec *tv, | ||
159 | const struct timezone *tz); | ||
160 | #define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) | ||
161 | extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags); | ||
162 | struct itimerval; | 120 | struct itimerval; |
163 | extern int do_setitimer(int which, struct itimerval *value, | 121 | extern int do_setitimer(int which, struct itimerval *value, |
164 | struct itimerval *ovalue); | 122 | struct itimerval *ovalue); |
165 | extern unsigned int alarm_setitimer(unsigned int seconds); | ||
166 | extern int do_getitimer(int which, struct itimerval *value); | 123 | extern int do_getitimer(int which, struct itimerval *value); |
167 | extern int __getnstimeofday(struct timespec *tv); | ||
168 | extern void getnstimeofday(struct timespec *tv); | ||
169 | extern void getrawmonotonic(struct timespec *ts); | ||
170 | extern void getnstime_raw_and_real(struct timespec *ts_raw, | ||
171 | struct timespec *ts_real); | ||
172 | extern void getboottime(struct timespec *ts); | ||
173 | extern void monotonic_to_bootbased(struct timespec *ts); | ||
174 | extern void get_monotonic_boottime(struct timespec *ts); | ||
175 | 124 | ||
176 | extern struct timespec timespec_trunc(struct timespec t, unsigned gran); | 125 | extern unsigned int alarm_setitimer(unsigned int seconds); |
177 | extern int timekeeping_valid_for_hres(void); | 126 | |
178 | extern u64 timekeeping_max_deferment(void); | 127 | extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags); |
179 | extern int timekeeping_inject_offset(struct timespec *ts); | ||
180 | extern s32 timekeeping_get_tai_offset(void); | ||
181 | extern void timekeeping_set_tai_offset(s32 tai_offset); | ||
182 | extern void timekeeping_clocktai(struct timespec *ts); | ||
183 | 128 | ||
184 | struct tms; | 129 | struct tms; |
185 | extern void do_sys_times(struct tms *); | 130 | extern void do_sys_times(struct tms *); |
diff --git a/include/linux/time64.h b/include/linux/time64.h new file mode 100644 index 000000000000..a3831478d9cf --- /dev/null +++ b/include/linux/time64.h | |||
@@ -0,0 +1,190 @@ | |||
1 | #ifndef _LINUX_TIME64_H | ||
2 | #define _LINUX_TIME64_H | ||
3 | |||
4 | #include <uapi/linux/time.h> | ||
5 | |||
6 | typedef __s64 time64_t; | ||
7 | |||
8 | /* | ||
9 | * This wants to go into uapi/linux/time.h once we agreed about the | ||
10 | * userspace interfaces. | ||
11 | */ | ||
12 | #if __BITS_PER_LONG == 64 | ||
13 | # define timespec64 timespec | ||
14 | #else | ||
15 | struct timespec64 { | ||
16 | time64_t tv_sec; /* seconds */ | ||
17 | long tv_nsec; /* nanoseconds */ | ||
18 | }; | ||
19 | #endif | ||
20 | |||
21 | /* Parameters used to convert the timespec values: */ | ||
22 | #define MSEC_PER_SEC 1000L | ||
23 | #define USEC_PER_MSEC 1000L | ||
24 | #define NSEC_PER_USEC 1000L | ||
25 | #define NSEC_PER_MSEC 1000000L | ||
26 | #define USEC_PER_SEC 1000000L | ||
27 | #define NSEC_PER_SEC 1000000000L | ||
28 | #define FSEC_PER_SEC 1000000000000000LL | ||
29 | |||
30 | /* Located here for timespec[64]_valid_strict */ | ||
31 | #define KTIME_MAX ((s64)~((u64)1 << 63)) | ||
32 | #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) | ||
33 | |||
34 | #if __BITS_PER_LONG == 64 | ||
35 | |||
36 | static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) | ||
37 | { | ||
38 | return ts64; | ||
39 | } | ||
40 | |||
41 | static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) | ||
42 | { | ||
43 | return ts; | ||
44 | } | ||
45 | |||
46 | # define timespec64_equal timespec_equal | ||
47 | # define timespec64_compare timespec_compare | ||
48 | # define set_normalized_timespec64 set_normalized_timespec | ||
49 | # define timespec64_add_safe timespec_add_safe | ||
50 | # define timespec64_add timespec_add | ||
51 | # define timespec64_sub timespec_sub | ||
52 | # define timespec64_valid timespec_valid | ||
53 | # define timespec64_valid_strict timespec_valid_strict | ||
54 | # define timespec64_to_ns timespec_to_ns | ||
55 | # define ns_to_timespec64 ns_to_timespec | ||
56 | # define timespec64_add_ns timespec_add_ns | ||
57 | |||
58 | #else | ||
59 | |||
60 | static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) | ||
61 | { | ||
62 | struct timespec ret; | ||
63 | |||
64 | ret.tv_sec = (time_t)ts64.tv_sec; | ||
65 | ret.tv_nsec = ts64.tv_nsec; | ||
66 | return ret; | ||
67 | } | ||
68 | |||
69 | static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) | ||
70 | { | ||
71 | struct timespec64 ret; | ||
72 | |||
73 | ret.tv_sec = ts.tv_sec; | ||
74 | ret.tv_nsec = ts.tv_nsec; | ||
75 | return ret; | ||
76 | } | ||
77 | |||
78 | static inline int timespec64_equal(const struct timespec64 *a, | ||
79 | const struct timespec64 *b) | ||
80 | { | ||
81 | return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); | ||
82 | } | ||
83 | |||
84 | /* | ||
85 | * lhs < rhs: return <0 | ||
86 | * lhs == rhs: return 0 | ||
87 | * lhs > rhs: return >0 | ||
88 | */ | ||
89 | static inline int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs) | ||
90 | { | ||
91 | if (lhs->tv_sec < rhs->tv_sec) | ||
92 | return -1; | ||
93 | if (lhs->tv_sec > rhs->tv_sec) | ||
94 | return 1; | ||
95 | return lhs->tv_nsec - rhs->tv_nsec; | ||
96 | } | ||
97 | |||
98 | extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec); | ||
99 | |||
100 | /* | ||
101 | * timespec64_add_safe assumes both values are positive and checks for | ||
102 | * overflow. It will return TIME_T_MAX if the returned value would be | ||
103 | * smaller then either of the arguments. | ||
104 | */ | ||
105 | extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs, | ||
106 | const struct timespec64 rhs); | ||
107 | |||
108 | |||
109 | static inline struct timespec64 timespec64_add(struct timespec64 lhs, | ||
110 | struct timespec64 rhs) | ||
111 | { | ||
112 | struct timespec64 ts_delta; | ||
113 | set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec, | ||
114 | lhs.tv_nsec + rhs.tv_nsec); | ||
115 | return ts_delta; | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * sub = lhs - rhs, in normalized form | ||
120 | */ | ||
121 | static inline struct timespec64 timespec64_sub(struct timespec64 lhs, | ||
122 | struct timespec64 rhs) | ||
123 | { | ||
124 | struct timespec64 ts_delta; | ||
125 | set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec, | ||
126 | lhs.tv_nsec - rhs.tv_nsec); | ||
127 | return ts_delta; | ||
128 | } | ||
129 | |||
130 | /* | ||
131 | * Returns true if the timespec64 is norm, false if denorm: | ||
132 | */ | ||
133 | static inline bool timespec64_valid(const struct timespec64 *ts) | ||
134 | { | ||
135 | /* Dates before 1970 are bogus */ | ||
136 | if (ts->tv_sec < 0) | ||
137 | return false; | ||
138 | /* Can't have more nanoseconds then a second */ | ||
139 | if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) | ||
140 | return false; | ||
141 | return true; | ||
142 | } | ||
143 | |||
144 | static inline bool timespec64_valid_strict(const struct timespec64 *ts) | ||
145 | { | ||
146 | if (!timespec64_valid(ts)) | ||
147 | return false; | ||
148 | /* Disallow values that could overflow ktime_t */ | ||
149 | if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) | ||
150 | return false; | ||
151 | return true; | ||
152 | } | ||
153 | |||
154 | /** | ||
155 | * timespec64_to_ns - Convert timespec64 to nanoseconds | ||
156 | * @ts: pointer to the timespec64 variable to be converted | ||
157 | * | ||
158 | * Returns the scalar nanosecond representation of the timespec64 | ||
159 | * parameter. | ||
160 | */ | ||
161 | static inline s64 timespec64_to_ns(const struct timespec64 *ts) | ||
162 | { | ||
163 | return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * ns_to_timespec64 - Convert nanoseconds to timespec64 | ||
168 | * @nsec: the nanoseconds value to be converted | ||
169 | * | ||
170 | * Returns the timespec64 representation of the nsec parameter. | ||
171 | */ | ||
172 | extern struct timespec64 ns_to_timespec64(const s64 nsec); | ||
173 | |||
174 | /** | ||
175 | * timespec64_add_ns - Adds nanoseconds to a timespec64 | ||
176 | * @a: pointer to timespec64 to be incremented | ||
177 | * @ns: unsigned nanoseconds value to be added | ||
178 | * | ||
179 | * This must always be inlined because its used from the x86-64 vdso, | ||
180 | * which cannot call other kernel functions. | ||
181 | */ | ||
182 | static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns) | ||
183 | { | ||
184 | a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns); | ||
185 | a->tv_nsec = ns; | ||
186 | } | ||
187 | |||
188 | #endif | ||
189 | |||
190 | #endif /* _LINUX_TIME64_H */ | ||
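
A short sketch of how the new timespec64 helpers are meant to be used at the boundary between legacy timespec users and 64-bit-safe internals; on 64-bit builds the conversions are no-ops, per the #if above:

    struct timespec64 ts64 = { .tv_sec = 1, .tv_nsec = 500 };
    struct timespec ts;

    s64 ns = timespec64_to_ns(&ts64);               /* scalar nanoseconds        */
    ts64   = ns_to_timespec64(ns + NSEC_PER_MSEC);
    ts     = timespec64_to_timespec(ts64);          /* narrow for legacy callers */
    ts64   = timespec_to_timespec64(ts);            /* widen again on the way in */
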
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index c1825eb436ed..87e0992564f2 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h | |||
@@ -10,7 +10,22 @@ | |||
10 | #include <linux/jiffies.h> | 10 | #include <linux/jiffies.h> |
11 | #include <linux/time.h> | 11 | #include <linux/time.h> |
12 | 12 | ||
13 | /* Structure holding internal timekeeping values. */ | 13 | /* |
14 | * Structure holding internal timekeeping values. | ||
15 | * | ||
16 | * Note: wall_to_monotonic is what we need to add to xtime (or xtime | ||
17 | * corrected for sub jiffie times) to get to monotonic time. | ||
18 | * Monotonic is pegged at zero at system boot time, so | ||
19 | * wall_to_monotonic will be negative, however, we will ALWAYS keep | ||
20 | * the tv_nsec part positive so we can use the usual normalization. | ||
21 | * | ||
22 | * wall_to_monotonic is moved after resume from suspend for the | ||
23 | * monotonic time not to jump. We need to add total_sleep_time to | ||
24 | * wall_to_monotonic to get the real boot based time offset. | ||
25 | * | ||
26 | * - wall_to_monotonic is no longer the boot time, getboottime must be | ||
27 | * used instead. | ||
28 | */ | ||
14 | struct timekeeper { | 29 | struct timekeeper { |
15 | /* Current clocksource used for timekeeping. */ | 30 | /* Current clocksource used for timekeeping. */ |
16 | struct clocksource *clock; | 31 | struct clocksource *clock; |
@@ -18,6 +33,32 @@ struct timekeeper { | |||
18 | u32 mult; | 33 | u32 mult; |
19 | /* The shift value of the current clocksource. */ | 34 | /* The shift value of the current clocksource. */ |
20 | u32 shift; | 35 | u32 shift; |
36 | /* Clock shifted nano seconds */ | ||
37 | u64 xtime_nsec; | ||
38 | |||
39 | /* Monotonic base time */ | ||
40 | ktime_t base_mono; | ||
41 | |||
42 | /* Current CLOCK_REALTIME time in seconds */ | ||
43 | u64 xtime_sec; | ||
44 | /* CLOCK_REALTIME to CLOCK_MONOTONIC offset */ | ||
45 | struct timespec64 wall_to_monotonic; | ||
46 | |||
47 | /* Offset clock monotonic -> clock realtime */ | ||
48 | ktime_t offs_real; | ||
49 | /* Offset clock monotonic -> clock boottime */ | ||
50 | ktime_t offs_boot; | ||
51 | /* Offset clock monotonic -> clock tai */ | ||
52 | ktime_t offs_tai; | ||
53 | |||
54 | /* time spent in suspend */ | ||
55 | struct timespec64 total_sleep_time; | ||
56 | /* The current UTC to TAI offset in seconds */ | ||
57 | s32 tai_offset; | ||
58 | |||
59 | /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */ | ||
60 | struct timespec64 raw_time; | ||
61 | |||
21 | /* Number of clock cycles in one NTP interval. */ | 62 | /* Number of clock cycles in one NTP interval. */ |
22 | cycle_t cycle_interval; | 63 | cycle_t cycle_interval; |
23 | /* Last cycle value (also stored in clock->cycle_last) */ | 64 | /* Last cycle value (also stored in clock->cycle_last) */ |
@@ -29,58 +70,18 @@ struct timekeeper { | |||
29 | /* Raw nano seconds accumulated per NTP interval. */ | 70 | /* Raw nano seconds accumulated per NTP interval. */ |
30 | u32 raw_interval; | 71 | u32 raw_interval; |
31 | 72 | ||
32 | /* Current CLOCK_REALTIME time in seconds */ | 73 | /* |
33 | u64 xtime_sec; | 74 | * Difference between accumulated time and NTP time in ntp |
34 | /* Clock shifted nano seconds */ | 75 | * shifted nano seconds. |
35 | u64 xtime_nsec; | 76 | */ |
36 | |||
37 | /* Difference between accumulated time and NTP time in ntp | ||
38 | * shifted nano seconds. */ | ||
39 | s64 ntp_error; | 77 | s64 ntp_error; |
40 | /* Shift conversion between clock shifted nano seconds and | ||
41 | * ntp shifted nano seconds. */ | ||
42 | u32 ntp_error_shift; | ||
43 | |||
44 | /* | 78 | /* |
45 | * wall_to_monotonic is what we need to add to xtime (or xtime corrected | 79 | * Shift conversion between clock shifted nano seconds and |
46 | * for sub jiffie times) to get to monotonic time. Monotonic is pegged | 80 | * ntp shifted nano seconds. |
47 | * at zero at system boot time, so wall_to_monotonic will be negative, | ||
48 | * however, we will ALWAYS keep the tv_nsec part positive so we can use | ||
49 | * the usual normalization. | ||
50 | * | ||
51 | * wall_to_monotonic is moved after resume from suspend for the | ||
52 | * monotonic time not to jump. We need to add total_sleep_time to | ||
53 | * wall_to_monotonic to get the real boot based time offset. | ||
54 | * | ||
55 | * - wall_to_monotonic is no longer the boot time, getboottime must be | ||
56 | * used instead. | ||
57 | */ | 81 | */ |
58 | struct timespec wall_to_monotonic; | 82 | u32 ntp_error_shift; |
59 | /* Offset clock monotonic -> clock realtime */ | ||
60 | ktime_t offs_real; | ||
61 | /* time spent in suspend */ | ||
62 | struct timespec total_sleep_time; | ||
63 | /* Offset clock monotonic -> clock boottime */ | ||
64 | ktime_t offs_boot; | ||
65 | /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */ | ||
66 | struct timespec raw_time; | ||
67 | /* The current UTC to TAI offset in seconds */ | ||
68 | s32 tai_offset; | ||
69 | /* Offset clock monotonic -> clock tai */ | ||
70 | ktime_t offs_tai; | ||
71 | |||
72 | }; | 83 | }; |
73 | 84 | ||
74 | static inline struct timespec tk_xtime(struct timekeeper *tk) | ||
75 | { | ||
76 | struct timespec ts; | ||
77 | |||
78 | ts.tv_sec = tk->xtime_sec; | ||
79 | ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift); | ||
80 | return ts; | ||
81 | } | ||
82 | |||
83 | |||
84 | #ifdef CONFIG_GENERIC_TIME_VSYSCALL | 85 | #ifdef CONFIG_GENERIC_TIME_VSYSCALL |
85 | 86 | ||
86 | extern void update_vsyscall(struct timekeeper *tk); | 87 | extern void update_vsyscall(struct timekeeper *tk); |
@@ -92,14 +93,6 @@ extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm, | |||
92 | struct clocksource *c, u32 mult); | 93 | struct clocksource *c, u32 mult); |
93 | extern void update_vsyscall_tz(void); | 94 | extern void update_vsyscall_tz(void); |
94 | 95 | ||
95 | static inline void update_vsyscall(struct timekeeper *tk) | ||
96 | { | ||
97 | struct timespec xt; | ||
98 | |||
99 | xt = tk_xtime(tk); | ||
100 | update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult); | ||
101 | } | ||
102 | |||
103 | #else | 96 | #else |
104 | 97 | ||
105 | static inline void update_vsyscall(struct timekeeper *tk) | 98 | static inline void update_vsyscall(struct timekeeper *tk) |
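
The per-clock offsets kept in the timekeeper (offs_real, offs_boot, offs_tai) are what make single-addition clock conversions possible. A hedged illustration of the data layout only; the real accessors live in kernel/time/timekeeping.c and example_mono_to() is hypothetical:

    static ktime_t example_mono_to(struct timekeeper *tk, ktime_t mono,
                                   enum tk_offsets offs)
    {
            switch (offs) {
            case TK_OFFS_REAL: return ktime_add(mono, tk->offs_real);
            case TK_OFFS_BOOT: return ktime_add(mono, tk->offs_boot);
            case TK_OFFS_TAI:  return ktime_add(mono, tk->offs_tai);
            default:           return mono;
            }
    }
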
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h new file mode 100644 index 000000000000..903ecc10fcff --- /dev/null +++ b/include/linux/timekeeping.h | |||
@@ -0,0 +1,190 @@ | |||
1 | #ifndef _LINUX_TIMEKEEPING_H | ||
2 | #define _LINUX_TIMEKEEPING_H | ||
3 | |||
4 | /* Included from linux/ktime.h */ | ||
5 | |||
6 | void timekeeping_init(void); | ||
7 | extern int timekeeping_suspended; | ||
8 | |||
9 | /* | ||
10 | * Get and set timeofday | ||
11 | */ | ||
12 | extern void do_gettimeofday(struct timeval *tv); | ||
13 | extern int do_settimeofday(const struct timespec *tv); | ||
14 | extern int do_sys_settimeofday(const struct timespec *tv, | ||
15 | const struct timezone *tz); | ||
16 | |||
17 | /* | ||
18 | * Kernel time accessors | ||
19 | */ | ||
20 | unsigned long get_seconds(void); | ||
21 | struct timespec current_kernel_time(void); | ||
22 | /* does not take xtime_lock */ | ||
23 | struct timespec __current_kernel_time(void); | ||
24 | |||
25 | /* | ||
26 | * timespec based interfaces | ||
27 | */ | ||
28 | struct timespec get_monotonic_coarse(void); | ||
29 | extern void getrawmonotonic(struct timespec *ts); | ||
30 | extern void monotonic_to_bootbased(struct timespec *ts); | ||
31 | extern void get_monotonic_boottime(struct timespec *ts); | ||
32 | extern void ktime_get_ts64(struct timespec64 *ts); | ||
33 | |||
34 | extern int __getnstimeofday64(struct timespec64 *tv); | ||
35 | extern void getnstimeofday64(struct timespec64 *tv); | ||
36 | |||
37 | #if BITS_PER_LONG == 64 | ||
38 | static inline int __getnstimeofday(struct timespec *ts) | ||
39 | { | ||
40 | return __getnstimeofday64(ts); | ||
41 | } | ||
42 | |||
43 | static inline void getnstimeofday(struct timespec *ts) | ||
44 | { | ||
45 | getnstimeofday64(ts); | ||
46 | } | ||
47 | |||
48 | static inline void ktime_get_ts(struct timespec *ts) | ||
49 | { | ||
50 | ktime_get_ts64(ts); | ||
51 | } | ||
52 | |||
53 | static inline void ktime_get_real_ts(struct timespec *ts) | ||
54 | { | ||
55 | getnstimeofday64(ts); | ||
56 | } | ||
57 | |||
58 | #else | ||
59 | static inline int __getnstimeofday(struct timespec *ts) | ||
60 | { | ||
61 | struct timespec64 ts64; | ||
62 | int ret = __getnstimeofday64(&ts64); | ||
63 | |||
64 | *ts = timespec64_to_timespec(ts64); | ||
65 | return ret; | ||
66 | } | ||
67 | |||
68 | static inline void getnstimeofday(struct timespec *ts) | ||
69 | { | ||
70 | struct timespec64 ts64; | ||
71 | |||
72 | getnstimeofday64(&ts64); | ||
73 | *ts = timespec64_to_timespec(ts64); | ||
74 | } | ||
75 | |||
76 | static inline void ktime_get_ts(struct timespec *ts) | ||
77 | { | ||
78 | struct timespec64 ts64; | ||
79 | |||
80 | ktime_get_ts64(&ts64); | ||
81 | *ts = timespec64_to_timespec(ts64); | ||
82 | } | ||
83 | |||
84 | static inline void ktime_get_real_ts(struct timespec *ts) | ||
85 | { | ||
86 | struct timespec64 ts64; | ||
87 | |||
88 | getnstimeofday64(&ts64); | ||
89 | *ts = timespec64_to_timespec(ts64); | ||
90 | } | ||
91 | #endif | ||
92 | |||
93 | extern void getboottime(struct timespec *ts); | ||
94 | |||
95 | #define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) | ||
96 | #define ktime_get_real_ts64(ts) getnstimeofday64(ts) | ||
97 | |||
98 | /* | ||
99 | * ktime_t based interfaces | ||
100 | */ | ||
101 | |||
102 | enum tk_offsets { | ||
103 | TK_OFFS_REAL, | ||
104 | TK_OFFS_BOOT, | ||
105 | TK_OFFS_TAI, | ||
106 | TK_OFFS_MAX, | ||
107 | }; | ||
108 | |||
109 | extern ktime_t ktime_get(void); | ||
110 | extern ktime_t ktime_get_with_offset(enum tk_offsets offs); | ||
111 | extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs); | ||
112 | |||
113 | /** | ||
114 | * ktime_get_real - get the real (wall-) time in ktime_t format | ||
115 | */ | ||
116 | static inline ktime_t ktime_get_real(void) | ||
117 | { | ||
118 | return ktime_get_with_offset(TK_OFFS_REAL); | ||
119 | } | ||
120 | |||
121 | /** | ||
122 | * ktime_get_boottime - Returns monotonic time since boot in ktime_t format | ||
123 | * | ||
124 | * This is similar to CLOCK_MONTONIC/ktime_get, but also includes the | ||
125 | * time spent in suspend. | ||
126 | */ | ||
127 | static inline ktime_t ktime_get_boottime(void) | ||
128 | { | ||
129 | return ktime_get_with_offset(TK_OFFS_BOOT); | ||
130 | } | ||
131 | |||
132 | /** | ||
133 | * ktime_get_clocktai - Returns the TAI time of day in ktime_t format | ||
134 | */ | ||
135 | static inline ktime_t ktime_get_clocktai(void) | ||
136 | { | ||
137 | return ktime_get_with_offset(TK_OFFS_TAI); | ||
138 | } | ||
139 | |||
140 | /** | ||
141 | * ktime_mono_to_real - Convert monotonic time to clock realtime | ||
142 | */ | ||
143 | static inline ktime_t ktime_mono_to_real(ktime_t mono) | ||
144 | { | ||
145 | return ktime_mono_to_any(mono, TK_OFFS_REAL); | ||
146 | } | ||
147 | |||
148 | static inline u64 ktime_get_ns(void) | ||
149 | { | ||
150 | return ktime_to_ns(ktime_get()); | ||
151 | } | ||
152 | |||
153 | static inline u64 ktime_get_real_ns(void) | ||
154 | { | ||
155 | return ktime_to_ns(ktime_get_real()); | ||
156 | } | ||
157 | |||
158 | static inline u64 ktime_get_boot_ns(void) | ||
159 | { | ||
160 | return ktime_to_ns(ktime_get_boottime()); | ||
161 | } | ||
162 | |||
163 | /* | ||
164 | * RTC specific | ||
165 | */ | ||
166 | extern void timekeeping_inject_sleeptime(struct timespec *delta); | ||
167 | |||
168 | /* | ||
169 | * PPS accessor | ||
170 | */ | ||
171 | extern void getnstime_raw_and_real(struct timespec *ts_raw, | ||
172 | struct timespec *ts_real); | ||
173 | |||
174 | /* | ||
175 | * Persistent clock related interfaces | ||
176 | */ | ||
177 | extern bool persistent_clock_exist; | ||
178 | extern int persistent_clock_is_local; | ||
179 | |||
180 | static inline bool has_persistent_clock(void) | ||
181 | { | ||
182 | return persistent_clock_exist; | ||
183 | } | ||
184 | |||
185 | extern void read_persistent_clock(struct timespec *ts); | ||
186 | extern void read_boot_clock(struct timespec *ts); | ||
187 | extern int update_persistent_clock(struct timespec now); | ||
188 | |||
189 | |||
190 | #endif | ||
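
Typical driver-side use of the accessors collected in the new header; a sketch with illustrative variable names:

    u64     t0   = ktime_get_ns();          /* monotonic time in nanoseconds */
    ktime_t wall = ktime_get_real();        /* CLOCK_REALTIME                */
    ktime_t boot = ktime_get_boottime();    /* monotonic + time in suspend   */
    ktime_t tai  = ktime_get_clocktai();    /* TAI wall time                 */

    /* convert a stored monotonic timestamp for a realtime-based consumer */
    ktime_t real = ktime_mono_to_real(ktime_get());
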
diff --git a/include/linux/timerfd.h b/include/linux/timerfd.h index d3b57fa12225..bd36ce431e32 100644 --- a/include/linux/timerfd.h +++ b/include/linux/timerfd.h | |||
@@ -11,6 +11,9 @@ | |||
11 | /* For O_CLOEXEC and O_NONBLOCK */ | 11 | /* For O_CLOEXEC and O_NONBLOCK */ |
12 | #include <linux/fcntl.h> | 12 | #include <linux/fcntl.h> |
13 | 13 | ||
14 | /* For _IO helpers */ | ||
15 | #include <linux/ioctl.h> | ||
16 | |||
14 | /* | 17 | /* |
15 | * CAREFUL: Check include/asm-generic/fcntl.h when defining | 18 | * CAREFUL: Check include/asm-generic/fcntl.h when defining |
16 | * new flags, since they might collide with O_* ones. We want | 19 | * new flags, since they might collide with O_* ones. We want |
@@ -29,4 +32,6 @@ | |||
29 | /* Flags for timerfd_settime. */ | 32 | /* Flags for timerfd_settime. */ |
30 | #define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET) | 33 | #define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET) |
31 | 34 | ||
35 | #define TFD_IOC_SET_TICKS _IOW('T', 0, u64) | ||
36 | |||
32 | #endif /* _LINUX_TIMERFD_H */ | 37 | #endif /* _LINUX_TIMERFD_H */ |
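
The TFD_IOC_SET_TICKS ioctl added above lets a checkpoint/restore-style tool preload a timerfd's pending expiration count. A hedged userspace sketch; the ioctl name and argument type match the hunk, while the fd handling and error policy around it are purely illustrative.

/* Userspace sketch: preload a timerfd with 3 pending expirations. */
#include <sys/timerfd.h>
#include <sys/ioctl.h>
#include <stdint.h>

#ifndef TFD_IOC_SET_TICKS
#define TFD_IOC_SET_TICKS _IOW('T', 0, uint64_t)
#endif

static int preload_ticks(int tfd)
{
	uint64_t ticks = 3;

	return ioctl(tfd, TFD_IOC_SET_TICKS, &ticks);	/* 0 on success */
}
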
diff --git a/kernel/Makefile b/kernel/Makefile index f2a8b6246ce9..973a40cf8068 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
@@ -3,12 +3,11 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y = fork.o exec_domain.o panic.o \ | 5 | obj-y = fork.o exec_domain.o panic.o \ |
6 | cpu.o exit.o itimer.o time.o softirq.o resource.o \ | 6 | cpu.o exit.o softirq.o resource.o \ |
7 | sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \ | 7 | sysctl.o sysctl_binary.o capability.o ptrace.o user.o \ |
8 | signal.o sys.o kmod.o workqueue.o pid.o task_work.o \ | 8 | signal.o sys.o kmod.o workqueue.o pid.o task_work.o \ |
9 | extable.o params.o posix-timers.o \ | 9 | extable.o params.o \ |
10 | kthread.o sys_ni.o posix-cpu-timers.o \ | 10 | kthread.o sys_ni.o nsproxy.o \ |
11 | hrtimer.o nsproxy.o \ | ||
12 | notifier.o ksysfs.o cred.o reboot.o \ | 11 | notifier.o ksysfs.o cred.o reboot.o \ |
13 | async.o range.o groups.o smpboot.o | 12 | async.o range.o groups.o smpboot.o |
14 | 13 | ||
@@ -110,22 +109,6 @@ targets += config_data.h | |||
110 | $(obj)/config_data.h: $(obj)/config_data.gz FORCE | 109 | $(obj)/config_data.h: $(obj)/config_data.gz FORCE |
111 | $(call filechk,ikconfiggz) | 110 | $(call filechk,ikconfiggz) |
112 | 111 | ||
113 | $(obj)/time.o: $(obj)/timeconst.h | ||
114 | |||
115 | quiet_cmd_hzfile = HZFILE $@ | ||
116 | cmd_hzfile = echo "hz=$(CONFIG_HZ)" > $@ | ||
117 | |||
118 | targets += hz.bc | ||
119 | $(obj)/hz.bc: $(objtree)/include/config/hz.h FORCE | ||
120 | $(call if_changed,hzfile) | ||
121 | |||
122 | quiet_cmd_bc = BC $@ | ||
123 | cmd_bc = bc -q $(filter-out FORCE,$^) > $@ | ||
124 | |||
125 | targets += timeconst.h | ||
126 | $(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE | ||
127 | $(call if_changed,bc) | ||
128 | |||
129 | ############################################################################### | 112 | ############################################################################### |
130 | # | 113 | # |
131 | # Roll all the X.509 certificates that we can find together and pull them into | 114 | # Roll all the X.509 certificates that we can find together and pull them into |
diff --git a/kernel/acct.c b/kernel/acct.c index 808a86ff229d..a1844f14c6d6 100644 --- a/kernel/acct.c +++ b/kernel/acct.c | |||
@@ -458,9 +458,7 @@ static void do_acct_process(struct bsd_acct_struct *acct, | |||
458 | acct_t ac; | 458 | acct_t ac; |
459 | mm_segment_t fs; | 459 | mm_segment_t fs; |
460 | unsigned long flim; | 460 | unsigned long flim; |
461 | u64 elapsed; | 461 | u64 elapsed, run_time; |
462 | u64 run_time; | ||
463 | struct timespec uptime; | ||
464 | struct tty_struct *tty; | 462 | struct tty_struct *tty; |
465 | const struct cred *orig_cred; | 463 | const struct cred *orig_cred; |
466 | 464 | ||
@@ -484,10 +482,8 @@ static void do_acct_process(struct bsd_acct_struct *acct, | |||
484 | strlcpy(ac.ac_comm, current->comm, sizeof(ac.ac_comm)); | 482 | strlcpy(ac.ac_comm, current->comm, sizeof(ac.ac_comm)); |
485 | 483 | ||
486 | /* calculate run_time in nsec*/ | 484 | /* calculate run_time in nsec*/ |
487 | do_posix_clock_monotonic_gettime(&uptime); | 485 | run_time = ktime_get_ns(); |
488 | run_time = (u64)uptime.tv_sec*NSEC_PER_SEC + uptime.tv_nsec; | 486 | run_time -= current->group_leader->start_time; |
489 | run_time -= (u64)current->group_leader->start_time.tv_sec * NSEC_PER_SEC | ||
490 | + current->group_leader->start_time.tv_nsec; | ||
491 | /* convert nsec -> AHZ */ | 487 | /* convert nsec -> AHZ */ |
492 | elapsed = nsec_to_AHZ(run_time); | 488 | elapsed = nsec_to_AHZ(run_time); |
493 | #if ACCT_VERSION==3 | 489 | #if ACCT_VERSION==3 |
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 2f7c760305ca..379650b984f8 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c | |||
@@ -2472,7 +2472,7 @@ static void kdb_gmtime(struct timespec *tv, struct kdb_tm *tm) | |||
2472 | static void kdb_sysinfo(struct sysinfo *val) | 2472 | static void kdb_sysinfo(struct sysinfo *val) |
2473 | { | 2473 | { |
2474 | struct timespec uptime; | 2474 | struct timespec uptime; |
2475 | do_posix_clock_monotonic_gettime(&uptime); | 2475 | ktime_get_ts(&uptime); |
2476 | memset(val, 0, sizeof(*val)); | 2476 | memset(val, 0, sizeof(*val)); |
2477 | val->uptime = uptime.tv_sec; | 2477 | val->uptime = uptime.tv_sec; |
2478 | val->loads[0] = avenrun[0]; | 2478 | val->loads[0] = avenrun[0]; |
diff --git a/kernel/delayacct.c b/kernel/delayacct.c index 54996b71e66d..de699f42f9bc 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c | |||
@@ -46,16 +46,6 @@ void __delayacct_tsk_init(struct task_struct *tsk) | |||
46 | } | 46 | } |
47 | 47 | ||
48 | /* | 48 | /* |
49 | * Start accounting for a delay statistic using | ||
50 | * its starting timestamp (@start) | ||
51 | */ | ||
52 | |||
53 | static inline void delayacct_start(struct timespec *start) | ||
54 | { | ||
55 | do_posix_clock_monotonic_gettime(start); | ||
56 | } | ||
57 | |||
58 | /* | ||
59 | * Finish delay accounting for a statistic using | 49 | * Finish delay accounting for a statistic using |
60 | * its timestamps (@start, @end), accumulator (@total) and @count | 50 | * its timestamps (@start, @end), accumulator (@total) and @count |
61 | */ | 51 | */ |
@@ -67,7 +57,7 @@ static void delayacct_end(struct timespec *start, struct timespec *end, | |||
67 | s64 ns; | 57 | s64 ns; |
68 | unsigned long flags; | 58 | unsigned long flags; |
69 | 59 | ||
70 | do_posix_clock_monotonic_gettime(end); | 60 | ktime_get_ts(end); |
71 | ts = timespec_sub(*end, *start); | 61 | ts = timespec_sub(*end, *start); |
72 | ns = timespec_to_ns(&ts); | 62 | ns = timespec_to_ns(&ts); |
73 | if (ns < 0) | 63 | if (ns < 0) |
@@ -81,7 +71,7 @@ static void delayacct_end(struct timespec *start, struct timespec *end, | |||
81 | 71 | ||
82 | void __delayacct_blkio_start(void) | 72 | void __delayacct_blkio_start(void) |
83 | { | 73 | { |
84 | delayacct_start(¤t->delays->blkio_start); | 74 | ktime_get_ts(¤t->delays->blkio_start); |
85 | } | 75 | } |
86 | 76 | ||
87 | void __delayacct_blkio_end(void) | 77 | void __delayacct_blkio_end(void) |
@@ -169,7 +159,7 @@ __u64 __delayacct_blkio_ticks(struct task_struct *tsk) | |||
169 | 159 | ||
170 | void __delayacct_freepages_start(void) | 160 | void __delayacct_freepages_start(void) |
171 | { | 161 | { |
172 | delayacct_start(¤t->delays->freepages_start); | 162 | ktime_get_ts(¤t->delays->freepages_start); |
173 | } | 163 | } |
174 | 164 | ||
175 | void __delayacct_freepages_end(void) | 165 | void __delayacct_freepages_end(void) |
diff --git a/kernel/fork.c b/kernel/fork.c index 6a13c46cd87d..627b7f80afb0 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1262,9 +1262,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1262 | 1262 | ||
1263 | posix_cpu_timers_init(p); | 1263 | posix_cpu_timers_init(p); |
1264 | 1264 | ||
1265 | do_posix_clock_monotonic_gettime(&p->start_time); | 1265 | p->start_time = ktime_get_ns(); |
1266 | p->real_start_time = p->start_time; | 1266 | p->real_start_time = ktime_get_boot_ns(); |
1267 | monotonic_to_bootbased(&p->real_start_time); | ||
1268 | p->io_context = NULL; | 1267 | p->io_context = NULL; |
1269 | p->audit_context = NULL; | 1268 | p->audit_context = NULL; |
1270 | if (clone_flags & CLONE_THREAD) | 1269 | if (clone_flags & CLONE_THREAD) |
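
With the kernel/acct.c and kernel/fork.c hunks above, per-task start times become plain u64 nanosecond counts taken from ktime_get_ns()/ktime_get_boot_ns(), so elapsed time is a single subtraction instead of timespec arithmetic. A sketch of that computation; the helper name and the assumed <linux/sched.h> include are illustrative.

/* Sketch only: elapsed run time with the ns-based start_time. */
#include <linux/sched.h>

static u64 elapsed_since_start(struct task_struct *p)
{
	/* p->start_time was set in copy_process() via ktime_get_ns() */
	return ktime_get_ns() - p->start_time;	/* nanoseconds, no timespec math */
}
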
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig index f448513a45ed..feccfd888732 100644 --- a/kernel/time/Kconfig +++ b/kernel/time/Kconfig | |||
@@ -20,10 +20,6 @@ config GENERIC_TIME_VSYSCALL | |||
20 | config GENERIC_TIME_VSYSCALL_OLD | 20 | config GENERIC_TIME_VSYSCALL_OLD |
21 | bool | 21 | bool |
22 | 22 | ||
23 | # ktime_t scalar 64bit nsec representation | ||
24 | config KTIME_SCALAR | ||
25 | bool | ||
26 | |||
27 | # Old style timekeeping | 23 | # Old style timekeeping |
28 | config ARCH_USES_GETTIMEOFFSET | 24 | config ARCH_USES_GETTIMEOFFSET |
29 | bool | 25 | bool |
diff --git a/kernel/time/Makefile b/kernel/time/Makefile index 57a413fd0ebf..7347426fa68d 100644 --- a/kernel/time/Makefile +++ b/kernel/time/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | obj-y += time.o timer.o hrtimer.o itimer.o posix-timers.o posix-cpu-timers.o | ||
1 | obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o | 2 | obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o |
2 | obj-y += timeconv.o posix-clock.o alarmtimer.o | 3 | obj-y += timeconv.o posix-clock.o alarmtimer.o |
3 | 4 | ||
@@ -12,3 +13,21 @@ obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o | |||
12 | obj-$(CONFIG_TICK_ONESHOT) += tick-sched.o | 13 | obj-$(CONFIG_TICK_ONESHOT) += tick-sched.o |
13 | obj-$(CONFIG_TIMER_STATS) += timer_stats.o | 14 | obj-$(CONFIG_TIMER_STATS) += timer_stats.o |
14 | obj-$(CONFIG_DEBUG_FS) += timekeeping_debug.o | 15 | obj-$(CONFIG_DEBUG_FS) += timekeeping_debug.o |
16 | obj-$(CONFIG_TEST_UDELAY) += udelay_test.o | ||
17 | |||
18 | $(obj)/time.o: $(obj)/timeconst.h | ||
19 | |||
20 | quiet_cmd_hzfile = HZFILE $@ | ||
21 | cmd_hzfile = echo "hz=$(CONFIG_HZ)" > $@ | ||
22 | |||
23 | targets += hz.bc | ||
24 | $(obj)/hz.bc: $(objtree)/include/config/hz.h FORCE | ||
25 | $(call if_changed,hzfile) | ||
26 | |||
27 | quiet_cmd_bc = BC $@ | ||
28 | cmd_bc = bc -q $(filter-out FORCE,$^) > $@ | ||
29 | |||
30 | targets += timeconst.h | ||
31 | $(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE | ||
32 | $(call if_changed,bc) | ||
33 | |||
diff --git a/kernel/hrtimer.c b/kernel/time/hrtimer.c index 3ab28993f6e0..1c2fe7de2842 100644 --- a/kernel/hrtimer.c +++ b/kernel/time/hrtimer.c | |||
@@ -54,6 +54,8 @@ | |||
54 | 54 | ||
55 | #include <trace/events/timer.h> | 55 | #include <trace/events/timer.h> |
56 | 56 | ||
57 | #include "timekeeping.h" | ||
58 | |||
57 | /* | 59 | /* |
58 | * The timer bases: | 60 | * The timer bases: |
59 | * | 61 | * |
@@ -114,21 +116,18 @@ static inline int hrtimer_clockid_to_base(clockid_t clock_id) | |||
114 | */ | 116 | */ |
115 | static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base) | 117 | static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base) |
116 | { | 118 | { |
117 | ktime_t xtim, mono, boot; | 119 | ktime_t xtim, mono, boot, tai; |
118 | struct timespec xts, tom, slp; | 120 | ktime_t off_real, off_boot, off_tai; |
119 | s32 tai_offset; | ||
120 | 121 | ||
121 | get_xtime_and_monotonic_and_sleep_offset(&xts, &tom, &slp); | 122 | mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai); |
122 | tai_offset = timekeeping_get_tai_offset(); | 123 | boot = ktime_add(mono, off_boot); |
124 | xtim = ktime_add(mono, off_real); | ||
125 | tai = ktime_add(xtim, off_tai); | ||
123 | 126 | ||
124 | xtim = timespec_to_ktime(xts); | ||
125 | mono = ktime_add(xtim, timespec_to_ktime(tom)); | ||
126 | boot = ktime_add(mono, timespec_to_ktime(slp)); | ||
127 | base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim; | 127 | base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim; |
128 | base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono; | 128 | base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono; |
129 | base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot; | 129 | base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot; |
130 | base->clock_base[HRTIMER_BASE_TAI].softirq_time = | 130 | base->clock_base[HRTIMER_BASE_TAI].softirq_time = tai; |
131 | ktime_add(xtim, ktime_set(tai_offset, 0)); | ||
132 | } | 131 | } |
133 | 132 | ||
134 | /* | 133 | /* |
@@ -264,60 +263,6 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) | |||
264 | * too large for inlining: | 263 | * too large for inlining: |
265 | */ | 264 | */ |
266 | #if BITS_PER_LONG < 64 | 265 | #if BITS_PER_LONG < 64 |
267 | # ifndef CONFIG_KTIME_SCALAR | ||
268 | /** | ||
269 | * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable | ||
270 | * @kt: addend | ||
271 | * @nsec: the scalar nsec value to add | ||
272 | * | ||
273 | * Returns the sum of kt and nsec in ktime_t format | ||
274 | */ | ||
275 | ktime_t ktime_add_ns(const ktime_t kt, u64 nsec) | ||
276 | { | ||
277 | ktime_t tmp; | ||
278 | |||
279 | if (likely(nsec < NSEC_PER_SEC)) { | ||
280 | tmp.tv64 = nsec; | ||
281 | } else { | ||
282 | unsigned long rem = do_div(nsec, NSEC_PER_SEC); | ||
283 | |||
284 | /* Make sure nsec fits into long */ | ||
285 | if (unlikely(nsec > KTIME_SEC_MAX)) | ||
286 | return (ktime_t){ .tv64 = KTIME_MAX }; | ||
287 | |||
288 | tmp = ktime_set((long)nsec, rem); | ||
289 | } | ||
290 | |||
291 | return ktime_add(kt, tmp); | ||
292 | } | ||
293 | |||
294 | EXPORT_SYMBOL_GPL(ktime_add_ns); | ||
295 | |||
296 | /** | ||
297 | * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable | ||
298 | * @kt: minuend | ||
299 | * @nsec: the scalar nsec value to subtract | ||
300 | * | ||
301 | * Returns the subtraction of @nsec from @kt in ktime_t format | ||
302 | */ | ||
303 | ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec) | ||
304 | { | ||
305 | ktime_t tmp; | ||
306 | |||
307 | if (likely(nsec < NSEC_PER_SEC)) { | ||
308 | tmp.tv64 = nsec; | ||
309 | } else { | ||
310 | unsigned long rem = do_div(nsec, NSEC_PER_SEC); | ||
311 | |||
312 | tmp = ktime_set((long)nsec, rem); | ||
313 | } | ||
314 | |||
315 | return ktime_sub(kt, tmp); | ||
316 | } | ||
317 | |||
318 | EXPORT_SYMBOL_GPL(ktime_sub_ns); | ||
319 | # endif /* !CONFIG_KTIME_SCALAR */ | ||
320 | |||
321 | /* | 266 | /* |
322 | * Divide a ktime value by a nanosecond value | 267 | * Divide a ktime value by a nanosecond value |
323 | */ | 268 | */ |
@@ -337,6 +282,7 @@ u64 ktime_divns(const ktime_t kt, s64 div) | |||
337 | 282 | ||
338 | return dclc; | 283 | return dclc; |
339 | } | 284 | } |
285 | EXPORT_SYMBOL_GPL(ktime_divns); | ||
340 | #endif /* BITS_PER_LONG >= 64 */ | 286 | #endif /* BITS_PER_LONG >= 64 */ |
341 | 287 | ||
342 | /* | 288 | /* |
@@ -602,6 +548,11 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal) | |||
602 | * timers, we have to check, whether it expires earlier than the timer for | 548 | * timers, we have to check, whether it expires earlier than the timer for |
603 | * which the clock event device was armed. | 549 | * which the clock event device was armed. |
604 | * | 550 | * |
551 | * Note that in case the state has HRTIMER_STATE_CALLBACK set, no reprogramming | ||
552 | * and no expiry check happens. The timer gets enqueued into the rbtree. The | ||
553 | * reprogramming and expiry check is done in the hrtimer_interrupt or in the | ||
554 | * softirq. | ||
555 | * | ||
605 | * Called with interrupts disabled and base->cpu_base.lock held | 556 | * Called with interrupts disabled and base->cpu_base.lock held |
606 | */ | 557 | */ |
607 | static int hrtimer_reprogram(struct hrtimer *timer, | 558 | static int hrtimer_reprogram(struct hrtimer *timer, |
@@ -662,25 +613,13 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) | |||
662 | base->hres_active = 0; | 613 | base->hres_active = 0; |
663 | } | 614 | } |
664 | 615 | ||
665 | /* | ||
666 | * When High resolution timers are active, try to reprogram. Note, that in case | ||
667 | * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry | ||
668 | * check happens. The timer gets enqueued into the rbtree. The reprogramming | ||
669 | * and expiry check is done in the hrtimer_interrupt or in the softirq. | ||
670 | */ | ||
671 | static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, | ||
672 | struct hrtimer_clock_base *base) | ||
673 | { | ||
674 | return base->cpu_base->hres_active && hrtimer_reprogram(timer, base); | ||
675 | } | ||
676 | |||
677 | static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) | 616 | static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) |
678 | { | 617 | { |
679 | ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset; | 618 | ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset; |
680 | ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset; | 619 | ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset; |
681 | ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset; | 620 | ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset; |
682 | 621 | ||
683 | return ktime_get_update_offsets(offs_real, offs_boot, offs_tai); | 622 | return ktime_get_update_offsets_now(offs_real, offs_boot, offs_tai); |
684 | } | 623 | } |
685 | 624 | ||
686 | /* | 625 | /* |
@@ -755,8 +694,8 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; } | |||
755 | static inline int hrtimer_switch_to_hres(void) { return 0; } | 694 | static inline int hrtimer_switch_to_hres(void) { return 0; } |
756 | static inline void | 695 | static inline void |
757 | hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { } | 696 | hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { } |
758 | static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, | 697 | static inline int hrtimer_reprogram(struct hrtimer *timer, |
759 | struct hrtimer_clock_base *base) | 698 | struct hrtimer_clock_base *base) |
760 | { | 699 | { |
761 | return 0; | 700 | return 0; |
762 | } | 701 | } |
@@ -1013,14 +952,25 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | |||
1013 | 952 | ||
1014 | leftmost = enqueue_hrtimer(timer, new_base); | 953 | leftmost = enqueue_hrtimer(timer, new_base); |
1015 | 954 | ||
1016 | /* | 955 | if (!leftmost) { |
1017 | * Only allow reprogramming if the new base is on this CPU. | 956 | unlock_hrtimer_base(timer, &flags); |
1018 | * (it might still be on another CPU if the timer was pending) | 957 | return ret; |
1019 | * | 958 | } |
1020 | * XXX send_remote_softirq() ? | 959 | |
1021 | */ | 960 | if (!hrtimer_is_hres_active(timer)) { |
1022 | if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases) | 961 | /* |
1023 | && hrtimer_enqueue_reprogram(timer, new_base)) { | 962 | * Kick to reschedule the next tick to handle the new timer |
963 | * on dynticks target. | ||
964 | */ | ||
965 | wake_up_nohz_cpu(new_base->cpu_base->cpu); | ||
966 | } else if (new_base->cpu_base == &__get_cpu_var(hrtimer_bases) && | ||
967 | hrtimer_reprogram(timer, new_base)) { | ||
968 | /* | ||
969 | * Only allow reprogramming if the new base is on this CPU. | ||
970 | * (it might still be on another CPU if the timer was pending) | ||
971 | * | ||
972 | * XXX send_remote_softirq() ? | ||
973 | */ | ||
1024 | if (wakeup) { | 974 | if (wakeup) { |
1025 | /* | 975 | /* |
1026 | * We need to drop cpu_base->lock to avoid a | 976 | * We need to drop cpu_base->lock to avoid a |
@@ -1680,6 +1630,7 @@ static void init_hrtimers_cpu(int cpu) | |||
1680 | timerqueue_init_head(&cpu_base->clock_base[i].active); | 1630 | timerqueue_init_head(&cpu_base->clock_base[i].active); |
1681 | } | 1631 | } |
1682 | 1632 | ||
1633 | cpu_base->cpu = cpu; | ||
1683 | hrtimer_init_hres(cpu_base); | 1634 | hrtimer_init_hres(cpu_base); |
1684 | } | 1635 | } |
1685 | 1636 | ||
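
A restatement of the reworked hrtimer_get_softirq_time() arithmetic above: all four clock bases are now derived from a single monotonic readout plus per-clock offsets. ktime_get_update_offsets_tick() is the timekeeping helper named in the hunk; the wrapper function and variable names below are illustrative.

/* Sketch: one monotonic readout, three offsets, four base times. */
static void clock_bases_sketch(void)
{
	ktime_t off_real, off_boot, off_tai;
	ktime_t mono, real, boot, tai;

	mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai);
	real = ktime_add(mono, off_real);	/* CLOCK_REALTIME */
	boot = ktime_add(mono, off_boot);	/* CLOCK_BOOTTIME */
	tai  = ktime_add(real, off_tai);	/* CLOCK_TAI = realtime + TAI offset */
}
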
diff --git a/kernel/itimer.c b/kernel/time/itimer.c index 8d262b467573..8d262b467573 100644 --- a/kernel/itimer.c +++ b/kernel/time/itimer.c | |||
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 33db43a39515..87a346fd6d61 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c | |||
@@ -466,7 +466,8 @@ static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock); | |||
466 | 466 | ||
467 | static void sync_cmos_clock(struct work_struct *work) | 467 | static void sync_cmos_clock(struct work_struct *work) |
468 | { | 468 | { |
469 | struct timespec now, next; | 469 | struct timespec64 now; |
470 | struct timespec next; | ||
470 | int fail = 1; | 471 | int fail = 1; |
471 | 472 | ||
472 | /* | 473 | /* |
@@ -485,9 +486,9 @@ static void sync_cmos_clock(struct work_struct *work) | |||
485 | return; | 486 | return; |
486 | } | 487 | } |
487 | 488 | ||
488 | getnstimeofday(&now); | 489 | getnstimeofday64(&now); |
489 | if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) { | 490 | if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) { |
490 | struct timespec adjust = now; | 491 | struct timespec adjust = timespec64_to_timespec(now); |
491 | 492 | ||
492 | fail = -ENODEV; | 493 | fail = -ENODEV; |
493 | if (persistent_clock_is_local) | 494 | if (persistent_clock_is_local) |
@@ -531,7 +532,7 @@ void ntp_notify_cmos_timer(void) { } | |||
531 | /* | 532 | /* |
532 | * Propagate a new txc->status value into the NTP state: | 533 | * Propagate a new txc->status value into the NTP state: |
533 | */ | 534 | */ |
534 | static inline void process_adj_status(struct timex *txc, struct timespec *ts) | 535 | static inline void process_adj_status(struct timex *txc, struct timespec64 *ts) |
535 | { | 536 | { |
536 | if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) { | 537 | if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) { |
537 | time_state = TIME_OK; | 538 | time_state = TIME_OK; |
@@ -554,7 +555,7 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts) | |||
554 | 555 | ||
555 | 556 | ||
556 | static inline void process_adjtimex_modes(struct timex *txc, | 557 | static inline void process_adjtimex_modes(struct timex *txc, |
557 | struct timespec *ts, | 558 | struct timespec64 *ts, |
558 | s32 *time_tai) | 559 | s32 *time_tai) |
559 | { | 560 | { |
560 | if (txc->modes & ADJ_STATUS) | 561 | if (txc->modes & ADJ_STATUS) |
@@ -640,7 +641,7 @@ int ntp_validate_timex(struct timex *txc) | |||
640 | * adjtimex mainly allows reading (and writing, if superuser) of | 641 | * adjtimex mainly allows reading (and writing, if superuser) of |
641 | * kernel time-keeping variables. used by xntpd. | 642 | * kernel time-keeping variables. used by xntpd. |
642 | */ | 643 | */ |
643 | int __do_adjtimex(struct timex *txc, struct timespec *ts, s32 *time_tai) | 644 | int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai) |
644 | { | 645 | { |
645 | int result; | 646 | int result; |
646 | 647 | ||
@@ -684,7 +685,7 @@ int __do_adjtimex(struct timex *txc, struct timespec *ts, s32 *time_tai) | |||
684 | /* fill PPS status fields */ | 685 | /* fill PPS status fields */ |
685 | pps_fill_timex(txc); | 686 | pps_fill_timex(txc); |
686 | 687 | ||
687 | txc->time.tv_sec = ts->tv_sec; | 688 | txc->time.tv_sec = (time_t)ts->tv_sec; |
688 | txc->time.tv_usec = ts->tv_nsec; | 689 | txc->time.tv_usec = ts->tv_nsec; |
689 | if (!(time_status & STA_NANO)) | 690 | if (!(time_status & STA_NANO)) |
690 | txc->time.tv_usec /= NSEC_PER_USEC; | 691 | txc->time.tv_usec /= NSEC_PER_USEC; |
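
sync_cmos_clock() above now keeps the wall-clock readout in a timespec64 and converts to the legacy timespec only at the boundary to the persistent-clock code. A minimal sketch of that pattern, using the helper names from the hunk; the wrapper function is illustrative.

/* Sketch: 64-bit time internally, legacy struct only at the boundary. */
static void cmos_sync_sketch(void)
{
	struct timespec64 now;
	struct timespec adjust;

	getnstimeofday64(&now);
	adjust = timespec64_to_timespec(now);	/* only the RTC update path needs timespec */
	(void)adjust;
}
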
diff --git a/kernel/time/ntp_internal.h b/kernel/time/ntp_internal.h index 1950cb4ca2a4..bbd102ad9df7 100644 --- a/kernel/time/ntp_internal.h +++ b/kernel/time/ntp_internal.h | |||
@@ -7,6 +7,6 @@ extern void ntp_clear(void); | |||
7 | extern u64 ntp_tick_length(void); | 7 | extern u64 ntp_tick_length(void); |
8 | extern int second_overflow(unsigned long secs); | 8 | extern int second_overflow(unsigned long secs); |
9 | extern int ntp_validate_timex(struct timex *); | 9 | extern int ntp_validate_timex(struct timex *); |
10 | extern int __do_adjtimex(struct timex *, struct timespec *, s32 *); | 10 | extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *); |
11 | extern void __hardpps(const struct timespec *, const struct timespec *); | 11 | extern void __hardpps(const struct timespec *, const struct timespec *); |
12 | #endif /* _LINUX_NTP_INTERNAL_H */ | 12 | #endif /* _LINUX_NTP_INTERNAL_H */ |
diff --git a/kernel/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 3b8946416a5f..3b8946416a5f 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c | |||
diff --git a/kernel/posix-timers.c b/kernel/time/posix-timers.c index 424c2d4265c9..42b463ad90f2 100644 --- a/kernel/posix-timers.c +++ b/kernel/time/posix-timers.c | |||
@@ -49,6 +49,8 @@ | |||
49 | #include <linux/export.h> | 49 | #include <linux/export.h> |
50 | #include <linux/hashtable.h> | 50 | #include <linux/hashtable.h> |
51 | 51 | ||
52 | #include "timekeeping.h" | ||
53 | |||
52 | /* | 54 | /* |
53 | * Management arrays for POSIX timers. Timers are now kept in static hash table | 55 | * Management arrays for POSIX timers. Timers are now kept in static hash table |
54 | * with 512 entries. | 56 | * with 512 entries. |
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 7ab92b19965a..c19c1d84b6f3 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h | |||
@@ -4,6 +4,8 @@ | |||
4 | #include <linux/hrtimer.h> | 4 | #include <linux/hrtimer.h> |
5 | #include <linux/tick.h> | 5 | #include <linux/tick.h> |
6 | 6 | ||
7 | #include "timekeeping.h" | ||
8 | |||
7 | extern seqlock_t jiffies_lock; | 9 | extern seqlock_t jiffies_lock; |
8 | 10 | ||
9 | #define CS_NAME_LEN 32 | 11 | #define CS_NAME_LEN 32 |
diff --git a/kernel/time.c b/kernel/time/time.c index 7c7964c33ae7..f0294ba14634 100644 --- a/kernel/time.c +++ b/kernel/time/time.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <asm/unistd.h> | 42 | #include <asm/unistd.h> |
43 | 43 | ||
44 | #include "timeconst.h" | 44 | #include "timeconst.h" |
45 | #include "timekeeping.h" | ||
45 | 46 | ||
46 | /* | 47 | /* |
47 | * The timezone where the local system is located. Used as a default by some | 48 | * The timezone where the local system is located. Used as a default by some |
@@ -420,6 +421,68 @@ struct timeval ns_to_timeval(const s64 nsec) | |||
420 | } | 421 | } |
421 | EXPORT_SYMBOL(ns_to_timeval); | 422 | EXPORT_SYMBOL(ns_to_timeval); |
422 | 423 | ||
424 | #if BITS_PER_LONG == 32 | ||
425 | /** | ||
426 | * set_normalized_timespec64 - set timespec sec and nsec parts and normalize | ||
427 | * | ||
428 | * @ts: pointer to timespec variable to be set | ||
429 | * @sec: seconds to set | ||
430 | * @nsec: nanoseconds to set | ||
431 | * | ||
432 | * Set seconds and nanoseconds field of a timespec variable and | ||
433 | * normalize to the timespec storage format | ||
434 | * | ||
435 | * Note: The tv_nsec part is always in the range of | ||
436 | * 0 <= tv_nsec < NSEC_PER_SEC | ||
437 | * For negative values only the tv_sec field is negative ! | ||
438 | */ | ||
439 | void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec) | ||
440 | { | ||
441 | while (nsec >= NSEC_PER_SEC) { | ||
442 | /* | ||
443 | * The following asm() prevents the compiler from | ||
444 | * optimising this loop into a modulo operation. See | ||
445 | * also __iter_div_u64_rem() in include/linux/time.h | ||
446 | */ | ||
447 | asm("" : "+rm"(nsec)); | ||
448 | nsec -= NSEC_PER_SEC; | ||
449 | ++sec; | ||
450 | } | ||
451 | while (nsec < 0) { | ||
452 | asm("" : "+rm"(nsec)); | ||
453 | nsec += NSEC_PER_SEC; | ||
454 | --sec; | ||
455 | } | ||
456 | ts->tv_sec = sec; | ||
457 | ts->tv_nsec = nsec; | ||
458 | } | ||
459 | EXPORT_SYMBOL(set_normalized_timespec64); | ||
460 | |||
461 | /** | ||
462 | * ns_to_timespec64 - Convert nanoseconds to timespec64 | ||
463 | * @nsec: the nanoseconds value to be converted | ||
464 | * | ||
465 | * Returns the timespec64 representation of the nsec parameter. | ||
466 | */ | ||
467 | struct timespec64 ns_to_timespec64(const s64 nsec) | ||
468 | { | ||
469 | struct timespec64 ts; | ||
470 | s32 rem; | ||
471 | |||
472 | if (!nsec) | ||
473 | return (struct timespec64) {0, 0}; | ||
474 | |||
475 | ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem); | ||
476 | if (unlikely(rem < 0)) { | ||
477 | ts.tv_sec--; | ||
478 | rem += NSEC_PER_SEC; | ||
479 | } | ||
480 | ts.tv_nsec = rem; | ||
481 | |||
482 | return ts; | ||
483 | } | ||
484 | EXPORT_SYMBOL(ns_to_timespec64); | ||
485 | #endif | ||
423 | /* | 486 | /* |
424 | * When we convert to jiffies then we interpret incoming values | 487 | * When we convert to jiffies then we interpret incoming values |
425 | * the following way: | 488 | * the following way: |
@@ -694,6 +757,7 @@ unsigned long nsecs_to_jiffies(u64 n) | |||
694 | { | 757 | { |
695 | return (unsigned long)nsecs_to_jiffies64(n); | 758 | return (unsigned long)nsecs_to_jiffies64(n); |
696 | } | 759 | } |
760 | EXPORT_SYMBOL_GPL(nsecs_to_jiffies); | ||
697 | 761 | ||
698 | /* | 762 | /* |
699 | * Add two timespec values and do a safety check for overflow. | 763 | * Add two timespec values and do a safety check for overflow. |
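
A worked sketch of the two 32-bit-only helpers added above; the input values are invented, and the commented results follow from the normalization loops and the div_s64_rem() remainder fixup in the hunk.

/* Sketch: behaviour of the new timespec64 helpers (values invented). */
static void timespec64_helpers_sketch(void)
{
	struct timespec64 ts;

	set_normalized_timespec64(&ts, 2, 1500000000LL);
	/* now ts.tv_sec == 3, ts.tv_nsec == 500000000 */

	ts = ns_to_timespec64(-1);
	/* now ts.tv_sec == -1, ts.tv_nsec == 999999999 (tv_nsec stays in [0, NSEC_PER_SEC)) */
}
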
diff --git a/kernel/timeconst.bc b/kernel/time/timeconst.bc index 511bdf2cafda..511bdf2cafda 100644 --- a/kernel/timeconst.bc +++ b/kernel/time/timeconst.bc | |||
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 32d8d6aaedb8..f7378eaebe67 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -32,9 +32,16 @@ | |||
32 | #define TK_MIRROR (1 << 1) | 32 | #define TK_MIRROR (1 << 1) |
33 | #define TK_CLOCK_WAS_SET (1 << 2) | 33 | #define TK_CLOCK_WAS_SET (1 << 2) |
34 | 34 | ||
35 | static struct timekeeper timekeeper; | 35 | /* |
36 | * The most important data for readout fits into a single 64 byte | ||
37 | * cache line. | ||
38 | */ | ||
39 | static struct { | ||
40 | seqcount_t seq; | ||
41 | struct timekeeper timekeeper; | ||
42 | } tk_core ____cacheline_aligned; | ||
43 | |||
36 | static DEFINE_RAW_SPINLOCK(timekeeper_lock); | 44 | static DEFINE_RAW_SPINLOCK(timekeeper_lock); |
37 | static seqcount_t timekeeper_seq; | ||
38 | static struct timekeeper shadow_timekeeper; | 45 | static struct timekeeper shadow_timekeeper; |
39 | 46 | ||
40 | /* flag for if timekeeping is suspended */ | 47 | /* flag for if timekeeping is suspended */ |
@@ -51,43 +58,52 @@ static inline void tk_normalize_xtime(struct timekeeper *tk) | |||
51 | } | 58 | } |
52 | } | 59 | } |
53 | 60 | ||
54 | static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts) | 61 | static inline struct timespec64 tk_xtime(struct timekeeper *tk) |
62 | { | ||
63 | struct timespec64 ts; | ||
64 | |||
65 | ts.tv_sec = tk->xtime_sec; | ||
66 | ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift); | ||
67 | return ts; | ||
68 | } | ||
69 | |||
70 | static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts) | ||
55 | { | 71 | { |
56 | tk->xtime_sec = ts->tv_sec; | 72 | tk->xtime_sec = ts->tv_sec; |
57 | tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift; | 73 | tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift; |
58 | } | 74 | } |
59 | 75 | ||
60 | static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts) | 76 | static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts) |
61 | { | 77 | { |
62 | tk->xtime_sec += ts->tv_sec; | 78 | tk->xtime_sec += ts->tv_sec; |
63 | tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift; | 79 | tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift; |
64 | tk_normalize_xtime(tk); | 80 | tk_normalize_xtime(tk); |
65 | } | 81 | } |
66 | 82 | ||
67 | static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm) | 83 | static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm) |
68 | { | 84 | { |
69 | struct timespec tmp; | 85 | struct timespec64 tmp; |
70 | 86 | ||
71 | /* | 87 | /* |
72 | * Verify consistency of: offset_real = -wall_to_monotonic | 88 | * Verify consistency of: offset_real = -wall_to_monotonic |
73 | * before modifying anything | 89 | * before modifying anything |
74 | */ | 90 | */ |
75 | set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec, | 91 | set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec, |
76 | -tk->wall_to_monotonic.tv_nsec); | 92 | -tk->wall_to_monotonic.tv_nsec); |
77 | WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64); | 93 | WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64); |
78 | tk->wall_to_monotonic = wtm; | 94 | tk->wall_to_monotonic = wtm; |
79 | set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec); | 95 | set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec); |
80 | tk->offs_real = timespec_to_ktime(tmp); | 96 | tk->offs_real = timespec64_to_ktime(tmp); |
81 | tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0)); | 97 | tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0)); |
82 | } | 98 | } |
83 | 99 | ||
84 | static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t) | 100 | static void tk_set_sleep_time(struct timekeeper *tk, struct timespec64 t) |
85 | { | 101 | { |
86 | /* Verify consistency before modifying */ | 102 | /* Verify consistency before modifying */ |
87 | WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64); | 103 | WARN_ON_ONCE(tk->offs_boot.tv64 != timespec64_to_ktime(tk->total_sleep_time).tv64); |
88 | 104 | ||
89 | tk->total_sleep_time = t; | 105 | tk->total_sleep_time = t; |
90 | tk->offs_boot = timespec_to_ktime(t); | 106 | tk->offs_boot = timespec64_to_ktime(t); |
91 | } | 107 | } |
92 | 108 | ||
93 | /** | 109 | /** |
@@ -153,16 +169,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) | |||
153 | /* Timekeeper helper functions. */ | 169 | /* Timekeeper helper functions. */ |
154 | 170 | ||
155 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET | 171 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET |
156 | u32 (*arch_gettimeoffset)(void); | 172 | static u32 default_arch_gettimeoffset(void) { return 0; } |
157 | 173 | u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset; | |
158 | u32 get_arch_timeoffset(void) | ||
159 | { | ||
160 | if (likely(arch_gettimeoffset)) | ||
161 | return arch_gettimeoffset(); | ||
162 | return 0; | ||
163 | } | ||
164 | #else | 174 | #else |
165 | static inline u32 get_arch_timeoffset(void) { return 0; } | 175 | static inline u32 arch_gettimeoffset(void) { return 0; } |
166 | #endif | 176 | #endif |
167 | 177 | ||
168 | static inline s64 timekeeping_get_ns(struct timekeeper *tk) | 178 | static inline s64 timekeeping_get_ns(struct timekeeper *tk) |
@@ -182,7 +192,7 @@ static inline s64 timekeeping_get_ns(struct timekeeper *tk) | |||
182 | nsec >>= tk->shift; | 192 | nsec >>= tk->shift; |
183 | 193 | ||
184 | /* If arch requires, add in get_arch_timeoffset() */ | 194 | /* If arch requires, add in get_arch_timeoffset() */ |
185 | return nsec + get_arch_timeoffset(); | 195 | return nsec + arch_gettimeoffset(); |
186 | } | 196 | } |
187 | 197 | ||
188 | static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk) | 198 | static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk) |
@@ -202,9 +212,43 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk) | |||
202 | nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); | 212 | nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); |
203 | 213 | ||
204 | /* If arch requires, add in get_arch_timeoffset() */ | 214 | /* If arch requires, add in get_arch_timeoffset() */ |
205 | return nsec + get_arch_timeoffset(); | 215 | return nsec + arch_gettimeoffset(); |
206 | } | 216 | } |
207 | 217 | ||
218 | #ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD | ||
219 | |||
220 | static inline void update_vsyscall(struct timekeeper *tk) | ||
221 | { | ||
222 | struct timespec xt; | ||
223 | |||
224 | xt = tk_xtime(tk); | ||
225 | update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult); | ||
226 | } | ||
227 | |||
228 | static inline void old_vsyscall_fixup(struct timekeeper *tk) | ||
229 | { | ||
230 | s64 remainder; | ||
231 | |||
232 | /* | ||
233 | * Store only full nanoseconds into xtime_nsec after rounding | ||
234 | * it up and add the remainder to the error difference. | ||
235 | * XXX - This is necessary to avoid small 1ns inconsistencies caused | ||
236 | * by truncating the remainder in vsyscalls. However, it causes | ||
237 | * additional work to be done in timekeeping_adjust(). Once | ||
238 | * the vsyscall implementations are converted to use xtime_nsec | ||
239 | * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD | ||
240 | * users are removed, this can be killed. | ||
241 | */ | ||
242 | remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1); | ||
243 | tk->xtime_nsec -= remainder; | ||
244 | tk->xtime_nsec += 1ULL << tk->shift; | ||
245 | tk->ntp_error += remainder << tk->ntp_error_shift; | ||
246 | tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift; | ||
247 | } | ||
248 | #else | ||
249 | #define old_vsyscall_fixup(tk) | ||
250 | #endif | ||
251 | |||
208 | static RAW_NOTIFIER_HEAD(pvclock_gtod_chain); | 252 | static RAW_NOTIFIER_HEAD(pvclock_gtod_chain); |
209 | 253 | ||
210 | static void update_pvclock_gtod(struct timekeeper *tk, bool was_set) | 254 | static void update_pvclock_gtod(struct timekeeper *tk, bool was_set) |
@@ -217,7 +261,7 @@ static void update_pvclock_gtod(struct timekeeper *tk, bool was_set) | |||
217 | */ | 261 | */ |
218 | int pvclock_gtod_register_notifier(struct notifier_block *nb) | 262 | int pvclock_gtod_register_notifier(struct notifier_block *nb) |
219 | { | 263 | { |
220 | struct timekeeper *tk = &timekeeper; | 264 | struct timekeeper *tk = &tk_core.timekeeper; |
221 | unsigned long flags; | 265 | unsigned long flags; |
222 | int ret; | 266 | int ret; |
223 | 267 | ||
@@ -247,6 +291,26 @@ int pvclock_gtod_unregister_notifier(struct notifier_block *nb) | |||
247 | } | 291 | } |
248 | EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier); | 292 | EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier); |
249 | 293 | ||
294 | /* | ||
295 | * Update the ktime_t based scalar nsec members of the timekeeper | ||
296 | */ | ||
297 | static inline void tk_update_ktime_data(struct timekeeper *tk) | ||
298 | { | ||
299 | s64 nsec; | ||
300 | |||
301 | /* | ||
302 | * The xtime based monotonic readout is: | ||
303 | * nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now(); | ||
304 | * The ktime based monotonic readout is: | ||
305 | * nsec = base_mono + now(); | ||
306 | * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec | ||
307 | */ | ||
308 | nsec = (s64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec); | ||
309 | nsec *= NSEC_PER_SEC; | ||
310 | nsec += tk->wall_to_monotonic.tv_nsec; | ||
311 | tk->base_mono = ns_to_ktime(nsec); | ||
312 | } | ||
313 | |||
250 | /* must hold timekeeper_lock */ | 314 | /* must hold timekeeper_lock */ |
251 | static void timekeeping_update(struct timekeeper *tk, unsigned int action) | 315 | static void timekeeping_update(struct timekeeper *tk, unsigned int action) |
252 | { | 316 | { |
@@ -257,8 +321,11 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action) | |||
257 | update_vsyscall(tk); | 321 | update_vsyscall(tk); |
258 | update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET); | 322 | update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET); |
259 | 323 | ||
324 | tk_update_ktime_data(tk); | ||
325 | |||
260 | if (action & TK_MIRROR) | 326 | if (action & TK_MIRROR) |
261 | memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper)); | 327 | memcpy(&shadow_timekeeper, &tk_core.timekeeper, |
328 | sizeof(tk_core.timekeeper)); | ||
262 | } | 329 | } |
263 | 330 | ||
264 | /** | 331 | /** |
@@ -282,37 +349,37 @@ static void timekeeping_forward_now(struct timekeeper *tk) | |||
282 | tk->xtime_nsec += cycle_delta * tk->mult; | 349 | tk->xtime_nsec += cycle_delta * tk->mult; |
283 | 350 | ||
284 | /* If arch requires, add in get_arch_timeoffset() */ | 351 | /* If arch requires, add in get_arch_timeoffset() */ |
285 | tk->xtime_nsec += (u64)get_arch_timeoffset() << tk->shift; | 352 | tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift; |
286 | 353 | ||
287 | tk_normalize_xtime(tk); | 354 | tk_normalize_xtime(tk); |
288 | 355 | ||
289 | nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); | 356 | nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); |
290 | timespec_add_ns(&tk->raw_time, nsec); | 357 | timespec64_add_ns(&tk->raw_time, nsec); |
291 | } | 358 | } |
292 | 359 | ||
293 | /** | 360 | /** |
294 | * __getnstimeofday - Returns the time of day in a timespec. | 361 | * __getnstimeofday64 - Returns the time of day in a timespec64. |
295 | * @ts: pointer to the timespec to be set | 362 | * @ts: pointer to the timespec to be set |
296 | * | 363 | * |
297 | * Updates the time of day in the timespec. | 364 | * Updates the time of day in the timespec. |
298 | * Returns 0 on success, or -ve when suspended (timespec will be undefined). | 365 | * Returns 0 on success, or -ve when suspended (timespec will be undefined). |
299 | */ | 366 | */ |
300 | int __getnstimeofday(struct timespec *ts) | 367 | int __getnstimeofday64(struct timespec64 *ts) |
301 | { | 368 | { |
302 | struct timekeeper *tk = &timekeeper; | 369 | struct timekeeper *tk = &tk_core.timekeeper; |
303 | unsigned long seq; | 370 | unsigned long seq; |
304 | s64 nsecs = 0; | 371 | s64 nsecs = 0; |
305 | 372 | ||
306 | do { | 373 | do { |
307 | seq = read_seqcount_begin(&timekeeper_seq); | 374 | seq = read_seqcount_begin(&tk_core.seq); |
308 | 375 | ||
309 | ts->tv_sec = tk->xtime_sec; | 376 | ts->tv_sec = tk->xtime_sec; |
310 | nsecs = timekeeping_get_ns(tk); | 377 | nsecs = timekeeping_get_ns(tk); |
311 | 378 | ||
312 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 379 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
313 | 380 | ||
314 | ts->tv_nsec = 0; | 381 | ts->tv_nsec = 0; |
315 | timespec_add_ns(ts, nsecs); | 382 | timespec64_add_ns(ts, nsecs); |
316 | 383 | ||
317 | /* | 384 | /* |
318 | * Do not bail out early, in case there were callers still using | 385 | * Do not bail out early, in case there were callers still using |
@@ -322,72 +389,117 @@ int __getnstimeofday(struct timespec *ts) | |||
322 | return -EAGAIN; | 389 | return -EAGAIN; |
323 | return 0; | 390 | return 0; |
324 | } | 391 | } |
325 | EXPORT_SYMBOL(__getnstimeofday); | 392 | EXPORT_SYMBOL(__getnstimeofday64); |
326 | 393 | ||
327 | /** | 394 | /** |
328 | * getnstimeofday - Returns the time of day in a timespec. | 395 | * getnstimeofday64 - Returns the time of day in a timespec64. |
329 | * @ts: pointer to the timespec to be set | 396 | * @ts: pointer to the timespec to be set |
330 | * | 397 | * |
331 | * Returns the time of day in a timespec (WARN if suspended). | 398 | * Returns the time of day in a timespec (WARN if suspended). |
332 | */ | 399 | */ |
333 | void getnstimeofday(struct timespec *ts) | 400 | void getnstimeofday64(struct timespec64 *ts) |
334 | { | 401 | { |
335 | WARN_ON(__getnstimeofday(ts)); | 402 | WARN_ON(__getnstimeofday64(ts)); |
336 | } | 403 | } |
337 | EXPORT_SYMBOL(getnstimeofday); | 404 | EXPORT_SYMBOL(getnstimeofday64); |
338 | 405 | ||
339 | ktime_t ktime_get(void) | 406 | ktime_t ktime_get(void) |
340 | { | 407 | { |
341 | struct timekeeper *tk = &timekeeper; | 408 | struct timekeeper *tk = &tk_core.timekeeper; |
342 | unsigned int seq; | 409 | unsigned int seq; |
343 | s64 secs, nsecs; | 410 | ktime_t base; |
411 | s64 nsecs; | ||
344 | 412 | ||
345 | WARN_ON(timekeeping_suspended); | 413 | WARN_ON(timekeeping_suspended); |
346 | 414 | ||
347 | do { | 415 | do { |
348 | seq = read_seqcount_begin(&timekeeper_seq); | 416 | seq = read_seqcount_begin(&tk_core.seq); |
349 | secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; | 417 | base = tk->base_mono; |
350 | nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec; | 418 | nsecs = timekeeping_get_ns(tk); |
351 | 419 | ||
352 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 420 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
353 | /* | 421 | |
354 | * Use ktime_set/ktime_add_ns to create a proper ktime on | 422 | return ktime_add_ns(base, nsecs); |
355 | * 32-bit architectures without CONFIG_KTIME_SCALAR. | ||
356 | */ | ||
357 | return ktime_add_ns(ktime_set(secs, 0), nsecs); | ||
358 | } | 423 | } |
359 | EXPORT_SYMBOL_GPL(ktime_get); | 424 | EXPORT_SYMBOL_GPL(ktime_get); |
360 | 425 | ||
426 | static ktime_t *offsets[TK_OFFS_MAX] = { | ||
427 | [TK_OFFS_REAL] = &tk_core.timekeeper.offs_real, | ||
428 | [TK_OFFS_BOOT] = &tk_core.timekeeper.offs_boot, | ||
429 | [TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai, | ||
430 | }; | ||
431 | |||
432 | ktime_t ktime_get_with_offset(enum tk_offsets offs) | ||
433 | { | ||
434 | struct timekeeper *tk = &tk_core.timekeeper; | ||
435 | unsigned int seq; | ||
436 | ktime_t base, *offset = offsets[offs]; | ||
437 | s64 nsecs; | ||
438 | |||
439 | WARN_ON(timekeeping_suspended); | ||
440 | |||
441 | do { | ||
442 | seq = read_seqcount_begin(&tk_core.seq); | ||
443 | base = ktime_add(tk->base_mono, *offset); | ||
444 | nsecs = timekeeping_get_ns(tk); | ||
445 | |||
446 | } while (read_seqcount_retry(&tk_core.seq, seq)); | ||
447 | |||
448 | return ktime_add_ns(base, nsecs); | ||
449 | |||
450 | } | ||
451 | EXPORT_SYMBOL_GPL(ktime_get_with_offset); | ||
452 | |||
361 | /** | 453 | /** |
362 | * ktime_get_ts - get the monotonic clock in timespec format | 454 | * ktime_mono_to_any() - convert monotonic time to any other time |
455 | * @tmono: time to convert. | ||
456 | * @offs: which offset to use | ||
457 | */ | ||
458 | ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs) | ||
459 | { | ||
460 | ktime_t *offset = offsets[offs]; | ||
461 | unsigned long seq; | ||
462 | ktime_t tconv; | ||
463 | |||
464 | do { | ||
465 | seq = read_seqcount_begin(&tk_core.seq); | ||
466 | tconv = ktime_add(tmono, *offset); | ||
467 | } while (read_seqcount_retry(&tk_core.seq, seq)); | ||
468 | |||
469 | return tconv; | ||
470 | } | ||
471 | EXPORT_SYMBOL_GPL(ktime_mono_to_any); | ||
472 | |||
473 | /** | ||
474 | * ktime_get_ts64 - get the monotonic clock in timespec64 format | ||
363 | * @ts: pointer to timespec variable | 475 | * @ts: pointer to timespec variable |
364 | * | 476 | * |
365 | * The function calculates the monotonic clock from the realtime | 477 | * The function calculates the monotonic clock from the realtime |
366 | * clock and the wall_to_monotonic offset and stores the result | 478 | * clock and the wall_to_monotonic offset and stores the result |
367 | * in normalized timespec format in the variable pointed to by @ts. | 479 | * in normalized timespec format in the variable pointed to by @ts. |
368 | */ | 480 | */ |
369 | void ktime_get_ts(struct timespec *ts) | 481 | void ktime_get_ts64(struct timespec64 *ts) |
370 | { | 482 | { |
371 | struct timekeeper *tk = &timekeeper; | 483 | struct timekeeper *tk = &tk_core.timekeeper; |
372 | struct timespec tomono; | 484 | struct timespec64 tomono; |
373 | s64 nsec; | 485 | s64 nsec; |
374 | unsigned int seq; | 486 | unsigned int seq; |
375 | 487 | ||
376 | WARN_ON(timekeeping_suspended); | 488 | WARN_ON(timekeeping_suspended); |
377 | 489 | ||
378 | do { | 490 | do { |
379 | seq = read_seqcount_begin(&timekeeper_seq); | 491 | seq = read_seqcount_begin(&tk_core.seq); |
380 | ts->tv_sec = tk->xtime_sec; | 492 | ts->tv_sec = tk->xtime_sec; |
381 | nsec = timekeeping_get_ns(tk); | 493 | nsec = timekeeping_get_ns(tk); |
382 | tomono = tk->wall_to_monotonic; | 494 | tomono = tk->wall_to_monotonic; |
383 | 495 | ||
384 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 496 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
385 | 497 | ||
386 | ts->tv_sec += tomono.tv_sec; | 498 | ts->tv_sec += tomono.tv_sec; |
387 | ts->tv_nsec = 0; | 499 | ts->tv_nsec = 0; |
388 | timespec_add_ns(ts, nsec + tomono.tv_nsec); | 500 | timespec64_add_ns(ts, nsec + tomono.tv_nsec); |
389 | } | 501 | } |
390 | EXPORT_SYMBOL_GPL(ktime_get_ts); | 502 | EXPORT_SYMBOL_GPL(ktime_get_ts64); |
391 | 503 | ||
392 | 504 | ||
393 | /** | 505 | /** |
@@ -398,41 +510,28 @@ EXPORT_SYMBOL_GPL(ktime_get_ts); | |||
398 | */ | 510 | */ |
399 | void timekeeping_clocktai(struct timespec *ts) | 511 | void timekeeping_clocktai(struct timespec *ts) |
400 | { | 512 | { |
401 | struct timekeeper *tk = &timekeeper; | 513 | struct timekeeper *tk = &tk_core.timekeeper; |
514 | struct timespec64 ts64; | ||
402 | unsigned long seq; | 515 | unsigned long seq; |
403 | u64 nsecs; | 516 | u64 nsecs; |
404 | 517 | ||
405 | WARN_ON(timekeeping_suspended); | 518 | WARN_ON(timekeeping_suspended); |
406 | 519 | ||
407 | do { | 520 | do { |
408 | seq = read_seqcount_begin(&timekeeper_seq); | 521 | seq = read_seqcount_begin(&tk_core.seq); |
409 | 522 | ||
410 | ts->tv_sec = tk->xtime_sec + tk->tai_offset; | 523 | ts64.tv_sec = tk->xtime_sec + tk->tai_offset; |
411 | nsecs = timekeeping_get_ns(tk); | 524 | nsecs = timekeeping_get_ns(tk); |
412 | 525 | ||
413 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 526 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
414 | 527 | ||
415 | ts->tv_nsec = 0; | 528 | ts64.tv_nsec = 0; |
416 | timespec_add_ns(ts, nsecs); | 529 | timespec64_add_ns(&ts64, nsecs); |
530 | *ts = timespec64_to_timespec(ts64); | ||
417 | 531 | ||
418 | } | 532 | } |
419 | EXPORT_SYMBOL(timekeeping_clocktai); | 533 | EXPORT_SYMBOL(timekeeping_clocktai); |
420 | 534 | ||
421 | |||
422 | /** | ||
423 | * ktime_get_clocktai - Returns the TAI time of day in a ktime | ||
424 | * | ||
425 | * Returns the time of day in a ktime. | ||
426 | */ | ||
427 | ktime_t ktime_get_clocktai(void) | ||
428 | { | ||
429 | struct timespec ts; | ||
430 | |||
431 | timekeeping_clocktai(&ts); | ||
432 | return timespec_to_ktime(ts); | ||
433 | } | ||
434 | EXPORT_SYMBOL(ktime_get_clocktai); | ||
435 | |||
436 | #ifdef CONFIG_NTP_PPS | 535 | #ifdef CONFIG_NTP_PPS |
437 | 536 | ||
438 | /** | 537 | /** |
@@ -446,23 +545,23 @@ EXPORT_SYMBOL(ktime_get_clocktai); | |||
446 | */ | 545 | */ |
447 | void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) | 546 | void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) |
448 | { | 547 | { |
449 | struct timekeeper *tk = &timekeeper; | 548 | struct timekeeper *tk = &tk_core.timekeeper; |
450 | unsigned long seq; | 549 | unsigned long seq; |
451 | s64 nsecs_raw, nsecs_real; | 550 | s64 nsecs_raw, nsecs_real; |
452 | 551 | ||
453 | WARN_ON_ONCE(timekeeping_suspended); | 552 | WARN_ON_ONCE(timekeeping_suspended); |
454 | 553 | ||
455 | do { | 554 | do { |
456 | seq = read_seqcount_begin(&timekeeper_seq); | 555 | seq = read_seqcount_begin(&tk_core.seq); |
457 | 556 | ||
458 | *ts_raw = tk->raw_time; | 557 | *ts_raw = timespec64_to_timespec(tk->raw_time); |
459 | ts_real->tv_sec = tk->xtime_sec; | 558 | ts_real->tv_sec = tk->xtime_sec; |
460 | ts_real->tv_nsec = 0; | 559 | ts_real->tv_nsec = 0; |
461 | 560 | ||
462 | nsecs_raw = timekeeping_get_ns_raw(tk); | 561 | nsecs_raw = timekeeping_get_ns_raw(tk); |
463 | nsecs_real = timekeeping_get_ns(tk); | 562 | nsecs_real = timekeeping_get_ns(tk); |
464 | 563 | ||
465 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 564 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
466 | 565 | ||
467 | timespec_add_ns(ts_raw, nsecs_raw); | 566 | timespec_add_ns(ts_raw, nsecs_raw); |
468 | timespec_add_ns(ts_real, nsecs_real); | 567 | timespec_add_ns(ts_real, nsecs_real); |
@@ -479,9 +578,9 @@ EXPORT_SYMBOL(getnstime_raw_and_real); | |||
479 | */ | 578 | */ |
480 | void do_gettimeofday(struct timeval *tv) | 579 | void do_gettimeofday(struct timeval *tv) |
481 | { | 580 | { |
482 | struct timespec now; | 581 | struct timespec64 now; |
483 | 582 | ||
484 | getnstimeofday(&now); | 583 | getnstimeofday64(&now); |
485 | tv->tv_sec = now.tv_sec; | 584 | tv->tv_sec = now.tv_sec; |
486 | tv->tv_usec = now.tv_nsec/1000; | 585 | tv->tv_usec = now.tv_nsec/1000; |
487 | } | 586 | } |
@@ -495,15 +594,15 @@ EXPORT_SYMBOL(do_gettimeofday); | |||
495 | */ | 594 | */ |
496 | int do_settimeofday(const struct timespec *tv) | 595 | int do_settimeofday(const struct timespec *tv) |
497 | { | 596 | { |
498 | struct timekeeper *tk = &timekeeper; | 597 | struct timekeeper *tk = &tk_core.timekeeper; |
499 | struct timespec ts_delta, xt; | 598 | struct timespec64 ts_delta, xt, tmp; |
500 | unsigned long flags; | 599 | unsigned long flags; |
501 | 600 | ||
502 | if (!timespec_valid_strict(tv)) | 601 | if (!timespec_valid_strict(tv)) |
503 | return -EINVAL; | 602 | return -EINVAL; |
504 | 603 | ||
505 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 604 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
506 | write_seqcount_begin(&timekeeper_seq); | 605 | write_seqcount_begin(&tk_core.seq); |
507 | 606 | ||
508 | timekeeping_forward_now(tk); | 607 | timekeeping_forward_now(tk); |
509 | 608 | ||
@@ -511,13 +610,14 @@ int do_settimeofday(const struct timespec *tv) | |||
511 | ts_delta.tv_sec = tv->tv_sec - xt.tv_sec; | 610 | ts_delta.tv_sec = tv->tv_sec - xt.tv_sec; |
512 | ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec; | 611 | ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec; |
513 | 612 | ||
514 | tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta)); | 613 | tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta)); |
515 | 614 | ||
516 | tk_set_xtime(tk, tv); | 615 | tmp = timespec_to_timespec64(*tv); |
616 | tk_set_xtime(tk, &tmp); | ||
517 | 617 | ||
518 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); | 618 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); |
519 | 619 | ||
520 | write_seqcount_end(&timekeeper_seq); | 620 | write_seqcount_end(&tk_core.seq); |
521 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 621 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
522 | 622 | ||
523 | /* signal hrtimers about time change */ | 623 | /* signal hrtimers about time change */ |
@@ -535,33 +635,35 @@ EXPORT_SYMBOL(do_settimeofday); | |||
535 | */ | 635 | */ |
536 | int timekeeping_inject_offset(struct timespec *ts) | 636 | int timekeeping_inject_offset(struct timespec *ts) |
537 | { | 637 | { |
538 | struct timekeeper *tk = &timekeeper; | 638 | struct timekeeper *tk = &tk_core.timekeeper; |
539 | unsigned long flags; | 639 | unsigned long flags; |
540 | struct timespec tmp; | 640 | struct timespec64 ts64, tmp; |
541 | int ret = 0; | 641 | int ret = 0; |
542 | 642 | ||
543 | if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) | 643 | if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) |
544 | return -EINVAL; | 644 | return -EINVAL; |
545 | 645 | ||
646 | ts64 = timespec_to_timespec64(*ts); | ||
647 | |||
546 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 648 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
547 | write_seqcount_begin(&timekeeper_seq); | 649 | write_seqcount_begin(&tk_core.seq); |
548 | 650 | ||
549 | timekeeping_forward_now(tk); | 651 | timekeeping_forward_now(tk); |
550 | 652 | ||
551 | /* Make sure the proposed value is valid */ | 653 | /* Make sure the proposed value is valid */ |
552 | tmp = timespec_add(tk_xtime(tk), *ts); | 654 | tmp = timespec64_add(tk_xtime(tk), ts64); |
553 | if (!timespec_valid_strict(&tmp)) { | 655 | if (!timespec64_valid_strict(&tmp)) { |
554 | ret = -EINVAL; | 656 | ret = -EINVAL; |
555 | goto error; | 657 | goto error; |
556 | } | 658 | } |
557 | 659 | ||
558 | tk_xtime_add(tk, ts); | 660 | tk_xtime_add(tk, &ts64); |
559 | tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts)); | 661 | tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64)); |
560 | 662 | ||
561 | error: /* even if we error out, we forwarded the time, so call update */ | 663 | error: /* even if we error out, we forwarded the time, so call update */ |
562 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); | 664 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); |
563 | 665 | ||
564 | write_seqcount_end(&timekeeper_seq); | 666 | write_seqcount_end(&tk_core.seq); |
565 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 667 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
566 | 668 | ||
567 | /* signal hrtimers about time change */ | 669 | /* signal hrtimers about time change */ |
@@ -578,14 +680,14 @@ EXPORT_SYMBOL(timekeeping_inject_offset); | |||
578 | */ | 680 | */ |
579 | s32 timekeeping_get_tai_offset(void) | 681 | s32 timekeeping_get_tai_offset(void) |
580 | { | 682 | { |
581 | struct timekeeper *tk = &timekeeper; | 683 | struct timekeeper *tk = &tk_core.timekeeper; |
582 | unsigned int seq; | 684 | unsigned int seq; |
583 | s32 ret; | 685 | s32 ret; |
584 | 686 | ||
585 | do { | 687 | do { |
586 | seq = read_seqcount_begin(&timekeeper_seq); | 688 | seq = read_seqcount_begin(&tk_core.seq); |
587 | ret = tk->tai_offset; | 689 | ret = tk->tai_offset; |
588 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 690 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
589 | 691 | ||
590 | return ret; | 692 | return ret; |
591 | } | 693 | } |
@@ -606,14 +708,14 @@ static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset) | |||
606 | */ | 708 | */ |
607 | void timekeeping_set_tai_offset(s32 tai_offset) | 709 | void timekeeping_set_tai_offset(s32 tai_offset) |
608 | { | 710 | { |
609 | struct timekeeper *tk = &timekeeper; | 711 | struct timekeeper *tk = &tk_core.timekeeper; |
610 | unsigned long flags; | 712 | unsigned long flags; |
611 | 713 | ||
612 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 714 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
613 | write_seqcount_begin(&timekeeper_seq); | 715 | write_seqcount_begin(&tk_core.seq); |
614 | __timekeeping_set_tai_offset(tk, tai_offset); | 716 | __timekeeping_set_tai_offset(tk, tai_offset); |
615 | timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); | 717 | timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); |
616 | write_seqcount_end(&timekeeper_seq); | 718 | write_seqcount_end(&tk_core.seq); |
617 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 719 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
618 | clock_was_set(); | 720 | clock_was_set(); |
619 | } | 721 | } |
@@ -625,14 +727,14 @@ void timekeeping_set_tai_offset(s32 tai_offset) | |||
625 | */ | 727 | */ |
626 | static int change_clocksource(void *data) | 728 | static int change_clocksource(void *data) |
627 | { | 729 | { |
628 | struct timekeeper *tk = &timekeeper; | 730 | struct timekeeper *tk = &tk_core.timekeeper; |
629 | struct clocksource *new, *old; | 731 | struct clocksource *new, *old; |
630 | unsigned long flags; | 732 | unsigned long flags; |
631 | 733 | ||
632 | new = (struct clocksource *) data; | 734 | new = (struct clocksource *) data; |
633 | 735 | ||
634 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 736 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
635 | write_seqcount_begin(&timekeeper_seq); | 737 | write_seqcount_begin(&tk_core.seq); |
636 | 738 | ||
637 | timekeeping_forward_now(tk); | 739 | timekeeping_forward_now(tk); |
638 | /* | 740 | /* |
@@ -652,7 +754,7 @@ static int change_clocksource(void *data) | |||
652 | } | 754 | } |
653 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); | 755 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); |
654 | 756 | ||
655 | write_seqcount_end(&timekeeper_seq); | 757 | write_seqcount_end(&tk_core.seq); |
656 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 758 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
657 | 759 | ||
658 | return 0; | 760 | return 0; |
@@ -667,7 +769,7 @@ static int change_clocksource(void *data) | |||
667 | */ | 769 | */ |
668 | int timekeeping_notify(struct clocksource *clock) | 770 | int timekeeping_notify(struct clocksource *clock) |
669 | { | 771 | { |
670 | struct timekeeper *tk = &timekeeper; | 772 | struct timekeeper *tk = &tk_core.timekeeper; |
671 | 773 | ||
672 | if (tk->clock == clock) | 774 | if (tk->clock == clock) |
673 | return 0; | 775 | return 0; |
@@ -677,21 +779,6 @@ int timekeeping_notify(struct clocksource *clock) | |||
677 | } | 779 | } |
678 | 780 | ||
679 | /** | 781 | /** |
680 | * ktime_get_real - get the real (wall-) time in ktime_t format | ||
681 | * | ||
682 | * returns the time in ktime_t format | ||
683 | */ | ||
684 | ktime_t ktime_get_real(void) | ||
685 | { | ||
686 | struct timespec now; | ||
687 | |||
688 | getnstimeofday(&now); | ||
689 | |||
690 | return timespec_to_ktime(now); | ||
691 | } | ||
692 | EXPORT_SYMBOL_GPL(ktime_get_real); | ||
693 | |||
694 | /** | ||
695 | * getrawmonotonic - Returns the raw monotonic time in a timespec | 782 | * getrawmonotonic - Returns the raw monotonic time in a timespec |
696 | * @ts: pointer to the timespec to be set | 783 | * @ts: pointer to the timespec to be set |
697 | * | 784 | * |
@@ -699,18 +786,20 @@ EXPORT_SYMBOL_GPL(ktime_get_real); | |||
699 | */ | 786 | */ |
700 | void getrawmonotonic(struct timespec *ts) | 787 | void getrawmonotonic(struct timespec *ts) |
701 | { | 788 | { |
702 | struct timekeeper *tk = &timekeeper; | 789 | struct timekeeper *tk = &tk_core.timekeeper; |
790 | struct timespec64 ts64; | ||
703 | unsigned long seq; | 791 | unsigned long seq; |
704 | s64 nsecs; | 792 | s64 nsecs; |
705 | 793 | ||
706 | do { | 794 | do { |
707 | seq = read_seqcount_begin(&timekeeper_seq); | 795 | seq = read_seqcount_begin(&tk_core.seq); |
708 | nsecs = timekeeping_get_ns_raw(tk); | 796 | nsecs = timekeeping_get_ns_raw(tk); |
709 | *ts = tk->raw_time; | 797 | ts64 = tk->raw_time; |
710 | 798 | ||
711 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 799 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
712 | 800 | ||
713 | timespec_add_ns(ts, nsecs); | 801 | timespec64_add_ns(&ts64, nsecs); |
802 | *ts = timespec64_to_timespec(ts64); | ||
714 | } | 803 | } |
715 | EXPORT_SYMBOL(getrawmonotonic); | 804 | EXPORT_SYMBOL(getrawmonotonic); |
716 | 805 | ||
@@ -719,16 +808,16 @@ EXPORT_SYMBOL(getrawmonotonic); | |||
719 | */ | 808 | */ |
720 | int timekeeping_valid_for_hres(void) | 809 | int timekeeping_valid_for_hres(void) |
721 | { | 810 | { |
722 | struct timekeeper *tk = &timekeeper; | 811 | struct timekeeper *tk = &tk_core.timekeeper; |
723 | unsigned long seq; | 812 | unsigned long seq; |
724 | int ret; | 813 | int ret; |
725 | 814 | ||
726 | do { | 815 | do { |
727 | seq = read_seqcount_begin(&timekeeper_seq); | 816 | seq = read_seqcount_begin(&tk_core.seq); |
728 | 817 | ||
729 | ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; | 818 | ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; |
730 | 819 | ||
731 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 820 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
732 | 821 | ||
733 | return ret; | 822 | return ret; |
734 | } | 823 | } |
@@ -738,16 +827,16 @@ int timekeeping_valid_for_hres(void) | |||
738 | */ | 827 | */ |
739 | u64 timekeeping_max_deferment(void) | 828 | u64 timekeeping_max_deferment(void) |
740 | { | 829 | { |
741 | struct timekeeper *tk = &timekeeper; | 830 | struct timekeeper *tk = &tk_core.timekeeper; |
742 | unsigned long seq; | 831 | unsigned long seq; |
743 | u64 ret; | 832 | u64 ret; |
744 | 833 | ||
745 | do { | 834 | do { |
746 | seq = read_seqcount_begin(&timekeeper_seq); | 835 | seq = read_seqcount_begin(&tk_core.seq); |
747 | 836 | ||
748 | ret = tk->clock->max_idle_ns; | 837 | ret = tk->clock->max_idle_ns; |
749 | 838 | ||
750 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 839 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
751 | 840 | ||
752 | return ret; | 841 | return ret; |
753 | } | 842 | } |
@@ -787,14 +876,15 @@ void __weak read_boot_clock(struct timespec *ts) | |||
787 | */ | 876 | */ |
788 | void __init timekeeping_init(void) | 877 | void __init timekeeping_init(void) |
789 | { | 878 | { |
790 | struct timekeeper *tk = &timekeeper; | 879 | struct timekeeper *tk = &tk_core.timekeeper; |
791 | struct clocksource *clock; | 880 | struct clocksource *clock; |
792 | unsigned long flags; | 881 | unsigned long flags; |
793 | struct timespec now, boot, tmp; | 882 | struct timespec64 now, boot, tmp; |
794 | 883 | struct timespec ts; | |
795 | read_persistent_clock(&now); | ||
796 | 884 | ||
797 | if (!timespec_valid_strict(&now)) { | 885 | read_persistent_clock(&ts); |
886 | now = timespec_to_timespec64(ts); | ||
887 | if (!timespec64_valid_strict(&now)) { | ||
798 | pr_warn("WARNING: Persistent clock returned invalid value!\n" | 888 | pr_warn("WARNING: Persistent clock returned invalid value!\n" |
799 | " Check your CMOS/BIOS settings.\n"); | 889 | " Check your CMOS/BIOS settings.\n"); |
800 | now.tv_sec = 0; | 890 | now.tv_sec = 0; |
@@ -802,8 +892,9 @@ void __init timekeeping_init(void) | |||
802 | } else if (now.tv_sec || now.tv_nsec) | 892 | } else if (now.tv_sec || now.tv_nsec) |
803 | persistent_clock_exist = true; | 893 | persistent_clock_exist = true; |
804 | 894 | ||
805 | read_boot_clock(&boot); | 895 | read_boot_clock(&ts); |
806 | if (!timespec_valid_strict(&boot)) { | 896 | boot = timespec_to_timespec64(ts); |
897 | if (!timespec64_valid_strict(&boot)) { | ||
807 | pr_warn("WARNING: Boot clock returned invalid value!\n" | 898 | pr_warn("WARNING: Boot clock returned invalid value!\n" |
808 | " Check your CMOS/BIOS settings.\n"); | 899 | " Check your CMOS/BIOS settings.\n"); |
809 | boot.tv_sec = 0; | 900 | boot.tv_sec = 0; |
@@ -811,7 +902,7 @@ void __init timekeeping_init(void) | |||
811 | } | 902 | } |
812 | 903 | ||
813 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 904 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
814 | write_seqcount_begin(&timekeeper_seq); | 905 | write_seqcount_begin(&tk_core.seq); |
815 | ntp_init(); | 906 | ntp_init(); |
816 | 907 | ||
817 | clock = clocksource_default_clock(); | 908 | clock = clocksource_default_clock(); |
@@ -825,21 +916,21 @@ void __init timekeeping_init(void) | |||
825 | if (boot.tv_sec == 0 && boot.tv_nsec == 0) | 916 | if (boot.tv_sec == 0 && boot.tv_nsec == 0) |
826 | boot = tk_xtime(tk); | 917 | boot = tk_xtime(tk); |
827 | 918 | ||
828 | set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec); | 919 | set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec); |
829 | tk_set_wall_to_mono(tk, tmp); | 920 | tk_set_wall_to_mono(tk, tmp); |
830 | 921 | ||
831 | tmp.tv_sec = 0; | 922 | tmp.tv_sec = 0; |
832 | tmp.tv_nsec = 0; | 923 | tmp.tv_nsec = 0; |
833 | tk_set_sleep_time(tk, tmp); | 924 | tk_set_sleep_time(tk, tmp); |
834 | 925 | ||
835 | memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper)); | 926 | timekeeping_update(tk, TK_MIRROR); |
836 | 927 | ||
837 | write_seqcount_end(&timekeeper_seq); | 928 | write_seqcount_end(&tk_core.seq); |
838 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 929 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
839 | } | 930 | } |
840 | 931 | ||
841 | /* time in seconds when suspend began */ | 932 | /* time in seconds when suspend began */ |
842 | static struct timespec timekeeping_suspend_time; | 933 | static struct timespec64 timekeeping_suspend_time; |
843 | 934 | ||
844 | /** | 935 | /** |
845 | * __timekeeping_inject_sleeptime - Internal function to add sleep interval | 936 | * __timekeeping_inject_sleeptime - Internal function to add sleep interval |
@@ -849,17 +940,17 @@ static struct timespec timekeeping_suspend_time; | |||
849 | * adds the sleep offset to the timekeeping variables. | 940 | * adds the sleep offset to the timekeeping variables. |
850 | */ | 941 | */ |
851 | static void __timekeeping_inject_sleeptime(struct timekeeper *tk, | 942 | static void __timekeeping_inject_sleeptime(struct timekeeper *tk, |
852 | struct timespec *delta) | 943 | struct timespec64 *delta) |
853 | { | 944 | { |
854 | if (!timespec_valid_strict(delta)) { | 945 | if (!timespec64_valid_strict(delta)) { |
855 | printk_deferred(KERN_WARNING | 946 | printk_deferred(KERN_WARNING |
856 | "__timekeeping_inject_sleeptime: Invalid " | 947 | "__timekeeping_inject_sleeptime: Invalid " |
857 | "sleep delta value!\n"); | 948 | "sleep delta value!\n"); |
858 | return; | 949 | return; |
859 | } | 950 | } |
860 | tk_xtime_add(tk, delta); | 951 | tk_xtime_add(tk, delta); |
861 | tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta)); | 952 | tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta)); |
862 | tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta)); | 953 | tk_set_sleep_time(tk, timespec64_add(tk->total_sleep_time, *delta)); |
863 | tk_debug_account_sleep_time(delta); | 954 | tk_debug_account_sleep_time(delta); |
864 | } | 955 | } |
865 | 956 | ||
@@ -875,7 +966,8 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk, | |||
875 | */ | 966 | */ |
876 | void timekeeping_inject_sleeptime(struct timespec *delta) | 967 | void timekeeping_inject_sleeptime(struct timespec *delta) |
877 | { | 968 | { |
878 | struct timekeeper *tk = &timekeeper; | 969 | struct timekeeper *tk = &tk_core.timekeeper; |
970 | struct timespec64 tmp; | ||
879 | unsigned long flags; | 971 | unsigned long flags; |
880 | 972 | ||
881 | /* | 973 | /* |
@@ -886,15 +978,16 @@ void timekeeping_inject_sleeptime(struct timespec *delta) | |||
886 | return; | 978 | return; |
887 | 979 | ||
888 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 980 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
889 | write_seqcount_begin(&timekeeper_seq); | 981 | write_seqcount_begin(&tk_core.seq); |
890 | 982 | ||
891 | timekeeping_forward_now(tk); | 983 | timekeeping_forward_now(tk); |
892 | 984 | ||
893 | __timekeeping_inject_sleeptime(tk, delta); | 985 | tmp = timespec_to_timespec64(*delta); |
986 | __timekeeping_inject_sleeptime(tk, &tmp); | ||
894 | 987 | ||
895 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); | 988 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); |
896 | 989 | ||
897 | write_seqcount_end(&timekeeper_seq); | 990 | write_seqcount_end(&tk_core.seq); |
898 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 991 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
899 | 992 | ||
900 | /* signal hrtimers about time change */ | 993 | /* signal hrtimers about time change */ |
@@ -910,20 +1003,22 @@ void timekeeping_inject_sleeptime(struct timespec *delta) | |||
910 | */ | 1003 | */ |
911 | static void timekeeping_resume(void) | 1004 | static void timekeeping_resume(void) |
912 | { | 1005 | { |
913 | struct timekeeper *tk = &timekeeper; | 1006 | struct timekeeper *tk = &tk_core.timekeeper; |
914 | struct clocksource *clock = tk->clock; | 1007 | struct clocksource *clock = tk->clock; |
915 | unsigned long flags; | 1008 | unsigned long flags; |
916 | struct timespec ts_new, ts_delta; | 1009 | struct timespec64 ts_new, ts_delta; |
1010 | struct timespec tmp; | ||
917 | cycle_t cycle_now, cycle_delta; | 1011 | cycle_t cycle_now, cycle_delta; |
918 | bool suspendtime_found = false; | 1012 | bool suspendtime_found = false; |
919 | 1013 | ||
920 | read_persistent_clock(&ts_new); | 1014 | read_persistent_clock(&tmp); |
1015 | ts_new = timespec_to_timespec64(tmp); | ||
921 | 1016 | ||
922 | clockevents_resume(); | 1017 | clockevents_resume(); |
923 | clocksource_resume(); | 1018 | clocksource_resume(); |
924 | 1019 | ||
925 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 1020 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
926 | write_seqcount_begin(&timekeeper_seq); | 1021 | write_seqcount_begin(&tk_core.seq); |
927 | 1022 | ||
928 | /* | 1023 | /* |
929 | * After system resumes, we need to calculate the suspended time and | 1024 | * After system resumes, we need to calculate the suspended time and |
@@ -960,10 +1055,10 @@ static void timekeeping_resume(void) | |||
960 | } | 1055 | } |
961 | nsec += ((u64) cycle_delta * mult) >> shift; | 1056 | nsec += ((u64) cycle_delta * mult) >> shift; |
962 | 1057 | ||
963 | ts_delta = ns_to_timespec(nsec); | 1058 | ts_delta = ns_to_timespec64(nsec); |
964 | suspendtime_found = true; | 1059 | suspendtime_found = true; |
965 | } else if (timespec_compare(&ts_new, &timekeeping_suspend_time) > 0) { | 1060 | } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) { |
966 | ts_delta = timespec_sub(ts_new, timekeeping_suspend_time); | 1061 | ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time); |
967 | suspendtime_found = true; | 1062 | suspendtime_found = true; |
968 | } | 1063 | } |
969 | 1064 | ||
@@ -975,7 +1070,7 @@ static void timekeeping_resume(void) | |||
975 | tk->ntp_error = 0; | 1070 | tk->ntp_error = 0; |
976 | timekeeping_suspended = 0; | 1071 | timekeeping_suspended = 0; |
977 | timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); | 1072 | timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); |
978 | write_seqcount_end(&timekeeper_seq); | 1073 | write_seqcount_end(&tk_core.seq); |
979 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 1074 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
980 | 1075 | ||
981 | touch_softlockup_watchdog(); | 1076 | touch_softlockup_watchdog(); |
@@ -988,12 +1083,14 @@ static void timekeeping_resume(void) | |||
988 | 1083 | ||
989 | static int timekeeping_suspend(void) | 1084 | static int timekeeping_suspend(void) |
990 | { | 1085 | { |
991 | struct timekeeper *tk = &timekeeper; | 1086 | struct timekeeper *tk = &tk_core.timekeeper; |
992 | unsigned long flags; | 1087 | unsigned long flags; |
993 | struct timespec delta, delta_delta; | 1088 | struct timespec64 delta, delta_delta; |
994 | static struct timespec old_delta; | 1089 | static struct timespec64 old_delta; |
1090 | struct timespec tmp; | ||
995 | 1091 | ||
996 | read_persistent_clock(&timekeeping_suspend_time); | 1092 | read_persistent_clock(&tmp); |
1093 | timekeeping_suspend_time = timespec_to_timespec64(tmp); | ||
997 | 1094 | ||
998 | /* | 1095 | /* |
999 | * On some systems the persistent_clock can not be detected at | 1096 | * On some systems the persistent_clock can not be detected at |
@@ -1004,7 +1101,7 @@ static int timekeeping_suspend(void) | |||
1004 | persistent_clock_exist = true; | 1101 | persistent_clock_exist = true; |
1005 | 1102 | ||
1006 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 1103 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
1007 | write_seqcount_begin(&timekeeper_seq); | 1104 | write_seqcount_begin(&tk_core.seq); |
1008 | timekeeping_forward_now(tk); | 1105 | timekeeping_forward_now(tk); |
1009 | timekeeping_suspended = 1; | 1106 | timekeeping_suspended = 1; |
1010 | 1107 | ||
@@ -1014,8 +1111,8 @@ static int timekeeping_suspend(void) | |||
1014 | * try to compensate so the difference in system time | 1111 | * try to compensate so the difference in system time |
1015 | * and persistent_clock time stays close to constant. | 1112 | * and persistent_clock time stays close to constant. |
1016 | */ | 1113 | */ |
1017 | delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time); | 1114 | delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time); |
1018 | delta_delta = timespec_sub(delta, old_delta); | 1115 | delta_delta = timespec64_sub(delta, old_delta); |
1019 | if (abs(delta_delta.tv_sec) >= 2) { | 1116 | if (abs(delta_delta.tv_sec) >= 2) { |
1020 | /* | 1117 | /* |
1021 | * if delta_delta is too large, assume time correction | 1118 | * if delta_delta is too large, assume time correction |
@@ -1025,11 +1122,11 @@ static int timekeeping_suspend(void) | |||
1025 | } else { | 1122 | } else { |
1026 | /* Otherwise try to adjust old_system to compensate */ | 1123 | /* Otherwise try to adjust old_system to compensate */ |
1027 | timekeeping_suspend_time = | 1124 | timekeeping_suspend_time = |
1028 | timespec_add(timekeeping_suspend_time, delta_delta); | 1125 | timespec64_add(timekeeping_suspend_time, delta_delta); |
1029 | } | 1126 | } |
1030 | 1127 | ||
1031 | timekeeping_update(tk, TK_MIRROR); | 1128 | timekeeping_update(tk, TK_MIRROR); |
1032 | write_seqcount_end(&timekeeper_seq); | 1129 | write_seqcount_end(&tk_core.seq); |
1033 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 1130 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
1034 | 1131 | ||
1035 | clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); | 1132 | clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); |
@@ -1262,14 +1359,14 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk) | |||
1262 | /* Figure out if its a leap sec and apply if needed */ | 1359 | /* Figure out if its a leap sec and apply if needed */ |
1263 | leap = second_overflow(tk->xtime_sec); | 1360 | leap = second_overflow(tk->xtime_sec); |
1264 | if (unlikely(leap)) { | 1361 | if (unlikely(leap)) { |
1265 | struct timespec ts; | 1362 | struct timespec64 ts; |
1266 | 1363 | ||
1267 | tk->xtime_sec += leap; | 1364 | tk->xtime_sec += leap; |
1268 | 1365 | ||
1269 | ts.tv_sec = leap; | 1366 | ts.tv_sec = leap; |
1270 | ts.tv_nsec = 0; | 1367 | ts.tv_nsec = 0; |
1271 | tk_set_wall_to_mono(tk, | 1368 | tk_set_wall_to_mono(tk, |
1272 | timespec_sub(tk->wall_to_monotonic, ts)); | 1369 | timespec64_sub(tk->wall_to_monotonic, ts)); |
1273 | 1370 | ||
1274 | __timekeeping_set_tai_offset(tk, tk->tai_offset - leap); | 1371 | __timekeeping_set_tai_offset(tk, tk->tai_offset - leap); |
1275 | 1372 | ||
@@ -1324,33 +1421,6 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, | |||
1324 | return offset; | 1421 | return offset; |
1325 | } | 1422 | } |
1326 | 1423 | ||
1327 | #ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD | ||
1328 | static inline void old_vsyscall_fixup(struct timekeeper *tk) | ||
1329 | { | ||
1330 | s64 remainder; | ||
1331 | |||
1332 | /* | ||
1333 | * Store only full nanoseconds into xtime_nsec after rounding | ||
1334 | * it up and add the remainder to the error difference. | ||
1335 | * XXX - This is necessary to avoid small 1ns inconsistencies caused | ||
1336 | * by truncating the remainder in vsyscalls. However, it causes | ||
1337 | * additional work to be done in timekeeping_adjust(). Once | ||
1338 | * the vsyscall implementations are converted to use xtime_nsec | ||
1339 | * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD | ||
1340 | * users are removed, this can be killed. | ||
1341 | */ | ||
1342 | remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1); | ||
1343 | tk->xtime_nsec -= remainder; | ||
1344 | tk->xtime_nsec += 1ULL << tk->shift; | ||
1345 | tk->ntp_error += remainder << tk->ntp_error_shift; | ||
1346 | tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift; | ||
1347 | } | ||
1348 | #else | ||
1349 | #define old_vsyscall_fixup(tk) | ||
1350 | #endif | ||
1351 | |||
1352 | |||
1353 | |||
1354 | /** | 1424 | /** |
1355 | * update_wall_time - Uses the current clocksource to increment the wall time | 1425 | * update_wall_time - Uses the current clocksource to increment the wall time |
1356 | * | 1426 | * |
@@ -1358,7 +1428,7 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk) | |||
1358 | void update_wall_time(void) | 1428 | void update_wall_time(void) |
1359 | { | 1429 | { |
1360 | struct clocksource *clock; | 1430 | struct clocksource *clock; |
1361 | struct timekeeper *real_tk = &timekeeper; | 1431 | struct timekeeper *real_tk = &tk_core.timekeeper; |
1362 | struct timekeeper *tk = &shadow_timekeeper; | 1432 | struct timekeeper *tk = &shadow_timekeeper; |
1363 | cycle_t offset; | 1433 | cycle_t offset; |
1364 | int shift = 0, maxshift; | 1434 | int shift = 0, maxshift; |
@@ -1418,7 +1488,7 @@ void update_wall_time(void) | |||
1418 | */ | 1488 | */ |
1419 | clock_set |= accumulate_nsecs_to_secs(tk); | 1489 | clock_set |= accumulate_nsecs_to_secs(tk); |
1420 | 1490 | ||
1421 | write_seqcount_begin(&timekeeper_seq); | 1491 | write_seqcount_begin(&tk_core.seq); |
1422 | /* Update clock->cycle_last with the new value */ | 1492 | /* Update clock->cycle_last with the new value */ |
1423 | clock->cycle_last = tk->cycle_last; | 1493 | clock->cycle_last = tk->cycle_last; |
1424 | /* | 1494 | /* |
@@ -1428,12 +1498,12 @@ void update_wall_time(void) | |||
1428 | * requires changes to all other timekeeper usage sites as | 1498 | * requires changes to all other timekeeper usage sites as |
1429 | * well, i.e. move the timekeeper pointer getter into the | 1499 | * well, i.e. move the timekeeper pointer getter into the |
1430 | * spinlocked/seqcount protected sections. And we trade this | 1500 | * spinlocked/seqcount protected sections. And we trade this |
1431 | * memcpy under the timekeeper_seq against one before we start | 1501 | * memcpy under the tk_core.seq against one before we start |
1432 | * updating. | 1502 | * updating. |
1433 | */ | 1503 | */ |
1434 | memcpy(real_tk, tk, sizeof(*tk)); | 1504 | memcpy(real_tk, tk, sizeof(*tk)); |
1435 | timekeeping_update(real_tk, clock_set); | 1505 | timekeeping_update(real_tk, clock_set); |
1436 | write_seqcount_end(&timekeeper_seq); | 1506 | write_seqcount_end(&tk_core.seq); |
1437 | out: | 1507 | out: |
1438 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 1508 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
1439 | if (clock_set) | 1509 | if (clock_set) |
@@ -1454,7 +1524,7 @@ out: | |||
1454 | */ | 1524 | */ |
1455 | void getboottime(struct timespec *ts) | 1525 | void getboottime(struct timespec *ts) |
1456 | { | 1526 | { |
1457 | struct timekeeper *tk = &timekeeper; | 1527 | struct timekeeper *tk = &tk_core.timekeeper; |
1458 | struct timespec boottime = { | 1528 | struct timespec boottime = { |
1459 | .tv_sec = tk->wall_to_monotonic.tv_sec + | 1529 | .tv_sec = tk->wall_to_monotonic.tv_sec + |
1460 | tk->total_sleep_time.tv_sec, | 1530 | tk->total_sleep_time.tv_sec, |
@@ -1477,60 +1547,47 @@ EXPORT_SYMBOL_GPL(getboottime); | |||
1477 | */ | 1547 | */ |
1478 | void get_monotonic_boottime(struct timespec *ts) | 1548 | void get_monotonic_boottime(struct timespec *ts) |
1479 | { | 1549 | { |
1480 | struct timekeeper *tk = &timekeeper; | 1550 | struct timekeeper *tk = &tk_core.timekeeper; |
1481 | struct timespec tomono, sleep; | 1551 | struct timespec64 tomono, sleep, ret; |
1482 | s64 nsec; | 1552 | s64 nsec; |
1483 | unsigned int seq; | 1553 | unsigned int seq; |
1484 | 1554 | ||
1485 | WARN_ON(timekeeping_suspended); | 1555 | WARN_ON(timekeeping_suspended); |
1486 | 1556 | ||
1487 | do { | 1557 | do { |
1488 | seq = read_seqcount_begin(&timekeeper_seq); | 1558 | seq = read_seqcount_begin(&tk_core.seq); |
1489 | ts->tv_sec = tk->xtime_sec; | 1559 | ret.tv_sec = tk->xtime_sec; |
1490 | nsec = timekeeping_get_ns(tk); | 1560 | nsec = timekeeping_get_ns(tk); |
1491 | tomono = tk->wall_to_monotonic; | 1561 | tomono = tk->wall_to_monotonic; |
1492 | sleep = tk->total_sleep_time; | 1562 | sleep = tk->total_sleep_time; |
1493 | 1563 | ||
1494 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 1564 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
1495 | 1565 | ||
1496 | ts->tv_sec += tomono.tv_sec + sleep.tv_sec; | 1566 | ret.tv_sec += tomono.tv_sec + sleep.tv_sec; |
1497 | ts->tv_nsec = 0; | 1567 | ret.tv_nsec = 0; |
1498 | timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec); | 1568 | timespec64_add_ns(&ret, nsec + tomono.tv_nsec + sleep.tv_nsec); |
1569 | *ts = timespec64_to_timespec(ret); | ||
1499 | } | 1570 | } |
1500 | EXPORT_SYMBOL_GPL(get_monotonic_boottime); | 1571 | EXPORT_SYMBOL_GPL(get_monotonic_boottime); |
1501 | 1572 | ||
1502 | /** | 1573 | /** |
1503 | * ktime_get_boottime - Returns monotonic time since boot in a ktime | ||
1504 | * | ||
1505 | * Returns the monotonic time since boot in a ktime | ||
1506 | * | ||
1507 | * This is similar to CLOCK_MONOTONIC/ktime_get, but also | ||
1508 | * includes the time spent in suspend. | ||
1509 | */ | ||
1510 | ktime_t ktime_get_boottime(void) | ||
1511 | { | ||
1512 | struct timespec ts; | ||
1513 | |||
1514 | get_monotonic_boottime(&ts); | ||
1515 | return timespec_to_ktime(ts); | ||
1516 | } | ||
1517 | EXPORT_SYMBOL_GPL(ktime_get_boottime); | ||
1518 | |||
1519 | /** | ||
1520 | * monotonic_to_bootbased - Convert the monotonic time to boot based. | 1574 | * monotonic_to_bootbased - Convert the monotonic time to boot based. |
1521 | * @ts: pointer to the timespec to be converted | 1575 | * @ts: pointer to the timespec to be converted |
1522 | */ | 1576 | */ |
1523 | void monotonic_to_bootbased(struct timespec *ts) | 1577 | void monotonic_to_bootbased(struct timespec *ts) |
1524 | { | 1578 | { |
1525 | struct timekeeper *tk = &timekeeper; | 1579 | struct timekeeper *tk = &tk_core.timekeeper; |
1580 | struct timespec64 ts64; | ||
1526 | 1581 | ||
1527 | *ts = timespec_add(*ts, tk->total_sleep_time); | 1582 | ts64 = timespec_to_timespec64(*ts); |
1583 | ts64 = timespec64_add(ts64, tk->total_sleep_time); | ||
1584 | *ts = timespec64_to_timespec(ts64); | ||
1528 | } | 1585 | } |
1529 | EXPORT_SYMBOL_GPL(monotonic_to_bootbased); | 1586 | EXPORT_SYMBOL_GPL(monotonic_to_bootbased); |
1530 | 1587 | ||
1531 | unsigned long get_seconds(void) | 1588 | unsigned long get_seconds(void) |
1532 | { | 1589 | { |
1533 | struct timekeeper *tk = &timekeeper; | 1590 | struct timekeeper *tk = &tk_core.timekeeper; |
1534 | 1591 | ||
1535 | return tk->xtime_sec; | 1592 | return tk->xtime_sec; |
1536 | } | 1593 | } |
@@ -1538,43 +1595,44 @@ EXPORT_SYMBOL(get_seconds); | |||
1538 | 1595 | ||
1539 | struct timespec __current_kernel_time(void) | 1596 | struct timespec __current_kernel_time(void) |
1540 | { | 1597 | { |
1541 | struct timekeeper *tk = &timekeeper; | 1598 | struct timekeeper *tk = &tk_core.timekeeper; |
1542 | 1599 | ||
1543 | return tk_xtime(tk); | 1600 | return timespec64_to_timespec(tk_xtime(tk)); |
1544 | } | 1601 | } |
1545 | 1602 | ||
1546 | struct timespec current_kernel_time(void) | 1603 | struct timespec current_kernel_time(void) |
1547 | { | 1604 | { |
1548 | struct timekeeper *tk = &timekeeper; | 1605 | struct timekeeper *tk = &tk_core.timekeeper; |
1549 | struct timespec now; | 1606 | struct timespec64 now; |
1550 | unsigned long seq; | 1607 | unsigned long seq; |
1551 | 1608 | ||
1552 | do { | 1609 | do { |
1553 | seq = read_seqcount_begin(&timekeeper_seq); | 1610 | seq = read_seqcount_begin(&tk_core.seq); |
1554 | 1611 | ||
1555 | now = tk_xtime(tk); | 1612 | now = tk_xtime(tk); |
1556 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 1613 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
1557 | 1614 | ||
1558 | return now; | 1615 | return timespec64_to_timespec(now); |
1559 | } | 1616 | } |
1560 | EXPORT_SYMBOL(current_kernel_time); | 1617 | EXPORT_SYMBOL(current_kernel_time); |
1561 | 1618 | ||
1562 | struct timespec get_monotonic_coarse(void) | 1619 | struct timespec get_monotonic_coarse(void) |
1563 | { | 1620 | { |
1564 | struct timekeeper *tk = &timekeeper; | 1621 | struct timekeeper *tk = &tk_core.timekeeper; |
1565 | struct timespec now, mono; | 1622 | struct timespec64 now, mono; |
1566 | unsigned long seq; | 1623 | unsigned long seq; |
1567 | 1624 | ||
1568 | do { | 1625 | do { |
1569 | seq = read_seqcount_begin(&timekeeper_seq); | 1626 | seq = read_seqcount_begin(&tk_core.seq); |
1570 | 1627 | ||
1571 | now = tk_xtime(tk); | 1628 | now = tk_xtime(tk); |
1572 | mono = tk->wall_to_monotonic; | 1629 | mono = tk->wall_to_monotonic; |
1573 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 1630 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
1574 | 1631 | ||
1575 | set_normalized_timespec(&now, now.tv_sec + mono.tv_sec, | 1632 | set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec, |
1576 | now.tv_nsec + mono.tv_nsec); | 1633 | now.tv_nsec + mono.tv_nsec); |
1577 | return now; | 1634 | |
1635 | return timespec64_to_timespec(now); | ||
1578 | } | 1636 | } |
1579 | 1637 | ||
1580 | /* | 1638 | /* |
@@ -1587,29 +1645,38 @@ void do_timer(unsigned long ticks) | |||
1587 | } | 1645 | } |
1588 | 1646 | ||
1589 | /** | 1647 | /** |
1590 | * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic, | 1648 | * ktime_get_update_offsets_tick - hrtimer helper |
1591 | * and sleep offsets. | 1649 | * @offs_real: pointer to storage for monotonic -> realtime offset |
1592 | * @xtim: pointer to timespec to be set with xtime | 1650 | * @offs_boot: pointer to storage for monotonic -> boottime offset |
1593 | * @wtom: pointer to timespec to be set with wall_to_monotonic | 1651 | * @offs_tai: pointer to storage for monotonic -> clock tai offset |
1594 | * @sleep: pointer to timespec to be set with time in suspend | 1652 | * |
1653 | * Returns monotonic time at last tick and various offsets | ||
1595 | */ | 1654 | */ |
1596 | void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, | 1655 | ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot, |
1597 | struct timespec *wtom, struct timespec *sleep) | 1656 | ktime_t *offs_tai) |
1598 | { | 1657 | { |
1599 | struct timekeeper *tk = &timekeeper; | 1658 | struct timekeeper *tk = &tk_core.timekeeper; |
1600 | unsigned long seq; | 1659 | unsigned int seq; |
1660 | ktime_t base; | ||
1661 | u64 nsecs; | ||
1601 | 1662 | ||
1602 | do { | 1663 | do { |
1603 | seq = read_seqcount_begin(&timekeeper_seq); | 1664 | seq = read_seqcount_begin(&tk_core.seq); |
1604 | *xtim = tk_xtime(tk); | 1665 | |
1605 | *wtom = tk->wall_to_monotonic; | 1666 | base = tk->base_mono; |
1606 | *sleep = tk->total_sleep_time; | 1667 | nsecs = tk->xtime_nsec >> tk->shift; |
1607 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 1668 | |
1669 | *offs_real = tk->offs_real; | ||
1670 | *offs_boot = tk->offs_boot; | ||
1671 | *offs_tai = tk->offs_tai; | ||
1672 | } while (read_seqcount_retry(&tk_core.seq, seq)); | ||
1673 | |||
1674 | return ktime_add_ns(base, nsecs); | ||
1608 | } | 1675 | } |
1609 | 1676 | ||
1610 | #ifdef CONFIG_HIGH_RES_TIMERS | 1677 | #ifdef CONFIG_HIGH_RES_TIMERS |
1611 | /** | 1678 | /** |
1612 | * ktime_get_update_offsets - hrtimer helper | 1679 | * ktime_get_update_offsets_now - hrtimer helper |
1613 | * @offs_real: pointer to storage for monotonic -> realtime offset | 1680 | * @offs_real: pointer to storage for monotonic -> realtime offset |
1614 | * @offs_boot: pointer to storage for monotonic -> boottime offset | 1681 | * @offs_boot: pointer to storage for monotonic -> boottime offset |
1615 | * @offs_tai: pointer to storage for monotonic -> clock tai offset | 1682 | * @offs_tai: pointer to storage for monotonic -> clock tai offset |
@@ -1617,57 +1684,37 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, | |||
1617 | * Returns current monotonic time and updates the offsets | 1684 | * Returns current monotonic time and updates the offsets |
1618 | * Called from hrtimer_interrupt() or retrigger_next_event() | 1685 | * Called from hrtimer_interrupt() or retrigger_next_event() |
1619 | */ | 1686 | */ |
1620 | ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot, | 1687 | ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot, |
1621 | ktime_t *offs_tai) | 1688 | ktime_t *offs_tai) |
1622 | { | 1689 | { |
1623 | struct timekeeper *tk = &timekeeper; | 1690 | struct timekeeper *tk = &tk_core.timekeeper; |
1624 | ktime_t now; | ||
1625 | unsigned int seq; | 1691 | unsigned int seq; |
1626 | u64 secs, nsecs; | 1692 | ktime_t base; |
1693 | u64 nsecs; | ||
1627 | 1694 | ||
1628 | do { | 1695 | do { |
1629 | seq = read_seqcount_begin(&timekeeper_seq); | 1696 | seq = read_seqcount_begin(&tk_core.seq); |
1630 | 1697 | ||
1631 | secs = tk->xtime_sec; | 1698 | base = tk->base_mono; |
1632 | nsecs = timekeeping_get_ns(tk); | 1699 | nsecs = timekeeping_get_ns(tk); |
1633 | 1700 | ||
1634 | *offs_real = tk->offs_real; | 1701 | *offs_real = tk->offs_real; |
1635 | *offs_boot = tk->offs_boot; | 1702 | *offs_boot = tk->offs_boot; |
1636 | *offs_tai = tk->offs_tai; | 1703 | *offs_tai = tk->offs_tai; |
1637 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 1704 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
1638 | 1705 | ||
1639 | now = ktime_add_ns(ktime_set(secs, 0), nsecs); | 1706 | return ktime_add_ns(base, nsecs); |
1640 | now = ktime_sub(now, *offs_real); | ||
1641 | return now; | ||
1642 | } | 1707 | } |
1643 | #endif | 1708 | #endif |
1644 | 1709 | ||
1645 | /** | 1710 | /** |
1646 | * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format | ||
1647 | */ | ||
1648 | ktime_t ktime_get_monotonic_offset(void) | ||
1649 | { | ||
1650 | struct timekeeper *tk = &timekeeper; | ||
1651 | unsigned long seq; | ||
1652 | struct timespec wtom; | ||
1653 | |||
1654 | do { | ||
1655 | seq = read_seqcount_begin(&timekeeper_seq); | ||
1656 | wtom = tk->wall_to_monotonic; | ||
1657 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | ||
1658 | |||
1659 | return timespec_to_ktime(wtom); | ||
1660 | } | ||
1661 | EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset); | ||
1662 | |||
1663 | /** | ||
1664 | * do_adjtimex() - Accessor function to NTP __do_adjtimex function | 1711 | * do_adjtimex() - Accessor function to NTP __do_adjtimex function |
1665 | */ | 1712 | */ |
1666 | int do_adjtimex(struct timex *txc) | 1713 | int do_adjtimex(struct timex *txc) |
1667 | { | 1714 | { |
1668 | struct timekeeper *tk = &timekeeper; | 1715 | struct timekeeper *tk = &tk_core.timekeeper; |
1669 | unsigned long flags; | 1716 | unsigned long flags; |
1670 | struct timespec ts; | 1717 | struct timespec64 ts; |
1671 | s32 orig_tai, tai; | 1718 | s32 orig_tai, tai; |
1672 | int ret; | 1719 | int ret; |
1673 | 1720 | ||
@@ -1687,10 +1734,10 @@ int do_adjtimex(struct timex *txc) | |||
1687 | return ret; | 1734 | return ret; |
1688 | } | 1735 | } |
1689 | 1736 | ||
1690 | getnstimeofday(&ts); | 1737 | getnstimeofday64(&ts); |
1691 | 1738 | ||
1692 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 1739 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
1693 | write_seqcount_begin(&timekeeper_seq); | 1740 | write_seqcount_begin(&tk_core.seq); |
1694 | 1741 | ||
1695 | orig_tai = tai = tk->tai_offset; | 1742 | orig_tai = tai = tk->tai_offset; |
1696 | ret = __do_adjtimex(txc, &ts, &tai); | 1743 | ret = __do_adjtimex(txc, &ts, &tai); |
@@ -1699,7 +1746,7 @@ int do_adjtimex(struct timex *txc) | |||
1699 | __timekeeping_set_tai_offset(tk, tai); | 1746 | __timekeeping_set_tai_offset(tk, tai); |
1700 | timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); | 1747 | timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); |
1701 | } | 1748 | } |
1702 | write_seqcount_end(&timekeeper_seq); | 1749 | write_seqcount_end(&tk_core.seq); |
1703 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 1750 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
1704 | 1751 | ||
1705 | if (tai != orig_tai) | 1752 | if (tai != orig_tai) |
@@ -1719,11 +1766,11 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts) | |||
1719 | unsigned long flags; | 1766 | unsigned long flags; |
1720 | 1767 | ||
1721 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 1768 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
1722 | write_seqcount_begin(&timekeeper_seq); | 1769 | write_seqcount_begin(&tk_core.seq); |
1723 | 1770 | ||
1724 | __hardpps(phase_ts, raw_ts); | 1771 | __hardpps(phase_ts, raw_ts); |
1725 | 1772 | ||
1726 | write_seqcount_end(&timekeeper_seq); | 1773 | write_seqcount_end(&tk_core.seq); |
1727 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 1774 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
1728 | } | 1775 | } |
1729 | EXPORT_SYMBOL(hardpps); | 1776 | EXPORT_SYMBOL(hardpps); |
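Note on the timekeeping.c hunks above: they all apply the same conversion — the old timekeeper/timekeeper_seq globals become the tk_core container — while the locking discipline is unchanged (writers hold timekeeper_lock and bump tk_core.seq, readers spin on the seqcount). A minimal reader sketch of that recurring pattern, using only names that appear in the hunks above; the helper name itself is hypothetical:

	/* Illustrative seqcount reader, modelled on the getters above. */
	static u64 read_tk_xtime_sec(void)
	{
		struct timekeeper *tk = &tk_core.timekeeper;
		unsigned int seq;
		u64 val;

		do {
			seq = read_seqcount_begin(&tk_core.seq);
			val = tk->xtime_sec;	/* any field sampled under tk_core.seq */
		} while (read_seqcount_retry(&tk_core.seq, seq));

		return val;
	}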
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h new file mode 100644 index 000000000000..adc1fc98bde3 --- /dev/null +++ b/kernel/time/timekeeping.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef _KERNEL_TIME_TIMEKEEPING_H | ||
2 | #define _KERNEL_TIME_TIMEKEEPING_H | ||
3 | /* | ||
4 | * Internal interfaces for kernel/time/ | ||
5 | */ | ||
6 | extern ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, | ||
7 | ktime_t *offs_boot, | ||
8 | ktime_t *offs_tai); | ||
9 | extern ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, | ||
10 | ktime_t *offs_boot, | ||
11 | ktime_t *offs_tai); | ||
12 | |||
13 | extern int timekeeping_valid_for_hres(void); | ||
14 | extern u64 timekeeping_max_deferment(void); | ||
15 | extern int timekeeping_inject_offset(struct timespec *ts); | ||
16 | extern s32 timekeeping_get_tai_offset(void); | ||
17 | extern void timekeeping_set_tai_offset(s32 tai_offset); | ||
18 | extern void timekeeping_clocktai(struct timespec *ts); | ||
19 | |||
20 | #endif | ||
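The new kernel/time/timekeeping.h keeps these offset helpers internal to kernel/time/. A hedged sketch of how an hrtimer-side caller could consume ktime_get_update_offsets_now(); only the prototype comes from the header above, while the caller and its use of the returned offsets are assumptions:

	#include "timekeeping.h"

	/* Hypothetical caller on the hrtimer-interrupt path. */
	static ktime_t sample_realtime_base(void)
	{
		ktime_t offs_real, offs_boot, offs_tai;
		ktime_t mono;

		/* Monotonic base plus the offsets to realtime, boottime and TAI. */
		mono = ktime_get_update_offsets_now(&offs_real, &offs_boot, &offs_tai);

		/* realtime = monotonic + offs_real */
		return ktime_add(mono, offs_real);
	}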
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c index 4d54f97558df..f6bd65236712 100644 --- a/kernel/time/timekeeping_debug.c +++ b/kernel/time/timekeeping_debug.c | |||
@@ -67,7 +67,7 @@ static int __init tk_debug_sleep_time_init(void) | |||
67 | } | 67 | } |
68 | late_initcall(tk_debug_sleep_time_init); | 68 | late_initcall(tk_debug_sleep_time_init); |
69 | 69 | ||
70 | void tk_debug_account_sleep_time(struct timespec *t) | 70 | void tk_debug_account_sleep_time(struct timespec64 *t) |
71 | { | 71 | { |
72 | sleep_time_bin[fls(t->tv_sec)]++; | 72 | sleep_time_bin[fls(t->tv_sec)]++; |
73 | } | 73 | } |
diff --git a/kernel/time/timekeeping_internal.h b/kernel/time/timekeeping_internal.h index 13323ea08ffa..e3d28ad236f9 100644 --- a/kernel/time/timekeeping_internal.h +++ b/kernel/time/timekeeping_internal.h | |||
@@ -6,7 +6,7 @@ | |||
6 | #include <linux/time.h> | 6 | #include <linux/time.h> |
7 | 7 | ||
8 | #ifdef CONFIG_DEBUG_FS | 8 | #ifdef CONFIG_DEBUG_FS |
9 | extern void tk_debug_account_sleep_time(struct timespec *t); | 9 | extern void tk_debug_account_sleep_time(struct timespec64 *t); |
10 | #else | 10 | #else |
11 | #define tk_debug_account_sleep_time(x) | 11 | #define tk_debug_account_sleep_time(x) |
12 | #endif | 12 | #endif |
diff --git a/kernel/timer.c b/kernel/time/timer.c index 3bb01a323b2a..aca5dfe2fa3d 100644 --- a/kernel/timer.c +++ b/kernel/time/timer.c | |||
@@ -82,6 +82,7 @@ struct tvec_base { | |||
82 | unsigned long next_timer; | 82 | unsigned long next_timer; |
83 | unsigned long active_timers; | 83 | unsigned long active_timers; |
84 | unsigned long all_timers; | 84 | unsigned long all_timers; |
85 | int cpu; | ||
85 | struct tvec_root tv1; | 86 | struct tvec_root tv1; |
86 | struct tvec tv2; | 87 | struct tvec tv2; |
87 | struct tvec tv3; | 88 | struct tvec tv3; |
@@ -409,6 +410,22 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer) | |||
409 | base->next_timer = timer->expires; | 410 | base->next_timer = timer->expires; |
410 | } | 411 | } |
411 | base->all_timers++; | 412 | base->all_timers++; |
413 | |||
414 | /* | ||
415 | * Check whether the other CPU is in dynticks mode and needs | ||
416 | * to be triggered to reevaluate the timer wheel. | ||
417 | * We are protected against the other CPU fiddling | ||
418 | * with the timer by holding the timer base lock. This also | ||
419 | * makes sure that a CPU on the way to stop its tick can not | ||
420 | * evaluate the timer wheel. | ||
421 | * | ||
422 | * Spare the IPI for deferrable timers on idle targets though. | ||
423 | * The next busy ticks will take care of it. Except full dynticks | ||
424 | * require special care against races with idle_cpu(), let's deal | ||
425 | * with that later. | ||
426 | */ | ||
427 | if (!tbase_get_deferrable(base) || tick_nohz_full_cpu(base->cpu)) | ||
428 | wake_up_nohz_cpu(base->cpu); | ||
412 | } | 429 | } |
413 | 430 | ||
414 | #ifdef CONFIG_TIMER_STATS | 431 | #ifdef CONFIG_TIMER_STATS |
@@ -948,22 +965,6 @@ void add_timer_on(struct timer_list *timer, int cpu) | |||
948 | timer_set_base(timer, base); | 965 | timer_set_base(timer, base); |
949 | debug_activate(timer, timer->expires); | 966 | debug_activate(timer, timer->expires); |
950 | internal_add_timer(base, timer); | 967 | internal_add_timer(base, timer); |
951 | /* | ||
952 | * Check whether the other CPU is in dynticks mode and needs | ||
953 | * to be triggered to reevaluate the timer wheel. | ||
954 | * We are protected against the other CPU fiddling | ||
955 | * with the timer by holding the timer base lock. This also | ||
956 | * makes sure that a CPU on the way to stop its tick can not | ||
957 | * evaluate the timer wheel. | ||
958 | * | ||
959 | * Spare the IPI for deferrable timers on idle targets though. | ||
960 | * The next busy ticks will take care of it. Except full dynticks | ||
961 | * require special care against races with idle_cpu(), lets deal | ||
962 | * with that later. | ||
963 | */ | ||
964 | if (!tbase_get_deferrable(timer->base) || tick_nohz_full_cpu(cpu)) | ||
965 | wake_up_nohz_cpu(cpu); | ||
966 | |||
967 | spin_unlock_irqrestore(&base->lock, flags); | 968 | spin_unlock_irqrestore(&base->lock, flags); |
968 | } | 969 | } |
969 | EXPORT_SYMBOL_GPL(add_timer_on); | 970 | EXPORT_SYMBOL_GPL(add_timer_on); |
@@ -1568,6 +1569,7 @@ static int init_timers_cpu(int cpu) | |||
1568 | } | 1569 | } |
1569 | spin_lock_init(&base->lock); | 1570 | spin_lock_init(&base->lock); |
1570 | tvec_base_done[cpu] = 1; | 1571 | tvec_base_done[cpu] = 1; |
1572 | base->cpu = cpu; | ||
1571 | } else { | 1573 | } else { |
1572 | base = per_cpu(tvec_bases, cpu); | 1574 | base = per_cpu(tvec_bases, cpu); |
1573 | } | 1575 | } |
diff --git a/kernel/time/udelay_test.c b/kernel/time/udelay_test.c new file mode 100644 index 000000000000..e622ba365a13 --- /dev/null +++ b/kernel/time/udelay_test.c | |||
@@ -0,0 +1,168 @@ | |||
1 | /* | ||
2 | * udelay() test kernel module | ||
3 | * | ||
4 | * Test is executed by writing and reading to /sys/kernel/debug/udelay_test | ||
5 | * Tests are configured by writing: USECS ITERATIONS | ||
6 | * Tests are executed by reading from the same file. | ||
7 | * Specifying usecs of 0 or negative values will run multiple tests. | ||
8 | * | ||
9 | * Copyright (C) 2014 Google, Inc. | ||
10 | * | ||
11 | * This software is licensed under the terms of the GNU General Public | ||
12 | * License version 2, as published by the Free Software Foundation, and | ||
13 | * may be copied, distributed, and modified under those terms. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | */ | ||
20 | |||
21 | #include <linux/debugfs.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include <linux/ktime.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | |||
27 | #define DEFAULT_ITERATIONS 100 | ||
28 | |||
29 | #define DEBUGFS_FILENAME "udelay_test" | ||
30 | |||
31 | static DEFINE_MUTEX(udelay_test_lock); | ||
32 | static struct dentry *udelay_test_debugfs_file; | ||
33 | static int udelay_test_usecs; | ||
34 | static int udelay_test_iterations = DEFAULT_ITERATIONS; | ||
35 | |||
36 | static int udelay_test_single(struct seq_file *s, int usecs, uint32_t iters) | ||
37 | { | ||
38 | int min = 0, max = 0, fail_count = 0; | ||
39 | uint64_t sum = 0; | ||
40 | uint64_t avg; | ||
41 | int i; | ||
42 | /* Allow udelay to be up to 0.5% fast */ | ||
43 | int allowed_error_ns = usecs * 5; | ||
44 | |||
45 | for (i = 0; i < iters; ++i) { | ||
46 | struct timespec ts1, ts2; | ||
47 | int time_passed; | ||
48 | |||
49 | ktime_get_ts(&ts1); | ||
50 | udelay(usecs); | ||
51 | ktime_get_ts(&ts2); | ||
52 | time_passed = timespec_to_ns(&ts2) - timespec_to_ns(&ts1); | ||
53 | |||
54 | if (i == 0 || time_passed < min) | ||
55 | min = time_passed; | ||
56 | if (i == 0 || time_passed > max) | ||
57 | max = time_passed; | ||
58 | if ((time_passed + allowed_error_ns) / 1000 < usecs) | ||
59 | ++fail_count; | ||
60 | WARN_ON(time_passed < 0); | ||
61 | sum += time_passed; | ||
62 | } | ||
63 | |||
64 | avg = sum; | ||
65 | do_div(avg, iters); | ||
66 | seq_printf(s, "%d usecs x %d: exp=%d allowed=%d min=%d avg=%lld max=%d", | ||
67 | usecs, iters, usecs * 1000, | ||
68 | (usecs * 1000) - allowed_error_ns, min, avg, max); | ||
69 | if (fail_count) | ||
70 | seq_printf(s, " FAIL=%d", fail_count); | ||
71 | seq_puts(s, "\n"); | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | static int udelay_test_show(struct seq_file *s, void *v) | ||
77 | { | ||
78 | int usecs; | ||
79 | int iters; | ||
80 | int ret = 0; | ||
81 | |||
82 | mutex_lock(&udelay_test_lock); | ||
83 | usecs = udelay_test_usecs; | ||
84 | iters = udelay_test_iterations; | ||
85 | mutex_unlock(&udelay_test_lock); | ||
86 | |||
87 | if (usecs > 0 && iters > 0) { | ||
88 | return udelay_test_single(s, usecs, iters); | ||
89 | } else if (usecs == 0) { | ||
90 | struct timespec ts; | ||
91 | |||
92 | ktime_get_ts(&ts); | ||
93 | seq_printf(s, "udelay() test (lpj=%ld kt=%ld.%09ld)\n", | ||
94 | loops_per_jiffy, ts.tv_sec, ts.tv_nsec); | ||
95 | seq_puts(s, "usage:\n"); | ||
96 | seq_puts(s, "echo USECS [ITERS] > " DEBUGFS_FILENAME "\n"); | ||
97 | seq_puts(s, "cat " DEBUGFS_FILENAME "\n"); | ||
98 | } | ||
99 | |||
100 | return ret; | ||
101 | } | ||
102 | |||
103 | static int udelay_test_open(struct inode *inode, struct file *file) | ||
104 | { | ||
105 | return single_open(file, udelay_test_show, inode->i_private); | ||
106 | } | ||
107 | |||
108 | static ssize_t udelay_test_write(struct file *file, const char __user *buf, | ||
109 | size_t count, loff_t *pos) | ||
110 | { | ||
111 | char lbuf[32]; | ||
112 | int ret; | ||
113 | int usecs; | ||
114 | int iters; | ||
115 | |||
116 | if (count >= sizeof(lbuf)) | ||
117 | return -EINVAL; | ||
118 | |||
119 | if (copy_from_user(lbuf, buf, count)) | ||
120 | return -EFAULT; | ||
121 | lbuf[count] = '\0'; | ||
122 | |||
123 | ret = sscanf(lbuf, "%d %d", &usecs, &iters); | ||
124 | if (ret < 1) | ||
125 | return -EINVAL; | ||
126 | else if (ret < 2) | ||
127 | iters = DEFAULT_ITERATIONS; | ||
128 | |||
129 | mutex_lock(&udelay_test_lock); | ||
130 | udelay_test_usecs = usecs; | ||
131 | udelay_test_iterations = iters; | ||
132 | mutex_unlock(&udelay_test_lock); | ||
133 | |||
134 | return count; | ||
135 | } | ||
136 | |||
137 | static const struct file_operations udelay_test_debugfs_ops = { | ||
138 | .owner = THIS_MODULE, | ||
139 | .open = udelay_test_open, | ||
140 | .read = seq_read, | ||
141 | .write = udelay_test_write, | ||
142 | .llseek = seq_lseek, | ||
143 | .release = single_release, | ||
144 | }; | ||
145 | |||
146 | static int __init udelay_test_init(void) | ||
147 | { | ||
148 | mutex_lock(&udelay_test_lock); | ||
149 | udelay_test_debugfs_file = debugfs_create_file(DEBUGFS_FILENAME, | ||
150 | S_IRUSR, NULL, NULL, &udelay_test_debugfs_ops); | ||
151 | mutex_unlock(&udelay_test_lock); | ||
152 | |||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | module_init(udelay_test_init); | ||
157 | |||
158 | static void __exit udelay_test_exit(void) | ||
159 | { | ||
160 | mutex_lock(&udelay_test_lock); | ||
161 | debugfs_remove(udelay_test_debugfs_file); | ||
162 | mutex_unlock(&udelay_test_lock); | ||
163 | } | ||
164 | |||
165 | module_exit(udelay_test_exit); | ||
166 | |||
167 | MODULE_AUTHOR("David Riley <davidriley@chromium.org>"); | ||
168 | MODULE_LICENSE("GPL"); | ||
diff --git a/kernel/tsacct.c b/kernel/tsacct.c index a1dd9a1b1327..975cb49e32bf 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c | |||
@@ -31,20 +31,19 @@ void bacct_add_tsk(struct user_namespace *user_ns, | |||
31 | struct taskstats *stats, struct task_struct *tsk) | 31 | struct taskstats *stats, struct task_struct *tsk) |
32 | { | 32 | { |
33 | const struct cred *tcred; | 33 | const struct cred *tcred; |
34 | struct timespec uptime, ts; | ||
35 | cputime_t utime, stime, utimescaled, stimescaled; | 34 | cputime_t utime, stime, utimescaled, stimescaled; |
36 | u64 ac_etime; | 35 | u64 delta; |
37 | 36 | ||
38 | BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN); | 37 | BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN); |
39 | 38 | ||
40 | /* calculate task elapsed time in timespec */ | 39 | /* calculate task elapsed time in nsec */ |
41 | do_posix_clock_monotonic_gettime(&uptime); | 40 | delta = ktime_get_ns() - tsk->start_time; |
42 | ts = timespec_sub(uptime, tsk->start_time); | 41 | /* Convert to micro seconds */ |
43 | /* rebase elapsed time to usec (should never be negative) */ | 42 | do_div(delta, NSEC_PER_USEC); |
44 | ac_etime = timespec_to_ns(&ts); | 43 | stats->ac_etime = delta; |
45 | do_div(ac_etime, NSEC_PER_USEC); | 44 | /* Convert to seconds for btime */ |
46 | stats->ac_etime = ac_etime; | 45 | do_div(delta, USEC_PER_SEC); |
47 | stats->ac_btime = get_seconds() - ts.tv_sec; | 46 | stats->ac_btime = get_seconds() - delta; |
48 | if (thread_group_leader(tsk)) { | 47 | if (thread_group_leader(tsk)) { |
49 | stats->ac_exitcode = tsk->exit_code; | 48 | stats->ac_exitcode = tsk->exit_code; |
50 | if (tsk->flags & PF_FORKNOEXEC) | 49 | if (tsk->flags & PF_FORKNOEXEC) |
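In the bacct_add_tsk() hunk above, the timespec round-trip is replaced by a plain nanosecond delta that is divided down twice: first to microseconds for ac_etime, then to seconds for ac_btime. A small worked example of that two-stage do_div() conversion; the value is made up for illustration:

	/* 90061000500 ns (~90.06 s) elapsed since task start. */
	u64 delta = 90061000500ULL;

	do_div(delta, NSEC_PER_USEC);	/* delta is now 90061000 us -> ac_etime */
	do_div(delta, USEC_PER_SEC);	/* delta is now 90 s, subtracted from get_seconds() for ac_btime */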
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 7a638aa3545b..24a26ad5c99d 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -1649,6 +1649,15 @@ config TEST_BPF | |||
1649 | 1649 | ||
1650 | If unsure, say N. | 1650 | If unsure, say N. |
1651 | 1651 | ||
1652 | config TEST_UDELAY | ||
1653 | tristate "udelay test driver" | ||
1654 | default n | ||
1655 | help | ||
1656 | This builds the "udelay_test" module that helps to make sure | ||
1657 | that udelay() is working properly. | ||
1658 | |||
1659 | If unsure, say N. | ||
1660 | |||
1652 | source "samples/Kconfig" | 1661 | source "samples/Kconfig" |
1653 | 1662 | ||
1654 | source "lib/Kconfig.kgdb" | 1663 | source "lib/Kconfig.kgdb" |
diff --git a/lib/devres.c b/lib/devres.c index f562bf6ff71d..bb632484a860 100644 --- a/lib/devres.c +++ b/lib/devres.c | |||
@@ -86,8 +86,6 @@ void devm_iounmap(struct device *dev, void __iomem *addr) | |||
86 | } | 86 | } |
87 | EXPORT_SYMBOL(devm_iounmap); | 87 | EXPORT_SYMBOL(devm_iounmap); |
88 | 88 | ||
89 | #define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err) | ||
90 | |||
91 | /** | 89 | /** |
92 | * devm_ioremap_resource() - check, request region, and ioremap resource | 90 | * devm_ioremap_resource() - check, request region, and ioremap resource |
93 | * @dev: generic device to handle the resource for | 91 | * @dev: generic device to handle the resource for |
diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c index c1b00375c9ad..3ffa4f5509d8 100644 --- a/security/tomoyo/audit.c +++ b/security/tomoyo/audit.c | |||
@@ -155,11 +155,9 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r) | |||
155 | u8 i; | 155 | u8 i; |
156 | if (!buffer) | 156 | if (!buffer) |
157 | return NULL; | 157 | return NULL; |
158 | { | 158 | |
159 | struct timeval tv; | 159 | tomoyo_convert_time(get_seconds(), &stamp); |
160 | do_gettimeofday(&tv); | 160 | |
161 | tomoyo_convert_time(tv.tv_sec, &stamp); | ||
162 | } | ||
163 | pos = snprintf(buffer, tomoyo_buffer_len - 1, | 161 | pos = snprintf(buffer, tomoyo_buffer_len - 1, |
164 | "#%04u/%02u/%02u %02u:%02u:%02u# profile=%u mode=%s " | 162 | "#%04u/%02u/%02u %02u:%02u:%02u# profile=%u mode=%s " |
165 | "granted=%s (global-pid=%u) task={ pid=%u ppid=%u " | 163 | "granted=%s (global-pid=%u) task={ pid=%u ppid=%u " |
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index 283862aebdc8..e0fb75052550 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c | |||
@@ -2267,13 +2267,11 @@ static unsigned int tomoyo_stat_modified[TOMOYO_MAX_POLICY_STAT]; | |||
2267 | */ | 2267 | */ |
2268 | void tomoyo_update_stat(const u8 index) | 2268 | void tomoyo_update_stat(const u8 index) |
2269 | { | 2269 | { |
2270 | struct timeval tv; | ||
2271 | do_gettimeofday(&tv); | ||
2272 | /* | 2270 | /* |
2273 | * I don't use atomic operations because race condition is not fatal. | 2271 | * I don't use atomic operations because race condition is not fatal. |
2274 | */ | 2272 | */ |
2275 | tomoyo_stat_updated[index]++; | 2273 | tomoyo_stat_updated[index]++; |
2276 | tomoyo_stat_modified[index] = tv.tv_sec; | 2274 | tomoyo_stat_modified[index] = get_seconds(); |
2277 | } | 2275 | } |
2278 | 2276 | ||
2279 | /** | 2277 | /** |
diff --git a/tools/time/udelay_test.sh b/tools/time/udelay_test.sh new file mode 100755 index 000000000000..12d46b926917 --- /dev/null +++ b/tools/time/udelay_test.sh | |||
@@ -0,0 +1,66 @@ | |||
1 | #!/bin/bash | ||
2 | |||
3 | # udelay() test script | ||
4 | # | ||
5 | # Test is executed by writing and reading to /sys/kernel/debug/udelay_test | ||
6 | # and exercises a variety of delays to ensure that udelay() is delaying | ||
7 | # at least as long as requested (as compared to ktime). | ||
8 | # | ||
9 | # Copyright (C) 2014 Google, Inc. | ||
10 | # | ||
11 | # This software is licensed under the terms of the GNU General Public | ||
12 | # License version 2, as published by the Free Software Foundation, and | ||
13 | # may be copied, distributed, and modified under those terms. | ||
14 | # | ||
15 | # This program is distributed in the hope that it will be useful, | ||
16 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | # GNU General Public License for more details. | ||
19 | |||
20 | MODULE_NAME=udelay_test | ||
21 | UDELAY_PATH=/sys/kernel/debug/udelay_test | ||
22 | |||
23 | setup() | ||
24 | { | ||
25 | /sbin/modprobe -q $MODULE_NAME | ||
26 | tmp_file=`mktemp` | ||
27 | } | ||
28 | |||
29 | test_one() | ||
30 | { | ||
31 | delay=$1 | ||
32 | echo $delay > $UDELAY_PATH | ||
33 | tee -a $tmp_file < $UDELAY_PATH | ||
34 | } | ||
35 | |||
36 | cleanup() | ||
37 | { | ||
38 | if [ -f $tmp_file ]; then | ||
39 | rm $tmp_file | ||
40 | fi | ||
41 | /sbin/modprobe -q -r $MODULE_NAME | ||
42 | } | ||
43 | |||
44 | trap cleanup EXIT | ||
45 | setup | ||
46 | |||
47 | # Delay for a variety of times. | ||
48 | # 1..200, 200..500 (by 10), 500..2000 (by 100) | ||
49 | for (( delay = 1; delay < 200; delay += 1 )); do | ||
50 | test_one $delay | ||
51 | done | ||
52 | for (( delay = 200; delay < 500; delay += 10 )); do | ||
53 | test_one $delay | ||
54 | done | ||
55 | for (( delay = 500; delay <= 2000; delay += 100 )); do | ||
56 | test_one $delay | ||
57 | done | ||
58 | |||
59 | # Search for failures | ||
60 | count=`grep -c FAIL $tmp_file` | ||
61 | if [ $? -eq "0" ]; then | ||
62 | echo "ERROR: $count delays failed to delay long enough" | ||
63 | retcode=1 | ||
64 | fi | ||
65 | |||
66 | exit $retcode | ||