diff options
73 files changed, 1586 insertions, 412 deletions
diff --git a/Documentation/devicetree/bindings/timer/lsi,zevio-timer.txt b/Documentation/devicetree/bindings/timer/lsi,zevio-timer.txt new file mode 100644 index 000000000000..b2d07ad90e9a --- /dev/null +++ b/Documentation/devicetree/bindings/timer/lsi,zevio-timer.txt | |||
@@ -0,0 +1,33 @@ | |||
1 | TI-NSPIRE timer | ||
2 | |||
3 | Required properties: | ||
4 | |||
5 | - compatible : should be "lsi,zevio-timer". | ||
6 | - reg : The physical base address and size of the timer (always first). | ||
7 | - clocks: phandle to the source clock. | ||
8 | |||
9 | Optional properties: | ||
10 | |||
11 | - interrupts : The interrupt number of the first timer. | ||
12 | - reg : The interrupt acknowledgement registers | ||
13 | (always after timer base address) | ||
14 | |||
15 | If any of the optional properties are not given, the timer is added as a | ||
16 | clock-source only. | ||
17 | |||
18 | Example: | ||
19 | |||
20 | timer { | ||
21 | compatible = "lsi,zevio-timer"; | ||
22 | reg = <0x900D0000 0x1000>, <0x900A0020 0x8>; | ||
23 | interrupts = <19>; | ||
24 | clocks = <&timer_clk>; | ||
25 | }; | ||
26 | |||
27 | Example (no clock-events): | ||
28 | |||
29 | timer { | ||
30 | compatible = "lsi,zevio-timer"; | ||
31 | reg = <0x900D0000 0x1000>; | ||
32 | clocks = <&timer_clk>; | ||
33 | }; | ||
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 136f263ed47b..b02e6bbc1b46 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -14,6 +14,7 @@ config ARM | |||
14 | select GENERIC_IRQ_PROBE | 14 | select GENERIC_IRQ_PROBE |
15 | select GENERIC_IRQ_SHOW | 15 | select GENERIC_IRQ_SHOW |
16 | select GENERIC_PCI_IOMAP | 16 | select GENERIC_PCI_IOMAP |
17 | select GENERIC_SCHED_CLOCK | ||
17 | select GENERIC_SMP_IDLE_THREAD | 18 | select GENERIC_SMP_IDLE_THREAD |
18 | select GENERIC_IDLE_POLL_SETUP | 19 | select GENERIC_IDLE_POLL_SETUP |
19 | select GENERIC_STRNCPY_FROM_USER | 20 | select GENERIC_STRNCPY_FROM_USER |
diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c index ddc740769601..023ee63827a2 100644 --- a/arch/arm/common/timer-sp.c +++ b/arch/arm/common/timer-sp.c | |||
@@ -28,8 +28,8 @@ | |||
28 | #include <linux/of.h> | 28 | #include <linux/of.h> |
29 | #include <linux/of_address.h> | 29 | #include <linux/of_address.h> |
30 | #include <linux/of_irq.h> | 30 | #include <linux/of_irq.h> |
31 | #include <linux/sched_clock.h> | ||
31 | 32 | ||
32 | #include <asm/sched_clock.h> | ||
33 | #include <asm/hardware/arm_timer.h> | 33 | #include <asm/hardware/arm_timer.h> |
34 | #include <asm/hardware/timer-sp.h> | 34 | #include <asm/hardware/timer-sp.h> |
35 | 35 | ||
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h index 3d520ddca61b..2389b71a8e7c 100644 --- a/arch/arm/include/asm/sched_clock.h +++ b/arch/arm/include/asm/sched_clock.h | |||
@@ -1,16 +1,4 @@ | |||
1 | /* | 1 | /* You shouldn't include this file. Use linux/sched_clock.h instead. |
2 | * sched_clock.h: support for extending counters to full 64-bit ns counter | 2 | * Temporary file until all asm/sched_clock.h users are gone |
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | 3 | */ |
8 | #ifndef ASM_SCHED_CLOCK | 4 | #include <linux/sched_clock.h> |
9 | #define ASM_SCHED_CLOCK | ||
10 | |||
11 | extern void sched_clock_postinit(void); | ||
12 | extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate); | ||
13 | |||
14 | extern unsigned long long (*sched_clock_func)(void); | ||
15 | |||
16 | #endif | ||
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 5f3338eacad2..97cb0576d07c 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile | |||
@@ -16,7 +16,7 @@ CFLAGS_REMOVE_return_address.o = -pg | |||
16 | # Object file lists. | 16 | # Object file lists. |
17 | 17 | ||
18 | obj-y := elf.o entry-armv.o entry-common.o irq.o opcodes.o \ | 18 | obj-y := elf.o entry-armv.o entry-common.o irq.o opcodes.o \ |
19 | process.o ptrace.o return_address.o sched_clock.o \ | 19 | process.o ptrace.o return_address.o \ |
20 | setup.o signal.o stacktrace.o sys_arm.o time.o traps.o | 20 | setup.o signal.o stacktrace.o sys_arm.o time.o traps.o |
21 | 21 | ||
22 | obj-$(CONFIG_ATAGS) += atags_parse.o | 22 | obj-$(CONFIG_ATAGS) += atags_parse.o |
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c index 59dcdced6e30..221f07b11ccb 100644 --- a/arch/arm/kernel/arch_timer.c +++ b/arch/arm/kernel/arch_timer.c | |||
@@ -11,9 +11,9 @@ | |||
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | #include <linux/sched_clock.h> | ||
14 | 15 | ||
15 | #include <asm/delay.h> | 16 | #include <asm/delay.h> |
16 | #include <asm/sched_clock.h> | ||
17 | 17 | ||
18 | #include <clocksource/arm_arch_timer.h> | 18 | #include <clocksource/arm_arch_timer.h> |
19 | 19 | ||
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index abff4e9aaee0..98aee3258398 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c | |||
@@ -24,9 +24,9 @@ | |||
24 | #include <linux/timer.h> | 24 | #include <linux/timer.h> |
25 | #include <linux/clocksource.h> | 25 | #include <linux/clocksource.h> |
26 | #include <linux/irq.h> | 26 | #include <linux/irq.h> |
27 | #include <linux/sched_clock.h> | ||
27 | 28 | ||
28 | #include <asm/thread_info.h> | 29 | #include <asm/thread_info.h> |
29 | #include <asm/sched_clock.h> | ||
30 | #include <asm/stacktrace.h> | 30 | #include <asm/stacktrace.h> |
31 | #include <asm/mach/arch.h> | 31 | #include <asm/mach/arch.h> |
32 | #include <asm/mach/time.h> | 32 | #include <asm/mach/time.h> |
@@ -120,6 +120,4 @@ void __init time_init(void) | |||
120 | machine_desc->init_time(); | 120 | machine_desc->init_time(); |
121 | else | 121 | else |
122 | clocksource_of_init(); | 122 | clocksource_of_init(); |
123 | |||
124 | sched_clock_postinit(); | ||
125 | } | 123 | } |
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c index bad361ec1666..7a55b5c95971 100644 --- a/arch/arm/mach-davinci/time.c +++ b/arch/arm/mach-davinci/time.c | |||
@@ -18,8 +18,8 @@ | |||
18 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
19 | #include <linux/err.h> | 19 | #include <linux/err.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/sched_clock.h> | ||
21 | 22 | ||
22 | #include <asm/sched_clock.h> | ||
23 | #include <asm/mach/irq.h> | 23 | #include <asm/mach/irq.h> |
24 | #include <asm/mach/time.h> | 24 | #include <asm/mach/time.h> |
25 | 25 | ||
diff --git a/arch/arm/mach-imx/time.c b/arch/arm/mach-imx/time.c index fea91313678b..cd46529e9eaa 100644 --- a/arch/arm/mach-imx/time.c +++ b/arch/arm/mach-imx/time.c | |||
@@ -26,8 +26,8 @@ | |||
26 | #include <linux/clockchips.h> | 26 | #include <linux/clockchips.h> |
27 | #include <linux/clk.h> | 27 | #include <linux/clk.h> |
28 | #include <linux/err.h> | 28 | #include <linux/err.h> |
29 | #include <linux/sched_clock.h> | ||
29 | 30 | ||
30 | #include <asm/sched_clock.h> | ||
31 | #include <asm/mach/time.h> | 31 | #include <asm/mach/time.h> |
32 | 32 | ||
33 | #include "common.h" | 33 | #include "common.h" |
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c index b23c8e4f28e8..aa4346227c41 100644 --- a/arch/arm/mach-integrator/integrator_ap.c +++ b/arch/arm/mach-integrator/integrator_ap.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/stat.h> | 41 | #include <linux/stat.h> |
42 | #include <linux/sys_soc.h> | 42 | #include <linux/sys_soc.h> |
43 | #include <linux/termios.h> | 43 | #include <linux/termios.h> |
44 | #include <linux/sched_clock.h> | ||
44 | #include <video/vga.h> | 45 | #include <video/vga.h> |
45 | 46 | ||
46 | #include <mach/hardware.h> | 47 | #include <mach/hardware.h> |
@@ -49,7 +50,6 @@ | |||
49 | #include <asm/setup.h> | 50 | #include <asm/setup.h> |
50 | #include <asm/param.h> /* HZ */ | 51 | #include <asm/param.h> /* HZ */ |
51 | #include <asm/mach-types.h> | 52 | #include <asm/mach-types.h> |
52 | #include <asm/sched_clock.h> | ||
53 | 53 | ||
54 | #include <mach/lm.h> | 54 | #include <mach/lm.h> |
55 | #include <mach/irqs.h> | 55 | #include <mach/irqs.h> |
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index 6600cff6bd92..58307cff1f18 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/export.h> | 30 | #include <linux/export.h> |
31 | #include <linux/gpio.h> | 31 | #include <linux/gpio.h> |
32 | #include <linux/cpu.h> | 32 | #include <linux/cpu.h> |
33 | #include <linux/sched_clock.h> | ||
33 | 34 | ||
34 | #include <mach/udc.h> | 35 | #include <mach/udc.h> |
35 | #include <mach/hardware.h> | 36 | #include <mach/hardware.h> |
@@ -38,7 +39,6 @@ | |||
38 | #include <asm/pgtable.h> | 39 | #include <asm/pgtable.h> |
39 | #include <asm/page.h> | 40 | #include <asm/page.h> |
40 | #include <asm/irq.h> | 41 | #include <asm/irq.h> |
41 | #include <asm/sched_clock.h> | ||
42 | #include <asm/system_misc.h> | 42 | #include <asm/system_misc.h> |
43 | 43 | ||
44 | #include <asm/mach/map.h> | 44 | #include <asm/mach/map.h> |
diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c index 86a18b3d252e..7ac41e83cfef 100644 --- a/arch/arm/mach-mmp/time.c +++ b/arch/arm/mach-mmp/time.c | |||
@@ -28,8 +28,8 @@ | |||
28 | #include <linux/of.h> | 28 | #include <linux/of.h> |
29 | #include <linux/of_address.h> | 29 | #include <linux/of_address.h> |
30 | #include <linux/of_irq.h> | 30 | #include <linux/of_irq.h> |
31 | #include <linux/sched_clock.h> | ||
31 | 32 | ||
32 | #include <asm/sched_clock.h> | ||
33 | #include <mach/addr-map.h> | 33 | #include <mach/addr-map.h> |
34 | #include <mach/regs-timers.h> | 34 | #include <mach/regs-timers.h> |
35 | #include <mach/regs-apbc.h> | 35 | #include <mach/regs-apbc.h> |
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 284313f3e02c..b6418fd5fe0d 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c | |||
@@ -23,10 +23,10 @@ | |||
23 | #include <linux/of.h> | 23 | #include <linux/of.h> |
24 | #include <linux/of_address.h> | 24 | #include <linux/of_address.h> |
25 | #include <linux/of_irq.h> | 25 | #include <linux/of_irq.h> |
26 | #include <linux/sched_clock.h> | ||
26 | 27 | ||
27 | #include <asm/mach/time.h> | 28 | #include <asm/mach/time.h> |
28 | #include <asm/localtimer.h> | 29 | #include <asm/localtimer.h> |
29 | #include <asm/sched_clock.h> | ||
30 | 30 | ||
31 | #include "common.h" | 31 | #include "common.h" |
32 | 32 | ||
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c index 726ec23d29c7..80603d2fef77 100644 --- a/arch/arm/mach-omap1/time.c +++ b/arch/arm/mach-omap1/time.c | |||
@@ -43,9 +43,9 @@ | |||
43 | #include <linux/clocksource.h> | 43 | #include <linux/clocksource.h> |
44 | #include <linux/clockchips.h> | 44 | #include <linux/clockchips.h> |
45 | #include <linux/io.h> | 45 | #include <linux/io.h> |
46 | #include <linux/sched_clock.h> | ||
46 | 47 | ||
47 | #include <asm/irq.h> | 48 | #include <asm/irq.h> |
48 | #include <asm/sched_clock.h> | ||
49 | 49 | ||
50 | #include <mach/hardware.h> | 50 | #include <mach/hardware.h> |
51 | #include <asm/mach/irq.h> | 51 | #include <asm/mach/irq.h> |
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index f8b23b8040d9..4c069b0cab21 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c | |||
@@ -41,10 +41,10 @@ | |||
41 | #include <linux/of_irq.h> | 41 | #include <linux/of_irq.h> |
42 | #include <linux/platform_device.h> | 42 | #include <linux/platform_device.h> |
43 | #include <linux/platform_data/dmtimer-omap.h> | 43 | #include <linux/platform_data/dmtimer-omap.h> |
44 | #include <linux/sched_clock.h> | ||
44 | 45 | ||
45 | #include <asm/mach/time.h> | 46 | #include <asm/mach/time.h> |
46 | #include <asm/smp_twd.h> | 47 | #include <asm/smp_twd.h> |
47 | #include <asm/sched_clock.h> | ||
48 | 48 | ||
49 | #include "omap_hwmod.h" | 49 | #include "omap_hwmod.h" |
50 | #include "omap_device.h" | 50 | #include "omap_device.h" |
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c index 8f1ee92aea30..9aa852a8fab9 100644 --- a/arch/arm/mach-pxa/time.c +++ b/arch/arm/mach-pxa/time.c | |||
@@ -16,11 +16,11 @@ | |||
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/clockchips.h> | 18 | #include <linux/clockchips.h> |
19 | #include <linux/sched_clock.h> | ||
19 | 20 | ||
20 | #include <asm/div64.h> | 21 | #include <asm/div64.h> |
21 | #include <asm/mach/irq.h> | 22 | #include <asm/mach/irq.h> |
22 | #include <asm/mach/time.h> | 23 | #include <asm/mach/time.h> |
23 | #include <asm/sched_clock.h> | ||
24 | #include <mach/regs-ost.h> | 24 | #include <mach/regs-ost.h> |
25 | #include <mach/irqs.h> | 25 | #include <mach/irqs.h> |
26 | 26 | ||
diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c index a59a13a665a6..713c86cd3d64 100644 --- a/arch/arm/mach-sa1100/time.c +++ b/arch/arm/mach-sa1100/time.c | |||
@@ -14,9 +14,9 @@ | |||
14 | #include <linux/irq.h> | 14 | #include <linux/irq.h> |
15 | #include <linux/timex.h> | 15 | #include <linux/timex.h> |
16 | #include <linux/clockchips.h> | 16 | #include <linux/clockchips.h> |
17 | #include <linux/sched_clock.h> | ||
17 | 18 | ||
18 | #include <asm/mach/time.h> | 19 | #include <asm/mach/time.h> |
19 | #include <asm/sched_clock.h> | ||
20 | #include <mach/hardware.h> | 20 | #include <mach/hardware.h> |
21 | #include <mach/irqs.h> | 21 | #include <mach/irqs.h> |
22 | 22 | ||
diff --git a/arch/arm/mach-u300/timer.c b/arch/arm/mach-u300/timer.c index d9e73209c9b8..af771b76fe1c 100644 --- a/arch/arm/mach-u300/timer.c +++ b/arch/arm/mach-u300/timer.c | |||
@@ -18,12 +18,12 @@ | |||
18 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
19 | #include <linux/err.h> | 19 | #include <linux/err.h> |
20 | #include <linux/irq.h> | 20 | #include <linux/irq.h> |
21 | #include <linux/sched_clock.h> | ||
21 | 22 | ||
22 | #include <mach/hardware.h> | 23 | #include <mach/hardware.h> |
23 | #include <mach/irqs.h> | 24 | #include <mach/irqs.h> |
24 | 25 | ||
25 | /* Generic stuff */ | 26 | /* Generic stuff */ |
26 | #include <asm/sched_clock.h> | ||
27 | #include <asm/mach/map.h> | 27 | #include <asm/mach/map.h> |
28 | #include <asm/mach/time.h> | 28 | #include <asm/mach/time.h> |
29 | 29 | ||
diff --git a/arch/arm/plat-iop/time.c b/arch/arm/plat-iop/time.c index 837a2d52e9db..29606bd75f3f 100644 --- a/arch/arm/plat-iop/time.c +++ b/arch/arm/plat-iop/time.c | |||
@@ -22,9 +22,9 @@ | |||
22 | #include <linux/clocksource.h> | 22 | #include <linux/clocksource.h> |
23 | #include <linux/clockchips.h> | 23 | #include <linux/clockchips.h> |
24 | #include <linux/export.h> | 24 | #include <linux/export.h> |
25 | #include <linux/sched_clock.h> | ||
25 | #include <mach/hardware.h> | 26 | #include <mach/hardware.h> |
26 | #include <asm/irq.h> | 27 | #include <asm/irq.h> |
27 | #include <asm/sched_clock.h> | ||
28 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
29 | #include <asm/mach/irq.h> | 29 | #include <asm/mach/irq.h> |
30 | #include <asm/mach/time.h> | 30 | #include <asm/mach/time.h> |
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c index 5b0b86bb34bb..d9bc98eb2a6b 100644 --- a/arch/arm/plat-omap/counter_32k.c +++ b/arch/arm/plat-omap/counter_32k.c | |||
@@ -18,9 +18,9 @@ | |||
18 | #include <linux/err.h> | 18 | #include <linux/err.h> |
19 | #include <linux/io.h> | 19 | #include <linux/io.h> |
20 | #include <linux/clocksource.h> | 20 | #include <linux/clocksource.h> |
21 | #include <linux/sched_clock.h> | ||
21 | 22 | ||
22 | #include <asm/mach/time.h> | 23 | #include <asm/mach/time.h> |
23 | #include <asm/sched_clock.h> | ||
24 | 24 | ||
25 | #include <plat/counter-32k.h> | 25 | #include <plat/counter-32k.h> |
26 | 26 | ||
diff --git a/arch/arm/plat-orion/time.c b/arch/arm/plat-orion/time.c index 5d5ac0f05422..9d2b2ac74938 100644 --- a/arch/arm/plat-orion/time.c +++ b/arch/arm/plat-orion/time.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/clockchips.h> | 16 | #include <linux/clockchips.h> |
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/irq.h> | 18 | #include <linux/irq.h> |
19 | #include <asm/sched_clock.h> | 19 | #include <linux/sched_clock.h> |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * MBus bridge block registers. | 22 | * MBus bridge block registers. |
diff --git a/arch/arm/plat-samsung/samsung-time.c b/arch/arm/plat-samsung/samsung-time.c index f899cbc9b288..2957075ca836 100644 --- a/arch/arm/plat-samsung/samsung-time.c +++ b/arch/arm/plat-samsung/samsung-time.c | |||
@@ -15,12 +15,12 @@ | |||
15 | #include <linux/clk.h> | 15 | #include <linux/clk.h> |
16 | #include <linux/clockchips.h> | 16 | #include <linux/clockchips.h> |
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/sched_clock.h> | ||
18 | 19 | ||
19 | #include <asm/smp_twd.h> | 20 | #include <asm/smp_twd.h> |
20 | #include <asm/mach/time.h> | 21 | #include <asm/mach/time.h> |
21 | #include <asm/mach/arch.h> | 22 | #include <asm/mach/arch.h> |
22 | #include <asm/mach/map.h> | 23 | #include <asm/mach/map.h> |
23 | #include <asm/sched_clock.h> | ||
24 | 24 | ||
25 | #include <mach/map.h> | 25 | #include <mach/map.h> |
26 | #include <plat/devs.h> | 26 | #include <plat/devs.h> |
diff --git a/arch/arm/plat-versatile/sched-clock.c b/arch/arm/plat-versatile/sched-clock.c index b33b74c87232..51b109e3b6c3 100644 --- a/arch/arm/plat-versatile/sched-clock.c +++ b/arch/arm/plat-versatile/sched-clock.c | |||
@@ -20,8 +20,8 @@ | |||
20 | */ | 20 | */ |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/sched_clock.h> | ||
23 | 24 | ||
24 | #include <asm/sched_clock.h> | ||
25 | #include <plat/sched_clock.h> | 25 | #include <plat/sched_clock.h> |
26 | 26 | ||
27 | static void __iomem *ctr; | 27 | static void __iomem *ctr; |
diff --git a/arch/x86/include/asm/mc146818rtc.h b/arch/x86/include/asm/mc146818rtc.h index d354fb781c57..a55c7efcc4ed 100644 --- a/arch/x86/include/asm/mc146818rtc.h +++ b/arch/x86/include/asm/mc146818rtc.h | |||
@@ -95,8 +95,8 @@ static inline unsigned char current_lock_cmos_reg(void) | |||
95 | unsigned char rtc_cmos_read(unsigned char addr); | 95 | unsigned char rtc_cmos_read(unsigned char addr); |
96 | void rtc_cmos_write(unsigned char val, unsigned char addr); | 96 | void rtc_cmos_write(unsigned char val, unsigned char addr); |
97 | 97 | ||
98 | extern int mach_set_rtc_mmss(unsigned long nowtime); | 98 | extern int mach_set_rtc_mmss(const struct timespec *now); |
99 | extern unsigned long mach_get_cmos_time(void); | 99 | extern void mach_get_cmos_time(struct timespec *now); |
100 | 100 | ||
101 | #define RTC_IRQ 8 | 101 | #define RTC_IRQ 8 |
102 | 102 | ||
diff --git a/arch/x86/include/asm/mrst-vrtc.h b/arch/x86/include/asm/mrst-vrtc.h index 73668abdbedf..1e69a75412a4 100644 --- a/arch/x86/include/asm/mrst-vrtc.h +++ b/arch/x86/include/asm/mrst-vrtc.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | extern unsigned char vrtc_cmos_read(unsigned char reg); | 4 | extern unsigned char vrtc_cmos_read(unsigned char reg); |
5 | extern void vrtc_cmos_write(unsigned char val, unsigned char reg); | 5 | extern void vrtc_cmos_write(unsigned char val, unsigned char reg); |
6 | extern unsigned long vrtc_get_time(void); | 6 | extern void vrtc_get_time(struct timespec *now); |
7 | extern int vrtc_set_mmss(unsigned long nowtime); | 7 | extern int vrtc_set_mmss(const struct timespec *now); |
8 | 8 | ||
9 | #endif | 9 | #endif |
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index d8d99222b36a..828a1565ba57 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h | |||
@@ -142,6 +142,8 @@ struct x86_cpuinit_ops { | |||
142 | void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node); | 142 | void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node); |
143 | }; | 143 | }; |
144 | 144 | ||
145 | struct timespec; | ||
146 | |||
145 | /** | 147 | /** |
146 | * struct x86_platform_ops - platform specific runtime functions | 148 | * struct x86_platform_ops - platform specific runtime functions |
147 | * @calibrate_tsc: calibrate TSC | 149 | * @calibrate_tsc: calibrate TSC |
@@ -156,8 +158,8 @@ struct x86_cpuinit_ops { | |||
156 | */ | 158 | */ |
157 | struct x86_platform_ops { | 159 | struct x86_platform_ops { |
158 | unsigned long (*calibrate_tsc)(void); | 160 | unsigned long (*calibrate_tsc)(void); |
159 | unsigned long (*get_wallclock)(void); | 161 | void (*get_wallclock)(struct timespec *ts); |
160 | int (*set_wallclock)(unsigned long nowtime); | 162 | int (*set_wallclock)(const struct timespec *ts); |
161 | void (*iommu_shutdown)(void); | 163 | void (*iommu_shutdown)(void); |
162 | bool (*is_untracked_pat_range)(u64 start, u64 end); | 164 | bool (*is_untracked_pat_range)(u64 start, u64 end); |
163 | void (*nmi_init)(void); | 165 | void (*nmi_init)(void); |
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 3dd37ebd591b..1f354f4b602b 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c | |||
@@ -48,10 +48,9 @@ static struct pvclock_wall_clock wall_clock; | |||
48 | * have elapsed since the hypervisor wrote the data. So we try to account for | 48 | * have elapsed since the hypervisor wrote the data. So we try to account for |
49 | * that with system time | 49 | * that with system time |
50 | */ | 50 | */ |
51 | static unsigned long kvm_get_wallclock(void) | 51 | static void kvm_get_wallclock(struct timespec *now) |
52 | { | 52 | { |
53 | struct pvclock_vcpu_time_info *vcpu_time; | 53 | struct pvclock_vcpu_time_info *vcpu_time; |
54 | struct timespec ts; | ||
55 | int low, high; | 54 | int low, high; |
56 | int cpu; | 55 | int cpu; |
57 | 56 | ||
@@ -64,14 +63,12 @@ static unsigned long kvm_get_wallclock(void) | |||
64 | cpu = smp_processor_id(); | 63 | cpu = smp_processor_id(); |
65 | 64 | ||
66 | vcpu_time = &hv_clock[cpu].pvti; | 65 | vcpu_time = &hv_clock[cpu].pvti; |
67 | pvclock_read_wallclock(&wall_clock, vcpu_time, &ts); | 66 | pvclock_read_wallclock(&wall_clock, vcpu_time, now); |
68 | 67 | ||
69 | preempt_enable(); | 68 | preempt_enable(); |
70 | |||
71 | return ts.tv_sec; | ||
72 | } | 69 | } |
73 | 70 | ||
74 | static int kvm_set_wallclock(unsigned long now) | 71 | static int kvm_set_wallclock(const struct timespec *now) |
75 | { | 72 | { |
76 | return -1; | 73 | return -1; |
77 | } | 74 | } |
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 198eb201ed3b..0aa29394ed6f 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c | |||
@@ -38,8 +38,9 @@ EXPORT_SYMBOL(rtc_lock); | |||
38 | * jump to the next second precisely 500 ms later. Check the Motorola | 38 | * jump to the next second precisely 500 ms later. Check the Motorola |
39 | * MC146818A or Dallas DS12887 data sheet for details. | 39 | * MC146818A or Dallas DS12887 data sheet for details. |
40 | */ | 40 | */ |
41 | int mach_set_rtc_mmss(unsigned long nowtime) | 41 | int mach_set_rtc_mmss(const struct timespec *now) |
42 | { | 42 | { |
43 | unsigned long nowtime = now->tv_sec; | ||
43 | struct rtc_time tm; | 44 | struct rtc_time tm; |
44 | int retval = 0; | 45 | int retval = 0; |
45 | 46 | ||
@@ -58,7 +59,7 @@ int mach_set_rtc_mmss(unsigned long nowtime) | |||
58 | return retval; | 59 | return retval; |
59 | } | 60 | } |
60 | 61 | ||
61 | unsigned long mach_get_cmos_time(void) | 62 | void mach_get_cmos_time(struct timespec *now) |
62 | { | 63 | { |
63 | unsigned int status, year, mon, day, hour, min, sec, century = 0; | 64 | unsigned int status, year, mon, day, hour, min, sec, century = 0; |
64 | unsigned long flags; | 65 | unsigned long flags; |
@@ -107,7 +108,8 @@ unsigned long mach_get_cmos_time(void) | |||
107 | } else | 108 | } else |
108 | year += CMOS_YEARS_OFFS; | 109 | year += CMOS_YEARS_OFFS; |
109 | 110 | ||
110 | return mktime(year, mon, day, hour, min, sec); | 111 | now->tv_sec = mktime(year, mon, day, hour, min, sec); |
112 | now->tv_nsec = 0; | ||
111 | } | 113 | } |
112 | 114 | ||
113 | /* Routines for accessing the CMOS RAM/RTC. */ | 115 | /* Routines for accessing the CMOS RAM/RTC. */ |
@@ -135,18 +137,13 @@ EXPORT_SYMBOL(rtc_cmos_write); | |||
135 | 137 | ||
136 | int update_persistent_clock(struct timespec now) | 138 | int update_persistent_clock(struct timespec now) |
137 | { | 139 | { |
138 | return x86_platform.set_wallclock(now.tv_sec); | 140 | return x86_platform.set_wallclock(&now); |
139 | } | 141 | } |
140 | 142 | ||
141 | /* not static: needed by APM */ | 143 | /* not static: needed by APM */ |
142 | void read_persistent_clock(struct timespec *ts) | 144 | void read_persistent_clock(struct timespec *ts) |
143 | { | 145 | { |
144 | unsigned long retval; | 146 | x86_platform.get_wallclock(ts); |
145 | |||
146 | retval = x86_platform.get_wallclock(); | ||
147 | |||
148 | ts->tv_sec = retval; | ||
149 | ts->tv_nsec = 0; | ||
150 | } | 147 | } |
151 | 148 | ||
152 | 149 | ||
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 7114c63f047d..8424d5adcfa2 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -882,9 +882,9 @@ int lguest_setup_irq(unsigned int irq) | |||
882 | * It would be far better for everyone if the Guest had its own clock, but | 882 | * It would be far better for everyone if the Guest had its own clock, but |
883 | * until then the Host gives us the time on every interrupt. | 883 | * until then the Host gives us the time on every interrupt. |
884 | */ | 884 | */ |
885 | static unsigned long lguest_get_wallclock(void) | 885 | static void lguest_get_wallclock(struct timespec *now) |
886 | { | 886 | { |
887 | return lguest_data.time.tv_sec; | 887 | *now = lguest_data.time; |
888 | } | 888 | } |
889 | 889 | ||
890 | /* | 890 | /* |
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index d2fbcedcf6ea..90f6ed127096 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -274,8 +274,9 @@ static efi_status_t __init phys_efi_get_time(efi_time_t *tm, | |||
274 | return status; | 274 | return status; |
275 | } | 275 | } |
276 | 276 | ||
277 | int efi_set_rtc_mmss(unsigned long nowtime) | 277 | int efi_set_rtc_mmss(const struct timespec *now) |
278 | { | 278 | { |
279 | unsigned long nowtime = now->tv_sec; | ||
279 | efi_status_t status; | 280 | efi_status_t status; |
280 | efi_time_t eft; | 281 | efi_time_t eft; |
281 | efi_time_cap_t cap; | 282 | efi_time_cap_t cap; |
@@ -310,7 +311,7 @@ int efi_set_rtc_mmss(unsigned long nowtime) | |||
310 | return 0; | 311 | return 0; |
311 | } | 312 | } |
312 | 313 | ||
313 | unsigned long efi_get_time(void) | 314 | void efi_get_time(struct timespec *now) |
314 | { | 315 | { |
315 | efi_status_t status; | 316 | efi_status_t status; |
316 | efi_time_t eft; | 317 | efi_time_t eft; |
@@ -320,8 +321,9 @@ unsigned long efi_get_time(void) | |||
320 | if (status != EFI_SUCCESS) | 321 | if (status != EFI_SUCCESS) |
321 | pr_err("Oops: efitime: can't read time!\n"); | 322 | pr_err("Oops: efitime: can't read time!\n"); |
322 | 323 | ||
323 | return mktime(eft.year, eft.month, eft.day, eft.hour, | 324 | now->tv_sec = mktime(eft.year, eft.month, eft.day, eft.hour, |
324 | eft.minute, eft.second); | 325 | eft.minute, eft.second); |
326 | now->tv_nsec = 0; | ||
325 | } | 327 | } |
326 | 328 | ||
327 | /* | 329 | /* |
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c index d62b0a3b5c14..5e355b134ba4 100644 --- a/arch/x86/platform/mrst/vrtc.c +++ b/arch/x86/platform/mrst/vrtc.c | |||
@@ -56,7 +56,7 @@ void vrtc_cmos_write(unsigned char val, unsigned char reg) | |||
56 | } | 56 | } |
57 | EXPORT_SYMBOL_GPL(vrtc_cmos_write); | 57 | EXPORT_SYMBOL_GPL(vrtc_cmos_write); |
58 | 58 | ||
59 | unsigned long vrtc_get_time(void) | 59 | void vrtc_get_time(struct timespec *now) |
60 | { | 60 | { |
61 | u8 sec, min, hour, mday, mon; | 61 | u8 sec, min, hour, mday, mon; |
62 | unsigned long flags; | 62 | unsigned long flags; |
@@ -82,17 +82,18 @@ unsigned long vrtc_get_time(void) | |||
82 | printk(KERN_INFO "vRTC: sec: %d min: %d hour: %d day: %d " | 82 | printk(KERN_INFO "vRTC: sec: %d min: %d hour: %d day: %d " |
83 | "mon: %d year: %d\n", sec, min, hour, mday, mon, year); | 83 | "mon: %d year: %d\n", sec, min, hour, mday, mon, year); |
84 | 84 | ||
85 | return mktime(year, mon, mday, hour, min, sec); | 85 | now->tv_sec = mktime(year, mon, mday, hour, min, sec); |
86 | now->tv_nsec = 0; | ||
86 | } | 87 | } |
87 | 88 | ||
88 | int vrtc_set_mmss(unsigned long nowtime) | 89 | int vrtc_set_mmss(const struct timespec *now) |
89 | { | 90 | { |
90 | unsigned long flags; | 91 | unsigned long flags; |
91 | struct rtc_time tm; | 92 | struct rtc_time tm; |
92 | int year; | 93 | int year; |
93 | int retval = 0; | 94 | int retval = 0; |
94 | 95 | ||
95 | rtc_time_to_tm(nowtime, &tm); | 96 | rtc_time_to_tm(now->tv_sec, &tm); |
96 | if (!rtc_valid_tm(&tm) && tm.tm_year >= 72) { | 97 | if (!rtc_valid_tm(&tm) && tm.tm_year >= 72) { |
97 | /* | 98 | /* |
98 | * tm.year is the number of years since 1900, and the | 99 | * tm.year is the number of years since 1900, and the |
@@ -110,7 +111,7 @@ int vrtc_set_mmss(unsigned long nowtime) | |||
110 | } else { | 111 | } else { |
111 | printk(KERN_ERR | 112 | printk(KERN_ERR |
112 | "%s: Invalid vRTC value: write of %lx to vRTC failed\n", | 113 | "%s: Invalid vRTC value: write of %lx to vRTC failed\n", |
113 | __FUNCTION__, nowtime); | 114 | __FUNCTION__, now->tv_sec); |
114 | retval = -EINVAL; | 115 | retval = -EINVAL; |
115 | } | 116 | } |
116 | return retval; | 117 | return retval; |
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 3d88bfdf9e1c..7a5671b4fec6 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/kernel_stat.h> | 14 | #include <linux/kernel_stat.h> |
15 | #include <linux/math64.h> | 15 | #include <linux/math64.h> |
16 | #include <linux/gfp.h> | 16 | #include <linux/gfp.h> |
17 | #include <linux/pvclock_gtod.h> | ||
17 | 18 | ||
18 | #include <asm/pvclock.h> | 19 | #include <asm/pvclock.h> |
19 | #include <asm/xen/hypervisor.h> | 20 | #include <asm/xen/hypervisor.h> |
@@ -191,34 +192,56 @@ static void xen_read_wallclock(struct timespec *ts) | |||
191 | put_cpu_var(xen_vcpu); | 192 | put_cpu_var(xen_vcpu); |
192 | } | 193 | } |
193 | 194 | ||
/* x86_platform get_wallclock hook: fetch wall time via the shared info. */
static void xen_get_wallclock(struct timespec *now)
{
	xen_read_wallclock(now);
}

/*
 * x86_platform set_wallclock hook.  Always fails: a domU has no RTC to
 * write, and dom0 keeps the native handler (see xen_init_time_ops()).
 */
static int xen_set_wallclock(const struct timespec *now)
{
	return -1;
}
201 | 204 | ||
/*
 * pvclock_gtod notifier: push the kernel's wall time to the hypervisor
 * via XENPF_settime.
 *
 * @was_set: non-zero when the clock was explicitly set; the hypercall is
 *           then issued unconditionally.  Otherwise updates are
 *           rate-limited to one per 11 minutes, mirroring the
 *           sync_cmos_clock() cadence used for a hardware RTC.
 *
 * Always returns NOTIFY_OK; the hypercall result is deliberately ignored.
 */
static int xen_pvclock_gtod_notify(struct notifier_block *nb,
				   unsigned long was_set, void *priv)
{
	/* Protected by the calling core code serialization */
	static struct timespec next_sync;

	struct xen_platform_op op;
	struct timespec now;

	now = __current_kernel_time();

	/*
	 * We only take the expensive HV call when the clock was set
	 * or when the 11 minutes RTC synchronization time elapsed.
	 */
	if (!was_set && timespec_compare(&now, &next_sync) < 0)
		return NOTIFY_OK;

	op.cmd = XENPF_settime;
	op.u.settime.secs = now.tv_sec;
	op.u.settime.nsecs = now.tv_nsec;
	op.u.settime.system_time = xen_clocksource_read();

	(void)HYPERVISOR_dom0_op(&op);

	/*
	 * Move the next drift compensation time 11 minutes
	 * ahead. That's emulating the sync_cmos_clock() update for
	 * the hardware RTC.
	 */
	next_sync = now;
	next_sync.tv_sec += 11 * 60;

	return NOTIFY_OK;
}
221 | 240 | ||
241 | static struct notifier_block xen_pvclock_gtod_notifier = { | ||
242 | .notifier_call = xen_pvclock_gtod_notify, | ||
243 | }; | ||
244 | |||
222 | static struct clocksource xen_clocksource __read_mostly = { | 245 | static struct clocksource xen_clocksource __read_mostly = { |
223 | .name = "xen", | 246 | .name = "xen", |
224 | .rating = 400, | 247 | .rating = 400, |
@@ -480,6 +503,9 @@ static void __init xen_time_init(void) | |||
480 | xen_setup_runstate_info(cpu); | 503 | xen_setup_runstate_info(cpu); |
481 | xen_setup_timer(cpu); | 504 | xen_setup_timer(cpu); |
482 | xen_setup_cpu_clockevents(); | 505 | xen_setup_cpu_clockevents(); |
506 | |||
507 | if (xen_initial_domain()) | ||
508 | pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier); | ||
483 | } | 509 | } |
484 | 510 | ||
485 | void __init xen_init_time_ops(void) | 511 | void __init xen_init_time_ops(void) |
@@ -492,7 +518,9 @@ void __init xen_init_time_ops(void) | |||
492 | 518 | ||
493 | x86_platform.calibrate_tsc = xen_tsc_khz; | 519 | x86_platform.calibrate_tsc = xen_tsc_khz; |
494 | x86_platform.get_wallclock = xen_get_wallclock; | 520 | x86_platform.get_wallclock = xen_get_wallclock; |
495 | x86_platform.set_wallclock = xen_set_wallclock; | 521 | /* Dom0 uses the native method to set the hardware RTC. */ |
522 | if (!xen_initial_domain()) | ||
523 | x86_platform.set_wallclock = xen_set_wallclock; | ||
496 | } | 524 | } |
497 | 525 | ||
498 | #ifdef CONFIG_XEN_PVHVM | 526 | #ifdef CONFIG_XEN_PVHVM |
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index f151c6cf27c3..0a04257edf65 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
@@ -85,3 +85,8 @@ config CLKSRC_SAMSUNG_PWM | |||
85 | Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver | 85 | Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver |
86 | for all devicetree enabled platforms. This driver will be | 86 | for all devicetree enabled platforms. This driver will be |
87 | needed only on systems that do not have the Exynos MCT available. | 87 | needed only on systems that do not have the Exynos MCT available. |
88 | |||
89 | config VF_PIT_TIMER | ||
90 | bool | ||
91 | help | ||
92 | Support for Period Interrupt Timer on Freescale Vybrid Family SoCs. | ||
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 8d979c72aa94..9ba8b4d867e3 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile | |||
@@ -22,10 +22,13 @@ obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o | |||
22 | obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o | 22 | obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o |
23 | obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o | 23 | obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o |
24 | obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o | 24 | obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o |
25 | obj-$(CONFIG_ARCH_NSPIRE) += zevio-timer.o | ||
25 | obj-$(CONFIG_ARCH_BCM) += bcm_kona_timer.o | 26 | obj-$(CONFIG_ARCH_BCM) += bcm_kona_timer.o |
26 | obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o | 27 | obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o |
27 | obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o | 28 | obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o |
28 | obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o | 29 | obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o |
30 | obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o | ||
29 | 31 | ||
30 | obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o | 32 | obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o |
31 | obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o | 33 | obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o |
34 | obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o | ||
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c index 766611d29945..07ea7ce900dc 100644 --- a/drivers/clocksource/bcm2835_timer.c +++ b/drivers/clocksource/bcm2835_timer.c | |||
@@ -28,8 +28,8 @@ | |||
28 | #include <linux/of_platform.h> | 28 | #include <linux/of_platform.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/string.h> | 30 | #include <linux/string.h> |
31 | #include <linux/sched_clock.h> | ||
31 | 32 | ||
32 | #include <asm/sched_clock.h> | ||
33 | #include <asm/irq.h> | 33 | #include <asm/irq.h> |
34 | 34 | ||
35 | #define REG_CONTROL 0x00 | 35 | #define REG_CONTROL 0x00 |
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c index 54f3d119d99c..0a7fb2440e29 100644 --- a/drivers/clocksource/clksrc-dbx500-prcmu.c +++ b/drivers/clocksource/clksrc-dbx500-prcmu.c | |||
@@ -14,8 +14,7 @@ | |||
14 | */ | 14 | */ |
15 | #include <linux/clockchips.h> | 15 | #include <linux/clockchips.h> |
16 | #include <linux/clksrc-dbx500-prcmu.h> | 16 | #include <linux/clksrc-dbx500-prcmu.h> |
17 | 17 | #include <linux/sched_clock.h> | |
18 | #include <asm/sched_clock.h> | ||
19 | 18 | ||
20 | #define RATE_32K 32768 | 19 | #define RATE_32K 32768 |
21 | 20 | ||
diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c new file mode 100644 index 000000000000..1f55f9620338 --- /dev/null +++ b/drivers/clocksource/dummy_timer.c | |||
@@ -0,0 +1,69 @@ | |||
1 | /* | ||
2 | * linux/drivers/clocksource/dummy_timer.c | ||
3 | * | ||
4 | * Copyright (C) 2013 ARM Ltd. | ||
5 | * All Rights Reserved | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #include <linux/clockchips.h> | ||
12 | #include <linux/cpu.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/percpu.h> | ||
15 | #include <linux/cpumask.h> | ||
16 | |||
17 | static DEFINE_PER_CPU(struct clock_event_device, dummy_timer_evt); | ||
18 | |||
/*
 * set_mode callback for the dummy device: intentionally empty, since
 * there is no hardware behind this clockevent.
 */
static void dummy_timer_set_mode(enum clock_event_mode mode,
			struct clock_event_device *evt)
{
	/*
	 * Core clockevents code will call this when exchanging timer devices.
	 * We don't need to do anything here.
	 */
}
27 | |||
/*
 * Register the per-cpu dummy clockevent device on the current CPU.
 *
 * Must run on the CPU being set up (uses smp_processor_id() and this
 * CPU's per-cpu slot).  The device is marked CLOCK_EVT_FEAT_DUMMY with
 * a low rating (100) so a real per-cpu timer, once registered, is
 * preferred over it.
 */
static void __cpuinit dummy_timer_setup(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *evt = __this_cpu_ptr(&dummy_timer_evt);

	evt->name = "dummy_timer";
	evt->features = CLOCK_EVT_FEAT_PERIODIC |
			CLOCK_EVT_FEAT_ONESHOT |
			CLOCK_EVT_FEAT_DUMMY;
	evt->rating = 100;
	evt->set_mode = dummy_timer_set_mode;
	evt->cpumask = cpumask_of(cpu);

	clockevents_register_device(evt);
}
43 | |||
44 | static int __cpuinit dummy_timer_cpu_notify(struct notifier_block *self, | ||
45 | unsigned long action, void *hcpu) | ||
46 | { | ||
47 | if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING) | ||
48 | dummy_timer_setup(); | ||
49 | |||
50 | return NOTIFY_OK; | ||
51 | } | ||
52 | |||
53 | static struct notifier_block dummy_timer_cpu_nb __cpuinitdata = { | ||
54 | .notifier_call = dummy_timer_cpu_notify, | ||
55 | }; | ||
56 | |||
57 | static int __init dummy_timer_register(void) | ||
58 | { | ||
59 | int err = register_cpu_notifier(&dummy_timer_cpu_nb); | ||
60 | if (err) | ||
61 | return err; | ||
62 | |||
63 | /* We won't get a call on the boot CPU, so register immediately */ | ||
64 | if (num_possible_cpus() > 1) | ||
65 | dummy_timer_setup(); | ||
66 | |||
67 | return 0; | ||
68 | } | ||
69 | early_initcall(dummy_timer_register); | ||
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c index 8c2a35f26d9b..e54ca1062d8e 100644 --- a/drivers/clocksource/dw_apb_timer.c +++ b/drivers/clocksource/dw_apb_timer.c | |||
@@ -387,15 +387,3 @@ cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs) | |||
387 | { | 387 | { |
388 | return (cycle_t)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE); | 388 | return (cycle_t)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE); |
389 | } | 389 | } |
390 | |||
391 | /** | ||
392 | * dw_apb_clocksource_unregister() - unregister and free a clocksource. | ||
393 | * | ||
394 | * @dw_cs: The clocksource to unregister/free. | ||
395 | */ | ||
396 | void dw_apb_clocksource_unregister(struct dw_apb_clocksource *dw_cs) | ||
397 | { | ||
398 | clocksource_unregister(&dw_cs->cs); | ||
399 | |||
400 | kfree(dw_cs); | ||
401 | } | ||
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c index ab09ed3742ee..d9a1e8d51751 100644 --- a/drivers/clocksource/dw_apb_timer_of.c +++ b/drivers/clocksource/dw_apb_timer_of.c | |||
@@ -20,9 +20,7 @@ | |||
20 | #include <linux/of.h> | 20 | #include <linux/of.h> |
21 | #include <linux/of_address.h> | 21 | #include <linux/of_address.h> |
22 | #include <linux/of_irq.h> | 22 | #include <linux/of_irq.h> |
23 | 23 | #include <linux/sched_clock.h> | |
24 | #include <asm/mach/time.h> | ||
25 | #include <asm/sched_clock.h> | ||
26 | 24 | ||
27 | static void timer_get_base_and_rate(struct device_node *np, | 25 | static void timer_get_base_and_rate(struct device_node *np, |
28 | void __iomem **base, u32 *rate) | 26 | void __iomem **base, u32 *rate) |
@@ -44,7 +42,7 @@ static void add_clockevent(struct device_node *event_timer) | |||
44 | u32 irq, rate; | 42 | u32 irq, rate; |
45 | 43 | ||
46 | irq = irq_of_parse_and_map(event_timer, 0); | 44 | irq = irq_of_parse_and_map(event_timer, 0); |
47 | if (irq == NO_IRQ) | 45 | if (irq == 0) |
48 | panic("No IRQ for clock event timer"); | 46 | panic("No IRQ for clock event timer"); |
49 | 47 | ||
50 | timer_get_base_and_rate(event_timer, &iobase, &rate); | 48 | timer_get_base_and_rate(event_timer, &iobase, &rate); |
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c index 02af4204af86..0f5e65f74dc3 100644 --- a/drivers/clocksource/mxs_timer.c +++ b/drivers/clocksource/mxs_timer.c | |||
@@ -29,9 +29,9 @@ | |||
29 | #include <linux/of_address.h> | 29 | #include <linux/of_address.h> |
30 | #include <linux/of_irq.h> | 30 | #include <linux/of_irq.h> |
31 | #include <linux/stmp_device.h> | 31 | #include <linux/stmp_device.h> |
32 | #include <linux/sched_clock.h> | ||
32 | 33 | ||
33 | #include <asm/mach/time.h> | 34 | #include <asm/mach/time.h> |
34 | #include <asm/sched_clock.h> | ||
35 | 35 | ||
36 | /* | 36 | /* |
37 | * There are 2 versions of the timrot on Freescale MXS-based SoCs. | 37 | * There are 2 versions of the timrot on Freescale MXS-based SoCs. |
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c index e405531e1cc5..8864c17841c8 100644 --- a/drivers/clocksource/nomadik-mtu.c +++ b/drivers/clocksource/nomadik-mtu.c | |||
@@ -18,8 +18,8 @@ | |||
18 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
19 | #include <linux/err.h> | 19 | #include <linux/err.h> |
20 | #include <linux/platform_data/clocksource-nomadik-mtu.h> | 20 | #include <linux/platform_data/clocksource-nomadik-mtu.h> |
21 | #include <linux/sched_clock.h> | ||
21 | #include <asm/mach/time.h> | 22 | #include <asm/mach/time.h> |
22 | #include <asm/sched_clock.h> | ||
23 | 23 | ||
24 | /* | 24 | /* |
25 | * The MTU device hosts four different counters, with 4 set of | 25 | * The MTU device hosts four different counters, with 4 set of |
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c index 0234c8d2c8f2..584b5472eea3 100644 --- a/drivers/clocksource/samsung_pwm_timer.c +++ b/drivers/clocksource/samsung_pwm_timer.c | |||
@@ -21,10 +21,10 @@ | |||
21 | #include <linux/of_irq.h> | 21 | #include <linux/of_irq.h> |
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/sched_clock.h> | ||
24 | 25 | ||
25 | #include <clocksource/samsung_pwm.h> | 26 | #include <clocksource/samsung_pwm.h> |
26 | 27 | ||
27 | #include <asm/sched_clock.h> | ||
28 | 28 | ||
29 | /* | 29 | /* |
30 | * Clocksource driver | 30 | * Clocksource driver |
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c index ae877b021b54..93961703b887 100644 --- a/drivers/clocksource/tegra20_timer.c +++ b/drivers/clocksource/tegra20_timer.c | |||
@@ -26,10 +26,10 @@ | |||
26 | #include <linux/io.h> | 26 | #include <linux/io.h> |
27 | #include <linux/of_address.h> | 27 | #include <linux/of_address.h> |
28 | #include <linux/of_irq.h> | 28 | #include <linux/of_irq.h> |
29 | #include <linux/sched_clock.h> | ||
29 | 30 | ||
30 | #include <asm/mach/time.h> | 31 | #include <asm/mach/time.h> |
31 | #include <asm/smp_twd.h> | 32 | #include <asm/smp_twd.h> |
32 | #include <asm/sched_clock.h> | ||
33 | 33 | ||
34 | #define RTC_SECONDS 0x08 | 34 | #define RTC_SECONDS 0x08 |
35 | #define RTC_SHADOW_SECONDS 0x0c | 35 | #define RTC_SHADOW_SECONDS 0x0c |
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index 47a673070d70..efdca3263afe 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c | |||
@@ -27,8 +27,8 @@ | |||
27 | #include <linux/of_address.h> | 27 | #include <linux/of_address.h> |
28 | #include <linux/irq.h> | 28 | #include <linux/irq.h> |
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/sched_clock.h> | ||
30 | 31 | ||
31 | #include <asm/sched_clock.h> | ||
32 | #include <asm/localtimer.h> | 32 | #include <asm/localtimer.h> |
33 | #include <linux/percpu.h> | 33 | #include <linux/percpu.h> |
34 | /* | 34 | /* |
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c index 97738dbf3e3b..e5dc9129ca26 100644 --- a/drivers/clocksource/timer-marco.c +++ b/drivers/clocksource/timer-marco.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/of.h> | 17 | #include <linux/of.h> |
18 | #include <linux/of_irq.h> | 18 | #include <linux/of_irq.h> |
19 | #include <linux/of_address.h> | 19 | #include <linux/of_address.h> |
20 | #include <asm/sched_clock.h> | 20 | #include <linux/sched_clock.h> |
21 | #include <asm/localtimer.h> | 21 | #include <asm/localtimer.h> |
22 | #include <asm/mach/time.h> | 22 | #include <asm/mach/time.h> |
23 | 23 | ||
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c index 760882665d7a..ef3cfb269d8b 100644 --- a/drivers/clocksource/timer-prima2.c +++ b/drivers/clocksource/timer-prima2.c | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <linux/of.h> | 18 | #include <linux/of.h> |
19 | #include <linux/of_irq.h> | 19 | #include <linux/of_irq.h> |
20 | #include <linux/of_address.h> | 20 | #include <linux/of_address.h> |
21 | #include <asm/sched_clock.h> | 21 | #include <linux/sched_clock.h> |
22 | #include <asm/mach/time.h> | 22 | #include <asm/mach/time.h> |
23 | 23 | ||
24 | #define SIRFSOC_TIMER_COUNTER_LO 0x0000 | 24 | #define SIRFSOC_TIMER_COUNTER_LO 0x0000 |
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c new file mode 100644 index 000000000000..587e0202a70b --- /dev/null +++ b/drivers/clocksource/vf_pit_timer.c | |||
@@ -0,0 +1,194 @@ | |||
1 | /* | ||
2 | * Copyright 2012-2013 Freescale Semiconductor, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version 2 | ||
7 | * of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/interrupt.h> | ||
11 | #include <linux/clockchips.h> | ||
12 | #include <linux/clk.h> | ||
13 | #include <linux/of_address.h> | ||
14 | #include <linux/of_irq.h> | ||
15 | #include <linux/sched_clock.h> | ||
16 | |||
17 | /* | ||
18 | * Each pit takes 0x10 Bytes register space | ||
19 | */ | ||
20 | #define PITMCR 0x00 | ||
21 | #define PIT0_OFFSET 0x100 | ||
22 | #define PITn_OFFSET(n) (PIT0_OFFSET + 0x10 * (n)) | ||
23 | #define PITLDVAL 0x00 | ||
24 | #define PITCVAL 0x04 | ||
25 | #define PITTCTRL 0x08 | ||
26 | #define PITTFLG 0x0c | ||
27 | |||
28 | #define PITMCR_MDIS (0x1 << 1) | ||
29 | |||
30 | #define PITTCTRL_TEN (0x1 << 0) | ||
31 | #define PITTCTRL_TIE (0x1 << 1) | ||
32 | #define PITCTRL_CHN (0x1 << 2) | ||
33 | |||
34 | #define PITTFLG_TIF 0x1 | ||
35 | |||
36 | static void __iomem *clksrc_base; | ||
37 | static void __iomem *clkevt_base; | ||
38 | static unsigned long cycle_per_jiffy; | ||
39 | |||
/* Start the clockevent PIT channel with its interrupt enabled. */
static inline void pit_timer_enable(void)
{
	__raw_writel(PITTCTRL_TEN | PITTCTRL_TIE, clkevt_base + PITTCTRL);
}
44 | |||
/* Stop the clockevent PIT channel (clearing TEN and TIE together). */
static inline void pit_timer_disable(void)
{
	__raw_writel(0, clkevt_base + PITTCTRL);
}
49 | |||
/* Acknowledge a pending timer interrupt by writing TIF to the flag reg. */
static inline void pit_irq_acknowledge(void)
{
	__raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
}
54 | |||
55 | static unsigned int pit_read_sched_clock(void) | ||
56 | { | ||
57 | return __raw_readl(clksrc_base + PITCVAL); | ||
58 | } | ||
59 | |||
/*
 * Set up the free-running clocksource channel (PIT2) and hook it into
 * sched_clock.
 *
 * @rate: PIT input clock frequency in Hz.
 *
 * Returns 0 on success or the error from clocksource_mmio_init().
 */
static int __init pit_clocksource_init(unsigned long rate)
{
	/* set the max load value and start the clock source counter */
	__raw_writel(0, clksrc_base + PITTCTRL);
	__raw_writel(~0UL, clksrc_base + PITLDVAL);
	__raw_writel(PITTCTRL_TEN, clksrc_base + PITTCTRL);

	setup_sched_clock(pit_read_sched_clock, 32, rate);
	return clocksource_mmio_init(clksrc_base + PITCVAL, "vf-pit", rate,
			300, 32, clocksource_mmio_readl_down);
}
71 | |||
/*
 * Program the clockevent channel to expire after @delta timer cycles.
 * Always returns 0.
 */
static int pit_set_next_event(unsigned long delta,
				struct clock_event_device *unused)
{
	/*
	 * set a new value to PITLDVAL register will not restart the timer,
	 * to abort the current cycle and start a timer period with the new
	 * value, the timer must be disabled and enabled again.
	 * and the PITLDVAL should be set to delta minus one according to pit
	 * hardware requirement.
	 */
	pit_timer_disable();
	__raw_writel(delta - 1, clkevt_base + PITLDVAL);
	pit_timer_enable();

	return 0;
}
88 | |||
89 | static void pit_set_mode(enum clock_event_mode mode, | ||
90 | struct clock_event_device *evt) | ||
91 | { | ||
92 | switch (mode) { | ||
93 | case CLOCK_EVT_MODE_PERIODIC: | ||
94 | pit_set_next_event(cycle_per_jiffy, evt); | ||
95 | break; | ||
96 | default: | ||
97 | break; | ||
98 | } | ||
99 | } | ||
100 | |||
/*
 * Timer interrupt handler for the clockevent channel: ack the
 * interrupt, stop the auto-reloading counter when in one-shot mode,
 * then hand the event to the clockevents core.
 */
static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	pit_irq_acknowledge();

	/*
	 * pit hardware doesn't support oneshot, it will generate an interrupt
	 * and reload the counter value from PITLDVAL when PITCVAL reach zero,
	 * and start the counter again. So software need to disable the timer
	 * to stop the counter loop in ONESHOT mode.
	 */
	if (likely(evt->mode == CLOCK_EVT_MODE_ONESHOT))
		pit_timer_disable();

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
120 | |||
/* Clockevent device backed by the PIT3 channel (see pit_timer_init()). */
static struct clock_event_device clockevent_pit = {
	.name		= "VF pit timer",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= pit_set_mode,
	.set_next_event	= pit_set_next_event,
	.rating		= 300,
};

/* IRQ action for the PIT; dev_id routes the handler to clockevent_pit. */
static struct irqaction pit_timer_irq = {
	.name		= "VF pit timer",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= pit_timer_interrupt,
	.dev_id		= &clockevent_pit,
};
135 | |||
/*
 * Set up the clockevent channel: reset it, clear any stale interrupt
 * flag, install the IRQ handler and register the device with the
 * clockevents core.
 *
 * @rate: PIT input clock frequency in Hz.
 * @irq:  Linux interrupt number for the PIT.
 *
 * Always returns 0 (failures trigger BUG()).
 */
static int __init pit_clockevent_init(unsigned long rate, int irq)
{
	__raw_writel(0, clkevt_base + PITTCTRL);
	__raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);

	BUG_ON(setup_irq(irq, &pit_timer_irq));

	clockevent_pit.cpumask = cpumask_of(0);
	clockevent_pit.irq = irq;
	/*
	 * The value for the LDVAL register trigger is calculated as:
	 * LDVAL trigger = (period / clock period) - 1
	 * The pit is a 32-bit down count timer, when the counter value
	 * reaches 0, it will generate an interrupt, thus the minimal
	 * LDVAL trigger value is 1. And then the min_delta is
	 * minimal LDVAL trigger value + 1, and the max_delta is full 32-bit.
	 */
	clockevents_config_and_register(&clockevent_pit, rate, 2, 0xffffffff);

	return 0;
}
157 | |||
/*
 * DT init entry point for the Vybrid PIT block.
 *
 * Maps the register window, picks PIT2 as clocksource and PIT3 as
 * clockevent channel, enables the module clock and registers both
 * framework devices.  Any failure here is fatal (BUG_ON), since the
 * system cannot run without its timer.
 */
static void __init pit_timer_init(struct device_node *np)
{
	struct clk *pit_clk;
	void __iomem *timer_base;
	unsigned long clk_rate;
	int irq;

	timer_base = of_iomap(np, 0);
	BUG_ON(!timer_base);

	/*
	 * PIT0 and PIT1 can be chained to build a 64-bit timer,
	 * so choose PIT2 as clocksource, PIT3 as clockevent device,
	 * and leave PIT0 and PIT1 unused for anyone else who needs them.
	 */
	clksrc_base = timer_base + PITn_OFFSET(2);
	clkevt_base = timer_base + PITn_OFFSET(3);

	irq = irq_of_parse_and_map(np, 0);
	BUG_ON(irq <= 0);

	pit_clk = of_clk_get(np, 0);
	BUG_ON(IS_ERR(pit_clk));

	BUG_ON(clk_prepare_enable(pit_clk));

	clk_rate = clk_get_rate(pit_clk);
	/* cycles per tick, used to arm periodic mode in pit_set_mode() */
	cycle_per_jiffy = clk_rate / (HZ);

	/* enable the pit module */
	__raw_writel(~PITMCR_MDIS, timer_base + PITMCR);

	BUG_ON(pit_clocksource_init(clk_rate));

	pit_clockevent_init(clk_rate, irq);
}
CLOCKSOURCE_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c new file mode 100644 index 000000000000..ca81809d159d --- /dev/null +++ b/drivers/clocksource/zevio-timer.c | |||
@@ -0,0 +1,215 @@ | |||
1 | /* | ||
2 | * linux/drivers/clocksource/zevio-timer.c | ||
3 | * | ||
4 | * Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2, as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/io.h> | ||
13 | #include <linux/irq.h> | ||
14 | #include <linux/of.h> | ||
15 | #include <linux/of_address.h> | ||
16 | #include <linux/of_irq.h> | ||
17 | #include <linux/clk.h> | ||
18 | #include <linux/clockchips.h> | ||
19 | #include <linux/cpumask.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/slab.h> | ||
22 | |||
23 | #define IO_CURRENT_VAL 0x00 | ||
24 | #define IO_DIVIDER 0x04 | ||
25 | #define IO_CONTROL 0x08 | ||
26 | |||
27 | #define IO_TIMER1 0x00 | ||
28 | #define IO_TIMER2 0x0C | ||
29 | |||
30 | #define IO_MATCH_BEGIN 0x18 | ||
31 | #define IO_MATCH(x) (IO_MATCH_BEGIN + ((x) << 2)) | ||
32 | |||
33 | #define IO_INTR_STS 0x00 | ||
34 | #define IO_INTR_ACK 0x00 | ||
35 | #define IO_INTR_MSK 0x04 | ||
36 | |||
37 | #define CNTL_STOP_TIMER (1 << 4) | ||
38 | #define CNTL_RUN_TIMER (0 << 4) | ||
39 | |||
40 | #define CNTL_INC (1 << 3) | ||
41 | #define CNTL_DEC (0 << 3) | ||
42 | |||
43 | #define CNTL_TOZERO 0 | ||
44 | #define CNTL_MATCH(x) ((x) + 1) | ||
45 | #define CNTL_FOREVER 7 | ||
46 | |||
47 | /* There are 6 match registers but we only use one. */ | ||
48 | #define TIMER_MATCH 0 | ||
49 | |||
50 | #define TIMER_INTR_MSK (1 << (TIMER_MATCH)) | ||
51 | #define TIMER_INTR_ALL 0x3F | ||
52 | |||
/*
 * Per-instance state for one zevio timer block.
 *
 * timer1 drives the (optional) clockevent device; timer2 runs free as
 * the clocksource.  interrupt_regs and the clkevt members are used only
 * when the DT node provides the interrupt-ack window and an IRQ.
 */
struct zevio_timer {
	void __iomem *base;
	void __iomem *timer1, *timer2;
	void __iomem *interrupt_regs;

	struct clk *clk;
	struct clock_event_device clkevt;
	struct irqaction clkevt_irq;

	/* Registered device names, derived from the MMIO base address */
	char clocksource_name[64];
	char clockevent_name[64];
};
65 | |||
/*
 * Arm timer1 to count down from @delta and interrupt when it reaches
 * the match register (programmed to 0 at init time).  Always returns 0.
 */
static int zevio_timer_set_event(unsigned long delta,
				 struct clock_event_device *dev)
{
	struct zevio_timer *timer = container_of(dev, struct zevio_timer,
						 clkevt);

	writel(delta, timer->timer1 + IO_CURRENT_VAL);
	writel(CNTL_RUN_TIMER | CNTL_DEC | CNTL_MATCH(TIMER_MATCH),
			timer->timer1 + IO_CONTROL);

	return 0;
}
78 | |||
/*
 * Mode-change callback for timer1.
 *
 * ONESHOT/RESUME unmask our match interrupt and ack anything pending;
 * SHUTDOWN/UNUSED mask all timer interrupts, ack them and stop the
 * counter.  PERIODIC mode is not supported by this driver.
 */
static void zevio_timer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *dev)
{
	struct zevio_timer *timer = container_of(dev, struct zevio_timer,
						 clkevt);

	switch (mode) {
	case CLOCK_EVT_MODE_RESUME:
	case CLOCK_EVT_MODE_ONESHOT:
		/* Enable timer interrupts */
		writel(TIMER_INTR_MSK, timer->interrupt_regs + IO_INTR_MSK);
		writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
		/* Disable timer interrupts */
		writel(0, timer->interrupt_regs + IO_INTR_MSK);
		writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK);
		/* Stop timer */
		writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL);
		break;
	case CLOCK_EVT_MODE_PERIODIC:
	default:
		/* Unsupported */
		break;
	}
}
106 | |||
/*
 * IRQ handler: verify our match interrupt is actually pending (return
 * IRQ_NONE otherwise), ack it, stop the one-shot countdown, and
 * dispatch the event to the clockevents core.
 */
static irqreturn_t zevio_timer_interrupt(int irq, void *dev_id)
{
	struct zevio_timer *timer = dev_id;
	u32 intr;

	intr = readl(timer->interrupt_regs + IO_INTR_ACK);
	if (!(intr & TIMER_INTR_MSK))
		return IRQ_NONE;

	writel(TIMER_INTR_MSK, timer->interrupt_regs + IO_INTR_ACK);
	writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL);

	/* event_handler may still be unset during early registration */
	if (timer->clkevt.event_handler)
		timer->clkevt.event_handler(&timer->clkevt);

	return IRQ_HANDLED;
}
124 | |||
/*
 * DT init for a zevio timer node.
 *
 * Always registers timer2 as a 16-bit up-counting clocksource.  When
 * the node additionally provides the interrupt-ack register window
 * (second "reg" entry) and an interrupt, timer1 is also registered as a
 * one-shot clockevent device; otherwise the device is clocksource-only.
 *
 * Returns 0 on success or a negative errno on allocation, mapping or
 * clock lookup failure.
 */
static int __init zevio_timer_add(struct device_node *node)
{
	struct zevio_timer *timer;
	struct resource res;
	int irqnr, ret;

	timer = kzalloc(sizeof(*timer), GFP_KERNEL);
	if (!timer)
		return -ENOMEM;

	timer->base = of_iomap(node, 0);
	if (!timer->base) {
		ret = -EINVAL;
		goto error_free;
	}
	timer->timer1 = timer->base + IO_TIMER1;
	timer->timer2 = timer->base + IO_TIMER2;

	timer->clk = of_clk_get(node, 0);
	if (IS_ERR(timer->clk)) {
		ret = PTR_ERR(timer->clk);
		pr_err("Timer clock not found! (error %d)\n", ret);
		goto error_unmap;
	}

	/* Both are optional; either one missing disables clockevents */
	timer->interrupt_regs = of_iomap(node, 1);
	irqnr = irq_of_parse_and_map(node, 0);

	/* Device names embed the MMIO base so instances stay distinct */
	of_address_to_resource(node, 0, &res);
	scnprintf(timer->clocksource_name, sizeof(timer->clocksource_name),
			"%llx.%s_clocksource",
			(unsigned long long)res.start, node->name);

	scnprintf(timer->clockevent_name, sizeof(timer->clockevent_name),
			"%llx.%s_clockevent",
			(unsigned long long)res.start, node->name);

	if (timer->interrupt_regs && irqnr) {
		timer->clkevt.name = timer->clockevent_name;
		timer->clkevt.set_next_event = zevio_timer_set_event;
		timer->clkevt.set_mode = zevio_timer_set_mode;
		timer->clkevt.rating = 200;
		timer->clkevt.cpumask = cpu_all_mask;
		timer->clkevt.features = CLOCK_EVT_FEAT_ONESHOT;
		timer->clkevt.irq = irqnr;

		/* Timer1 drives the clockevent: stop it, clear the divider */
		writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL);
		writel(0, timer->timer1 + IO_DIVIDER);

		/* Start with timer interrupts disabled */
		writel(0, timer->interrupt_regs + IO_INTR_MSK);
		writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK);

		/* Interrupt to occur when timer value matches 0 */
		writel(0, timer->base + IO_MATCH(TIMER_MATCH));

		timer->clkevt_irq.name = timer->clockevent_name;
		timer->clkevt_irq.handler = zevio_timer_interrupt;
		timer->clkevt_irq.dev_id = timer;
		timer->clkevt_irq.flags = IRQF_TIMER | IRQF_IRQPOLL;

		setup_irq(irqnr, &timer->clkevt_irq);

		clockevents_config_and_register(&timer->clkevt,
				clk_get_rate(timer->clk), 0x0001, 0xffff);
		pr_info("Added %s as clockevent\n", timer->clockevent_name);
	}

	/* Timer2 free-runs upward forever as the clocksource counter */
	writel(CNTL_STOP_TIMER, timer->timer2 + IO_CONTROL);
	writel(0, timer->timer2 + IO_CURRENT_VAL);
	writel(0, timer->timer2 + IO_DIVIDER);
	writel(CNTL_RUN_TIMER | CNTL_FOREVER | CNTL_INC,
			timer->timer2 + IO_CONTROL);

	clocksource_mmio_init(timer->timer2 + IO_CURRENT_VAL,
			timer->clocksource_name,
			clk_get_rate(timer->clk),
			200, 16,
			clocksource_mmio_readw_up);

	pr_info("Added %s as clocksource\n", timer->clocksource_name);

	return 0;
error_unmap:
	iounmap(timer->base);
error_free:
	kfree(timer);
	return ret;
}

CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_add);
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 412b96cc5305..421da856135d 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
@@ -166,9 +166,6 @@ out_resume: | |||
166 | 166 | ||
167 | dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE); | 167 | dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE); |
168 | 168 | ||
169 | /* Make sure timer events get retriggered on all CPUs */ | ||
170 | clock_was_set(); | ||
171 | |||
172 | out_thaw: | 169 | out_thaw: |
173 | #ifdef CONFIG_PREEMPT | 170 | #ifdef CONFIG_PREEMPT |
174 | thaw_processes(); | 171 | thaw_processes(); |
diff --git a/fs/timerfd.c b/fs/timerfd.c index 32b644f03690..929312180dd0 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/alarmtimer.h> | ||
11 | #include <linux/file.h> | 12 | #include <linux/file.h> |
12 | #include <linux/poll.h> | 13 | #include <linux/poll.h> |
13 | #include <linux/init.h> | 14 | #include <linux/init.h> |
@@ -26,7 +27,10 @@ | |||
26 | #include <linux/rcupdate.h> | 27 | #include <linux/rcupdate.h> |
27 | 28 | ||
28 | struct timerfd_ctx { | 29 | struct timerfd_ctx { |
29 | struct hrtimer tmr; | 30 | union { |
31 | struct hrtimer tmr; | ||
32 | struct alarm alarm; | ||
33 | } t; | ||
30 | ktime_t tintv; | 34 | ktime_t tintv; |
31 | ktime_t moffs; | 35 | ktime_t moffs; |
32 | wait_queue_head_t wqh; | 36 | wait_queue_head_t wqh; |
@@ -41,14 +45,19 @@ struct timerfd_ctx { | |||
41 | static LIST_HEAD(cancel_list); | 45 | static LIST_HEAD(cancel_list); |
42 | static DEFINE_SPINLOCK(cancel_lock); | 46 | static DEFINE_SPINLOCK(cancel_lock); |
43 | 47 | ||
48 | static inline bool isalarm(struct timerfd_ctx *ctx) | ||
49 | { | ||
50 | return ctx->clockid == CLOCK_REALTIME_ALARM || | ||
51 | ctx->clockid == CLOCK_BOOTTIME_ALARM; | ||
52 | } | ||
53 | |||
44 | /* | 54 | /* |
45 | * This gets called when the timer event triggers. We set the "expired" | 55 | * This gets called when the timer event triggers. We set the "expired" |
46 | * flag, but we do not re-arm the timer (in case it's necessary, | 56 | * flag, but we do not re-arm the timer (in case it's necessary, |
47 | * tintv.tv64 != 0) until the timer is accessed. | 57 | * tintv.tv64 != 0) until the timer is accessed. |
48 | */ | 58 | */ |
49 | static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr) | 59 | static void timerfd_triggered(struct timerfd_ctx *ctx) |
50 | { | 60 | { |
51 | struct timerfd_ctx *ctx = container_of(htmr, struct timerfd_ctx, tmr); | ||
52 | unsigned long flags; | 61 | unsigned long flags; |
53 | 62 | ||
54 | spin_lock_irqsave(&ctx->wqh.lock, flags); | 63 | spin_lock_irqsave(&ctx->wqh.lock, flags); |
@@ -56,10 +65,25 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr) | |||
56 | ctx->ticks++; | 65 | ctx->ticks++; |
57 | wake_up_locked(&ctx->wqh); | 66 | wake_up_locked(&ctx->wqh); |
58 | spin_unlock_irqrestore(&ctx->wqh.lock, flags); | 67 | spin_unlock_irqrestore(&ctx->wqh.lock, flags); |
68 | } | ||
59 | 69 | ||
70 | static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr) | ||
71 | { | ||
72 | struct timerfd_ctx *ctx = container_of(htmr, struct timerfd_ctx, | ||
73 | t.tmr); | ||
74 | timerfd_triggered(ctx); | ||
60 | return HRTIMER_NORESTART; | 75 | return HRTIMER_NORESTART; |
61 | } | 76 | } |
62 | 77 | ||
78 | static enum alarmtimer_restart timerfd_alarmproc(struct alarm *alarm, | ||
79 | ktime_t now) | ||
80 | { | ||
81 | struct timerfd_ctx *ctx = container_of(alarm, struct timerfd_ctx, | ||
82 | t.alarm); | ||
83 | timerfd_triggered(ctx); | ||
84 | return ALARMTIMER_NORESTART; | ||
85 | } | ||
86 | |||
63 | /* | 87 | /* |
64 | * Called when the clock was set to cancel the timers in the cancel | 88 | * Called when the clock was set to cancel the timers in the cancel |
65 | * list. This will wake up processes waiting on these timers. The | 89 | * list. This will wake up processes waiting on these timers. The |
@@ -107,8 +131,9 @@ static bool timerfd_canceled(struct timerfd_ctx *ctx) | |||
107 | 131 | ||
108 | static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags) | 132 | static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags) |
109 | { | 133 | { |
110 | if (ctx->clockid == CLOCK_REALTIME && (flags & TFD_TIMER_ABSTIME) && | 134 | if ((ctx->clockid == CLOCK_REALTIME || |
111 | (flags & TFD_TIMER_CANCEL_ON_SET)) { | 135 | ctx->clockid == CLOCK_REALTIME_ALARM) && |
136 | (flags & TFD_TIMER_ABSTIME) && (flags & TFD_TIMER_CANCEL_ON_SET)) { | ||
112 | if (!ctx->might_cancel) { | 137 | if (!ctx->might_cancel) { |
113 | ctx->might_cancel = true; | 138 | ctx->might_cancel = true; |
114 | spin_lock(&cancel_lock); | 139 | spin_lock(&cancel_lock); |
@@ -124,7 +149,11 @@ static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx) | |||
124 | { | 149 | { |
125 | ktime_t remaining; | 150 | ktime_t remaining; |
126 | 151 | ||
127 | remaining = hrtimer_expires_remaining(&ctx->tmr); | 152 | if (isalarm(ctx)) |
153 | remaining = alarm_expires_remaining(&ctx->t.alarm); | ||
154 | else | ||
155 | remaining = hrtimer_expires_remaining(&ctx->t.tmr); | ||
156 | |||
128 | return remaining.tv64 < 0 ? ktime_set(0, 0): remaining; | 157 | return remaining.tv64 < 0 ? ktime_set(0, 0): remaining; |
129 | } | 158 | } |
130 | 159 | ||
@@ -142,11 +171,28 @@ static int timerfd_setup(struct timerfd_ctx *ctx, int flags, | |||
142 | ctx->expired = 0; | 171 | ctx->expired = 0; |
143 | ctx->ticks = 0; | 172 | ctx->ticks = 0; |
144 | ctx->tintv = timespec_to_ktime(ktmr->it_interval); | 173 | ctx->tintv = timespec_to_ktime(ktmr->it_interval); |
145 | hrtimer_init(&ctx->tmr, clockid, htmode); | 174 | |
146 | hrtimer_set_expires(&ctx->tmr, texp); | 175 | if (isalarm(ctx)) { |
147 | ctx->tmr.function = timerfd_tmrproc; | 176 | alarm_init(&ctx->t.alarm, |
177 | ctx->clockid == CLOCK_REALTIME_ALARM ? | ||
178 | ALARM_REALTIME : ALARM_BOOTTIME, | ||
179 | timerfd_alarmproc); | ||
180 | } else { | ||
181 | hrtimer_init(&ctx->t.tmr, clockid, htmode); | ||
182 | hrtimer_set_expires(&ctx->t.tmr, texp); | ||
183 | ctx->t.tmr.function = timerfd_tmrproc; | ||
184 | } | ||
185 | |||
148 | if (texp.tv64 != 0) { | 186 | if (texp.tv64 != 0) { |
149 | hrtimer_start(&ctx->tmr, texp, htmode); | 187 | if (isalarm(ctx)) { |
188 | if (flags & TFD_TIMER_ABSTIME) | ||
189 | alarm_start(&ctx->t.alarm, texp); | ||
190 | else | ||
191 | alarm_start_relative(&ctx->t.alarm, texp); | ||
192 | } else { | ||
193 | hrtimer_start(&ctx->t.tmr, texp, htmode); | ||
194 | } | ||
195 | |||
150 | if (timerfd_canceled(ctx)) | 196 | if (timerfd_canceled(ctx)) |
151 | return -ECANCELED; | 197 | return -ECANCELED; |
152 | } | 198 | } |
@@ -158,7 +204,11 @@ static int timerfd_release(struct inode *inode, struct file *file) | |||
158 | struct timerfd_ctx *ctx = file->private_data; | 204 | struct timerfd_ctx *ctx = file->private_data; |
159 | 205 | ||
160 | timerfd_remove_cancel(ctx); | 206 | timerfd_remove_cancel(ctx); |
161 | hrtimer_cancel(&ctx->tmr); | 207 | |
208 | if (isalarm(ctx)) | ||
209 | alarm_cancel(&ctx->t.alarm); | ||
210 | else | ||
211 | hrtimer_cancel(&ctx->t.tmr); | ||
162 | kfree_rcu(ctx, rcu); | 212 | kfree_rcu(ctx, rcu); |
163 | return 0; | 213 | return 0; |
164 | } | 214 | } |
@@ -215,9 +265,15 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count, | |||
215 | * callback to avoid DoS attacks specifying a very | 265 | * callback to avoid DoS attacks specifying a very |
216 | * short timer period. | 266 | * short timer period. |
217 | */ | 267 | */ |
218 | ticks += hrtimer_forward_now(&ctx->tmr, | 268 | if (isalarm(ctx)) { |
219 | ctx->tintv) - 1; | 269 | ticks += alarm_forward_now( |
220 | hrtimer_restart(&ctx->tmr); | 270 | &ctx->t.alarm, ctx->tintv) - 1; |
271 | alarm_restart(&ctx->t.alarm); | ||
272 | } else { | ||
273 | ticks += hrtimer_forward_now(&ctx->t.tmr, | ||
274 | ctx->tintv) - 1; | ||
275 | hrtimer_restart(&ctx->t.tmr); | ||
276 | } | ||
221 | } | 277 | } |
222 | ctx->expired = 0; | 278 | ctx->expired = 0; |
223 | ctx->ticks = 0; | 279 | ctx->ticks = 0; |
@@ -259,7 +315,9 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags) | |||
259 | 315 | ||
260 | if ((flags & ~TFD_CREATE_FLAGS) || | 316 | if ((flags & ~TFD_CREATE_FLAGS) || |
261 | (clockid != CLOCK_MONOTONIC && | 317 | (clockid != CLOCK_MONOTONIC && |
262 | clockid != CLOCK_REALTIME)) | 318 | clockid != CLOCK_REALTIME && |
319 | clockid != CLOCK_REALTIME_ALARM && | ||
320 | clockid != CLOCK_BOOTTIME_ALARM)) | ||
263 | return -EINVAL; | 321 | return -EINVAL; |
264 | 322 | ||
265 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); | 323 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
@@ -268,7 +326,15 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags) | |||
268 | 326 | ||
269 | init_waitqueue_head(&ctx->wqh); | 327 | init_waitqueue_head(&ctx->wqh); |
270 | ctx->clockid = clockid; | 328 | ctx->clockid = clockid; |
271 | hrtimer_init(&ctx->tmr, clockid, HRTIMER_MODE_ABS); | 329 | |
330 | if (isalarm(ctx)) | ||
331 | alarm_init(&ctx->t.alarm, | ||
332 | ctx->clockid == CLOCK_REALTIME_ALARM ? | ||
333 | ALARM_REALTIME : ALARM_BOOTTIME, | ||
334 | timerfd_alarmproc); | ||
335 | else | ||
336 | hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS); | ||
337 | |||
272 | ctx->moffs = ktime_get_monotonic_offset(); | 338 | ctx->moffs = ktime_get_monotonic_offset(); |
273 | 339 | ||
274 | ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx, | 340 | ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx, |
@@ -305,8 +371,14 @@ static int do_timerfd_settime(int ufd, int flags, | |||
305 | */ | 371 | */ |
306 | for (;;) { | 372 | for (;;) { |
307 | spin_lock_irq(&ctx->wqh.lock); | 373 | spin_lock_irq(&ctx->wqh.lock); |
308 | if (hrtimer_try_to_cancel(&ctx->tmr) >= 0) | 374 | |
309 | break; | 375 | if (isalarm(ctx)) { |
376 | if (alarm_try_to_cancel(&ctx->t.alarm) >= 0) | ||
377 | break; | ||
378 | } else { | ||
379 | if (hrtimer_try_to_cancel(&ctx->t.tmr) >= 0) | ||
380 | break; | ||
381 | } | ||
310 | spin_unlock_irq(&ctx->wqh.lock); | 382 | spin_unlock_irq(&ctx->wqh.lock); |
311 | cpu_relax(); | 383 | cpu_relax(); |
312 | } | 384 | } |
@@ -317,8 +389,12 @@ static int do_timerfd_settime(int ufd, int flags, | |||
317 | * We do not update "ticks" and "expired" since the timer will be | 389 | * We do not update "ticks" and "expired" since the timer will be |
318 | * re-programmed again in the following timerfd_setup() call. | 390 | * re-programmed again in the following timerfd_setup() call. |
319 | */ | 391 | */ |
320 | if (ctx->expired && ctx->tintv.tv64) | 392 | if (ctx->expired && ctx->tintv.tv64) { |
321 | hrtimer_forward_now(&ctx->tmr, ctx->tintv); | 393 | if (isalarm(ctx)) |
394 | alarm_forward_now(&ctx->t.alarm, ctx->tintv); | ||
395 | else | ||
396 | hrtimer_forward_now(&ctx->t.tmr, ctx->tintv); | ||
397 | } | ||
322 | 398 | ||
323 | old->it_value = ktime_to_timespec(timerfd_get_remaining(ctx)); | 399 | old->it_value = ktime_to_timespec(timerfd_get_remaining(ctx)); |
324 | old->it_interval = ktime_to_timespec(ctx->tintv); | 400 | old->it_interval = ktime_to_timespec(ctx->tintv); |
@@ -345,9 +421,18 @@ static int do_timerfd_gettime(int ufd, struct itimerspec *t) | |||
345 | spin_lock_irq(&ctx->wqh.lock); | 421 | spin_lock_irq(&ctx->wqh.lock); |
346 | if (ctx->expired && ctx->tintv.tv64) { | 422 | if (ctx->expired && ctx->tintv.tv64) { |
347 | ctx->expired = 0; | 423 | ctx->expired = 0; |
348 | ctx->ticks += | 424 | |
349 | hrtimer_forward_now(&ctx->tmr, ctx->tintv) - 1; | 425 | if (isalarm(ctx)) { |
350 | hrtimer_restart(&ctx->tmr); | 426 | ctx->ticks += |
427 | alarm_forward_now( | ||
428 | &ctx->t.alarm, ctx->tintv) - 1; | ||
429 | alarm_restart(&ctx->t.alarm); | ||
430 | } else { | ||
431 | ctx->ticks += | ||
432 | hrtimer_forward_now(&ctx->t.tmr, ctx->tintv) | ||
433 | - 1; | ||
434 | hrtimer_restart(&ctx->t.tmr); | ||
435 | } | ||
351 | } | 436 | } |
352 | t->it_value = ktime_to_timespec(timerfd_get_remaining(ctx)); | 437 | t->it_value = ktime_to_timespec(timerfd_get_remaining(ctx)); |
353 | t->it_interval = ktime_to_timespec(ctx->tintv); | 438 | t->it_interval = ktime_to_timespec(ctx->tintv); |
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h index 9069694e70eb..a899402a5a0e 100644 --- a/include/linux/alarmtimer.h +++ b/include/linux/alarmtimer.h | |||
@@ -44,10 +44,14 @@ struct alarm { | |||
44 | void alarm_init(struct alarm *alarm, enum alarmtimer_type type, | 44 | void alarm_init(struct alarm *alarm, enum alarmtimer_type type, |
45 | enum alarmtimer_restart (*function)(struct alarm *, ktime_t)); | 45 | enum alarmtimer_restart (*function)(struct alarm *, ktime_t)); |
46 | int alarm_start(struct alarm *alarm, ktime_t start); | 46 | int alarm_start(struct alarm *alarm, ktime_t start); |
47 | int alarm_start_relative(struct alarm *alarm, ktime_t start); | ||
48 | void alarm_restart(struct alarm *alarm); | ||
47 | int alarm_try_to_cancel(struct alarm *alarm); | 49 | int alarm_try_to_cancel(struct alarm *alarm); |
48 | int alarm_cancel(struct alarm *alarm); | 50 | int alarm_cancel(struct alarm *alarm); |
49 | 51 | ||
50 | u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval); | 52 | u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval); |
53 | u64 alarm_forward_now(struct alarm *alarm, ktime_t interval); | ||
54 | ktime_t alarm_expires_remaining(const struct alarm *alarm); | ||
51 | 55 | ||
52 | /* Provide way to access the rtc device being used by alarmtimers */ | 56 | /* Provide way to access the rtc device being used by alarmtimers */ |
53 | struct rtc_device *alarmtimer_get_rtcdev(void); | 57 | struct rtc_device *alarmtimer_get_rtcdev(void); |
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 963d71431388..0857922e8ad0 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h | |||
@@ -30,6 +30,7 @@ enum clock_event_nofitiers { | |||
30 | #include <linux/notifier.h> | 30 | #include <linux/notifier.h> |
31 | 31 | ||
32 | struct clock_event_device; | 32 | struct clock_event_device; |
33 | struct module; | ||
33 | 34 | ||
34 | /* Clock event mode commands */ | 35 | /* Clock event mode commands */ |
35 | enum clock_event_mode { | 36 | enum clock_event_mode { |
@@ -83,6 +84,7 @@ enum clock_event_mode { | |||
83 | * @irq: IRQ number (only for non CPU local devices) | 84 | * @irq: IRQ number (only for non CPU local devices) |
84 | * @cpumask: cpumask to indicate for which CPUs this device works | 85 | * @cpumask: cpumask to indicate for which CPUs this device works |
85 | * @list: list head for the management code | 86 | * @list: list head for the management code |
87 | * @owner: module reference | ||
86 | */ | 88 | */ |
87 | struct clock_event_device { | 89 | struct clock_event_device { |
88 | void (*event_handler)(struct clock_event_device *); | 90 | void (*event_handler)(struct clock_event_device *); |
@@ -112,6 +114,7 @@ struct clock_event_device { | |||
112 | int irq; | 114 | int irq; |
113 | const struct cpumask *cpumask; | 115 | const struct cpumask *cpumask; |
114 | struct list_head list; | 116 | struct list_head list; |
117 | struct module *owner; | ||
115 | } ____cacheline_aligned; | 118 | } ____cacheline_aligned; |
116 | 119 | ||
117 | /* | 120 | /* |
@@ -138,6 +141,7 @@ static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec, | |||
138 | extern u64 clockevent_delta2ns(unsigned long latch, | 141 | extern u64 clockevent_delta2ns(unsigned long latch, |
139 | struct clock_event_device *evt); | 142 | struct clock_event_device *evt); |
140 | extern void clockevents_register_device(struct clock_event_device *dev); | 143 | extern void clockevents_register_device(struct clock_event_device *dev); |
144 | extern int clockevents_unbind_device(struct clock_event_device *ced, int cpu); | ||
141 | 145 | ||
142 | extern void clockevents_config(struct clock_event_device *dev, u32 freq); | 146 | extern void clockevents_config(struct clock_event_device *dev, u32 freq); |
143 | extern void clockevents_config_and_register(struct clock_event_device *dev, | 147 | extern void clockevents_config_and_register(struct clock_event_device *dev, |
@@ -150,7 +154,6 @@ extern void clockevents_exchange_device(struct clock_event_device *old, | |||
150 | struct clock_event_device *new); | 154 | struct clock_event_device *new); |
151 | extern void clockevents_set_mode(struct clock_event_device *dev, | 155 | extern void clockevents_set_mode(struct clock_event_device *dev, |
152 | enum clock_event_mode mode); | 156 | enum clock_event_mode mode); |
153 | extern int clockevents_register_notifier(struct notifier_block *nb); | ||
154 | extern int clockevents_program_event(struct clock_event_device *dev, | 157 | extern int clockevents_program_event(struct clock_event_device *dev, |
155 | ktime_t expires, bool force); | 158 | ktime_t expires, bool force); |
156 | 159 | ||
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 7279b94c01da..2f39a4911668 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h | |||
@@ -21,6 +21,7 @@ | |||
21 | /* clocksource cycle base type */ | 21 | /* clocksource cycle base type */ |
22 | typedef u64 cycle_t; | 22 | typedef u64 cycle_t; |
23 | struct clocksource; | 23 | struct clocksource; |
24 | struct module; | ||
24 | 25 | ||
25 | #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA | 26 | #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA |
26 | #include <asm/clocksource.h> | 27 | #include <asm/clocksource.h> |
@@ -162,6 +163,7 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, | |||
162 | * @suspend: suspend function for the clocksource, if necessary | 163 | * @suspend: suspend function for the clocksource, if necessary |
163 | * @resume: resume function for the clocksource, if necessary | 164 | * @resume: resume function for the clocksource, if necessary |
164 | * @cycle_last: most recent cycle counter value seen by ::read() | 165 | * @cycle_last: most recent cycle counter value seen by ::read() |
166 | * @owner: module reference, must be set by clocksource in modules | ||
165 | */ | 167 | */ |
166 | struct clocksource { | 168 | struct clocksource { |
167 | /* | 169 | /* |
@@ -195,6 +197,7 @@ struct clocksource { | |||
195 | cycle_t cs_last; | 197 | cycle_t cs_last; |
196 | cycle_t wd_last; | 198 | cycle_t wd_last; |
197 | #endif | 199 | #endif |
200 | struct module *owner; | ||
198 | } ____cacheline_aligned; | 201 | } ____cacheline_aligned; |
199 | 202 | ||
200 | /* | 203 | /* |
@@ -279,7 +282,7 @@ static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift) | |||
279 | 282 | ||
280 | 283 | ||
281 | extern int clocksource_register(struct clocksource*); | 284 | extern int clocksource_register(struct clocksource*); |
282 | extern void clocksource_unregister(struct clocksource*); | 285 | extern int clocksource_unregister(struct clocksource*); |
283 | extern void clocksource_touch_watchdog(void); | 286 | extern void clocksource_touch_watchdog(void); |
284 | extern struct clocksource* clocksource_get_next(void); | 287 | extern struct clocksource* clocksource_get_next(void); |
285 | extern void clocksource_change_rating(struct clocksource *cs, int rating); | 288 | extern void clocksource_change_rating(struct clocksource *cs, int rating); |
@@ -321,7 +324,7 @@ static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz) | |||
321 | } | 324 | } |
322 | 325 | ||
323 | 326 | ||
324 | extern void timekeeping_notify(struct clocksource *clock); | 327 | extern int timekeeping_notify(struct clocksource *clock); |
325 | 328 | ||
326 | extern cycle_t clocksource_mmio_readl_up(struct clocksource *); | 329 | extern cycle_t clocksource_mmio_readl_up(struct clocksource *); |
327 | extern cycle_t clocksource_mmio_readl_down(struct clocksource *); | 330 | extern cycle_t clocksource_mmio_readl_down(struct clocksource *); |
diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h index dd755ce2a5eb..b1cd9597c241 100644 --- a/include/linux/dw_apb_timer.h +++ b/include/linux/dw_apb_timer.h | |||
@@ -51,7 +51,6 @@ dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base, | |||
51 | void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs); | 51 | void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs); |
52 | void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs); | 52 | void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs); |
53 | cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs); | 53 | cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs); |
54 | void dw_apb_clocksource_unregister(struct dw_apb_clocksource *dw_cs); | ||
55 | 54 | ||
56 | extern void dw_apb_timer_init(void); | 55 | extern void dw_apb_timer_init(void); |
57 | #endif /* __DW_APB_TIMER_H__ */ | 56 | #endif /* __DW_APB_TIMER_H__ */ |
diff --git a/include/linux/efi.h b/include/linux/efi.h index 2bc0ad78d058..0068bba6f8b6 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
@@ -594,8 +594,8 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size); | |||
594 | extern int __init efi_uart_console_only (void); | 594 | extern int __init efi_uart_console_only (void); |
595 | extern void efi_initialize_iomem_resources(struct resource *code_resource, | 595 | extern void efi_initialize_iomem_resources(struct resource *code_resource, |
596 | struct resource *data_resource, struct resource *bss_resource); | 596 | struct resource *data_resource, struct resource *bss_resource); |
597 | extern unsigned long efi_get_time(void); | 597 | extern void efi_get_time(struct timespec *now); |
598 | extern int efi_set_rtc_mmss(unsigned long nowtime); | 598 | extern int efi_set_rtc_mmss(const struct timespec *now); |
599 | extern void efi_reserve_boot_services(void); | 599 | extern void efi_reserve_boot_services(void); |
600 | extern struct efi_memory_map memmap; | 600 | extern struct efi_memory_map memmap; |
601 | 601 | ||
diff --git a/include/linux/ktime.h b/include/linux/ktime.h index bbca12804d12..fc66b301b648 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h | |||
@@ -229,7 +229,8 @@ static inline ktime_t timespec_to_ktime(const struct timespec ts) | |||
229 | static inline ktime_t timeval_to_ktime(const struct timeval tv) | 229 | static inline ktime_t timeval_to_ktime(const struct timeval tv) |
230 | { | 230 | { |
231 | return (ktime_t) { .tv = { .sec = (s32)tv.tv_sec, | 231 | return (ktime_t) { .tv = { .sec = (s32)tv.tv_sec, |
232 | .nsec = (s32)tv.tv_usec * 1000 } }; | 232 | .nsec = (s32)(tv.tv_usec * |
233 | NSEC_PER_USEC) } }; | ||
233 | } | 234 | } |
234 | 235 | ||
235 | /** | 236 | /** |
@@ -320,12 +321,12 @@ static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier) | |||
320 | 321 | ||
321 | static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec) | 322 | static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec) |
322 | { | 323 | { |
323 | return ktime_add_ns(kt, usec * 1000); | 324 | return ktime_add_ns(kt, usec * NSEC_PER_USEC); |
324 | } | 325 | } |
325 | 326 | ||
326 | static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec) | 327 | static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec) |
327 | { | 328 | { |
328 | return ktime_sub_ns(kt, usec * 1000); | 329 | return ktime_sub_ns(kt, usec * NSEC_PER_USEC); |
329 | } | 330 | } |
330 | 331 | ||
331 | extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs); | 332 | extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs); |
@@ -338,7 +339,8 @@ extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs); | |||
338 | * | 339 | * |
339 | * Returns true if there was a successful conversion, false if kt was 0. | 340 | * Returns true if there was a successful conversion, false if kt was 0. |
340 | */ | 341 | */ |
341 | static inline bool ktime_to_timespec_cond(const ktime_t kt, struct timespec *ts) | 342 | static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt, |
343 | struct timespec *ts) | ||
342 | { | 344 | { |
343 | if (kt.tv64) { | 345 | if (kt.tv64) { |
344 | *ts = ktime_to_timespec(kt); | 346 | *ts = ktime_to_timespec(kt); |
diff --git a/include/linux/pvclock_gtod.h b/include/linux/pvclock_gtod.h index 0ca75825b60d..a71d2dbd3610 100644 --- a/include/linux/pvclock_gtod.h +++ b/include/linux/pvclock_gtod.h | |||
@@ -3,6 +3,13 @@ | |||
3 | 3 | ||
4 | #include <linux/notifier.h> | 4 | #include <linux/notifier.h> |
5 | 5 | ||
6 | /* | ||
7 | * The pvclock gtod notifier is called when the system time is updated | ||
8 | * and is used to keep guest time synchronized with host time. | ||
9 | * | ||
10 | * The 'action' parameter in the notifier function is false (0), or | ||
11 | * true (non-zero) if system time was stepped. | ||
12 | */ | ||
6 | extern int pvclock_gtod_register_notifier(struct notifier_block *nb); | 13 | extern int pvclock_gtod_register_notifier(struct notifier_block *nb); |
7 | extern int pvclock_gtod_unregister_notifier(struct notifier_block *nb); | 14 | extern int pvclock_gtod_unregister_notifier(struct notifier_block *nb); |
8 | 15 | ||
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h new file mode 100644 index 000000000000..fa7922c80a41 --- /dev/null +++ b/include/linux/sched_clock.h | |||
@@ -0,0 +1,21 @@ | |||
1 | /* | ||
2 | * sched_clock.h: support for extending counters to full 64-bit ns counter | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | #ifndef LINUX_SCHED_CLOCK | ||
9 | #define LINUX_SCHED_CLOCK | ||
10 | |||
11 | #ifdef CONFIG_GENERIC_SCHED_CLOCK | ||
12 | extern void sched_clock_postinit(void); | ||
13 | #else | ||
14 | static inline void sched_clock_postinit(void) { } | ||
15 | #endif | ||
16 | |||
17 | extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate); | ||
18 | |||
19 | extern unsigned long long (*sched_clock_func)(void); | ||
20 | |||
21 | #endif | ||
diff --git a/init/Kconfig b/init/Kconfig index 2d9b83104dcf..68174a5b8317 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -758,6 +758,9 @@ config LOG_BUF_SHIFT | |||
758 | config HAVE_UNSTABLE_SCHED_CLOCK | 758 | config HAVE_UNSTABLE_SCHED_CLOCK |
759 | bool | 759 | bool |
760 | 760 | ||
761 | config GENERIC_SCHED_CLOCK | ||
762 | bool | ||
763 | |||
761 | # | 764 | # |
762 | # For architectures that want to enable the support for NUMA-affine scheduler | 765 | # For architectures that want to enable the support for NUMA-affine scheduler |
763 | # balancing logic: | 766 | # balancing logic: |
diff --git a/init/main.c b/init/main.c index 9484f4ba88d0..bef4a6ac7c76 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -74,6 +74,7 @@ | |||
74 | #include <linux/ptrace.h> | 74 | #include <linux/ptrace.h> |
75 | #include <linux/blkdev.h> | 75 | #include <linux/blkdev.h> |
76 | #include <linux/elevator.h> | 76 | #include <linux/elevator.h> |
77 | #include <linux/sched_clock.h> | ||
77 | 78 | ||
78 | #include <asm/io.h> | 79 | #include <asm/io.h> |
79 | #include <asm/bugs.h> | 80 | #include <asm/bugs.h> |
@@ -555,6 +556,7 @@ asmlinkage void __init start_kernel(void) | |||
555 | softirq_init(); | 556 | softirq_init(); |
556 | timekeeping_init(); | 557 | timekeeping_init(); |
557 | time_init(); | 558 | time_init(); |
559 | sched_clock_postinit(); | ||
558 | profile_init(); | 560 | profile_init(); |
559 | call_function_init(); | 561 | call_function_init(); |
560 | WARN(!irqs_disabled(), "Interrupts were enabled early\n"); | 562 | WARN(!irqs_disabled(), "Interrupts were enabled early\n"); |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index fd4b13b131f8..e86827e94c9a 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -773,15 +773,24 @@ void clock_was_set(void) | |||
773 | 773 | ||
774 | /* | 774 | /* |
775 | * During resume we might have to reprogram the high resolution timer | 775 | * During resume we might have to reprogram the high resolution timer |
776 | * interrupt (on the local CPU): | 776 | * interrupt on all online CPUs. However, all other CPUs will be |
777 | * stopped with IRQs interrupts disabled so the clock_was_set() call | ||
778 | * must be deferred to the softirq. | ||
779 | * | ||
780 | * The one-shot timer has already been programmed to fire immediately | ||
781 | * (see tick_resume_oneshot()) and this interrupt will trigger the | ||
782 | * softirq to run early enough to correctly reprogram the timers on | ||
783 | * all CPUs. | ||
777 | */ | 784 | */ |
778 | void hrtimers_resume(void) | 785 | void hrtimers_resume(void) |
779 | { | 786 | { |
787 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | ||
788 | |||
780 | WARN_ONCE(!irqs_disabled(), | 789 | WARN_ONCE(!irqs_disabled(), |
781 | KERN_INFO "hrtimers_resume() called with IRQs enabled!"); | 790 | KERN_INFO "hrtimers_resume() called with IRQs enabled!"); |
782 | 791 | ||
783 | retrigger_next_event(NULL); | 792 | cpu_base->clock_was_set = 1; |
784 | timerfd_clock_was_set(); | 793 | __raise_softirq_irqoff(HRTIMER_SOFTIRQ); |
785 | } | 794 | } |
786 | 795 | ||
787 | static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer) | 796 | static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer) |
diff --git a/kernel/time/Makefile b/kernel/time/Makefile index ff7d9d2ab504..9250130646f5 100644 --- a/kernel/time/Makefile +++ b/kernel/time/Makefile | |||
@@ -4,6 +4,8 @@ obj-y += timeconv.o posix-clock.o alarmtimer.o | |||
4 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o | 4 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o |
5 | obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o | 5 | obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o |
6 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += tick-broadcast.o | 6 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += tick-broadcast.o |
7 | obj-$(CONFIG_GENERIC_SCHED_CLOCK) += sched_clock.o | ||
7 | obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o | 8 | obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o |
8 | obj-$(CONFIG_TICK_ONESHOT) += tick-sched.o | 9 | obj-$(CONFIG_TICK_ONESHOT) += tick-sched.o |
9 | obj-$(CONFIG_TIMER_STATS) += timer_stats.o | 10 | obj-$(CONFIG_TIMER_STATS) += timer_stats.o |
11 | obj-$(CONFIG_DEBUG_FS) += timekeeping_debug.o | ||
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index f11d83b12949..eec50fcef9e4 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c | |||
@@ -199,6 +199,13 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer) | |||
199 | 199 | ||
200 | } | 200 | } |
201 | 201 | ||
202 | ktime_t alarm_expires_remaining(const struct alarm *alarm) | ||
203 | { | ||
204 | struct alarm_base *base = &alarm_bases[alarm->type]; | ||
205 | return ktime_sub(alarm->node.expires, base->gettime()); | ||
206 | } | ||
207 | EXPORT_SYMBOL_GPL(alarm_expires_remaining); | ||
208 | |||
202 | #ifdef CONFIG_RTC_CLASS | 209 | #ifdef CONFIG_RTC_CLASS |
203 | /** | 210 | /** |
204 | * alarmtimer_suspend - Suspend time callback | 211 | * alarmtimer_suspend - Suspend time callback |
@@ -303,9 +310,10 @@ void alarm_init(struct alarm *alarm, enum alarmtimer_type type, | |||
303 | alarm->type = type; | 310 | alarm->type = type; |
304 | alarm->state = ALARMTIMER_STATE_INACTIVE; | 311 | alarm->state = ALARMTIMER_STATE_INACTIVE; |
305 | } | 312 | } |
313 | EXPORT_SYMBOL_GPL(alarm_init); | ||
306 | 314 | ||
307 | /** | 315 | /** |
308 | * alarm_start - Sets an alarm to fire | 316 | * alarm_start - Sets an absolute alarm to fire |
309 | * @alarm: ptr to alarm to set | 317 | * @alarm: ptr to alarm to set |
310 | * @start: time to run the alarm | 318 | * @start: time to run the alarm |
311 | */ | 319 | */ |
@@ -323,6 +331,34 @@ int alarm_start(struct alarm *alarm, ktime_t start) | |||
323 | spin_unlock_irqrestore(&base->lock, flags); | 331 | spin_unlock_irqrestore(&base->lock, flags); |
324 | return ret; | 332 | return ret; |
325 | } | 333 | } |
334 | EXPORT_SYMBOL_GPL(alarm_start); | ||
335 | |||
336 | /** | ||
337 | * alarm_start_relative - Sets a relative alarm to fire | ||
338 | * @alarm: ptr to alarm to set | ||
339 | * @start: time relative to now to run the alarm | ||
340 | */ | ||
341 | int alarm_start_relative(struct alarm *alarm, ktime_t start) | ||
342 | { | ||
343 | struct alarm_base *base = &alarm_bases[alarm->type]; | ||
344 | |||
345 | start = ktime_add(start, base->gettime()); | ||
346 | return alarm_start(alarm, start); | ||
347 | } | ||
348 | EXPORT_SYMBOL_GPL(alarm_start_relative); | ||
349 | |||
350 | void alarm_restart(struct alarm *alarm) | ||
351 | { | ||
352 | struct alarm_base *base = &alarm_bases[alarm->type]; | ||
353 | unsigned long flags; | ||
354 | |||
355 | spin_lock_irqsave(&base->lock, flags); | ||
356 | hrtimer_set_expires(&alarm->timer, alarm->node.expires); | ||
357 | hrtimer_restart(&alarm->timer); | ||
358 | alarmtimer_enqueue(base, alarm); | ||
359 | spin_unlock_irqrestore(&base->lock, flags); | ||
360 | } | ||
361 | EXPORT_SYMBOL_GPL(alarm_restart); | ||
326 | 362 | ||
327 | /** | 363 | /** |
328 | * alarm_try_to_cancel - Tries to cancel an alarm timer | 364 | * alarm_try_to_cancel - Tries to cancel an alarm timer |
@@ -344,6 +380,7 @@ int alarm_try_to_cancel(struct alarm *alarm) | |||
344 | spin_unlock_irqrestore(&base->lock, flags); | 380 | spin_unlock_irqrestore(&base->lock, flags); |
345 | return ret; | 381 | return ret; |
346 | } | 382 | } |
383 | EXPORT_SYMBOL_GPL(alarm_try_to_cancel); | ||
347 | 384 | ||
348 | 385 | ||
349 | /** | 386 | /** |
@@ -361,6 +398,7 @@ int alarm_cancel(struct alarm *alarm) | |||
361 | cpu_relax(); | 398 | cpu_relax(); |
362 | } | 399 | } |
363 | } | 400 | } |
401 | EXPORT_SYMBOL_GPL(alarm_cancel); | ||
364 | 402 | ||
365 | 403 | ||
366 | u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval) | 404 | u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval) |
@@ -393,8 +431,15 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval) | |||
393 | alarm->node.expires = ktime_add(alarm->node.expires, interval); | 431 | alarm->node.expires = ktime_add(alarm->node.expires, interval); |
394 | return overrun; | 432 | return overrun; |
395 | } | 433 | } |
434 | EXPORT_SYMBOL_GPL(alarm_forward); | ||
396 | 435 | ||
436 | u64 alarm_forward_now(struct alarm *alarm, ktime_t interval) | ||
437 | { | ||
438 | struct alarm_base *base = &alarm_bases[alarm->type]; | ||
397 | 439 | ||
440 | return alarm_forward(alarm, base->gettime(), interval); | ||
441 | } | ||
442 | EXPORT_SYMBOL_GPL(alarm_forward_now); | ||
398 | 443 | ||
399 | 444 | ||
400 | /** | 445 | /** |
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index c6d6400ee137..38959c866789 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c | |||
@@ -15,20 +15,23 @@ | |||
15 | #include <linux/hrtimer.h> | 15 | #include <linux/hrtimer.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/notifier.h> | ||
19 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | #include <linux/device.h> | ||
20 | 20 | ||
21 | #include "tick-internal.h" | 21 | #include "tick-internal.h" |
22 | 22 | ||
23 | /* The registered clock event devices */ | 23 | /* The registered clock event devices */ |
24 | static LIST_HEAD(clockevent_devices); | 24 | static LIST_HEAD(clockevent_devices); |
25 | static LIST_HEAD(clockevents_released); | 25 | static LIST_HEAD(clockevents_released); |
26 | |||
27 | /* Notification for clock events */ | ||
28 | static RAW_NOTIFIER_HEAD(clockevents_chain); | ||
29 | |||
30 | /* Protection for the above */ | 26 | /* Protection for the above */ |
31 | static DEFINE_RAW_SPINLOCK(clockevents_lock); | 27 | static DEFINE_RAW_SPINLOCK(clockevents_lock); |
28 | /* Protection for unbind operations */ | ||
29 | static DEFINE_MUTEX(clockevents_mutex); | ||
30 | |||
31 | struct ce_unbind { | ||
32 | struct clock_event_device *ce; | ||
33 | int res; | ||
34 | }; | ||
32 | 35 | ||
33 | /** | 36 | /** |
34 | * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds | 37 | * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds |
@@ -232,47 +235,107 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, | |||
232 | return (rc && force) ? clockevents_program_min_delta(dev) : rc; | 235 | return (rc && force) ? clockevents_program_min_delta(dev) : rc; |
233 | } | 236 | } |
234 | 237 | ||
235 | /** | 238 | /* |
236 | * clockevents_register_notifier - register a clock events change listener | 239 | * Called after a notify add to make devices available which were |
240 | * released from the notifier call. | ||
237 | */ | 241 | */ |
238 | int clockevents_register_notifier(struct notifier_block *nb) | 242 | static void clockevents_notify_released(void) |
239 | { | 243 | { |
240 | unsigned long flags; | 244 | struct clock_event_device *dev; |
241 | int ret; | ||
242 | 245 | ||
243 | raw_spin_lock_irqsave(&clockevents_lock, flags); | 246 | while (!list_empty(&clockevents_released)) { |
244 | ret = raw_notifier_chain_register(&clockevents_chain, nb); | 247 | dev = list_entry(clockevents_released.next, |
245 | raw_spin_unlock_irqrestore(&clockevents_lock, flags); | 248 | struct clock_event_device, list); |
249 | list_del(&dev->list); | ||
250 | list_add(&dev->list, &clockevent_devices); | ||
251 | tick_check_new_device(dev); | ||
252 | } | ||
253 | } | ||
246 | 254 | ||
247 | return ret; | 255 | /* |
256 | * Try to install a replacement clock event device | ||
257 | */ | ||
258 | static int clockevents_replace(struct clock_event_device *ced) | ||
259 | { | ||
260 | struct clock_event_device *dev, *newdev = NULL; | ||
261 | |||
262 | list_for_each_entry(dev, &clockevent_devices, list) { | ||
263 | if (dev == ced || dev->mode != CLOCK_EVT_MODE_UNUSED) | ||
264 | continue; | ||
265 | |||
266 | if (!tick_check_replacement(newdev, dev)) | ||
267 | continue; | ||
268 | |||
269 | if (!try_module_get(dev->owner)) | ||
270 | continue; | ||
271 | |||
272 | if (newdev) | ||
273 | module_put(newdev->owner); | ||
274 | newdev = dev; | ||
275 | } | ||
276 | if (newdev) { | ||
277 | tick_install_replacement(newdev); | ||
278 | list_del_init(&ced->list); | ||
279 | } | ||
280 | return newdev ? 0 : -EBUSY; | ||
248 | } | 281 | } |
249 | 282 | ||
250 | /* | 283 | /* |
251 | * Notify about a clock event change. Called with clockevents_lock | 284 | * Called with clockevents_mutex and clockevents_lock held |
252 | * held. | ||
253 | */ | 285 | */ |
254 | static void clockevents_do_notify(unsigned long reason, void *dev) | 286 | static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu) |
255 | { | 287 | { |
256 | raw_notifier_call_chain(&clockevents_chain, reason, dev); | 288 | /* Fast track. Device is unused */ |
289 | if (ced->mode == CLOCK_EVT_MODE_UNUSED) { | ||
290 | list_del_init(&ced->list); | ||
291 | return 0; | ||
292 | } | ||
293 | |||
294 | return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY; | ||
257 | } | 295 | } |
258 | 296 | ||
259 | /* | 297 | /* |
260 | * Called after a notify add to make devices available which were | 298 | * SMP function call to unbind a device |
261 | * released from the notifier call. | ||
262 | */ | 299 | */ |
263 | static void clockevents_notify_released(void) | 300 | static void __clockevents_unbind(void *arg) |
264 | { | 301 | { |
265 | struct clock_event_device *dev; | 302 | struct ce_unbind *cu = arg; |
303 | int res; | ||
304 | |||
305 | raw_spin_lock(&clockevents_lock); | ||
306 | res = __clockevents_try_unbind(cu->ce, smp_processor_id()); | ||
307 | if (res == -EAGAIN) | ||
308 | res = clockevents_replace(cu->ce); | ||
309 | cu->res = res; | ||
310 | raw_spin_unlock(&clockevents_lock); | ||
311 | } | ||
266 | 312 | ||
267 | while (!list_empty(&clockevents_released)) { | 313 | /* |
268 | dev = list_entry(clockevents_released.next, | 314 | * Issues smp function call to unbind a per cpu device. Called with |
269 | struct clock_event_device, list); | 315 | * clockevents_mutex held. |
270 | list_del(&dev->list); | 316 | */ |
271 | list_add(&dev->list, &clockevent_devices); | 317 | static int clockevents_unbind(struct clock_event_device *ced, int cpu) |
272 | clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev); | 318 | { |
273 | } | 319 | struct ce_unbind cu = { .ce = ced, .res = -ENODEV }; |
320 | |||
321 | smp_call_function_single(cpu, __clockevents_unbind, &cu, 1); | ||
322 | return cu.res; | ||
274 | } | 323 | } |
275 | 324 | ||
325 | /* | ||
326 | * Unbind a clockevents device. | ||
327 | */ | ||
328 | int clockevents_unbind_device(struct clock_event_device *ced, int cpu) | ||
329 | { | ||
330 | int ret; | ||
331 | |||
332 | mutex_lock(&clockevents_mutex); | ||
333 | ret = clockevents_unbind(ced, cpu); | ||
334 | mutex_unlock(&clockevents_mutex); | ||
335 | return ret; | ||
336 | } | ||
337 | EXPORT_SYMBOL_GPL(clockevents_unbind); | ||
338 | |||
276 | /** | 339 | /** |
277 | * clockevents_register_device - register a clock event device | 340 | * clockevents_register_device - register a clock event device |
278 | * @dev: device to register | 341 | * @dev: device to register |
@@ -290,7 +353,7 @@ void clockevents_register_device(struct clock_event_device *dev) | |||
290 | raw_spin_lock_irqsave(&clockevents_lock, flags); | 353 | raw_spin_lock_irqsave(&clockevents_lock, flags); |
291 | 354 | ||
292 | list_add(&dev->list, &clockevent_devices); | 355 | list_add(&dev->list, &clockevent_devices); |
293 | clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev); | 356 | tick_check_new_device(dev); |
294 | clockevents_notify_released(); | 357 | clockevents_notify_released(); |
295 | 358 | ||
296 | raw_spin_unlock_irqrestore(&clockevents_lock, flags); | 359 | raw_spin_unlock_irqrestore(&clockevents_lock, flags); |
@@ -386,6 +449,7 @@ void clockevents_exchange_device(struct clock_event_device *old, | |||
386 | * released list and do a notify add later. | 449 | * released list and do a notify add later. |
387 | */ | 450 | */ |
388 | if (old) { | 451 | if (old) { |
452 | module_put(old->owner); | ||
389 | clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED); | 453 | clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED); |
390 | list_del(&old->list); | 454 | list_del(&old->list); |
391 | list_add(&old->list, &clockevents_released); | 455 | list_add(&old->list, &clockevents_released); |
@@ -433,10 +497,36 @@ void clockevents_notify(unsigned long reason, void *arg) | |||
433 | int cpu; | 497 | int cpu; |
434 | 498 | ||
435 | raw_spin_lock_irqsave(&clockevents_lock, flags); | 499 | raw_spin_lock_irqsave(&clockevents_lock, flags); |
436 | clockevents_do_notify(reason, arg); | ||
437 | 500 | ||
438 | switch (reason) { | 501 | switch (reason) { |
502 | case CLOCK_EVT_NOTIFY_BROADCAST_ON: | ||
503 | case CLOCK_EVT_NOTIFY_BROADCAST_OFF: | ||
504 | case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: | ||
505 | tick_broadcast_on_off(reason, arg); | ||
506 | break; | ||
507 | |||
508 | case CLOCK_EVT_NOTIFY_BROADCAST_ENTER: | ||
509 | case CLOCK_EVT_NOTIFY_BROADCAST_EXIT: | ||
510 | tick_broadcast_oneshot_control(reason); | ||
511 | break; | ||
512 | |||
513 | case CLOCK_EVT_NOTIFY_CPU_DYING: | ||
514 | tick_handover_do_timer(arg); | ||
515 | break; | ||
516 | |||
517 | case CLOCK_EVT_NOTIFY_SUSPEND: | ||
518 | tick_suspend(); | ||
519 | tick_suspend_broadcast(); | ||
520 | break; | ||
521 | |||
522 | case CLOCK_EVT_NOTIFY_RESUME: | ||
523 | tick_resume(); | ||
524 | break; | ||
525 | |||
439 | case CLOCK_EVT_NOTIFY_CPU_DEAD: | 526 | case CLOCK_EVT_NOTIFY_CPU_DEAD: |
527 | tick_shutdown_broadcast_oneshot(arg); | ||
528 | tick_shutdown_broadcast(arg); | ||
529 | tick_shutdown(arg); | ||
440 | /* | 530 | /* |
441 | * Unregister the clock event devices which were | 531 | * Unregister the clock event devices which were |
442 | * released from the users in the notify chain. | 532 | * released from the users in the notify chain. |
@@ -462,4 +552,123 @@ void clockevents_notify(unsigned long reason, void *arg) | |||
462 | raw_spin_unlock_irqrestore(&clockevents_lock, flags); | 552 | raw_spin_unlock_irqrestore(&clockevents_lock, flags); |
463 | } | 553 | } |
464 | EXPORT_SYMBOL_GPL(clockevents_notify); | 554 | EXPORT_SYMBOL_GPL(clockevents_notify); |
555 | |||
556 | #ifdef CONFIG_SYSFS | ||
557 | struct bus_type clockevents_subsys = { | ||
558 | .name = "clockevents", | ||
559 | .dev_name = "clockevent", | ||
560 | }; | ||
561 | |||
562 | static DEFINE_PER_CPU(struct device, tick_percpu_dev); | ||
563 | static struct tick_device *tick_get_tick_dev(struct device *dev); | ||
564 | |||
565 | static ssize_t sysfs_show_current_tick_dev(struct device *dev, | ||
566 | struct device_attribute *attr, | ||
567 | char *buf) | ||
568 | { | ||
569 | struct tick_device *td; | ||
570 | ssize_t count = 0; | ||
571 | |||
572 | raw_spin_lock_irq(&clockevents_lock); | ||
573 | td = tick_get_tick_dev(dev); | ||
574 | if (td && td->evtdev) | ||
575 | count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name); | ||
576 | raw_spin_unlock_irq(&clockevents_lock); | ||
577 | return count; | ||
578 | } | ||
579 | static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL); | ||
580 | |||
581 | /* We don't support the abomination of removable broadcast devices */ | ||
582 | static ssize_t sysfs_unbind_tick_dev(struct device *dev, | ||
583 | struct device_attribute *attr, | ||
584 | const char *buf, size_t count) | ||
585 | { | ||
586 | char name[CS_NAME_LEN]; | ||
587 | size_t ret = sysfs_get_uname(buf, name, count); | ||
588 | struct clock_event_device *ce; | ||
589 | |||
590 | if (ret < 0) | ||
591 | return ret; | ||
592 | |||
593 | ret = -ENODEV; | ||
594 | mutex_lock(&clockevents_mutex); | ||
595 | raw_spin_lock_irq(&clockevents_lock); | ||
596 | list_for_each_entry(ce, &clockevent_devices, list) { | ||
597 | if (!strcmp(ce->name, name)) { | ||
598 | ret = __clockevents_try_unbind(ce, dev->id); | ||
599 | break; | ||
600 | } | ||
601 | } | ||
602 | raw_spin_unlock_irq(&clockevents_lock); | ||
603 | /* | ||
604 | * We hold clockevents_mutex, so ce can't go away | ||
605 | */ | ||
606 | if (ret == -EAGAIN) | ||
607 | ret = clockevents_unbind(ce, dev->id); | ||
608 | mutex_unlock(&clockevents_mutex); | ||
609 | return ret ? ret : count; | ||
610 | } | ||
611 | static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev); | ||
612 | |||
613 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST | ||
614 | static struct device tick_bc_dev = { | ||
615 | .init_name = "broadcast", | ||
616 | .id = 0, | ||
617 | .bus = &clockevents_subsys, | ||
618 | }; | ||
619 | |||
620 | static struct tick_device *tick_get_tick_dev(struct device *dev) | ||
621 | { | ||
622 | return dev == &tick_bc_dev ? tick_get_broadcast_device() : | ||
623 | &per_cpu(tick_cpu_device, dev->id); | ||
624 | } | ||
625 | |||
626 | static __init int tick_broadcast_init_sysfs(void) | ||
627 | { | ||
628 | int err = device_register(&tick_bc_dev); | ||
629 | |||
630 | if (!err) | ||
631 | err = device_create_file(&tick_bc_dev, &dev_attr_current_device); | ||
632 | return err; | ||
633 | } | ||
634 | #else | ||
635 | static struct tick_device *tick_get_tick_dev(struct device *dev) | ||
636 | { | ||
637 | return &per_cpu(tick_cpu_device, dev->id); | ||
638 | } | ||
639 | static inline int tick_broadcast_init_sysfs(void) { return 0; } | ||
465 | #endif | 640 | #endif |
641 | |||
642 | static int __init tick_init_sysfs(void) | ||
643 | { | ||
644 | int cpu; | ||
645 | |||
646 | for_each_possible_cpu(cpu) { | ||
647 | struct device *dev = &per_cpu(tick_percpu_dev, cpu); | ||
648 | int err; | ||
649 | |||
650 | dev->id = cpu; | ||
651 | dev->bus = &clockevents_subsys; | ||
652 | err = device_register(dev); | ||
653 | if (!err) | ||
654 | err = device_create_file(dev, &dev_attr_current_device); | ||
655 | if (!err) | ||
656 | err = device_create_file(dev, &dev_attr_unbind_device); | ||
657 | if (err) | ||
658 | return err; | ||
659 | } | ||
660 | return tick_broadcast_init_sysfs(); | ||
661 | } | ||
662 | |||
663 | static int __init clockevents_init_sysfs(void) | ||
664 | { | ||
665 | int err = subsys_system_register(&clockevents_subsys, NULL); | ||
666 | |||
667 | if (!err) | ||
668 | err = tick_init_sysfs(); | ||
669 | return err; | ||
670 | } | ||
671 | device_initcall(clockevents_init_sysfs); | ||
672 | #endif /* SYSFS */ | ||
673 | |||
674 | #endif /* GENERIC_CLOCK_EVENTS */ | ||
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index c9583382141a..e713ef7d19a7 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -31,6 +31,8 @@ | |||
31 | #include <linux/tick.h> | 31 | #include <linux/tick.h> |
32 | #include <linux/kthread.h> | 32 | #include <linux/kthread.h> |
33 | 33 | ||
34 | #include "tick-internal.h" | ||
35 | |||
34 | void timecounter_init(struct timecounter *tc, | 36 | void timecounter_init(struct timecounter *tc, |
35 | const struct cyclecounter *cc, | 37 | const struct cyclecounter *cc, |
36 | u64 start_tstamp) | 38 | u64 start_tstamp) |
@@ -174,7 +176,7 @@ clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec) | |||
174 | static struct clocksource *curr_clocksource; | 176 | static struct clocksource *curr_clocksource; |
175 | static LIST_HEAD(clocksource_list); | 177 | static LIST_HEAD(clocksource_list); |
176 | static DEFINE_MUTEX(clocksource_mutex); | 178 | static DEFINE_MUTEX(clocksource_mutex); |
177 | static char override_name[32]; | 179 | static char override_name[CS_NAME_LEN]; |
178 | static int finished_booting; | 180 | static int finished_booting; |
179 | 181 | ||
180 | #ifdef CONFIG_CLOCKSOURCE_WATCHDOG | 182 | #ifdef CONFIG_CLOCKSOURCE_WATCHDOG |
@@ -388,28 +390,17 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs) | |||
388 | 390 | ||
389 | static void clocksource_dequeue_watchdog(struct clocksource *cs) | 391 | static void clocksource_dequeue_watchdog(struct clocksource *cs) |
390 | { | 392 | { |
391 | struct clocksource *tmp; | ||
392 | unsigned long flags; | 393 | unsigned long flags; |
393 | 394 | ||
394 | spin_lock_irqsave(&watchdog_lock, flags); | 395 | spin_lock_irqsave(&watchdog_lock, flags); |
395 | if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { | 396 | if (cs != watchdog) { |
396 | /* cs is a watched clocksource. */ | 397 | if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { |
397 | list_del_init(&cs->wd_list); | 398 | /* cs is a watched clocksource. */ |
398 | } else if (cs == watchdog) { | 399 | list_del_init(&cs->wd_list); |
399 | /* Reset watchdog cycles */ | 400 | /* Check if the watchdog timer needs to be stopped. */ |
400 | clocksource_reset_watchdog(); | 401 | clocksource_stop_watchdog(); |
401 | /* Current watchdog is removed. Find an alternative. */ | ||
402 | watchdog = NULL; | ||
403 | list_for_each_entry(tmp, &clocksource_list, list) { | ||
404 | if (tmp == cs || tmp->flags & CLOCK_SOURCE_MUST_VERIFY) | ||
405 | continue; | ||
406 | if (!watchdog || tmp->rating > watchdog->rating) | ||
407 | watchdog = tmp; | ||
408 | } | 402 | } |
409 | } | 403 | } |
410 | cs->flags &= ~CLOCK_SOURCE_WATCHDOG; | ||
411 | /* Check if the watchdog timer needs to be stopped. */ | ||
412 | clocksource_stop_watchdog(); | ||
413 | spin_unlock_irqrestore(&watchdog_lock, flags); | 404 | spin_unlock_irqrestore(&watchdog_lock, flags); |
414 | } | 405 | } |
415 | 406 | ||
@@ -439,6 +430,11 @@ static int clocksource_watchdog_kthread(void *data) | |||
439 | return 0; | 430 | return 0; |
440 | } | 431 | } |
441 | 432 | ||
433 | static bool clocksource_is_watchdog(struct clocksource *cs) | ||
434 | { | ||
435 | return cs == watchdog; | ||
436 | } | ||
437 | |||
442 | #else /* CONFIG_CLOCKSOURCE_WATCHDOG */ | 438 | #else /* CONFIG_CLOCKSOURCE_WATCHDOG */ |
443 | 439 | ||
444 | static void clocksource_enqueue_watchdog(struct clocksource *cs) | 440 | static void clocksource_enqueue_watchdog(struct clocksource *cs) |
@@ -450,6 +446,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs) | |||
450 | static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { } | 446 | static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { } |
451 | static inline void clocksource_resume_watchdog(void) { } | 447 | static inline void clocksource_resume_watchdog(void) { } |
452 | static inline int clocksource_watchdog_kthread(void *data) { return 0; } | 448 | static inline int clocksource_watchdog_kthread(void *data) { return 0; } |
449 | static bool clocksource_is_watchdog(struct clocksource *cs) { return false; } | ||
453 | 450 | ||
454 | #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */ | 451 | #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */ |
455 | 452 | ||
@@ -553,24 +550,42 @@ static u64 clocksource_max_deferment(struct clocksource *cs) | |||
553 | 550 | ||
554 | #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET | 551 | #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET |
555 | 552 | ||
556 | /** | 553 | static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur) |
557 | * clocksource_select - Select the best clocksource available | ||
558 | * | ||
559 | * Private function. Must hold clocksource_mutex when called. | ||
560 | * | ||
561 | * Select the clocksource with the best rating, or the clocksource, | ||
562 | * which is selected by userspace override. | ||
563 | */ | ||
564 | static void clocksource_select(void) | ||
565 | { | 554 | { |
566 | struct clocksource *best, *cs; | 555 | struct clocksource *cs; |
567 | 556 | ||
568 | if (!finished_booting || list_empty(&clocksource_list)) | 557 | if (!finished_booting || list_empty(&clocksource_list)) |
558 | return NULL; | ||
559 | |||
560 | /* | ||
561 | * We pick the clocksource with the highest rating. If oneshot | ||
562 | * mode is active, we pick the highres valid clocksource with | ||
563 | * the best rating. | ||
564 | */ | ||
565 | list_for_each_entry(cs, &clocksource_list, list) { | ||
566 | if (skipcur && cs == curr_clocksource) | ||
567 | continue; | ||
568 | if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES)) | ||
569 | continue; | ||
570 | return cs; | ||
571 | } | ||
572 | return NULL; | ||
573 | } | ||
574 | |||
575 | static void __clocksource_select(bool skipcur) | ||
576 | { | ||
577 | bool oneshot = tick_oneshot_mode_active(); | ||
578 | struct clocksource *best, *cs; | ||
579 | |||
580 | /* Find the best suitable clocksource */ | ||
581 | best = clocksource_find_best(oneshot, skipcur); | ||
582 | if (!best) | ||
569 | return; | 583 | return; |
570 | /* First clocksource on the list has the best rating. */ | 584 | |
571 | best = list_first_entry(&clocksource_list, struct clocksource, list); | ||
572 | /* Check for the override clocksource. */ | 585 | /* Check for the override clocksource. */ |
573 | list_for_each_entry(cs, &clocksource_list, list) { | 586 | list_for_each_entry(cs, &clocksource_list, list) { |
587 | if (skipcur && cs == curr_clocksource) | ||
588 | continue; | ||
574 | if (strcmp(cs->name, override_name) != 0) | 589 | if (strcmp(cs->name, override_name) != 0) |
575 | continue; | 590 | continue; |
576 | /* | 591 | /* |
@@ -578,8 +593,7 @@ static void clocksource_select(void) | |||
578 | * capable clocksource if the tick code is in oneshot | 593 | * capable clocksource if the tick code is in oneshot |
579 | * mode (highres or nohz) | 594 | * mode (highres or nohz) |
580 | */ | 595 | */ |
581 | if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && | 596 | if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) { |
582 | tick_oneshot_mode_active()) { | ||
583 | /* Override clocksource cannot be used. */ | 597 | /* Override clocksource cannot be used. */ |
584 | printk(KERN_WARNING "Override clocksource %s is not " | 598 | printk(KERN_WARNING "Override clocksource %s is not " |
585 | "HRT compatible. Cannot switch while in " | 599 | "HRT compatible. Cannot switch while in " |
@@ -590,16 +604,35 @@ static void clocksource_select(void) | |||
590 | best = cs; | 604 | best = cs; |
591 | break; | 605 | break; |
592 | } | 606 | } |
593 | if (curr_clocksource != best) { | 607 | |
594 | printk(KERN_INFO "Switching to clocksource %s\n", best->name); | 608 | if (curr_clocksource != best && !timekeeping_notify(best)) { |
609 | pr_info("Switched to clocksource %s\n", best->name); | ||
595 | curr_clocksource = best; | 610 | curr_clocksource = best; |
596 | timekeeping_notify(curr_clocksource); | ||
597 | } | 611 | } |
598 | } | 612 | } |
599 | 613 | ||
614 | /** | ||
615 | * clocksource_select - Select the best clocksource available | ||
616 | * | ||
617 | * Private function. Must hold clocksource_mutex when called. | ||
618 | * | ||
619 | * Select the clocksource with the best rating, or the clocksource, | ||
620 | * which is selected by userspace override. | ||
621 | */ | ||
622 | static void clocksource_select(void) | ||
623 | { | ||
624 | return __clocksource_select(false); | ||
625 | } | ||
626 | |||
627 | static void clocksource_select_fallback(void) | ||
628 | { | ||
629 | return __clocksource_select(true); | ||
630 | } | ||
631 | |||
600 | #else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */ | 632 | #else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */ |
601 | 633 | ||
602 | static inline void clocksource_select(void) { } | 634 | static inline void clocksource_select(void) { } |
635 | static inline void clocksource_select_fallback(void) { } | ||
603 | 636 | ||
604 | #endif | 637 | #endif |
605 | 638 | ||
@@ -772,17 +805,42 @@ void clocksource_change_rating(struct clocksource *cs, int rating) | |||
772 | } | 805 | } |
773 | EXPORT_SYMBOL(clocksource_change_rating); | 806 | EXPORT_SYMBOL(clocksource_change_rating); |
774 | 807 | ||
808 | /* | ||
809 | * Unbind clocksource @cs. Called with clocksource_mutex held | ||
810 | */ | ||
811 | static int clocksource_unbind(struct clocksource *cs) | ||
812 | { | ||
813 | /* | ||
814 | * I really can't convince myself to support this on hardware | ||
815 | * designed by lobotomized monkeys. | ||
816 | */ | ||
817 | if (clocksource_is_watchdog(cs)) | ||
818 | return -EBUSY; | ||
819 | |||
820 | if (cs == curr_clocksource) { | ||
821 | /* Select and try to install a replacement clock source */ | ||
822 | clocksource_select_fallback(); | ||
823 | if (curr_clocksource == cs) | ||
824 | return -EBUSY; | ||
825 | } | ||
826 | clocksource_dequeue_watchdog(cs); | ||
827 | list_del_init(&cs->list); | ||
828 | return 0; | ||
829 | } | ||
830 | |||
775 | /** | 831 | /** |
776 | * clocksource_unregister - remove a registered clocksource | 832 | * clocksource_unregister - remove a registered clocksource |
777 | * @cs: clocksource to be unregistered | 833 | * @cs: clocksource to be unregistered |
778 | */ | 834 | */ |
779 | void clocksource_unregister(struct clocksource *cs) | 835 | int clocksource_unregister(struct clocksource *cs) |
780 | { | 836 | { |
837 | int ret = 0; | ||
838 | |||
781 | mutex_lock(&clocksource_mutex); | 839 | mutex_lock(&clocksource_mutex); |
782 | clocksource_dequeue_watchdog(cs); | 840 | if (!list_empty(&cs->list)) |
783 | list_del(&cs->list); | 841 | ret = clocksource_unbind(cs); |
784 | clocksource_select(); | ||
785 | mutex_unlock(&clocksource_mutex); | 842 | mutex_unlock(&clocksource_mutex); |
843 | return ret; | ||
786 | } | 844 | } |
787 | EXPORT_SYMBOL(clocksource_unregister); | 845 | EXPORT_SYMBOL(clocksource_unregister); |
788 | 846 | ||
@@ -808,6 +866,23 @@ sysfs_show_current_clocksources(struct device *dev, | |||
808 | return count; | 866 | return count; |
809 | } | 867 | } |
810 | 868 | ||
869 | size_t sysfs_get_uname(const char *buf, char *dst, size_t cnt) | ||
870 | { | ||
871 | size_t ret = cnt; | ||
872 | |||
873 | /* strings from sysfs write are not 0 terminated! */ | ||
874 | if (!cnt || cnt >= CS_NAME_LEN) | ||
875 | return -EINVAL; | ||
876 | |||
877 | /* strip of \n: */ | ||
878 | if (buf[cnt-1] == '\n') | ||
879 | cnt--; | ||
880 | if (cnt > 0) | ||
881 | memcpy(dst, buf, cnt); | ||
882 | dst[cnt] = 0; | ||
883 | return ret; | ||
884 | } | ||
885 | |||
811 | /** | 886 | /** |
812 | * sysfs_override_clocksource - interface for manually overriding clocksource | 887 | * sysfs_override_clocksource - interface for manually overriding clocksource |
813 | * @dev: unused | 888 | * @dev: unused |
@@ -822,22 +897,13 @@ static ssize_t sysfs_override_clocksource(struct device *dev, | |||
822 | struct device_attribute *attr, | 897 | struct device_attribute *attr, |
823 | const char *buf, size_t count) | 898 | const char *buf, size_t count) |
824 | { | 899 | { |
825 | size_t ret = count; | 900 | size_t ret; |
826 | |||
827 | /* strings from sysfs write are not 0 terminated! */ | ||
828 | if (count >= sizeof(override_name)) | ||
829 | return -EINVAL; | ||
830 | |||
831 | /* strip of \n: */ | ||
832 | if (buf[count-1] == '\n') | ||
833 | count--; | ||
834 | 901 | ||
835 | mutex_lock(&clocksource_mutex); | 902 | mutex_lock(&clocksource_mutex); |
836 | 903 | ||
837 | if (count > 0) | 904 | ret = sysfs_get_uname(buf, override_name, count); |
838 | memcpy(override_name, buf, count); | 905 | if (ret >= 0) |
839 | override_name[count] = 0; | 906 | clocksource_select(); |
840 | clocksource_select(); | ||
841 | 907 | ||
842 | mutex_unlock(&clocksource_mutex); | 908 | mutex_unlock(&clocksource_mutex); |
843 | 909 | ||
@@ -845,6 +911,40 @@ static ssize_t sysfs_override_clocksource(struct device *dev, | |||
845 | } | 911 | } |
846 | 912 | ||
847 | /** | 913 | /** |
914 | * sysfs_unbind_current_clocksource - interface for manually unbinding clocksource | ||
915 | * @dev: unused | ||
916 | * @attr: unused | ||
917 | * @buf: unused | ||
918 | * @count: length of buffer | ||
919 | * | ||
920 | * Takes input from sysfs interface for manually unbinding a clocksource. | ||
921 | */ | ||
922 | static ssize_t sysfs_unbind_clocksource(struct device *dev, | ||
923 | struct device_attribute *attr, | ||
924 | const char *buf, size_t count) | ||
925 | { | ||
926 | struct clocksource *cs; | ||
927 | char name[CS_NAME_LEN]; | ||
928 | size_t ret; | ||
929 | |||
930 | ret = sysfs_get_uname(buf, name, count); | ||
931 | if (ret < 0) | ||
932 | return ret; | ||
933 | |||
934 | ret = -ENODEV; | ||
935 | mutex_lock(&clocksource_mutex); | ||
936 | list_for_each_entry(cs, &clocksource_list, list) { | ||
937 | if (strcmp(cs->name, name)) | ||
938 | continue; | ||
939 | ret = clocksource_unbind(cs); | ||
940 | break; | ||
941 | } | ||
942 | mutex_unlock(&clocksource_mutex); | ||
943 | |||
944 | return ret ? ret : count; | ||
945 | } | ||
946 | |||
947 | /** | ||
848 | * sysfs_show_available_clocksources - sysfs interface for listing clocksource | 948 | * sysfs_show_available_clocksources - sysfs interface for listing clocksource |
849 | * @dev: unused | 949 | * @dev: unused |
850 | * @attr: unused | 950 | * @attr: unused |
@@ -886,6 +986,8 @@ sysfs_show_available_clocksources(struct device *dev, | |||
886 | static DEVICE_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources, | 986 | static DEVICE_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources, |
887 | sysfs_override_clocksource); | 987 | sysfs_override_clocksource); |
888 | 988 | ||
989 | static DEVICE_ATTR(unbind_clocksource, 0200, NULL, sysfs_unbind_clocksource); | ||
990 | |||
889 | static DEVICE_ATTR(available_clocksource, 0444, | 991 | static DEVICE_ATTR(available_clocksource, 0444, |
890 | sysfs_show_available_clocksources, NULL); | 992 | sysfs_show_available_clocksources, NULL); |
891 | 993 | ||
@@ -910,6 +1012,9 @@ static int __init init_clocksource_sysfs(void) | |||
910 | &device_clocksource, | 1012 | &device_clocksource, |
911 | &dev_attr_current_clocksource); | 1013 | &dev_attr_current_clocksource); |
912 | if (!error) | 1014 | if (!error) |
1015 | error = device_create_file(&device_clocksource, | ||
1016 | &dev_attr_unbind_clocksource); | ||
1017 | if (!error) | ||
913 | error = device_create_file( | 1018 | error = device_create_file( |
914 | &device_clocksource, | 1019 | &device_clocksource, |
915 | &dev_attr_available_clocksource); | 1020 | &dev_attr_available_clocksource); |
diff --git a/arch/arm/kernel/sched_clock.c b/kernel/time/sched_clock.c index e8edcaa0e432..a326f27d7f09 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/kernel/time/sched_clock.c | |||
@@ -13,8 +13,7 @@ | |||
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/syscore_ops.h> | 14 | #include <linux/syscore_ops.h> |
15 | #include <linux/timer.h> | 15 | #include <linux/timer.h> |
16 | 16 | #include <linux/sched_clock.h> | |
17 | #include <asm/sched_clock.h> | ||
18 | 17 | ||
19 | struct clock_data { | 18 | struct clock_data { |
20 | u64 epoch_ns; | 19 | u64 epoch_ns; |
@@ -24,7 +23,6 @@ struct clock_data { | |||
24 | u32 mult; | 23 | u32 mult; |
25 | u32 shift; | 24 | u32 shift; |
26 | bool suspended; | 25 | bool suspended; |
27 | bool needs_suspend; | ||
28 | }; | 26 | }; |
29 | 27 | ||
30 | static void sched_clock_poll(unsigned long wrap_ticks); | 28 | static void sched_clock_poll(unsigned long wrap_ticks); |
@@ -51,10 +49,11 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift) | |||
51 | return (cyc * mult) >> shift; | 49 | return (cyc * mult) >> shift; |
52 | } | 50 | } |
53 | 51 | ||
54 | static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask) | 52 | static unsigned long long notrace sched_clock_32(void) |
55 | { | 53 | { |
56 | u64 epoch_ns; | 54 | u64 epoch_ns; |
57 | u32 epoch_cyc; | 55 | u32 epoch_cyc; |
56 | u32 cyc; | ||
58 | 57 | ||
59 | if (cd.suspended) | 58 | if (cd.suspended) |
60 | return cd.epoch_ns; | 59 | return cd.epoch_ns; |
@@ -73,7 +72,9 @@ static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask) | |||
73 | smp_rmb(); | 72 | smp_rmb(); |
74 | } while (epoch_cyc != cd.epoch_cyc_copy); | 73 | } while (epoch_cyc != cd.epoch_cyc_copy); |
75 | 74 | ||
76 | return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift); | 75 | cyc = read_sched_clock(); |
76 | cyc = (cyc - epoch_cyc) & sched_clock_mask; | ||
77 | return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift); | ||
77 | } | 78 | } |
78 | 79 | ||
79 | /* | 80 | /* |
@@ -165,12 +166,6 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate) | |||
165 | pr_debug("Registered %pF as sched_clock source\n", read); | 166 | pr_debug("Registered %pF as sched_clock source\n", read); |
166 | } | 167 | } |
167 | 168 | ||
168 | static unsigned long long notrace sched_clock_32(void) | ||
169 | { | ||
170 | u32 cyc = read_sched_clock(); | ||
171 | return cyc_to_sched_clock(cyc, sched_clock_mask); | ||
172 | } | ||
173 | |||
174 | unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32; | 169 | unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32; |
175 | 170 | ||
176 | unsigned long long notrace sched_clock(void) | 171 | unsigned long long notrace sched_clock(void) |
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 20d6fba70652..6d3f91631de6 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/profile.h> | 19 | #include <linux/profile.h> |
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | #include <linux/smp.h> | 21 | #include <linux/smp.h> |
22 | #include <linux/module.h> | ||
22 | 23 | ||
23 | #include "tick-internal.h" | 24 | #include "tick-internal.h" |
24 | 25 | ||
@@ -29,6 +30,7 @@ | |||
29 | 30 | ||
30 | static struct tick_device tick_broadcast_device; | 31 | static struct tick_device tick_broadcast_device; |
31 | static cpumask_var_t tick_broadcast_mask; | 32 | static cpumask_var_t tick_broadcast_mask; |
33 | static cpumask_var_t tick_broadcast_on; | ||
32 | static cpumask_var_t tmpmask; | 34 | static cpumask_var_t tmpmask; |
33 | static DEFINE_RAW_SPINLOCK(tick_broadcast_lock); | 35 | static DEFINE_RAW_SPINLOCK(tick_broadcast_lock); |
34 | static int tick_broadcast_force; | 36 | static int tick_broadcast_force; |
@@ -64,17 +66,34 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc) | |||
64 | /* | 66 | /* |
65 | * Check, if the device can be utilized as broadcast device: | 67 | * Check, if the device can be utilized as broadcast device: |
66 | */ | 68 | */ |
67 | int tick_check_broadcast_device(struct clock_event_device *dev) | 69 | static bool tick_check_broadcast_device(struct clock_event_device *curdev, |
70 | struct clock_event_device *newdev) | ||
71 | { | ||
72 | if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) || | ||
73 | (newdev->features & CLOCK_EVT_FEAT_C3STOP)) | ||
74 | return false; | ||
75 | |||
76 | if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT && | ||
77 | !(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) | ||
78 | return false; | ||
79 | |||
80 | return !curdev || newdev->rating > curdev->rating; | ||
81 | } | ||
82 | |||
83 | /* | ||
84 | * Conditionally install/replace broadcast device | ||
85 | */ | ||
86 | void tick_install_broadcast_device(struct clock_event_device *dev) | ||
68 | { | 87 | { |
69 | struct clock_event_device *cur = tick_broadcast_device.evtdev; | 88 | struct clock_event_device *cur = tick_broadcast_device.evtdev; |
70 | 89 | ||
71 | if ((dev->features & CLOCK_EVT_FEAT_DUMMY) || | 90 | if (!tick_check_broadcast_device(cur, dev)) |
72 | (tick_broadcast_device.evtdev && | 91 | return; |
73 | tick_broadcast_device.evtdev->rating >= dev->rating) || | ||
74 | (dev->features & CLOCK_EVT_FEAT_C3STOP)) | ||
75 | return 0; | ||
76 | 92 | ||
77 | clockevents_exchange_device(tick_broadcast_device.evtdev, dev); | 93 | if (!try_module_get(dev->owner)) |
94 | return; | ||
95 | |||
96 | clockevents_exchange_device(cur, dev); | ||
78 | if (cur) | 97 | if (cur) |
79 | cur->event_handler = clockevents_handle_noop; | 98 | cur->event_handler = clockevents_handle_noop; |
80 | tick_broadcast_device.evtdev = dev; | 99 | tick_broadcast_device.evtdev = dev; |
@@ -90,7 +109,6 @@ int tick_check_broadcast_device(struct clock_event_device *dev) | |||
90 | */ | 109 | */ |
91 | if (dev->features & CLOCK_EVT_FEAT_ONESHOT) | 110 | if (dev->features & CLOCK_EVT_FEAT_ONESHOT) |
92 | tick_clock_notify(); | 111 | tick_clock_notify(); |
93 | return 1; | ||
94 | } | 112 | } |
95 | 113 | ||
96 | /* | 114 | /* |
@@ -123,8 +141,9 @@ static void tick_device_setup_broadcast_func(struct clock_event_device *dev) | |||
123 | */ | 141 | */ |
124 | int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) | 142 | int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) |
125 | { | 143 | { |
144 | struct clock_event_device *bc = tick_broadcast_device.evtdev; | ||
126 | unsigned long flags; | 145 | unsigned long flags; |
127 | int ret = 0; | 146 | int ret; |
128 | 147 | ||
129 | raw_spin_lock_irqsave(&tick_broadcast_lock, flags); | 148 | raw_spin_lock_irqsave(&tick_broadcast_lock, flags); |
130 | 149 | ||
@@ -138,20 +157,59 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) | |||
138 | dev->event_handler = tick_handle_periodic; | 157 | dev->event_handler = tick_handle_periodic; |
139 | tick_device_setup_broadcast_func(dev); | 158 | tick_device_setup_broadcast_func(dev); |
140 | cpumask_set_cpu(cpu, tick_broadcast_mask); | 159 | cpumask_set_cpu(cpu, tick_broadcast_mask); |
141 | tick_broadcast_start_periodic(tick_broadcast_device.evtdev); | 160 | tick_broadcast_start_periodic(bc); |
142 | ret = 1; | 161 | ret = 1; |
143 | } else { | 162 | } else { |
144 | /* | 163 | /* |
145 | * When the new device is not affected by the stop | 164 | * Clear the broadcast bit for this cpu if the |
146 | * feature and the cpu is marked in the broadcast mask | 165 | * device is not power state affected. |
147 | * then clear the broadcast bit. | ||
148 | */ | 166 | */ |
149 | if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) { | 167 | if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) |
150 | int cpu = smp_processor_id(); | ||
151 | cpumask_clear_cpu(cpu, tick_broadcast_mask); | 168 | cpumask_clear_cpu(cpu, tick_broadcast_mask); |
152 | tick_broadcast_clear_oneshot(cpu); | 169 | else |
153 | } else { | ||
154 | tick_device_setup_broadcast_func(dev); | 170 | tick_device_setup_broadcast_func(dev); |
171 | |||
172 | /* | ||
173 | * Clear the broadcast bit if the CPU is not in | ||
174 | * periodic broadcast on state. | ||
175 | */ | ||
176 | if (!cpumask_test_cpu(cpu, tick_broadcast_on)) | ||
177 | cpumask_clear_cpu(cpu, tick_broadcast_mask); | ||
178 | |||
179 | switch (tick_broadcast_device.mode) { | ||
180 | case TICKDEV_MODE_ONESHOT: | ||
181 | /* | ||
182 | * If the system is in oneshot mode we can | ||
183 | * unconditionally clear the oneshot mask bit, | ||
184 | * because the CPU is running and therefore | ||
185 | * not in an idle state which causes the power | ||
186 | * state affected device to stop. Let the | ||
187 | * caller initialize the device. | ||
188 | */ | ||
189 | tick_broadcast_clear_oneshot(cpu); | ||
190 | ret = 0; | ||
191 | break; | ||
192 | |||
193 | case TICKDEV_MODE_PERIODIC: | ||
194 | /* | ||
195 | * If the system is in periodic mode, check | ||
196 | * whether the broadcast device can be | ||
197 | * switched off now. | ||
198 | */ | ||
199 | if (cpumask_empty(tick_broadcast_mask) && bc) | ||
200 | clockevents_shutdown(bc); | ||
201 | /* | ||
202 | * If we kept the cpu in the broadcast mask, | ||
203 | * tell the caller to leave the per cpu device | ||
204 | * in shutdown state. The periodic interrupt | ||
205 | * is delivered by the broadcast device. | ||
206 | */ | ||
207 | ret = cpumask_test_cpu(cpu, tick_broadcast_mask); | ||
208 | break; | ||
209 | default: | ||
210 | /* Nothing to do */ | ||
211 | ret = 0; | ||
212 | break; | ||
155 | } | 213 | } |
156 | } | 214 | } |
157 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 215 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
@@ -281,6 +339,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason) | |||
281 | switch (*reason) { | 339 | switch (*reason) { |
282 | case CLOCK_EVT_NOTIFY_BROADCAST_ON: | 340 | case CLOCK_EVT_NOTIFY_BROADCAST_ON: |
283 | case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: | 341 | case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: |
342 | cpumask_set_cpu(cpu, tick_broadcast_on); | ||
284 | if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) { | 343 | if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) { |
285 | if (tick_broadcast_device.mode == | 344 | if (tick_broadcast_device.mode == |
286 | TICKDEV_MODE_PERIODIC) | 345 | TICKDEV_MODE_PERIODIC) |
@@ -290,8 +349,12 @@ static void tick_do_broadcast_on_off(unsigned long *reason) | |||
290 | tick_broadcast_force = 1; | 349 | tick_broadcast_force = 1; |
291 | break; | 350 | break; |
292 | case CLOCK_EVT_NOTIFY_BROADCAST_OFF: | 351 | case CLOCK_EVT_NOTIFY_BROADCAST_OFF: |
293 | if (!tick_broadcast_force && | 352 | if (tick_broadcast_force) |
294 | cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) { | 353 | break; |
354 | cpumask_clear_cpu(cpu, tick_broadcast_on); | ||
355 | if (!tick_device_is_functional(dev)) | ||
356 | break; | ||
357 | if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) { | ||
295 | if (tick_broadcast_device.mode == | 358 | if (tick_broadcast_device.mode == |
296 | TICKDEV_MODE_PERIODIC) | 359 | TICKDEV_MODE_PERIODIC) |
297 | tick_setup_periodic(dev, 0); | 360 | tick_setup_periodic(dev, 0); |
@@ -349,6 +412,7 @@ void tick_shutdown_broadcast(unsigned int *cpup) | |||
349 | 412 | ||
350 | bc = tick_broadcast_device.evtdev; | 413 | bc = tick_broadcast_device.evtdev; |
351 | cpumask_clear_cpu(cpu, tick_broadcast_mask); | 414 | cpumask_clear_cpu(cpu, tick_broadcast_mask); |
415 | cpumask_clear_cpu(cpu, tick_broadcast_on); | ||
352 | 416 | ||
353 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { | 417 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { |
354 | if (bc && cpumask_empty(tick_broadcast_mask)) | 418 | if (bc && cpumask_empty(tick_broadcast_mask)) |
@@ -475,7 +539,15 @@ void tick_check_oneshot_broadcast(int cpu) | |||
475 | if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) { | 539 | if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) { |
476 | struct tick_device *td = &per_cpu(tick_cpu_device, cpu); | 540 | struct tick_device *td = &per_cpu(tick_cpu_device, cpu); |
477 | 541 | ||
478 | clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT); | 542 | /* |
543 | * We might be in the middle of switching over from | ||
544 | * periodic to oneshot. If the CPU has not yet | ||
545 | * switched over, leave the device alone. | ||
546 | */ | ||
547 | if (td->mode == TICKDEV_MODE_ONESHOT) { | ||
548 | clockevents_set_mode(td->evtdev, | ||
549 | CLOCK_EVT_MODE_ONESHOT); | ||
550 | } | ||
479 | } | 551 | } |
480 | } | 552 | } |
481 | 553 | ||
@@ -522,6 +594,13 @@ again: | |||
522 | cpumask_clear(tick_broadcast_force_mask); | 594 | cpumask_clear(tick_broadcast_force_mask); |
523 | 595 | ||
524 | /* | 596 | /* |
597 | * Sanity check. Catch the case where we try to broadcast to | ||
598 | * offline cpus. | ||
599 | */ | ||
600 | if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask))) | ||
601 | cpumask_and(tmpmask, tmpmask, cpu_online_mask); | ||
602 | |||
603 | /* | ||
525 | * Wakeup the cpus which have an expired event. | 604 | * Wakeup the cpus which have an expired event. |
526 | */ | 605 | */ |
527 | tick_do_broadcast(tmpmask); | 606 | tick_do_broadcast(tmpmask); |
@@ -761,10 +840,12 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup) | |||
761 | raw_spin_lock_irqsave(&tick_broadcast_lock, flags); | 840 | raw_spin_lock_irqsave(&tick_broadcast_lock, flags); |
762 | 841 | ||
763 | /* | 842 | /* |
764 | * Clear the broadcast mask flag for the dead cpu, but do not | 843 | * Clear the broadcast masks for the dead cpu, but do not stop |
765 | * stop the broadcast device! | 844 | * the broadcast device! |
766 | */ | 845 | */ |
767 | cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask); | 846 | cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask); |
847 | cpumask_clear_cpu(cpu, tick_broadcast_pending_mask); | ||
848 | cpumask_clear_cpu(cpu, tick_broadcast_force_mask); | ||
768 | 849 | ||
769 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 850 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
770 | } | 851 | } |
@@ -792,6 +873,7 @@ bool tick_broadcast_oneshot_available(void) | |||
792 | void __init tick_broadcast_init(void) | 873 | void __init tick_broadcast_init(void) |
793 | { | 874 | { |
794 | zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT); | 875 | zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT); |
876 | zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT); | ||
795 | zalloc_cpumask_var(&tmpmask, GFP_NOWAIT); | 877 | zalloc_cpumask_var(&tmpmask, GFP_NOWAIT); |
796 | #ifdef CONFIG_TICK_ONESHOT | 878 | #ifdef CONFIG_TICK_ONESHOT |
797 | zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT); | 879 | zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT); |
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 5d3fb100bc06..64522ecdfe0e 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/percpu.h> | 18 | #include <linux/percpu.h> |
19 | #include <linux/profile.h> | 19 | #include <linux/profile.h> |
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | #include <linux/module.h> | ||
21 | 22 | ||
22 | #include <asm/irq_regs.h> | 23 | #include <asm/irq_regs.h> |
23 | 24 | ||
@@ -33,7 +34,6 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device); | |||
33 | ktime_t tick_next_period; | 34 | ktime_t tick_next_period; |
34 | ktime_t tick_period; | 35 | ktime_t tick_period; |
35 | int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; | 36 | int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; |
36 | static DEFINE_RAW_SPINLOCK(tick_device_lock); | ||
37 | 37 | ||
38 | /* | 38 | /* |
39 | * Debugging: see timer_list.c | 39 | * Debugging: see timer_list.c |
@@ -194,7 +194,8 @@ static void tick_setup_device(struct tick_device *td, | |||
194 | * When global broadcasting is active, check if the current | 194 | * When global broadcasting is active, check if the current |
195 | * device is registered as a placeholder for broadcast mode. | 195 | * device is registered as a placeholder for broadcast mode. |
196 | * This allows us to handle this x86 misfeature in a generic | 196 | * This allows us to handle this x86 misfeature in a generic |
197 | * way. | 197 | * way. This function also returns !=0 when we keep the |
198 | * current active broadcast state for this CPU. | ||
198 | */ | 199 | */ |
199 | if (tick_device_uses_broadcast(newdev, cpu)) | 200 | if (tick_device_uses_broadcast(newdev, cpu)) |
200 | return; | 201 | return; |
@@ -205,17 +206,75 @@ static void tick_setup_device(struct tick_device *td, | |||
205 | tick_setup_oneshot(newdev, handler, next_event); | 206 | tick_setup_oneshot(newdev, handler, next_event); |
206 | } | 207 | } |
207 | 208 | ||
209 | void tick_install_replacement(struct clock_event_device *newdev) | ||
210 | { | ||
211 | struct tick_device *td = &__get_cpu_var(tick_cpu_device); | ||
212 | int cpu = smp_processor_id(); | ||
213 | |||
214 | clockevents_exchange_device(td->evtdev, newdev); | ||
215 | tick_setup_device(td, newdev, cpu, cpumask_of(cpu)); | ||
216 | if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) | ||
217 | tick_oneshot_notify(); | ||
218 | } | ||
219 | |||
220 | static bool tick_check_percpu(struct clock_event_device *curdev, | ||
221 | struct clock_event_device *newdev, int cpu) | ||
222 | { | ||
223 | if (!cpumask_test_cpu(cpu, newdev->cpumask)) | ||
224 | return false; | ||
225 | if (cpumask_equal(newdev->cpumask, cpumask_of(cpu))) | ||
226 | return true; | ||
227 | /* Check if irq affinity can be set */ | ||
228 | if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq)) | ||
229 | return false; | ||
230 | /* Prefer an existing cpu local device */ | ||
231 | if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu))) | ||
232 | return false; | ||
233 | return true; | ||
234 | } | ||
235 | |||
236 | static bool tick_check_preferred(struct clock_event_device *curdev, | ||
237 | struct clock_event_device *newdev) | ||
238 | { | ||
239 | /* Prefer oneshot capable device */ | ||
240 | if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) { | ||
241 | if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT)) | ||
242 | return false; | ||
243 | if (tick_oneshot_mode_active()) | ||
244 | return false; | ||
245 | } | ||
246 | |||
247 | /* | ||
248 | * Use the higher rated one, but prefer a CPU local device with a lower | ||
249 | * rating than a non-CPU local device | ||
250 | */ | ||
251 | return !curdev || | ||
252 | newdev->rating > curdev->rating || | ||
253 | !cpumask_equal(curdev->cpumask, newdev->cpumask); | ||
254 | } | ||
255 | |||
256 | /* | ||
257 | * Check whether the new device is a better fit than curdev. curdev | ||
258 | * can be NULL ! | ||
259 | */ | ||
260 | bool tick_check_replacement(struct clock_event_device *curdev, | ||
261 | struct clock_event_device *newdev) | ||
262 | { | ||
263 | if (tick_check_percpu(curdev, newdev, smp_processor_id())) | ||
264 | return false; | ||
265 | |||
266 | return tick_check_preferred(curdev, newdev); | ||
267 | } | ||
268 | |||
208 | /* | 269 | /* |
209 | * Check, if the new registered device should be used. | 270 | * Check, if the new registered device should be used. Called with |
271 | * clockevents_lock held and interrupts disabled. | ||
210 | */ | 272 | */ |
211 | static int tick_check_new_device(struct clock_event_device *newdev) | 273 | void tick_check_new_device(struct clock_event_device *newdev) |
212 | { | 274 | { |
213 | struct clock_event_device *curdev; | 275 | struct clock_event_device *curdev; |
214 | struct tick_device *td; | 276 | struct tick_device *td; |
215 | int cpu, ret = NOTIFY_OK; | 277 | int cpu; |
216 | unsigned long flags; | ||
217 | |||
218 | raw_spin_lock_irqsave(&tick_device_lock, flags); | ||
219 | 278 | ||
220 | cpu = smp_processor_id(); | 279 | cpu = smp_processor_id(); |
221 | if (!cpumask_test_cpu(cpu, newdev->cpumask)) | 280 | if (!cpumask_test_cpu(cpu, newdev->cpumask)) |
@@ -225,40 +284,15 @@ static int tick_check_new_device(struct clock_event_device *newdev) | |||
225 | curdev = td->evtdev; | 284 | curdev = td->evtdev; |
226 | 285 | ||
227 | /* cpu local device ? */ | 286 | /* cpu local device ? */ |
228 | if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) { | 287 | if (!tick_check_percpu(curdev, newdev, cpu)) |
229 | 288 | goto out_bc; | |
230 | /* | ||
231 | * If the cpu affinity of the device interrupt can not | ||
232 | * be set, ignore it. | ||
233 | */ | ||
234 | if (!irq_can_set_affinity(newdev->irq)) | ||
235 | goto out_bc; | ||
236 | 289 | ||
237 | /* | 290 | /* Preference decision */ |
238 | * If we have a cpu local device already, do not replace it | 291 | if (!tick_check_preferred(curdev, newdev)) |
239 | * by a non cpu local device | 292 | goto out_bc; |
240 | */ | ||
241 | if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu))) | ||
242 | goto out_bc; | ||
243 | } | ||
244 | 293 | ||
245 | /* | 294 | if (!try_module_get(newdev->owner)) |
246 | * If we have an active device, then check the rating and the oneshot | 295 | return; |
247 | * feature. | ||
248 | */ | ||
249 | if (curdev) { | ||
250 | /* | ||
251 | * Prefer one shot capable devices ! | ||
252 | */ | ||
253 | if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) && | ||
254 | !(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) | ||
255 | goto out_bc; | ||
256 | /* | ||
257 | * Check the rating | ||
258 | */ | ||
259 | if (curdev->rating >= newdev->rating) | ||
260 | goto out_bc; | ||
261 | } | ||
262 | 296 | ||
263 | /* | 297 | /* |
264 | * Replace the eventually existing device by the new | 298 | * Replace the eventually existing device by the new |
@@ -273,20 +307,13 @@ static int tick_check_new_device(struct clock_event_device *newdev) | |||
273 | tick_setup_device(td, newdev, cpu, cpumask_of(cpu)); | 307 | tick_setup_device(td, newdev, cpu, cpumask_of(cpu)); |
274 | if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) | 308 | if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) |
275 | tick_oneshot_notify(); | 309 | tick_oneshot_notify(); |
276 | 310 | return; | |
277 | raw_spin_unlock_irqrestore(&tick_device_lock, flags); | ||
278 | return NOTIFY_STOP; | ||
279 | 311 | ||
280 | out_bc: | 312 | out_bc: |
281 | /* | 313 | /* |
282 | * Can the new device be used as a broadcast device ? | 314 | * Can the new device be used as a broadcast device ? |
283 | */ | 315 | */ |
284 | if (tick_check_broadcast_device(newdev)) | 316 | tick_install_broadcast_device(newdev); |
285 | ret = NOTIFY_STOP; | ||
286 | |||
287 | raw_spin_unlock_irqrestore(&tick_device_lock, flags); | ||
288 | |||
289 | return ret; | ||
290 | } | 317 | } |
291 | 318 | ||
292 | /* | 319 | /* |
@@ -294,7 +321,7 @@ out_bc: | |||
294 | * | 321 | * |
295 | * Called with interrupts disabled. | 322 | * Called with interrupts disabled. |
296 | */ | 323 | */ |
297 | static void tick_handover_do_timer(int *cpup) | 324 | void tick_handover_do_timer(int *cpup) |
298 | { | 325 | { |
299 | if (*cpup == tick_do_timer_cpu) { | 326 | if (*cpup == tick_do_timer_cpu) { |
300 | int cpu = cpumask_first(cpu_online_mask); | 327 | int cpu = cpumask_first(cpu_online_mask); |
@@ -311,13 +338,11 @@ static void tick_handover_do_timer(int *cpup) | |||
311 | * access the hardware device itself. | 338 | * access the hardware device itself. |
312 | * We just set the mode and remove it from the lists. | 339 | * We just set the mode and remove it from the lists. |
313 | */ | 340 | */ |
314 | static void tick_shutdown(unsigned int *cpup) | 341 | void tick_shutdown(unsigned int *cpup) |
315 | { | 342 | { |
316 | struct tick_device *td = &per_cpu(tick_cpu_device, *cpup); | 343 | struct tick_device *td = &per_cpu(tick_cpu_device, *cpup); |
317 | struct clock_event_device *dev = td->evtdev; | 344 | struct clock_event_device *dev = td->evtdev; |
318 | unsigned long flags; | ||
319 | 345 | ||
320 | raw_spin_lock_irqsave(&tick_device_lock, flags); | ||
321 | td->mode = TICKDEV_MODE_PERIODIC; | 346 | td->mode = TICKDEV_MODE_PERIODIC; |
322 | if (dev) { | 347 | if (dev) { |
323 | /* | 348 | /* |
@@ -329,26 +354,20 @@ static void tick_shutdown(unsigned int *cpup) | |||
329 | dev->event_handler = clockevents_handle_noop; | 354 | dev->event_handler = clockevents_handle_noop; |
330 | td->evtdev = NULL; | 355 | td->evtdev = NULL; |
331 | } | 356 | } |
332 | raw_spin_unlock_irqrestore(&tick_device_lock, flags); | ||
333 | } | 357 | } |
334 | 358 | ||
335 | static void tick_suspend(void) | 359 | void tick_suspend(void) |
336 | { | 360 | { |
337 | struct tick_device *td = &__get_cpu_var(tick_cpu_device); | 361 | struct tick_device *td = &__get_cpu_var(tick_cpu_device); |
338 | unsigned long flags; | ||
339 | 362 | ||
340 | raw_spin_lock_irqsave(&tick_device_lock, flags); | ||
341 | clockevents_shutdown(td->evtdev); | 363 | clockevents_shutdown(td->evtdev); |
342 | raw_spin_unlock_irqrestore(&tick_device_lock, flags); | ||
343 | } | 364 | } |
344 | 365 | ||
345 | static void tick_resume(void) | 366 | void tick_resume(void) |
346 | { | 367 | { |
347 | struct tick_device *td = &__get_cpu_var(tick_cpu_device); | 368 | struct tick_device *td = &__get_cpu_var(tick_cpu_device); |
348 | unsigned long flags; | ||
349 | int broadcast = tick_resume_broadcast(); | 369 | int broadcast = tick_resume_broadcast(); |
350 | 370 | ||
351 | raw_spin_lock_irqsave(&tick_device_lock, flags); | ||
352 | clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME); | 371 | clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME); |
353 | 372 | ||
354 | if (!broadcast) { | 373 | if (!broadcast) { |
@@ -357,68 +376,12 @@ static void tick_resume(void) | |||
357 | else | 376 | else |
358 | tick_resume_oneshot(); | 377 | tick_resume_oneshot(); |
359 | } | 378 | } |
360 | raw_spin_unlock_irqrestore(&tick_device_lock, flags); | ||
361 | } | 379 | } |
362 | 380 | ||
363 | /* | ||
364 | * Notification about clock event devices | ||
365 | */ | ||
366 | static int tick_notify(struct notifier_block *nb, unsigned long reason, | ||
367 | void *dev) | ||
368 | { | ||
369 | switch (reason) { | ||
370 | |||
371 | case CLOCK_EVT_NOTIFY_ADD: | ||
372 | return tick_check_new_device(dev); | ||
373 | |||
374 | case CLOCK_EVT_NOTIFY_BROADCAST_ON: | ||
375 | case CLOCK_EVT_NOTIFY_BROADCAST_OFF: | ||
376 | case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: | ||
377 | tick_broadcast_on_off(reason, dev); | ||
378 | break; | ||
379 | |||
380 | case CLOCK_EVT_NOTIFY_BROADCAST_ENTER: | ||
381 | case CLOCK_EVT_NOTIFY_BROADCAST_EXIT: | ||
382 | tick_broadcast_oneshot_control(reason); | ||
383 | break; | ||
384 | |||
385 | case CLOCK_EVT_NOTIFY_CPU_DYING: | ||
386 | tick_handover_do_timer(dev); | ||
387 | break; | ||
388 | |||
389 | case CLOCK_EVT_NOTIFY_CPU_DEAD: | ||
390 | tick_shutdown_broadcast_oneshot(dev); | ||
391 | tick_shutdown_broadcast(dev); | ||
392 | tick_shutdown(dev); | ||
393 | break; | ||
394 | |||
395 | case CLOCK_EVT_NOTIFY_SUSPEND: | ||
396 | tick_suspend(); | ||
397 | tick_suspend_broadcast(); | ||
398 | break; | ||
399 | |||
400 | case CLOCK_EVT_NOTIFY_RESUME: | ||
401 | tick_resume(); | ||
402 | break; | ||
403 | |||
404 | default: | ||
405 | break; | ||
406 | } | ||
407 | |||
408 | return NOTIFY_OK; | ||
409 | } | ||
410 | |||
411 | static struct notifier_block tick_notifier = { | ||
412 | .notifier_call = tick_notify, | ||
413 | }; | ||
414 | |||
415 | /** | 381 | /** |
416 | * tick_init - initialize the tick control | 382 | * tick_init - initialize the tick control |
417 | * | ||
418 | * Register the notifier with the clockevents framework | ||
419 | */ | 383 | */ |
420 | void __init tick_init(void) | 384 | void __init tick_init(void) |
421 | { | 385 | { |
422 | clockevents_register_notifier(&tick_notifier); | ||
423 | tick_broadcast_init(); | 386 | tick_broadcast_init(); |
424 | } | 387 | } |
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index f0299eae4602..bc906cad709b 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h | |||
@@ -6,6 +6,8 @@ | |||
6 | 6 | ||
7 | extern seqlock_t jiffies_lock; | 7 | extern seqlock_t jiffies_lock; |
8 | 8 | ||
9 | #define CS_NAME_LEN 32 | ||
10 | |||
9 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD | 11 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD |
10 | 12 | ||
11 | #define TICK_DO_TIMER_NONE -1 | 13 | #define TICK_DO_TIMER_NONE -1 |
@@ -18,9 +20,19 @@ extern int tick_do_timer_cpu __read_mostly; | |||
18 | 20 | ||
19 | extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast); | 21 | extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast); |
20 | extern void tick_handle_periodic(struct clock_event_device *dev); | 22 | extern void tick_handle_periodic(struct clock_event_device *dev); |
23 | extern void tick_check_new_device(struct clock_event_device *dev); | ||
24 | extern void tick_handover_do_timer(int *cpup); | ||
25 | extern void tick_shutdown(unsigned int *cpup); | ||
26 | extern void tick_suspend(void); | ||
27 | extern void tick_resume(void); | ||
28 | extern bool tick_check_replacement(struct clock_event_device *curdev, | ||
29 | struct clock_event_device *newdev); | ||
30 | extern void tick_install_replacement(struct clock_event_device *dev); | ||
21 | 31 | ||
22 | extern void clockevents_shutdown(struct clock_event_device *dev); | 32 | extern void clockevents_shutdown(struct clock_event_device *dev); |
23 | 33 | ||
34 | extern size_t sysfs_get_uname(const char *buf, char *dst, size_t cnt); | ||
35 | |||
24 | /* | 36 | /* |
25 | * NO_HZ / high resolution timer shared code | 37 | * NO_HZ / high resolution timer shared code |
26 | */ | 38 | */ |
@@ -90,7 +102,7 @@ static inline bool tick_broadcast_oneshot_available(void) { return false; } | |||
90 | */ | 102 | */ |
91 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST | 103 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST |
92 | extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu); | 104 | extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu); |
93 | extern int tick_check_broadcast_device(struct clock_event_device *dev); | 105 | extern void tick_install_broadcast_device(struct clock_event_device *dev); |
94 | extern int tick_is_broadcast_device(struct clock_event_device *dev); | 106 | extern int tick_is_broadcast_device(struct clock_event_device *dev); |
95 | extern void tick_broadcast_on_off(unsigned long reason, int *oncpu); | 107 | extern void tick_broadcast_on_off(unsigned long reason, int *oncpu); |
96 | extern void tick_shutdown_broadcast(unsigned int *cpup); | 108 | extern void tick_shutdown_broadcast(unsigned int *cpup); |
@@ -102,9 +114,8 @@ tick_set_periodic_handler(struct clock_event_device *dev, int broadcast); | |||
102 | 114 | ||
103 | #else /* !BROADCAST */ | 115 | #else /* !BROADCAST */ |
104 | 116 | ||
105 | static inline int tick_check_broadcast_device(struct clock_event_device *dev) | 117 | static inline void tick_install_broadcast_device(struct clock_event_device *dev) |
106 | { | 118 | { |
107 | return 0; | ||
108 | } | 119 | } |
109 | 120 | ||
110 | static inline int tick_is_broadcast_device(struct clock_event_device *dev) | 121 | static inline int tick_is_broadcast_device(struct clock_event_device *dev) |
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index baeeb5c87cf1..48b9fffabdc2 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -25,6 +25,11 @@ | |||
25 | 25 | ||
26 | #include "tick-internal.h" | 26 | #include "tick-internal.h" |
27 | #include "ntp_internal.h" | 27 | #include "ntp_internal.h" |
28 | #include "timekeeping_internal.h" | ||
29 | |||
30 | #define TK_CLEAR_NTP (1 << 0) | ||
31 | #define TK_MIRROR (1 << 1) | ||
32 | #define TK_CLOCK_WAS_SET (1 << 2) | ||
28 | 33 | ||
29 | static struct timekeeper timekeeper; | 34 | static struct timekeeper timekeeper; |
30 | static DEFINE_RAW_SPINLOCK(timekeeper_lock); | 35 | static DEFINE_RAW_SPINLOCK(timekeeper_lock); |
@@ -200,9 +205,9 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk) | |||
200 | 205 | ||
201 | static RAW_NOTIFIER_HEAD(pvclock_gtod_chain); | 206 | static RAW_NOTIFIER_HEAD(pvclock_gtod_chain); |
202 | 207 | ||
203 | static void update_pvclock_gtod(struct timekeeper *tk) | 208 | static void update_pvclock_gtod(struct timekeeper *tk, bool was_set) |
204 | { | 209 | { |
205 | raw_notifier_call_chain(&pvclock_gtod_chain, 0, tk); | 210 | raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk); |
206 | } | 211 | } |
207 | 212 | ||
208 | /** | 213 | /** |
@@ -216,7 +221,7 @@ int pvclock_gtod_register_notifier(struct notifier_block *nb) | |||
216 | 221 | ||
217 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 222 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
218 | ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb); | 223 | ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb); |
219 | update_pvclock_gtod(tk); | 224 | update_pvclock_gtod(tk, true); |
220 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 225 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
221 | 226 | ||
222 | return ret; | 227 | return ret; |
@@ -241,16 +246,16 @@ int pvclock_gtod_unregister_notifier(struct notifier_block *nb) | |||
241 | EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier); | 246 | EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier); |
242 | 247 | ||
243 | /* must hold timekeeper_lock */ | 248 | /* must hold timekeeper_lock */ |
244 | static void timekeeping_update(struct timekeeper *tk, bool clearntp, bool mirror) | 249 | static void timekeeping_update(struct timekeeper *tk, unsigned int action) |
245 | { | 250 | { |
246 | if (clearntp) { | 251 | if (action & TK_CLEAR_NTP) { |
247 | tk->ntp_error = 0; | 252 | tk->ntp_error = 0; |
248 | ntp_clear(); | 253 | ntp_clear(); |
249 | } | 254 | } |
250 | update_vsyscall(tk); | 255 | update_vsyscall(tk); |
251 | update_pvclock_gtod(tk); | 256 | update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET); |
252 | 257 | ||
253 | if (mirror) | 258 | if (action & TK_MIRROR) |
254 | memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper)); | 259 | memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper)); |
255 | } | 260 | } |
256 | 261 | ||
@@ -508,7 +513,7 @@ int do_settimeofday(const struct timespec *tv) | |||
508 | 513 | ||
509 | tk_set_xtime(tk, tv); | 514 | tk_set_xtime(tk, tv); |
510 | 515 | ||
511 | timekeeping_update(tk, true, true); | 516 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); |
512 | 517 | ||
513 | write_seqcount_end(&timekeeper_seq); | 518 | write_seqcount_end(&timekeeper_seq); |
514 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 519 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
@@ -552,7 +557,7 @@ int timekeeping_inject_offset(struct timespec *ts) | |||
552 | tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts)); | 557 | tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts)); |
553 | 558 | ||
554 | error: /* even if we error out, we forwarded the time, so call update */ | 559 | error: /* even if we error out, we forwarded the time, so call update */ |
555 | timekeeping_update(tk, true, true); | 560 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); |
556 | 561 | ||
557 | write_seqcount_end(&timekeeper_seq); | 562 | write_seqcount_end(&timekeeper_seq); |
558 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 563 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
@@ -627,13 +632,22 @@ static int change_clocksource(void *data) | |||
627 | write_seqcount_begin(&timekeeper_seq); | 632 | write_seqcount_begin(&timekeeper_seq); |
628 | 633 | ||
629 | timekeeping_forward_now(tk); | 634 | timekeeping_forward_now(tk); |
630 | if (!new->enable || new->enable(new) == 0) { | 635 | /* |
631 | old = tk->clock; | 636 | * If the cs is in module, get a module reference. Succeeds |
632 | tk_setup_internals(tk, new); | 637 | * for built-in code (owner == NULL) as well. |
633 | if (old->disable) | 638 | */ |
634 | old->disable(old); | 639 | if (try_module_get(new->owner)) { |
640 | if (!new->enable || new->enable(new) == 0) { | ||
641 | old = tk->clock; | ||
642 | tk_setup_internals(tk, new); | ||
643 | if (old->disable) | ||
644 | old->disable(old); | ||
645 | module_put(old->owner); | ||
646 | } else { | ||
647 | module_put(new->owner); | ||
648 | } | ||
635 | } | 649 | } |
636 | timekeeping_update(tk, true, true); | 650 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); |
637 | 651 | ||
638 | write_seqcount_end(&timekeeper_seq); | 652 | write_seqcount_end(&timekeeper_seq); |
639 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 653 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
@@ -648,14 +662,15 @@ static int change_clocksource(void *data) | |||
648 | * This function is called from clocksource.c after a new, better clock | 662 | * This function is called from clocksource.c after a new, better clock |
649 | * source has been registered. The caller holds the clocksource_mutex. | 663 | * source has been registered. The caller holds the clocksource_mutex. |
650 | */ | 664 | */ |
651 | void timekeeping_notify(struct clocksource *clock) | 665 | int timekeeping_notify(struct clocksource *clock) |
652 | { | 666 | { |
653 | struct timekeeper *tk = &timekeeper; | 667 | struct timekeeper *tk = &timekeeper; |
654 | 668 | ||
655 | if (tk->clock == clock) | 669 | if (tk->clock == clock) |
656 | return; | 670 | return 0; |
657 | stop_machine(change_clocksource, clock, NULL); | 671 | stop_machine(change_clocksource, clock, NULL); |
658 | tick_clock_notify(); | 672 | tick_clock_notify(); |
673 | return tk->clock == clock ? 0 : -1; | ||
659 | } | 674 | } |
660 | 675 | ||
661 | /** | 676 | /** |
@@ -841,6 +856,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk, | |||
841 | tk_xtime_add(tk, delta); | 856 | tk_xtime_add(tk, delta); |
842 | tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta)); | 857 | tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta)); |
843 | tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta)); | 858 | tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta)); |
859 | tk_debug_account_sleep_time(delta); | ||
844 | } | 860 | } |
845 | 861 | ||
846 | /** | 862 | /** |
@@ -872,7 +888,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta) | |||
872 | 888 | ||
873 | __timekeeping_inject_sleeptime(tk, delta); | 889 | __timekeeping_inject_sleeptime(tk, delta); |
874 | 890 | ||
875 | timekeeping_update(tk, true, true); | 891 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); |
876 | 892 | ||
877 | write_seqcount_end(&timekeeper_seq); | 893 | write_seqcount_end(&timekeeper_seq); |
878 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 894 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
@@ -954,7 +970,7 @@ static void timekeeping_resume(void) | |||
954 | tk->cycle_last = clock->cycle_last = cycle_now; | 970 | tk->cycle_last = clock->cycle_last = cycle_now; |
955 | tk->ntp_error = 0; | 971 | tk->ntp_error = 0; |
956 | timekeeping_suspended = 0; | 972 | timekeeping_suspended = 0; |
957 | timekeeping_update(tk, false, true); | 973 | timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); |
958 | write_seqcount_end(&timekeeper_seq); | 974 | write_seqcount_end(&timekeeper_seq); |
959 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 975 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
960 | 976 | ||
@@ -1236,9 +1252,10 @@ out_adjust: | |||
1236 | * It also calls into the NTP code to handle leapsecond processing. | 1252 | * It also calls into the NTP code to handle leapsecond processing. |
1237 | * | 1253 | * |
1238 | */ | 1254 | */ |
1239 | static inline void accumulate_nsecs_to_secs(struct timekeeper *tk) | 1255 | static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk) |
1240 | { | 1256 | { |
1241 | u64 nsecps = (u64)NSEC_PER_SEC << tk->shift; | 1257 | u64 nsecps = (u64)NSEC_PER_SEC << tk->shift; |
1258 | unsigned int action = 0; | ||
1242 | 1259 | ||
1243 | while (tk->xtime_nsec >= nsecps) { | 1260 | while (tk->xtime_nsec >= nsecps) { |
1244 | int leap; | 1261 | int leap; |
@@ -1261,8 +1278,10 @@ static inline void accumulate_nsecs_to_secs(struct timekeeper *tk) | |||
1261 | __timekeeping_set_tai_offset(tk, tk->tai_offset - leap); | 1278 | __timekeeping_set_tai_offset(tk, tk->tai_offset - leap); |
1262 | 1279 | ||
1263 | clock_was_set_delayed(); | 1280 | clock_was_set_delayed(); |
1281 | action = TK_CLOCK_WAS_SET; | ||
1264 | } | 1282 | } |
1265 | } | 1283 | } |
1284 | return action; | ||
1266 | } | 1285 | } |
1267 | 1286 | ||
1268 | /** | 1287 | /** |
@@ -1347,6 +1366,7 @@ static void update_wall_time(void) | |||
1347 | struct timekeeper *tk = &shadow_timekeeper; | 1366 | struct timekeeper *tk = &shadow_timekeeper; |
1348 | cycle_t offset; | 1367 | cycle_t offset; |
1349 | int shift = 0, maxshift; | 1368 | int shift = 0, maxshift; |
1369 | unsigned int action; | ||
1350 | unsigned long flags; | 1370 | unsigned long flags; |
1351 | 1371 | ||
1352 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 1372 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
@@ -1399,7 +1419,7 @@ static void update_wall_time(void) | |||
1399 | * Finally, make sure that after the rounding | 1419 | * Finally, make sure that after the rounding |
1400 | * xtime_nsec isn't larger than NSEC_PER_SEC | 1420 | * xtime_nsec isn't larger than NSEC_PER_SEC |
1401 | */ | 1421 | */ |
1402 | accumulate_nsecs_to_secs(tk); | 1422 | action = accumulate_nsecs_to_secs(tk); |
1403 | 1423 | ||
1404 | write_seqcount_begin(&timekeeper_seq); | 1424 | write_seqcount_begin(&timekeeper_seq); |
1405 | /* Update clock->cycle_last with the new value */ | 1425 | /* Update clock->cycle_last with the new value */ |
@@ -1415,7 +1435,7 @@ static void update_wall_time(void) | |||
1415 | * updating. | 1435 | * updating. |
1416 | */ | 1436 | */ |
1417 | memcpy(real_tk, tk, sizeof(*tk)); | 1437 | memcpy(real_tk, tk, sizeof(*tk)); |
1418 | timekeeping_update(real_tk, false, false); | 1438 | timekeeping_update(real_tk, action); |
1419 | write_seqcount_end(&timekeeper_seq); | 1439 | write_seqcount_end(&timekeeper_seq); |
1420 | out: | 1440 | out: |
1421 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 1441 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
@@ -1677,6 +1697,7 @@ int do_adjtimex(struct timex *txc) | |||
1677 | 1697 | ||
1678 | if (tai != orig_tai) { | 1698 | if (tai != orig_tai) { |
1679 | __timekeeping_set_tai_offset(tk, tai); | 1699 | __timekeeping_set_tai_offset(tk, tai); |
1700 | update_pvclock_gtod(tk, true); | ||
1680 | clock_was_set_delayed(); | 1701 | clock_was_set_delayed(); |
1681 | } | 1702 | } |
1682 | write_seqcount_end(&timekeeper_seq); | 1703 | write_seqcount_end(&timekeeper_seq); |
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c new file mode 100644 index 000000000000..802433a4f5eb --- /dev/null +++ b/kernel/time/timekeeping_debug.c | |||
@@ -0,0 +1,72 @@ | |||
1 | /* | ||
2 | * debugfs file to track time spent in suspend | ||
3 | * | ||
4 | * Copyright (c) 2011, Google, Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | * more details. | ||
15 | */ | ||
16 | |||
17 | #include <linux/debugfs.h> | ||
18 | #include <linux/err.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/seq_file.h> | ||
22 | #include <linux/time.h> | ||
23 | |||
24 | static unsigned int sleep_time_bin[32] = {0}; | ||
25 | |||
26 | static int tk_debug_show_sleep_time(struct seq_file *s, void *data) | ||
27 | { | ||
28 | unsigned int bin; | ||
29 | seq_puts(s, " time (secs) count\n"); | ||
30 | seq_puts(s, "------------------------------\n"); | ||
31 | for (bin = 0; bin < 32; bin++) { | ||
32 | if (sleep_time_bin[bin] == 0) | ||
33 | continue; | ||
34 | seq_printf(s, "%10u - %-10u %4u\n", | ||
35 | bin ? 1 << (bin - 1) : 0, 1 << bin, | ||
36 | sleep_time_bin[bin]); | ||
37 | } | ||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | static int tk_debug_sleep_time_open(struct inode *inode, struct file *file) | ||
42 | { | ||
43 | return single_open(file, tk_debug_show_sleep_time, NULL); | ||
44 | } | ||
45 | |||
46 | static const struct file_operations tk_debug_sleep_time_fops = { | ||
47 | .open = tk_debug_sleep_time_open, | ||
48 | .read = seq_read, | ||
49 | .llseek = seq_lseek, | ||
50 | .release = single_release, | ||
51 | }; | ||
52 | |||
53 | static int __init tk_debug_sleep_time_init(void) | ||
54 | { | ||
55 | struct dentry *d; | ||
56 | |||
57 | d = debugfs_create_file("sleep_time", 0444, NULL, NULL, | ||
58 | &tk_debug_sleep_time_fops); | ||
59 | if (!d) { | ||
60 | pr_err("Failed to create sleep_time debug file\n"); | ||
61 | return -ENOMEM; | ||
62 | } | ||
63 | |||
64 | return 0; | ||
65 | } | ||
66 | late_initcall(tk_debug_sleep_time_init); | ||
67 | |||
68 | void tk_debug_account_sleep_time(struct timespec *t) | ||
69 | { | ||
70 | sleep_time_bin[fls(t->tv_sec)]++; | ||
71 | } | ||
72 | |||
diff --git a/kernel/time/timekeeping_internal.h b/kernel/time/timekeeping_internal.h new file mode 100644 index 000000000000..13323ea08ffa --- /dev/null +++ b/kernel/time/timekeeping_internal.h | |||
@@ -0,0 +1,14 @@ | |||
1 | #ifndef _TIMEKEEPING_INTERNAL_H | ||
2 | #define _TIMEKEEPING_INTERNAL_H | ||
3 | /* | ||
4 | * timekeeping debug functions | ||
5 | */ | ||
6 | #include <linux/time.h> | ||
7 | |||
8 | #ifdef CONFIG_DEBUG_FS | ||
9 | extern void tk_debug_account_sleep_time(struct timespec *t); | ||
10 | #else | ||
11 | #define tk_debug_account_sleep_time(x) | ||
12 | #endif | ||
13 | |||
14 | #endif /* _TIMEKEEPING_INTERNAL_H */ | ||
diff --git a/kernel/timer.c b/kernel/timer.c index 15ffdb3f1948..15bc1b41021d 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -149,9 +149,11 @@ static unsigned long round_jiffies_common(unsigned long j, int cpu, | |||
149 | /* now that we have rounded, subtract the extra skew again */ | 149 | /* now that we have rounded, subtract the extra skew again */ |
150 | j -= cpu * 3; | 150 | j -= cpu * 3; |
151 | 151 | ||
152 | if (j <= jiffies) /* rounding ate our timeout entirely; */ | 152 | /* |
153 | return original; | 153 | * Make sure j is still in the future. Otherwise return the |
154 | return j; | 154 | * unmodified value. |
155 | */ | ||
156 | return time_is_after_jiffies(j) ? j : original; | ||
155 | } | 157 | } |
156 | 158 | ||
157 | /** | 159 | /** |