author	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-25 23:43:12 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-25 23:43:12 -0400
commit	55392c4c06204c8149dc333309cf474691f1cc3c (patch)
tree	ac4ee3c3d1f9eb3acc185de08c3d3e784a57349c
parent	c410614c902531d1ce2e46aec8ac91aa4dc89968 (diff)
parent	1f3b0f8243cb934307f59bd4d8e43b868e61d4d9 (diff)
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer updates from Thomas Gleixner:
 "This update provides the following changes:

   - The rework of the timer wheel which addresses the shortcomings of
     the current wheel (cascading, slow search for next expiring timer,
     etc). That's the first major change of the wheel in almost 20
     years since Finn implemented it.

   - A large overhaul of the clocksource drivers init functions to
     consolidate the Device Tree initialization

   - Some more Y2038 updates

   - A capability fix for timerfd

   - Yet another clock chip driver

   - The usual pile of updates, comment improvements all over the
     place"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (130 commits)
  tick/nohz: Optimize nohz idle enter
  clockevents: Make clockevents_subsys static
  clocksource/drivers/time-armada-370-xp: Fix return value check
  timers: Implement optimization for same expiry time in mod_timer()
  timers: Split out index calculation
  timers: Only wake softirq if necessary
  timers: Forward the wheel clock whenever possible
  timers/nohz: Remove pointless tick_nohz_kick_tick() function
  timers: Optimize collect_expired_timers() for NOHZ
  timers: Move __run_timers() function
  timers: Remove set_timer_slack() leftovers
  timers: Switch to a non-cascading wheel
  timers: Reduce the CPU index space to 256k
  timers: Give a few structs and members proper names
  hlist: Add hlist_is_singular_node() helper
  signals: Use hrtimer for sigtimedwait()
  timers: Remove the deprecated mod_timer_pinned() API
  timers, net/ipv4/inet: Initialize connection request timers as pinned
  timers, drivers/tty/mips_ejtag: Initialize the poll timer as pinned
  timers, drivers/tty/metag_da: Initialize the poll timer as pinned
  ...
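The dominant pattern in the clocksource diffs below is the conversion of every CLOCKSOURCE_OF_DECLARE() init routine from returning void (and calling panic()/BUG() on failure) to returning int, so the core probe code can report and propagate errors. A minimal sketch of the converted shape; the foo_* names, FOO_TIMER_FREQ and the "vendor,foo-timer" compatible string are hypothetical placeholders, not part of this series:

	/*
	 * Hedged sketch of the init convention this series converts
	 * drivers to; foo_* identifiers are made up for illustration.
	 */
	static int __init foo_timer_init(struct device_node *np)
	{
		void __iomem *base;
		int ret;

		base = of_iomap(np, 0);
		if (!base) {
			pr_err("%s: unable to map timer registers\n", np->name);
			return -ENXIO;	/* was: panic()/BUG() before the rework */
		}

		ret = clocksource_mmio_init(base, np->name, FOO_TIMER_FREQ,
					    300, 32, clocksource_mmio_readl_up);
		if (ret)
			return ret;	/* propagate instead of ignoring */

		return 0;
	}
	CLOCKSOURCE_OF_DECLARE(foo_timer, "vendor,foo-timer", foo_timer_init);

Returning an error lets the probing core report which timer node failed instead of halting the boot, which is what the per-driver hunks below implement.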
 Documentation/devicetree/bindings/timer/oxsemi,rps-timer.txt | 17
 Documentation/devicetree/bindings/timer/rockchip,rk-timer.txt (renamed from Documentation/devicetree/bindings/timer/rockchip,rk3288-timer.txt) | 6
 Documentation/kernel-parameters.txt | 8
 arch/arc/kernel/time.c | 63
 arch/arm/Kconfig | 2
 arch/arm/kernel/smp_twd.c | 3
 arch/arm/mach-bcm/Kconfig | 2
 arch/arm/mach-integrator/Kconfig | 2
 arch/arm/mach-keystone/Kconfig | 2
 arch/arm/mach-moxart/Kconfig | 2
 arch/arm/mach-mxs/Kconfig | 2
 arch/arm/mach-nspire/Kconfig | 1
 arch/arm/mach-prima2/Kconfig | 2
 arch/arm/mach-u300/Kconfig | 2
 arch/arm64/boot/dts/rockchip/rk3399.dtsi | 8
 arch/microblaze/kernel/timer.c | 49
 arch/mips/ralink/cevt-rt3352.c | 17
 arch/nios2/kernel/time.c | 63
 arch/x86/kernel/apic/x2apic_uv_x.c | 4
 arch/x86/kernel/cpu/mcheck/mce.c | 4
 block/genhd.c | 5
 drivers/clocksource/Kconfig | 116
 drivers/clocksource/Makefile | 23
 drivers/clocksource/arm_arch_timer.c | 56
 drivers/clocksource/arm_global_timer.c | 26
 drivers/clocksource/armv7m_systick.c | 17
 drivers/clocksource/asm9260_timer.c | 22
 drivers/clocksource/bcm2835_timer.c | 38
 drivers/clocksource/bcm_kona_timer.c | 12
 drivers/clocksource/cadence_ttc_timer.c | 74
 drivers/clocksource/clksrc-dbx500-prcmu.c | 4
 drivers/clocksource/clksrc-probe.c | 14
 drivers/clocksource/clksrc_st_lpc.c | 20
 drivers/clocksource/clps711x-timer.c | 10
 drivers/clocksource/dw_apb_timer_of.c | 4
 drivers/clocksource/exynos_mct.c | 32
 drivers/clocksource/fsl_ftm_timer.c | 20
 drivers/clocksource/h8300_timer16.c | 12
 drivers/clocksource/h8300_timer8.c | 11
 drivers/clocksource/h8300_tpu.c | 10
 drivers/clocksource/meson6_timer.c | 19
 drivers/clocksource/mips-gic-timer.c | 24
 drivers/clocksource/moxart_timer.c | 39
 drivers/clocksource/mps2-timer.c | 8
 drivers/clocksource/mtk_timer.c | 8
 drivers/clocksource/mxs_timer.c | 26
 drivers/clocksource/nomadik-mtu.c | 43
 drivers/clocksource/pxa_timer.c | 44
 drivers/clocksource/qcom-timer.c | 23
 drivers/clocksource/rockchip_timer.c | 53
 drivers/clocksource/samsung_pwm_timer.c | 70
 drivers/clocksource/sun4i_timer.c | 43
 drivers/clocksource/tango_xtal.c | 10
 drivers/clocksource/tegra20_timer.c | 24
 drivers/clocksource/time-armada-370-xp.c | 98
 drivers/clocksource/time-efm32.c | 17
 drivers/clocksource/time-lpc32xx.c | 10
 drivers/clocksource/time-orion.c | 50
 drivers/clocksource/time-pistachio.c | 18
 drivers/clocksource/timer-atlas7.c | 30
 drivers/clocksource/timer-atmel-pit.c | 41
 drivers/clocksource/timer-atmel-st.c | 42
 drivers/clocksource/timer-digicolor.c | 16
 drivers/clocksource/timer-imx-gpt.c | 51
 drivers/clocksource/timer-integrator-ap.c | 57
 drivers/clocksource/timer-keystone.c | 13
 drivers/clocksource/timer-nps.c | 14
 drivers/clocksource/timer-oxnas-rps.c | 297
 drivers/clocksource/timer-prima2.c | 42
 drivers/clocksource/timer-sp804.c | 86
 drivers/clocksource/timer-stm32.c | 8
 drivers/clocksource/timer-sun5i.c | 33
 drivers/clocksource/timer-ti-32k.c | 8
 drivers/clocksource/timer-u300.c | 36
 drivers/clocksource/versatile.c | 6
 drivers/clocksource/vf_pit_timer.c | 25
 drivers/clocksource/vt8500_timer.c | 24
 drivers/clocksource/zevio-timer.c | 4
 drivers/cpufreq/powernv-cpufreq.c | 5
 drivers/mmc/host/jz4740_mmc.c | 2
 drivers/net/ethernet/tile/tilepro.c | 4
 drivers/power/bq27xxx_battery.c | 5
 drivers/tty/metag_da.c | 4
 drivers/tty/mips_ejtag_fdc.c | 4
 drivers/usb/host/ohci-hcd.c | 1
 drivers/usb/host/xhci.c | 2
 fs/timerfd.c | 10
 include/clocksource/timer-sp804.h | 8
 include/linux/alarmtimer.h | 6
 include/linux/clk.h | 4
 include/linux/clocksource.h | 2
 include/linux/list.h | 10
 include/linux/of.h | 3
 include/linux/time.h | 15
 include/linux/timer.h | 34
 kernel/signal.c | 24
 kernel/time/alarmtimer.c | 1
 kernel/time/clockevents.c | 2
 kernel/time/clocksource.c | 8
 kernel/time/hrtimer.c | 2
 kernel/time/test_udelay.c | 16
 kernel/time/tick-broadcast-hrtimer.c | 1
 kernel/time/tick-internal.h | 1
 kernel/time/tick-sched.c | 98
 kernel/time/timeconv.c | 11
 kernel/time/timekeeping.c | 10
 kernel/time/timer.c | 1111
 kernel/time/timer_stats.c | 6
 lib/random32.c | 1
 net/ipv4/inet_connection_sock.c | 7
 net/ipv4/inet_timewait_sock.c | 5
111 files changed, 2460 insertions, 1208 deletions
diff --git a/Documentation/devicetree/bindings/timer/oxsemi,rps-timer.txt b/Documentation/devicetree/bindings/timer/oxsemi,rps-timer.txt
new file mode 100644
index 000000000000..3ca89cd1caef
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/oxsemi,rps-timer.txt
@@ -0,0 +1,17 @@
+Oxford Semiconductor OXNAS SoCs Family RPS Timer
+================================================
+
+Required properties:
+- compatible: Should be "oxsemi,ox810se-rps-timer"
+- reg : Specifies base physical address and size of the registers.
+- interrupts : The interrupts of the two timers
+- clocks : The phandle of the timer clock source
+
+example:
+
+timer0: timer@200 {
+	compatible = "oxsemi,ox810se-rps-timer";
+	reg = <0x200 0x40>;
+	clocks = <&rpsclk>;
+	interrupts = <4 5>;
+};
diff --git a/Documentation/devicetree/bindings/timer/rockchip,rk3288-timer.txt b/Documentation/devicetree/bindings/timer/rockchip,rk-timer.txt
index 87f0b0042bae..a41b184d5538 100644
--- a/Documentation/devicetree/bindings/timer/rockchip,rk3288-timer.txt
+++ b/Documentation/devicetree/bindings/timer/rockchip,rk-timer.txt
@@ -1,7 +1,9 @@
-Rockchip rk3288 timer
+Rockchip rk timer
 
 Required properties:
-- compatible: shall be "rockchip,rk3288-timer"
+- compatible: shall be one of:
+  "rockchip,rk3288-timer" - for rk3066, rk3036, rk3188, rk322x, rk3288, rk3368
+  "rockchip,rk3399-timer" - for rk3399
 - reg: base address of the timer register starting with TIMERS CONTROL register
 - interrupts: should contain the interrupts for Timer0
 - clocks : must contain an entry for each entry in clock-names
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index fa8c6d470ad2..17e33dbbf226 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -687,6 +687,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			[SPARC64] tick
 			[X86-64] hpet,tsc
 
+	clocksource.arm_arch_timer.evtstrm=
+			[ARM,ARM64]
+			Format: <bool>
+			Enable/disable the eventstream feature of the ARM
+			architected timer so that code using WFE-based polling
+			loops can be debugged more effectively on production
+			systems.
+
 	clearcpuid=BITNUM [X86]
 			Disable CPUID feature X for the kernel. See
 			arch/x86/include/asm/cpufeatures.h for the valid bit
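A usage sketch (a hypothetical command line; the parameter itself is real and parsed by the early_param() hook added in the arm_arch_timer.c hunk further down): booting with

	clocksource.arm_arch_timer.evtstrm=0

disables event stream generation even when CONFIG_ARM_ARCH_TIMER_EVTSTREAM makes it the default.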
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 4549ab255dd1..98f22d2eb563 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -116,19 +116,19 @@ static struct clocksource arc_counter_gfrc = {
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static void __init arc_cs_setup_gfrc(struct device_node *node)
+static int __init arc_cs_setup_gfrc(struct device_node *node)
 {
 	int exists = cpuinfo_arc700[0].extn.gfrc;
 	int ret;
 
 	if (WARN(!exists, "Global-64-bit-Ctr clocksource not detected"))
-		return;
+		return -ENXIO;
 
 	ret = arc_get_timer_clk(node);
 	if (ret)
-		return;
+		return ret;
 
-	clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
+	return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
 }
 CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
 
@@ -172,25 +172,25 @@ static struct clocksource arc_counter_rtc = {
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static void __init arc_cs_setup_rtc(struct device_node *node)
+static int __init arc_cs_setup_rtc(struct device_node *node)
 {
 	int exists = cpuinfo_arc700[smp_processor_id()].extn.rtc;
 	int ret;
 
 	if (WARN(!exists, "Local-64-bit-Ctr clocksource not detected"))
-		return;
+		return -ENXIO;
 
 	/* Local to CPU hence not usable in SMP */
 	if (WARN(IS_ENABLED(CONFIG_SMP), "Local-64-bit-Ctr not usable in SMP"))
-		return;
+		return -EINVAL;
 
 	ret = arc_get_timer_clk(node);
 	if (ret)
-		return;
+		return ret;
 
 	write_aux_reg(AUX_RTC_CTRL, 1);
 
-	clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
+	return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
 }
 CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
 
@@ -213,23 +213,23 @@ static struct clocksource arc_counter_timer1 = {
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static void __init arc_cs_setup_timer1(struct device_node *node)
+static int __init arc_cs_setup_timer1(struct device_node *node)
 {
 	int ret;
 
 	/* Local to CPU hence not usable in SMP */
 	if (IS_ENABLED(CONFIG_SMP))
-		return;
+		return -EINVAL;
 
 	ret = arc_get_timer_clk(node);
 	if (ret)
-		return;
+		return ret;
 
 	write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
 	write_aux_reg(ARC_REG_TIMER1_CNT, 0);
 	write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
 
-	clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
+	return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
 }
 
 /********** Clock Event Device *********/
@@ -324,20 +324,28 @@ static struct notifier_block arc_timer_cpu_nb = {
 /*
  * clockevent setup for boot CPU
  */
-static void __init arc_clockevent_setup(struct device_node *node)
+static int __init arc_clockevent_setup(struct device_node *node)
 {
 	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
 	int ret;
 
-	register_cpu_notifier(&arc_timer_cpu_nb);
+	ret = register_cpu_notifier(&arc_timer_cpu_nb);
+	if (ret) {
+		pr_err("Failed to register cpu notifier");
+		return ret;
+	}
 
 	arc_timer_irq = irq_of_parse_and_map(node, 0);
-	if (arc_timer_irq <= 0)
-		panic("clockevent: missing irq");
+	if (arc_timer_irq <= 0) {
+		pr_err("clockevent: missing irq");
+		return -EINVAL;
+	}
 
 	ret = arc_get_timer_clk(node);
-	if (ret)
-		panic("clockevent: missing clk");
+	if (ret) {
+		pr_err("clockevent: missing clk");
+		return ret;
+	}
 
 	evt->irq = arc_timer_irq;
 	evt->cpumask = cpumask_of(smp_processor_id());
@@ -347,22 +355,29 @@ static void __init arc_clockevent_setup(struct device_node *node)
 	/* Needs apriori irq_set_percpu_devid() done in intc map function */
 	ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
 				 "Timer0 (per-cpu-tick)", evt);
-	if (ret)
-		panic("clockevent: unable to request irq\n");
+	if (ret) {
+		pr_err("clockevent: unable to request irq\n");
+		return ret;
+	}
 
 	enable_percpu_irq(arc_timer_irq, 0);
+
+	return 0;
 }
 
-static void __init arc_of_timer_init(struct device_node *np)
+static int __init arc_of_timer_init(struct device_node *np)
 {
 	static int init_count = 0;
+	int ret;
 
 	if (!init_count) {
 		init_count = 1;
-		arc_clockevent_setup(np);
+		ret = arc_clockevent_setup(np);
 	} else {
-		arc_cs_setup_timer1(np);
+		ret = arc_cs_setup_timer1(np);
 	}
+
+	return ret;
 }
 CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 90542db1220d..f0636ec94903 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -358,10 +358,10 @@ config ARCH_CLPS711X
 	bool "Cirrus Logic CLPS711x/EP721x/EP731x-based"
 	select ARCH_REQUIRE_GPIOLIB
 	select AUTO_ZRELADDR
-	select CLKSRC_MMIO
 	select COMMON_CLK
 	select CPU_ARM720T
 	select GENERIC_CLOCKEVENTS
+	select CLPS711X_TIMER
 	select MFD_SYSCON
 	select SOC_BUS
 	help
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 1bfa7a7f5533..b6ec65e68009 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -390,7 +390,7 @@ int __init twd_local_timer_register(struct twd_local_timer *tlt)
 }
 
 #ifdef CONFIG_OF
-static void __init twd_local_timer_of_register(struct device_node *np)
+static int __init twd_local_timer_of_register(struct device_node *np)
 {
 	int err;
 
@@ -410,6 +410,7 @@ static void __init twd_local_timer_of_register(struct device_node *np)
 
 out:
 	WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
+	return err;
 }
 CLOCKSOURCE_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register);
 CLOCKSOURCE_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register);
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index 68ab6412392a..4f1709b31822 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -89,6 +89,7 @@ config ARCH_BCM_MOBILE
 	select HAVE_ARM_ARCH_TIMER
 	select PINCTRL
 	select ARCH_BCM_MOBILE_SMP if SMP
+	select BCM_KONA_TIMER
 	help
 	  This enables support for systems based on Broadcom mobile SoCs.
 
@@ -143,6 +144,7 @@ config ARCH_BCM2835
 	select ARM_TIMER_SP804
 	select HAVE_ARM_ARCH_TIMER if ARCH_MULTI_V7
 	select CLKSRC_OF
+	select BCM2835_TIMER
 	select PINCTRL
 	select PINCTRL_BCM2835
 	help
diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig
index b2a85ba13f08..291262e5aeaf 100644
--- a/arch/arm/mach-integrator/Kconfig
+++ b/arch/arm/mach-integrator/Kconfig
@@ -20,7 +20,7 @@ if ARCH_INTEGRATOR
 
 config ARCH_INTEGRATOR_AP
 	bool "Support Integrator/AP and Integrator/PP2 platforms"
-	select CLKSRC_MMIO
+	select INTEGRATOR_AP_TIMER
 	select MIGHT_HAVE_PCI
 	select SERIAL_AMBA_PL010 if TTY
 	select SERIAL_AMBA_PL010_CONSOLE if TTY
diff --git a/arch/arm/mach-keystone/Kconfig b/arch/arm/mach-keystone/Kconfig
index ea955f6db8b7..bac577badc7e 100644
--- a/arch/arm/mach-keystone/Kconfig
+++ b/arch/arm/mach-keystone/Kconfig
@@ -4,7 +4,7 @@ config ARCH_KEYSTONE
 	depends on ARM_PATCH_PHYS_VIRT
 	select ARM_GIC
 	select HAVE_ARM_ARCH_TIMER
-	select CLKSRC_MMIO
+	select KEYSTONE_TIMER
 	select ARM_ERRATA_798181 if SMP
 	select COMMON_CLK_KEYSTONE
 	select ARCH_SUPPORTS_BIG_ENDIAN
diff --git a/arch/arm/mach-moxart/Kconfig b/arch/arm/mach-moxart/Kconfig
index 180d9d216719..ddc79cea32d3 100644
--- a/arch/arm/mach-moxart/Kconfig
+++ b/arch/arm/mach-moxart/Kconfig
@@ -3,7 +3,7 @@ menuconfig ARCH_MOXART
 	depends on ARCH_MULTI_V4
 	select CPU_FA526
 	select ARM_DMA_MEM_BUFFERABLE
-	select CLKSRC_MMIO
+	select MOXART_TIMER
 	select GENERIC_IRQ_CHIP
 	select ARCH_REQUIRE_GPIOLIB
 	select PHYLIB if NETDEVICES
diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig
index 84794137b175..68a3a9ec605d 100644
--- a/arch/arm/mach-mxs/Kconfig
+++ b/arch/arm/mach-mxs/Kconfig
@@ -16,7 +16,7 @@ config ARCH_MXS
 	bool "Freescale MXS (i.MX23, i.MX28) support"
 	depends on ARCH_MULTI_V5
 	select ARCH_REQUIRE_GPIOLIB
-	select CLKSRC_MMIO
+	select MXS_TIMER
 	select PINCTRL
 	select SOC_BUS
 	select SOC_IMX23
diff --git a/arch/arm/mach-nspire/Kconfig b/arch/arm/mach-nspire/Kconfig
index bc41f26c1a12..d4985305cab2 100644
--- a/arch/arm/mach-nspire/Kconfig
+++ b/arch/arm/mach-nspire/Kconfig
@@ -7,5 +7,6 @@ config ARCH_NSPIRE
 	select ARM_AMBA
 	select ARM_VIC
 	select ARM_TIMER_SP804
+	select NSPIRE_TIMER
 	help
 	  This enables support for systems using the TI-NSPIRE CPU
diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
index 0cf4426183cf..9e938f2961cf 100644
--- a/arch/arm/mach-prima2/Kconfig
+++ b/arch/arm/mach-prima2/Kconfig
@@ -28,6 +28,7 @@ config ARCH_ATLAS7
 	default y
 	select ARM_GIC
 	select CPU_V7
+	select ATLAS7_TIMER
 	select HAVE_ARM_SCU if SMP
 	select HAVE_SMP
 	help
@@ -38,6 +39,7 @@ config ARCH_PRIMA2
 	default y
 	select SIRF_IRQ
 	select ZONE_DMA
+	select PRIMA2_TIMER
 	help
 	  Support for CSR SiRFSoC ARM Cortex A9 Platform
 
diff --git a/arch/arm/mach-u300/Kconfig b/arch/arm/mach-u300/Kconfig
index 301a98498453..4fdc3425ffbd 100644
--- a/arch/arm/mach-u300/Kconfig
+++ b/arch/arm/mach-u300/Kconfig
@@ -4,7 +4,7 @@ menuconfig ARCH_U300
 	select ARCH_REQUIRE_GPIOLIB
 	select ARM_AMBA
 	select ARM_VIC
-	select CLKSRC_MMIO
+	select U300_TIMER
 	select CPU_ARM926T
 	select HAVE_TCM
 	select PINCTRL
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index d7f8e06910bc..188bbeab92b9 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -492,6 +492,14 @@
 		interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
 	};
 
+	rktimer: rktimer@ff850000 {
+		compatible = "rockchip,rk3399-timer";
+		reg = <0x0 0xff850000 0x0 0x1000>;
+		interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru PCLK_TIMER0>, <&cru SCLK_TIMER00>;
+		clock-names = "pclk", "timer";
+	};
+
 	spdif: spdif@ff870000 {
 		compatible = "rockchip,rk3399-spdif";
 		reg = <0x0 0xff870000 0x0 0x1000>;
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index 67e2ef48d2d0..5bbf38b916ef 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -170,7 +170,7 @@ static struct irqaction timer_irqaction = {
 	.dev_id	= &clockevent_xilinx_timer,
 };
 
-static __init void xilinx_clockevent_init(void)
+static __init int xilinx_clockevent_init(void)
 {
 	clockevent_xilinx_timer.mult =
 		div_sc(timer_clock_freq, NSEC_PER_SEC,
@@ -181,6 +181,8 @@ static __init void xilinx_clockevent_init(void)
 		clockevent_delta2ns(1, &clockevent_xilinx_timer);
 	clockevent_xilinx_timer.cpumask = cpumask_of(0);
 	clockevents_register_device(&clockevent_xilinx_timer);
+
+	return 0;
 }
 
 static u64 xilinx_clock_read(void)
@@ -229,8 +231,14 @@ static struct clocksource clocksource_microblaze = {
 
 static int __init xilinx_clocksource_init(void)
 {
-	if (clocksource_register_hz(&clocksource_microblaze, timer_clock_freq))
-		panic("failed to register clocksource");
+	int ret;
+
+	ret = clocksource_register_hz(&clocksource_microblaze,
+				      timer_clock_freq);
+	if (ret) {
+		pr_err("failed to register clocksource");
+		return ret;
+	}
 
 	/* stop timer1 */
 	write_fn(read_fn(timer_baseaddr + TCSR1) & ~TCSR_ENT,
@@ -239,16 +247,16 @@ static int __init xilinx_clocksource_init(void)
 	write_fn(TCSR_TINT|TCSR_ENT|TCSR_ARHT, timer_baseaddr + TCSR1);
 
 	/* register timecounter - for ftrace support */
-	init_xilinx_timecounter();
-	return 0;
+	return init_xilinx_timecounter();
 }
 
-static void __init xilinx_timer_init(struct device_node *timer)
+static int __init xilinx_timer_init(struct device_node *timer)
 {
 	struct clk *clk;
 	static int initialized;
 	u32 irq;
 	u32 timer_num = 1;
+	int ret;
 
 	if (initialized)
 		return;
@@ -258,7 +266,7 @@ static void __init xilinx_timer_init(struct device_node *timer)
 	timer_baseaddr = of_iomap(timer, 0);
 	if (!timer_baseaddr) {
 		pr_err("ERROR: invalid timer base address\n");
-		BUG();
+		return -ENXIO;
 	}
 
 	write_fn = timer_write32;
@@ -271,11 +279,15 @@ static void __init xilinx_timer_init(struct device_node *timer)
 	}
 
 	irq = irq_of_parse_and_map(timer, 0);
+	if (irq <= 0) {
+		pr_err("Failed to parse and map irq");
+		return -EINVAL;
+	}
 
 	of_property_read_u32(timer, "xlnx,one-timer-only", &timer_num);
 	if (timer_num) {
-		pr_emerg("Please enable two timers in HW\n");
-		BUG();
+		pr_err("Please enable two timers in HW\n");
+		return -EINVAL;
 	}
 
 	pr_info("%s: irq=%d\n", timer->full_name, irq);
@@ -297,14 +309,27 @@ static void __init xilinx_timer_init(struct device_node *timer)
 
 	freq_div_hz = timer_clock_freq / HZ;
 
-	setup_irq(irq, &timer_irqaction);
+	ret = setup_irq(irq, &timer_irqaction);
+	if (ret) {
+		pr_err("Failed to setup IRQ");
+		return ret;
+	}
+
 #ifdef CONFIG_HEART_BEAT
 	microblaze_setup_heartbeat();
 #endif
-	xilinx_clocksource_init();
-	xilinx_clockevent_init();
+
+	ret = xilinx_clocksource_init();
+	if (ret)
+		return ret;
+
+	ret = xilinx_clockevent_init();
+	if (ret)
+		return ret;
 
 	sched_clock_register(xilinx_clock_read, 32, timer_clock_freq);
+
+	return 0;
 }
 
 CLOCKSOURCE_OF_DECLARE(xilinx_timer, "xlnx,xps-timer-1.00.a",
diff --git a/arch/mips/ralink/cevt-rt3352.c b/arch/mips/ralink/cevt-rt3352.c
index 3ad0b0794f7d..f24eee04e16a 100644
--- a/arch/mips/ralink/cevt-rt3352.c
+++ b/arch/mips/ralink/cevt-rt3352.c
@@ -117,11 +117,13 @@ static int systick_set_oneshot(struct clock_event_device *evt)
 	return 0;
 }
 
-static void __init ralink_systick_init(struct device_node *np)
+static int __init ralink_systick_init(struct device_node *np)
 {
+	int ret;
+
 	systick.membase = of_iomap(np, 0);
 	if (!systick.membase)
-		return;
+		return -ENXIO;
 
 	systick_irqaction.name = np->name;
 	systick.dev.name = np->name;
@@ -131,16 +133,21 @@ static void __init ralink_systick_init(struct device_node *np)
 	systick.dev.irq = irq_of_parse_and_map(np, 0);
 	if (!systick.dev.irq) {
 		pr_err("%s: request_irq failed", np->name);
-		return;
+		return -EINVAL;
 	}
 
-	clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
-			SYSTICK_FREQ, 301, 16, clocksource_mmio_readl_up);
+	ret = clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
+				    SYSTICK_FREQ, 301, 16,
+				    clocksource_mmio_readl_up);
+	if (ret)
+		return ret;
 
 	clockevents_register_device(&systick.dev);
 
 	pr_info("%s: running - mult: %d, shift: %d\n",
 		np->name, systick.dev.mult, systick.dev.shift);
+
+	return 0;
 }
 
 CLOCKSOURCE_OF_DECLARE(systick, "ralink,cevt-systick", ralink_systick_init);
diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c
index e835dda2bfe2..d9563ddb337e 100644
--- a/arch/nios2/kernel/time.c
+++ b/arch/nios2/kernel/time.c
@@ -206,15 +206,21 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void __init nios2_timer_get_base_and_freq(struct device_node *np,
+static int __init nios2_timer_get_base_and_freq(struct device_node *np,
 				void __iomem **base, u32 *freq)
 {
 	*base = of_iomap(np, 0);
-	if (!*base)
-		panic("Unable to map reg for %s\n", np->name);
+	if (!*base) {
+		pr_crit("Unable to map reg for %s\n", np->name);
+		return -ENXIO;
+	}
+
+	if (of_property_read_u32(np, "clock-frequency", freq)) {
+		pr_crit("Unable to get %s clock frequency\n", np->name);
+		return -EINVAL;
+	}
 
-	if (of_property_read_u32(np, "clock-frequency", freq))
-		panic("Unable to get %s clock frequency\n", np->name);
+	return 0;
 }
 
 static struct nios2_clockevent_dev nios2_ce = {
@@ -231,17 +237,21 @@ static struct nios2_clockevent_dev nios2_ce = {
 	},
 };
 
-static __init void nios2_clockevent_init(struct device_node *timer)
+static __init int nios2_clockevent_init(struct device_node *timer)
 {
 	void __iomem *iobase;
 	u32 freq;
-	int irq;
+	int irq, ret;
 
-	nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+	ret = nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+	if (ret)
+		return ret;
 
 	irq = irq_of_parse_and_map(timer, 0);
-	if (!irq)
-		panic("Unable to parse timer irq\n");
+	if (!irq) {
+		pr_crit("Unable to parse timer irq\n");
+		return -EINVAL;
+	}
 
 	nios2_ce.timer.base = iobase;
 	nios2_ce.timer.freq = freq;
@@ -253,25 +263,35 @@ static __init void nios2_clockevent_init(struct device_node *timer)
 	/* clear pending interrupt */
 	timer_writew(&nios2_ce.timer, 0, ALTERA_TIMER_STATUS_REG);
 
-	if (request_irq(irq, timer_interrupt, IRQF_TIMER, timer->name,
-			&nios2_ce.ced))
-		panic("Unable to setup timer irq\n");
+	ret = request_irq(irq, timer_interrupt, IRQF_TIMER, timer->name,
+			  &nios2_ce.ced);
+	if (ret) {
+		pr_crit("Unable to setup timer irq\n");
+		return ret;
+	}
 
 	clockevents_config_and_register(&nios2_ce.ced, freq, 1, ULONG_MAX);
+
+	return 0;
 }
 
-static __init void nios2_clocksource_init(struct device_node *timer)
+static __init int nios2_clocksource_init(struct device_node *timer)
 {
 	unsigned int ctrl;
 	void __iomem *iobase;
 	u32 freq;
+	int ret;
 
-	nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+	ret = nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+	if (ret)
+		return ret;
 
 	nios2_cs.timer.base = iobase;
 	nios2_cs.timer.freq = freq;
 
-	clocksource_register_hz(&nios2_cs.cs, freq);
+	ret = clocksource_register_hz(&nios2_cs.cs, freq);
+	if (ret)
+		return ret;
 
 	timer_writew(&nios2_cs.timer, USHRT_MAX, ALTERA_TIMER_PERIODL_REG);
 	timer_writew(&nios2_cs.timer, USHRT_MAX, ALTERA_TIMER_PERIODH_REG);
@@ -282,6 +302,8 @@ static __init void nios2_clocksource_init(struct device_node *timer)
 
 	/* Calibrate the delay loop directly */
 	lpj_fine = freq / HZ;
+
+	return 0;
 }
 
 /*
@@ -289,22 +311,25 @@ static __init void nios2_clocksource_init(struct device_node *timer)
  * more instances, the second one gets used as clocksource and all
  * others are unused.
 */
-static void __init nios2_time_init(struct device_node *timer)
+static int __init nios2_time_init(struct device_node *timer)
 {
 	static int num_called;
+	int ret;
 
 	switch (num_called) {
 	case 0:
-		nios2_clockevent_init(timer);
+		ret = nios2_clockevent_init(timer);
 		break;
 	case 1:
-		nios2_clocksource_init(timer);
+		ret = nios2_clocksource_init(timer);
 		break;
 	default:
 		break;
 	}
 
 	num_called++;
+
+	return ret;
 }
 
 void read_persistent_clock(struct timespec *ts)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 5a58c917179c..64dd38fbf218 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -918,7 +918,7 @@ static void uv_heartbeat(unsigned long ignored)
 	uv_set_scir_bits(bits);
 
 	/* enable next timer period */
-	mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
+	mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
 }
 
 static void uv_heartbeat_enable(int cpu)
@@ -927,7 +927,7 @@ static void uv_heartbeat_enable(int cpu)
 		struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer;
 
 		uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
-		setup_timer(timer, uv_heartbeat, cpu);
+		setup_pinned_timer(timer, uv_heartbeat, cpu);
 		timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
 		add_timer_on(timer, cpu);
 		uv_cpu_scir_info(cpu)->enabled = 1;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 58af6300992d..79d8ec849468 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1309,7 +1309,7 @@ static void __restart_timer(struct timer_list *t, unsigned long interval)
 
 	if (timer_pending(t)) {
 		if (time_before(when, t->expires))
-			mod_timer_pinned(t, when);
+			mod_timer(t, when);
 	} else {
 		t->expires = round_jiffies(when);
 		add_timer_on(t, smp_processor_id());
@@ -1735,7 +1735,7 @@ static void __mcheck_cpu_init_timer(void)
 	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	unsigned int cpu = smp_processor_id();
 
-	setup_timer(t, mce_timer_fn, cpu);
+	setup_pinned_timer(t, mce_timer_fn, cpu);
 	mce_start_timer(cpu, t);
 }
 
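The two x86 hunks above illustrate the migration pattern applied across this series for CPU-local timers: mod_timer_pinned() is removed, and pinning instead becomes a one-time property of the timer set via the new setup_pinned_timer() helper (added in the include/linux/timer.h changes in this pull), so re-arming can use plain mod_timer(). A hedged before/after sketch; my_timer, my_fn and the interval are placeholder names:

	/* before: pinning had to be re-requested on every re-arm */
	setup_timer(&my_timer, my_fn, (unsigned long)cpu);
	mod_timer_pinned(&my_timer, jiffies + HZ);

	/* after: the timer carries its pinned flag from setup onwards */
	setup_pinned_timer(&my_timer, my_fn, (unsigned long)cpu);
	mod_timer(&my_timer, jiffies + HZ);	/* stays on its CPU */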
diff --git a/block/genhd.c b/block/genhd.c
index 9f42526b4d62..f06d7f3b075b 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1523,12 +1523,7 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
 	if (--ev->block)
 		goto out_unlock;
 
-	/*
-	 * Not exactly a latency critical operation, set poll timer
-	 * slack to 25% and kick event check.
-	 */
 	intv = disk_events_poll_jiffies(disk);
-	set_timer_slack(&ev->dwork.timer, intv / 4);
 	if (check_now)
 		queue_delayed_work(system_freezable_power_efficient_wq,
 				   &ev->dwork, 0);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 47352d25c15e..567788664723 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -27,6 +27,20 @@ config CLKBLD_I8253
 config CLKSRC_MMIO
 	bool
 
+config BCM2835_TIMER
+	bool "BCM2835 timer driver" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	select CLKSRC_MMIO
+	help
+	  Enables the support for the BCM2835 timer driver.
+
+config BCM_KONA_TIMER
+	bool "BCM mobile timer driver" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	select CLKSRC_MMIO
+	help
+	  Enables the support for the BCM Kona mobile timer driver.
+
 config DIGICOLOR_TIMER
 	bool "Digicolor timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
@@ -141,6 +155,72 @@ config CLKSRC_DBX500_PRCMU
 	help
 	  Use the always on PRCMU Timer as clocksource
 
+config CLPS711X_TIMER
+	bool "Cirrus logic timer driver" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	select CLKSRC_MMIO
+	help
+	  Enables support for the Cirrus Logic PS711 timer.
+
+config ATLAS7_TIMER
+	bool "Atlas7 timer driver" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	select CLKSRC_MMIO
+	help
+	  Enables support for the Atlas7 timer.
+
+config MOXART_TIMER
+	bool "Moxart timer driver" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	select CLKSRC_MMIO
+	help
+	  Enables support for the Moxart timer.
+
+config MXS_TIMER
+	bool "Mxs timer driver" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	select CLKSRC_MMIO
+	select STMP_DEVICE
+	help
+	  Enables support for the Mxs timer.
+
+config PRIMA2_TIMER
+	bool "Prima2 timer driver" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	select CLKSRC_MMIO
+	help
+	  Enables support for the Prima2 timer.
+
+config U300_TIMER
+	bool "U300 timer driver" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	depends on ARM
+	select CLKSRC_MMIO
+	help
+	  Enables support for the U300 timer.
+
+config NSPIRE_TIMER
+	bool "NSpire timer driver" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	select CLKSRC_MMIO
+	help
+	  Enables support for the Nspire timer.
+
+config KEYSTONE_TIMER
+	bool "Keystone timer driver" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	depends on ARM || ARM64
+	select CLKSRC_MMIO
+	help
+	  Enables support for the Keystone timer.
+
+config INTEGRATOR_AP_TIMER
+	bool "Integrator-ap timer driver" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	select CLKSRC_MMIO
+	help
+	  Enables support for the Integrator-ap timer.
+
 config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
 	bool "Clocksource PRCMU Timer sched_clock"
 	depends on (CLKSRC_DBX500_PRCMU && !CLKSRC_NOMADIK_MTU_SCHED_CLOCK)
@@ -208,14 +288,16 @@ config ARM_ARCH_TIMER
 	select CLKSRC_ACPI if ACPI
 
 config ARM_ARCH_TIMER_EVTSTREAM
-	bool "Support for ARM architected timer event stream generation"
+	bool "Enable ARM architected timer event stream generation by default"
 	default y if ARM_ARCH_TIMER
 	depends on ARM_ARCH_TIMER
 	help
-	  This option enables support for event stream generation based on
-	  the ARM architected timer. It is used for waking up CPUs executing
-	  the wfe instruction at a frequency represented as a power-of-2
-	  divisor of the clock rate.
+	  This option enables support by default for event stream generation
+	  based on the ARM architected timer. It is used for waking up CPUs
+	  executing the wfe instruction at a frequency represented as a
+	  power-of-2 divisor of the clock rate. The behaviour can also be
+	  overridden on the command line using the
+	  clocksource.arm_arch_timer.evtstrm parameter.
 	  The main use of the event stream is wfe-based timeouts of userspace
 	  locking implementations. It might also be useful for imposing timeout
 	  on wfe to safeguard against any programming errors in case an expected
@@ -224,8 +306,9 @@ config ARM_ARCH_TIMER_EVTSTREAM
 	  hardware anomalies of missing events.
 
 config ARM_GLOBAL_TIMER
-	bool
+	bool "Support for the ARM global timer" if COMPILE_TEST
 	select CLKSRC_OF if OF
+	depends on ARM
 	help
 	  This options enables support for the ARM global timer unit
 
@@ -243,7 +326,7 @@ config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
 	  Use ARM global timer clock source as sched_clock
 
 config ARMV7M_SYSTICK
-	bool
+	bool "Support for the ARMv7M system time" if COMPILE_TEST
 	select CLKSRC_OF if OF
 	select CLKSRC_MMIO
 	help
@@ -254,9 +337,12 @@ config ATMEL_PIT
 	def_bool SOC_AT91SAM9 || SOC_SAMA5
 
 config ATMEL_ST
-	bool
+	bool "Atmel ST timer support" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
 	select CLKSRC_OF
 	select MFD_SYSCON
+	help
+	  Support for the Atmel ST timer.
 
 config CLKSRC_METAG_GENERIC
 	def_bool y if METAG
@@ -270,7 +356,7 @@ config CLKSRC_EXYNOS_MCT
 	  Support for Multi Core Timer controller on Exynos SoCs.
 
 config CLKSRC_SAMSUNG_PWM
-	bool "PWM timer drvier for Samsung S3C, S5P" if COMPILE_TEST
+	bool "PWM timer driver for Samsung S3C, S5P" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
 	depends on HAS_IOMEM
 	help
@@ -293,6 +379,14 @@ config VF_PIT_TIMER
 	help
 	  Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
 
+config OXNAS_RPS_TIMER
+	bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	select CLKSRC_OF
+	select CLKSRC_MMIO
+	help
+	  This enables support for the Oxford Semiconductor OXNAS RPS timers.
+
 config SYS_SUPPORTS_SH_CMT
 	bool
 
@@ -361,8 +455,8 @@ config CLKSRC_QCOM
 	  Qualcomm SoCs.
 
 config CLKSRC_VERSATILE
-	bool "ARM Versatile (Express) reference platforms clock source"
-	depends on PLAT_VERSATILE && GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
+	bool "ARM Versatile (Express) reference platforms clock source" if COMPILE_TEST
+	depends on GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
 	select CLKSRC_OF
 	default y if MFD_VEXPRESS_SYSREG
 	help
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 473974f9590a..fd9d6df0bbc0 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -19,21 +19,21 @@ obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
 obj-$(CONFIG_CLKSRC_DBX500_PRCMU)	+= clksrc-dbx500-prcmu.o
 obj-$(CONFIG_ARMADA_370_XP_TIMER)	+= time-armada-370-xp.o
 obj-$(CONFIG_ORION_TIMER)	+= time-orion.o
-obj-$(CONFIG_ARCH_BCM2835)	+= bcm2835_timer.o
-obj-$(CONFIG_ARCH_CLPS711X)	+= clps711x-timer.o
-obj-$(CONFIG_ARCH_ATLAS7)	+= timer-atlas7.o
-obj-$(CONFIG_ARCH_MOXART)	+= moxart_timer.o
-obj-$(CONFIG_ARCH_MXS)	+= mxs_timer.o
+obj-$(CONFIG_BCM2835_TIMER)	+= bcm2835_timer.o
+obj-$(CONFIG_CLPS711X_TIMER)	+= clps711x-timer.o
+obj-$(CONFIG_ATLAS7_TIMER)	+= timer-atlas7.o
+obj-$(CONFIG_MOXART_TIMER)	+= moxart_timer.o
+obj-$(CONFIG_MXS_TIMER)	+= mxs_timer.o
 obj-$(CONFIG_CLKSRC_PXA)	+= pxa_timer.o
-obj-$(CONFIG_ARCH_PRIMA2)	+= timer-prima2.o
-obj-$(CONFIG_ARCH_U300)	+= timer-u300.o
+obj-$(CONFIG_PRIMA2_TIMER)	+= timer-prima2.o
+obj-$(CONFIG_U300_TIMER)	+= timer-u300.o
 obj-$(CONFIG_SUN4I_TIMER)	+= sun4i_timer.o
 obj-$(CONFIG_SUN5I_HSTIMER)	+= timer-sun5i.o
 obj-$(CONFIG_MESON6_TIMER)	+= meson6_timer.o
 obj-$(CONFIG_TEGRA_TIMER)	+= tegra20_timer.o
 obj-$(CONFIG_VT8500_TIMER)	+= vt8500_timer.o
-obj-$(CONFIG_ARCH_NSPIRE)	+= zevio-timer.o
-obj-$(CONFIG_ARCH_BCM_MOBILE)	+= bcm_kona_timer.o
+obj-$(CONFIG_NSPIRE_TIMER)	+= zevio-timer.o
+obj-$(CONFIG_BCM_KONA_TIMER)	+= bcm_kona_timer.o
 obj-$(CONFIG_CADENCE_TTC_TIMER)	+= cadence_ttc_timer.o
 obj-$(CONFIG_CLKSRC_EFM32)	+= time-efm32.o
 obj-$(CONFIG_CLKSRC_STM32)	+= timer-stm32.o
@@ -48,6 +48,7 @@ obj-$(CONFIG_MTK_TIMER) += mtk_timer.o
 obj-$(CONFIG_CLKSRC_PISTACHIO)	+= time-pistachio.o
 obj-$(CONFIG_CLKSRC_TI_32K)	+= timer-ti-32k.o
 obj-$(CONFIG_CLKSRC_NPS)	+= timer-nps.o
+obj-$(CONFIG_OXNAS_RPS_TIMER)	+= timer-oxnas-rps.o
 
 obj-$(CONFIG_ARM_ARCH_TIMER)	+= arm_arch_timer.o
 obj-$(CONFIG_ARM_GLOBAL_TIMER)	+= arm_global_timer.o
@@ -55,8 +56,8 @@ obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o
 obj-$(CONFIG_ARM_TIMER_SP804)	+= timer-sp804.o
 obj-$(CONFIG_CLKSRC_METAG_GENERIC)	+= metag_generic.o
 obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST)	+= dummy_timer.o
-obj-$(CONFIG_ARCH_KEYSTONE)	+= timer-keystone.o
-obj-$(CONFIG_ARCH_INTEGRATOR_AP)	+= timer-integrator-ap.o
+obj-$(CONFIG_KEYSTONE_TIMER)	+= timer-keystone.o
+obj-$(CONFIG_INTEGRATOR_AP_TIMER)	+= timer-integrator-ap.o
 obj-$(CONFIG_CLKSRC_VERSATILE)	+= versatile.o
 obj-$(CONFIG_CLKSRC_MIPS_GIC)	+= mips-gic-timer.o
 obj-$(CONFIG_CLKSRC_TANGO_XTAL)	+= tango_xtal.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 4814446a0024..5effd3027319 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -79,6 +79,14 @@ static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
 static bool arch_timer_c3stop;
 static bool arch_timer_mem_use_virtual;
 
+static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
+
+static int __init early_evtstrm_cfg(char *buf)
+{
+	return strtobool(buf, &evtstrm_enable);
+}
+early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
+
 /*
  * Architected system timer support.
  */
@@ -372,7 +380,7 @@ static int arch_timer_setup(struct clock_event_device *clk)
 		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
 
 	arch_counter_set_user_access();
-	if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
+	if (evtstrm_enable)
 		arch_timer_configure_evtstream();
 
 	return 0;
@@ -693,25 +701,26 @@ arch_timer_needs_probing(int type, const struct of_device_id *matches)
 	return needs_probing;
 }
 
-static void __init arch_timer_common_init(void)
+static int __init arch_timer_common_init(void)
 {
 	unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;
 
 	/* Wait until both nodes are probed if we have two timers */
 	if ((arch_timers_present & mask) != mask) {
 		if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
-			return;
+			return 0;
 		if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
-			return;
+			return 0;
 	}
 
 	arch_timer_banner(arch_timers_present);
 	arch_counter_register(arch_timers_present);
-	arch_timer_arch_init();
+	return arch_timer_arch_init();
 }
 
-static void __init arch_timer_init(void)
+static int __init arch_timer_init(void)
 {
+	int ret;
 	/*
 	 * If HYP mode is available, we know that the physical timer
 	 * has been configured to be accessible from PL1. Use it, so
@@ -739,23 +748,30 @@ static void __init arch_timer_init(void)
 
 		if (!has_ppi) {
 			pr_warn("arch_timer: No interrupt available, giving up\n");
-			return;
+			return -EINVAL;
 		}
 	}
 
-	arch_timer_register();
-	arch_timer_common_init();
+	ret = arch_timer_register();
+	if (ret)
+		return ret;
+
+	ret = arch_timer_common_init();
+	if (ret)
+		return ret;
 
 	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];
+
+	return 0;
 }
 
-static void __init arch_timer_of_init(struct device_node *np)
+static int __init arch_timer_of_init(struct device_node *np)
 {
 	int i;
 
 	if (arch_timers_present & ARCH_CP15_TIMER) {
 		pr_warn("arch_timer: multiple nodes in dt, skipping\n");
-		return;
+		return 0;
 	}
 
 	arch_timers_present |= ARCH_CP15_TIMER;
@@ -774,23 +790,23 @@ static void __init arch_timer_of_init(struct device_node *np)
 	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
 		arch_timer_uses_ppi = PHYS_SECURE_PPI;
 
-	arch_timer_init();
+	return arch_timer_init();
 }
 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
 CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
 
-static void __init arch_timer_mem_init(struct device_node *np)
+static int __init arch_timer_mem_init(struct device_node *np)
 {
 	struct device_node *frame, *best_frame = NULL;
 	void __iomem *cntctlbase, *base;
-	unsigned int irq;
+	unsigned int irq, ret = -EINVAL;
 	u32 cnttidr;
 
 	arch_timers_present |= ARCH_MEM_TIMER;
 	cntctlbase = of_iomap(np, 0);
 	if (!cntctlbase) {
 		pr_err("arch_timer: Can't find CNTCTLBase\n");
-		return;
+		return -ENXIO;
 	}
 
 	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
@@ -830,6 +846,7 @@ static void __init arch_timer_mem_init(struct device_node *np)
 			best_frame = of_node_get(frame);
 	}
 
+	ret = -ENXIO;
 	base = arch_counter_base = of_iomap(best_frame, 0);
 	if (!base) {
 		pr_err("arch_timer: Can't map frame's registers\n");
@@ -841,6 +858,7 @@ static void __init arch_timer_mem_init(struct device_node *np)
841 else 858 else
842 irq = irq_of_parse_and_map(best_frame, 0); 859 irq = irq_of_parse_and_map(best_frame, 0);
843 860
861 ret = -EINVAL;
844 if (!irq) { 862 if (!irq) {
845 pr_err("arch_timer: Frame missing %s irq", 863 pr_err("arch_timer: Frame missing %s irq",
846 arch_timer_mem_use_virtual ? "virt" : "phys"); 864 arch_timer_mem_use_virtual ? "virt" : "phys");
@@ -848,11 +866,15 @@ static void __init arch_timer_mem_init(struct device_node *np)
848 } 866 }
849 867
850 arch_timer_detect_rate(base, np); 868 arch_timer_detect_rate(base, np);
851 arch_timer_mem_register(base, irq); 869 ret = arch_timer_mem_register(base, irq);
852 arch_timer_common_init(); 870 if (ret)
871 goto out;
872
873 return arch_timer_common_init();
853out: 874out:
854 iounmap(cntctlbase); 875 iounmap(cntctlbase);
855 of_node_put(best_frame); 876 of_node_put(best_frame);
877 return ret;
856} 878}
857CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem", 879CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
858 arch_timer_mem_init); 880 arch_timer_mem_init);
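Taken together, the arch_timer hunks establish the conversion pattern the rest of this series repeats: stage an errno in ret before each step that can fail, and unwind shared resources through a single exit label. A minimal sketch of that shape, assuming the usual OF helpers; the foo_* names are hypothetical, not from this patch:

    static int __init foo_timer_mem_init(struct device_node *np)
    {
        void __iomem *base;
        unsigned int irq;
        int ret;

        base = of_iomap(np, 0);              /* returns NULL on failure */
        if (!base) {
            pr_err("foo-timer: can't map registers\n");
            return -ENXIO;
        }

        ret = -EINVAL;                       /* staged before the next check */
        irq = irq_of_parse_and_map(np, 0);
        if (!irq) {
            pr_err("foo-timer: no irq\n");
            goto out_unmap;
        }

        ret = foo_timer_register(base, irq); /* hypothetical registration helper */
        if (ret)
            goto out_unmap;

        return 0;

    out_unmap:
        iounmap(base);
        return ret;
    }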
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 9df0d1699d22..2a9ceb6e93f9 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -238,7 +238,7 @@ static void __init gt_delay_timer_init(void)
238 register_current_timer_delay(&gt_delay_timer); 238 register_current_timer_delay(&gt_delay_timer);
239} 239}
240 240
241static void __init gt_clocksource_init(void) 241static int __init gt_clocksource_init(void)
242{ 242{
243 writel(0, gt_base + GT_CONTROL); 243 writel(0, gt_base + GT_CONTROL);
244 writel(0, gt_base + GT_COUNTER0); 244 writel(0, gt_base + GT_COUNTER0);
@@ -249,7 +249,7 @@ static void __init gt_clocksource_init(void)
249#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK 249#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
250 sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate); 250 sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate);
251#endif 251#endif
252 clocksource_register_hz(&gt_clocksource, gt_clk_rate); 252 return clocksource_register_hz(&gt_clocksource, gt_clk_rate);
253} 253}
254 254
255static int gt_cpu_notify(struct notifier_block *self, unsigned long action, 255static int gt_cpu_notify(struct notifier_block *self, unsigned long action,
@@ -270,7 +270,7 @@ static struct notifier_block gt_cpu_nb = {
270 .notifier_call = gt_cpu_notify, 270 .notifier_call = gt_cpu_notify,
271}; 271};
272 272
273static void __init global_timer_of_register(struct device_node *np) 273static int __init global_timer_of_register(struct device_node *np)
274{ 274{
275 struct clk *gt_clk; 275 struct clk *gt_clk;
276 int err = 0; 276 int err = 0;
@@ -283,19 +283,19 @@ static void __init global_timer_of_register(struct device_node *np)
283 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9 283 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9
284 && (read_cpuid_id() & 0xf0000f) < 0x200000) { 284 && (read_cpuid_id() & 0xf0000f) < 0x200000) {
285 pr_warn("global-timer: non support for this cpu version.\n"); 285 pr_warn("global-timer: non support for this cpu version.\n");
286 return; 286 return -ENOSYS;
287 } 287 }
288 288
289 gt_ppi = irq_of_parse_and_map(np, 0); 289 gt_ppi = irq_of_parse_and_map(np, 0);
290 if (!gt_ppi) { 290 if (!gt_ppi) {
291 pr_warn("global-timer: unable to parse irq\n"); 291 pr_warn("global-timer: unable to parse irq\n");
292 return; 292 return -EINVAL;
293 } 293 }
294 294
295 gt_base = of_iomap(np, 0); 295 gt_base = of_iomap(np, 0);
296 if (!gt_base) { 296 if (!gt_base) {
297 pr_warn("global-timer: invalid base address\n"); 297 pr_warn("global-timer: invalid base address\n");
298 return; 298 return -ENXIO;
299 } 299 }
300 300
301 gt_clk = of_clk_get(np, 0); 301 gt_clk = of_clk_get(np, 0);
@@ -332,11 +332,17 @@ static void __init global_timer_of_register(struct device_node *np)
332 } 332 }
333 333
334 /* Immediately configure the timer on the boot CPU */ 334 /* Immediately configure the timer on the boot CPU */
335 gt_clocksource_init(); 335 err = gt_clocksource_init();
336 gt_clockevents_init(this_cpu_ptr(gt_evt)); 336 if (err)
337 goto out_irq;
338
339 err = gt_clockevents_init(this_cpu_ptr(gt_evt));
340 if (err)
341 goto out_irq;
342
337 gt_delay_timer_init(); 343 gt_delay_timer_init();
338 344
339 return; 345 return 0;
340 346
341out_irq: 347out_irq:
342 free_percpu_irq(gt_ppi, gt_evt); 348 free_percpu_irq(gt_ppi, gt_evt);
@@ -347,6 +353,8 @@ out_clk:
347out_unmap: 353out_unmap:
348 iounmap(gt_base); 354 iounmap(gt_base);
349 WARN(err, "ARM Global timer register failed (%d)\n", err); 355 WARN(err, "ARM Global timer register failed (%d)\n", err);
356
357 return err;
350} 358}
351 359
352/* Only tested on r2p2 and r3p0 */ 360/* Only tested on r2p2 and r3p0 */
diff --git a/drivers/clocksource/armv7m_systick.c b/drivers/clocksource/armv7m_systick.c
index addfd2c64f54..a315491b7047 100644
--- a/drivers/clocksource/armv7m_systick.c
+++ b/drivers/clocksource/armv7m_systick.c
@@ -7,6 +7,7 @@
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8#include <linux/clocksource.h> 8#include <linux/clocksource.h>
9#include <linux/clockchips.h> 9#include <linux/clockchips.h>
10#include <linux/io.h>
10#include <linux/of.h> 11#include <linux/of.h>
11#include <linux/of_address.h> 12#include <linux/of_address.h>
12#include <linux/clk.h> 13#include <linux/clk.h>
@@ -21,7 +22,7 @@
21 22
22#define SYSTICK_LOAD_RELOAD_MASK 0x00FFFFFF 23#define SYSTICK_LOAD_RELOAD_MASK 0x00FFFFFF
23 24
24static void __init system_timer_of_register(struct device_node *np) 25static int __init system_timer_of_register(struct device_node *np)
25{ 26{
26 struct clk *clk = NULL; 27 struct clk *clk = NULL;
27 void __iomem *base; 28 void __iomem *base;
@@ -31,22 +32,26 @@ static void __init system_timer_of_register(struct device_node *np)
31 base = of_iomap(np, 0); 32 base = of_iomap(np, 0);
32 if (!base) { 33 if (!base) {
33 pr_warn("system-timer: invalid base address\n"); 34 pr_warn("system-timer: invalid base address\n");
34 return; 35 return -ENXIO;
35 } 36 }
36 37
37 ret = of_property_read_u32(np, "clock-frequency", &rate); 38 ret = of_property_read_u32(np, "clock-frequency", &rate);
38 if (ret) { 39 if (ret) {
39 clk = of_clk_get(np, 0); 40 clk = of_clk_get(np, 0);
40 if (IS_ERR(clk)) 41 if (IS_ERR(clk)) {
42 ret = PTR_ERR(clk);
41 goto out_unmap; 43 goto out_unmap;
44 }
42 45
43 ret = clk_prepare_enable(clk); 46 ret = clk_prepare_enable(clk);
44 if (ret) 47 if (ret)
45 goto out_clk_put; 48 goto out_clk_put;
46 49
47 rate = clk_get_rate(clk); 50 rate = clk_get_rate(clk);
48 if (!rate) 51 if (!rate) {
52 ret = -EINVAL;
49 goto out_clk_disable; 53 goto out_clk_disable;
54 }
50 } 55 }
51 56
52 writel_relaxed(SYSTICK_LOAD_RELOAD_MASK, base + SYST_RVR); 57 writel_relaxed(SYSTICK_LOAD_RELOAD_MASK, base + SYST_RVR);
@@ -64,7 +69,7 @@ static void __init system_timer_of_register(struct device_node *np)
64 69
65 pr_info("ARM System timer initialized as clocksource\n"); 70 pr_info("ARM System timer initialized as clocksource\n");
66 71
67 return; 72 return 0;
68 73
69out_clk_disable: 74out_clk_disable:
70 clk_disable_unprepare(clk); 75 clk_disable_unprepare(clk);
@@ -73,6 +78,8 @@ out_clk_put:
73out_unmap: 78out_unmap:
74 iounmap(base); 79 iounmap(base);
75 pr_warn("ARM System timer register failed (%d)\n", ret); 80 pr_warn("ARM System timer register failed (%d)\n", ret);
81
82 return ret;
76} 83}
77 84
78CLOCKSOURCE_OF_DECLARE(arm_systick, "arm,armv7m-systick", 85CLOCKSOURCE_OF_DECLARE(arm_systick, "arm,armv7m-systick",
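The systick conversion also shows the common rate-discovery fallback: prefer a clock-frequency property, otherwise take the node's first clock, propagating each failure with its own errno. A sketch under those assumptions (foo_get_rate is a hypothetical name):

    static int __init foo_get_rate(struct device_node *np, u32 *rate)
    {
        struct clk *clk;
        int ret;

        /* fast path: the rate is stated directly in the device tree */
        if (!of_property_read_u32(np, "clock-frequency", rate))
            return 0;

        /* fallback: derive the rate from the node's first clock */
        clk = of_clk_get(np, 0);
        if (IS_ERR(clk))
            return PTR_ERR(clk);

        ret = clk_prepare_enable(clk);
        if (ret) {
            clk_put(clk);
            return ret;
        }

        *rate = clk_get_rate(clk);
        if (!*rate) {
            clk_disable_unprepare(clk);
            clk_put(clk);
            return -EINVAL;
        }

        return 0;
    }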
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
index 217438d39eb3..1ba871b7fe11 100644
--- a/drivers/clocksource/asm9260_timer.c
+++ b/drivers/clocksource/asm9260_timer.c
@@ -184,7 +184,7 @@ static irqreturn_t asm9260_timer_interrupt(int irq, void *dev_id)
184 * Timer initialization 184 * Timer initialization
185 * --------------------------------------------------------------------------- 185 * ---------------------------------------------------------------------------
186 */ 186 */
187static void __init asm9260_timer_init(struct device_node *np) 187static int __init asm9260_timer_init(struct device_node *np)
188{ 188{
189 int irq; 189 int irq;
190 struct clk *clk; 190 struct clk *clk;
@@ -192,20 +192,26 @@ static void __init asm9260_timer_init(struct device_node *np)
192 unsigned long rate; 192 unsigned long rate;
193 193
194 priv.base = of_io_request_and_map(np, 0, np->name); 194 priv.base = of_io_request_and_map(np, 0, np->name);
195 if (IS_ERR(priv.base)) 195 if (IS_ERR(priv.base)) {
196 panic("%s: unable to map resource", np->name); 196 pr_err("%s: unable to map resource", np->name);
197 return PTR_ERR(priv.base);
198 }
197 199
198 clk = of_clk_get(np, 0); 200 clk = of_clk_get(np, 0);
199 201
200 ret = clk_prepare_enable(clk); 202 ret = clk_prepare_enable(clk);
201 if (ret) 203 if (ret) {
202 panic("Failed to enable clk!\n"); 204 pr_err("Failed to enable clk!\n");
205 return ret;
206 }
203 207
204 irq = irq_of_parse_and_map(np, 0); 208 irq = irq_of_parse_and_map(np, 0);
205 ret = request_irq(irq, asm9260_timer_interrupt, IRQF_TIMER, 209 ret = request_irq(irq, asm9260_timer_interrupt, IRQF_TIMER,
206 DRIVER_NAME, &event_dev); 210 DRIVER_NAME, &event_dev);
207 if (ret) 211 if (ret) {
208 panic("Failed to setup irq!\n"); 212 pr_err("Failed to setup irq!\n");
213 return ret;
214 }
209 215
210 /* set all timers for count-up */ 216 /* set all timers for count-up */
211 writel_relaxed(BM_DIR_DEFAULT, priv.base + HW_DIR); 217 writel_relaxed(BM_DIR_DEFAULT, priv.base + HW_DIR);
@@ -229,6 +235,8 @@ static void __init asm9260_timer_init(struct device_node *np)
229 priv.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ); 235 priv.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
230 event_dev.cpumask = cpumask_of(0); 236 event_dev.cpumask = cpumask_of(0);
231 clockevents_config_and_register(&event_dev, rate, 0x2c00, 0xfffffffe); 237 clockevents_config_and_register(&event_dev, rate, 0x2c00, 0xfffffffe);
238
239 return 0;
232} 240}
233CLOCKSOURCE_OF_DECLARE(asm9260_timer, "alphascale,asm9260-timer", 241CLOCKSOURCE_OF_DECLARE(asm9260_timer, "alphascale,asm9260-timer",
234 asm9260_timer_init); 242 asm9260_timer_init);
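Note that the two mapping helpers fail differently, which decides the errno idiom: of_io_request_and_map() encodes its error in the returned pointer, while plain of_iomap() returns NULL and leaves the caller to pick a code (typically -ENXIO). A small sketch of the ERR_PTR() convention used here:

    static int __init foo_map_regs(struct device_node *np, void __iomem **out)
    {
        void __iomem *base;

        /*
         * of_io_request_and_map() returns an ERR_PTR() on failure, so the
         * errno travels inside the pointer; with of_iomap() there is no
         * encoded errno, hence checks of the form: if (!base) return -ENXIO;
         */
        base = of_io_request_and_map(np, 0, np->name);
        if (IS_ERR(base))
            return PTR_ERR(base);

        *out = base;
        return 0;
    }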
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 6f2822928963..e71acf231c89 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -80,19 +80,24 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id)
80 } 80 }
81} 81}
82 82
83static void __init bcm2835_timer_init(struct device_node *node) 83static int __init bcm2835_timer_init(struct device_node *node)
84{ 84{
85 void __iomem *base; 85 void __iomem *base;
86 u32 freq; 86 u32 freq;
87 int irq; 87 int irq, ret;
88 struct bcm2835_timer *timer; 88 struct bcm2835_timer *timer;
89 89
90 base = of_iomap(node, 0); 90 base = of_iomap(node, 0);
91 if (!base) 91 if (!base) {
92 panic("Can't remap registers"); 92 pr_err("Can't remap registers");
93 return -ENXIO;
94 }
93 95
94 if (of_property_read_u32(node, "clock-frequency", &freq)) 96 ret = of_property_read_u32(node, "clock-frequency", &freq);
95 panic("Can't read clock-frequency"); 97 if (ret) {
98 pr_err("Can't read clock-frequency");
99 return ret;
100 }
96 101
97 system_clock = base + REG_COUNTER_LO; 102 system_clock = base + REG_COUNTER_LO;
98 sched_clock_register(bcm2835_sched_read, 32, freq); 103 sched_clock_register(bcm2835_sched_read, 32, freq);
@@ -101,12 +106,16 @@ static void __init bcm2835_timer_init(struct device_node *node)
101 freq, 300, 32, clocksource_mmio_readl_up); 106 freq, 300, 32, clocksource_mmio_readl_up);
102 107
103 irq = irq_of_parse_and_map(node, DEFAULT_TIMER); 108 irq = irq_of_parse_and_map(node, DEFAULT_TIMER);
104 if (irq <= 0) 109 if (irq <= 0) {
105 panic("Can't parse IRQ"); 110 pr_err("Can't parse IRQ");
111 return -EINVAL;
112 }
106 113
107 timer = kzalloc(sizeof(*timer), GFP_KERNEL); 114 timer = kzalloc(sizeof(*timer), GFP_KERNEL);
108 if (!timer) 115 if (!timer) {
109 panic("Can't allocate timer struct\n"); 116 pr_err("Can't allocate timer struct\n");
117 return -ENOMEM;
118 }
110 119
111 timer->control = base + REG_CONTROL; 120 timer->control = base + REG_CONTROL;
112 timer->compare = base + REG_COMPARE(DEFAULT_TIMER); 121 timer->compare = base + REG_COMPARE(DEFAULT_TIMER);
@@ -121,12 +130,17 @@ static void __init bcm2835_timer_init(struct device_node *node)
121 timer->act.dev_id = timer; 130 timer->act.dev_id = timer;
122 timer->act.handler = bcm2835_time_interrupt; 131 timer->act.handler = bcm2835_time_interrupt;
123 132
124 if (setup_irq(irq, &timer->act)) 133 ret = setup_irq(irq, &timer->act);
125 panic("Can't set up timer IRQ\n"); 134 if (ret) {
135 pr_err("Can't set up timer IRQ\n");
136 return ret;
137 }
126 138
127 clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff); 139 clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff);
128 140
129 pr_info("bcm2835: system timer (irq = %d)\n", irq); 141 pr_info("bcm2835: system timer (irq = %d)\n", irq);
142
143 return 0;
130} 144}
131CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer", 145CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer",
132 bcm2835_timer_init); 146 bcm2835_timer_init);
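The bcm2835 hunk shows the series' other goal: a bad timer node no longer panics the whole boot; the init reports, returns an errno, and the probe loop in clksrc-probe.c (further below) logs it and moves on. Roughly, with hypothetical names:

    static int __init foo_timer_init(struct device_node *node)
    {
        void __iomem *base = of_iomap(node, 0);

        /* was: panic("Can't remap registers") -- fatal for the entire boot */
        if (!base) {
            pr_err("foo-timer: can't remap registers\n");
            return -ENXIO;      /* clocksource_probe() logs this and continues */
        }

        /* hardware programming elided in this sketch */
        return 0;
    }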
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index e717e87df9bc..7e3fd375a627 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -20,7 +20,6 @@
20#include <linux/clk.h> 20#include <linux/clk.h>
21 21
22#include <linux/io.h> 22#include <linux/io.h>
23#include <asm/mach/time.h>
24 23
25#include <linux/of.h> 24#include <linux/of.h>
26#include <linux/of_address.h> 25#include <linux/of_address.h>
@@ -163,16 +162,11 @@ static struct irqaction kona_timer_irq = {
163 .handler = kona_timer_interrupt, 162 .handler = kona_timer_interrupt,
164}; 163};
165 164
166static void __init kona_timer_init(struct device_node *node) 165static int __init kona_timer_init(struct device_node *node)
167{ 166{
168 u32 freq; 167 u32 freq;
169 struct clk *external_clk; 168 struct clk *external_clk;
170 169
171 if (!of_device_is_available(node)) {
172 pr_info("Kona Timer v1 marked as disabled in device tree\n");
173 return;
174 }
175
176 external_clk = of_clk_get_by_name(node, NULL); 170 external_clk = of_clk_get_by_name(node, NULL);
177 171
178 if (!IS_ERR(external_clk)) { 172 if (!IS_ERR(external_clk)) {
@@ -182,7 +176,7 @@ static void __init kona_timer_init(struct device_node *node)
182 arch_timer_rate = freq; 176 arch_timer_rate = freq;
183 } else { 177 } else {
184 pr_err("Kona Timer v1 unable to determine clock-frequency"); 178 pr_err("Kona Timer v1 unable to determine clock-frequency");
185 return; 179 return -EINVAL;
186 } 180 }
187 181
188 /* Setup IRQ numbers */ 182 /* Setup IRQ numbers */
@@ -196,6 +190,8 @@ static void __init kona_timer_init(struct device_node *node)
196 kona_timer_clockevents_init(); 190 kona_timer_clockevents_init();
197 setup_irq(timers.tmr_irq, &kona_timer_irq); 191 setup_irq(timers.tmr_irq, &kona_timer_irq);
198 kona_timer_set_next_event((arch_timer_rate / HZ), NULL); 192 kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
193
194 return 0;
199} 195}
200 196
201CLOCKSOURCE_OF_DECLARE(brcm_kona, "brcm,kona-timer", kona_timer_init); 197CLOCKSOURCE_OF_DECLARE(brcm_kona, "brcm,kona-timer", kona_timer_init);
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index 9be6018bd2b8..fbfbdec13b08 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -322,22 +322,22 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
322 return NOTIFY_DONE; 322 return NOTIFY_DONE;
323} 323}
324 324
325static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base, 325static int __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
326 u32 timer_width) 326 u32 timer_width)
327{ 327{
328 struct ttc_timer_clocksource *ttccs; 328 struct ttc_timer_clocksource *ttccs;
329 int err; 329 int err;
330 330
331 ttccs = kzalloc(sizeof(*ttccs), GFP_KERNEL); 331 ttccs = kzalloc(sizeof(*ttccs), GFP_KERNEL);
332 if (WARN_ON(!ttccs)) 332 if (!ttccs)
333 return; 333 return -ENOMEM;
334 334
335 ttccs->ttc.clk = clk; 335 ttccs->ttc.clk = clk;
336 336
337 err = clk_prepare_enable(ttccs->ttc.clk); 337 err = clk_prepare_enable(ttccs->ttc.clk);
338 if (WARN_ON(err)) { 338 if (err) {
339 kfree(ttccs); 339 kfree(ttccs);
340 return; 340 return err;
341 } 341 }
342 342
343 ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk); 343 ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk);
@@ -345,8 +345,10 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
345 ttccs->ttc.clk_rate_change_nb.notifier_call = 345 ttccs->ttc.clk_rate_change_nb.notifier_call =
346 ttc_rate_change_clocksource_cb; 346 ttc_rate_change_clocksource_cb;
347 ttccs->ttc.clk_rate_change_nb.next = NULL; 347 ttccs->ttc.clk_rate_change_nb.next = NULL;
348 if (clk_notifier_register(ttccs->ttc.clk, 348
349 &ttccs->ttc.clk_rate_change_nb)) 349 err = clk_notifier_register(ttccs->ttc.clk,
350 &ttccs->ttc.clk_rate_change_nb);
351 if (err)
350 pr_warn("Unable to register clock notifier.\n"); 352 pr_warn("Unable to register clock notifier.\n");
351 353
352 ttccs->ttc.base_addr = base; 354 ttccs->ttc.base_addr = base;
@@ -368,14 +370,16 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
368 ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET); 370 ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
369 371
370 err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE); 372 err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE);
371 if (WARN_ON(err)) { 373 if (err) {
372 kfree(ttccs); 374 kfree(ttccs);
373 return; 375 return err;
374 } 376 }
375 377
376 ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET; 378 ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET;
377 sched_clock_register(ttc_sched_clock_read, timer_width, 379 sched_clock_register(ttc_sched_clock_read, timer_width,
378 ttccs->ttc.freq / PRESCALE); 380 ttccs->ttc.freq / PRESCALE);
381
382 return 0;
379} 383}
380 384
381static int ttc_rate_change_clockevent_cb(struct notifier_block *nb, 385static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
@@ -401,30 +405,35 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
401 } 405 }
402} 406}
403 407
404static void __init ttc_setup_clockevent(struct clk *clk, 408static int __init ttc_setup_clockevent(struct clk *clk,
405 void __iomem *base, u32 irq) 409 void __iomem *base, u32 irq)
406{ 410{
407 struct ttc_timer_clockevent *ttcce; 411 struct ttc_timer_clockevent *ttcce;
408 int err; 412 int err;
409 413
410 ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL); 414 ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL);
411 if (WARN_ON(!ttcce)) 415 if (!ttcce)
412 return; 416 return -ENOMEM;
413 417
414 ttcce->ttc.clk = clk; 418 ttcce->ttc.clk = clk;
415 419
416 err = clk_prepare_enable(ttcce->ttc.clk); 420 err = clk_prepare_enable(ttcce->ttc.clk);
417 if (WARN_ON(err)) { 421 if (err) {
418 kfree(ttcce); 422 kfree(ttcce);
419 return; 423 return err;
420 } 424 }
421 425
422 ttcce->ttc.clk_rate_change_nb.notifier_call = 426 ttcce->ttc.clk_rate_change_nb.notifier_call =
423 ttc_rate_change_clockevent_cb; 427 ttc_rate_change_clockevent_cb;
424 ttcce->ttc.clk_rate_change_nb.next = NULL; 428 ttcce->ttc.clk_rate_change_nb.next = NULL;
425 if (clk_notifier_register(ttcce->ttc.clk, 429
426 &ttcce->ttc.clk_rate_change_nb)) 430 err = clk_notifier_register(ttcce->ttc.clk,
431 &ttcce->ttc.clk_rate_change_nb);
432 if (err) {
427 pr_warn("Unable to register clock notifier.\n"); 433 pr_warn("Unable to register clock notifier.\n");
434 return err;
435 }
436
428 ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk); 437 ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
429 438
430 ttcce->ttc.base_addr = base; 439 ttcce->ttc.base_addr = base;
@@ -451,13 +460,15 @@ static void __init ttc_setup_clockevent(struct clk *clk,
451 460
452 err = request_irq(irq, ttc_clock_event_interrupt, 461 err = request_irq(irq, ttc_clock_event_interrupt,
453 IRQF_TIMER, ttcce->ce.name, ttcce); 462 IRQF_TIMER, ttcce->ce.name, ttcce);
454 if (WARN_ON(err)) { 463 if (err) {
455 kfree(ttcce); 464 kfree(ttcce);
456 return; 465 return err;
457 } 466 }
458 467
459 clockevents_config_and_register(&ttcce->ce, 468 clockevents_config_and_register(&ttcce->ce,
460 ttcce->ttc.freq / PRESCALE, 1, 0xfffe); 469 ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
470
471 return 0;
461} 472}
462 473
463/** 474/**
@@ -466,17 +477,17 @@ static void __init ttc_setup_clockevent(struct clk *clk,
466 * Initializes the timer hardware and registers the clock source and clock event 477 * Initializes the timer hardware and registers the clock source and clock event
467 * timers with the Linux kernel timer framework 478 * timers with the Linux kernel timer framework
468 */ 479 */
469static void __init ttc_timer_init(struct device_node *timer) 480static int __init ttc_timer_init(struct device_node *timer)
470{ 481{
471 unsigned int irq; 482 unsigned int irq;
472 void __iomem *timer_baseaddr; 483 void __iomem *timer_baseaddr;
473 struct clk *clk_cs, *clk_ce; 484 struct clk *clk_cs, *clk_ce;
474 static int initialized; 485 static int initialized;
475 int clksel; 486 int clksel, ret;
476 u32 timer_width = 16; 487 u32 timer_width = 16;
477 488
478 if (initialized) 489 if (initialized)
479 return; 490 return 0;
480 491
481 initialized = 1; 492 initialized = 1;
482 493
@@ -488,13 +499,13 @@ static void __init ttc_timer_init(struct device_node *timer)
488 timer_baseaddr = of_iomap(timer, 0); 499 timer_baseaddr = of_iomap(timer, 0);
489 if (!timer_baseaddr) { 500 if (!timer_baseaddr) {
490 pr_err("ERROR: invalid timer base address\n"); 501 pr_err("ERROR: invalid timer base address\n");
491 BUG(); 502 return -ENXIO;
492 } 503 }
493 504
494 irq = irq_of_parse_and_map(timer, 1); 505 irq = irq_of_parse_and_map(timer, 1);
495 if (irq <= 0) { 506 if (irq <= 0) {
496 pr_err("ERROR: invalid interrupt number\n"); 507 pr_err("ERROR: invalid interrupt number\n");
497 BUG(); 508 return -EINVAL;
498 } 509 }
499 510
500 of_property_read_u32(timer, "timer-width", &timer_width); 511 of_property_read_u32(timer, "timer-width", &timer_width);
@@ -504,7 +515,7 @@ static void __init ttc_timer_init(struct device_node *timer)
504 clk_cs = of_clk_get(timer, clksel); 515 clk_cs = of_clk_get(timer, clksel);
505 if (IS_ERR(clk_cs)) { 516 if (IS_ERR(clk_cs)) {
506 pr_err("ERROR: timer input clock not found\n"); 517 pr_err("ERROR: timer input clock not found\n");
507 BUG(); 518 return PTR_ERR(clk_cs);
508 } 519 }
509 520
510 clksel = readl_relaxed(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET); 521 clksel = readl_relaxed(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET);
@@ -512,13 +523,20 @@ static void __init ttc_timer_init(struct device_node *timer)
512 clk_ce = of_clk_get(timer, clksel); 523 clk_ce = of_clk_get(timer, clksel);
513 if (IS_ERR(clk_ce)) { 524 if (IS_ERR(clk_ce)) {
514 pr_err("ERROR: timer input clock not found\n"); 525 pr_err("ERROR: timer input clock not found\n");
515 BUG(); 526 return PTR_ERR(clk_ce);
516 } 527 }
517 528
518 ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width); 529 ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width);
519 ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq); 530 if (ret)
531 return ret;
532
533 ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
534 if (ret)
535 return ret;
520 536
521 pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq); 537 pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq);
538
539 return 0;
522} 540}
523 541
524CLOCKSOURCE_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init); 542CLOCKSOURCE_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init);
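With the caller now checking return codes, the TTC helpers can drop their WARN_ON() wrappers and instead release whatever they acquired before handing the error back. A reduced sketch of that cleanup-on-failure shape (struct and names hypothetical):

    struct foo_cs {
        struct clk *clk;
    };

    static int __init foo_setup_clocksource(struct clk *clk)
    {
        struct foo_cs *cs;
        int err;

        cs = kzalloc(sizeof(*cs), GFP_KERNEL);
        if (!cs)
            return -ENOMEM;     /* the caller reports it; no WARN_ON() needed */

        cs->clk = clk;

        err = clk_prepare_enable(cs->clk);
        if (err) {
            kfree(cs);          /* undo the allocation on the way out */
            return err;
        }

        return 0;
    }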
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index dfad6eb99662..77a365f573d7 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -64,7 +64,7 @@ static u64 notrace dbx500_prcmu_sched_clock_read(void)
64 64
65#endif 65#endif
66 66
67static void __init clksrc_dbx500_prcmu_init(struct device_node *node) 67static int __init clksrc_dbx500_prcmu_init(struct device_node *node)
68{ 68{
69 clksrc_dbx500_timer_base = of_iomap(node, 0); 69 clksrc_dbx500_timer_base = of_iomap(node, 0);
70 70
@@ -84,7 +84,7 @@ static void __init clksrc_dbx500_prcmu_init(struct device_node *node)
84#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK 84#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
85 sched_clock_register(dbx500_prcmu_sched_clock_read, 32, RATE_32K); 85 sched_clock_register(dbx500_prcmu_sched_clock_read, 32, RATE_32K);
86#endif 86#endif
87 clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K); 87 return clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
88} 88}
89CLOCKSOURCE_OF_DECLARE(dbx500_prcmu, "stericsson,db8500-prcmu-timer-4", 89CLOCKSOURCE_OF_DECLARE(dbx500_prcmu, "stericsson,db8500-prcmu-timer-4",
90 clksrc_dbx500_prcmu_init); 90 clksrc_dbx500_prcmu_init);
diff --git a/drivers/clocksource/clksrc-probe.c b/drivers/clocksource/clksrc-probe.c
index 7cb6c923a836..bc62be97f0a8 100644
--- a/drivers/clocksource/clksrc-probe.c
+++ b/drivers/clocksource/clksrc-probe.c
@@ -28,15 +28,23 @@ void __init clocksource_probe(void)
28{ 28{
29 struct device_node *np; 29 struct device_node *np;
30 const struct of_device_id *match; 30 const struct of_device_id *match;
31 of_init_fn_1 init_func; 31 of_init_fn_1_ret init_func_ret;
32 unsigned clocksources = 0; 32 unsigned clocksources = 0;
33 int ret;
33 34
34 for_each_matching_node_and_match(np, __clksrc_of_table, &match) { 35 for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
35 if (!of_device_is_available(np)) 36 if (!of_device_is_available(np))
36 continue; 37 continue;
37 38
38 init_func = match->data; 39 init_func_ret = match->data;
39 init_func(np); 40
41 ret = init_func_ret(np);
42 if (ret) {
43 pr_err("Failed to initialize '%s': %d",
44 of_node_full_name(np), ret);
45 continue;
46 }
47
40 clocksources++; 48 clocksources++;
41 } 49 }
42 50
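The probe loop's new function-pointer type is the pivot of the whole series; presumably (as added to include/linux/of.h elsewhere in this series) it is simply an init that takes a device node and returns an errno, and a driver opts in by matching that signature:

    typedef int (*of_init_fn_1_ret)(struct device_node *);

    /* hypothetical driver; "vendor,foo-timer" is a made-up compatible */
    static int __init foo_timer_init(struct device_node *np)
    {
        if (!of_iomap(np, 0))
            return -ENXIO;      /* reported by clocksource_probe() */
        return 0;
    }
    CLOCKSOURCE_OF_DECLARE(foo, "vendor,foo-timer", foo_timer_init);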
diff --git a/drivers/clocksource/clksrc_st_lpc.c b/drivers/clocksource/clksrc_st_lpc.c
index 65ec4674416d..03cc49217bb4 100644
--- a/drivers/clocksource/clksrc_st_lpc.c
+++ b/drivers/clocksource/clksrc_st_lpc.c
@@ -92,7 +92,7 @@ static int __init st_clksrc_setup_clk(struct device_node *np)
92 return 0; 92 return 0;
93} 93}
94 94
95static void __init st_clksrc_of_register(struct device_node *np) 95static int __init st_clksrc_of_register(struct device_node *np)
96{ 96{
97 int ret; 97 int ret;
98 uint32_t mode; 98 uint32_t mode;
@@ -100,32 +100,36 @@ static void __init st_clksrc_of_register(struct device_node *np)
100 ret = of_property_read_u32(np, "st,lpc-mode", &mode); 100 ret = of_property_read_u32(np, "st,lpc-mode", &mode);
101 if (ret) { 101 if (ret) {
102 pr_err("clksrc-st-lpc: An LPC mode must be provided\n"); 102 pr_err("clksrc-st-lpc: An LPC mode must be provided\n");
103 return; 103 return ret;
104 } 104 }
105 105
106 /* LPC can either run as a Clocksource or in RTC or WDT mode */ 106 /* LPC can either run as a Clocksource or in RTC or WDT mode */
107 if (mode != ST_LPC_MODE_CLKSRC) 107 if (mode != ST_LPC_MODE_CLKSRC)
108 return; 108 return 0;
109 109
110 ddata.base = of_iomap(np, 0); 110 ddata.base = of_iomap(np, 0);
111 if (!ddata.base) { 111 if (!ddata.base) {
112 pr_err("clksrc-st-lpc: Unable to map iomem\n"); 112 pr_err("clksrc-st-lpc: Unable to map iomem\n");
113 return; 113 return -ENXIO;
114 } 114 }
115 115
116 if (st_clksrc_setup_clk(np)) { 116 ret = st_clksrc_setup_clk(np);
117 if (ret) {
117 iounmap(ddata.base); 118 iounmap(ddata.base);
118 return; 119 return ret;
119 } 120 }
120 121
121 if (st_clksrc_init()) { 122 ret = st_clksrc_init();
123 if (ret) {
122 clk_disable_unprepare(ddata.clk); 124 clk_disable_unprepare(ddata.clk);
123 clk_put(ddata.clk); 125 clk_put(ddata.clk);
124 iounmap(ddata.base); 126 iounmap(ddata.base);
125 return; 127 return ret;
126 } 128 }
127 129
128 pr_info("clksrc-st-lpc: clocksource initialised - running @ %luHz\n", 130 pr_info("clksrc-st-lpc: clocksource initialised - running @ %luHz\n",
129 clk_get_rate(ddata.clk)); 131 clk_get_rate(ddata.clk));
132
133 return ret;
130} 134}
131CLOCKSOURCE_OF_DECLARE(ddata, "st,stih407-lpc", st_clksrc_of_register); 135CLOCKSOURCE_OF_DECLARE(ddata, "st,stih407-lpc", st_clksrc_of_register);
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
index cdd86e3525bb..84aed78261e4 100644
--- a/drivers/clocksource/clps711x-timer.c
+++ b/drivers/clocksource/clps711x-timer.c
@@ -104,7 +104,7 @@ void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base,
104} 104}
105 105
106#ifdef CONFIG_CLKSRC_OF 106#ifdef CONFIG_CLKSRC_OF
107static void __init clps711x_timer_init(struct device_node *np) 107static int __init clps711x_timer_init(struct device_node *np)
108{ 108{
109 unsigned int irq = irq_of_parse_and_map(np, 0); 109 unsigned int irq = irq_of_parse_and_map(np, 0);
110 struct clk *clock = of_clk_get(np, 0); 110 struct clk *clock = of_clk_get(np, 0);
@@ -112,13 +112,11 @@ static void __init clps711x_timer_init(struct device_node *np)
112 112
113 switch (of_alias_get_id(np, "timer")) { 113 switch (of_alias_get_id(np, "timer")) {
114 case CLPS711X_CLKSRC_CLOCKSOURCE: 114 case CLPS711X_CLKSRC_CLOCKSOURCE:
115 BUG_ON(_clps711x_clksrc_init(clock, base)); 115 return _clps711x_clksrc_init(clock, base);
116 break;
117 case CLPS711X_CLKSRC_CLOCKEVENT: 116 case CLPS711X_CLKSRC_CLOCKEVENT:
118 BUG_ON(_clps711x_clkevt_init(clock, base, irq)); 117 return _clps711x_clkevt_init(clock, base, irq);
119 break;
120 default: 118 default:
121 break; 119 return -EINVAL;
122 } 120 }
123} 121}
124CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,clps711x-timer", clps711x_timer_init); 122CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,clps711x-timer", clps711x_timer_init);
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 860843cef572..aee6c0d39a7c 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -143,7 +143,7 @@ static struct delay_timer dw_apb_delay_timer = {
143#endif 143#endif
144 144
145static int num_called; 145static int num_called;
146static void __init dw_apb_timer_init(struct device_node *timer) 146static int __init dw_apb_timer_init(struct device_node *timer)
147{ 147{
148 switch (num_called) { 148 switch (num_called) {
149 case 0: 149 case 0:
@@ -164,6 +164,8 @@ static void __init dw_apb_timer_init(struct device_node *timer)
164 } 164 }
165 165
166 num_called++; 166 num_called++;
167
168 return 0;
167} 169}
168CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init); 170CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
169CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init); 171CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index be09bc0b5e26..0d18dd4b3bd2 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -232,7 +232,7 @@ static cycles_t exynos4_read_current_timer(void)
232 return exynos4_read_count_32(); 232 return exynos4_read_count_32();
233} 233}
234 234
235static void __init exynos4_clocksource_init(void) 235static int __init exynos4_clocksource_init(void)
236{ 236{
237 exynos4_mct_frc_start(); 237 exynos4_mct_frc_start();
238 238
@@ -244,6 +244,8 @@ static void __init exynos4_clocksource_init(void)
244 panic("%s: can't register clocksource\n", mct_frc.name); 244 panic("%s: can't register clocksource\n", mct_frc.name);
245 245
246 sched_clock_register(exynos4_read_sched_clock, 32, clk_rate); 246 sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);
247
248 return 0;
247} 249}
248 250
249static void exynos4_mct_comp0_stop(void) 251static void exynos4_mct_comp0_stop(void)
@@ -335,12 +337,14 @@ static struct irqaction mct_comp_event_irq = {
335 .dev_id = &mct_comp_device, 337 .dev_id = &mct_comp_device,
336}; 338};
337 339
338static void exynos4_clockevent_init(void) 340static int exynos4_clockevent_init(void)
339{ 341{
340 mct_comp_device.cpumask = cpumask_of(0); 342 mct_comp_device.cpumask = cpumask_of(0);
341 clockevents_config_and_register(&mct_comp_device, clk_rate, 343 clockevents_config_and_register(&mct_comp_device, clk_rate,
342 0xf, 0xffffffff); 344 0xf, 0xffffffff);
343 setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq); 345 setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
346
347 return 0;
344} 348}
345 349
346static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick); 350static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
@@ -516,7 +520,7 @@ static struct notifier_block exynos4_mct_cpu_nb = {
516 .notifier_call = exynos4_mct_cpu_notify, 520 .notifier_call = exynos4_mct_cpu_notify,
517}; 521};
518 522
519static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base) 523static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
520{ 524{
521 int err, cpu; 525 int err, cpu;
522 struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); 526 struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
@@ -572,15 +576,17 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
572 576
573 /* Immediately configure the timer on the boot CPU */ 577 /* Immediately configure the timer on the boot CPU */
574 exynos4_local_timer_setup(mevt); 578 exynos4_local_timer_setup(mevt);
575 return; 579 return 0;
576 580
577out_irq: 581out_irq:
578 free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick); 582 free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
583 return err;
579} 584}
580 585
581static void __init mct_init_dt(struct device_node *np, unsigned int int_type) 586static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
582{ 587{
583 u32 nr_irqs, i; 588 u32 nr_irqs, i;
589 int ret;
584 590
585 mct_int_type = int_type; 591 mct_int_type = int_type;
586 592
@@ -600,18 +606,24 @@ static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
600 for (i = MCT_L0_IRQ; i < nr_irqs; i++) 606 for (i = MCT_L0_IRQ; i < nr_irqs; i++)
601 mct_irqs[i] = irq_of_parse_and_map(np, i); 607 mct_irqs[i] = irq_of_parse_and_map(np, i);
602 608
603 exynos4_timer_resources(np, of_iomap(np, 0)); 609 ret = exynos4_timer_resources(np, of_iomap(np, 0));
604 exynos4_clocksource_init(); 610 if (ret)
605 exynos4_clockevent_init(); 611 return ret;
612
613 ret = exynos4_clocksource_init();
614 if (ret)
615 return ret;
616
617 return exynos4_clockevent_init();
606} 618}
607 619
608 620
609static void __init mct_init_spi(struct device_node *np) 621static int __init mct_init_spi(struct device_node *np)
610{ 622{
611 return mct_init_dt(np, MCT_INT_SPI); 623 return mct_init_dt(np, MCT_INT_SPI);
612} 624}
613 625
614static void __init mct_init_ppi(struct device_node *np) 626static int __init mct_init_ppi(struct device_node *np)
615{ 627{
616 return mct_init_dt(np, MCT_INT_PPI); 628 return mct_init_dt(np, MCT_INT_PPI);
617} 629}
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
index 517e1c7624d4..738515b89073 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/fsl_ftm_timer.c
@@ -316,15 +316,16 @@ static int __init ftm_calc_closest_round_cyc(unsigned long freq)
316 return 0; 316 return 0;
317} 317}
318 318
319static void __init ftm_timer_init(struct device_node *np) 319static int __init ftm_timer_init(struct device_node *np)
320{ 320{
321 unsigned long freq; 321 unsigned long freq;
322 int irq; 322 int ret, irq;
323 323
324 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 324 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
325 if (!priv) 325 if (!priv)
326 return; 326 return -ENOMEM;
327 327
328 ret = -ENXIO;
328 priv->clkevt_base = of_iomap(np, 0); 329 priv->clkevt_base = of_iomap(np, 0);
329 if (!priv->clkevt_base) { 330 if (!priv->clkevt_base) {
330 pr_err("ftm: unable to map event timer registers\n"); 331 pr_err("ftm: unable to map event timer registers\n");
@@ -337,6 +338,7 @@ static void __init ftm_timer_init(struct device_node *np)
337 goto err; 338 goto err;
338 } 339 }
339 340
341 ret = -EINVAL;
340 irq = irq_of_parse_and_map(np, 0); 342 irq = irq_of_parse_and_map(np, 0);
341 if (irq <= 0) { 343 if (irq <= 0) {
342 pr_err("ftm: unable to get IRQ from DT, %d\n", irq); 344 pr_err("ftm: unable to get IRQ from DT, %d\n", irq);
@@ -349,18 +351,22 @@ static void __init ftm_timer_init(struct device_node *np)
349 if (!freq) 351 if (!freq)
350 goto err; 352 goto err;
351 353
352 if (ftm_calc_closest_round_cyc(freq)) 354 ret = ftm_calc_closest_round_cyc(freq);
355 if (ret)
353 goto err; 356 goto err;
354 357
355 if (ftm_clocksource_init(freq)) 358 ret = ftm_clocksource_init(freq);
359 if (ret)
356 goto err; 360 goto err;
357 361
358 if (ftm_clockevent_init(freq, irq)) 362 ret = ftm_clockevent_init(freq, irq);
363 if (ret)
359 goto err; 364 goto err;
360 365
361 return; 366 return 0;
362 367
363err: 368err:
364 kfree(priv); 369 kfree(priv);
370 return ret;
365} 371}
366CLOCKSOURCE_OF_DECLARE(flextimer, "fsl,ftm-timer", ftm_timer_init); 372CLOCKSOURCE_OF_DECLARE(flextimer, "fsl,ftm-timer", ftm_timer_init);
diff --git a/drivers/clocksource/h8300_timer16.c b/drivers/clocksource/h8300_timer16.c
index 75c44079b345..07d9d5be9054 100644
--- a/drivers/clocksource/h8300_timer16.c
+++ b/drivers/clocksource/h8300_timer16.c
@@ -126,7 +126,7 @@ static struct timer16_priv timer16_priv = {
126#define REG_CH 0 126#define REG_CH 0
127#define REG_COMM 1 127#define REG_COMM 1
128 128
129static void __init h8300_16timer_init(struct device_node *node) 129static int __init h8300_16timer_init(struct device_node *node)
130{ 130{
131 void __iomem *base[2]; 131 void __iomem *base[2];
132 int ret, irq; 132 int ret, irq;
@@ -136,9 +136,10 @@ static void __init h8300_16timer_init(struct device_node *node)
136 clk = of_clk_get(node, 0); 136 clk = of_clk_get(node, 0);
137 if (IS_ERR(clk)) { 137 if (IS_ERR(clk)) {
138 pr_err("failed to get clock for clocksource\n"); 138 pr_err("failed to get clock for clocksource\n");
139 return; 139 return PTR_ERR(clk);
140 } 140 }
141 141
142 ret = -ENXIO;
142 base[REG_CH] = of_iomap(node, 0); 143 base[REG_CH] = of_iomap(node, 0);
143 if (!base[REG_CH]) { 144 if (!base[REG_CH]) {
144 pr_err("failed to map registers for clocksource\n"); 145 pr_err("failed to map registers for clocksource\n");
@@ -151,6 +152,7 @@ static void __init h8300_16timer_init(struct device_node *node)
151 goto unmap_ch; 152 goto unmap_ch;
152 } 153 }
153 154
155 ret = -EINVAL;
154 irq = irq_of_parse_and_map(node, 0); 156 irq = irq_of_parse_and_map(node, 0);
155 if (!irq) { 157 if (!irq) {
156 pr_err("failed to get irq for clockevent\n"); 158 pr_err("failed to get irq for clockevent\n");
@@ -174,7 +176,7 @@ static void __init h8300_16timer_init(struct device_node *node)
174 176
175 clocksource_register_hz(&timer16_priv.cs, 177 clocksource_register_hz(&timer16_priv.cs,
176 clk_get_rate(clk) / 8); 178 clk_get_rate(clk) / 8);
177 return; 179 return 0;
178 180
179unmap_comm: 181unmap_comm:
180 iounmap(base[REG_COMM]); 182 iounmap(base[REG_COMM]);
@@ -182,6 +184,8 @@ unmap_ch:
182 iounmap(base[REG_CH]); 184 iounmap(base[REG_CH]);
183free_clk: 185free_clk:
184 clk_put(clk); 186 clk_put(clk);
187 return ret;
185} 188}
186 189
187CLOCKSOURCE_OF_DECLARE(h8300_16bit, "renesas,16bit-timer", h8300_16timer_init); 190CLOCKSOURCE_OF_DECLARE(h8300_16bit, "renesas,16bit-timer",
191 h8300_16timer_init);
diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c
index c151941e1956..546bb180f5a4 100644
--- a/drivers/clocksource/h8300_timer8.c
+++ b/drivers/clocksource/h8300_timer8.c
@@ -164,24 +164,26 @@ static struct timer8_priv timer8_priv = {
164 }, 164 },
165}; 165};
166 166
167static void __init h8300_8timer_init(struct device_node *node) 167static int __init h8300_8timer_init(struct device_node *node)
168{ 168{
169 void __iomem *base; 169 void __iomem *base;
170 int irq; 170 int irq, ret;
171 struct clk *clk; 171 struct clk *clk;
172 172
173 clk = of_clk_get(node, 0); 173 clk = of_clk_get(node, 0);
174 if (IS_ERR(clk)) { 174 if (IS_ERR(clk)) {
175 pr_err("failed to get clock for clockevent\n"); 175 pr_err("failed to get clock for clockevent\n");
176 return; 176 return PTR_ERR(clk);
177 } 177 }
178 178
179 ret = -ENXIO;
179 base = of_iomap(node, 0); 180 base = of_iomap(node, 0);
180 if (!base) { 181 if (!base) {
181 pr_err("failed to map registers for clockevent\n"); 182 pr_err("failed to map registers for clockevent\n");
182 goto free_clk; 183 goto free_clk;
183 } 184 }
184 185
186 ret = -EINVAL;
185 irq = irq_of_parse_and_map(node, 0); 187 irq = irq_of_parse_and_map(node, 0);
186 if (!irq) { 188 if (!irq) {
187 pr_err("failed to get irq for clockevent\n"); 189 pr_err("failed to get irq for clockevent\n");
@@ -205,11 +207,12 @@ static void __init h8300_8timer_init(struct device_node *node)
205 clockevents_config_and_register(&timer8_priv.ced, 207 clockevents_config_and_register(&timer8_priv.ced,
206 timer8_priv.rate, 1, 0x0000ffff); 208 timer8_priv.rate, 1, 0x0000ffff);
207 209
208 return; 210 return 0;
209unmap_reg: 211unmap_reg:
210 iounmap(base); 212 iounmap(base);
211free_clk: 213free_clk:
212 clk_put(clk); 214 clk_put(clk);
215 return ret;
213} 216}
214 217
215CLOCKSOURCE_OF_DECLARE(h8300_8bit, "renesas,8bit-timer", h8300_8timer_init); 218CLOCKSOURCE_OF_DECLARE(h8300_8bit, "renesas,8bit-timer", h8300_8timer_init);
diff --git a/drivers/clocksource/h8300_tpu.c b/drivers/clocksource/h8300_tpu.c
index d4c1a287c262..7bdf1991c847 100644
--- a/drivers/clocksource/h8300_tpu.c
+++ b/drivers/clocksource/h8300_tpu.c
@@ -119,15 +119,16 @@ static struct tpu_priv tpu_priv = {
119#define CH_L 0 119#define CH_L 0
120#define CH_H 1 120#define CH_H 1
121 121
122static void __init h8300_tpu_init(struct device_node *node) 122static int __init h8300_tpu_init(struct device_node *node)
123{ 123{
124 void __iomem *base[2]; 124 void __iomem *base[2];
125 struct clk *clk; 125 struct clk *clk;
126 int ret = -ENXIO;
126 127
127 clk = of_clk_get(node, 0); 128 clk = of_clk_get(node, 0);
128 if (IS_ERR(clk)) { 129 if (IS_ERR(clk)) {
129 pr_err("failed to get clock for clocksource\n"); 130 pr_err("failed to get clock for clocksource\n");
130 return; 131 return PTR_ERR(clk);
131 } 132 }
132 133
133 base[CH_L] = of_iomap(node, CH_L); 134 base[CH_L] = of_iomap(node, CH_L);
@@ -144,14 +145,13 @@ static void __init h8300_tpu_init(struct device_node *node)
144 tpu_priv.mapbase1 = base[CH_L]; 145 tpu_priv.mapbase1 = base[CH_L];
145 tpu_priv.mapbase2 = base[CH_H]; 146 tpu_priv.mapbase2 = base[CH_H];
146 147
147 clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64); 148 return clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64);
148
149 return;
150 149
151unmap_L: 150unmap_L:
152 iounmap(base[CH_H]); 151 iounmap(base[CH_H]);
153free_clk: 152free_clk:
154 clk_put(clk); 153 clk_put(clk);
154 return ret;
155} 155}
156 156
157CLOCKSOURCE_OF_DECLARE(h8300_tpu, "renesas,tpu", h8300_tpu_init); 157CLOCKSOURCE_OF_DECLARE(h8300_tpu, "renesas,tpu", h8300_tpu_init);
diff --git a/drivers/clocksource/meson6_timer.c b/drivers/clocksource/meson6_timer.c
index 1fa22c4d2d49..52af591a9fc7 100644
--- a/drivers/clocksource/meson6_timer.c
+++ b/drivers/clocksource/meson6_timer.c
@@ -126,18 +126,22 @@ static struct irqaction meson6_timer_irq = {
126 .dev_id = &meson6_clockevent, 126 .dev_id = &meson6_clockevent,
127}; 127};
128 128
129static void __init meson6_timer_init(struct device_node *node) 129static int __init meson6_timer_init(struct device_node *node)
130{ 130{
131 u32 val; 131 u32 val;
132 int ret, irq; 132 int ret, irq;
133 133
134 timer_base = of_io_request_and_map(node, 0, "meson6-timer"); 134 timer_base = of_io_request_and_map(node, 0, "meson6-timer");
135 if (IS_ERR(timer_base)) 135 if (IS_ERR(timer_base)) {
136 panic("Can't map registers"); 136 pr_err("Can't map registers");
137 return -ENXIO;
138 }
137 139
138 irq = irq_of_parse_and_map(node, 0); 140 irq = irq_of_parse_and_map(node, 0);
139 if (irq <= 0) 141 if (irq <= 0) {
140 panic("Can't parse IRQ"); 142 pr_err("Can't parse IRQ");
143 return -EINVAL;
144 }
141 145
142 /* Set 1us for timer E */ 146 /* Set 1us for timer E */
143 val = readl(timer_base + TIMER_ISA_MUX); 147 val = readl(timer_base + TIMER_ISA_MUX);
@@ -158,14 +162,17 @@ static void __init meson6_timer_init(struct device_node *node)
158 meson6_clkevt_time_stop(CED_ID); 162 meson6_clkevt_time_stop(CED_ID);
159 163
160 ret = setup_irq(irq, &meson6_timer_irq); 164 ret = setup_irq(irq, &meson6_timer_irq);
161 if (ret) 165 if (ret) {
162 pr_warn("failed to setup irq %d\n", irq); 166 pr_warn("failed to setup irq %d\n", irq);
167 return ret;
168 }
163 169
164 meson6_clockevent.cpumask = cpu_possible_mask; 170 meson6_clockevent.cpumask = cpu_possible_mask;
165 meson6_clockevent.irq = irq; 171 meson6_clockevent.irq = irq;
166 172
167 clockevents_config_and_register(&meson6_clockevent, USEC_PER_SEC, 173 clockevents_config_and_register(&meson6_clockevent, USEC_PER_SEC,
168 1, 0xfffe); 174 1, 0xfffe);
175 return 0;
169} 176}
170CLOCKSOURCE_OF_DECLARE(meson6, "amlogic,meson6-timer", 177CLOCKSOURCE_OF_DECLARE(meson6, "amlogic,meson6-timer",
171 meson6_timer_init); 178 meson6_timer_init);
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 89d3e4d7900c..1572c7a778ab 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -146,7 +146,7 @@ static struct clocksource gic_clocksource = {
146 .archdata = { .vdso_clock_mode = VDSO_CLOCK_GIC }, 146 .archdata = { .vdso_clock_mode = VDSO_CLOCK_GIC },
147}; 147};
148 148
149static void __init __gic_clocksource_init(void) 149static int __init __gic_clocksource_init(void)
150{ 150{
151 int ret; 151 int ret;
152 152
@@ -159,6 +159,8 @@ static void __init __gic_clocksource_init(void)
159 ret = clocksource_register_hz(&gic_clocksource, gic_frequency); 159 ret = clocksource_register_hz(&gic_clocksource, gic_frequency);
160 if (ret < 0) 160 if (ret < 0)
161 pr_warn("GIC: Unable to register clocksource\n"); 161 pr_warn("GIC: Unable to register clocksource\n");
162
163 return ret;
162} 164}
163 165
164void __init gic_clocksource_init(unsigned int frequency) 166void __init gic_clocksource_init(unsigned int frequency)
@@ -179,31 +181,35 @@ static void __init gic_clocksource_of_init(struct device_node *node)
179 struct clk *clk; 181 struct clk *clk;
180 int ret; 182 int ret;
181 183
182 if (WARN_ON(!gic_present || !node->parent || 184 if (!gic_present || !node->parent ||
183 !of_device_is_compatible(node->parent, "mti,gic"))) 185 !of_device_is_compatible(node->parent, "mti,gic")) {
184 return; 186 pr_warn("No DT definition for the MIPS GIC driver\n");
187 return -ENXIO;
188 }
185 189
186 clk = of_clk_get(node, 0); 190 clk = of_clk_get(node, 0);
187 if (!IS_ERR(clk)) { 191 if (!IS_ERR(clk)) {
188 if (clk_prepare_enable(clk) < 0) { 192 ret = clk_prepare_enable(clk); if (ret < 0) {
189 pr_err("GIC failed to enable clock\n"); 193 pr_err("GIC failed to enable clock\n");
190 clk_put(clk); 194 clk_put(clk);
191 return; 195 return ret;
192 } 196 }
193 197
194 gic_frequency = clk_get_rate(clk); 198 gic_frequency = clk_get_rate(clk);
195 } else if (of_property_read_u32(node, "clock-frequency", 199 } else if (of_property_read_u32(node, "clock-frequency",
196 &gic_frequency)) { 200 &gic_frequency)) {
197 pr_err("GIC frequency not specified.\n"); 201 pr_err("GIC frequency not specified.\n");
198 return; 202 return -EINVAL;
199 } 203 }
200 gic_timer_irq = irq_of_parse_and_map(node, 0); 204 gic_timer_irq = irq_of_parse_and_map(node, 0);
201 if (!gic_timer_irq) { 205 if (!gic_timer_irq) {
202 pr_err("GIC timer IRQ not specified.\n"); 206 pr_err("GIC timer IRQ not specified.\n");
203 return; 207 return -EINVAL;
204 } 208 }
205 209
206 __gic_clocksource_init(); 210 ret = __gic_clocksource_init();
211 if (ret)
212 return ret;
207 213
208 ret = gic_clockevent_init(); 214 ret = gic_clockevent_init();
209 if (!ret && !IS_ERR(clk)) { 215 if (!ret && !IS_ERR(clk)) {
@@ -213,6 +219,8 @@ static void __init gic_clocksource_of_init(struct device_node *node)
213 219
214 /* And finally start the counter */ 220 /* And finally start the counter */
215 gic_start_count(); 221 gic_start_count();
222
223 return 0;
216} 224}
217CLOCKSOURCE_OF_DECLARE(mips_gic_timer, "mti,gic-timer", 225CLOCKSOURCE_OF_DECLARE(mips_gic_timer, "mti,gic-timer",
218 gic_clocksource_of_init); 226 gic_clocksource_of_init);
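One pitfall this hunk brushes against (corrected above): PTR_ERR() is only meaningful on an IS_ERR() pointer, so once of_clk_get() has succeeded, a later clk_prepare_enable() failure must return that call's own value. A sketch, with hypothetical names:

    static int __init foo_enable_clk(struct device_node *node, unsigned long *rate)
    {
        struct clk *clk = of_clk_get(node, 0);
        int ret;

        if (IS_ERR(clk))
            return PTR_ERR(clk);    /* error pointer: PTR_ERR() is valid here */

        ret = clk_prepare_enable(clk);
        if (ret < 0) {
            clk_put(clk);
            return ret;             /* clk is live; PTR_ERR(clk) would be garbage */
        }

        *rate = clk_get_rate(clk);
        return 0;
    }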
diff --git a/drivers/clocksource/moxart_timer.c b/drivers/clocksource/moxart_timer.c
index 19857af651c1..841454417acd 100644
--- a/drivers/clocksource/moxart_timer.c
+++ b/drivers/clocksource/moxart_timer.c
@@ -119,34 +119,45 @@ static struct irqaction moxart_timer_irq = {
119 .dev_id = &moxart_clockevent, 119 .dev_id = &moxart_clockevent,
120}; 120};
121 121
122static void __init moxart_timer_init(struct device_node *node) 122static int __init moxart_timer_init(struct device_node *node)
123{ 123{
124 int ret, irq; 124 int ret, irq;
125 unsigned long pclk; 125 unsigned long pclk;
126 struct clk *clk; 126 struct clk *clk;
127 127
128 base = of_iomap(node, 0); 128 base = of_iomap(node, 0);
129 if (!base) 129 if (!base) {
130 panic("%s: of_iomap failed\n", node->full_name); 130 pr_err("%s: of_iomap failed\n", node->full_name);
131 return -ENXIO;
132 }
131 133
132 irq = irq_of_parse_and_map(node, 0); 134 irq = irq_of_parse_and_map(node, 0);
133 if (irq <= 0) 135 if (irq <= 0) {
134 panic("%s: irq_of_parse_and_map failed\n", node->full_name); 136 pr_err("%s: irq_of_parse_and_map failed\n", node->full_name);
137 return -EINVAL;
138 }
135 139
136 ret = setup_irq(irq, &moxart_timer_irq); 140 ret = setup_irq(irq, &moxart_timer_irq);
137 if (ret) 141 if (ret) {
138 panic("%s: setup_irq failed\n", node->full_name); 142 pr_err("%s: setup_irq failed\n", node->full_name);
143 return ret;
144 }
139 145
140 clk = of_clk_get(node, 0); 146 clk = of_clk_get(node, 0);
141 if (IS_ERR(clk)) 147 if (IS_ERR(clk)) {
142 panic("%s: of_clk_get failed\n", node->full_name); 148 pr_err("%s: of_clk_get failed\n", node->full_name);
149 return PTR_ERR(clk);
150 }
143 151
144 pclk = clk_get_rate(clk); 152 pclk = clk_get_rate(clk);
145 153
146 if (clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT, 154 ret = clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT,
147 "moxart_timer", pclk, 200, 32, 155 "moxart_timer", pclk, 200, 32,
148 clocksource_mmio_readl_down)) 156 clocksource_mmio_readl_down);
149 panic("%s: clocksource_mmio_init failed\n", node->full_name); 157 if (ret) {
158 pr_err("%s: clocksource_mmio_init failed\n", node->full_name);
159 return ret;
160 }
150 161
151 clock_count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ); 162 clock_count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ);
152 163
@@ -164,5 +175,7 @@ static void __init moxart_timer_init(struct device_node *node)
164 */ 175 */
165 clockevents_config_and_register(&moxart_clockevent, pclk, 176 clockevents_config_and_register(&moxart_clockevent, pclk,
166 0x4, 0xfffffffe); 177 0x4, 0xfffffffe);
178
179 return 0;
167} 180}
168CLOCKSOURCE_OF_DECLARE(moxart, "moxa,moxart-timer", moxart_timer_init); 181CLOCKSOURCE_OF_DECLARE(moxart, "moxa,moxart-timer", moxart_timer_init);
diff --git a/drivers/clocksource/mps2-timer.c b/drivers/clocksource/mps2-timer.c
index 3d33a5e23dee..3e4431ed9aa9 100644
--- a/drivers/clocksource/mps2-timer.c
+++ b/drivers/clocksource/mps2-timer.c
@@ -250,7 +250,7 @@ out:
250 return ret; 250 return ret;
251} 251}
252 252
253static void __init mps2_timer_init(struct device_node *np) 253static int __init mps2_timer_init(struct device_node *np)
254{ 254{
255 static int has_clocksource, has_clockevent; 255 static int has_clocksource, has_clockevent;
256 int ret; 256 int ret;
@@ -259,7 +259,7 @@ static void __init mps2_timer_init(struct device_node *np)
259 ret = mps2_clocksource_init(np); 259 ret = mps2_clocksource_init(np);
260 if (!ret) { 260 if (!ret) {
261 has_clocksource = 1; 261 has_clocksource = 1;
262 return; 262 return 0;
263 } 263 }
264 } 264 }
265 265
@@ -267,9 +267,11 @@ static void __init mps2_timer_init(struct device_node *np)
267 ret = mps2_clockevent_init(np); 267 ret = mps2_clockevent_init(np);
268 if (!ret) { 268 if (!ret) {
269 has_clockevent = 1; 269 has_clockevent = 1;
270 return; 270 return 0;
271 } 271 }
272 } 272 }
273
274 return 0;
273} 275}
274 276
275CLOCKSOURCE_OF_DECLARE(mps2_timer, "arm,mps2-timer", mps2_timer_init); 277CLOCKSOURCE_OF_DECLARE(mps2_timer, "arm,mps2-timer", mps2_timer_init);
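mps2 deliberately returns 0 even when a node fails its role: the same compatible string covers both the clocksource node and the clockevent node, so one bad node must not make clocksource_probe() treat the other as failed. Schematically, with hypothetical helpers:

    static int __init foo_clocksource_init(struct device_node *np);
    static int __init foo_clockevent_init(struct device_node *np);

    static int __init foo_timer_init(struct device_node *np)
    {
        static int has_clocksource, has_clockevent;

        if (!has_clocksource && !foo_clocksource_init(np)) {
            has_clocksource = 1;
            return 0;
        }

        if (!has_clockevent && !foo_clockevent_init(np)) {
            has_clockevent = 1;
            return 0;
        }

        return 0;   /* intentionally not an error: keep probing other nodes */
    }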
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
index 7e583f8ea5f4..90659493c59c 100644
--- a/drivers/clocksource/mtk_timer.c
+++ b/drivers/clocksource/mtk_timer.c
@@ -181,7 +181,7 @@ static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
181 evt->gpt_base + GPT_IRQ_EN_REG); 181 evt->gpt_base + GPT_IRQ_EN_REG);
182} 182}
183 183
184static void __init mtk_timer_init(struct device_node *node) 184static int __init mtk_timer_init(struct device_node *node)
185{ 185{
186 struct mtk_clock_event_device *evt; 186 struct mtk_clock_event_device *evt;
187 struct resource res; 187 struct resource res;
@@ -190,7 +190,7 @@ static void __init mtk_timer_init(struct device_node *node)
190 190
191 evt = kzalloc(sizeof(*evt), GFP_KERNEL); 191 evt = kzalloc(sizeof(*evt), GFP_KERNEL);
192 if (!evt) 192 if (!evt)
193 return; 193 return -ENOMEM;
194 194
195 evt->dev.name = "mtk_tick"; 195 evt->dev.name = "mtk_tick";
196 evt->dev.rating = 300; 196 evt->dev.rating = 300;
@@ -248,7 +248,7 @@ static void __init mtk_timer_init(struct device_node *node)
248 248
249 mtk_timer_enable_irq(evt, GPT_CLK_EVT); 249 mtk_timer_enable_irq(evt, GPT_CLK_EVT);
250 250
251 return; 251 return 0;
252 252
253err_clk_disable: 253err_clk_disable:
254 clk_disable_unprepare(clk); 254 clk_disable_unprepare(clk);
@@ -262,5 +262,7 @@ err_mem:
262 release_mem_region(res.start, resource_size(&res)); 262 release_mem_region(res.start, resource_size(&res));
263err_kzalloc: 263err_kzalloc:
264 kfree(evt); 264 kfree(evt);
265
266 return -EINVAL;
265} 267}
266CLOCKSOURCE_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init); 268CLOCKSOURCE_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
index f5ce2961c0d6..0ba0a913b41d 100644
--- a/drivers/clocksource/mxs_timer.c
+++ b/drivers/clocksource/mxs_timer.c
@@ -31,8 +31,6 @@
31#include <linux/stmp_device.h> 31#include <linux/stmp_device.h>
32#include <linux/sched_clock.h> 32#include <linux/sched_clock.h>
33 33
34#include <asm/mach/time.h>
35
36/* 34/*
37 * There are 2 versions of the timrot on Freescale MXS-based SoCs. 35 * There are 2 versions of the timrot on Freescale MXS-based SoCs.
38 * The v1 on MX23 only gets 16 bits counter, while v2 on MX28 36 * The v1 on MX23 only gets 16 bits counter, while v2 on MX28
@@ -226,10 +224,10 @@ static int __init mxs_clocksource_init(struct clk *timer_clk)
226 return 0; 224 return 0;
227} 225}
228 226
229static void __init mxs_timer_init(struct device_node *np) 227static int __init mxs_timer_init(struct device_node *np)
230{ 228{
231 struct clk *timer_clk; 229 struct clk *timer_clk;
232 int irq; 230 int irq, ret;
233 231
234 mxs_timrot_base = of_iomap(np, 0); 232 mxs_timrot_base = of_iomap(np, 0);
235 WARN_ON(!mxs_timrot_base); 233 WARN_ON(!mxs_timrot_base);
@@ -237,10 +235,12 @@ static void __init mxs_timer_init(struct device_node *np)
237 timer_clk = of_clk_get(np, 0); 235 timer_clk = of_clk_get(np, 0);
238 if (IS_ERR(timer_clk)) { 236 if (IS_ERR(timer_clk)) {
239 pr_err("%s: failed to get clk\n", __func__); 237 pr_err("%s: failed to get clk\n", __func__);
240 return; 238 return PTR_ERR(timer_clk);
241 } 239 }
242 240
243 clk_prepare_enable(timer_clk); 241 ret = clk_prepare_enable(timer_clk);
242 if (ret)
243 return ret;
244 244
245 /* 245 /*
246 * Initialize timers to a known state 246 * Initialize timers to a known state
@@ -278,11 +278,19 @@ static void __init mxs_timer_init(struct device_node *np)
278 mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1)); 278 mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));
279 279
280 /* init and register the timer to the framework */ 280 /* init and register the timer to the framework */
281 mxs_clocksource_init(timer_clk); 281 ret = mxs_clocksource_init(timer_clk);
282 mxs_clockevent_init(timer_clk); 282 if (ret)
283 return ret;
284
285 ret = mxs_clockevent_init(timer_clk);
286 if (ret)
287 return ret;
283 288
284 /* Make irqs happen */ 289 /* Make irqs happen */
285 irq = irq_of_parse_and_map(np, 0); 290 irq = irq_of_parse_and_map(np, 0);
286 setup_irq(irq, &mxs_timer_irq); 291 if (irq <= 0)
292 return -EINVAL;
293
294 return setup_irq(irq, &mxs_timer_irq);
287} 295}
288CLOCKSOURCE_OF_DECLARE(mxs, "fsl,timrot", mxs_timer_init); 296CLOCKSOURCE_OF_DECLARE(mxs, "fsl,timrot", mxs_timer_init);
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index bc8dd443c727..3c124d1ca600 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -193,10 +193,11 @@ static struct irqaction nmdk_timer_irq = {
193 .dev_id = &nmdk_clkevt, 193 .dev_id = &nmdk_clkevt,
194}; 194};
195 195
196static void __init nmdk_timer_init(void __iomem *base, int irq, 196static int __init nmdk_timer_init(void __iomem *base, int irq,
197 struct clk *pclk, struct clk *clk) 197 struct clk *pclk, struct clk *clk)
198{ 198{
199 unsigned long rate; 199 unsigned long rate;
200 int ret;
200 201
201 mtu_base = base; 202 mtu_base = base;
202 203
@@ -226,10 +227,12 @@ static void __init nmdk_timer_init(void __iomem *base, int irq,
226 /* Timer 0 is the free running clocksource */ 227 /* Timer 0 is the free running clocksource */
227 nmdk_clksrc_reset(); 228 nmdk_clksrc_reset();
228 229
229 if (clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0", 230 ret = clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0",
230 rate, 200, 32, clocksource_mmio_readl_down)) 231 rate, 200, 32, clocksource_mmio_readl_down);
231 pr_err("timer: failed to initialize clock source %s\n", 232 if (ret) {
232 "mtu_0"); 233 pr_err("timer: failed to initialize clock source %s\n", "mtu_0");
234 return ret;
235 }
233 236
234#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK 237#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK
235 sched_clock_register(nomadik_read_sched_clock, 32, rate); 238 sched_clock_register(nomadik_read_sched_clock, 32, rate);
@@ -244,9 +247,11 @@ static void __init nmdk_timer_init(void __iomem *base, int irq,
244 mtu_delay_timer.read_current_timer = &nmdk_timer_read_current_timer; 247 mtu_delay_timer.read_current_timer = &nmdk_timer_read_current_timer;
245 mtu_delay_timer.freq = rate; 248 mtu_delay_timer.freq = rate;
246 register_current_timer_delay(&mtu_delay_timer); 249 register_current_timer_delay(&mtu_delay_timer);
250
251 return 0;
247} 252}
248 253
249static void __init nmdk_timer_of_init(struct device_node *node) 254static int __init nmdk_timer_of_init(struct device_node *node)
250{ 255{
251 struct clk *pclk; 256 struct clk *pclk;
252 struct clk *clk; 257 struct clk *clk;
@@ -254,22 +259,30 @@ static void __init nmdk_timer_of_init(struct device_node *node)
254 int irq; 259 int irq;
255 260
256 base = of_iomap(node, 0); 261 base = of_iomap(node, 0);
257 if (!base) 262 if (!base) {
258 panic("Can't remap registers"); 263 pr_err("Can't remap registers");
264 return -ENXIO;
265 }
259 266
260 pclk = of_clk_get_by_name(node, "apb_pclk"); 267 pclk = of_clk_get_by_name(node, "apb_pclk");
261 if (IS_ERR(pclk)) 268 if (IS_ERR(pclk)) {
262 panic("could not get apb_pclk"); 269 pr_err("could not get apb_pclk");
270 return PTR_ERR(pclk);
271 }
263 272
264 clk = of_clk_get_by_name(node, "timclk"); 273 clk = of_clk_get_by_name(node, "timclk");
265 if (IS_ERR(clk)) 274 if (IS_ERR(clk)) {
266 panic("could not get timclk"); 275 pr_err("could not get timclk");
276 return PTR_ERR(clk);
277 }
267 278
268 irq = irq_of_parse_and_map(node, 0); 279 irq = irq_of_parse_and_map(node, 0);
269 if (irq <= 0) 280 if (irq <= 0) {
270 panic("Can't parse IRQ"); 281 pr_err("Can't parse IRQ");
282 return -EINVAL;
283 }
271 284
272 nmdk_timer_init(base, irq, pclk, clk); 285 return nmdk_timer_init(base, irq, pclk, clk);
273} 286}
274CLOCKSOURCE_OF_DECLARE(nomadik_mtu, "st,nomadik-mtu", 287CLOCKSOURCE_OF_DECLARE(nomadik_mtu, "st,nomadik-mtu",
275 nmdk_timer_of_init); 288 nmdk_timer_of_init);
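nomadik-mtu keeps a shared hardware-init helper behind a thin OF wrapper, and the wrapper now tail-returns the helper's result instead of panicking on bad DT data. A hedged sketch of that split, with made-up "bar" names standing in for the driver's identifiers:

```c
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/printk.h>

static int __init bar_timer_common_init(void __iomem *base, int irq)
{
	/* hardware programming and clocksource registration go here */
	return 0;
}

static int __init bar_timer_of_init(struct device_node *np)
{
	void __iomem *base;
	int irq;

	base = of_iomap(np, 0);
	if (!base) {
		pr_err("Can't remap registers");
		return -ENXIO;		/* was: panic() */
	}

	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("Can't parse IRQ");
		return -EINVAL;
	}

	return bar_timer_common_init(base, irq);	/* chain the errno */
}
```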
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index 45b6a4999713..937e10b84d58 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -150,8 +150,10 @@ static struct irqaction pxa_ost0_irq = {
150 .dev_id = &ckevt_pxa_osmr0, 150 .dev_id = &ckevt_pxa_osmr0,
151}; 151};
152 152
153static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate) 153static int __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
154{ 154{
155 int ret;
156
155 timer_writel(0, OIER); 157 timer_writel(0, OIER);
156 timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR); 158 timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
157 159
@@ -159,39 +161,57 @@ static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
159 161
160 ckevt_pxa_osmr0.cpumask = cpumask_of(0); 162 ckevt_pxa_osmr0.cpumask = cpumask_of(0);
161 163
162 setup_irq(irq, &pxa_ost0_irq); 164 ret = setup_irq(irq, &pxa_ost0_irq);
165 if (ret) {
166 pr_err("Failed to setup irq");
167 return ret;
168 }
169
170 ret = clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200,
171 32, clocksource_mmio_readl_up);
172 if (ret) {
173 pr_err("Failed to init clocksource");
174 return ret;
175 }
163 176
164 clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200,
165 32, clocksource_mmio_readl_up);
166 clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate, 177 clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate,
167 MIN_OSCR_DELTA * 2, 0x7fffffff); 178 MIN_OSCR_DELTA * 2, 0x7fffffff);
179
180 return 0;
168} 181}
169 182
170static void __init pxa_timer_dt_init(struct device_node *np) 183static int __init pxa_timer_dt_init(struct device_node *np)
171{ 184{
172 struct clk *clk; 185 struct clk *clk;
173 int irq; 186 int irq, ret;
174 187
175 /* timer registers are shared with watchdog timer */ 188 /* timer registers are shared with watchdog timer */
176 timer_base = of_iomap(np, 0); 189 timer_base = of_iomap(np, 0);
177 if (!timer_base) 190 if (!timer_base) {
178 panic("%s: unable to map resource\n", np->name); 191 pr_err("%s: unable to map resource\n", np->name);
192 return -ENXIO;
193 }
179 194
180 clk = of_clk_get(np, 0); 195 clk = of_clk_get(np, 0);
181 if (IS_ERR(clk)) { 196 if (IS_ERR(clk)) {
182 pr_crit("%s: unable to get clk\n", np->name); 197 pr_crit("%s: unable to get clk\n", np->name);
183 return; 198 return PTR_ERR(clk);
199 }
200
201 ret = clk_prepare_enable(clk);
202 if (ret) {
203 pr_crit("Failed to prepare clock");
204 return ret;
184 } 205 }
185 clk_prepare_enable(clk);
186 206
187 /* we are only interested in OS-timer0 irq */ 207 /* we are only interested in OS-timer0 irq */
188 irq = irq_of_parse_and_map(np, 0); 208 irq = irq_of_parse_and_map(np, 0);
189 if (irq <= 0) { 209 if (irq <= 0) {
190 pr_crit("%s: unable to parse OS-timer0 irq\n", np->name); 210 pr_crit("%s: unable to parse OS-timer0 irq\n", np->name);
191 return; 211 return -EINVAL;
192 } 212 }
193 213
194 pxa_timer_common_init(irq, clk_get_rate(clk)); 214 return pxa_timer_common_init(irq, clk_get_rate(clk));
195} 215}
196CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init); 216CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init);
197 217
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c
index f8e09f923651..662576339049 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/qcom-timer.c
@@ -178,7 +178,7 @@ static struct delay_timer msm_delay_timer = {
178 .read_current_timer = msm_read_current_timer, 178 .read_current_timer = msm_read_current_timer,
179}; 179};
180 180
181static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq, 181static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
182 bool percpu) 182 bool percpu)
183{ 183{
184 struct clocksource *cs = &msm_clocksource; 184 struct clocksource *cs = &msm_clocksource;
@@ -218,12 +218,14 @@ err:
218 sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz); 218 sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
219 msm_delay_timer.freq = dgt_hz; 219 msm_delay_timer.freq = dgt_hz;
220 register_current_timer_delay(&msm_delay_timer); 220 register_current_timer_delay(&msm_delay_timer);
221
222 return res;
221} 223}
222 224
223static void __init msm_dt_timer_init(struct device_node *np) 225static int __init msm_dt_timer_init(struct device_node *np)
224{ 226{
225 u32 freq; 227 u32 freq;
226 int irq; 228 int irq, ret;
227 struct resource res; 229 struct resource res;
228 u32 percpu_offset; 230 u32 percpu_offset;
229 void __iomem *base; 231 void __iomem *base;
@@ -232,34 +234,35 @@ static void __init msm_dt_timer_init(struct device_node *np)
232 base = of_iomap(np, 0); 234 base = of_iomap(np, 0);
233 if (!base) { 235 if (!base) {
234 pr_err("Failed to map event base\n"); 236 pr_err("Failed to map event base\n");
235 return; 237 return -ENXIO;
236 } 238 }
237 239
238 /* We use GPT0 for the clockevent */ 240 /* We use GPT0 for the clockevent */
239 irq = irq_of_parse_and_map(np, 1); 241 irq = irq_of_parse_and_map(np, 1);
240 if (irq <= 0) { 242 if (irq <= 0) {
241 pr_err("Can't get irq\n"); 243 pr_err("Can't get irq\n");
242 return; 244 return -EINVAL;
243 } 245 }
244 246
245 /* We use CPU0's DGT for the clocksource */ 247 /* We use CPU0's DGT for the clocksource */
246 if (of_property_read_u32(np, "cpu-offset", &percpu_offset)) 248 if (of_property_read_u32(np, "cpu-offset", &percpu_offset))
247 percpu_offset = 0; 249 percpu_offset = 0;
248 250
249 if (of_address_to_resource(np, 0, &res)) { 251 ret = of_address_to_resource(np, 0, &res);
252 if (ret) {
250 pr_err("Failed to parse DGT resource\n"); 253 pr_err("Failed to parse DGT resource\n");
251 return; 254 return ret;
252 } 255 }
253 256
254 cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res)); 257 cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res));
255 if (!cpu0_base) { 258 if (!cpu0_base) {
256 pr_err("Failed to map source base\n"); 259 pr_err("Failed to map source base\n");
257 return; 260 return -EINVAL;
258 } 261 }
259 262
260 if (of_property_read_u32(np, "clock-frequency", &freq)) { 263 if (of_property_read_u32(np, "clock-frequency", &freq)) {
261 pr_err("Unknown frequency\n"); 264 pr_err("Unknown frequency\n");
262 return; 265 return -EINVAL;
263 } 266 }
264 267
265 event_base = base + 0x4; 268 event_base = base + 0x4;
@@ -268,7 +271,7 @@ static void __init msm_dt_timer_init(struct device_node *np)
268 freq /= 4; 271 freq /= 4;
269 writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL); 272 writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);
270 273
271 msm_timer_init(freq, 32, irq, !!percpu_offset); 274 return msm_timer_init(freq, 32, irq, !!percpu_offset);
272} 275}
273CLOCKSOURCE_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init); 276CLOCKSOURCE_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
274CLOCKSOURCE_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init); 277CLOCKSOURCE_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c
index b991b288c803..23e267acba25 100644
--- a/drivers/clocksource/rockchip_timer.c
+++ b/drivers/clocksource/rockchip_timer.c
@@ -19,7 +19,8 @@
19 19
20#define TIMER_LOAD_COUNT0 0x00 20#define TIMER_LOAD_COUNT0 0x00
21#define TIMER_LOAD_COUNT1 0x04 21#define TIMER_LOAD_COUNT1 0x04
22#define TIMER_CONTROL_REG 0x10 22#define TIMER_CONTROL_REG3288 0x10
23#define TIMER_CONTROL_REG3399 0x1c
23#define TIMER_INT_STATUS 0x18 24#define TIMER_INT_STATUS 0x18
24 25
25#define TIMER_DISABLE 0x0 26#define TIMER_DISABLE 0x0
@@ -31,6 +32,7 @@
31struct bc_timer { 32struct bc_timer {
32 struct clock_event_device ce; 33 struct clock_event_device ce;
33 void __iomem *base; 34 void __iomem *base;
35 void __iomem *ctrl;
34 u32 freq; 36 u32 freq;
35}; 37};
36 38
@@ -46,15 +48,20 @@ static inline void __iomem *rk_base(struct clock_event_device *ce)
46 return rk_timer(ce)->base; 48 return rk_timer(ce)->base;
47} 49}
48 50
51static inline void __iomem *rk_ctrl(struct clock_event_device *ce)
52{
53 return rk_timer(ce)->ctrl;
54}
55
49static inline void rk_timer_disable(struct clock_event_device *ce) 56static inline void rk_timer_disable(struct clock_event_device *ce)
50{ 57{
51 writel_relaxed(TIMER_DISABLE, rk_base(ce) + TIMER_CONTROL_REG); 58 writel_relaxed(TIMER_DISABLE, rk_ctrl(ce));
52} 59}
53 60
54static inline void rk_timer_enable(struct clock_event_device *ce, u32 flags) 61static inline void rk_timer_enable(struct clock_event_device *ce, u32 flags)
55{ 62{
56 writel_relaxed(TIMER_ENABLE | TIMER_INT_UNMASK | flags, 63 writel_relaxed(TIMER_ENABLE | TIMER_INT_UNMASK | flags,
57 rk_base(ce) + TIMER_CONTROL_REG); 64 rk_ctrl(ce));
58} 65}
59 66
60static void rk_timer_update_counter(unsigned long cycles, 67static void rk_timer_update_counter(unsigned long cycles,
@@ -106,37 +113,42 @@ static irqreturn_t rk_timer_interrupt(int irq, void *dev_id)
106 return IRQ_HANDLED; 113 return IRQ_HANDLED;
107} 114}
108 115
109static void __init rk_timer_init(struct device_node *np) 116static int __init rk_timer_init(struct device_node *np, u32 ctrl_reg)
110{ 117{
111 struct clock_event_device *ce = &bc_timer.ce; 118 struct clock_event_device *ce = &bc_timer.ce;
112 struct clk *timer_clk; 119 struct clk *timer_clk;
113 struct clk *pclk; 120 struct clk *pclk;
114 int ret, irq; 121 int ret = -EINVAL, irq;
115 122
116 bc_timer.base = of_iomap(np, 0); 123 bc_timer.base = of_iomap(np, 0);
117 if (!bc_timer.base) { 124 if (!bc_timer.base) {
118 pr_err("Failed to get base address for '%s'\n", TIMER_NAME); 125 pr_err("Failed to get base address for '%s'\n", TIMER_NAME);
119 return; 126 return -ENXIO;
120 } 127 }
128 bc_timer.ctrl = bc_timer.base + ctrl_reg;
121 129
122 pclk = of_clk_get_by_name(np, "pclk"); 130 pclk = of_clk_get_by_name(np, "pclk");
123 if (IS_ERR(pclk)) { 131 if (IS_ERR(pclk)) {
132 ret = PTR_ERR(pclk);
124 pr_err("Failed to get pclk for '%s'\n", TIMER_NAME); 133 pr_err("Failed to get pclk for '%s'\n", TIMER_NAME);
125 goto out_unmap; 134 goto out_unmap;
126 } 135 }
127 136
128 if (clk_prepare_enable(pclk)) { 137 ret = clk_prepare_enable(pclk);
138 if (ret) {
129 pr_err("Failed to enable pclk for '%s'\n", TIMER_NAME); 139 pr_err("Failed to enable pclk for '%s'\n", TIMER_NAME);
130 goto out_unmap; 140 goto out_unmap;
131 } 141 }
132 142
133 timer_clk = of_clk_get_by_name(np, "timer"); 143 timer_clk = of_clk_get_by_name(np, "timer");
134 if (IS_ERR(timer_clk)) { 144 if (IS_ERR(timer_clk)) {
145 ret = PTR_ERR(timer_clk);
135 pr_err("Failed to get timer clock for '%s'\n", TIMER_NAME); 146 pr_err("Failed to get timer clock for '%s'\n", TIMER_NAME);
136 goto out_timer_clk; 147 goto out_timer_clk;
137 } 148 }
138 149
139 if (clk_prepare_enable(timer_clk)) { 150 ret = clk_prepare_enable(timer_clk);
151 if (ret) {
140 pr_err("Failed to enable timer clock\n"); 152 pr_err("Failed to enable timer clock\n");
141 goto out_timer_clk; 153 goto out_timer_clk;
142 } 154 }
@@ -145,17 +157,19 @@ static void __init rk_timer_init(struct device_node *np)
145 157
146 irq = irq_of_parse_and_map(np, 0); 158 irq = irq_of_parse_and_map(np, 0);
147 if (!irq) { 159 if (!irq) {
160 ret = -EINVAL;
148 pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME); 161 pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
149 goto out_irq; 162 goto out_irq;
150 } 163 }
151 164
152 ce->name = TIMER_NAME; 165 ce->name = TIMER_NAME;
153 ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; 166 ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
167 CLOCK_EVT_FEAT_DYNIRQ;
154 ce->set_next_event = rk_timer_set_next_event; 168 ce->set_next_event = rk_timer_set_next_event;
155 ce->set_state_shutdown = rk_timer_shutdown; 169 ce->set_state_shutdown = rk_timer_shutdown;
156 ce->set_state_periodic = rk_timer_set_periodic; 170 ce->set_state_periodic = rk_timer_set_periodic;
157 ce->irq = irq; 171 ce->irq = irq;
158 ce->cpumask = cpumask_of(0); 172 ce->cpumask = cpu_possible_mask;
159 ce->rating = 250; 173 ce->rating = 250;
160 174
161 rk_timer_interrupt_clear(ce); 175 rk_timer_interrupt_clear(ce);
@@ -169,7 +183,7 @@ static void __init rk_timer_init(struct device_node *np)
169 183
170 clockevents_config_and_register(ce, bc_timer.freq, 1, UINT_MAX); 184 clockevents_config_and_register(ce, bc_timer.freq, 1, UINT_MAX);
171 185
172 return; 186 return 0;
173 187
174out_irq: 188out_irq:
175 clk_disable_unprepare(timer_clk); 189 clk_disable_unprepare(timer_clk);
@@ -177,6 +191,21 @@ out_timer_clk:
177 clk_disable_unprepare(pclk); 191 clk_disable_unprepare(pclk);
178out_unmap: 192out_unmap:
179 iounmap(bc_timer.base); 193 iounmap(bc_timer.base);
194
195 return ret;
196}
197
198static int __init rk3288_timer_init(struct device_node *np)
199{
200 return rk_timer_init(np, TIMER_CONTROL_REG3288);
201}
202
203static int __init rk3399_timer_init(struct device_node *np)
204{
205 return rk_timer_init(np, TIMER_CONTROL_REG3399);
180} 206}
181 207
182CLOCKSOURCE_OF_DECLARE(rk_timer, "rockchip,rk3288-timer", rk_timer_init); 208CLOCKSOURCE_OF_DECLARE(rk3288_timer, "rockchip,rk3288-timer",
209 rk3288_timer_init);
210CLOCKSOURCE_OF_DECLARE(rk3399_timer, "rockchip,rk3399-timer",
211 rk3399_timer_init);
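The rockchip change is a small variation on the same theme: one parameterized init routine plus a thin wrapper per compatible string, so rk3288 and rk3399 can share everything except the control register offset. A sketch of that shape with invented names and offsets:

```c
#include <linux/clocksource.h>
#include <linux/of.h>
#include <linux/types.h>

#define QUX_CTRL_V1	0x10	/* illustrative offsets only */
#define QUX_CTRL_V2	0x1c

static int __init qux_timer_init(struct device_node *np, u32 ctrl_reg)
{
	/* map registers, enable clocks, drive base + ctrl_reg ... */
	return 0;
}

static int __init qux_v1_init(struct device_node *np)
{
	return qux_timer_init(np, QUX_CTRL_V1);
}

static int __init qux_v2_init(struct device_node *np)
{
	return qux_timer_init(np, QUX_CTRL_V2);
}

CLOCKSOURCE_OF_DECLARE(qux_v1, "vendor,qux-v1-timer", qux_v1_init);
CLOCKSOURCE_OF_DECLARE(qux_v2, "vendor,qux-v2-timer", qux_v2_init);
```

Keeping the SoC difference in a single u32 argument avoids duplicating the whole init path per variant.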
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index 9502bc4c3f6d..54565bd0093b 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -130,9 +130,9 @@ static void samsung_time_stop(unsigned int channel)
130 130
131 spin_lock_irqsave(&samsung_pwm_lock, flags); 131 spin_lock_irqsave(&samsung_pwm_lock, flags);
132 132
133 tcon = __raw_readl(pwm.base + REG_TCON); 133 tcon = readl_relaxed(pwm.base + REG_TCON);
134 tcon &= ~TCON_START(channel); 134 tcon &= ~TCON_START(channel);
135 __raw_writel(tcon, pwm.base + REG_TCON); 135 writel_relaxed(tcon, pwm.base + REG_TCON);
136 136
137 spin_unlock_irqrestore(&samsung_pwm_lock, flags); 137 spin_unlock_irqrestore(&samsung_pwm_lock, flags);
138} 138}
@@ -148,14 +148,14 @@ static void samsung_time_setup(unsigned int channel, unsigned long tcnt)
148 148
149 spin_lock_irqsave(&samsung_pwm_lock, flags); 149 spin_lock_irqsave(&samsung_pwm_lock, flags);
150 150
151 tcon = __raw_readl(pwm.base + REG_TCON); 151 tcon = readl_relaxed(pwm.base + REG_TCON);
152 152
153 tcon &= ~(TCON_START(tcon_chan) | TCON_AUTORELOAD(tcon_chan)); 153 tcon &= ~(TCON_START(tcon_chan) | TCON_AUTORELOAD(tcon_chan));
154 tcon |= TCON_MANUALUPDATE(tcon_chan); 154 tcon |= TCON_MANUALUPDATE(tcon_chan);
155 155
156 __raw_writel(tcnt, pwm.base + REG_TCNTB(channel)); 156 writel_relaxed(tcnt, pwm.base + REG_TCNTB(channel));
157 __raw_writel(tcnt, pwm.base + REG_TCMPB(channel)); 157 writel_relaxed(tcnt, pwm.base + REG_TCMPB(channel));
158 __raw_writel(tcon, pwm.base + REG_TCON); 158 writel_relaxed(tcon, pwm.base + REG_TCON);
159 159
160 spin_unlock_irqrestore(&samsung_pwm_lock, flags); 160 spin_unlock_irqrestore(&samsung_pwm_lock, flags);
161} 161}
@@ -170,7 +170,7 @@ static void samsung_time_start(unsigned int channel, bool periodic)
170 170
171 spin_lock_irqsave(&samsung_pwm_lock, flags); 171 spin_lock_irqsave(&samsung_pwm_lock, flags);
172 172
173 tcon = __raw_readl(pwm.base + REG_TCON); 173 tcon = readl_relaxed(pwm.base + REG_TCON);
174 174
175 tcon &= ~TCON_MANUALUPDATE(channel); 175 tcon &= ~TCON_MANUALUPDATE(channel);
176 tcon |= TCON_START(channel); 176 tcon |= TCON_START(channel);
@@ -180,7 +180,7 @@ static void samsung_time_start(unsigned int channel, bool periodic)
180 else 180 else
181 tcon &= ~TCON_AUTORELOAD(channel); 181 tcon &= ~TCON_AUTORELOAD(channel);
182 182
183 __raw_writel(tcon, pwm.base + REG_TCON); 183 writel_relaxed(tcon, pwm.base + REG_TCON);
184 184
185 spin_unlock_irqrestore(&samsung_pwm_lock, flags); 185 spin_unlock_irqrestore(&samsung_pwm_lock, flags);
186} 186}
@@ -333,11 +333,10 @@ static u64 notrace samsung_read_sched_clock(void)
333 return samsung_clocksource_read(NULL); 333 return samsung_clocksource_read(NULL);
334} 334}
335 335
336static void __init samsung_clocksource_init(void) 336static int __init samsung_clocksource_init(void)
337{ 337{
338 unsigned long pclk; 338 unsigned long pclk;
339 unsigned long clock_rate; 339 unsigned long clock_rate;
340 int ret;
341 340
342 pclk = clk_get_rate(pwm.timerclk); 341 pclk = clk_get_rate(pwm.timerclk);
343 342
@@ -358,9 +357,7 @@ static void __init samsung_clocksource_init(void)
358 pwm.variant.bits, clock_rate); 357 pwm.variant.bits, clock_rate);
359 358
360 samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits); 359 samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits);
361 ret = clocksource_register_hz(&samsung_clocksource, clock_rate); 360 return clocksource_register_hz(&samsung_clocksource, clock_rate);
362 if (ret)
363 panic("samsung_clocksource_timer: can't register clocksource\n");
364} 361}
365 362
366static void __init samsung_timer_resources(void) 363static void __init samsung_timer_resources(void)
@@ -380,26 +377,31 @@ static void __init samsung_timer_resources(void)
380/* 377/*
381 * PWM master driver 378 * PWM master driver
382 */ 379 */
383static void __init _samsung_pwm_clocksource_init(void) 380static int __init _samsung_pwm_clocksource_init(void)
384{ 381{
385 u8 mask; 382 u8 mask;
386 int channel; 383 int channel;
387 384
388 mask = ~pwm.variant.output_mask & ((1 << SAMSUNG_PWM_NUM) - 1); 385 mask = ~pwm.variant.output_mask & ((1 << SAMSUNG_PWM_NUM) - 1);
389 channel = fls(mask) - 1; 386 channel = fls(mask) - 1;
390 if (channel < 0) 387 if (channel < 0) {
391 panic("failed to find PWM channel for clocksource"); 388 pr_crit("failed to find PWM channel for clocksource");
389 return -EINVAL;
390 }
392 pwm.source_id = channel; 391 pwm.source_id = channel;
393 392
394 mask &= ~(1 << channel); 393 mask &= ~(1 << channel);
395 channel = fls(mask) - 1; 394 channel = fls(mask) - 1;
396 if (channel < 0) 395 if (channel < 0) {
397 panic("failed to find PWM channel for clock event"); 396 pr_crit("failed to find PWM channel for clock event");
397 return -EINVAL;
398 }
398 pwm.event_id = channel; 399 pwm.event_id = channel;
399 400
400 samsung_timer_resources(); 401 samsung_timer_resources();
401 samsung_clockevent_init(); 402 samsung_clockevent_init();
402 samsung_clocksource_init(); 403
404 return samsung_clocksource_init();
403} 405}
404 406
405void __init samsung_pwm_clocksource_init(void __iomem *base, 407void __init samsung_pwm_clocksource_init(void __iomem *base,
@@ -417,8 +419,8 @@ void __init samsung_pwm_clocksource_init(void __iomem *base,
417} 419}
418 420
419#ifdef CONFIG_CLKSRC_OF 421#ifdef CONFIG_CLKSRC_OF
420static void __init samsung_pwm_alloc(struct device_node *np, 422static int __init samsung_pwm_alloc(struct device_node *np,
421 const struct samsung_pwm_variant *variant) 423 const struct samsung_pwm_variant *variant)
422{ 424{
423 struct property *prop; 425 struct property *prop;
424 const __be32 *cur; 426 const __be32 *cur;
@@ -441,14 +443,16 @@ static void __init samsung_pwm_alloc(struct device_node *np,
441 pwm.base = of_iomap(np, 0); 443 pwm.base = of_iomap(np, 0);
442 if (!pwm.base) { 444 if (!pwm.base) {
443 pr_err("%s: failed to map PWM registers\n", __func__); 445 pr_err("%s: failed to map PWM registers\n", __func__);
444 return; 446 return -ENXIO;
445 } 447 }
446 448
447 pwm.timerclk = of_clk_get_by_name(np, "timers"); 449 pwm.timerclk = of_clk_get_by_name(np, "timers");
448 if (IS_ERR(pwm.timerclk)) 450 if (IS_ERR(pwm.timerclk)) {
449 panic("failed to get timers clock for timer"); 451 pr_crit("failed to get timers clock for timer");
452 return PTR_ERR(pwm.timerclk);
453 }
450 454
451 _samsung_pwm_clocksource_init(); 455 return _samsung_pwm_clocksource_init();
452} 456}
453 457
454static const struct samsung_pwm_variant s3c24xx_variant = { 458static const struct samsung_pwm_variant s3c24xx_variant = {
@@ -458,9 +462,9 @@ static const struct samsung_pwm_variant s3c24xx_variant = {
458 .tclk_mask = (1 << 4), 462 .tclk_mask = (1 << 4),
459}; 463};
460 464
461static void __init s3c2410_pwm_clocksource_init(struct device_node *np) 465static int __init s3c2410_pwm_clocksource_init(struct device_node *np)
462{ 466{
463 samsung_pwm_alloc(np, &s3c24xx_variant); 467 return samsung_pwm_alloc(np, &s3c24xx_variant);
464} 468}
465CLOCKSOURCE_OF_DECLARE(s3c2410_pwm, "samsung,s3c2410-pwm", s3c2410_pwm_clocksource_init); 469CLOCKSOURCE_OF_DECLARE(s3c2410_pwm, "samsung,s3c2410-pwm", s3c2410_pwm_clocksource_init);
466 470
@@ -471,9 +475,9 @@ static const struct samsung_pwm_variant s3c64xx_variant = {
471 .tclk_mask = (1 << 7) | (1 << 6) | (1 << 5), 475 .tclk_mask = (1 << 7) | (1 << 6) | (1 << 5),
472}; 476};
473 477
474static void __init s3c64xx_pwm_clocksource_init(struct device_node *np) 478static int __init s3c64xx_pwm_clocksource_init(struct device_node *np)
475{ 479{
476 samsung_pwm_alloc(np, &s3c64xx_variant); 480 return samsung_pwm_alloc(np, &s3c64xx_variant);
477} 481}
478CLOCKSOURCE_OF_DECLARE(s3c6400_pwm, "samsung,s3c6400-pwm", s3c64xx_pwm_clocksource_init); 482CLOCKSOURCE_OF_DECLARE(s3c6400_pwm, "samsung,s3c6400-pwm", s3c64xx_pwm_clocksource_init);
479 483
@@ -484,9 +488,9 @@ static const struct samsung_pwm_variant s5p64x0_variant = {
484 .tclk_mask = 0, 488 .tclk_mask = 0,
485}; 489};
486 490
487static void __init s5p64x0_pwm_clocksource_init(struct device_node *np) 491static int __init s5p64x0_pwm_clocksource_init(struct device_node *np)
488{ 492{
489 samsung_pwm_alloc(np, &s5p64x0_variant); 493 return samsung_pwm_alloc(np, &s5p64x0_variant);
490} 494}
491CLOCKSOURCE_OF_DECLARE(s5p6440_pwm, "samsung,s5p6440-pwm", s5p64x0_pwm_clocksource_init); 495CLOCKSOURCE_OF_DECLARE(s5p6440_pwm, "samsung,s5p6440-pwm", s5p64x0_pwm_clocksource_init);
492 496
@@ -497,9 +501,9 @@ static const struct samsung_pwm_variant s5p_variant = {
497 .tclk_mask = (1 << 5), 501 .tclk_mask = (1 << 5),
498}; 502};
499 503
500static void __init s5p_pwm_clocksource_init(struct device_node *np) 504static int __init s5p_pwm_clocksource_init(struct device_node *np)
501{ 505{
502 samsung_pwm_alloc(np, &s5p_variant); 506 return samsung_pwm_alloc(np, &s5p_variant);
503} 507}
504CLOCKSOURCE_OF_DECLARE(s5pc100_pwm, "samsung,s5pc100-pwm", s5p_pwm_clocksource_init); 508CLOCKSOURCE_OF_DECLARE(s5pc100_pwm, "samsung,s5pc100-pwm", s5p_pwm_clocksource_init);
505#endif 509#endif
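Alongside the init-return conversion, samsung_pwm_timer swaps the __raw_readl()/__raw_writel() accessors for readl_relaxed()/writel_relaxed(). The relaxed accessors still omit the expensive barriers of readl()/writel(), but unlike the __raw_* forms they guarantee single, little-endian-ordered accesses, so the driver also behaves on big-endian kernels. An illustrative read-modify-write (names invented; the real driver serializes callers with a spinlock):

```c
#include <linux/io.h>
#include <linux/types.h>

/* stop a PWM channel: clear its start bit without full barriers */
static void pwm_stop_channel(void __iomem *tcon_reg, u32 start_bit)
{
	u32 tcon = readl_relaxed(tcon_reg);	/* byte-swapped on BE, no rmb() */

	tcon &= ~start_bit;
	writel_relaxed(tcon, tcon_reg);		/* no wmb(); caller holds the lock */
}
```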
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 6f3719d73390..97669ee4df2a 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -146,7 +146,7 @@ static u64 notrace sun4i_timer_sched_read(void)
146 return ~readl(timer_base + TIMER_CNTVAL_REG(1)); 146 return ~readl(timer_base + TIMER_CNTVAL_REG(1));
147} 147}
148 148
149static void __init sun4i_timer_init(struct device_node *node) 149static int __init sun4i_timer_init(struct device_node *node)
150{ 150{
151 unsigned long rate = 0; 151 unsigned long rate = 0;
152 struct clk *clk; 152 struct clk *clk;
@@ -154,17 +154,28 @@ static void __init sun4i_timer_init(struct device_node *node)
154 u32 val; 154 u32 val;
155 155
156 timer_base = of_iomap(node, 0); 156 timer_base = of_iomap(node, 0);
157 if (!timer_base) 157 if (!timer_base) {
158 panic("Can't map registers"); 158 pr_crit("Can't map registers");
159 return -ENXIO;
160 }
159 161
160 irq = irq_of_parse_and_map(node, 0); 162 irq = irq_of_parse_and_map(node, 0);
161 if (irq <= 0) 163 if (irq <= 0) {
162 panic("Can't parse IRQ"); 164 pr_crit("Can't parse IRQ");
165 return -EINVAL;
166 }
163 167
164 clk = of_clk_get(node, 0); 168 clk = of_clk_get(node, 0);
165 if (IS_ERR(clk)) 169 if (IS_ERR(clk)) {
166 panic("Can't get timer clock"); 170 pr_crit("Can't get timer clock");
167 clk_prepare_enable(clk); 171 return PTR_ERR(clk);
172 }
173
174 ret = clk_prepare_enable(clk);
175 if (ret) {
176 pr_err("Failed to prepare clock");
177 return ret;
178 }
168 179
169 rate = clk_get_rate(clk); 180 rate = clk_get_rate(clk);
170 181
@@ -182,8 +193,12 @@ static void __init sun4i_timer_init(struct device_node *node)
182 of_machine_is_compatible("allwinner,sun5i-a10s")) 193 of_machine_is_compatible("allwinner,sun5i-a10s"))
183 sched_clock_register(sun4i_timer_sched_read, 32, rate); 194 sched_clock_register(sun4i_timer_sched_read, 32, rate);
184 195
185 clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name, 196 ret = clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name,
186 rate, 350, 32, clocksource_mmio_readl_down); 197 rate, 350, 32, clocksource_mmio_readl_down);
198 if (ret) {
199 pr_err("Failed to register clocksource");
200 return ret;
201 }
187 202
188 ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); 203 ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
189 204
@@ -200,12 +215,16 @@ static void __init sun4i_timer_init(struct device_node *node)
200 TIMER_SYNC_TICKS, 0xffffffff); 215 TIMER_SYNC_TICKS, 0xffffffff);
201 216
202 ret = setup_irq(irq, &sun4i_timer_irq); 217 ret = setup_irq(irq, &sun4i_timer_irq);
203 if (ret) 218 if (ret) {
204 pr_warn("failed to setup irq %d\n", irq); 219 pr_err("failed to setup irq %d\n", irq);
220 return ret;
221 }
205 222
206 /* Enable timer0 interrupt */ 223 /* Enable timer0 interrupt */
207 val = readl(timer_base + TIMER_IRQ_EN_REG); 224 val = readl(timer_base + TIMER_IRQ_EN_REG);
208 writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); 225 writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
226
227 return ret;
209} 228}
210CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer", 229CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
211 sun4i_timer_init); 230 sun4i_timer_init);

diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
index c407c47a3232..12fcef8cf2d3 100644
--- a/drivers/clocksource/tango_xtal.c
+++ b/drivers/clocksource/tango_xtal.c
@@ -19,7 +19,7 @@ static u64 notrace read_sched_clock(void)
19 return read_xtal_counter(); 19 return read_xtal_counter();
20} 20}
21 21
22static void __init tango_clocksource_init(struct device_node *np) 22static int __init tango_clocksource_init(struct device_node *np)
23{ 23{
24 struct clk *clk; 24 struct clk *clk;
25 int xtal_freq, ret; 25 int xtal_freq, ret;
@@ -27,13 +27,13 @@ static void __init tango_clocksource_init(struct device_node *np)
27 xtal_in_cnt = of_iomap(np, 0); 27 xtal_in_cnt = of_iomap(np, 0);
28 if (xtal_in_cnt == NULL) { 28 if (xtal_in_cnt == NULL) {
29 pr_err("%s: invalid address\n", np->full_name); 29 pr_err("%s: invalid address\n", np->full_name);
30 return; 30 return -ENXIO;
31 } 31 }
32 32
33 clk = of_clk_get(np, 0); 33 clk = of_clk_get(np, 0);
34 if (IS_ERR(clk)) { 34 if (IS_ERR(clk)) {
35 pr_err("%s: invalid clock\n", np->full_name); 35 pr_err("%s: invalid clock\n", np->full_name);
36 return; 36 return PTR_ERR(clk);
37 } 37 }
38 38
39 xtal_freq = clk_get_rate(clk); 39 xtal_freq = clk_get_rate(clk);
@@ -44,11 +44,13 @@ static void __init tango_clocksource_init(struct device_node *np)
44 32, clocksource_mmio_readl_up); 44 32, clocksource_mmio_readl_up);
45 if (ret) { 45 if (ret) {
46 pr_err("%s: registration failed\n", np->full_name); 46 pr_err("%s: registration failed\n", np->full_name);
47 return; 47 return ret;
48 } 48 }
49 49
50 sched_clock_register(read_sched_clock, 32, xtal_freq); 50 sched_clock_register(read_sched_clock, 32, xtal_freq);
51 register_current_timer_delay(&delay_timer); 51 register_current_timer_delay(&delay_timer);
52
53 return 0;
52} 54}
53 55
54CLOCKSOURCE_OF_DECLARE(tango, "sigma,tick-counter", tango_clocksource_init); 56CLOCKSOURCE_OF_DECLARE(tango, "sigma,tick-counter", tango_clocksource_init);
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index 7b94ad2ab278..f960891aa04e 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -165,7 +165,7 @@ static struct irqaction tegra_timer_irq = {
165 .dev_id = &tegra_clockevent, 165 .dev_id = &tegra_clockevent,
166}; 166};
167 167
168static void __init tegra20_init_timer(struct device_node *np) 168static int __init tegra20_init_timer(struct device_node *np)
169{ 169{
170 struct clk *clk; 170 struct clk *clk;
171 unsigned long rate; 171 unsigned long rate;
@@ -174,13 +174,13 @@ static void __init tegra20_init_timer(struct device_node *np)
174 timer_reg_base = of_iomap(np, 0); 174 timer_reg_base = of_iomap(np, 0);
175 if (!timer_reg_base) { 175 if (!timer_reg_base) {
176 pr_err("Can't map timer registers\n"); 176 pr_err("Can't map timer registers\n");
177 BUG(); 177 return -ENXIO;
178 } 178 }
179 179
180 tegra_timer_irq.irq = irq_of_parse_and_map(np, 2); 180 tegra_timer_irq.irq = irq_of_parse_and_map(np, 2);
181 if (tegra_timer_irq.irq <= 0) { 181 if (tegra_timer_irq.irq <= 0) {
182 pr_err("Failed to map timer IRQ\n"); 182 pr_err("Failed to map timer IRQ\n");
183 BUG(); 183 return -EINVAL;
184 } 184 }
185 185
186 clk = of_clk_get(np, 0); 186 clk = of_clk_get(np, 0);
@@ -211,10 +211,12 @@ static void __init tegra20_init_timer(struct device_node *np)
211 211
212 sched_clock_register(tegra_read_sched_clock, 32, 1000000); 212 sched_clock_register(tegra_read_sched_clock, 32, 1000000);
213 213
214 if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US, 214 ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
215 "timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) { 215 "timer_us", 1000000, 300, 32,
216 clocksource_mmio_readl_up);
217 if (ret) {
216 pr_err("Failed to register clocksource\n"); 218 pr_err("Failed to register clocksource\n");
217 BUG(); 219 return ret;
218 } 220 }
219 221
220 tegra_delay_timer.read_current_timer = 222 tegra_delay_timer.read_current_timer =
@@ -225,24 +227,26 @@ static void __init tegra20_init_timer(struct device_node *np)
225 ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq); 227 ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
226 if (ret) { 228 if (ret) {
227 pr_err("Failed to register timer IRQ: %d\n", ret); 229 pr_err("Failed to register timer IRQ: %d\n", ret);
228 BUG(); 230 return ret;
229 } 231 }
230 232
231 tegra_clockevent.cpumask = cpu_all_mask; 233 tegra_clockevent.cpumask = cpu_all_mask;
232 tegra_clockevent.irq = tegra_timer_irq.irq; 234 tegra_clockevent.irq = tegra_timer_irq.irq;
233 clockevents_config_and_register(&tegra_clockevent, 1000000, 235 clockevents_config_and_register(&tegra_clockevent, 1000000,
234 0x1, 0x1fffffff); 236 0x1, 0x1fffffff);
237
238 return 0;
235} 239}
236CLOCKSOURCE_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer); 240CLOCKSOURCE_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer);
237 241
238static void __init tegra20_init_rtc(struct device_node *np) 242static int __init tegra20_init_rtc(struct device_node *np)
239{ 243{
240 struct clk *clk; 244 struct clk *clk;
241 245
242 rtc_base = of_iomap(np, 0); 246 rtc_base = of_iomap(np, 0);
243 if (!rtc_base) { 247 if (!rtc_base) {
244 pr_err("Can't map RTC registers"); 248 pr_err("Can't map RTC registers");
245 BUG(); 249 return -ENXIO;
246 } 250 }
247 251
248 /* 252 /*
@@ -255,6 +259,6 @@ static void __init tegra20_init_rtc(struct device_node *np)
255 else 259 else
256 clk_prepare_enable(clk); 260 clk_prepare_enable(clk);
257 261
258 register_persistent_clock(NULL, tegra_read_persistent_clock64); 262 return register_persistent_clock(NULL, tegra_read_persistent_clock64);
259} 263}
260CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc); 264CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index d93ec3c4f139..20ec066481fe 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -246,7 +246,7 @@ static void armada_370_xp_timer_resume(void)
246 writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF); 246 writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF);
247} 247}
248 248
249struct syscore_ops armada_370_xp_timer_syscore_ops = { 249static struct syscore_ops armada_370_xp_timer_syscore_ops = {
250 .suspend = armada_370_xp_timer_suspend, 250 .suspend = armada_370_xp_timer_suspend,
251 .resume = armada_370_xp_timer_resume, 251 .resume = armada_370_xp_timer_resume,
252}; 252};
@@ -260,14 +260,22 @@ static struct delay_timer armada_370_delay_timer = {
260 .read_current_timer = armada_370_delay_timer_read, 260 .read_current_timer = armada_370_delay_timer_read,
261}; 261};
262 262
263static void __init armada_370_xp_timer_common_init(struct device_node *np) 263static int __init armada_370_xp_timer_common_init(struct device_node *np)
264{ 264{
265 u32 clr = 0, set = 0; 265 u32 clr = 0, set = 0;
266 int res; 266 int res;
267 267
268 timer_base = of_iomap(np, 0); 268 timer_base = of_iomap(np, 0);
269 WARN_ON(!timer_base); 269 if (!timer_base) {
270 pr_err("Failed to iomap");
271 return -ENXIO;
272 }
273
270 local_base = of_iomap(np, 1); 274 local_base = of_iomap(np, 1);
275 if (!local_base) {
276 pr_err("Failed to iomap");
277 return -ENXIO;
278 }
271 279
272 if (timer25Mhz) { 280 if (timer25Mhz) {
273 set = TIMER0_25MHZ; 281 set = TIMER0_25MHZ;
@@ -306,14 +314,19 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
306 */ 314 */
307 sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk); 315 sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
308 316
309 clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, 317 res = clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
310 "armada_370_xp_clocksource", 318 "armada_370_xp_clocksource",
311 timer_clk, 300, 32, clocksource_mmio_readl_down); 319 timer_clk, 300, 32, clocksource_mmio_readl_down);
320 if (res) {
321 pr_err("Failed to initialize clocksource mmio");
322 return res;
323 }
312 324
313 register_cpu_notifier(&armada_370_xp_timer_cpu_nb); 325 register_cpu_notifier(&armada_370_xp_timer_cpu_nb);
314 326
315 armada_370_xp_evt = alloc_percpu(struct clock_event_device); 327 armada_370_xp_evt = alloc_percpu(struct clock_event_device);
316 328 if (!armada_370_xp_evt)
329 return -ENOMEM;
317 330
318 /* 331 /*
319 * Setup clockevent timer (interrupt-driven). 332 * Setup clockevent timer (interrupt-driven).
@@ -323,33 +336,54 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
323 "armada_370_xp_per_cpu_tick", 336 "armada_370_xp_per_cpu_tick",
324 armada_370_xp_evt); 337 armada_370_xp_evt);
325 /* Immediately configure the timer on the boot CPU */ 338 /* Immediately configure the timer on the boot CPU */
326 if (!res) 339 if (res) {
327 armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt)); 340 pr_err("Failed to request percpu irq");
341 return res;
342 }
343
344 res = armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
345 if (res) {
346 pr_err("Failed to setup timer");
347 return res;
348 }
328 349
329 register_syscore_ops(&armada_370_xp_timer_syscore_ops); 350 register_syscore_ops(&armada_370_xp_timer_syscore_ops);
351
352 return 0;
330} 353}
331 354
332static void __init armada_xp_timer_init(struct device_node *np) 355static int __init armada_xp_timer_init(struct device_node *np)
333{ 356{
334 struct clk *clk = of_clk_get_by_name(np, "fixed"); 357 struct clk *clk = of_clk_get_by_name(np, "fixed");
 358 int ret;
 359
 360 if (IS_ERR(clk)) {
 361 pr_err("Failed to get clock");
 362 return PTR_ERR(clk);
 363 }
 364
 365 ret = clk_prepare_enable(clk);
 366 if (ret)
 367 return ret;
335 369
336 /* The 25Mhz fixed clock is mandatory, and must always be available */
337 BUG_ON(IS_ERR(clk));
338 clk_prepare_enable(clk);
339 timer_clk = clk_get_rate(clk); 370 timer_clk = clk_get_rate(clk);
340 371
341 armada_370_xp_timer_common_init(np); 372 return armada_370_xp_timer_common_init(np);
342} 373}
343CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer", 374CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer",
344 armada_xp_timer_init); 375 armada_xp_timer_init);
345 376
346static void __init armada_375_timer_init(struct device_node *np) 377static int __init armada_375_timer_init(struct device_node *np)
347{ 378{
348 struct clk *clk; 379 struct clk *clk;
380 int ret;
349 381
350 clk = of_clk_get_by_name(np, "fixed"); 382 clk = of_clk_get_by_name(np, "fixed");
351 if (!IS_ERR(clk)) { 383 if (!IS_ERR(clk)) {
352 clk_prepare_enable(clk); 384 ret = clk_prepare_enable(clk);
385 if (ret)
386 return ret;
353 timer_clk = clk_get_rate(clk); 387 timer_clk = clk_get_rate(clk);
354 } else { 388 } else {
355 389
@@ -360,27 +394,43 @@ static void __init armada_375_timer_init(struct device_node *np)
360 clk = of_clk_get(np, 0); 394 clk = of_clk_get(np, 0);
361 395
362 /* Must have at least a clock */ 396 /* Must have at least a clock */
363 BUG_ON(IS_ERR(clk)); 397 if (IS_ERR(clk)) {
364 clk_prepare_enable(clk); 398 pr_err("Failed to get clock");
399 return PTR_ERR(clk);
400 }
401
402 ret = clk_prepare_enable(clk);
403 if (ret)
404 return ret;
405
365 timer_clk = clk_get_rate(clk) / TIMER_DIVIDER; 406 timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
366 timer25Mhz = false; 407 timer25Mhz = false;
367 } 408 }
368 409
369 armada_370_xp_timer_common_init(np); 410 return armada_370_xp_timer_common_init(np);
370} 411}
371CLOCKSOURCE_OF_DECLARE(armada_375, "marvell,armada-375-timer", 412CLOCKSOURCE_OF_DECLARE(armada_375, "marvell,armada-375-timer",
372 armada_375_timer_init); 413 armada_375_timer_init);
373 414
374static void __init armada_370_timer_init(struct device_node *np) 415static int __init armada_370_timer_init(struct device_node *np)
375{ 416{
376 struct clk *clk = of_clk_get(np, 0); 417 struct clk *clk;
418 int ret;
419
420 clk = of_clk_get(np, 0);
421 if (IS_ERR(clk)) {
422 pr_err("Failed to get clock");
423 return PTR_ERR(clk);
424 }
425
426 ret = clk_prepare_enable(clk);
427 if (ret)
428 return ret;
377 429
378 BUG_ON(IS_ERR(clk));
379 clk_prepare_enable(clk);
380 timer_clk = clk_get_rate(clk) / TIMER_DIVIDER; 430 timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
381 timer25Mhz = false; 431 timer25Mhz = false;
382 432
383 armada_370_xp_timer_common_init(np); 433 return armada_370_xp_timer_common_init(np);
384} 434}
385CLOCKSOURCE_OF_DECLARE(armada_370, "marvell,armada-370-timer", 435CLOCKSOURCE_OF_DECLARE(armada_370, "marvell,armada-370-timer",
386 armada_370_timer_init); 436 armada_370_timer_init);
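The armada_370_xp common init now checks alloc_percpu() and inverts the old "if (!res) setup()" into early error returns. A sketch of that per-CPU clockevent shape, with hypothetical quux_* names standing in for the driver's helpers:

```c
#include <linux/clockchips.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

static struct clock_event_device __percpu *quux_evt;

static irqreturn_t quux_tick_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;	/* this CPU's slot */

	evt->event_handler(evt);
	return IRQ_HANDLED;
}

static int quux_timer_setup(struct clock_event_device *evt)
{
	/* per-CPU clockevent registration would go here */
	return 0;
}

static int __init quux_percpu_clkevt_init(int irq)
{
	int res;

	quux_evt = alloc_percpu(struct clock_event_device);
	if (!quux_evt)
		return -ENOMEM;	/* the old code never checked this */

	res = request_percpu_irq(irq, quux_tick_isr, "quux_tick", quux_evt);
	if (res)
		return res;	/* previously only gated the setup call */

	/* configure the boot CPU now; others follow via the CPU notifier */
	return quux_timer_setup(this_cpu_ptr(quux_evt));
}
```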
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c
index b06e4c2be406..5ac344b383e1 100644
--- a/drivers/clocksource/time-efm32.c
+++ b/drivers/clocksource/time-efm32.c
@@ -233,10 +233,15 @@ static int __init efm32_clockevent_init(struct device_node *np)
233 DIV_ROUND_CLOSEST(rate, 1024), 233 DIV_ROUND_CLOSEST(rate, 1024),
234 0xf, 0xffff); 234 0xf, 0xffff);
235 235
236 setup_irq(irq, &efm32_clock_event_irq); 236 ret = setup_irq(irq, &efm32_clock_event_irq);
237 if (ret) {
238 pr_err("Failed setup irq");
239 goto err_setup_irq;
240 }
237 241
238 return 0; 242 return 0;
239 243
244err_setup_irq:
240err_get_irq: 245err_get_irq:
241 246
242 iounmap(base); 247 iounmap(base);
@@ -255,16 +260,16 @@ err_clk_get:
255 * This function asserts that we have exactly one clocksource and one 260 * This function asserts that we have exactly one clocksource and one
256 * clock_event_device in the end. 261 * clock_event_device in the end.
257 */ 262 */
258static void __init efm32_timer_init(struct device_node *np) 263static int __init efm32_timer_init(struct device_node *np)
259{ 264{
260 static int has_clocksource, has_clockevent; 265 static int has_clocksource, has_clockevent;
261 int ret; 266 int ret = 0;
262 267
263 if (!has_clocksource) { 268 if (!has_clocksource) {
264 ret = efm32_clocksource_init(np); 269 ret = efm32_clocksource_init(np);
265 if (!ret) { 270 if (!ret) {
266 has_clocksource = 1; 271 has_clocksource = 1;
267 return; 272 return 0;
268 } 273 }
269 } 274 }
270 275
@@ -272,9 +277,11 @@ static void __init efm32_timer_init(struct device_node *np)
272 ret = efm32_clockevent_init(np); 277 ret = efm32_clockevent_init(np);
273 if (!ret) { 278 if (!ret) {
274 has_clockevent = 1; 279 has_clockevent = 1;
275 return; 280 return 0;
276 } 281 }
277 } 282 }
283
284 return ret;
278} 285}
279CLOCKSOURCE_OF_DECLARE(efm32compat, "efm32,timer", efm32_timer_init); 286CLOCKSOURCE_OF_DECLARE(efm32compat, "efm32,timer", efm32_timer_init);
280CLOCKSOURCE_OF_DECLARE(efm32, "energymicro,efm32-timer", efm32_timer_init); 287CLOCKSOURCE_OF_DECLARE(efm32, "energymicro,efm32-timer", efm32_timer_init);
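efm32 (like lpc32xx just below) instantiates from two DT nodes: the first successfully probed node becomes the clocksource, the next one the clockevent. The conversion keeps that idiom, only surfacing the last error once neither role can be claimed. A minimal sketch with stub role initializers (the stubs and names are illustrative):

```c
#include <linux/errno.h>
#include <linux/of.h>

/* stand-ins for the real per-role init routines */
static int __init corge_clocksource_init(struct device_node *np) { return 0; }
static int __init corge_clockevent_init(struct device_node *np) { return -ENODEV; }

static int __init corge_timer_init(struct device_node *np)
{
	static int has_clocksource, has_clockevent;
	int ret = 0;

	if (!has_clocksource) {
		ret = corge_clocksource_init(np);
		if (!ret) {
			has_clocksource = 1;
			return 0;
		}
		/* fall through: this node may still work as a clockevent */
	}

	if (!has_clockevent) {
		ret = corge_clockevent_init(np);
		if (!ret) {
			has_clockevent = 1;
			return 0;
		}
	}

	return ret;	/* last failure, or 0 if both roles were already taken */
}
```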
diff --git a/drivers/clocksource/time-lpc32xx.c b/drivers/clocksource/time-lpc32xx.c
index daae61e8c820..9649cfdb9213 100644
--- a/drivers/clocksource/time-lpc32xx.c
+++ b/drivers/clocksource/time-lpc32xx.c
@@ -288,16 +288,16 @@ err_clk_enable:
288 * This function asserts that we have exactly one clocksource and one 288 * This function asserts that we have exactly one clocksource and one
289 * clock_event_device in the end. 289 * clock_event_device in the end.
290 */ 290 */
291static void __init lpc32xx_timer_init(struct device_node *np) 291static int __init lpc32xx_timer_init(struct device_node *np)
292{ 292{
293 static int has_clocksource, has_clockevent; 293 static int has_clocksource, has_clockevent;
294 int ret; 294 int ret = 0;
295 295
296 if (!has_clocksource) { 296 if (!has_clocksource) {
297 ret = lpc32xx_clocksource_init(np); 297 ret = lpc32xx_clocksource_init(np);
298 if (!ret) { 298 if (!ret) {
299 has_clocksource = 1; 299 has_clocksource = 1;
300 return; 300 return 0;
301 } 301 }
302 } 302 }
303 303
@@ -305,8 +305,10 @@ static void __init lpc32xx_timer_init(struct device_node *np)
305 ret = lpc32xx_clockevent_init(np); 305 ret = lpc32xx_clockevent_init(np);
306 if (!ret) { 306 if (!ret) {
307 has_clockevent = 1; 307 has_clockevent = 1;
308 return; 308 return 0;
309 } 309 }
310 } 310 }
311
312 return ret;
311} 313}
312CLOCKSOURCE_OF_DECLARE(lpc32xx_timer, "nxp,lpc3220-timer", lpc32xx_timer_init); 314CLOCKSOURCE_OF_DECLARE(lpc32xx_timer, "nxp,lpc3220-timer", lpc32xx_timer_init);
diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c
index 0ece7427b497..a28f496e97cf 100644
--- a/drivers/clocksource/time-orion.c
+++ b/drivers/clocksource/time-orion.c
@@ -104,25 +104,36 @@ static struct irqaction orion_clkevt_irq = {
104 .handler = orion_clkevt_irq_handler, 104 .handler = orion_clkevt_irq_handler,
105}; 105};
106 106
107static void __init orion_timer_init(struct device_node *np) 107static int __init orion_timer_init(struct device_node *np)
108{ 108{
109 struct clk *clk; 109 struct clk *clk;
110 int irq; 110 int irq, ret;
111 111
112 /* timer registers are shared with watchdog timer */ 112 /* timer registers are shared with watchdog timer */
113 timer_base = of_iomap(np, 0); 113 timer_base = of_iomap(np, 0);
114 if (!timer_base) 114 if (!timer_base) {
115 panic("%s: unable to map resource\n", np->name); 115 pr_err("%s: unable to map resource\n", np->name);
116 return -ENXIO;
117 }
116 118
117 clk = of_clk_get(np, 0); 119 clk = of_clk_get(np, 0);
118 if (IS_ERR(clk)) 120 if (IS_ERR(clk)) {
119 panic("%s: unable to get clk\n", np->name); 121 pr_err("%s: unable to get clk\n", np->name);
120 clk_prepare_enable(clk); 122 return PTR_ERR(clk);
123 }
124
125 ret = clk_prepare_enable(clk);
126 if (ret) {
127 pr_err("Failed to prepare clock");
128 return ret;
129 }
121 130
122 /* we are only interested in timer1 irq */ 131 /* we are only interested in timer1 irq */
123 irq = irq_of_parse_and_map(np, 1); 132 irq = irq_of_parse_and_map(np, 1);
124 if (irq <= 0) 133 if (irq <= 0) {
125 panic("%s: unable to parse timer1 irq\n", np->name); 134 pr_err("%s: unable to parse timer1 irq\n", np->name);
135 return -EINVAL;
136 }
126 137
127 /* setup timer0 as free-running clocksource */ 138 /* setup timer0 as free-running clocksource */
128 writel(~0, timer_base + TIMER0_VAL); 139 writel(~0, timer_base + TIMER0_VAL);
@@ -130,19 +141,30 @@ static void __init orion_timer_init(struct device_node *np)
130 atomic_io_modify(timer_base + TIMER_CTRL, 141 atomic_io_modify(timer_base + TIMER_CTRL,
131 TIMER0_RELOAD_EN | TIMER0_EN, 142 TIMER0_RELOAD_EN | TIMER0_EN,
132 TIMER0_RELOAD_EN | TIMER0_EN); 143 TIMER0_RELOAD_EN | TIMER0_EN);
133 clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource", 144
134 clk_get_rate(clk), 300, 32, 145 ret = clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
135 clocksource_mmio_readl_down); 146 clk_get_rate(clk), 300, 32,
147 clocksource_mmio_readl_down);
148 if (ret) {
149 pr_err("Failed to initialize mmio timer");
150 return ret;
151 }
152
136 sched_clock_register(orion_read_sched_clock, 32, clk_get_rate(clk)); 153 sched_clock_register(orion_read_sched_clock, 32, clk_get_rate(clk));
137 154
138 /* setup timer1 as clockevent timer */ 155 /* setup timer1 as clockevent timer */
139 if (setup_irq(irq, &orion_clkevt_irq)) 156 ret = setup_irq(irq, &orion_clkevt_irq);
140 panic("%s: unable to setup irq\n", np->name); 157 if (ret) {
158 pr_err("%s: unable to setup irq\n", np->name);
159 return ret;
160 }
141 161
142 ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ; 162 ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ;
143 orion_clkevt.cpumask = cpumask_of(0); 163 orion_clkevt.cpumask = cpumask_of(0);
144 orion_clkevt.irq = irq; 164 orion_clkevt.irq = irq;
145 clockevents_config_and_register(&orion_clkevt, clk_get_rate(clk), 165 clockevents_config_and_register(&orion_clkevt, clk_get_rate(clk),
146 ORION_ONESHOT_MIN, ORION_ONESHOT_MAX); 166 ORION_ONESHOT_MIN, ORION_ONESHOT_MAX);
167
168 return 0;
147} 169}
148CLOCKSOURCE_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init); 170CLOCKSOURCE_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init);
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c
index 376e59bc5fa0..a7d9a08e4b0e 100644
--- a/drivers/clocksource/time-pistachio.c
+++ b/drivers/clocksource/time-pistachio.c
@@ -148,7 +148,7 @@ static struct pistachio_clocksource pcs_gpt = {
148 }, 148 },
149}; 149};
150 150
151static void __init pistachio_clksrc_of_init(struct device_node *node) 151static int __init pistachio_clksrc_of_init(struct device_node *node)
152{ 152{
153 struct clk *sys_clk, *fast_clk; 153 struct clk *sys_clk, *fast_clk;
154 struct regmap *periph_regs; 154 struct regmap *periph_regs;
@@ -158,45 +158,45 @@ static void __init pistachio_clksrc_of_init(struct device_node *node)
158 pcs_gpt.base = of_iomap(node, 0); 158 pcs_gpt.base = of_iomap(node, 0);
159 if (!pcs_gpt.base) { 159 if (!pcs_gpt.base) {
160 pr_err("cannot iomap\n"); 160 pr_err("cannot iomap\n");
161 return; 161 return -ENXIO;
162 } 162 }
163 163
164 periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph"); 164 periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph");
165 if (IS_ERR(periph_regs)) { 165 if (IS_ERR(periph_regs)) {
166 pr_err("cannot get peripheral regmap (%ld)\n", 166 pr_err("cannot get peripheral regmap (%ld)\n",
167 PTR_ERR(periph_regs)); 167 PTR_ERR(periph_regs));
168 return; 168 return PTR_ERR(periph_regs);
169 } 169 }
170 170
171 /* Switch to using the fast counter clock */ 171 /* Switch to using the fast counter clock */
172 ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL, 172 ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL,
173 0xf, 0x0); 173 0xf, 0x0);
174 if (ret) 174 if (ret)
175 return; 175 return ret;
176 176
177 sys_clk = of_clk_get_by_name(node, "sys"); 177 sys_clk = of_clk_get_by_name(node, "sys");
178 if (IS_ERR(sys_clk)) { 178 if (IS_ERR(sys_clk)) {
179 pr_err("clock get failed (%ld)\n", PTR_ERR(sys_clk)); 179 pr_err("clock get failed (%ld)\n", PTR_ERR(sys_clk));
180 return; 180 return PTR_ERR(sys_clk);
181 } 181 }
182 182
183 fast_clk = of_clk_get_by_name(node, "fast"); 183 fast_clk = of_clk_get_by_name(node, "fast");
184 if (IS_ERR(fast_clk)) { 184 if (IS_ERR(fast_clk)) {
185 pr_err("clock get failed (%lu)\n", PTR_ERR(fast_clk)); 185 pr_err("clock get failed (%lu)\n", PTR_ERR(fast_clk));
186 return; 186 return PTR_ERR(fast_clk);
187 } 187 }
188 188
189 ret = clk_prepare_enable(sys_clk); 189 ret = clk_prepare_enable(sys_clk);
190 if (ret < 0) { 190 if (ret < 0) {
191 pr_err("failed to enable clock (%d)\n", ret); 191 pr_err("failed to enable clock (%d)\n", ret);
192 return; 192 return ret;
193 } 193 }
194 194
195 ret = clk_prepare_enable(fast_clk); 195 ret = clk_prepare_enable(fast_clk);
196 if (ret < 0) { 196 if (ret < 0) {
197 pr_err("failed to enable clock (%d)\n", ret); 197 pr_err("failed to enable clock (%d)\n", ret);
198 clk_disable_unprepare(sys_clk); 198 clk_disable_unprepare(sys_clk);
199 return; 199 return ret;
200 } 200 }
201 201
202 rate = clk_get_rate(fast_clk); 202 rate = clk_get_rate(fast_clk);
@@ -212,7 +212,7 @@ static void __init pistachio_clksrc_of_init(struct device_node *node)
212 212
213 raw_spin_lock_init(&pcs_gpt.lock); 213 raw_spin_lock_init(&pcs_gpt.lock);
214 sched_clock_register(pistachio_read_sched_clock, 32, rate); 214 sched_clock_register(pistachio_read_sched_clock, 32, rate);
215 clocksource_register_hz(&pcs_gpt.cs, rate); 215 return clocksource_register_hz(&pcs_gpt.cs, rate);
216} 216}
217CLOCKSOURCE_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer", 217CLOCKSOURCE_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer",
218 pistachio_clksrc_of_init); 218 pistachio_clksrc_of_init);
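pistachio illustrates the two-clock unwind: if enabling the second clock fails, the first, already enabled, one must be dropped before returning. A small hedged sketch of just that step, with an invented helper name:

```c
#include <linux/clk.h>

static int __init grault_enable_clocks(struct clk *sys_clk,
				       struct clk *fast_clk)
{
	int ret;

	ret = clk_prepare_enable(sys_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(fast_clk);
	if (ret) {
		clk_disable_unprepare(sys_clk);	/* undo the first enable */
		return ret;
	}

	return 0;
}
```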
diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c
index 27fa13680be1..90f8fbc154a4 100644
--- a/drivers/clocksource/timer-atlas7.c
+++ b/drivers/clocksource/timer-atlas7.c
@@ -238,7 +238,7 @@ static struct notifier_block sirfsoc_cpu_nb = {
238 .notifier_call = sirfsoc_cpu_notify, 238 .notifier_call = sirfsoc_cpu_notify,
239}; 239};
240 240
241static void __init sirfsoc_clockevent_init(void) 241static int __init sirfsoc_clockevent_init(void)
242{ 242{
243 sirfsoc_clockevent = alloc_percpu(struct clock_event_device); 243 sirfsoc_clockevent = alloc_percpu(struct clock_event_device);
244 BUG_ON(!sirfsoc_clockevent); 244 BUG_ON(!sirfsoc_clockevent);
@@ -246,11 +246,11 @@ static void __init sirfsoc_clockevent_init(void)
246 BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb)); 246 BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb));
247 247
248 /* Immediately configure the timer on the boot CPU */ 248 /* Immediately configure the timer on the boot CPU */
249 sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent)); 249 return sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
250} 250}
251 251
252/* initialize the kernel jiffy timer source */ 252/* initialize the kernel jiffy timer source */
253static void __init sirfsoc_atlas7_timer_init(struct device_node *np) 253static int __init sirfsoc_atlas7_timer_init(struct device_node *np)
254{ 254{
255 struct clk *clk; 255 struct clk *clk;
256 256
@@ -279,23 +279,29 @@ static void __init sirfsoc_atlas7_timer_init(struct device_node *np)
279 279
280 BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, atlas7_timer_rate)); 280 BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, atlas7_timer_rate));
281 281
282 sirfsoc_clockevent_init(); 282 return sirfsoc_clockevent_init();
283} 283}
284 284
285static void __init sirfsoc_of_timer_init(struct device_node *np) 285static int __init sirfsoc_of_timer_init(struct device_node *np)
286{ 286{
287 sirfsoc_timer_base = of_iomap(np, 0); 287 sirfsoc_timer_base = of_iomap(np, 0);
288 if (!sirfsoc_timer_base) 288 if (!sirfsoc_timer_base) {
289 panic("unable to map timer cpu registers\n"); 289 pr_err("unable to map timer cpu registers\n");
290 return -ENXIO;
291 }
290 292
291 sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0); 293 sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
292 if (!sirfsoc_timer_irq.irq) 294 if (!sirfsoc_timer_irq.irq) {
293 panic("No irq passed for timer0 via DT\n"); 295 pr_err("No irq passed for timer0 via DT\n");
296 return -EINVAL;
297 }
294 298
295 sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1); 299 sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1);
296 if (!sirfsoc_timer1_irq.irq) 300 if (!sirfsoc_timer1_irq.irq) {
297 panic("No irq passed for timer1 via DT\n"); 301 pr_err("No irq passed for timer1 via DT\n");
302 return -EINVAL;
303 }
298 304
299 sirfsoc_atlas7_timer_init(np); 305 return sirfsoc_atlas7_timer_init(np);
300} 306}
301CLOCKSOURCE_OF_DECLARE(sirfsoc_atlas7_timer, "sirf,atlas7-tick", sirfsoc_of_timer_init); 307CLOCKSOURCE_OF_DECLARE(sirfsoc_atlas7_timer, "sirf,atlas7-tick", sirfsoc_of_timer_init);
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index d911c5dca8f1..1ffac0cb0cb7 100644
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -177,7 +177,7 @@ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id)
177/* 177/*
178 * Set up both clocksource and clockevent support. 178 * Set up both clocksource and clockevent support.
179 */ 179 */
180static void __init at91sam926x_pit_common_init(struct pit_data *data) 180static int __init at91sam926x_pit_common_init(struct pit_data *data)
181{ 181{
182 unsigned long pit_rate; 182 unsigned long pit_rate;
183 unsigned bits; 183 unsigned bits;
@@ -204,14 +204,21 @@ static void __init at91sam926x_pit_common_init(struct pit_data *data)
204 data->clksrc.rating = 175; 204 data->clksrc.rating = 175;
205 data->clksrc.read = read_pit_clk; 205 data->clksrc.read = read_pit_clk;
206 data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS; 206 data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
207 clocksource_register_hz(&data->clksrc, pit_rate); 207
208 ret = clocksource_register_hz(&data->clksrc, pit_rate);
209 if (ret) {
 210 pr_err("Failed to register clocksource\n");
211 return ret;
212 }
208 213
209 /* Set up irq handler */ 214 /* Set up irq handler */
210 ret = request_irq(data->irq, at91sam926x_pit_interrupt, 215 ret = request_irq(data->irq, at91sam926x_pit_interrupt,
211 IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, 216 IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
212 "at91_tick", data); 217 "at91_tick", data);
213 if (ret) 218 if (ret) {
214 panic(pr_fmt("Unable to setup IRQ\n")); 219 pr_err("Unable to setup IRQ\n");
220 return ret;
221 }
215 222
216 /* Set up and register clockevents */ 223 /* Set up and register clockevents */
217 data->clkevt.name = "pit"; 224 data->clkevt.name = "pit";
@@ -226,34 +233,42 @@ static void __init at91sam926x_pit_common_init(struct pit_data *data)
226 data->clkevt.resume = at91sam926x_pit_resume; 233 data->clkevt.resume = at91sam926x_pit_resume;
227 data->clkevt.suspend = at91sam926x_pit_suspend; 234 data->clkevt.suspend = at91sam926x_pit_suspend;
228 clockevents_register_device(&data->clkevt); 235 clockevents_register_device(&data->clkevt);
236
237 return 0;
229} 238}
230 239
231static void __init at91sam926x_pit_dt_init(struct device_node *node) 240static int __init at91sam926x_pit_dt_init(struct device_node *node)
232{ 241{
233 struct pit_data *data; 242 struct pit_data *data;
234 243
235 data = kzalloc(sizeof(*data), GFP_KERNEL); 244 data = kzalloc(sizeof(*data), GFP_KERNEL);
236 if (!data) 245 if (!data)
237 panic(pr_fmt("Unable to allocate memory\n")); 246 return -ENOMEM;
238 247
239 data->base = of_iomap(node, 0); 248 data->base = of_iomap(node, 0);
240 if (!data->base) 249 if (!data->base) {
241 panic(pr_fmt("Could not map PIT address\n")); 250 pr_err("Could not map PIT address\n");
251 return -ENXIO;
252 }
242 253
243 data->mck = of_clk_get(node, 0); 254 data->mck = of_clk_get(node, 0);
244 if (IS_ERR(data->mck)) 255 if (IS_ERR(data->mck))
245 /* Fallback on clkdev for !CCF-based boards */ 256 /* Fallback on clkdev for !CCF-based boards */
246 data->mck = clk_get(NULL, "mck"); 257 data->mck = clk_get(NULL, "mck");
247 258
248 if (IS_ERR(data->mck)) 259 if (IS_ERR(data->mck)) {
249 panic(pr_fmt("Unable to get mck clk\n")); 260 pr_err("Unable to get mck clk\n");
261 return PTR_ERR(data->mck);
262 }
250 263
251 /* Get the interrupts property */ 264 /* Get the interrupts property */
252 data->irq = irq_of_parse_and_map(node, 0); 265 data->irq = irq_of_parse_and_map(node, 0);
253 if (!data->irq) 266 if (!data->irq) {
254 panic(pr_fmt("Unable to get IRQ from DT\n")); 267 pr_err("Unable to get IRQ from DT\n");
268 return -EINVAL;
269 }
255 270
256 at91sam926x_pit_common_init(data); 271 return at91sam926x_pit_common_init(data);
257} 272}
258CLOCKSOURCE_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit", 273CLOCKSOURCE_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
259 at91sam926x_pit_dt_init); 274 at91sam926x_pit_dt_init);
diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c
index 29d21d68df5a..e90ab5b63a90 100644
--- a/drivers/clocksource/timer-atmel-st.c
+++ b/drivers/clocksource/timer-atmel-st.c
@@ -194,15 +194,17 @@ static struct clock_event_device clkevt = {
194/* 194/*
195 * ST (system timer) module supports both clockevents and clocksource. 195 * ST (system timer) module supports both clockevents and clocksource.
196 */ 196 */
197static void __init atmel_st_timer_init(struct device_node *node) 197static int __init atmel_st_timer_init(struct device_node *node)
198{ 198{
199 struct clk *sclk; 199 struct clk *sclk;
200 unsigned int sclk_rate, val; 200 unsigned int sclk_rate, val;
201 int irq, ret; 201 int irq, ret;
202 202
203 regmap_st = syscon_node_to_regmap(node); 203 regmap_st = syscon_node_to_regmap(node);
204 if (IS_ERR(regmap_st)) 204 if (IS_ERR(regmap_st)) {
205 panic(pr_fmt("Unable to get regmap\n")); 205 pr_err("Unable to get regmap\n");
206 return PTR_ERR(regmap_st);
207 }
206 208
207 /* Disable all timer interrupts, and clear any pending ones */ 209 /* Disable all timer interrupts, and clear any pending ones */
208 regmap_write(regmap_st, AT91_ST_IDR, 210 regmap_write(regmap_st, AT91_ST_IDR,
@@ -211,27 +213,37 @@ static void __init atmel_st_timer_init(struct device_node *node)
211 213
212 /* Get the interrupts property */ 214 /* Get the interrupts property */
213 irq = irq_of_parse_and_map(node, 0); 215 irq = irq_of_parse_and_map(node, 0);
214 if (!irq) 216 if (!irq) {
215 panic(pr_fmt("Unable to get IRQ from DT\n")); 217 pr_err("Unable to get IRQ from DT\n");
218 return -EINVAL;
219 }
216 220
217 /* Make IRQs happen for the system timer */ 221 /* Make IRQs happen for the system timer */
218 ret = request_irq(irq, at91rm9200_timer_interrupt, 222 ret = request_irq(irq, at91rm9200_timer_interrupt,
219 IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, 223 IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
220 "at91_tick", regmap_st); 224 "at91_tick", regmap_st);
221 if (ret) 225 if (ret) {
222 panic(pr_fmt("Unable to setup IRQ\n")); 226 pr_err("Unable to setup IRQ\n");
227 return ret;
228 }
223 229
224 sclk = of_clk_get(node, 0); 230 sclk = of_clk_get(node, 0);
225 if (IS_ERR(sclk)) 231 if (IS_ERR(sclk)) {
226 panic(pr_fmt("Unable to get slow clock\n")); 232 pr_err("Unable to get slow clock\n");
233 return PTR_ERR(sclk);
234 }
227 235
228 clk_prepare_enable(sclk); 236 ret = clk_prepare_enable(sclk);
229 if (ret) 237 if (ret) {
230 panic(pr_fmt("Could not enable slow clock\n")); 238 pr_err("Could not enable slow clock\n");
239 return ret;
240 }
231 241
232 sclk_rate = clk_get_rate(sclk); 242 sclk_rate = clk_get_rate(sclk);
233 if (!sclk_rate) 243 if (!sclk_rate) {
234 panic(pr_fmt("Invalid slow clock rate\n")); 244 pr_err("Invalid slow clock rate\n");
245 return -EINVAL;
246 }
235 timer_latch = (sclk_rate + HZ / 2) / HZ; 247 timer_latch = (sclk_rate + HZ / 2) / HZ;
236 248
237 /* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used 249 /* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
@@ -246,7 +258,7 @@ static void __init atmel_st_timer_init(struct device_node *node)
246 2, AT91_ST_ALMV); 258 2, AT91_ST_ALMV);
247 259
248 /* register clocksource */ 260 /* register clocksource */
249 clocksource_register_hz(&clk32k, sclk_rate); 261 return clocksource_register_hz(&clk32k, sclk_rate);
250} 262}
251CLOCKSOURCE_OF_DECLARE(atmel_st_timer, "atmel,at91rm9200-st", 263CLOCKSOURCE_OF_DECLARE(atmel_st_timer, "atmel,at91rm9200-st",
252 atmel_st_timer_init); 264 atmel_st_timer_init);
diff --git a/drivers/clocksource/timer-digicolor.c b/drivers/clocksource/timer-digicolor.c
index a536eeb634d8..10318cc99c0e 100644
--- a/drivers/clocksource/timer-digicolor.c
+++ b/drivers/clocksource/timer-digicolor.c
@@ -63,7 +63,7 @@ struct digicolor_timer {
63 int timer_id; /* one of TIMER_* */ 63 int timer_id; /* one of TIMER_* */
64}; 64};
65 65
66struct digicolor_timer *dc_timer(struct clock_event_device *ce) 66static struct digicolor_timer *dc_timer(struct clock_event_device *ce)
67{ 67{
68 return container_of(ce, struct digicolor_timer, ce); 68 return container_of(ce, struct digicolor_timer, ce);
69} 69}
@@ -148,7 +148,7 @@ static u64 notrace digicolor_timer_sched_read(void)
148 return ~readl(dc_timer_dev.base + COUNT(TIMER_B)); 148 return ~readl(dc_timer_dev.base + COUNT(TIMER_B));
149} 149}
150 150
151static void __init digicolor_timer_init(struct device_node *node) 151static int __init digicolor_timer_init(struct device_node *node)
152{ 152{
153 unsigned long rate; 153 unsigned long rate;
154 struct clk *clk; 154 struct clk *clk;
@@ -161,19 +161,19 @@ static void __init digicolor_timer_init(struct device_node *node)
161 dc_timer_dev.base = of_iomap(node, 0); 161 dc_timer_dev.base = of_iomap(node, 0);
162 if (!dc_timer_dev.base) { 162 if (!dc_timer_dev.base) {
163 pr_err("Can't map registers"); 163 pr_err("Can't map registers");
164 return; 164 return -ENXIO;
165 } 165 }
166 166
167 irq = irq_of_parse_and_map(node, dc_timer_dev.timer_id); 167 irq = irq_of_parse_and_map(node, dc_timer_dev.timer_id);
168 if (irq <= 0) { 168 if (irq <= 0) {
169 pr_err("Can't parse IRQ"); 169 pr_err("Can't parse IRQ");
170 return; 170 return -EINVAL;
171 } 171 }
172 172
173 clk = of_clk_get(node, 0); 173 clk = of_clk_get(node, 0);
174 if (IS_ERR(clk)) { 174 if (IS_ERR(clk)) {
175 pr_err("Can't get timer clock"); 175 pr_err("Can't get timer clock");
176 return; 176 return PTR_ERR(clk);
177 } 177 }
178 clk_prepare_enable(clk); 178 clk_prepare_enable(clk);
179 rate = clk_get_rate(clk); 179 rate = clk_get_rate(clk);
@@ -190,13 +190,17 @@ static void __init digicolor_timer_init(struct device_node *node)
190 ret = request_irq(irq, digicolor_timer_interrupt, 190 ret = request_irq(irq, digicolor_timer_interrupt,
191 IRQF_TIMER | IRQF_IRQPOLL, "digicolor_timerC", 191 IRQF_TIMER | IRQF_IRQPOLL, "digicolor_timerC",
192 &dc_timer_dev.ce); 192 &dc_timer_dev.ce);
193 if (ret) 193 if (ret) {
194 pr_warn("request of timer irq %d failed (%d)\n", irq, ret); 194 pr_warn("request of timer irq %d failed (%d)\n", irq, ret);
195 return ret;
196 }
195 197
196 dc_timer_dev.ce.cpumask = cpu_possible_mask; 198 dc_timer_dev.ce.cpumask = cpu_possible_mask;
197 dc_timer_dev.ce.irq = irq; 199 dc_timer_dev.ce.irq = irq;
198 200
199 clockevents_config_and_register(&dc_timer_dev.ce, rate, 0, 0xffffffff); 201 clockevents_config_and_register(&dc_timer_dev.ce, rate, 0, 0xffffffff);
202
203 return 0;
200} 204}
201CLOCKSOURCE_OF_DECLARE(conexant_digicolor, "cnxt,cx92755-timer", 205CLOCKSOURCE_OF_DECLARE(conexant_digicolor, "cnxt,cx92755-timer",
202 digicolor_timer_init); 206 digicolor_timer_init);
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 99ec96769dda..f595460bfc58 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -407,8 +407,10 @@ static const struct imx_gpt_data imx6dl_gpt_data = {
407 .set_next_event = v2_set_next_event, 407 .set_next_event = v2_set_next_event,
408}; 408};
409 409
410static void __init _mxc_timer_init(struct imx_timer *imxtm) 410static int __init _mxc_timer_init(struct imx_timer *imxtm)
411{ 411{
412 int ret;
413
412 switch (imxtm->type) { 414 switch (imxtm->type) {
413 case GPT_TYPE_IMX1: 415 case GPT_TYPE_IMX1:
414 imxtm->gpt = &imx1_gpt_data; 416 imxtm->gpt = &imx1_gpt_data;
@@ -423,12 +425,12 @@ static void __init _mxc_timer_init(struct imx_timer *imxtm)
423 imxtm->gpt = &imx6dl_gpt_data; 425 imxtm->gpt = &imx6dl_gpt_data;
424 break; 426 break;
425 default: 427 default:
426 BUG(); 428 return -EINVAL;
427 } 429 }
428 430
429 if (IS_ERR(imxtm->clk_per)) { 431 if (IS_ERR(imxtm->clk_per)) {
430 pr_err("i.MX timer: unable to get clk\n"); 432 pr_err("i.MX timer: unable to get clk\n");
431 return; 433 return PTR_ERR(imxtm->clk_per);
432 } 434 }
433 435
434 if (!IS_ERR(imxtm->clk_ipg)) 436 if (!IS_ERR(imxtm->clk_ipg))
@@ -446,8 +448,11 @@ static void __init _mxc_timer_init(struct imx_timer *imxtm)
446 imxtm->gpt->gpt_setup_tctl(imxtm); 448 imxtm->gpt->gpt_setup_tctl(imxtm);
447 449
448 /* init and register the timer to the framework */ 450 /* init and register the timer to the framework */
449 mxc_clocksource_init(imxtm); 451 ret = mxc_clocksource_init(imxtm);
450 mxc_clockevent_init(imxtm); 452 if (ret)
453 return ret;
454
455 return mxc_clockevent_init(imxtm);
451} 456}
452 457
453void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type) 458void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
@@ -469,21 +474,27 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
469 _mxc_timer_init(imxtm); 474 _mxc_timer_init(imxtm);
470} 475}
471 476
472static void __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type) 477static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
473{ 478{
474 struct imx_timer *imxtm; 479 struct imx_timer *imxtm;
475 static int initialized; 480 static int initialized;
481 int ret;
476 482
477 /* Support one instance only */ 483 /* Support one instance only */
478 if (initialized) 484 if (initialized)
479 return; 485 return 0;
480 486
481 imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL); 487 imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
482 BUG_ON(!imxtm); 488 if (!imxtm)
489 return -ENOMEM;
483 490
484 imxtm->base = of_iomap(np, 0); 491 imxtm->base = of_iomap(np, 0);
485 WARN_ON(!imxtm->base); 492 if (!imxtm->base)
493 return -ENXIO;
494
486 imxtm->irq = irq_of_parse_and_map(np, 0); 495 imxtm->irq = irq_of_parse_and_map(np, 0);
496 if (imxtm->irq <= 0)
497 return -EINVAL;
487 498
488 imxtm->clk_ipg = of_clk_get_by_name(np, "ipg"); 499 imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");
489 500
@@ -494,22 +505,26 @@ static void __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type
494 505
495 imxtm->type = type; 506 imxtm->type = type;
496 507
497 _mxc_timer_init(imxtm); 508 ret = _mxc_timer_init(imxtm);
509 if (ret)
510 return ret;
498 511
499 initialized = 1; 512 initialized = 1;
513
514 return 0;
500} 515}
501 516
502static void __init imx1_timer_init_dt(struct device_node *np) 517static int __init imx1_timer_init_dt(struct device_node *np)
503{ 518{
504 mxc_timer_init_dt(np, GPT_TYPE_IMX1); 519 return mxc_timer_init_dt(np, GPT_TYPE_IMX1);
505} 520}
506 521
507static void __init imx21_timer_init_dt(struct device_node *np) 522static int __init imx21_timer_init_dt(struct device_node *np)
508{ 523{
509 mxc_timer_init_dt(np, GPT_TYPE_IMX21); 524 return mxc_timer_init_dt(np, GPT_TYPE_IMX21);
510} 525}
511 526
512static void __init imx31_timer_init_dt(struct device_node *np) 527static int __init imx31_timer_init_dt(struct device_node *np)
513{ 528{
514 enum imx_gpt_type type = GPT_TYPE_IMX31; 529 enum imx_gpt_type type = GPT_TYPE_IMX31;
515 530
@@ -522,12 +537,12 @@ static void __init imx31_timer_init_dt(struct device_node *np)
522 if (of_machine_is_compatible("fsl,imx6dl")) 537 if (of_machine_is_compatible("fsl,imx6dl"))
523 type = GPT_TYPE_IMX6DL; 538 type = GPT_TYPE_IMX6DL;
524 539
525 mxc_timer_init_dt(np, type); 540 return mxc_timer_init_dt(np, type);
526} 541}
527 542
528static void __init imx6dl_timer_init_dt(struct device_node *np) 543static int __init imx6dl_timer_init_dt(struct device_node *np)
529{ 544{
530 mxc_timer_init_dt(np, GPT_TYPE_IMX6DL); 545 return mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
531} 546}
532 547
533CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt); 548CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
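
The i.MX path can match several GPT nodes, but only one instance may drive the tick; after the conversion a second match is treated as success rather than an error. The guard, reduced to a sketch (qux names hypothetical):

	static int qux_timer_setup(struct device_node *np);	/* hypothetical per-node setup */

	static int __init qux_timer_init_dt(struct device_node *np)
	{
		static int initialized;
		int ret;

		/* only one timer instance drives the tick; ignore later matches */
		if (initialized)
			return 0;

		ret = qux_timer_setup(np);
		if (ret)
			return ret;

		initialized = 1;
		return 0;
	}
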
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
index 3f59ac2180dc..df6e672afc04 100644
--- a/drivers/clocksource/timer-integrator-ap.c
+++ b/drivers/clocksource/timer-integrator-ap.c
@@ -36,11 +36,12 @@ static u64 notrace integrator_read_sched_clock(void)
36 return -readl(sched_clk_base + TIMER_VALUE); 36 return -readl(sched_clk_base + TIMER_VALUE);
37} 37}
38 38
39static void integrator_clocksource_init(unsigned long inrate, 39static int integrator_clocksource_init(unsigned long inrate,
40 void __iomem *base) 40 void __iomem *base)
41{ 41{
42 u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC; 42 u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
43 unsigned long rate = inrate; 43 unsigned long rate = inrate;
44 int ret;
44 45
45 if (rate >= 1500000) { 46 if (rate >= 1500000) {
46 rate /= 16; 47 rate /= 16;
@@ -50,11 +51,15 @@ static void integrator_clocksource_init(unsigned long inrate,
50 writel(0xffff, base + TIMER_LOAD); 51 writel(0xffff, base + TIMER_LOAD);
51 writel(ctrl, base + TIMER_CTRL); 52 writel(ctrl, base + TIMER_CTRL);
52 53
53 clocksource_mmio_init(base + TIMER_VALUE, "timer2", 54 ret = clocksource_mmio_init(base + TIMER_VALUE, "timer2",
54 rate, 200, 16, clocksource_mmio_readl_down); 55 rate, 200, 16, clocksource_mmio_readl_down);
56 if (ret)
57 return ret;
55 58
56 sched_clk_base = base; 59 sched_clk_base = base;
57 sched_clock_register(integrator_read_sched_clock, 16, rate); 60 sched_clock_register(integrator_read_sched_clock, 16, rate);
61
62 return 0;
58} 63}
59 64
60static unsigned long timer_reload; 65static unsigned long timer_reload;
@@ -138,11 +143,12 @@ static struct irqaction integrator_timer_irq = {
138 .dev_id = &integrator_clockevent, 143 .dev_id = &integrator_clockevent,
139}; 144};
140 145
141static void integrator_clockevent_init(unsigned long inrate, 146static int integrator_clockevent_init(unsigned long inrate,
142 void __iomem *base, int irq) 147 void __iomem *base, int irq)
143{ 148{
144 unsigned long rate = inrate; 149 unsigned long rate = inrate;
145 unsigned int ctrl = 0; 150 unsigned int ctrl = 0;
151 int ret;
146 152
147 clkevt_base = base; 153 clkevt_base = base;
148 /* Calculate and program a divisor */ 154 /* Calculate and program a divisor */
@@ -156,14 +162,18 @@ static void integrator_clockevent_init(unsigned long inrate,
156 timer_reload = rate / HZ; 162 timer_reload = rate / HZ;
157 writel(ctrl, clkevt_base + TIMER_CTRL); 163 writel(ctrl, clkevt_base + TIMER_CTRL);
158 164
159 setup_irq(irq, &integrator_timer_irq); 165 ret = setup_irq(irq, &integrator_timer_irq);
166 if (ret)
167 return ret;
168
160 clockevents_config_and_register(&integrator_clockevent, 169 clockevents_config_and_register(&integrator_clockevent,
161 rate, 170 rate,
162 1, 171 1,
163 0xffffU); 172 0xffffU);
173 return 0;
164} 174}
165 175
166static void __init integrator_ap_timer_init_of(struct device_node *node) 176static int __init integrator_ap_timer_init_of(struct device_node *node)
167{ 177{
168 const char *path; 178 const char *path;
169 void __iomem *base; 179 void __iomem *base;
@@ -176,12 +186,12 @@ static void __init integrator_ap_timer_init_of(struct device_node *node)
176 186
177 base = of_io_request_and_map(node, 0, "integrator-timer"); 187 base = of_io_request_and_map(node, 0, "integrator-timer");
178 if (IS_ERR(base)) 188 if (IS_ERR(base))
179 return; 189 return PTR_ERR(base);
180 190
181 clk = of_clk_get(node, 0); 191 clk = of_clk_get(node, 0);
182 if (IS_ERR(clk)) { 192 if (IS_ERR(clk)) {
183 pr_err("No clock for %s\n", node->name); 193 pr_err("No clock for %s\n", node->name);
184 return; 194 return PTR_ERR(clk);
185 } 195 }
186 clk_prepare_enable(clk); 196 clk_prepare_enable(clk);
187 rate = clk_get_rate(clk); 197 rate = clk_get_rate(clk);
@@ -189,30 +199,37 @@ static void __init integrator_ap_timer_init_of(struct device_node *node)
189 199
190 err = of_property_read_string(of_aliases, 200 err = of_property_read_string(of_aliases,
191 "arm,timer-primary", &path); 201 "arm,timer-primary", &path);
192 if (WARN_ON(err)) 202 if (err) {
 193 return; 203 pr_warn("Failed to read arm,timer-primary property\n");
204 return err;
205 }
206
194 pri_node = of_find_node_by_path(path); 207 pri_node = of_find_node_by_path(path);
208
195 err = of_property_read_string(of_aliases, 209 err = of_property_read_string(of_aliases,
196 "arm,timer-secondary", &path); 210 "arm,timer-secondary", &path);
197 if (WARN_ON(err)) 211 if (err) {
198 return; 212 pr_warn("Failed to read property");
213 return err;
214 }
215
216
199 sec_node = of_find_node_by_path(path); 217 sec_node = of_find_node_by_path(path);
200 218
201 if (node == pri_node) { 219 if (node == pri_node)
202 /* The primary timer lacks IRQ, use as clocksource */ 220 /* The primary timer lacks IRQ, use as clocksource */
203 integrator_clocksource_init(rate, base); 221 return integrator_clocksource_init(rate, base);
204 return;
205 }
206 222
207 if (node == sec_node) { 223 if (node == sec_node) {
208 /* The secondary timer will drive the clock event */ 224 /* The secondary timer will drive the clock event */
209 irq = irq_of_parse_and_map(node, 0); 225 irq = irq_of_parse_and_map(node, 0);
210 integrator_clockevent_init(rate, base, irq); 226 return integrator_clockevent_init(rate, base, irq);
211 return;
212 } 227 }
213 228
214 pr_info("Timer @%p unused\n", base); 229 pr_info("Timer @%p unused\n", base);
215 clk_disable_unprepare(clk); 230 clk_disable_unprepare(clk);
231
232 return 0;
216} 233}
217 234
218CLOCKSOURCE_OF_DECLARE(integrator_ap_timer, "arm,integrator-timer", 235CLOCKSOURCE_OF_DECLARE(integrator_ap_timer, "arm,integrator-timer",
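
Integrator/AP carries two identical timers and derives their roles from /aliases; the conversion turns a missing alias from a WARN-and-return into a reported errno. The selection logic, roughly (my_clocksource_init is a stand-in for the driver's clocksource setup):

	static int my_clocksource_init(unsigned long rate, void __iomem *base);	/* hypothetical */

	static int __init pick_timer_role(struct device_node *node,
					  unsigned long rate, void __iomem *base)
	{
		const char *path;
		int err;

		/* /aliases names the node that should become the clocksource */
		err = of_property_read_string(of_aliases, "arm,timer-primary", &path);
		if (err) {
			pr_warn("Failed to read arm,timer-primary property\n");
			return err;
		}

		if (node == of_find_node_by_path(path))
			return my_clocksource_init(rate, base);

		return 0;	/* not the primary timer; caller checks the secondary alias */
	}
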
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
index 1cea08cf603e..ab68a47ab3b4 100644
--- a/drivers/clocksource/timer-keystone.c
+++ b/drivers/clocksource/timer-keystone.c
@@ -144,7 +144,7 @@ static int keystone_set_periodic(struct clock_event_device *evt)
144 return 0; 144 return 0;
145} 145}
146 146
147static void __init keystone_timer_init(struct device_node *np) 147static int __init keystone_timer_init(struct device_node *np)
148{ 148{
149 struct clock_event_device *event_dev = &timer.event_dev; 149 struct clock_event_device *event_dev = &timer.event_dev;
150 unsigned long rate; 150 unsigned long rate;
@@ -154,20 +154,20 @@ static void __init keystone_timer_init(struct device_node *np)
154 irq = irq_of_parse_and_map(np, 0); 154 irq = irq_of_parse_and_map(np, 0);
155 if (!irq) { 155 if (!irq) {
156 pr_err("%s: failed to map interrupts\n", __func__); 156 pr_err("%s: failed to map interrupts\n", __func__);
157 return; 157 return -EINVAL;
158 } 158 }
159 159
160 timer.base = of_iomap(np, 0); 160 timer.base = of_iomap(np, 0);
161 if (!timer.base) { 161 if (!timer.base) {
162 pr_err("%s: failed to map registers\n", __func__); 162 pr_err("%s: failed to map registers\n", __func__);
163 return; 163 return -ENXIO;
164 } 164 }
165 165
166 clk = of_clk_get(np, 0); 166 clk = of_clk_get(np, 0);
167 if (IS_ERR(clk)) { 167 if (IS_ERR(clk)) {
168 pr_err("%s: failed to get clock\n", __func__); 168 pr_err("%s: failed to get clock\n", __func__);
169 iounmap(timer.base); 169 iounmap(timer.base);
170 return; 170 return PTR_ERR(clk);
171 } 171 }
172 172
173 error = clk_prepare_enable(clk); 173 error = clk_prepare_enable(clk);
@@ -219,11 +219,12 @@ static void __init keystone_timer_init(struct device_node *np)
219 clockevents_config_and_register(event_dev, rate, 1, ULONG_MAX); 219 clockevents_config_and_register(event_dev, rate, 1, ULONG_MAX);
220 220
221 pr_info("keystone timer clock @%lu Hz\n", rate); 221 pr_info("keystone timer clock @%lu Hz\n", rate);
222 return; 222 return 0;
223err: 223err:
224 clk_put(clk); 224 clk_put(clk);
225 iounmap(timer.base); 225 iounmap(timer.base);
226 return error;
226} 227}
227 228
228CLOCKSOURCE_OF_DECLARE(keystone_timer, "ti,keystone-timer", 229CLOCKSOURCE_OF_DECLARE(keystone_timer, "ti,keystone-timer",
229 keystone_timer_init); 230 keystone_timer_init);
diff --git a/drivers/clocksource/timer-nps.c b/drivers/clocksource/timer-nps.c
index d46108920b2c..70c149af8ee0 100644
--- a/drivers/clocksource/timer-nps.c
+++ b/drivers/clocksource/timer-nps.c
@@ -55,8 +55,8 @@ static cycle_t nps_clksrc_read(struct clocksource *clksrc)
55 return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]); 55 return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]);
56} 56}
57 57
58static void __init nps_setup_clocksource(struct device_node *node, 58static int __init nps_setup_clocksource(struct device_node *node,
59 struct clk *clk) 59 struct clk *clk)
60{ 60{
61 int ret, cluster; 61 int ret, cluster;
62 62
@@ -68,7 +68,7 @@ static void __init nps_setup_clocksource(struct device_node *node,
68 ret = clk_prepare_enable(clk); 68 ret = clk_prepare_enable(clk);
69 if (ret) { 69 if (ret) {
70 pr_err("Couldn't enable parent clock\n"); 70 pr_err("Couldn't enable parent clock\n");
71 return; 71 return ret;
72 } 72 }
73 73
74 nps_timer_rate = clk_get_rate(clk); 74 nps_timer_rate = clk_get_rate(clk);
@@ -79,19 +79,21 @@ static void __init nps_setup_clocksource(struct device_node *node,
79 pr_err("Couldn't register clock source.\n"); 79 pr_err("Couldn't register clock source.\n");
80 clk_disable_unprepare(clk); 80 clk_disable_unprepare(clk);
81 } 81 }
82
83 return ret;
82} 84}
83 85
84static void __init nps_timer_init(struct device_node *node) 86static int __init nps_timer_init(struct device_node *node)
85{ 87{
86 struct clk *clk; 88 struct clk *clk;
87 89
88 clk = of_clk_get(node, 0); 90 clk = of_clk_get(node, 0);
89 if (IS_ERR(clk)) { 91 if (IS_ERR(clk)) {
90 pr_err("Can't get timer clock.\n"); 92 pr_err("Can't get timer clock.\n");
91 return; 93 return PTR_ERR(clk);
92 } 94 }
93 95
94 nps_setup_clocksource(node, clk); 96 return nps_setup_clocksource(node, clk);
95} 97}
96 98
97CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer", 99CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c
new file mode 100644
index 000000000000..bd887e2a8cf8
--- /dev/null
+++ b/drivers/clocksource/timer-oxnas-rps.c
@@ -0,0 +1,297 @@
1/*
2 * drivers/clocksource/timer-oxnas-rps.c
3 *
4 * Copyright (C) 2009 Oxford Semiconductor Ltd
5 * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
6 * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23#include <linux/init.h>
24#include <linux/irq.h>
25#include <linux/io.h>
26#include <linux/clk.h>
27#include <linux/slab.h>
28#include <linux/interrupt.h>
29#include <linux/of_irq.h>
30#include <linux/of_address.h>
31#include <linux/clockchips.h>
32#include <linux/sched_clock.h>
33
34/* TIMER1 used as tick
35 * TIMER2 used as clocksource
36 */
37
38/* Registers definitions */
39
40#define TIMER_LOAD_REG 0x0
41#define TIMER_CURR_REG 0x4
42#define TIMER_CTRL_REG 0x8
43#define TIMER_CLRINT_REG 0xC
44
45#define TIMER_BITS 24
46
47#define TIMER_MAX_VAL (BIT(TIMER_BITS) - 1)
48
49#define TIMER_PERIODIC BIT(6)
50#define TIMER_ENABLE BIT(7)
51
52#define TIMER_DIV1 (0)
53#define TIMER_DIV16 (1 << 2)
54#define TIMER_DIV256 (2 << 2)
55
56#define TIMER1_REG_OFFSET 0
57#define TIMER2_REG_OFFSET 0x20
58
59/* Clockevent & Clocksource data */
60
61struct oxnas_rps_timer {
62 struct clock_event_device clkevent;
63 void __iomem *clksrc_base;
64 void __iomem *clkevt_base;
65 unsigned long timer_period;
66 unsigned int timer_prescaler;
67 struct clk *clk;
68 int irq;
69};
70
71static irqreturn_t oxnas_rps_timer_irq(int irq, void *dev_id)
72{
73 struct oxnas_rps_timer *rps = dev_id;
74
75 writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG);
76
77 rps->clkevent.event_handler(&rps->clkevent);
78
79 return IRQ_HANDLED;
80}
81
82static void oxnas_rps_timer_config(struct oxnas_rps_timer *rps,
83 unsigned long period,
84 unsigned int periodic)
85{
86 uint32_t cfg = rps->timer_prescaler;
87
88 if (period)
89 cfg |= TIMER_ENABLE;
90
91 if (periodic)
92 cfg |= TIMER_PERIODIC;
93
94 writel_relaxed(period, rps->clkevt_base + TIMER_LOAD_REG);
95 writel_relaxed(cfg, rps->clkevt_base + TIMER_CTRL_REG);
96}
97
98static int oxnas_rps_timer_shutdown(struct clock_event_device *evt)
99{
100 struct oxnas_rps_timer *rps =
101 container_of(evt, struct oxnas_rps_timer, clkevent);
102
103 oxnas_rps_timer_config(rps, 0, 0);
104
105 return 0;
106}
107
108static int oxnas_rps_timer_set_periodic(struct clock_event_device *evt)
109{
110 struct oxnas_rps_timer *rps =
111 container_of(evt, struct oxnas_rps_timer, clkevent);
112
113 oxnas_rps_timer_config(rps, rps->timer_period, 1);
114
115 return 0;
116}
117
118static int oxnas_rps_timer_set_oneshot(struct clock_event_device *evt)
119{
120 struct oxnas_rps_timer *rps =
121 container_of(evt, struct oxnas_rps_timer, clkevent);
122
123 oxnas_rps_timer_config(rps, rps->timer_period, 0);
124
125 return 0;
126}
127
128static int oxnas_rps_timer_next_event(unsigned long delta,
129 struct clock_event_device *evt)
130{
131 struct oxnas_rps_timer *rps =
132 container_of(evt, struct oxnas_rps_timer, clkevent);
133
134 oxnas_rps_timer_config(rps, delta, 0);
135
136 return 0;
137}
138
139static int __init oxnas_rps_clockevent_init(struct oxnas_rps_timer *rps)
140{
141 ulong clk_rate = clk_get_rate(rps->clk);
142 ulong timer_rate;
143
144 /* Start with prescaler 1 */
145 rps->timer_prescaler = TIMER_DIV1;
146 rps->timer_period = DIV_ROUND_UP(clk_rate, HZ);
147 timer_rate = clk_rate;
148
149 if (rps->timer_period > TIMER_MAX_VAL) {
150 rps->timer_prescaler = TIMER_DIV16;
151 timer_rate = clk_rate / 16;
152 rps->timer_period = DIV_ROUND_UP(timer_rate, HZ);
153 }
154 if (rps->timer_period > TIMER_MAX_VAL) {
155 rps->timer_prescaler = TIMER_DIV256;
156 timer_rate = clk_rate / 256;
157 rps->timer_period = DIV_ROUND_UP(timer_rate, HZ);
158 }
159
160 rps->clkevent.name = "oxnas-rps";
161 rps->clkevent.features = CLOCK_EVT_FEAT_PERIODIC |
162 CLOCK_EVT_FEAT_ONESHOT |
163 CLOCK_EVT_FEAT_DYNIRQ;
164 rps->clkevent.tick_resume = oxnas_rps_timer_shutdown;
165 rps->clkevent.set_state_shutdown = oxnas_rps_timer_shutdown;
166 rps->clkevent.set_state_periodic = oxnas_rps_timer_set_periodic;
167 rps->clkevent.set_state_oneshot = oxnas_rps_timer_set_oneshot;
168 rps->clkevent.set_next_event = oxnas_rps_timer_next_event;
169 rps->clkevent.rating = 200;
170 rps->clkevent.cpumask = cpu_possible_mask;
171 rps->clkevent.irq = rps->irq;
172 clockevents_config_and_register(&rps->clkevent,
173 timer_rate,
174 1,
175 TIMER_MAX_VAL);
176
177 pr_info("Registered clock event rate %luHz prescaler %x period %lu\n",
178 clk_rate,
179 rps->timer_prescaler,
180 rps->timer_period);
181
182 return 0;
183}
184
185/* Clocksource */
186
187static void __iomem *timer_sched_base;
188
189static u64 notrace oxnas_rps_read_sched_clock(void)
190{
191 return ~readl_relaxed(timer_sched_base);
192}
193
194static int __init oxnas_rps_clocksource_init(struct oxnas_rps_timer *rps)
195{
196 ulong clk_rate = clk_get_rate(rps->clk);
197 int ret;
198
199 /* use prescale 16 */
200 clk_rate = clk_rate / 16;
201
202 writel_relaxed(TIMER_MAX_VAL, rps->clksrc_base + TIMER_LOAD_REG);
203 writel_relaxed(TIMER_PERIODIC | TIMER_ENABLE | TIMER_DIV16,
204 rps->clksrc_base + TIMER_CTRL_REG);
205
206 timer_sched_base = rps->clksrc_base + TIMER_CURR_REG;
207 sched_clock_register(oxnas_rps_read_sched_clock,
208 TIMER_BITS, clk_rate);
209 ret = clocksource_mmio_init(timer_sched_base,
210 "oxnas_rps_clocksource_timer",
211 clk_rate, 250, TIMER_BITS,
212 clocksource_mmio_readl_down);
213 if (WARN_ON(ret)) {
214 pr_err("can't register clocksource\n");
215 return ret;
216 }
217
218 pr_info("Registered clocksource rate %luHz\n", clk_rate);
219
220 return 0;
221}
222
223static int __init oxnas_rps_timer_init(struct device_node *np)
224{
225 struct oxnas_rps_timer *rps;
226 void __iomem *base;
227 int ret;
228
229 rps = kzalloc(sizeof(*rps), GFP_KERNEL);
230 if (!rps)
231 return -ENOMEM;
232
233 rps->clk = of_clk_get(np, 0);
234 if (IS_ERR(rps->clk)) {
235 ret = PTR_ERR(rps->clk);
236 goto err_alloc;
237 }
238
239 ret = clk_prepare_enable(rps->clk);
240 if (ret)
241 goto err_clk;
242
243 base = of_iomap(np, 0);
244 if (!base) {
245 ret = -ENXIO;
246 goto err_clk_prepare;
247 }
248
249 rps->irq = irq_of_parse_and_map(np, 0);
 250 if (rps->irq <= 0) {
251 ret = -EINVAL;
252 goto err_iomap;
253 }
254
255 rps->clkevt_base = base + TIMER1_REG_OFFSET;
256 rps->clksrc_base = base + TIMER2_REG_OFFSET;
257
258 /* Disable timers */
259 writel_relaxed(0, rps->clkevt_base + TIMER_CTRL_REG);
260 writel_relaxed(0, rps->clksrc_base + TIMER_CTRL_REG);
261 writel_relaxed(0, rps->clkevt_base + TIMER_LOAD_REG);
262 writel_relaxed(0, rps->clksrc_base + TIMER_LOAD_REG);
263 writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG);
264 writel_relaxed(0, rps->clksrc_base + TIMER_CLRINT_REG);
265
266 ret = request_irq(rps->irq, oxnas_rps_timer_irq,
267 IRQF_TIMER | IRQF_IRQPOLL,
268 "rps-timer", rps);
269 if (ret)
270 goto err_iomap;
271
272 ret = oxnas_rps_clocksource_init(rps);
273 if (ret)
274 goto err_irqreq;
275
276 ret = oxnas_rps_clockevent_init(rps);
277 if (ret)
278 goto err_irqreq;
279
280 return 0;
281
282err_irqreq:
283 free_irq(rps->irq, rps);
284err_iomap:
285 iounmap(base);
286err_clk_prepare:
287 clk_disable_unprepare(rps->clk);
288err_clk:
289 clk_put(rps->clk);
290err_alloc:
291 kfree(rps);
292
293 return ret;
294}
295
296CLOCKSOURCE_OF_DECLARE(ox810se_rps,
297 "oxsemi,ox810se-rps-timer", oxnas_rps_timer_init);
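
Besides being a new driver, timer-oxnas-rps.c is a template for the error handling the series retrofits elsewhere: every acquired resource gets an unwind label, and failure releases resources in exactly the reverse order of acquisition. The skeleton, reduced to its control flow (struct and names hypothetical):

	struct rps_like {
		struct clk *clk;
	};

	static int __init rps_like_init(struct device_node *np)
	{
		struct rps_like *st;
		int ret;

		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			return -ENOMEM;

		st->clk = of_clk_get(np, 0);
		if (IS_ERR(st->clk)) {
			ret = PTR_ERR(st->clk);
			goto err_alloc;
		}

		ret = clk_prepare_enable(st->clk);
		if (ret)
			goto err_clk;

		/* ... map registers, request the irq, register the devices,
		 * each later failure jumping to the label that undoes the
		 * previous step ...
		 */

		return 0;

	err_clk:
		clk_put(st->clk);	/* undoes of_clk_get() */
	err_alloc:
		kfree(st);		/* undoes kzalloc() */
		return ret;
	}
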
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
index 2854c663e8b5..c32148ec7a38 100644
--- a/drivers/clocksource/timer-prima2.c
+++ b/drivers/clocksource/timer-prima2.c
@@ -19,7 +19,6 @@
19#include <linux/of_irq.h> 19#include <linux/of_irq.h>
20#include <linux/of_address.h> 20#include <linux/of_address.h>
21#include <linux/sched_clock.h> 21#include <linux/sched_clock.h>
22#include <asm/mach/time.h>
23 22
24#define PRIMA2_CLOCK_FREQ 1000000 23#define PRIMA2_CLOCK_FREQ 1000000
25 24
@@ -189,24 +188,36 @@ static void __init sirfsoc_clockevent_init(void)
189} 188}
190 189
191/* initialize the kernel jiffy timer source */ 190/* initialize the kernel jiffy timer source */
192static void __init sirfsoc_prima2_timer_init(struct device_node *np) 191static int __init sirfsoc_prima2_timer_init(struct device_node *np)
193{ 192{
194 unsigned long rate; 193 unsigned long rate;
195 struct clk *clk; 194 struct clk *clk;
195 int ret;
196 196
197 clk = of_clk_get(np, 0); 197 clk = of_clk_get(np, 0);
198 BUG_ON(IS_ERR(clk)); 198 if (IS_ERR(clk)) {
 199 pr_err("Failed to get clock\n");
200 return PTR_ERR(clk);
201 }
199 202
200 BUG_ON(clk_prepare_enable(clk)); 203 ret = clk_prepare_enable(clk);
204 if (ret) {
 205 pr_err("Failed to enable clock\n");
206 return ret;
207 }
201 208
202 rate = clk_get_rate(clk); 209 rate = clk_get_rate(clk);
203 210
204 BUG_ON(rate < PRIMA2_CLOCK_FREQ); 211 if (rate < PRIMA2_CLOCK_FREQ || rate % PRIMA2_CLOCK_FREQ) {
 205 BUG_ON(rate % PRIMA2_CLOCK_FREQ); 212 pr_err("Invalid clock rate\n");
213 return -EINVAL;
214 }
206 215
207 sirfsoc_timer_base = of_iomap(np, 0); 216 sirfsoc_timer_base = of_iomap(np, 0);
208 if (!sirfsoc_timer_base) 217 if (!sirfsoc_timer_base) {
209 panic("unable to map timer cpu registers\n"); 218 pr_err("unable to map timer cpu registers\n");
219 return -ENXIO;
220 }
210 221
211 sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0); 222 sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
212 223
@@ -216,14 +227,23 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)
216 writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI); 227 writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
217 writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS); 228 writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS);
218 229
219 BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, 230 ret = clocksource_register_hz(&sirfsoc_clocksource, PRIMA2_CLOCK_FREQ);
220 PRIMA2_CLOCK_FREQ)); 231 if (ret) {
 232 pr_err("Failed to register clocksource\n");
233 return ret;
234 }
221 235
222 sched_clock_register(sirfsoc_read_sched_clock, 64, PRIMA2_CLOCK_FREQ); 236 sched_clock_register(sirfsoc_read_sched_clock, 64, PRIMA2_CLOCK_FREQ);
223 237
224 BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq)); 238 ret = setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
239 if (ret) {
 240 pr_err("Failed to setup irq\n");
241 return ret;
242 }
225 243
226 sirfsoc_clockevent_init(); 244 sirfsoc_clockevent_init();
245
246 return 0;
227} 247}
228CLOCKSOURCE_OF_DECLARE(sirfsoc_prima2_timer, 248CLOCKSOURCE_OF_DECLARE(sirfsoc_prima2_timer,
229 "sirf,prima2-tick", sirfsoc_prima2_timer_init); 249 "sirf,prima2-tick", sirfsoc_prima2_timer_init);
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index 5f45b9adef60..d07863388e05 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -77,7 +77,7 @@ void __init sp804_timer_disable(void __iomem *base)
77 writel(0, base + TIMER_CTRL); 77 writel(0, base + TIMER_CTRL);
78} 78}
79 79
80void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base, 80int __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
81 const char *name, 81 const char *name,
82 struct clk *clk, 82 struct clk *clk,
83 int use_sched_clock) 83 int use_sched_clock)
@@ -89,14 +89,13 @@ void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
89 if (IS_ERR(clk)) { 89 if (IS_ERR(clk)) {
90 pr_err("sp804: clock not found: %d\n", 90 pr_err("sp804: clock not found: %d\n",
91 (int)PTR_ERR(clk)); 91 (int)PTR_ERR(clk));
92 return; 92 return PTR_ERR(clk);
93 } 93 }
94 } 94 }
95 95
96 rate = sp804_get_clock_rate(clk); 96 rate = sp804_get_clock_rate(clk);
97
98 if (rate < 0) 97 if (rate < 0)
99 return; 98 return -EINVAL;
100 99
101 /* setup timer 0 as free-running clocksource */ 100 /* setup timer 0 as free-running clocksource */
102 writel(0, base + TIMER_CTRL); 101 writel(0, base + TIMER_CTRL);
@@ -112,6 +111,8 @@ void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
112 sched_clock_base = base; 111 sched_clock_base = base;
113 sched_clock_register(sp804_read, 32, rate); 112 sched_clock_register(sp804_read, 32, rate);
114 } 113 }
114
115 return 0;
115} 116}
116 117
117 118
@@ -186,7 +187,7 @@ static struct irqaction sp804_timer_irq = {
186 .dev_id = &sp804_clockevent, 187 .dev_id = &sp804_clockevent,
187}; 188};
188 189
189void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name) 190int __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name)
190{ 191{
191 struct clock_event_device *evt = &sp804_clockevent; 192 struct clock_event_device *evt = &sp804_clockevent;
192 long rate; 193 long rate;
@@ -196,12 +197,12 @@ void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struc
196 if (IS_ERR(clk)) { 197 if (IS_ERR(clk)) {
197 pr_err("sp804: %s clock not found: %d\n", name, 198 pr_err("sp804: %s clock not found: %d\n", name,
198 (int)PTR_ERR(clk)); 199 (int)PTR_ERR(clk));
199 return; 200 return PTR_ERR(clk);
200 } 201 }
201 202
202 rate = sp804_get_clock_rate(clk); 203 rate = sp804_get_clock_rate(clk);
203 if (rate < 0) 204 if (rate < 0)
204 return; 205 return -EINVAL;
205 206
206 clkevt_base = base; 207 clkevt_base = base;
207 clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ); 208 clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
@@ -213,27 +214,31 @@ void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struc
213 214
214 setup_irq(irq, &sp804_timer_irq); 215 setup_irq(irq, &sp804_timer_irq);
215 clockevents_config_and_register(evt, rate, 0xf, 0xffffffff); 216 clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
217
218 return 0;
216} 219}
217 220
218static void __init sp804_of_init(struct device_node *np) 221static int __init sp804_of_init(struct device_node *np)
219{ 222{
220 static bool initialized = false; 223 static bool initialized = false;
221 void __iomem *base; 224 void __iomem *base;
222 int irq; 225 int irq, ret = -EINVAL;
223 u32 irq_num = 0; 226 u32 irq_num = 0;
224 struct clk *clk1, *clk2; 227 struct clk *clk1, *clk2;
225 const char *name = of_get_property(np, "compatible", NULL); 228 const char *name = of_get_property(np, "compatible", NULL);
226 229
227 base = of_iomap(np, 0); 230 base = of_iomap(np, 0);
228 if (WARN_ON(!base)) 231 if (!base)
229 return; 232 return -ENXIO;
230 233
231 /* Ensure timers are disabled */ 234 /* Ensure timers are disabled */
232 writel(0, base + TIMER_CTRL); 235 writel(0, base + TIMER_CTRL);
233 writel(0, base + TIMER_2_BASE + TIMER_CTRL); 236 writel(0, base + TIMER_2_BASE + TIMER_CTRL);
234 237
235 if (initialized || !of_device_is_available(np)) 238 if (initialized || !of_device_is_available(np)) {
239 ret = -EINVAL;
236 goto err; 240 goto err;
241 }
237 242
238 clk1 = of_clk_get(np, 0); 243 clk1 = of_clk_get(np, 0);
239 if (IS_ERR(clk1)) 244 if (IS_ERR(clk1))
@@ -256,35 +261,53 @@ static void __init sp804_of_init(struct device_node *np)
256 261
257 of_property_read_u32(np, "arm,sp804-has-irq", &irq_num); 262 of_property_read_u32(np, "arm,sp804-has-irq", &irq_num);
258 if (irq_num == 2) { 263 if (irq_num == 2) {
259 __sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name); 264
260 __sp804_clocksource_and_sched_clock_init(base, name, clk1, 1); 265 ret = __sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name);
266 if (ret)
267 goto err;
268
269 ret = __sp804_clocksource_and_sched_clock_init(base, name, clk1, 1);
270 if (ret)
271 goto err;
261 } else { 272 } else {
262 __sp804_clockevents_init(base, irq, clk1 , name); 273
 263 __sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE, 274 ret = __sp804_clockevents_init(base, irq, clk1, name);
264 name, clk2, 1); 275 if (ret)
276 goto err;
277
 278 ret = __sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE,
279 name, clk2, 1);
280 if (ret)
281 goto err;
265 } 282 }
266 initialized = true; 283 initialized = true;
267 284
268 return; 285 return 0;
269err: 286err:
270 iounmap(base); 287 iounmap(base);
288 return ret;
271} 289}
272CLOCKSOURCE_OF_DECLARE(sp804, "arm,sp804", sp804_of_init); 290CLOCKSOURCE_OF_DECLARE(sp804, "arm,sp804", sp804_of_init);
273 291
274static void __init integrator_cp_of_init(struct device_node *np) 292static int __init integrator_cp_of_init(struct device_node *np)
275{ 293{
276 static int init_count = 0; 294 static int init_count = 0;
277 void __iomem *base; 295 void __iomem *base;
278 int irq; 296 int irq, ret = -EINVAL;
279 const char *name = of_get_property(np, "compatible", NULL); 297 const char *name = of_get_property(np, "compatible", NULL);
280 struct clk *clk; 298 struct clk *clk;
281 299
282 base = of_iomap(np, 0); 300 base = of_iomap(np, 0);
283 if (WARN_ON(!base)) 301 if (!base) {
 284 return; 302 pr_err("Failed to iomap\n");
303 return -ENXIO;
304 }
305
285 clk = of_clk_get(np, 0); 306 clk = of_clk_get(np, 0);
286 if (WARN_ON(IS_ERR(clk))) 307 if (IS_ERR(clk)) {
 287 return; 308 pr_err("Failed to get clock\n");
309 return PTR_ERR(clk);
310 }
288 311
289 /* Ensure timer is disabled */ 312 /* Ensure timer is disabled */
290 writel(0, base + TIMER_CTRL); 313 writel(0, base + TIMER_CTRL);
@@ -292,19 +315,24 @@ static void __init integrator_cp_of_init(struct device_node *np)
292 if (init_count == 2 || !of_device_is_available(np)) 315 if (init_count == 2 || !of_device_is_available(np))
293 goto err; 316 goto err;
294 317
295 if (!init_count) 318 if (!init_count) {
296 __sp804_clocksource_and_sched_clock_init(base, name, clk, 0); 319 ret = __sp804_clocksource_and_sched_clock_init(base, name, clk, 0);
297 else { 320 if (ret)
321 goto err;
322 } else {
298 irq = irq_of_parse_and_map(np, 0); 323 irq = irq_of_parse_and_map(np, 0);
299 if (irq <= 0) 324 if (irq <= 0)
300 goto err; 325 goto err;
301 326
302 __sp804_clockevents_init(base, irq, clk, name); 327 ret = __sp804_clockevents_init(base, irq, clk, name);
328 if (ret)
329 goto err;
303 } 330 }
304 331
305 init_count++; 332 init_count++;
306 return; 333 return 0;
307err: 334err:
308 iounmap(base); 335 iounmap(base);
336 return ret;
309} 337}
310CLOCKSOURCE_OF_DECLARE(intcp, "arm,integrator-cp-timer", integrator_cp_of_init); 338CLOCKSOURCE_OF_DECLARE(intcp, "arm,integrator-cp-timer", integrator_cp_of_init);
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index f3dcb76799b4..1b2574c4fb97 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -98,7 +98,7 @@ static struct stm32_clock_event_ddata clock_event_ddata = {
98 }, 98 },
99}; 99};
100 100
101static void __init stm32_clockevent_init(struct device_node *np) 101static int __init stm32_clockevent_init(struct device_node *np)
102{ 102{
103 struct stm32_clock_event_ddata *data = &clock_event_ddata; 103 struct stm32_clock_event_ddata *data = &clock_event_ddata;
104 struct clk *clk; 104 struct clk *clk;
@@ -130,12 +130,14 @@ static void __init stm32_clockevent_init(struct device_node *np)
130 130
131 data->base = of_iomap(np, 0); 131 data->base = of_iomap(np, 0);
132 if (!data->base) { 132 if (!data->base) {
133 ret = -ENXIO;
133 pr_err("failed to map registers for clockevent\n"); 134 pr_err("failed to map registers for clockevent\n");
134 goto err_iomap; 135 goto err_iomap;
135 } 136 }
136 137
137 irq = irq_of_parse_and_map(np, 0); 138 irq = irq_of_parse_and_map(np, 0);
138 if (!irq) { 139 if (!irq) {
140 ret = -EINVAL;
139 pr_err("%s: failed to get irq.\n", np->full_name); 141 pr_err("%s: failed to get irq.\n", np->full_name);
140 goto err_get_irq; 142 goto err_get_irq;
141 } 143 }
@@ -173,7 +175,7 @@ static void __init stm32_clockevent_init(struct device_node *np)
173 pr_info("%s: STM32 clockevent driver initialized (%d bits)\n", 175 pr_info("%s: STM32 clockevent driver initialized (%d bits)\n",
174 np->full_name, bits); 176 np->full_name, bits);
175 177
176 return; 178 return ret;
177 179
178err_get_irq: 180err_get_irq:
179 iounmap(data->base); 181 iounmap(data->base);
@@ -182,7 +184,7 @@ err_iomap:
182err_clk_enable: 184err_clk_enable:
183 clk_put(clk); 185 clk_put(clk);
184err_clk_get: 186err_clk_get:
185 return; 187 return ret;
186} 188}
187 189
188CLOCKSOURCE_OF_DECLARE(stm32, "st,stm32-timer", stm32_clockevent_init); 190CLOCKSOURCE_OF_DECLARE(stm32, "st,stm32-timer", stm32_clockevent_init);
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 24c83f9efd87..c184eb84101e 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -311,33 +311,42 @@ err_free:
311 return ret; 311 return ret;
312} 312}
313 313
314static void __init sun5i_timer_init(struct device_node *node) 314static int __init sun5i_timer_init(struct device_node *node)
315{ 315{
316 struct reset_control *rstc; 316 struct reset_control *rstc;
317 void __iomem *timer_base; 317 void __iomem *timer_base;
318 struct clk *clk; 318 struct clk *clk;
319 int irq; 319 int irq, ret;
320 320
321 timer_base = of_io_request_and_map(node, 0, of_node_full_name(node)); 321 timer_base = of_io_request_and_map(node, 0, of_node_full_name(node));
322 if (IS_ERR(timer_base)) 322 if (IS_ERR(timer_base)) {
 323 panic("Can't map registers"); 323 pr_err("Can't map registers\n");
 324 return PTR_ERR(timer_base);
325 }
324 326
325 irq = irq_of_parse_and_map(node, 0); 327 irq = irq_of_parse_and_map(node, 0);
326 if (irq <= 0) 328 if (irq <= 0) {
 327 panic("Can't parse IRQ"); 329 pr_err("Can't parse IRQ\n");
330 return -EINVAL;
331 }
328 332
329 clk = of_clk_get(node, 0); 333 clk = of_clk_get(node, 0);
330 if (IS_ERR(clk)) 334 if (IS_ERR(clk)) {
 331 panic("Can't get timer clock"); 335 pr_err("Can't get timer clock\n");
336 return PTR_ERR(clk);
337 }
332 338
333 rstc = of_reset_control_get(node, NULL); 339 rstc = of_reset_control_get(node, NULL);
334 if (!IS_ERR(rstc)) 340 if (!IS_ERR(rstc))
335 reset_control_deassert(rstc); 341 reset_control_deassert(rstc);
336 342
337 sun5i_setup_clocksource(node, timer_base, clk, irq); 343 ret = sun5i_setup_clocksource(node, timer_base, clk, irq);
338 sun5i_setup_clockevent(node, timer_base, clk, irq); 344 if (ret)
345 return ret;
346
347 return sun5i_setup_clockevent(node, timer_base, clk, irq);
339} 348}
340CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer", 349CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
341 sun5i_timer_init); 350 sun5i_timer_init);
342CLOCKSOURCE_OF_DECLARE(sun7i_a20, "allwinner,sun7i-a20-hstimer", 351CLOCKSOURCE_OF_DECLARE(sun7i_a20, "allwinner,sun7i-a20-hstimer",
343 sun5i_timer_init); 352 sun5i_timer_init);
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 8518d9dfba5c..92b7e390f6c8 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -88,14 +88,14 @@ static u64 notrace omap_32k_read_sched_clock(void)
88 return ti_32k_read_cycles(&ti_32k_timer.cs); 88 return ti_32k_read_cycles(&ti_32k_timer.cs);
89} 89}
90 90
91static void __init ti_32k_timer_init(struct device_node *np) 91static int __init ti_32k_timer_init(struct device_node *np)
92{ 92{
93 int ret; 93 int ret;
94 94
95 ti_32k_timer.base = of_iomap(np, 0); 95 ti_32k_timer.base = of_iomap(np, 0);
96 if (!ti_32k_timer.base) { 96 if (!ti_32k_timer.base) {
97 pr_err("Can't ioremap 32k timer base\n"); 97 pr_err("Can't ioremap 32k timer base\n");
98 return; 98 return -ENXIO;
99 } 99 }
100 100
101 ti_32k_timer.counter = ti_32k_timer.base; 101 ti_32k_timer.counter = ti_32k_timer.base;
@@ -116,11 +116,13 @@ static void __init ti_32k_timer_init(struct device_node *np)
116 ret = clocksource_register_hz(&ti_32k_timer.cs, 32768); 116 ret = clocksource_register_hz(&ti_32k_timer.cs, 32768);
117 if (ret) { 117 if (ret) {
118 pr_err("32k_counter: can't register clocksource\n"); 118 pr_err("32k_counter: can't register clocksource\n");
119 return; 119 return ret;
120 } 120 }
121 121
122 sched_clock_register(omap_32k_read_sched_clock, 32, 32768); 122 sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
123 pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n"); 123 pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
124
125 return 0;
124} 126}
125CLOCKSOURCE_OF_DECLARE(ti_32k_timer, "ti,omap-counter32k", 127CLOCKSOURCE_OF_DECLARE(ti_32k_timer, "ti,omap-counter32k",
126 ti_32k_timer_init); 128 ti_32k_timer_init);
diff --git a/drivers/clocksource/timer-u300.c b/drivers/clocksource/timer-u300.c
index 1744b243898a..704e40c6f151 100644
--- a/drivers/clocksource/timer-u300.c
+++ b/drivers/clocksource/timer-u300.c
@@ -359,27 +359,37 @@ static struct delay_timer u300_delay_timer;
359/* 359/*
360 * This sets up the system timers, clock source and clock event. 360 * This sets up the system timers, clock source and clock event.
361 */ 361 */
362static void __init u300_timer_init_of(struct device_node *np) 362static int __init u300_timer_init_of(struct device_node *np)
363{ 363{
364 unsigned int irq; 364 unsigned int irq;
365 struct clk *clk; 365 struct clk *clk;
366 unsigned long rate; 366 unsigned long rate;
367 int ret;
367 368
368 u300_timer_base = of_iomap(np, 0); 369 u300_timer_base = of_iomap(np, 0);
369 if (!u300_timer_base) 370 if (!u300_timer_base) {
370 panic("could not ioremap system timer\n"); 371 pr_err("could not ioremap system timer\n");
372 return -ENXIO;
373 }
371 374
372 /* Get the IRQ for the GP1 timer */ 375 /* Get the IRQ for the GP1 timer */
373 irq = irq_of_parse_and_map(np, 2); 376 irq = irq_of_parse_and_map(np, 2);
374 if (!irq) 377 if (!irq) {
375 panic("no IRQ for system timer\n"); 378 pr_err("no IRQ for system timer\n");
379 return -EINVAL;
380 }
376 381
377 pr_info("U300 GP1 timer @ base: %p, IRQ: %u\n", u300_timer_base, irq); 382 pr_info("U300 GP1 timer @ base: %p, IRQ: %u\n", u300_timer_base, irq);
378 383
379 /* Clock the interrupt controller */ 384 /* Clock the interrupt controller */
380 clk = of_clk_get(np, 0); 385 clk = of_clk_get(np, 0);
381 BUG_ON(IS_ERR(clk)); 386 if (IS_ERR(clk))
382 clk_prepare_enable(clk); 387 return PTR_ERR(clk);
388
389 ret = clk_prepare_enable(clk);
390 if (ret)
391 return ret;
392
383 rate = clk_get_rate(clk); 393 rate = clk_get_rate(clk);
384 394
385 u300_clockevent_data.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ); 395 u300_clockevent_data.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
@@ -410,7 +420,9 @@ static void __init u300_timer_init_of(struct device_node *np)
410 u300_timer_base + U300_TIMER_APP_RGPT1); 420 u300_timer_base + U300_TIMER_APP_RGPT1);
411 421
412 /* Set up the IRQ handler */ 422 /* Set up the IRQ handler */
413 setup_irq(irq, &u300_timer_irq); 423 ret = setup_irq(irq, &u300_timer_irq);
424 if (ret)
425 return ret;
414 426
415 /* Reset the General Purpose timer 2 */ 427 /* Reset the General Purpose timer 2 */
416 writel(U300_TIMER_APP_RGPT2_TIMER_RESET, 428 writel(U300_TIMER_APP_RGPT2_TIMER_RESET,
@@ -428,9 +440,12 @@ static void __init u300_timer_init_of(struct device_node *np)
428 u300_timer_base + U300_TIMER_APP_EGPT2); 440 u300_timer_base + U300_TIMER_APP_EGPT2);
429 441
430 /* Use general purpose timer 2 as clock source */ 442 /* Use general purpose timer 2 as clock source */
431 if (clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC, 443 ret = clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC,
432 "GPT2", rate, 300, 32, clocksource_mmio_readl_up)) 444 "GPT2", rate, 300, 32, clocksource_mmio_readl_up);
445 if (ret) {
433 pr_err("timer: failed to initialize U300 clock source\n"); 446 pr_err("timer: failed to initialize U300 clock source\n");
447 return ret;
448 }
434 449
435 /* Configure and register the clockevent */ 450 /* Configure and register the clockevent */
436 clockevents_config_and_register(&u300_clockevent_data.cevd, rate, 451 clockevents_config_and_register(&u300_clockevent_data.cevd, rate,
@@ -440,6 +455,7 @@ static void __init u300_timer_init_of(struct device_node *np)
440 * TODO: init and register the rest of the timers too, they can be 455 * TODO: init and register the rest of the timers too, they can be
441 * used by hrtimers! 456 * used by hrtimers!
442 */ 457 */
458 return 0;
443} 459}
444 460
445CLOCKSOURCE_OF_DECLARE(u300_timer, "stericsson,u300-apptimer", 461CLOCKSOURCE_OF_DECLARE(u300_timer, "stericsson,u300-apptimer",
diff --git a/drivers/clocksource/versatile.c b/drivers/clocksource/versatile.c
index 0a26d3dde6c0..220b490a8142 100644
--- a/drivers/clocksource/versatile.c
+++ b/drivers/clocksource/versatile.c
@@ -25,16 +25,18 @@ static u64 notrace versatile_sys_24mhz_read(void)
25 return readl(versatile_sys_24mhz); 25 return readl(versatile_sys_24mhz);
26} 26}
27 27
28static void __init versatile_sched_clock_init(struct device_node *node) 28static int __init versatile_sched_clock_init(struct device_node *node)
29{ 29{
30 void __iomem *base = of_iomap(node, 0); 30 void __iomem *base = of_iomap(node, 0);
31 31
32 if (!base) 32 if (!base)
33 return; 33 return -ENXIO;
34 34
35 versatile_sys_24mhz = base + SYS_24MHZ; 35 versatile_sys_24mhz = base + SYS_24MHZ;
36 36
37 sched_clock_register(versatile_sys_24mhz_read, 32, 24000000); 37 sched_clock_register(versatile_sys_24mhz_read, 32, 24000000);
38
39 return 0;
38} 40}
39CLOCKSOURCE_OF_DECLARE(vexpress, "arm,vexpress-sysreg", 41CLOCKSOURCE_OF_DECLARE(vexpress, "arm,vexpress-sysreg",
40 versatile_sched_clock_init); 42 versatile_sched_clock_init);
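
versatile registers nothing but a sched_clock, which is the smallest possible instance of the new contract: map the counter, register the read hook, return 0. Sketched with an invented register offset and clock rate:

	static void __iomem *counter;

	static u64 notrace my_sched_clock_read(void)
	{
		return readl(counter);		/* free-running 32-bit up-counter */
	}

	static int __init my_sched_clock_init(struct device_node *np)
	{
		void __iomem *base = of_iomap(np, 0);

		if (!base)
			return -ENXIO;

		counter = base + 0x5c;		/* hypothetical counter offset */
		sched_clock_register(my_sched_clock_read, 32, 24000000);

		return 0;
	}
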
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
index a0e6c68536a1..55d8d8402d90 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/vf_pit_timer.c
@@ -156,15 +156,18 @@ static int __init pit_clockevent_init(unsigned long rate, int irq)
156 return 0; 156 return 0;
157} 157}
158 158
159static void __init pit_timer_init(struct device_node *np) 159static int __init pit_timer_init(struct device_node *np)
160{ 160{
161 struct clk *pit_clk; 161 struct clk *pit_clk;
162 void __iomem *timer_base; 162 void __iomem *timer_base;
163 unsigned long clk_rate; 163 unsigned long clk_rate;
164 int irq; 164 int irq, ret;
165 165
166 timer_base = of_iomap(np, 0); 166 timer_base = of_iomap(np, 0);
167 BUG_ON(!timer_base); 167 if (!timer_base) {
 168 pr_err("Failed to iomap\n");
169 return -ENXIO;
170 }
168 171
169 /* 172 /*
170 * PIT0 and PIT1 can be chained to build a 64-bit timer, 173 * PIT0 and PIT1 can be chained to build a 64-bit timer,
@@ -175,12 +178,16 @@ static void __init pit_timer_init(struct device_node *np)
175 clkevt_base = timer_base + PITn_OFFSET(3); 178 clkevt_base = timer_base + PITn_OFFSET(3);
176 179
177 irq = irq_of_parse_and_map(np, 0); 180 irq = irq_of_parse_and_map(np, 0);
178 BUG_ON(irq <= 0); 181 if (irq <= 0)
182 return -EINVAL;
179 183
180 pit_clk = of_clk_get(np, 0); 184 pit_clk = of_clk_get(np, 0);
181 BUG_ON(IS_ERR(pit_clk)); 185 if (IS_ERR(pit_clk))
186 return PTR_ERR(pit_clk);
182 187
183 BUG_ON(clk_prepare_enable(pit_clk)); 188 ret = clk_prepare_enable(pit_clk);
189 if (ret)
190 return ret;
184 191
185 clk_rate = clk_get_rate(pit_clk); 192 clk_rate = clk_get_rate(pit_clk);
186 cycle_per_jiffy = clk_rate / (HZ); 193 cycle_per_jiffy = clk_rate / (HZ);
@@ -188,8 +195,10 @@ static void __init pit_timer_init(struct device_node *np)
188 /* enable the pit module */ 195 /* enable the pit module */
189 __raw_writel(~PITMCR_MDIS, timer_base + PITMCR); 196 __raw_writel(~PITMCR_MDIS, timer_base + PITMCR);
190 197
191 BUG_ON(pit_clocksource_init(clk_rate)); 198 ret = pit_clocksource_init(clk_rate);
199 if (ret)
200 return ret;
192 201
193 pit_clockevent_init(clk_rate, irq); 202 return pit_clockevent_init(clk_rate, irq);
194} 203}
195CLOCKSOURCE_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init); 204CLOCKSOURCE_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
index ddb409274f45..b15069483fbd 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/vt8500_timer.c
@@ -121,38 +121,48 @@ static struct irqaction irq = {
121 .dev_id = &clockevent, 121 .dev_id = &clockevent,
122}; 122};
123 123
124static void __init vt8500_timer_init(struct device_node *np) 124static int __init vt8500_timer_init(struct device_node *np)
125{ 125{
126 int timer_irq; 126 int timer_irq, ret;
127 127
128 regbase = of_iomap(np, 0); 128 regbase = of_iomap(np, 0);
129 if (!regbase) { 129 if (!regbase) {
130 pr_err("%s: Missing iobase description in Device Tree\n", 130 pr_err("%s: Missing iobase description in Device Tree\n",
131 __func__); 131 __func__);
132 return; 132 return -ENXIO;
133 } 133 }
134
134 timer_irq = irq_of_parse_and_map(np, 0); 135 timer_irq = irq_of_parse_and_map(np, 0);
135 if (!timer_irq) { 136 if (!timer_irq) {
136 pr_err("%s: Missing irq description in Device Tree\n", 137 pr_err("%s: Missing irq description in Device Tree\n",
137 __func__); 138 __func__);
138 return; 139 return -EINVAL;
139 } 140 }
140 141
141 writel(1, regbase + TIMER_CTRL_VAL); 142 writel(1, regbase + TIMER_CTRL_VAL);
142 writel(0xf, regbase + TIMER_STATUS_VAL); 143 writel(0xf, regbase + TIMER_STATUS_VAL);
143 writel(~0, regbase + TIMER_MATCH_VAL); 144 writel(~0, regbase + TIMER_MATCH_VAL);
144 145
145 if (clocksource_register_hz(&clocksource, VT8500_TIMER_HZ)) 146 ret = clocksource_register_hz(&clocksource, VT8500_TIMER_HZ);
147 if (ret) {
146 pr_err("%s: vt8500_timer_init: clocksource_register failed for %s\n", 148 pr_err("%s: vt8500_timer_init: clocksource_register failed for %s\n",
147 __func__, clocksource.name); 149 __func__, clocksource.name);
150 return ret;
151 }
148 152
149 clockevent.cpumask = cpumask_of(0); 153 clockevent.cpumask = cpumask_of(0);
150 154
151 if (setup_irq(timer_irq, &irq)) 155 ret = setup_irq(timer_irq, &irq);
156 if (ret) {
152 pr_err("%s: setup_irq failed for %s\n", __func__, 157 pr_err("%s: setup_irq failed for %s\n", __func__,
153 clockevent.name); 158 clockevent.name);
159 return ret;
160 }
161
154 clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ, 162 clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
155 MIN_OSCR_DELTA * 2, 0xf0000000); 163 MIN_OSCR_DELTA * 2, 0xf0000000);
164
165 return 0;
156} 166}
157 167
158CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init); 168CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c
index ceaa6133f9c2..9a53f5ef6157 100644
--- a/drivers/clocksource/zevio-timer.c
+++ b/drivers/clocksource/zevio-timer.c
@@ -210,9 +210,9 @@ error_free:
210 return ret; 210 return ret;
211} 211}
212 212
213static void __init zevio_timer_init(struct device_node *node) 213static int __init zevio_timer_init(struct device_node *node)
214{ 214{
215 BUG_ON(zevio_timer_add(node)); 215 return zevio_timer_add(node);
216} 216}
217 217
218CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init); 218CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
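
All of the clocksource conversions above share one shape: the init function now returns int, BUG_ON()s and silent returns become error codes, and the error is propagated back to the probing core. A minimal sketch of the resulting pattern; the foo_* names, the "acme,foo-timer" compatible string and the 24 MHz rate are hypothetical, not taken from any driver in this series:

    /* Sketch only: the foo_* identifiers below are invented for illustration. */
    static int __init foo_timer_init(struct device_node *np)
    {
            void __iomem *base = of_iomap(np, 0);
            int ret;

            if (!base)
                    return -ENXIO;          /* was: BUG_ON(!base) or silent return */

            /* 32-bit up-counting MMIO clocksource at a fixed 24 MHz */
            ret = clocksource_mmio_init(base, "foo-timer", 24000000, 300, 32,
                                        clocksource_mmio_readl_up);
            if (ret) {
                    pr_err("foo-timer: clocksource init failed\n");
                    iounmap(base);
                    return ret;             /* propagated instead of swallowed */
            }

            return 0;
    }
    CLOCKSOURCE_OF_DECLARE(foo_timer, "acme,foo-timer", foo_timer_init);
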
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 54c45368e3f1..6bd715b7f11c 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -530,8 +530,7 @@ static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
530 else 530 else
531 timer_interval = GPSTATE_TIMER_INTERVAL; 531 timer_interval = GPSTATE_TIMER_INTERVAL;
532 532
533 mod_timer_pinned(&gpstates->timer, jiffies + 533 mod_timer(&gpstates->timer, jiffies + msecs_to_jiffies(timer_interval));
534 msecs_to_jiffies(timer_interval));
535} 534}
536 535
537/** 536/**
@@ -699,7 +698,7 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
699 policy->driver_data = gpstates; 698 policy->driver_data = gpstates;
700 699
701 /* initialize timer */ 700 /* initialize timer */
702 init_timer_deferrable(&gpstates->timer); 701 init_timer_pinned_deferrable(&gpstates->timer);
703 gpstates->timer.data = (unsigned long)policy; 702 gpstates->timer.data = (unsigned long)policy;
704 gpstates->timer.function = gpstate_timer_handler; 703 gpstates->timer.function = gpstate_timer_handler;
705 gpstates->timer.expires = jiffies + 704 gpstates->timer.expires = jiffies +
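
mod_timer_pinned() could be removed because pinning is now a static property of the timer, declared once at init time via TIMER_PINNED, rather than a decision made at every arming site. An illustrative before/after fragment with a hypothetical timer t:

    struct timer_list t;

    /* Old: every arming site had to remember to pin */
    init_timer_deferrable(&t);
    mod_timer_pinned(&t, jiffies + HZ);

    /* New: the timer itself is pinned, arming is ordinary */
    init_timer_pinned_deferrable(&t);
    mod_timer(&t, jiffies + HZ);    /* stays on the CPU that queued it */
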
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 03ddf0ecf402..684087db170b 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -1068,8 +1068,6 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
1068 jz4740_mmc_clock_disable(host); 1068 jz4740_mmc_clock_disable(host);
1069 setup_timer(&host->timeout_timer, jz4740_mmc_timeout, 1069 setup_timer(&host->timeout_timer, jz4740_mmc_timeout,
1070 (unsigned long)host); 1070 (unsigned long)host);
1071 /* It is not important when it times out, it just needs to timeout. */
1072 set_timer_slack(&host->timeout_timer, HZ);
1073 1071
1074 host->use_dma = true; 1072 host->use_dma = true;
1075 if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0) 1073 if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 922a443e3415..4ef605a90247 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -588,7 +588,7 @@ static bool tile_net_lepp_free_comps(struct net_device *dev, bool all)
588static void tile_net_schedule_egress_timer(struct tile_net_cpu *info) 588static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
589{ 589{
590 if (!info->egress_timer_scheduled) { 590 if (!info->egress_timer_scheduled) {
591 mod_timer_pinned(&info->egress_timer, jiffies + 1); 591 mod_timer(&info->egress_timer, jiffies + 1);
592 info->egress_timer_scheduled = true; 592 info->egress_timer_scheduled = true;
593 } 593 }
594} 594}
@@ -1004,7 +1004,7 @@ static void tile_net_register(void *dev_ptr)
1004 BUG(); 1004 BUG();
1005 1005
1006 /* Initialize the egress timer. */ 1006 /* Initialize the egress timer. */
1007 init_timer(&info->egress_timer); 1007 init_timer_pinned(&info->egress_timer);
1008 info->egress_timer.data = (long)info; 1008 info->egress_timer.data = (long)info;
1009 info->egress_timer.function = tile_net_handle_egress_timer; 1009 info->egress_timer.function = tile_net_handle_egress_timer;
1010 1010
diff --git a/drivers/power/bq27xxx_battery.c b/drivers/power/bq27xxx_battery.c
index 45f6ebf88df6..e90b3f307e0f 100644
--- a/drivers/power/bq27xxx_battery.c
+++ b/drivers/power/bq27xxx_battery.c
@@ -735,11 +735,8 @@ static void bq27xxx_battery_poll(struct work_struct *work)
735 735
736 bq27xxx_battery_update(di); 736 bq27xxx_battery_update(di);
737 737
738 if (poll_interval > 0) { 738 if (poll_interval > 0)
739 /* The timer does not have to be accurate. */
740 set_timer_slack(&di->work.timer, poll_interval * HZ / 4);
741 schedule_delayed_work(&di->work, poll_interval * HZ); 739 schedule_delayed_work(&di->work, poll_interval * HZ);
742 }
743} 740}
744 741
745/* 742/*
diff --git a/drivers/tty/metag_da.c b/drivers/tty/metag_da.c
index 9325262289f9..25ccef2fe748 100644
--- a/drivers/tty/metag_da.c
+++ b/drivers/tty/metag_da.c
@@ -323,12 +323,12 @@ static void dashtty_timer(unsigned long ignored)
323 if (channel >= 0) 323 if (channel >= 0)
324 fetch_data(channel); 324 fetch_data(channel);
325 325
326 mod_timer_pinned(&poll_timer, jiffies + DA_TTY_POLL); 326 mod_timer(&poll_timer, jiffies + DA_TTY_POLL);
327} 327}
328 328
329static void add_poll_timer(struct timer_list *poll_timer) 329static void add_poll_timer(struct timer_list *poll_timer)
330{ 330{
331 setup_timer(poll_timer, dashtty_timer, 0); 331 setup_pinned_timer(poll_timer, dashtty_timer, 0);
332 poll_timer->expires = jiffies + DA_TTY_POLL; 332 poll_timer->expires = jiffies + DA_TTY_POLL;
333 333
334 /* 334 /*
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index a119176a1855..234123b0c642 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -689,7 +689,7 @@ static void mips_ejtag_fdc_tty_timer(unsigned long opaque)
689 689
690 mips_ejtag_fdc_handle(priv); 690 mips_ejtag_fdc_handle(priv);
691 if (!priv->removing) 691 if (!priv->removing)
692 mod_timer_pinned(&priv->poll_timer, jiffies + FDC_TTY_POLL); 692 mod_timer(&priv->poll_timer, jiffies + FDC_TTY_POLL);
693} 693}
694 694
695/* TTY Port operations */ 695/* TTY Port operations */
@@ -1002,7 +1002,7 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
1002 raw_spin_unlock_irq(&priv->lock); 1002 raw_spin_unlock_irq(&priv->lock);
1003 } else { 1003 } else {
1004 /* If we didn't get a usable IRQ, poll instead */ 1004 /* If we didn't get a usable IRQ, poll instead */
1005 setup_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer, 1005 setup_pinned_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
1006 (unsigned long)priv); 1006 (unsigned long)priv);
1007 priv->poll_timer.expires = jiffies + FDC_TTY_POLL; 1007 priv->poll_timer.expires = jiffies + FDC_TTY_POLL;
1008 /* 1008 /*
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 0449235d4f22..1700908b84ef 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -500,7 +500,6 @@ static int ohci_init (struct ohci_hcd *ohci)
500 500
501 setup_timer(&ohci->io_watchdog, io_watchdog_func, 501 setup_timer(&ohci->io_watchdog, io_watchdog_func,
502 (unsigned long) ohci); 502 (unsigned long) ohci);
503 set_timer_slack(&ohci->io_watchdog, msecs_to_jiffies(20));
504 503
505 ohci->hcca = dma_alloc_coherent (hcd->self.controller, 504 ohci->hcca = dma_alloc_coherent (hcd->self.controller,
506 sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL); 505 sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 9da98321d8e6..01d96c9b3a75 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -490,8 +490,6 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
490 xhci->comp_mode_recovery_timer.expires = jiffies + 490 xhci->comp_mode_recovery_timer.expires = jiffies +
491 msecs_to_jiffies(COMP_MODE_RCVRY_MSECS); 491 msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
492 492
493 set_timer_slack(&xhci->comp_mode_recovery_timer,
494 msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
495 add_timer(&xhci->comp_mode_recovery_timer); 493 add_timer(&xhci->comp_mode_recovery_timer);
496 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 494 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
497 "Compliance mode recovery timer initialized"); 495 "Compliance mode recovery timer initialized");
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 053818dd6c18..9ae4abb4110b 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -390,6 +390,11 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
390 clockid != CLOCK_BOOTTIME_ALARM)) 390 clockid != CLOCK_BOOTTIME_ALARM))
391 return -EINVAL; 391 return -EINVAL;
392 392
393 if (!capable(CAP_WAKE_ALARM) &&
394 (clockid == CLOCK_REALTIME_ALARM ||
395 clockid == CLOCK_BOOTTIME_ALARM))
396 return -EPERM;
397
393 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 398 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
394 if (!ctx) 399 if (!ctx)
395 return -ENOMEM; 400 return -ENOMEM;
@@ -433,6 +438,11 @@ static int do_timerfd_settime(int ufd, int flags,
433 return ret; 438 return ret;
434 ctx = f.file->private_data; 439 ctx = f.file->private_data;
435 440
441 if (!capable(CAP_WAKE_ALARM) && isalarm(ctx)) {
442 fdput(f);
443 return -EPERM;
444 }
445
436 timerfd_setup_cancel(ctx, flags); 446 timerfd_setup_cancel(ctx, flags);
437 447
438 /* 448 /*
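
Seen from userspace, the new checks turn unprivileged use of the alarm clockids into a clean -EPERM instead of silently granting wakeup rights. A minimal test, assuming it runs without CAP_WAKE_ALARM:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/timerfd.h>

    #ifndef CLOCK_REALTIME_ALARM
    #define CLOCK_REALTIME_ALARM 8  /* older libc headers may lack this */
    #endif

    int main(void)
    {
            int fd = timerfd_create(CLOCK_REALTIME_ALARM, 0);

            if (fd < 0 && errno == EPERM)
                    printf("no CAP_WAKE_ALARM: alarm clockids refused\n");
            else if (fd >= 0)
                    printf("CAP_WAKE_ALARM present: fd=%d\n", fd);
            return 0;
    }
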
diff --git a/include/clocksource/timer-sp804.h b/include/clocksource/timer-sp804.h
index 1f8a1caa7cb4..7654d71243dd 100644
--- a/include/clocksource/timer-sp804.h
+++ b/include/clocksource/timer-sp804.h
@@ -3,10 +3,10 @@
3 3
4struct clk; 4struct clk;
5 5
6void __sp804_clocksource_and_sched_clock_init(void __iomem *, 6int __sp804_clocksource_and_sched_clock_init(void __iomem *,
7 const char *, struct clk *, int); 7 const char *, struct clk *, int);
8void __sp804_clockevents_init(void __iomem *, unsigned int, 8int __sp804_clockevents_init(void __iomem *, unsigned int,
9 struct clk *, const char *); 9 struct clk *, const char *);
10void sp804_timer_disable(void __iomem *); 10void sp804_timer_disable(void __iomem *);
11 11
12static inline void sp804_clocksource_init(void __iomem *base, const char *name) 12static inline void sp804_clocksource_init(void __iomem *base, const char *name)
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index 52f3b7da4f2d..9d8031257a90 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -26,10 +26,10 @@ enum alarmtimer_restart {
26 * struct alarm - Alarm timer structure 26 * struct alarm - Alarm timer structure
27 * @node: timerqueue node for adding to the event list this value 27 * @node: timerqueue node for adding to the event list this value
28 * also includes the expiration time. 28 * also includes the expiration time.
29 * @period: Period for recuring alarms 29 * @timer: hrtimer used to schedule events while running
30 * @function: Function pointer to be executed when the timer fires. 30 * @function: Function pointer to be executed when the timer fires.
31 * @type: Alarm type (BOOTTIME/REALTIME) 31 * @type: Alarm type (BOOTTIME/REALTIME).
32 * @enabled: Flag that represents if the alarm is set to fire or not 32 * @state: Flag that represents if the alarm is set to fire or not.
33 * @data: Internal data value. 33 * @data: Internal data value.
34 */ 34 */
35struct alarm { 35struct alarm {
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 0df4a51e1a78..834179f3fa72 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -461,6 +461,10 @@ static inline struct clk *clk_get_parent(struct clk *clk)
461 return NULL; 461 return NULL;
462} 462}
463 463
464static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
465{
466 return NULL;
467}
464#endif 468#endif
465 469
466/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */ 470/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 44a1aff22566..08398182f56e 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -244,7 +244,7 @@ extern int clocksource_mmio_init(void __iomem *, const char *,
244extern int clocksource_i8253_init(void); 244extern int clocksource_i8253_init(void);
245 245
246#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \ 246#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \
247 OF_DECLARE_1(clksrc, name, compat, fn) 247 OF_DECLARE_1_RET(clksrc, name, compat, fn)
248 248
249#ifdef CONFIG_CLKSRC_PROBE 249#ifdef CONFIG_CLKSRC_PROBE
250extern void clocksource_probe(void); 250extern void clocksource_probe(void);
diff --git a/include/linux/list.h b/include/linux/list.h
index 5356f4d661a7..5183138aa932 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -679,6 +679,16 @@ static inline bool hlist_fake(struct hlist_node *h)
679} 679}
680 680
681/* 681/*
682 * Check whether the node is the only node of the head without
683 * accessing head:
684 */
685static inline bool
686hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
687{
688 return !n->next && n->pprev == &h->first;
689}
690
691/*
682 * Move a list from one list head to another. Fixup the pprev 692 * Move a list from one list head to another. Fixup the pprev
683 * reference of the first entry if it exists. 693 * reference of the first entry if it exists.
684 */ 694 */
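
The helper is added for the new wheel, which wants to know when removing a timer leaves its bucket empty (so a pending-bucket bit can be cleared) without dereferencing the possibly cache-cold list head. A usage sketch, loosely modeled on that and with invented names:

    struct entry {
            struct hlist_node node;
    };

    static void remove_entry(struct entry *e, struct hlist_head *bucket,
                             unsigned long *pending_map, unsigned int idx)
    {
            /* Only &bucket->first is taken; the head is never loaded. */
            if (hlist_is_singular_node(&e->node, bucket))
                    __clear_bit(idx, pending_map);
            hlist_del_init(&e->node);
    }
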
diff --git a/include/linux/of.h b/include/linux/of.h
index 74eb28cadbef..15c43f076b23 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -1009,10 +1009,13 @@ static inline int of_get_available_child_count(const struct device_node *np)
1009#endif 1009#endif
1010 1010
1011typedef int (*of_init_fn_2)(struct device_node *, struct device_node *); 1011typedef int (*of_init_fn_2)(struct device_node *, struct device_node *);
1012typedef int (*of_init_fn_1_ret)(struct device_node *);
1012typedef void (*of_init_fn_1)(struct device_node *); 1013typedef void (*of_init_fn_1)(struct device_node *);
1013 1014
1014#define OF_DECLARE_1(table, name, compat, fn) \ 1015#define OF_DECLARE_1(table, name, compat, fn) \
1015 _OF_DECLARE(table, name, compat, fn, of_init_fn_1) 1016 _OF_DECLARE(table, name, compat, fn, of_init_fn_1)
1017#define OF_DECLARE_1_RET(table, name, compat, fn) \
1018 _OF_DECLARE(table, name, compat, fn, of_init_fn_1_ret)
1016#define OF_DECLARE_2(table, name, compat, fn) \ 1019#define OF_DECLARE_2(table, name, compat, fn) \
1017 _OF_DECLARE(table, name, compat, fn, of_init_fn_2) 1020 _OF_DECLARE(table, name, compat, fn, of_init_fn_2)
1018 1021
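
OF_DECLARE_1_RET gives the table machinery a typedef to check the new int-returning init functions against: _OF_DECLARE() compares the callback with a NULL pointer of the declared type, so a leftover void-returning init now fails at compile time instead of being silently entered into the clksrc table. A sketch with hypothetical timers:

    static int __init good_init(struct device_node *np)
    {
            return 0;                       /* matches of_init_fn_1_ret */
    }
    CLOCKSOURCE_OF_DECLARE(good, "acme,good-timer", good_init);

    static void __init stale_init(struct device_node *np) { }
    /*
     * CLOCKSOURCE_OF_DECLARE(stale, "acme,stale-timer", stale_init);
     * would now trip the type check: stale_init is an of_init_fn_1,
     * not an of_init_fn_1_ret.
     */
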
diff --git a/include/linux/time.h b/include/linux/time.h
index 297f09f23896..4cea09d94208 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -205,7 +205,20 @@ struct tm {
205 int tm_yday; 205 int tm_yday;
206}; 206};
207 207
208void time_to_tm(time_t totalsecs, int offset, struct tm *result); 208void time64_to_tm(time64_t totalsecs, int offset, struct tm *result);
209
210/**
211 * time_to_tm - converts the calendar time to local broken-down time
212 *
213 * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970,
214 * Coordinated Universal Time (UTC).
215 * @offset offset seconds adding to totalsecs.
216 * @result pointer to struct tm variable to receive broken-down time
217 */
218static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result)
219{
220 time64_to_tm(totalsecs, offset, result);
221}
209 222
210/** 223/**
211 * timespec_to_ns - Convert timespec to nanoseconds 224 * timespec_to_ns - Convert timespec to nanoseconds
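
time64_to_tm() takes the conversion safely past the 2038 rollover of a 32-bit time_t, while the old time_to_tm() survives as an inline wrapper so existing callers keep building. A worked example (UTC, offset 0):

    struct tm tm;
    time64_t stamp = 2147483648LL;  /* 2^31: one past the 32-bit limit */

    time64_to_tm(stamp, 0, &tm);
    /*
     * 2038-01-19 03:14:08 UTC:
     *   tm.tm_year == 138  (years since 1900)
     *   tm.tm_mon  == 0    (January)
     *   tm.tm_mday == 19
     */
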
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 20ac746f3eb3..4419506b564e 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -19,7 +19,6 @@ struct timer_list {
19 void (*function)(unsigned long); 19 void (*function)(unsigned long);
20 unsigned long data; 20 unsigned long data;
21 u32 flags; 21 u32 flags;
22 int slack;
23 22
24#ifdef CONFIG_TIMER_STATS 23#ifdef CONFIG_TIMER_STATS
25 int start_pid; 24 int start_pid;
@@ -58,11 +57,14 @@ struct timer_list {
58 * workqueue locking issues. It's not meant for executing random crap 57 * workqueue locking issues. It's not meant for executing random crap
59 * with interrupts disabled. Abuse is monitored! 58 * with interrupts disabled. Abuse is monitored!
60 */ 59 */
61#define TIMER_CPUMASK 0x0007FFFF 60#define TIMER_CPUMASK 0x0003FFFF
62#define TIMER_MIGRATING 0x00080000 61#define TIMER_MIGRATING 0x00040000
63#define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING) 62#define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING)
64#define TIMER_DEFERRABLE 0x00100000 63#define TIMER_DEFERRABLE 0x00080000
64#define TIMER_PINNED 0x00100000
65#define TIMER_IRQSAFE 0x00200000 65#define TIMER_IRQSAFE 0x00200000
66#define TIMER_ARRAYSHIFT 22
67#define TIMER_ARRAYMASK 0xFFC00000
66 68
67#define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \ 69#define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
68 .entry = { .next = TIMER_ENTRY_STATIC }, \ 70 .entry = { .next = TIMER_ENTRY_STATIC }, \
@@ -70,7 +72,6 @@ struct timer_list {
70 .expires = (_expires), \ 72 .expires = (_expires), \
71 .data = (_data), \ 73 .data = (_data), \
72 .flags = (_flags), \ 74 .flags = (_flags), \
73 .slack = -1, \
74 __TIMER_LOCKDEP_MAP_INITIALIZER( \ 75 __TIMER_LOCKDEP_MAP_INITIALIZER( \
75 __FILE__ ":" __stringify(__LINE__)) \ 76 __FILE__ ":" __stringify(__LINE__)) \
76 } 77 }
@@ -78,9 +79,15 @@ struct timer_list {
78#define TIMER_INITIALIZER(_function, _expires, _data) \ 79#define TIMER_INITIALIZER(_function, _expires, _data) \
79 __TIMER_INITIALIZER((_function), (_expires), (_data), 0) 80 __TIMER_INITIALIZER((_function), (_expires), (_data), 0)
80 81
82#define TIMER_PINNED_INITIALIZER(_function, _expires, _data) \
83 __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_PINNED)
84
81#define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) \ 85#define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) \
82 __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE) 86 __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE)
83 87
88#define TIMER_PINNED_DEFERRED_INITIALIZER(_function, _expires, _data) \
89 __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE | TIMER_PINNED)
90
84#define DEFINE_TIMER(_name, _function, _expires, _data) \ 91#define DEFINE_TIMER(_name, _function, _expires, _data) \
85 struct timer_list _name = \ 92 struct timer_list _name = \
86 TIMER_INITIALIZER(_function, _expires, _data) 93 TIMER_INITIALIZER(_function, _expires, _data)
@@ -124,8 +131,12 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
124 131
125#define init_timer(timer) \ 132#define init_timer(timer) \
126 __init_timer((timer), 0) 133 __init_timer((timer), 0)
134#define init_timer_pinned(timer) \
135 __init_timer((timer), TIMER_PINNED)
127#define init_timer_deferrable(timer) \ 136#define init_timer_deferrable(timer) \
128 __init_timer((timer), TIMER_DEFERRABLE) 137 __init_timer((timer), TIMER_DEFERRABLE)
138#define init_timer_pinned_deferrable(timer) \
139 __init_timer((timer), TIMER_DEFERRABLE | TIMER_PINNED)
129#define init_timer_on_stack(timer) \ 140#define init_timer_on_stack(timer) \
130 __init_timer_on_stack((timer), 0) 141 __init_timer_on_stack((timer), 0)
131 142
@@ -145,12 +156,20 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
145 156
146#define setup_timer(timer, fn, data) \ 157#define setup_timer(timer, fn, data) \
147 __setup_timer((timer), (fn), (data), 0) 158 __setup_timer((timer), (fn), (data), 0)
159#define setup_pinned_timer(timer, fn, data) \
160 __setup_timer((timer), (fn), (data), TIMER_PINNED)
148#define setup_deferrable_timer(timer, fn, data) \ 161#define setup_deferrable_timer(timer, fn, data) \
149 __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE) 162 __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE)
163#define setup_pinned_deferrable_timer(timer, fn, data) \
164 __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
150#define setup_timer_on_stack(timer, fn, data) \ 165#define setup_timer_on_stack(timer, fn, data) \
151 __setup_timer_on_stack((timer), (fn), (data), 0) 166 __setup_timer_on_stack((timer), (fn), (data), 0)
167#define setup_pinned_timer_on_stack(timer, fn, data) \
168 __setup_timer_on_stack((timer), (fn), (data), TIMER_PINNED)
152#define setup_deferrable_timer_on_stack(timer, fn, data) \ 169#define setup_deferrable_timer_on_stack(timer, fn, data) \
153 __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE) 170 __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE)
171#define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \
172 __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
154 173
155/** 174/**
156 * timer_pending - is a timer pending? 175 * timer_pending - is a timer pending?
@@ -171,12 +190,7 @@ extern void add_timer_on(struct timer_list *timer, int cpu);
171extern int del_timer(struct timer_list * timer); 190extern int del_timer(struct timer_list * timer);
172extern int mod_timer(struct timer_list *timer, unsigned long expires); 191extern int mod_timer(struct timer_list *timer, unsigned long expires);
173extern int mod_timer_pending(struct timer_list *timer, unsigned long expires); 192extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
174extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
175
176extern void set_timer_slack(struct timer_list *time, int slack_hz);
177 193
178#define TIMER_NOT_PINNED 0
179#define TIMER_PINNED 1
180/* 194/*
181 * The jiffies value which is added to now, when there is no timer 195 * The jiffies value which is added to now, when there is no timer
182 * in the timer wheel: 196 * in the timer wheel:
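
After this change the u32 flags word carries three things at once: the owning CPU in the low 18 bits, the behavioral flag bits, and, newly, the wheel bucket index in the top 10 bits, maintained by the timer_get_idx()/timer_set_idx() helpers in the kernel/time/timer.c hunk below. A worked decoding, assuming the masks defined above:

    u32 flags = 5 | TIMER_PINNED | (130u << TIMER_ARRAYSHIFT);

    unsigned int cpu = flags & TIMER_CPUMASK;                         /* 5 */
    bool pinned = flags & TIMER_PINNED;                               /* true */
    unsigned int idx = (flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT; /* 130 */
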
diff --git a/kernel/signal.c b/kernel/signal.c
index 96e9bc40667f..af21afc00d08 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2751,23 +2751,18 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2751 * @ts: upper bound on process time suspension 2751 * @ts: upper bound on process time suspension
2752 */ 2752 */
2753int do_sigtimedwait(const sigset_t *which, siginfo_t *info, 2753int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2754 const struct timespec *ts) 2754 const struct timespec *ts)
2755{ 2755{
2756 ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX };
2756 struct task_struct *tsk = current; 2757 struct task_struct *tsk = current;
2757 long timeout = MAX_SCHEDULE_TIMEOUT;
2758 sigset_t mask = *which; 2758 sigset_t mask = *which;
2759 int sig; 2759 int sig, ret = 0;
2760 2760
2761 if (ts) { 2761 if (ts) {
2762 if (!timespec_valid(ts)) 2762 if (!timespec_valid(ts))
2763 return -EINVAL; 2763 return -EINVAL;
2764 timeout = timespec_to_jiffies(ts); 2764 timeout = timespec_to_ktime(*ts);
2765 /* 2765 to = &timeout;
2766 * We can be close to the next tick, add another one
2767 * to ensure we will wait at least the time asked for.
2768 */
2769 if (ts->tv_sec || ts->tv_nsec)
2770 timeout++;
2771 } 2766 }
2772 2767
2773 /* 2768 /*
@@ -2778,7 +2773,7 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2778 2773
2779 spin_lock_irq(&tsk->sighand->siglock); 2774 spin_lock_irq(&tsk->sighand->siglock);
2780 sig = dequeue_signal(tsk, &mask, info); 2775 sig = dequeue_signal(tsk, &mask, info);
2781 if (!sig && timeout) { 2776 if (!sig && timeout.tv64) {
2782 /* 2777 /*
2783 * None ready, temporarily unblock those we're interested 2778 * None ready, temporarily unblock those we're interested
2784 * while we are sleeping in so that we'll be awakened when 2779 * while we are sleeping in so that we'll be awakened when
@@ -2790,8 +2785,9 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2790 recalc_sigpending(); 2785 recalc_sigpending();
2791 spin_unlock_irq(&tsk->sighand->siglock); 2786 spin_unlock_irq(&tsk->sighand->siglock);
2792 2787
2793 timeout = freezable_schedule_timeout_interruptible(timeout); 2788 __set_current_state(TASK_INTERRUPTIBLE);
2794 2789 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
2790 HRTIMER_MODE_REL);
2795 spin_lock_irq(&tsk->sighand->siglock); 2791 spin_lock_irq(&tsk->sighand->siglock);
2796 __set_task_blocked(tsk, &tsk->real_blocked); 2792 __set_task_blocked(tsk, &tsk->real_blocked);
2797 sigemptyset(&tsk->real_blocked); 2793 sigemptyset(&tsk->real_blocked);
@@ -2801,7 +2797,7 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2801 2797
2802 if (sig) 2798 if (sig)
2803 return sig; 2799 return sig;
2804 return timeout ? -EINTR : -EAGAIN; 2800 return ret ? -EINTR : -EAGAIN;
2805} 2801}
2806 2802
2807/** 2803/**
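
For userspace the visible change is precision: the timeout is handed to an hrtimer bounded only by the task's timer_slack_ns, instead of being rounded up to jiffies plus one safety tick. A small probe, assuming no SIGUSR1 is delivered:

    #include <signal.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            sigset_t set;
            siginfo_t info;
            struct timespec ts = { .tv_sec = 0, .tv_nsec = 2000000 }; /* 2 ms */

            sigemptyset(&set);
            sigaddset(&set, SIGUSR1);
            sigprocmask(SIG_BLOCK, &set, NULL);

            /* With the hrtimer-based wait this returns after ~2 ms even at
             * HZ=100 (10 ms ticks); the jiffies path waited two full ticks. */
            if (sigtimedwait(&set, &info, &ts) < 0)
                    perror("sigtimedwait");  /* EAGAIN on timeout */
            return 0;
    }
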
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index e840ed867a5d..c3aad685bbc0 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -30,7 +30,6 @@
30 * struct alarm_base - Alarm timer bases 30 * struct alarm_base - Alarm timer bases
 31 * @lock: Lock for synchronized access to the base 31 * @lock: Lock for synchronized access to the base
32 * @timerqueue: Timerqueue head managing the list of events 32 * @timerqueue: Timerqueue head managing the list of events
33 * @timer: hrtimer used to schedule events while running
34 * @gettime: Function to read the time correlating to the base 33 * @gettime: Function to read the time correlating to the base
35 * @base_clockid: clockid for the base 34 * @base_clockid: clockid for the base
36 */ 35 */
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index a9b76a40319e..2c5bc77c0bb0 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -645,7 +645,7 @@ void tick_cleanup_dead_cpu(int cpu)
645#endif 645#endif
646 646
647#ifdef CONFIG_SYSFS 647#ifdef CONFIG_SYSFS
648struct bus_type clockevents_subsys = { 648static struct bus_type clockevents_subsys = {
649 .name = "clockevents", 649 .name = "clockevents",
650 .dev_name = "clockevent", 650 .dev_name = "clockevent",
651}; 651};
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 56ece145a814..6a5a310a1a53 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -669,10 +669,12 @@ static void clocksource_enqueue(struct clocksource *cs)
669 struct list_head *entry = &clocksource_list; 669 struct list_head *entry = &clocksource_list;
670 struct clocksource *tmp; 670 struct clocksource *tmp;
671 671
672 list_for_each_entry(tmp, &clocksource_list, list) 672 list_for_each_entry(tmp, &clocksource_list, list) {
 673 /* Keep track of where to insert */ 673 /* Keep track of where to insert */
674 if (tmp->rating >= cs->rating) 674 if (tmp->rating < cs->rating)
675 entry = &tmp->list; 675 break;
676 entry = &tmp->list;
677 }
676 list_add(&cs->list, entry); 678 list_add(&cs->list, entry);
677} 679}
678 680
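
The rewritten loop keeps the list sorted by descending rating but stops at the first lower-rated entry instead of walking the whole list; a new clocksource still lands behind existing entries of equal rating, preserving registration order among equals. A worked trace:

    list before:   [400] -> [350]a -> [350]b -> [100]
    inserting:     cs->rating == 350

    tmp = [400]:  400 >= 350, entry = &[400].list
    tmp = [350]a: 350 >= 350, entry = &[350]a.list
    tmp = [350]b: 350 >= 350, entry = &[350]b.list
    tmp = [100]:  100 <  350, break

    list_add(&cs->list, entry):
    [400] -> [350]a -> [350]b -> [cs 350] -> [100]
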
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index e99df0ff1d42..d13c9aebf7a3 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -177,7 +177,7 @@ hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
177#endif 177#endif
178} 178}
179 179
180#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) 180#ifdef CONFIG_NO_HZ_COMMON
181static inline 181static inline
182struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base, 182struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
183 int pinned) 183 int pinned)
diff --git a/kernel/time/test_udelay.c b/kernel/time/test_udelay.c
index e622ba365a13..b0928ab3270f 100644
--- a/kernel/time/test_udelay.c
+++ b/kernel/time/test_udelay.c
@@ -43,13 +43,13 @@ static int udelay_test_single(struct seq_file *s, int usecs, uint32_t iters)
43 int allowed_error_ns = usecs * 5; 43 int allowed_error_ns = usecs * 5;
44 44
45 for (i = 0; i < iters; ++i) { 45 for (i = 0; i < iters; ++i) {
46 struct timespec ts1, ts2; 46 s64 kt1, kt2;
47 int time_passed; 47 int time_passed;
48 48
49 ktime_get_ts(&ts1); 49 kt1 = ktime_get_ns();
50 udelay(usecs); 50 udelay(usecs);
51 ktime_get_ts(&ts2); 51 kt2 = ktime_get_ns();
52 time_passed = timespec_to_ns(&ts2) - timespec_to_ns(&ts1); 52 time_passed = kt2 - kt1;
53 53
54 if (i == 0 || time_passed < min) 54 if (i == 0 || time_passed < min)
55 min = time_passed; 55 min = time_passed;
@@ -87,11 +87,11 @@ static int udelay_test_show(struct seq_file *s, void *v)
87 if (usecs > 0 && iters > 0) { 87 if (usecs > 0 && iters > 0) {
88 return udelay_test_single(s, usecs, iters); 88 return udelay_test_single(s, usecs, iters);
89 } else if (usecs == 0) { 89 } else if (usecs == 0) {
90 struct timespec ts; 90 struct timespec64 ts;
91 91
92 ktime_get_ts(&ts); 92 ktime_get_ts64(&ts);
93 seq_printf(s, "udelay() test (lpj=%ld kt=%ld.%09ld)\n", 93 seq_printf(s, "udelay() test (lpj=%ld kt=%lld.%09ld)\n",
94 loops_per_jiffy, ts.tv_sec, ts.tv_nsec); 94 loops_per_jiffy, (s64)ts.tv_sec, ts.tv_nsec);
95 seq_puts(s, "usage:\n"); 95 seq_puts(s, "usage:\n");
96 seq_puts(s, "echo USECS [ITERS] > " DEBUGFS_FILENAME "\n"); 96 seq_puts(s, "echo USECS [ITERS] > " DEBUGFS_FILENAME "\n");
97 seq_puts(s, "cat " DEBUGFS_FILENAME "\n"); 97 seq_puts(s, "cat " DEBUGFS_FILENAME "\n");
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index 53d7184da0be..690b797f522e 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -75,6 +75,7 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
75} 75}
76 76
77static struct clock_event_device ce_broadcast_hrtimer = { 77static struct clock_event_device ce_broadcast_hrtimer = {
78 .name = "bc_hrtimer",
78 .set_state_shutdown = bc_shutdown, 79 .set_state_shutdown = bc_shutdown,
79 .set_next_ktime = bc_set_next, 80 .set_next_ktime = bc_set_next,
80 .features = CLOCK_EVT_FEAT_ONESHOT | 81 .features = CLOCK_EVT_FEAT_ONESHOT |
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 966a5a6fdd0a..f738251000fe 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -164,3 +164,4 @@ static inline void timers_update_migration(bool update_nohz) { }
164DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases); 164DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
165 165
166extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem); 166extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
167void timer_clear_idle(void);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 536ada80f6dd..204fdc86863d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -31,7 +31,7 @@
31#include <trace/events/timer.h> 31#include <trace/events/timer.h>
32 32
33/* 33/*
34 * Per cpu nohz control structure 34 * Per-CPU nohz control structure
35 */ 35 */
36static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched); 36static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
37 37
@@ -61,7 +61,7 @@ static void tick_do_update_jiffies64(ktime_t now)
61 if (delta.tv64 < tick_period.tv64) 61 if (delta.tv64 < tick_period.tv64)
62 return; 62 return;
63 63
64 /* Reevalute with jiffies_lock held */ 64 /* Reevaluate with jiffies_lock held */
65 write_seqlock(&jiffies_lock); 65 write_seqlock(&jiffies_lock);
66 66
67 delta = ktime_sub(now, last_jiffies_update); 67 delta = ktime_sub(now, last_jiffies_update);
@@ -116,8 +116,8 @@ static void tick_sched_do_timer(ktime_t now)
116#ifdef CONFIG_NO_HZ_COMMON 116#ifdef CONFIG_NO_HZ_COMMON
117 /* 117 /*
118 * Check if the do_timer duty was dropped. We don't care about 118 * Check if the do_timer duty was dropped. We don't care about
119 * concurrency: This happens only when the cpu in charge went 119 * concurrency: This happens only when the CPU in charge went
120 * into a long sleep. If two cpus happen to assign themself to 120 * into a long sleep. If two CPUs happen to assign themselves to
121 * this duty, then the jiffies update is still serialized by 121 * this duty, then the jiffies update is still serialized by
122 * jiffies_lock. 122 * jiffies_lock.
123 */ 123 */
@@ -349,7 +349,7 @@ void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bi
349/* 349/*
350 * Re-evaluate the need for the tick as we switch the current task. 350 * Re-evaluate the need for the tick as we switch the current task.
351 * It might need the tick due to per task/process properties: 351 * It might need the tick due to per task/process properties:
352 * perf events, posix cpu timers, ... 352 * perf events, posix CPU timers, ...
353 */ 353 */
354void __tick_nohz_task_switch(void) 354void __tick_nohz_task_switch(void)
355{ 355{
@@ -509,8 +509,8 @@ int tick_nohz_tick_stopped(void)
509 * 509 *
510 * In case the sched_tick was stopped on this CPU, we have to check if jiffies 510 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
511 * must be updated. Otherwise an interrupt handler could use a stale jiffy 511 * must be updated. Otherwise an interrupt handler could use a stale jiffy
512 * value. We do this unconditionally on any cpu, as we don't know whether the 512 * value. We do this unconditionally on any CPU, as we don't know whether the
513 * cpu, which has the update task assigned is in a long sleep. 513 * CPU, which has the update task assigned is in a long sleep.
514 */ 514 */
515static void tick_nohz_update_jiffies(ktime_t now) 515static void tick_nohz_update_jiffies(ktime_t now)
516{ 516{
@@ -526,7 +526,7 @@ static void tick_nohz_update_jiffies(ktime_t now)
526} 526}
527 527
528/* 528/*
529 * Updates the per cpu time idle statistics counters 529 * Updates the per-CPU time idle statistics counters
530 */ 530 */
531static void 531static void
532update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time) 532update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
@@ -566,12 +566,12 @@ static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
566} 566}
567 567
568/** 568/**
569 * get_cpu_idle_time_us - get the total idle time of a cpu 569 * get_cpu_idle_time_us - get the total idle time of a CPU
570 * @cpu: CPU number to query 570 * @cpu: CPU number to query
571 * @last_update_time: variable to store update time in. Do not update 571 * @last_update_time: variable to store update time in. Do not update
572 * counters if NULL. 572 * counters if NULL.
573 * 573 *
574 * Return the cummulative idle time (since boot) for a given 574 * Return the cumulative idle time (since boot) for a given
575 * CPU, in microseconds. 575 * CPU, in microseconds.
576 * 576 *
577 * This time is measured via accounting rather than sampling, 577 * This time is measured via accounting rather than sampling,
@@ -607,12 +607,12 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
607EXPORT_SYMBOL_GPL(get_cpu_idle_time_us); 607EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
608 608
609/** 609/**
610 * get_cpu_iowait_time_us - get the total iowait time of a cpu 610 * get_cpu_iowait_time_us - get the total iowait time of a CPU
611 * @cpu: CPU number to query 611 * @cpu: CPU number to query
612 * @last_update_time: variable to store update time in. Do not update 612 * @last_update_time: variable to store update time in. Do not update
613 * counters if NULL. 613 * counters if NULL.
614 * 614 *
615 * Return the cummulative iowait time (since boot) for a given 615 * Return the cumulative iowait time (since boot) for a given
616 * CPU, in microseconds. 616 * CPU, in microseconds.
617 * 617 *
618 * This time is measured via accounting rather than sampling, 618 * This time is measured via accounting rather than sampling,
@@ -700,6 +700,12 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
700 delta = next_tick - basemono; 700 delta = next_tick - basemono;
701 if (delta <= (u64)TICK_NSEC) { 701 if (delta <= (u64)TICK_NSEC) {
702 tick.tv64 = 0; 702 tick.tv64 = 0;
703
704 /*
705 * Tell the timer code that the base is not idle, i.e. undo
706 * the effect of get_next_timer_interrupt():
707 */
708 timer_clear_idle();
703 /* 709 /*
704 * We've not stopped the tick yet, and there's a timer in the 710 * We've not stopped the tick yet, and there's a timer in the
705 * next period, so no point in stopping it either, bail. 711 * next period, so no point in stopping it either, bail.
@@ -726,14 +732,14 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
726 } 732 }
727 733
728 /* 734 /*
729 * If this cpu is the one which updates jiffies, then give up 735 * If this CPU is the one which updates jiffies, then give up
730 * the assignment and let it be taken by the cpu which runs 736 * the assignment and let it be taken by the CPU which runs
731 * the tick timer next, which might be this cpu as well. If we 737 * the tick timer next, which might be this CPU as well. If we
732 * don't drop this here the jiffies might be stale and 738 * don't drop this here the jiffies might be stale and
733 * do_timer() never invoked. Keep track of the fact that it 739 * do_timer() never invoked. Keep track of the fact that it
734 * was the one which had the do_timer() duty last. If this cpu 740 * was the one which had the do_timer() duty last. If this CPU
735 * is the one which had the do_timer() duty last, we limit the 741 * is the one which had the do_timer() duty last, we limit the
736 * sleep time to the timekeeping max_deferement value. 742 * sleep time to the timekeeping max_deferment value.
737 * Otherwise we can sleep as long as we want. 743 * Otherwise we can sleep as long as we want.
738 */ 744 */
739 delta = timekeeping_max_deferment(); 745 delta = timekeeping_max_deferment();
@@ -809,6 +815,12 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
809 tick_do_update_jiffies64(now); 815 tick_do_update_jiffies64(now);
810 cpu_load_update_nohz_stop(); 816 cpu_load_update_nohz_stop();
811 817
818 /*
819 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
820 * the clock forward checks in the enqueue path:
821 */
822 timer_clear_idle();
823
812 calc_load_exit_idle(); 824 calc_load_exit_idle();
813 touch_softlockup_watchdog_sched(); 825 touch_softlockup_watchdog_sched();
814 /* 826 /*
@@ -841,9 +853,9 @@ static void tick_nohz_full_update_tick(struct tick_sched *ts)
841static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) 853static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
842{ 854{
843 /* 855 /*
844 * If this cpu is offline and it is the one which updates 856 * If this CPU is offline and it is the one which updates
845 * jiffies, then give up the assignment and let it be taken by 857 * jiffies, then give up the assignment and let it be taken by
846 * the cpu which runs the tick timer next. If we don't drop 858 * the CPU which runs the tick timer next. If we don't drop
847 * this here the jiffies might be stale and do_timer() never 859 * this here the jiffies might be stale and do_timer() never
848 * invoked. 860 * invoked.
849 */ 861 */
@@ -896,11 +908,10 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
896 ktime_t now, expires; 908 ktime_t now, expires;
897 int cpu = smp_processor_id(); 909 int cpu = smp_processor_id();
898 910
899 now = tick_nohz_start_idle(ts);
900
901 if (can_stop_idle_tick(cpu, ts)) { 911 if (can_stop_idle_tick(cpu, ts)) {
902 int was_stopped = ts->tick_stopped; 912 int was_stopped = ts->tick_stopped;
903 913
914 now = tick_nohz_start_idle(ts);
904 ts->idle_calls++; 915 ts->idle_calls++;
905 916
906 expires = tick_nohz_stop_sched_tick(ts, now, cpu); 917 expires = tick_nohz_stop_sched_tick(ts, now, cpu);
@@ -933,11 +944,11 @@ void tick_nohz_idle_enter(void)
933 WARN_ON_ONCE(irqs_disabled()); 944 WARN_ON_ONCE(irqs_disabled());
934 945
935 /* 946 /*
936 * Update the idle state in the scheduler domain hierarchy 947 * Update the idle state in the scheduler domain hierarchy
937 * when tick_nohz_stop_sched_tick() is called from the idle loop. 948 * when tick_nohz_stop_sched_tick() is called from the idle loop.
938 * State will be updated to busy during the first busy tick after 949 * State will be updated to busy during the first busy tick after
939 * exiting idle. 950 * exiting idle.
940 */ 951 */
941 set_cpu_sd_state_idle(); 952 set_cpu_sd_state_idle();
942 953
943 local_irq_disable(); 954 local_irq_disable();
@@ -1092,35 +1103,6 @@ static void tick_nohz_switch_to_nohz(void)
1092 tick_nohz_activate(ts, NOHZ_MODE_LOWRES); 1103 tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
1093} 1104}
1094 1105
1095/*
1096 * When NOHZ is enabled and the tick is stopped, we need to kick the
1097 * tick timer from irq_enter() so that the jiffies update is kept
1098 * alive during long running softirqs. That's ugly as hell, but
1099 * correctness is key even if we need to fix the offending softirq in
1100 * the first place.
1101 *
1102 * Note, this is different to tick_nohz_restart. We just kick the
1103 * timer and do not touch the other magic bits which need to be done
1104 * when idle is left.
1105 */
1106static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
1107{
1108#if 0
1109 /* Switch back to 2.6.27 behaviour */
1110 ktime_t delta;
1111
1112 /*
1113 * Do not touch the tick device, when the next expiry is either
1114 * already reached or less/equal than the tick period.
1115 */
1116 delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
1117 if (delta.tv64 <= tick_period.tv64)
1118 return;
1119
1120 tick_nohz_restart(ts, now);
1121#endif
1122}
1123
1124static inline void tick_nohz_irq_enter(void) 1106static inline void tick_nohz_irq_enter(void)
1125{ 1107{
1126 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); 1108 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
@@ -1131,10 +1113,8 @@ static inline void tick_nohz_irq_enter(void)
1131 now = ktime_get(); 1113 now = ktime_get();
1132 if (ts->idle_active) 1114 if (ts->idle_active)
1133 tick_nohz_stop_idle(ts, now); 1115 tick_nohz_stop_idle(ts, now);
1134 if (ts->tick_stopped) { 1116 if (ts->tick_stopped)
1135 tick_nohz_update_jiffies(now); 1117 tick_nohz_update_jiffies(now);
1136 tick_nohz_kick_tick(ts, now);
1137 }
1138} 1118}
1139 1119
1140#else 1120#else
@@ -1211,7 +1191,7 @@ void tick_setup_sched_timer(void)
1211 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); 1191 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1212 ts->sched_timer.function = tick_sched_timer; 1192 ts->sched_timer.function = tick_sched_timer;
1213 1193
1214 /* Get the next period (per cpu) */ 1194 /* Get the next period (per-CPU) */
1215 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); 1195 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
1216 1196
1217 /* Offset the tick to avert jiffies_lock contention. */ 1197 /* Offset the tick to avert jiffies_lock contention. */
diff --git a/kernel/time/timeconv.c b/kernel/time/timeconv.c
index 86628e755f38..7142580ad94f 100644
--- a/kernel/time/timeconv.c
+++ b/kernel/time/timeconv.c
@@ -67,20 +67,21 @@ static const unsigned short __mon_yday[2][13] = {
67#define SECS_PER_DAY (SECS_PER_HOUR * 24) 67#define SECS_PER_DAY (SECS_PER_HOUR * 24)
68 68
69/** 69/**
70 * time_to_tm - converts the calendar time to local broken-down time 70 * time64_to_tm - converts the calendar time to local broken-down time
71 * 71 *
72 * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970, 72 * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970,
73 * Coordinated Universal Time (UTC). 73 * Coordinated Universal Time (UTC).
74 * @offset offset seconds adding to totalsecs. 74 * @offset offset seconds adding to totalsecs.
75 * @result pointer to struct tm variable to receive broken-down time 75 * @result pointer to struct tm variable to receive broken-down time
76 */ 76 */
77void time_to_tm(time_t totalsecs, int offset, struct tm *result) 77void time64_to_tm(time64_t totalsecs, int offset, struct tm *result)
78{ 78{
79 long days, rem, y; 79 long days, rem, y;
80 int remainder;
80 const unsigned short *ip; 81 const unsigned short *ip;
81 82
82 days = totalsecs / SECS_PER_DAY; 83 days = div_s64_rem(totalsecs, SECS_PER_DAY, &remainder);
83 rem = totalsecs % SECS_PER_DAY; 84 rem = remainder;
84 rem += offset; 85 rem += offset;
85 while (rem < 0) { 86 while (rem < 0) {
86 rem += SECS_PER_DAY; 87 rem += SECS_PER_DAY;
@@ -124,4 +125,4 @@ void time_to_tm(time_t totalsecs, int offset, struct tm *result)
124 result->tm_mon = y; 125 result->tm_mon = y;
125 result->tm_mday = days + 1; 126 result->tm_mday = days + 1;
126} 127}
127EXPORT_SYMBOL(time_to_tm); 128EXPORT_SYMBOL(time64_to_tm);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 255e225393ac..3b65746c7f15 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -480,10 +480,12 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
480 * users are removed, this can be killed. 480 * users are removed, this can be killed.
481 */ 481 */
482 remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1); 482 remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
483 tk->tkr_mono.xtime_nsec -= remainder; 483 if (remainder != 0) {
484 tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift; 484 tk->tkr_mono.xtime_nsec -= remainder;
485 tk->ntp_error += remainder << tk->ntp_error_shift; 485 tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
486 tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift; 486 tk->ntp_error += remainder << tk->ntp_error_shift;
487 tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
488 }
487} 489}
488#else 490#else
489#define old_vsyscall_fixup(tk) 491#define old_vsyscall_fixup(tk)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 3a95f9728778..cb9ab401e2d9 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -59,43 +59,153 @@ __visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
59EXPORT_SYMBOL(jiffies_64); 59EXPORT_SYMBOL(jiffies_64);
60 60
61/* 61/*
62 * per-CPU timer vector definitions: 62 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
 63 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
64 * level has a different granularity.
65 *
66 * The level granularity is: LVL_CLK_DIV ^ lvl
67 * The level clock frequency is: HZ / (LVL_CLK_DIV ^ level)
68 *
69 * The array level of a newly armed timer depends on the relative expiry
 70 * time. The farther away the expiry time is, the higher the array level and
 71 * therefore the coarser the granularity becomes.
72 *
73 * Contrary to the original timer wheel implementation, which aims for 'exact'
74 * expiry of the timers, this implementation removes the need for recascading
75 * the timers into the lower array levels. The previous 'classic' timer wheel
76 * implementation of the kernel already violated the 'exact' expiry by adding
77 * slack to the expiry time to provide batched expiration. The granularity
78 * levels provide implicit batching.
79 *
80 * This is an optimization of the original timer wheel implementation for the
81 * majority of the timer wheel use cases: timeouts. The vast majority of
82 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
83 * the timeout expires it indicates that normal operation is disturbed, so it
84 * does not matter much whether the timeout comes with a slight delay.
85 *
86 * The only exception to this are networking timers with a small expiry
87 * time. They rely on the granularity. Those fit into the first wheel level,
88 * which has HZ granularity.
89 *
 90 * We don't have cascading anymore. Timers with an expiry time above the
 91 * capacity of the last wheel level are force-expired at the maximum timeout
92 * value of the last wheel level. From data sampling we know that the maximum
93 * value observed is 5 days (network connection tracking), so this should not
94 * be an issue.
95 *
96 * The currently chosen array constants values are a good compromise between
97 * array size and granularity.
98 *
99 * This results in the following granularity and range levels:
100 *
101 * HZ 1000 steps
102 * Level Offset Granularity Range
103 * 0 0 1 ms 0 ms - 63 ms
104 * 1 64 8 ms 64 ms - 511 ms
105 * 2 128 64 ms 512 ms - 4095 ms (512ms - ~4s)
106 * 3 192 512 ms 4096 ms - 32767 ms (~4s - ~32s)
107 * 4 256 4096 ms (~4s) 32768 ms - 262143 ms (~32s - ~4m)
108 * 5 320 32768 ms (~32s) 262144 ms - 2097151 ms (~4m - ~34m)
109 * 6 384 262144 ms (~4m) 2097152 ms - 16777215 ms (~34m - ~4h)
110 * 7 448 2097152 ms (~34m) 16777216 ms - 134217727 ms (~4h - ~1d)
111 * 8 512 16777216 ms (~4h) 134217728 ms - 1073741822 ms (~1d - ~12d)
112 *
113 * HZ 300
114 * Level Offset Granularity Range
115 * 0 0 3 ms 0 ms - 210 ms
116 * 1 64 26 ms 213 ms - 1703 ms (213ms - ~1s)
117 * 2 128 213 ms 1706 ms - 13650 ms (~1s - ~13s)
118 * 3 192 1706 ms (~1s) 13653 ms - 109223 ms (~13s - ~1m)
119 * 4 256 13653 ms (~13s) 109226 ms - 873810 ms (~1m - ~14m)
120 * 5 320 109226 ms (~1m) 873813 ms - 6990503 ms (~14m - ~1h)
121 * 6 384 873813 ms (~14m) 6990506 ms - 55924050 ms (~1h - ~15h)
122 * 7 448 6990506 ms (~1h) 55924053 ms - 447392423 ms (~15h - ~5d)
123 * 8 512 55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
124 *
125 * HZ 250
126 * Level Offset Granularity Range
127 * 0 0 4 ms 0 ms - 255 ms
128 * 1 64 32 ms 256 ms - 2047 ms (256ms - ~2s)
129 * 2 128 256 ms 2048 ms - 16383 ms (~2s - ~16s)
130 * 3 192 2048 ms (~2s) 16384 ms - 131071 ms (~16s - ~2m)
131 * 4 256 16384 ms (~16s) 131072 ms - 1048575 ms (~2m - ~17m)
132 * 5 320 131072 ms (~2m) 1048576 ms - 8388607 ms (~17m - ~2h)
133 * 6 384 1048576 ms (~17m) 8388608 ms - 67108863 ms (~2h - ~18h)
134 * 7 448 8388608 ms (~2h) 67108864 ms - 536870911 ms (~18h - ~6d)
135 * 8 512 67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
136 *
137 * HZ 100
138 * Level Offset Granularity Range
139 * 0 0 10 ms 0 ms - 630 ms
140 * 1 64 80 ms 640 ms - 5110 ms (640ms - ~5s)
141 * 2 128 640 ms 5120 ms - 40950 ms (~5s - ~40s)
142 * 3 192 5120 ms (~5s) 40960 ms - 327670 ms (~40s - ~5m)
143 * 4 256 40960 ms (~40s) 327680 ms - 2621430 ms (~5m - ~43m)
144 * 5 320 327680 ms (~5m) 2621440 ms - 20971510 ms (~43m - ~5h)
145 * 6 384 2621440 ms (~43m) 20971520 ms - 167772150 ms (~5h - ~1d)
146 * 7 448 20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d)
63 */ 147 */
64#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
65#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
66#define TVN_SIZE (1 << TVN_BITS)
67#define TVR_SIZE (1 << TVR_BITS)
68#define TVN_MASK (TVN_SIZE - 1)
69#define TVR_MASK (TVR_SIZE - 1)
70#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
71
72struct tvec {
73 struct hlist_head vec[TVN_SIZE];
74};
75 148
76struct tvec_root { 149/* Clock divisor for the next level */
77 struct hlist_head vec[TVR_SIZE]; 150#define LVL_CLK_SHIFT 3
78}; 151#define LVL_CLK_DIV (1UL << LVL_CLK_SHIFT)
152#define LVL_CLK_MASK (LVL_CLK_DIV - 1)
153#define LVL_SHIFT(n) ((n) * LVL_CLK_SHIFT)
154#define LVL_GRAN(n) (1UL << LVL_SHIFT(n))
79 155
80struct tvec_base { 156/*
81 spinlock_t lock; 157 * The time start value for each level to select the bucket at enqueue
82 struct timer_list *running_timer; 158 * time.
83 unsigned long timer_jiffies; 159 */
84 unsigned long next_timer; 160#define LVL_START(n) ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
85 unsigned long active_timers; 161
86 unsigned long all_timers; 162/* Size of each clock level */
87 int cpu; 163#define LVL_BITS 6
88 bool migration_enabled; 164#define LVL_SIZE (1UL << LVL_BITS)
89 bool nohz_active; 165#define LVL_MASK (LVL_SIZE - 1)
90 struct tvec_root tv1; 166#define LVL_OFFS(n) ((n) * LVL_SIZE)
91 struct tvec tv2; 167
92 struct tvec tv3; 168/* Level depth */
93 struct tvec tv4; 169#if HZ > 100
94 struct tvec tv5; 170# define LVL_DEPTH 9
95} ____cacheline_aligned; 171# else
172# define LVL_DEPTH 8
173#endif
174
175/* The cutoff (max. capacity of the wheel) */
176#define WHEEL_TIMEOUT_CUTOFF (LVL_START(LVL_DEPTH))
177#define WHEEL_TIMEOUT_MAX (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
178
179/*
180 * The resulting wheel size. If NOHZ is configured we allocate two
181 * wheels so we have a separate storage for the deferrable timers.
182 */
183#define WHEEL_SIZE (LVL_SIZE * LVL_DEPTH)
184
185#ifdef CONFIG_NO_HZ_COMMON
186# define NR_BASES 2
187# define BASE_STD 0
188# define BASE_DEF 1
189#else
190# define NR_BASES 1
191# define BASE_STD 0
192# define BASE_DEF 0
193#endif
96 194
195struct timer_base {
196 spinlock_t lock;
197 struct timer_list *running_timer;
198 unsigned long clk;
199 unsigned long next_expiry;
200 unsigned int cpu;
201 bool migration_enabled;
202 bool nohz_active;
203 bool is_idle;
204 DECLARE_BITMAP(pending_map, WHEEL_SIZE);
205 struct hlist_head vectors[WHEEL_SIZE];
206} ____cacheline_aligned;
97 207
98static DEFINE_PER_CPU(struct tvec_base, tvec_bases); 208static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
99 209
100#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) 210#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
101unsigned int sysctl_timer_migration = 1; 211unsigned int sysctl_timer_migration = 1;
@@ -106,15 +216,17 @@ void timers_update_migration(bool update_nohz)
106 unsigned int cpu; 216 unsigned int cpu;
107 217
108 /* Avoid the loop, if nothing to update */ 218 /* Avoid the loop, if nothing to update */
109 if (this_cpu_read(tvec_bases.migration_enabled) == on) 219 if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on)
110 return; 220 return;
111 221
112 for_each_possible_cpu(cpu) { 222 for_each_possible_cpu(cpu) {
113 per_cpu(tvec_bases.migration_enabled, cpu) = on; 223 per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on;
224 per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on;
114 per_cpu(hrtimer_bases.migration_enabled, cpu) = on; 225 per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
115 if (!update_nohz) 226 if (!update_nohz)
116 continue; 227 continue;
117 per_cpu(tvec_bases.nohz_active, cpu) = true; 228 per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true;
229 per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
118 per_cpu(hrtimer_bases.nohz_active, cpu) = true; 230 per_cpu(hrtimer_bases.nohz_active, cpu) = true;
119 } 231 }
120} 232}
@@ -133,20 +245,6 @@ int timer_migration_handler(struct ctl_table *table, int write,
133 mutex_unlock(&mutex); 245 mutex_unlock(&mutex);
134 return ret; 246 return ret;
135} 247}
136
137static inline struct tvec_base *get_target_base(struct tvec_base *base,
138 int pinned)
139{
140 if (pinned || !base->migration_enabled)
141 return this_cpu_ptr(&tvec_bases);
142 return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
143}
144#else
145static inline struct tvec_base *get_target_base(struct tvec_base *base,
146 int pinned)
147{
148 return this_cpu_ptr(&tvec_bases);
149}
150#endif 248#endif
151 249
152static unsigned long round_jiffies_common(unsigned long j, int cpu, 250static unsigned long round_jiffies_common(unsigned long j, int cpu,
@@ -351,101 +449,126 @@ unsigned long round_jiffies_up_relative(unsigned long j)
351} 449}
352EXPORT_SYMBOL_GPL(round_jiffies_up_relative); 450EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
353 451
354/** 452
355 * set_timer_slack - set the allowed slack for a timer 453static inline unsigned int timer_get_idx(struct timer_list *timer)
356 * @timer: the timer to be modified
357 * @slack_hz: the amount of time (in jiffies) allowed for rounding
358 *
359 * Set the amount of time, in jiffies, that a certain timer has
360 * in terms of slack. By setting this value, the timer subsystem
361 * will schedule the actual timer somewhere between
362 * the time mod_timer() asks for, and that time plus the slack.
363 *
364 * By setting the slack to -1, a percentage of the delay is used
365 * instead.
366 */
367void set_timer_slack(struct timer_list *timer, int slack_hz)
368{ 454{
369 timer->slack = slack_hz; 455 return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
370} 456}
371EXPORT_SYMBOL_GPL(set_timer_slack);
372 457
373static void 458static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
374__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
375{ 459{
376 unsigned long expires = timer->expires; 460 timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
377 unsigned long idx = expires - base->timer_jiffies; 461 idx << TIMER_ARRAYSHIFT;
378 struct hlist_head *vec; 462}
379 463
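timer_get_idx()/timer_set_idx() stash the wheel bucket index in the upper bits of timer->flags. TIMER_ARRAYSHIFT and TIMER_ARRAYMASK live in linux/timer.h and are not visible in this hunk; the toy below assumes a shift of 22 and a 10-bit field (enough for WHEEL_SIZE buckets), which is an illustrative assumption, not a quote of the header:

#include <stdio.h>

#define ARRAYSHIFT      22              /* assumed value, see lead-in */
#define ARRAYMASK       0xFFC00000u     /* top 10 bits of a 32-bit flags word */

static unsigned int get_idx(unsigned int flags)
{
        return (flags & ARRAYMASK) >> ARRAYSHIFT;
}

static unsigned int set_idx(unsigned int flags, unsigned int idx)
{
        return (flags & ~ARRAYMASK) | (idx << ARRAYSHIFT);
}

int main(void)
{
        unsigned int flags = 0x7;       /* low bits: CPU number etc. */

        flags = set_idx(flags, 172);
        printf("idx=%u low=%u\n", get_idx(flags), flags & ~ARRAYMASK);
        return 0;
}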
380 if (idx < TVR_SIZE) { 464/*
381 int i = expires & TVR_MASK; 465 * Helper function to calculate the array index for a given expiry
382 vec = base->tv1.vec + i; 466 * time.
383 } else if (idx < 1 << (TVR_BITS + TVN_BITS)) { 467 */
384 int i = (expires >> TVR_BITS) & TVN_MASK; 468static inline unsigned calc_index(unsigned expires, unsigned lvl)
385 vec = base->tv2.vec + i; 469{
386 } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) { 470 expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
387 int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK; 471 return LVL_OFFS(lvl) + (expires & LVL_MASK);
388 vec = base->tv3.vec + i; 472}
389 } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) { 473
390 int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK; 474static int calc_wheel_index(unsigned long expires, unsigned long clk)
391 vec = base->tv4.vec + i; 475{
392 } else if ((signed long) idx < 0) { 476 unsigned long delta = expires - clk;
393 /* 477 unsigned int idx;
394 * Can happen if you add a timer with expires == jiffies, 478
395 * or you set a timer to go off in the past 479 if (delta < LVL_START(1)) {
396 */ 480 idx = calc_index(expires, 0);
397 vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK); 481 } else if (delta < LVL_START(2)) {
482 idx = calc_index(expires, 1);
483 } else if (delta < LVL_START(3)) {
484 idx = calc_index(expires, 2);
485 } else if (delta < LVL_START(4)) {
486 idx = calc_index(expires, 3);
487 } else if (delta < LVL_START(5)) {
488 idx = calc_index(expires, 4);
489 } else if (delta < LVL_START(6)) {
490 idx = calc_index(expires, 5);
491 } else if (delta < LVL_START(7)) {
492 idx = calc_index(expires, 6);
493 } else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
494 idx = calc_index(expires, 7);
495 } else if ((long) delta < 0) {
496 idx = clk & LVL_MASK;
398 } else { 497 } else {
399 int i; 498 /*
400 /* If the timeout is larger than MAX_TVAL (on 64-bit 499 * Force obscenely large timeouts to expire at the
401 * architectures or with CONFIG_BASE_SMALL=1) then we 500 * capacity limit of the wheel.
402 * use the maximum timeout.
403 */ 501 */
404 if (idx > MAX_TVAL) { 502 if (expires >= WHEEL_TIMEOUT_CUTOFF)
405 idx = MAX_TVAL; 503 expires = WHEEL_TIMEOUT_MAX;
406 expires = idx + base->timer_jiffies; 504
407 } 505 idx = calc_index(expires, LVL_DEPTH - 1);
408 i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
409 vec = base->tv5.vec + i;
410 } 506 }
507 return idx;
508}
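The rounding in calc_index() is the heart of the new wheel: the expiry is rounded *up* to the level granularity, so a timer can fire late by up to one level granule but never early. A userspace replica of the same arithmetic:

#include <stdio.h>

#define LVL_CLK_SHIFT   3
#define LVL_SHIFT(n)    ((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)     (1UL << LVL_SHIFT(n))
#define LVL_BITS        6
#define LVL_SIZE        (1UL << LVL_BITS)
#define LVL_MASK        (LVL_SIZE - 1)
#define LVL_OFFS(n)     ((n) * LVL_SIZE)

/* Same arithmetic as calc_index() above. */
static unsigned int calc_index(unsigned int expires, unsigned int lvl)
{
        expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
        return LVL_OFFS(lvl) + (expires & LVL_MASK);
}

int main(void)
{
        /* Level 1 has a granularity of 8 jiffies: expiries one jiffy
         * apart land in the same bucket. */
        printf("%u %u\n", calc_index(1000, 1), calc_index(1001, 1));
        return 0;
}

Both calls print bucket 126, which is why __mod_timer() below can skip the detach/requeue when a re-armed timer stays in its bucket.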
411 509
412 hlist_add_head(&timer->entry, vec); 510/*
511 * Enqueue the timer into the hash bucket, mark it pending in
512 * the bitmap and store the index in the timer flags.
513 */
514static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
515 unsigned int idx)
516{
517 hlist_add_head(&timer->entry, base->vectors + idx);
518 __set_bit(idx, base->pending_map);
519 timer_set_idx(timer, idx);
413} 520}
414 521
415static void internal_add_timer(struct tvec_base *base, struct timer_list *timer) 522static void
523__internal_add_timer(struct timer_base *base, struct timer_list *timer)
416{ 524{
417 /* Advance base->jiffies, if the base is empty */ 525 unsigned int idx;
418 if (!base->all_timers++) 526
419 base->timer_jiffies = jiffies; 527 idx = calc_wheel_index(timer->expires, base->clk);
528 enqueue_timer(base, timer, idx);
529}
530
531static void
532trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
533{
534 if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
535 return;
420 536
421 __internal_add_timer(base, timer);
422 /* 537 /*
423 * Update base->active_timers and base->next_timer 538 * TODO: This wants some optimizing similar to the code below, but we
539 * will do that when we switch from push to pull for deferrable timers.
424 */ 540 */
425 if (!(timer->flags & TIMER_DEFERRABLE)) { 541 if (timer->flags & TIMER_DEFERRABLE) {
426 if (!base->active_timers++ || 542 if (tick_nohz_full_cpu(base->cpu))
427 time_before(timer->expires, base->next_timer)) 543 wake_up_nohz_cpu(base->cpu);
428 base->next_timer = timer->expires; 544 return;
429 } 545 }
430 546
431 /* 547 /*
432 * Check whether the other CPU is in dynticks mode and needs 548 * We might have to IPI the remote CPU if the base is idle and the
433 * to be triggered to reevaluate the timer wheel. 549 * timer is not deferrable. If the other CPU is on the way to idle
434 * We are protected against the other CPU fiddling 550 * then it can't set base->is_idle as we hold the base lock:
435 * with the timer by holding the timer base lock. This also
436 * makes sure that a CPU on the way to stop its tick can not
437 * evaluate the timer wheel.
438 *
439 * Spare the IPI for deferrable timers on idle targets though.
440 * The next busy ticks will take care of it. Except full dynticks
441 * require special care against races with idle_cpu(), lets deal
442 * with that later.
443 */ 551 */
444 if (base->nohz_active) { 552 if (!base->is_idle)
445 if (!(timer->flags & TIMER_DEFERRABLE) || 553 return;
446 tick_nohz_full_cpu(base->cpu)) 554
447 wake_up_nohz_cpu(base->cpu); 555 /* Check whether this is the new first expiring timer: */
448 } 556 if (time_after_eq(timer->expires, base->next_expiry))
557 return;
558
559 /*
560 * Set the next expiry time and kick the CPU so it can reevaluate the
561 * wheel:
562 */
563 base->next_expiry = timer->expires;
564 wake_up_nohz_cpu(base->cpu);
565}
566
567static void
568internal_add_timer(struct timer_base *base, struct timer_list *timer)
569{
570 __internal_add_timer(base, timer);
571 trigger_dyntick_cpu(base, timer);
449} 572}
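Condensing the branches of trigger_dyntick_cpu() into a pure predicate makes the wake-up policy easier to see. This is a sketch, not kernel API: needs_wakeup() is a hypothetical name, and the overall CONFIG_NO_HZ_COMMON/nohz_active precondition of the real function is left out:

#include <stdbool.h>
#include <stdio.h>

static bool needs_wakeup(bool deferrable, bool nohz_full_cpu,
                         bool base_idle, bool becomes_first_expiry)
{
        if (deferrable)                 /* only nohz_full CPUs get kicked */
                return nohz_full_cpu;
        /* Non-deferrable: IPI only an idle base for a new first expiry. */
        return base_idle && becomes_first_expiry;
}

int main(void)
{
        printf("%d\n", needs_wakeup(false, false, true, true)); /* 1: IPI */
        printf("%d\n", needs_wakeup(true, false, true, true));  /* 0: spared */
        return 0;
}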
450 573
451#ifdef CONFIG_TIMER_STATS 574#ifdef CONFIG_TIMER_STATS
@@ -666,7 +789,6 @@ static void do_init_timer(struct timer_list *timer, unsigned int flags,
666{ 789{
667 timer->entry.pprev = NULL; 790 timer->entry.pprev = NULL;
668 timer->flags = flags | raw_smp_processor_id(); 791 timer->flags = flags | raw_smp_processor_id();
669 timer->slack = -1;
670#ifdef CONFIG_TIMER_STATS 792#ifdef CONFIG_TIMER_STATS
671 timer->start_site = NULL; 793 timer->start_site = NULL;
672 timer->start_pid = -1; 794 timer->start_pid = -1;
@@ -706,54 +828,125 @@ static inline void detach_timer(struct timer_list *timer, bool clear_pending)
706 entry->next = LIST_POISON2; 828 entry->next = LIST_POISON2;
707} 829}
708 830
709static inline void 831static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
710detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
711{
712 detach_timer(timer, true);
713 if (!(timer->flags & TIMER_DEFERRABLE))
714 base->active_timers--;
715 base->all_timers--;
716}
717
718static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
719 bool clear_pending) 832 bool clear_pending)
720{ 833{
834 unsigned idx = timer_get_idx(timer);
835
721 if (!timer_pending(timer)) 836 if (!timer_pending(timer))
722 return 0; 837 return 0;
723 838
839 if (hlist_is_singular_node(&timer->entry, base->vectors + idx))
840 __clear_bit(idx, base->pending_map);
841
724 detach_timer(timer, clear_pending); 842 detach_timer(timer, clear_pending);
725 if (!(timer->flags & TIMER_DEFERRABLE)) {
726 base->active_timers--;
727 if (timer->expires == base->next_timer)
728 base->next_timer = base->timer_jiffies;
729 }
730 /* If this was the last timer, advance base->jiffies */
731 if (!--base->all_timers)
732 base->timer_jiffies = jiffies;
733 return 1; 843 return 1;
734} 844}
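The pending_map bookkeeping above only clears a bucket's bit when the removed timer was the last one in that bucket (the hlist_is_singular_node() check). A toy model with a per-bucket occupancy count standing in for the hlist shows the invariant:

#include <stdio.h>

#define WHEEL_SIZE      576     /* LVL_SIZE * LVL_DEPTH for HZ > 100 */

static unsigned int count[WHEEL_SIZE];          /* stand-in for the hlists */
static unsigned char pending[WHEEL_SIZE];       /* stand-in for pending_map */

static void toy_enqueue(unsigned int idx)
{
        count[idx]++;
        pending[idx] = 1;               /* __set_bit(idx, pending_map) */
}

static void toy_detach(unsigned int idx)
{
        if (--count[idx] == 0)          /* hlist_is_singular_node() case */
                pending[idx] = 0;       /* __clear_bit(idx, pending_map) */
}

int main(void)
{
        toy_enqueue(42);
        toy_enqueue(42);
        toy_detach(42);
        printf("pending after first detach: %d\n", pending[42]);        /* 1 */
        toy_detach(42);
        printf("pending after last detach:  %d\n", pending[42]);        /* 0 */
        return 0;
}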
735 845
846static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
847{
848 struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
849
850 /*
851 * If the timer is deferrable and nohz is active then we need to use
852 * the deferrable base.
853 */
854 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
855 (tflags & TIMER_DEFERRABLE))
856 base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
857 return base;
858}
859
860static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
861{
862 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
863
864 /*
865 * If the timer is deferrable and nohz is active then we need to use
866 * the deferrable base.
867 */
868 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
869 (tflags & TIMER_DEFERRABLE))
870 base = this_cpu_ptr(&timer_bases[BASE_DEF]);
871 return base;
872}
873
874static inline struct timer_base *get_timer_base(u32 tflags)
875{
876 return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
877}
878
879#ifdef CONFIG_NO_HZ_COMMON
880static inline struct timer_base *
881__get_target_base(struct timer_base *base, unsigned tflags)
882{
883#ifdef CONFIG_SMP
884 if ((tflags & TIMER_PINNED) || !base->migration_enabled)
885 return get_timer_this_cpu_base(tflags);
886 return get_timer_cpu_base(tflags, get_nohz_timer_target());
887#else
888 return get_timer_this_cpu_base(tflags);
889#endif
890}
891
892static inline void forward_timer_base(struct timer_base *base)
893{
894 /*
895 * We only forward the base when it's idle and we have a delta between
896 * base clock and jiffies.
897 */
898 if (!base->is_idle || (long) (jiffies - base->clk) < 2)
899 return;
900
901 /*
902 * If the next expiry value is > jiffies, then we fast forward to
903 * jiffies; otherwise we forward to the next expiry value.
904 */
905 if (time_after(base->next_expiry, jiffies))
906 base->clk = jiffies;
907 else
908 base->clk = base->next_expiry;
909}
910#else
911static inline struct timer_base *
912__get_target_base(struct timer_base *base, unsigned tflags)
913{
914 return get_timer_this_cpu_base(tflags);
915}
916
917static inline void forward_timer_base(struct timer_base *base) { }
918#endif
919
920static inline struct timer_base *
921get_target_base(struct timer_base *base, unsigned tflags)
922{
923 struct timer_base *target = __get_target_base(base, tflags);
924
925 forward_timer_base(target);
926 return target;
927}
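forward_timer_base() can be condensed to a pure function over the base clock. In this sketch forward_clk() is a hypothetical name, and time_after() is reduced to a plain comparison, which holds as long as jiffies does not wrap during the example:

#include <stdio.h>

static unsigned long forward_clk(unsigned long clk, unsigned long next_expiry,
                                 unsigned long jiffies, int is_idle)
{
        if (!is_idle || (long)(jiffies - clk) < 2)
                return clk;             /* nothing to forward */
        return next_expiry > jiffies ? jiffies : next_expiry;
}

int main(void)
{
        /* Base idle for 100 ticks, first timer due at 1150: forward
         * all the way to jiffies. */
        printf("%lu\n", forward_clk(1000, 1150, 1100, 1));      /* 1100 */
        /* First timer already overdue at 1050: forward only to it. */
        printf("%lu\n", forward_clk(1000, 1050, 1100, 1));      /* 1050 */
        return 0;
}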
928
736/* 929/*
737 * We are using hashed locking: holding per_cpu(tvec_bases).lock 930 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
738 * means that all timers which are tied to this base via timer->base are 931 * that all timers which are tied to this base are locked, and the base itself
739 * locked, and the base itself is locked too. 932 * is locked too.
740 * 933 *
741 * So __run_timers/migrate_timers can safely modify all timers which could 934 * So __run_timers/migrate_timers can safely modify all timers which could
742 * be found on ->tvX lists. 935 * be found in the base->vectors array.
743 * 936 *
744 * When the timer's base is locked and removed from the list, the 937 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
745 * TIMER_MIGRATING flag is set, FIXME 938 * to wait until the migration is done.
746 */ 939 */
747static struct tvec_base *lock_timer_base(struct timer_list *timer, 940static struct timer_base *lock_timer_base(struct timer_list *timer,
748 unsigned long *flags) 941 unsigned long *flags)
749 __acquires(timer->base->lock) 942 __acquires(timer->base->lock)
750{ 943{
751 for (;;) { 944 for (;;) {
945 struct timer_base *base;
752 u32 tf = timer->flags; 946 u32 tf = timer->flags;
753 struct tvec_base *base;
754 947
755 if (!(tf & TIMER_MIGRATING)) { 948 if (!(tf & TIMER_MIGRATING)) {
756 base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK); 949 base = get_timer_base(tf);
757 spin_lock_irqsave(&base->lock, *flags); 950 spin_lock_irqsave(&base->lock, *flags);
758 if (timer->flags == tf) 951 if (timer->flags == tf)
759 return base; 952 return base;
@@ -764,13 +957,41 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
764} 957}
765 958
766static inline int 959static inline int
767__mod_timer(struct timer_list *timer, unsigned long expires, 960__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
768 bool pending_only, int pinned)
769{ 961{
770 struct tvec_base *base, *new_base; 962 struct timer_base *base, *new_base;
771 unsigned long flags; 963 unsigned int idx = UINT_MAX;
964 unsigned long clk = 0, flags;
772 int ret = 0; 965 int ret = 0;
773 966
967 /*
968 * This is a common optimization triggered by the networking code - if
969 * the timer is re-modified to have the same timeout or ends up in the
970 * same array bucket then just return:
971 */
972 if (timer_pending(timer)) {
973 if (timer->expires == expires)
974 return 1;
975 /*
976 * Take the current timer_jiffies of base, but without holding
977 * the lock!
978 */
979 base = get_timer_base(timer->flags);
980 clk = base->clk;
981
982 idx = calc_wheel_index(expires, clk);
983
984 /*
985 * Retrieve and compare the array index of the pending
986 * timer. If it matches set the expiry to the new value so a
987 * subsequent call will exit in the expires check above.
988 */
989 if (idx == timer_get_idx(timer)) {
990 timer->expires = expires;
991 return 1;
992 }
993 }
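The fast path above works because bucket identity, not exact expiry, decides placement. A standalone sketch (simplified: the past-expiry and capacity-cutoff branches of calc_wheel_index() are omitted) shows a re-armed ~1s timer whose expiry moves by a few jiffies without changing bucket:

#include <stdio.h>

#define LVL_CLK_SHIFT   3
#define LVL_SHIFT(n)    ((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)     (1UL << LVL_SHIFT(n))
#define LVL_SIZE        64UL
#define LVL_MASK        (LVL_SIZE - 1)
#define LVL_OFFS(n)     ((n) * LVL_SIZE)
#define LVL_START(n)    ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))

static unsigned int calc_index(unsigned long expires, unsigned int lvl)
{
        expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
        return LVL_OFFS(lvl) + (expires & LVL_MASK);
}

static unsigned int calc_wheel_index(unsigned long expires, unsigned long clk)
{
        unsigned long delta = expires - clk;
        unsigned int lvl = 0;

        while (lvl < 7 && delta >= LVL_START(lvl + 1))
                lvl++;
        return calc_index(expires, lvl);
}

int main(void)
{
        unsigned long clk = 10000;

        printf("%u %u\n", calc_wheel_index(clk + 1000, clk),
               calc_wheel_index(clk + 1004, clk));      /* same bucket */
        return 0;
}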
994
774 timer_stats_timer_set_start_info(timer); 995 timer_stats_timer_set_start_info(timer);
775 BUG_ON(!timer->function); 996 BUG_ON(!timer->function);
776 997
@@ -782,15 +1003,15 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
782 1003
783 debug_activate(timer, expires); 1004 debug_activate(timer, expires);
784 1005
785 new_base = get_target_base(base, pinned); 1006 new_base = get_target_base(base, timer->flags);
786 1007
787 if (base != new_base) { 1008 if (base != new_base) {
788 /* 1009 /*
789 * We are trying to schedule the timer on the local CPU. 1010 * We are trying to schedule the timer on the new base.
790 * However we can't change timer's base while it is running, 1011 * However we can't change timer's base while it is running,
791 * otherwise del_timer_sync() can't detect that the timer's 1012 * otherwise del_timer_sync() can't detect that the timer's
792 * handler yet has not finished. This also guarantees that 1013 * handler has not yet finished. This also guarantees that the
793 * the timer is serialized wrt itself. 1014 * timer is serialized wrt itself.
794 */ 1015 */
795 if (likely(base->running_timer != timer)) { 1016 if (likely(base->running_timer != timer)) {
796 /* See the comment in lock_timer_base() */ 1017 /* See the comment in lock_timer_base() */
@@ -805,7 +1026,18 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
805 } 1026 }
806 1027
807 timer->expires = expires; 1028 timer->expires = expires;
808 internal_add_timer(base, timer); 1029 /*
1030 * If 'idx' was calculated above and the base time did not advance
1031 * between calculating 'idx' and taking the lock, only enqueue_timer()
1032 * and trigger_dyntick_cpu() are required. Otherwise we need to
1033 * (re)calculate the wheel index via internal_add_timer().
1034 */
1035 if (idx != UINT_MAX && clk == base->clk) {
1036 enqueue_timer(base, timer, idx);
1037 trigger_dyntick_cpu(base, timer);
1038 } else {
1039 internal_add_timer(base, timer);
1040 }
809 1041
810out_unlock: 1042out_unlock:
811 spin_unlock_irqrestore(&base->lock, flags); 1043 spin_unlock_irqrestore(&base->lock, flags);
@@ -825,49 +1057,10 @@ out_unlock:
825 */ 1057 */
826int mod_timer_pending(struct timer_list *timer, unsigned long expires) 1058int mod_timer_pending(struct timer_list *timer, unsigned long expires)
827{ 1059{
828 return __mod_timer(timer, expires, true, TIMER_NOT_PINNED); 1060 return __mod_timer(timer, expires, true);
829} 1061}
830EXPORT_SYMBOL(mod_timer_pending); 1062EXPORT_SYMBOL(mod_timer_pending);
831 1063
832/*
833 * Decide where to put the timer while taking the slack into account
834 *
835 * Algorithm:
836 * 1) calculate the maximum (absolute) time
837 * 2) calculate the highest bit where the expires and new max are different
838 * 3) use this bit to make a mask
839 * 4) use the bitmask to round down the maximum time, so that all last
840 * bits are zeros
841 */
842static inline
843unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
844{
845 unsigned long expires_limit, mask;
846 int bit;
847
848 if (timer->slack >= 0) {
849 expires_limit = expires + timer->slack;
850 } else {
851 long delta = expires - jiffies;
852
853 if (delta < 256)
854 return expires;
855
856 expires_limit = expires + delta / 256;
857 }
858 mask = expires ^ expires_limit;
859 if (mask == 0)
860 return expires;
861
862 bit = __fls(mask);
863
864 mask = (1UL << bit) - 1;
865
866 expires_limit = expires_limit & ~(mask);
867
868 return expires_limit;
869}
870
871/** 1064/**
872 * mod_timer - modify a timer's timeout 1065 * mod_timer - modify a timer's timeout
873 * @timer: the timer to be modified 1066 * @timer: the timer to be modified
@@ -890,49 +1083,11 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
890 */ 1083 */
891int mod_timer(struct timer_list *timer, unsigned long expires) 1084int mod_timer(struct timer_list *timer, unsigned long expires)
892{ 1085{
893 expires = apply_slack(timer, expires); 1086 return __mod_timer(timer, expires, false);
894
895 /*
896 * This is a common optimization triggered by the
897 * networking code - if the timer is re-modified
898 * to be the same thing then just return:
899 */
900 if (timer_pending(timer) && timer->expires == expires)
901 return 1;
902
903 return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
904} 1087}
905EXPORT_SYMBOL(mod_timer); 1088EXPORT_SYMBOL(mod_timer);
906 1089
907/** 1090/**
908 * mod_timer_pinned - modify a timer's timeout
909 * @timer: the timer to be modified
910 * @expires: new timeout in jiffies
911 *
912 * mod_timer_pinned() is a way to update the expire field of an
913 * active timer (if the timer is inactive it will be activated)
914 * and to ensure that the timer is scheduled on the current CPU.
915 *
916 * Note that this does not prevent the timer from being migrated
917 * when the current CPU goes offline. If this is a problem for
918 * you, use CPU-hotplug notifiers to handle it correctly, for
919 * example, cancelling the timer when the corresponding CPU goes
920 * offline.
921 *
922 * mod_timer_pinned(timer, expires) is equivalent to:
923 *
924 * del_timer(timer); timer->expires = expires; add_timer(timer);
925 */
926int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
927{
928 if (timer->expires == expires && timer_pending(timer))
929 return 1;
930
931 return __mod_timer(timer, expires, false, TIMER_PINNED);
932}
933EXPORT_SYMBOL(mod_timer_pinned);
934
935/**
936 * add_timer - start a timer 1091 * add_timer - start a timer
937 * @timer: the timer to be added 1092 * @timer: the timer to be added
938 * 1093 *
@@ -962,13 +1117,14 @@ EXPORT_SYMBOL(add_timer);
962 */ 1117 */
963void add_timer_on(struct timer_list *timer, int cpu) 1118void add_timer_on(struct timer_list *timer, int cpu)
964{ 1119{
965 struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu); 1120 struct timer_base *new_base, *base;
966 struct tvec_base *base;
967 unsigned long flags; 1121 unsigned long flags;
968 1122
969 timer_stats_timer_set_start_info(timer); 1123 timer_stats_timer_set_start_info(timer);
970 BUG_ON(timer_pending(timer) || !timer->function); 1124 BUG_ON(timer_pending(timer) || !timer->function);
971 1125
1126 new_base = get_timer_cpu_base(timer->flags, cpu);
1127
972 /* 1128 /*
973 * If @timer was on a different CPU, it should be migrated with the 1129 * If @timer was on a different CPU, it should be migrated with the
974 * old base locked to prevent other operations proceeding with the 1130 * old base locked to prevent other operations proceeding with the
@@ -1004,7 +1160,7 @@ EXPORT_SYMBOL_GPL(add_timer_on);
1004 */ 1160 */
1005int del_timer(struct timer_list *timer) 1161int del_timer(struct timer_list *timer)
1006{ 1162{
1007 struct tvec_base *base; 1163 struct timer_base *base;
1008 unsigned long flags; 1164 unsigned long flags;
1009 int ret = 0; 1165 int ret = 0;
1010 1166
@@ -1030,7 +1186,7 @@ EXPORT_SYMBOL(del_timer);
1030 */ 1186 */
1031int try_to_del_timer_sync(struct timer_list *timer) 1187int try_to_del_timer_sync(struct timer_list *timer)
1032{ 1188{
1033 struct tvec_base *base; 1189 struct timer_base *base;
1034 unsigned long flags; 1190 unsigned long flags;
1035 int ret = -1; 1191 int ret = -1;
1036 1192
@@ -1114,27 +1270,6 @@ int del_timer_sync(struct timer_list *timer)
1114EXPORT_SYMBOL(del_timer_sync); 1270EXPORT_SYMBOL(del_timer_sync);
1115#endif 1271#endif
1116 1272
1117static int cascade(struct tvec_base *base, struct tvec *tv, int index)
1118{
1119 /* cascade all the timers from tv up one level */
1120 struct timer_list *timer;
1121 struct hlist_node *tmp;
1122 struct hlist_head tv_list;
1123
1124 hlist_move_list(tv->vec + index, &tv_list);
1125
1126 /*
1127 * We are removing _all_ timers from the list, so we
1128 * don't have to detach them individually.
1129 */
1130 hlist_for_each_entry_safe(timer, tmp, &tv_list, entry) {
1131 /* No accounting, while moving them */
1132 __internal_add_timer(base, timer);
1133 }
1134
1135 return index;
1136}
1137
1138static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long), 1273static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
1139 unsigned long data) 1274 unsigned long data)
1140{ 1275{
@@ -1178,147 +1313,141 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
1178 } 1313 }
1179} 1314}
1180 1315
1181#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK) 1316static void expire_timers(struct timer_base *base, struct hlist_head *head)
1182
1183/**
1184 * __run_timers - run all expired timers (if any) on this CPU.
1185 * @base: the timer vector to be processed.
1186 *
1187 * This function cascades all vectors and executes all expired timer
1188 * vectors.
1189 */
1190static inline void __run_timers(struct tvec_base *base)
1191{ 1317{
1192 struct timer_list *timer; 1318 while (!hlist_empty(head)) {
1319 struct timer_list *timer;
1320 void (*fn)(unsigned long);
1321 unsigned long data;
1193 1322
1194 spin_lock_irq(&base->lock); 1323 timer = hlist_entry(head->first, struct timer_list, entry);
1324 timer_stats_account_timer(timer);
1195 1325
1196 while (time_after_eq(jiffies, base->timer_jiffies)) { 1326 base->running_timer = timer;
1197 struct hlist_head work_list; 1327 detach_timer(timer, true);
1198 struct hlist_head *head = &work_list;
1199 int index;
1200 1328
1201 if (!base->all_timers) { 1329 fn = timer->function;
1202 base->timer_jiffies = jiffies; 1330 data = timer->data;
1203 break; 1331
1332 if (timer->flags & TIMER_IRQSAFE) {
1333 spin_unlock(&base->lock);
1334 call_timer_fn(timer, fn, data);
1335 spin_lock(&base->lock);
1336 } else {
1337 spin_unlock_irq(&base->lock);
1338 call_timer_fn(timer, fn, data);
1339 spin_lock_irq(&base->lock);
1204 } 1340 }
1341 }
1342}
1205 1343
1206 index = base->timer_jiffies & TVR_MASK; 1344static int __collect_expired_timers(struct timer_base *base,
1345 struct hlist_head *heads)
1346{
1347 unsigned long clk = base->clk;
1348 struct hlist_head *vec;
1349 int i, levels = 0;
1350 unsigned int idx;
1207 1351
1208 /* 1352 for (i = 0; i < LVL_DEPTH; i++) {
1209 * Cascade timers: 1353 idx = (clk & LVL_MASK) + i * LVL_SIZE;
1210 */ 1354
1211 if (!index && 1355 if (__test_and_clear_bit(idx, base->pending_map)) {
1212 (!cascade(base, &base->tv2, INDEX(0))) && 1356 vec = base->vectors + idx;
1213 (!cascade(base, &base->tv3, INDEX(1))) && 1357 hlist_move_list(vec, heads++);
1214 !cascade(base, &base->tv4, INDEX(2))) 1358 levels++;
1215 cascade(base, &base->tv5, INDEX(3));
1216 ++base->timer_jiffies;
1217 hlist_move_list(base->tv1.vec + index, head);
1218 while (!hlist_empty(head)) {
1219 void (*fn)(unsigned long);
1220 unsigned long data;
1221 bool irqsafe;
1222
1223 timer = hlist_entry(head->first, struct timer_list, entry);
1224 fn = timer->function;
1225 data = timer->data;
1226 irqsafe = timer->flags & TIMER_IRQSAFE;
1227
1228 timer_stats_account_timer(timer);
1229
1230 base->running_timer = timer;
1231 detach_expired_timer(timer, base);
1232
1233 if (irqsafe) {
1234 spin_unlock(&base->lock);
1235 call_timer_fn(timer, fn, data);
1236 spin_lock(&base->lock);
1237 } else {
1238 spin_unlock_irq(&base->lock);
1239 call_timer_fn(timer, fn, data);
1240 spin_lock_irq(&base->lock);
1241 }
1242 } 1359 }
1360 /* Is it time to look at the next level? */
1361 if (clk & LVL_CLK_MASK)
1362 break;
1363 /* Shift clock for the next level granularity */
1364 clk >>= LVL_CLK_SHIFT;
1243 } 1365 }
1244 base->running_timer = NULL; 1366 return levels;
1245 spin_unlock_irq(&base->lock);
1246} 1367}
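The level walk in __collect_expired_timers() visits one bucket per level and stops at the first level whose clock has non-zero low bits. Which buckets a given base clock hits can be reproduced standalone:

#include <stdio.h>

#define LVL_BITS        6
#define LVL_SIZE        (1UL << LVL_BITS)
#define LVL_MASK        (LVL_SIZE - 1)
#define LVL_CLK_SHIFT   3
#define LVL_CLK_MASK    ((1UL << LVL_CLK_SHIFT) - 1)
#define LVL_DEPTH       9

int main(void)
{
        unsigned long clk = 512;        /* 2^9: low bits stay zero for 3 shifts */
        int i;

        for (i = 0; i < LVL_DEPTH; i++) {
                printf("level %d: bucket %lu\n",
                       i, (clk & LVL_MASK) + i * LVL_SIZE);
                if (clk & LVL_CLK_MASK)
                        break;          /* upper levels not due yet */
                clk >>= LVL_CLK_SHIFT;  /* next level granularity */
        }
        return 0;
}

With clk = 512 this prints buckets 0, 64, 136 and 193, i.e. levels 0-3 are due; a clock with a non-zero low octal digit would stop after level 0.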
1247 1368
1248#ifdef CONFIG_NO_HZ_COMMON 1369#ifdef CONFIG_NO_HZ_COMMON
1249/* 1370/*
1250 * Find out when the next timer event is due to happen. This 1371 * Find the next pending bucket of a level. Search from level start (@offset)
1251 * is used on S/390 to stop all activity when a CPU is idle. 1372 * + @clk upwards and if nothing there, search from start of the level
1252 * This function needs to be called with interrupts disabled. 1373 * (@offset) up to @offset + clk.
1253 */ 1374 */
1254static unsigned long __next_timer_interrupt(struct tvec_base *base) 1375static int next_pending_bucket(struct timer_base *base, unsigned offset,
1255{ 1376 unsigned clk)
1256 unsigned long timer_jiffies = base->timer_jiffies; 1377{
1257 unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA; 1378 unsigned pos, start = offset + clk;
1258 int index, slot, array, found = 0; 1379 unsigned end = offset + LVL_SIZE;
1259 struct timer_list *nte; 1380
1260 struct tvec *varray[4]; 1381 pos = find_next_bit(base->pending_map, end, start);
1261 1382 if (pos < end)
1262 /* Look for timer events in tv1. */ 1383 return pos - start;
1263 index = slot = timer_jiffies & TVR_MASK; 1384
1264 do { 1385 pos = find_next_bit(base->pending_map, start, offset);
1265 hlist_for_each_entry(nte, base->tv1.vec + slot, entry) { 1386 return pos < start ? pos + LVL_SIZE - start : -1;
1266 if (nte->flags & TIMER_DEFERRABLE) 1387}
1267 continue; 1388
1268 1389/*
1269 found = 1; 1390 * Search the first expiring timer in the various clock levels. Caller must
1270 expires = nte->expires; 1391 * hold base->lock.
1271 /* Look at the cascade bucket(s)? */ 1392 */
1272 if (!index || slot < index) 1393static unsigned long __next_timer_interrupt(struct timer_base *base)
1273 goto cascade; 1394{
1274 return expires; 1395 unsigned long clk, next, adj;
1396 unsigned lvl, offset = 0;
1397
1398 next = base->clk + NEXT_TIMER_MAX_DELTA;
1399 clk = base->clk;
1400 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
1401 int pos = next_pending_bucket(base, offset, clk & LVL_MASK);
1402
1403 if (pos >= 0) {
1404 unsigned long tmp = clk + (unsigned long) pos;
1405
1406 tmp <<= LVL_SHIFT(lvl);
1407 if (time_before(tmp, next))
1408 next = tmp;
1275 } 1409 }
1276 slot = (slot + 1) & TVR_MASK; 1410 /*
1277 } while (slot != index); 1411 * Clock for the next level. If the current level clock lower
1278 1412 * bits are zero, we look at the next level as is. If not we
1279cascade: 1413 * need to advance it by one because that's going to be the
1280 /* Calculate the next cascade event */ 1414 * next expiring bucket in that level. base->clk is the next
1281 if (index) 1415 * expiring jiffie. So in case of:
1282 timer_jiffies += TVR_SIZE - index; 1416 *
1283 timer_jiffies >>= TVR_BITS; 1417 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1284 1418 * 0 0 0 0 0 0
1285 /* Check tv2-tv5. */ 1419 *
1286 varray[0] = &base->tv2; 1420 * we have to look at all levels @index 0. With
1287 varray[1] = &base->tv3; 1421 *
1288 varray[2] = &base->tv4; 1422 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1289 varray[3] = &base->tv5; 1423 * 0 0 0 0 0 2
1290 1424 *
1291 for (array = 0; array < 4; array++) { 1425 * LVL0 has the next expiring bucket @index 2. The upper
1292 struct tvec *varp = varray[array]; 1426 * levels have the next expiring bucket @index 1.
1293 1427 *
1294 index = slot = timer_jiffies & TVN_MASK; 1428 * In case that the propagation wraps the next level the same
1295 do { 1429 * rules apply:
1296 hlist_for_each_entry(nte, varp->vec + slot, entry) { 1430 *
1297 if (nte->flags & TIMER_DEFERRABLE) 1431 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1298 continue; 1432 * 0 0 0 0 F 2
1299 1433 *
1300 found = 1; 1434 * So after looking at LVL0 we get:
1301 if (time_before(nte->expires, expires)) 1435 *
1302 expires = nte->expires; 1436 * LVL5 LVL4 LVL3 LVL2 LVL1
1303 } 1437 * 0 0 0 1 0
1304 /* 1438 *
1305 * Do we still search for the first timer or are 1439 * So no propagation from LVL1 to LVL2 because that happened
1306 * we looking up the cascade buckets ? 1440 * with the add already, but then we need to propagate further
1307 */ 1441 * from LVL2 to LVL3.
1308 if (found) { 1442 *
1309 /* Look at the cascade bucket(s)? */ 1443 * So the simple check whether the lower bits of the current
1310 if (!index || slot < index) 1444 * level are 0 or not is sufficient for all cases.
1311 break; 1445 */
1312 return expires; 1446 adj = clk & LVL_CLK_MASK ? 1 : 0;
1313 } 1447 clk >>= LVL_CLK_SHIFT;
1314 slot = (slot + 1) & TVN_MASK; 1448 clk += adj;
1315 } while (slot != index);
1316
1317 if (index)
1318 timer_jiffies += TVN_SIZE - index;
1319 timer_jiffies >>= TVN_BITS;
1320 } 1449 }
1321 return expires; 1450 return next;
1322} 1451}
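next_pending_bucket() above is a circular search split into two find_next_bit() calls. A toy equivalent using a bool-per-bucket array instead of a bitmap (next_pending() is a hypothetical name):

#include <stdio.h>

#define LVL_SIZE        64

static int next_pending(const unsigned char *pending, unsigned int clk)
{
        unsigned int i;

        for (i = clk; i < LVL_SIZE; i++)        /* clk .. end of level */
                if (pending[i])
                        return i - clk;
        for (i = 0; i < clk; i++)               /* wrapped-around part */
                if (pending[i])
                        return i + LVL_SIZE - clk;
        return -1;                              /* level empty */
}

int main(void)
{
        unsigned char pending[LVL_SIZE] = { 0 };

        pending[5] = 1;
        /* Searching from bucket 60 wraps: distance 5 + 64 - 60 = 9. */
        printf("%d\n", next_pending(pending, 60));
        return 0;
}

The returned distance is in units of the level's granularity, which is why __next_timer_interrupt() shifts the candidate by LVL_SHIFT(lvl) before comparing.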
1323 1452
1324/* 1453/*
@@ -1364,7 +1493,7 @@ static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
1364 */ 1493 */
1365u64 get_next_timer_interrupt(unsigned long basej, u64 basem) 1494u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
1366{ 1495{
1367 struct tvec_base *base = this_cpu_ptr(&tvec_bases); 1496 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1368 u64 expires = KTIME_MAX; 1497 u64 expires = KTIME_MAX;
1369 unsigned long nextevt; 1498 unsigned long nextevt;
1370 1499
@@ -1376,19 +1505,80 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
1376 return expires; 1505 return expires;
1377 1506
1378 spin_lock(&base->lock); 1507 spin_lock(&base->lock);
1379 if (base->active_timers) { 1508 nextevt = __next_timer_interrupt(base);
1380 if (time_before_eq(base->next_timer, base->timer_jiffies)) 1509 base->next_expiry = nextevt;
1381 base->next_timer = __next_timer_interrupt(base); 1510 /*
1382 nextevt = base->next_timer; 1511 * We have a fresh next event. Check whether we can forward the base:
1383 if (time_before_eq(nextevt, basej)) 1512 */
1384 expires = basem; 1513 if (time_after(nextevt, jiffies))
1385 else 1514 base->clk = jiffies;
1386 expires = basem + (nextevt - basej) * TICK_NSEC; 1515 else if (time_after(nextevt, base->clk))
1516 base->clk = nextevt;
1517
1518 if (time_before_eq(nextevt, basej)) {
1519 expires = basem;
1520 base->is_idle = false;
1521 } else {
1522 expires = basem + (nextevt - basej) * TICK_NSEC;
1523 /*
1524 * If we expect to sleep more than a tick, mark the base idle:
1525 */
1526 if ((expires - basem) > TICK_NSEC)
1527 base->is_idle = true;
1387 } 1528 }
1388 spin_unlock(&base->lock); 1529 spin_unlock(&base->lock);
1389 1530
1390 return cmp_next_hrtimer_event(basem, expires); 1531 return cmp_next_hrtimer_event(basem, expires);
1391} 1532}
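The jiffies-to-nanoseconds conversion at the heart of get_next_timer_interrupt() is plain arithmetic; a worked example (TICK_NSEC = 1000000 assumes HZ = 1000):

#include <stdio.h>

#define TICK_NSEC       1000000ULL      /* assumes HZ = 1000 */

int main(void)
{
        unsigned long basej = 5000, nextevt = 5025;     /* 25 ticks out */
        unsigned long long basem = 123000000ULL;        /* base time in ns */
        unsigned long long expires;

        expires = nextevt <= basej ? basem
                                   : basem + (nextevt - basej) * TICK_NSEC;
        printf("%llu\n", expires);      /* 123000000 + 25 ms = 148000000 */
        return 0;
}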
1533
1534/**
1535 * timer_clear_idle - Clear the idle state of the timer base
1536 *
1537 * Called with interrupts disabled
1538 */
1539void timer_clear_idle(void)
1540{
1541 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1542
1543 /*
1544 * We do this unlocked. The worst outcome is a remote enqueue sending
1545 * a pointless IPI, but taking the lock would just make the window for
1546 * sending the IPI a few instructions smaller for the cost of taking
1547 * the lock in the exit from idle path.
1548 */
1549 base->is_idle = false;
1550}
1551
1552static int collect_expired_timers(struct timer_base *base,
1553 struct hlist_head *heads)
1554{
1555 /*
1556 * NOHZ optimization. After a long idle sleep we need to forward the
1557 * base to current jiffies. Avoid a loop by searching the bitfield for
1558 * the next expiring timer.
1559 */
1560 if ((long)(jiffies - base->clk) > 2) {
1561 unsigned long next = __next_timer_interrupt(base);
1562
1563 /*
1564 * If the next timer is ahead of time, forward to current
1565 * jiffies; otherwise forward to the next expiry time:
1566 */
1567 if (time_after(next, jiffies)) {
1568 /* The call site will increment clock! */
1569 base->clk = jiffies - 1;
1570 return 0;
1571 }
1572 base->clk = next;
1573 }
1574 return __collect_expired_timers(base, heads);
1575}
1576#else
1577static inline int collect_expired_timers(struct timer_base *base,
1578 struct hlist_head *heads)
1579{
1580 return __collect_expired_timers(base, heads);
1581}
1392#endif 1582#endif
1393 1583
1394/* 1584/*
@@ -1411,15 +1601,42 @@ void update_process_times(int user_tick)
1411 run_posix_cpu_timers(p); 1601 run_posix_cpu_timers(p);
1412} 1602}
1413 1603
1604/**
1605 * __run_timers - run all expired timers (if any) on this CPU.
1606 * @base: the timer vector to be processed.
1607 */
1608static inline void __run_timers(struct timer_base *base)
1609{
1610 struct hlist_head heads[LVL_DEPTH];
1611 int levels;
1612
1613 if (!time_after_eq(jiffies, base->clk))
1614 return;
1615
1616 spin_lock_irq(&base->lock);
1617
1618 while (time_after_eq(jiffies, base->clk)) {
1619
1620 levels = collect_expired_timers(base, heads);
1621 base->clk++;
1622
1623 while (levels--)
1624 expire_timers(base, heads + levels);
1625 }
1626 base->running_timer = NULL;
1627 spin_unlock_irq(&base->lock);
1628}
1629
1414/* 1630/*
1415 * This function runs timers and the timer-tq in bottom half context. 1631 * This function runs timers and the timer-tq in bottom half context.
1416 */ 1632 */
1417static void run_timer_softirq(struct softirq_action *h) 1633static void run_timer_softirq(struct softirq_action *h)
1418{ 1634{
1419 struct tvec_base *base = this_cpu_ptr(&tvec_bases); 1635 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1420 1636
1421 if (time_after_eq(jiffies, base->timer_jiffies)) 1637 __run_timers(base);
1422 __run_timers(base); 1638 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
1639 __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
1423} 1640}
1424 1641
1425/* 1642/*
@@ -1427,7 +1644,18 @@ static void run_timer_softirq(struct softirq_action *h)
1427 */ 1644 */
1428void run_local_timers(void) 1645void run_local_timers(void)
1429{ 1646{
1647 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1648
1430 hrtimer_run_queues(); 1649 hrtimer_run_queues();
1650 /* Raise the softirq only if required. */
1651 if (time_before(jiffies, base->clk)) {
1652 if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
1653 return;
1654 /* CPU is awake, so check the deferrable base. */
1655 base++;
1656 if (time_before(jiffies, base->clk))
1657 return;
1658 }
1431 raise_softirq(TIMER_SOFTIRQ); 1659 raise_softirq(TIMER_SOFTIRQ);
1432} 1660}
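The early-return logic in run_local_timers() reduces to a small predicate: raise the softirq if the standard base has work due, or, with NOHZ active, if the deferrable base does. A sketch (should_raise() is a hypothetical name; time_before() is reduced to a plain comparison, assuming no jiffies wraparound):

#include <stdbool.h>
#include <stdio.h>

static bool should_raise(unsigned long jiffies, unsigned long std_clk,
                         bool nohz_active, unsigned long def_clk)
{
        if (jiffies >= std_clk)                 /* standard base due */
                return true;
        return nohz_active && jiffies >= def_clk; /* deferrable base due */
}

int main(void)
{
        printf("%d\n", should_raise(100, 101, false, 0));       /* 0 */
        printf("%d\n", should_raise(100, 100, false, 0));       /* 1 */
        printf("%d\n", should_raise(100, 101, true, 90));       /* 1 */
        return 0;
}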
1433 1661
@@ -1512,7 +1740,7 @@ signed long __sched schedule_timeout(signed long timeout)
1512 expire = timeout + jiffies; 1740 expire = timeout + jiffies;
1513 1741
1514 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current); 1742 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
1515 __mod_timer(&timer, expire, false, TIMER_NOT_PINNED); 1743 __mod_timer(&timer, expire, false);
1516 schedule(); 1744 schedule();
1517 del_singleshot_timer_sync(&timer); 1745 del_singleshot_timer_sync(&timer);
1518 1746
@@ -1563,14 +1791,13 @@ signed long __sched schedule_timeout_idle(signed long timeout)
1563EXPORT_SYMBOL(schedule_timeout_idle); 1791EXPORT_SYMBOL(schedule_timeout_idle);
1564 1792
1565#ifdef CONFIG_HOTPLUG_CPU 1793#ifdef CONFIG_HOTPLUG_CPU
1566static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head) 1794static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
1567{ 1795{
1568 struct timer_list *timer; 1796 struct timer_list *timer;
1569 int cpu = new_base->cpu; 1797 int cpu = new_base->cpu;
1570 1798
1571 while (!hlist_empty(head)) { 1799 while (!hlist_empty(head)) {
1572 timer = hlist_entry(head->first, struct timer_list, entry); 1800 timer = hlist_entry(head->first, struct timer_list, entry);
1573 /* We ignore the accounting on the dying cpu */
1574 detach_timer(timer, false); 1801 detach_timer(timer, false);
1575 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu; 1802 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
1576 internal_add_timer(new_base, timer); 1803 internal_add_timer(new_base, timer);
@@ -1579,37 +1806,31 @@ static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *he
1579 1806
1580static void migrate_timers(int cpu) 1807static void migrate_timers(int cpu)
1581{ 1808{
1582 struct tvec_base *old_base; 1809 struct timer_base *old_base;
1583 struct tvec_base *new_base; 1810 struct timer_base *new_base;
1584 int i; 1811 int b, i;
1585 1812
1586 BUG_ON(cpu_online(cpu)); 1813 BUG_ON(cpu_online(cpu));
1587 old_base = per_cpu_ptr(&tvec_bases, cpu);
1588 new_base = get_cpu_ptr(&tvec_bases);
1589 /*
1590 * The caller is globally serialized and nobody else
1591 * takes two locks at once, deadlock is not possible.
1592 */
1593 spin_lock_irq(&new_base->lock);
1594 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1595
1596 BUG_ON(old_base->running_timer);
1597
1598 for (i = 0; i < TVR_SIZE; i++)
1599 migrate_timer_list(new_base, old_base->tv1.vec + i);
1600 for (i = 0; i < TVN_SIZE; i++) {
1601 migrate_timer_list(new_base, old_base->tv2.vec + i);
1602 migrate_timer_list(new_base, old_base->tv3.vec + i);
1603 migrate_timer_list(new_base, old_base->tv4.vec + i);
1604 migrate_timer_list(new_base, old_base->tv5.vec + i);
1605 }
1606 1814
1607 old_base->active_timers = 0; 1815 for (b = 0; b < NR_BASES; b++) {
1608 old_base->all_timers = 0; 1816 old_base = per_cpu_ptr(&timer_bases[b], cpu);
1817 new_base = get_cpu_ptr(&timer_bases[b]);
1818 /*
1819 * The caller is globally serialized and nobody else
1820 * takes two locks at once, deadlock is not possible.
1821 */
1822 spin_lock_irq(&new_base->lock);
1823 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1824
1825 BUG_ON(old_base->running_timer);
1609 1826
1610 spin_unlock(&old_base->lock); 1827 for (i = 0; i < WHEEL_SIZE; i++)
1611 spin_unlock_irq(&new_base->lock); 1828 migrate_timer_list(new_base, old_base->vectors + i);
1612 put_cpu_ptr(&tvec_bases); 1829
1830 spin_unlock(&old_base->lock);
1831 spin_unlock_irq(&new_base->lock);
1832 put_cpu_ptr(&timer_bases);
1833 }
1613} 1834}
1614 1835
1615static int timer_cpu_notify(struct notifier_block *self, 1836static int timer_cpu_notify(struct notifier_block *self,
@@ -1637,13 +1858,15 @@ static inline void timer_register_cpu_notifier(void) { }
1637 1858
1638static void __init init_timer_cpu(int cpu) 1859static void __init init_timer_cpu(int cpu)
1639{ 1860{
1640 struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu); 1861 struct timer_base *base;
1641 1862 int i;
1642 base->cpu = cpu;
1643 spin_lock_init(&base->lock);
1644 1863
1645 base->timer_jiffies = jiffies; 1864 for (i = 0; i < NR_BASES; i++) {
1646 base->next_timer = base->timer_jiffies; 1865 base = per_cpu_ptr(&timer_bases[i], cpu);
1866 base->cpu = cpu;
1867 spin_lock_init(&base->lock);
1868 base->clk = jiffies;
1869 }
1647} 1870}
1648 1871
1649static void __init init_timer_cpus(void) 1872static void __init init_timer_cpus(void)
@@ -1702,9 +1925,15 @@ static void __sched do_usleep_range(unsigned long min, unsigned long max)
1702} 1925}
1703 1926
1704/** 1927/**
1705 * usleep_range - Drop in replacement for udelay where wakeup is flexible 1928 * usleep_range - Sleep for an approximate time
1706 * @min: Minimum time in usecs to sleep 1929 * @min: Minimum time in usecs to sleep
1707 * @max: Maximum time in usecs to sleep 1930 * @max: Maximum time in usecs to sleep
1931 *
1932 * In non-atomic context where the exact wakeup time is flexible, use
1933 * usleep_range() instead of udelay(). The sleep improves responsiveness
1934 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
1935 * power usage by allowing hrtimers to take advantage of an already-
1936 * scheduled interrupt instead of scheduling a new one just for this sleep.
1708 */ 1937 */
1709void __sched usleep_range(unsigned long min, unsigned long max) 1938void __sched usleep_range(unsigned long min, unsigned long max)
1710{ 1939{
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 1adecb4b87c8..087204c733eb 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -279,7 +279,7 @@ static void print_name_offset(struct seq_file *m, unsigned long addr)
279 279
280static int tstats_show(struct seq_file *m, void *v) 280static int tstats_show(struct seq_file *m, void *v)
281{ 281{
282 struct timespec period; 282 struct timespec64 period;
283 struct entry *entry; 283 struct entry *entry;
284 unsigned long ms; 284 unsigned long ms;
285 long events = 0; 285 long events = 0;
@@ -295,11 +295,11 @@ static int tstats_show(struct seq_file *m, void *v)
295 295
296 time = ktime_sub(time_stop, time_start); 296 time = ktime_sub(time_stop, time_start);
297 297
298 period = ktime_to_timespec(time); 298 period = ktime_to_timespec64(time);
299 ms = period.tv_nsec / 1000000; 299 ms = period.tv_nsec / 1000000;
300 300
301 seq_puts(m, "Timer Stats Version: v0.3\n"); 301 seq_puts(m, "Timer Stats Version: v0.3\n");
302 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms); 302 seq_printf(m, "Sample period: %ld.%03ld s\n", (long)period.tv_sec, ms);
303 if (atomic_read(&overflow_count)) 303 if (atomic_read(&overflow_count))
304 seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count)); 304 seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
305 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive"); 305 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
diff --git a/lib/random32.c b/lib/random32.c
index 510d1ce7d4d2..69ed593aab07 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -233,7 +233,6 @@ static void __prandom_timer(unsigned long dontcare)
233 233
234static void __init __prandom_start_seed_timer(void) 234static void __init __prandom_start_seed_timer(void)
235{ 235{
236 set_timer_slack(&seed_timer, HZ);
237 seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC); 236 seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
238 add_timer(&seed_timer); 237 add_timer(&seed_timer);
239} 238}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index fa8c39804bdb..61a9deec2993 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -603,7 +603,7 @@ static void reqsk_timer_handler(unsigned long data)
603 if (req->num_timeout++ == 0) 603 if (req->num_timeout++ == 0)
604 atomic_dec(&queue->young); 604 atomic_dec(&queue->young);
605 timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX); 605 timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
606 mod_timer_pinned(&req->rsk_timer, jiffies + timeo); 606 mod_timer(&req->rsk_timer, jiffies + timeo);
607 return; 607 return;
608 } 608 }
609drop: 609drop:
@@ -617,8 +617,9 @@ static void reqsk_queue_hash_req(struct request_sock *req,
617 req->num_timeout = 0; 617 req->num_timeout = 0;
618 req->sk = NULL; 618 req->sk = NULL;
619 619
620 setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req); 620 setup_pinned_timer(&req->rsk_timer, reqsk_timer_handler,
621 mod_timer_pinned(&req->rsk_timer, jiffies + timeout); 621 (unsigned long)req);
622 mod_timer(&req->rsk_timer, jiffies + timeout);
622 623
623 inet_ehash_insert(req_to_sk(req), NULL); 624 inet_ehash_insert(req_to_sk(req), NULL);
624 /* before letting lookups find us, make sure all req fields 625 /* before letting lookups find us, make sure all req fields
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 206581674806..ddcd56c08d14 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -188,7 +188,8 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
188 tw->tw_prot = sk->sk_prot_creator; 188 tw->tw_prot = sk->sk_prot_creator;
189 atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie)); 189 atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
190 twsk_net_set(tw, sock_net(sk)); 190 twsk_net_set(tw, sock_net(sk));
191 setup_timer(&tw->tw_timer, tw_timer_handler, (unsigned long)tw); 191 setup_pinned_timer(&tw->tw_timer, tw_timer_handler,
192 (unsigned long)tw);
192 /* 193 /*
193 * Because we use RCU lookups, we should not set tw_refcnt 194 * Because we use RCU lookups, we should not set tw_refcnt
194 * to a non null value before everything is setup for this 195 * to a non null value before everything is setup for this
@@ -248,7 +249,7 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
248 249
249 tw->tw_kill = timeo <= 4*HZ; 250 tw->tw_kill = timeo <= 4*HZ;
250 if (!rearm) { 251 if (!rearm) {
251 BUG_ON(mod_timer_pinned(&tw->tw_timer, jiffies + timeo)); 252 BUG_ON(mod_timer(&tw->tw_timer, jiffies + timeo));
252 atomic_inc(&tw->tw_dr->tw_count); 253 atomic_inc(&tw->tw_dr->tw_count);
253 } else { 254 } else {
254 mod_timer_pending(&tw->tw_timer, jiffies + timeo); 255 mod_timer_pending(&tw->tw_timer, jiffies + timeo);