author		Linus Torvalds <torvalds@linux-foundation.org>	2013-07-13 18:36:09 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-13 18:36:09 -0400
commit		0da273668657a70155f3d4ae121dc19277a05778 (patch)
tree		18f8c2edcbcd17e80b5cb052c3c024ce1d0e1f2f
parent		560ae37178b12e3bd37626f7b1e0b29c503ea558 (diff)
parent		b0ec636c93ddd77235bf0f023a8a95d78cb6cafe (diff)
Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer updates from Thomas Gleixner:

 - watchdog fixes for full dynticks
 - improved debug output for full dynticks
 - remove an obsolete full dynticks check
 - two ARM SoC clocksource drivers for sharing across SoCs
 - tick broadcast fix for CPU hotplug

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tick: broadcast: Check broadcast mode on CPU hotplug
  clocksource: arm_global_timer: Add ARM global timer support
  clocksource: Add Marvell Orion SoC timer
  nohz: Remove obsolete check for full dynticks CPUs to be RCU nocbs
  watchdog: Boot-disable by default on full dynticks
  watchdog: Rename confusing state variable
  watchdog: Register / unregister watchdog kthreads on sysctl control
  nohz: Warn if the machine can not perform nohz_full
-rw-r--r--  Documentation/devicetree/bindings/arm/global_timer.txt            24
-rw-r--r--  Documentation/devicetree/bindings/timer/marvell,orion-timer.txt   17
-rw-r--r--  drivers/clocksource/Kconfig                                        18
-rw-r--r--  drivers/clocksource/Makefile                                        2
-rw-r--r--  drivers/clocksource/arm_global_timer.c                            321
-rw-r--r--  drivers/clocksource/time-orion.c                                  150
-rw-r--r--  include/linux/nmi.h                                                 2
-rw-r--r--  kernel/sysctl.c                                                     4
-rw-r--r--  kernel/time/tick-broadcast.c                                        5
-rw-r--r--  kernel/time/tick-sched.c                                           15
-rw-r--r--  kernel/watchdog.c                                                 113
11 files changed, 608 insertions(+), 63 deletions(-)
diff --git a/Documentation/devicetree/bindings/arm/global_timer.txt b/Documentation/devicetree/bindings/arm/global_timer.txt
new file mode 100644
index 000000000000..1e548981eda4
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/global_timer.txt
@@ -0,0 +1,24 @@
+
+* ARM Global Timer
+	Cortex-A9 are often associated with a per-core Global timer.
+
+** Timer node required properties:
+
+- compatible : Should be "arm,cortex-a9-global-timer"
+	Driver supports versions r2p0 and above.
+
+- interrupts : One interrupt to each core
+
+- reg : Specify the base address and the size of the GT timer
+	register window.
+
+- clocks : Should be phandle to a clock.
+
+Example:
+
+	timer@2c000600 {
+		compatible = "arm,cortex-a9-global-timer";
+		reg = <0x2c000600 0x20>;
+		interrupts = <1 13 0xf01>;
+		clocks = <&arm_periph_clk>;
+	};
diff --git a/Documentation/devicetree/bindings/timer/marvell,orion-timer.txt b/Documentation/devicetree/bindings/timer/marvell,orion-timer.txt
new file mode 100644
index 000000000000..62bb8260cf6a
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/marvell,orion-timer.txt
@@ -0,0 +1,17 @@
+Marvell Orion SoC timer
+
+Required properties:
+- compatible: shall be "marvell,orion-timer"
+- reg: base address of the timer register starting with TIMERS CONTROL register
+- interrupt-parent: phandle of the bridge interrupt controller
+- interrupts: should contain the interrupts for Timer0 and Timer1
+- clocks: phandle of timer reference clock (tclk)
+
+Example:
+	timer: timer {
+		compatible = "marvell,orion-timer";
+		reg = <0x20300 0x20>;
+		interrupt-parent = <&bridge_intc>;
+		interrupts = <1>, <2>;
+		clocks = <&core_clk 0>;
+	};
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 81465c21f873..b7b9b040a89b 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -27,6 +27,11 @@ config DW_APB_TIMER_OF
 config ARMADA_370_XP_TIMER
 	bool
 
+config ORION_TIMER
+	select CLKSRC_OF
+	select CLKSRC_MMIO
+	bool
+
 config SUN4I_TIMER
 	bool
 
@@ -69,6 +74,19 @@ config ARM_ARCH_TIMER
 	bool
 	select CLKSRC_OF if OF
 
+config ARM_GLOBAL_TIMER
+	bool
+	select CLKSRC_OF if OF
+	help
+	  This options enables support for the ARM global timer unit
+
+config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
+	bool
+	depends on ARM_GLOBAL_TIMER
+	default y
+	help
+	  Use ARM global timer clock source as sched_clock
+
 config CLKSRC_METAG_GENERIC
 	def_bool y if METAG
 	help
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 9ba8b4d867e3..8b00c5cebfa4 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
 obj-$(CONFIG_CLKSRC_NOMADIK_MTU)	+= nomadik-mtu.o
 obj-$(CONFIG_CLKSRC_DBX500_PRCMU)	+= clksrc-dbx500-prcmu.o
 obj-$(CONFIG_ARMADA_370_XP_TIMER)	+= time-armada-370-xp.o
+obj-$(CONFIG_ORION_TIMER)	+= time-orion.o
 obj-$(CONFIG_ARCH_BCM2835)	+= bcm2835_timer.o
 obj-$(CONFIG_ARCH_MARCO)	+= timer-marco.o
 obj-$(CONFIG_ARCH_MXS)	+= mxs_timer.o
@@ -30,5 +31,6 @@ obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
 obj-$(CONFIG_VF_PIT_TIMER)	+= vf_pit_timer.o
 
 obj-$(CONFIG_ARM_ARCH_TIMER)	+= arm_arch_timer.o
+obj-$(CONFIG_ARM_GLOBAL_TIMER)	+= arm_global_timer.o
 obj-$(CONFIG_CLKSRC_METAG_GENERIC)	+= metag_generic.o
 obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST)	+= dummy_timer.o
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
new file mode 100644
index 000000000000..db8afc7427a6
--- /dev/null
+++ b/drivers/clocksource/arm_global_timer.c
@@ -0,0 +1,321 @@
+/*
+ * drivers/clocksource/arm_global_timer.c
+ *
+ * Copyright (C) 2013 STMicroelectronics (R&D) Limited.
+ * Author: Stuart Menefy <stuart.menefy@st.com>
+ * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/sched_clock.h>
+
+#include <asm/cputype.h>
+
+#define GT_COUNTER0	0x00
+#define GT_COUNTER1	0x04
+
+#define GT_CONTROL	0x08
+#define GT_CONTROL_TIMER_ENABLE		BIT(0)	/* this bit is NOT banked */
+#define GT_CONTROL_COMP_ENABLE		BIT(1)	/* banked */
+#define GT_CONTROL_IRQ_ENABLE		BIT(2)	/* banked */
+#define GT_CONTROL_AUTO_INC		BIT(3)	/* banked */
+
+#define GT_INT_STATUS	0x0c
+#define GT_INT_STATUS_EVENT_FLAG	BIT(0)
+
+#define GT_COMP0	0x10
+#define GT_COMP1	0x14
+#define GT_AUTO_INC	0x18
+
+/*
+ * We are expecting to be clocked by the ARM peripheral clock.
+ *
+ * Note: it is assumed we are using a prescaler value of zero, so this is
+ * the units for all operations.
+ */
+static void __iomem *gt_base;
+static unsigned long gt_clk_rate;
+static int gt_ppi;
+static struct clock_event_device __percpu *gt_evt;
+
+/*
+ * To get the value from the Global Timer Counter register proceed as follows:
+ * 1. Read the upper 32-bit timer counter register
+ * 2. Read the lower 32-bit timer counter register
+ * 3. Read the upper 32-bit timer counter register again. If the value is
+ *    different to the 32-bit upper value read previously, go back to step 2.
+ *    Otherwise the 64-bit timer counter value is correct.
+ */
+static u64 gt_counter_read(void)
+{
+	u64 counter;
+	u32 lower;
+	u32 upper, old_upper;
+
+	upper = readl_relaxed(gt_base + GT_COUNTER1);
+	do {
+		old_upper = upper;
+		lower = readl_relaxed(gt_base + GT_COUNTER0);
+		upper = readl_relaxed(gt_base + GT_COUNTER1);
+	} while (upper != old_upper);
+
+	counter = upper;
+	counter <<= 32;
+	counter |= lower;
+	return counter;
+}
+
+/**
+ * To ensure that updates to comparator value register do not set the
+ * Interrupt Status Register proceed as follows:
+ * 1. Clear the Comp Enable bit in the Timer Control Register.
+ * 2. Write the lower 32-bit Comparator Value Register.
+ * 3. Write the upper 32-bit Comparator Value Register.
+ * 4. Set the Comp Enable bit and, if necessary, the IRQ enable bit.
+ */
+static void gt_compare_set(unsigned long delta, int periodic)
+{
+	u64 counter = gt_counter_read();
+	unsigned long ctrl;
+
+	counter += delta;
+	ctrl = GT_CONTROL_TIMER_ENABLE;
+	writel(ctrl, gt_base + GT_CONTROL);
+	writel(lower_32_bits(counter), gt_base + GT_COMP0);
+	writel(upper_32_bits(counter), gt_base + GT_COMP1);
+
+	if (periodic) {
+		writel(delta, gt_base + GT_AUTO_INC);
+		ctrl |= GT_CONTROL_AUTO_INC;
+	}
+
+	ctrl |= GT_CONTROL_COMP_ENABLE | GT_CONTROL_IRQ_ENABLE;
+	writel(ctrl, gt_base + GT_CONTROL);
+}
+
+static void gt_clockevent_set_mode(enum clock_event_mode mode,
+				   struct clock_event_device *clk)
+{
+	unsigned long ctrl;
+
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		gt_compare_set(DIV_ROUND_CLOSEST(gt_clk_rate, HZ), 1);
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		ctrl = readl(gt_base + GT_CONTROL);
+		ctrl &= ~(GT_CONTROL_COMP_ENABLE |
+				GT_CONTROL_IRQ_ENABLE | GT_CONTROL_AUTO_INC);
+		writel(ctrl, gt_base + GT_CONTROL);
+		break;
+	default:
+		break;
+	}
+}
+
+static int gt_clockevent_set_next_event(unsigned long evt,
+					struct clock_event_device *unused)
+{
+	gt_compare_set(evt, 0);
+	return 0;
+}
+
+static irqreturn_t gt_clockevent_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = dev_id;
+
+	if (!(readl_relaxed(gt_base + GT_INT_STATUS) &
+				GT_INT_STATUS_EVENT_FLAG))
+		return IRQ_NONE;
+
+	/**
+	 * ERRATA 740657( Global Timer can send 2 interrupts for
+	 * the same event in single-shot mode)
+	 * Workaround:
+	 *	Either disable single-shot mode.
+	 *	Or
+	 *	Modify the Interrupt Handler to avoid the
+	 *	offending sequence. This is achieved by clearing
+	 *	the Global Timer flag _after_ having incremented
+	 *	the Comparator register value to a higher value.
+	 */
+	if (evt->mode == CLOCK_EVT_MODE_ONESHOT)
+		gt_compare_set(ULONG_MAX, 0);
+
+	writel_relaxed(GT_INT_STATUS_EVENT_FLAG, gt_base + GT_INT_STATUS);
+	evt->event_handler(evt);
+
+	return IRQ_HANDLED;
+}
+
+static int __cpuinit gt_clockevents_init(struct clock_event_device *clk)
+{
+	int cpu = smp_processor_id();
+
+	clk->name = "arm_global_timer";
+	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+	clk->set_mode = gt_clockevent_set_mode;
+	clk->set_next_event = gt_clockevent_set_next_event;
+	clk->cpumask = cpumask_of(cpu);
+	clk->rating = 300;
+	clk->irq = gt_ppi;
+	clockevents_config_and_register(clk, gt_clk_rate,
+					1, 0xffffffff);
+	enable_percpu_irq(clk->irq, IRQ_TYPE_NONE);
+	return 0;
+}
+
+static void gt_clockevents_stop(struct clock_event_device *clk)
+{
+	gt_clockevent_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+	disable_percpu_irq(clk->irq);
+}
+
+static cycle_t gt_clocksource_read(struct clocksource *cs)
+{
+	return gt_counter_read();
+}
+
+static struct clocksource gt_clocksource = {
+	.name	= "arm_global_timer",
+	.rating	= 300,
+	.read	= gt_clocksource_read,
+	.mask	= CLOCKSOURCE_MASK(64),
+	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
+static u32 notrace gt_sched_clock_read(void)
+{
+	return gt_counter_read();
+}
+#endif
+
+static void __init gt_clocksource_init(void)
+{
+	writel(0, gt_base + GT_CONTROL);
+	writel(0, gt_base + GT_COUNTER0);
+	writel(0, gt_base + GT_COUNTER1);
+	/* enables timer on all the cores */
+	writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
+
+#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
+	setup_sched_clock(gt_sched_clock_read, 32, gt_clk_rate);
+#endif
+	clocksource_register_hz(&gt_clocksource, gt_clk_rate);
+}
+
+static int __cpuinit gt_cpu_notify(struct notifier_block *self,
+				   unsigned long action, void *hcpu)
+{
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_STARTING:
+		gt_clockevents_init(this_cpu_ptr(gt_evt));
+		break;
+	case CPU_DYING:
+		gt_clockevents_stop(this_cpu_ptr(gt_evt));
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+static struct notifier_block gt_cpu_nb __cpuinitdata = {
+	.notifier_call = gt_cpu_notify,
+};
+
+static void __init global_timer_of_register(struct device_node *np)
+{
+	struct clk *gt_clk;
+	int err = 0;
+
+	/*
+	 * In r2p0 the comparators for each processor with the global timer
+	 * fire when the timer value is greater than or equal to. In previous
+	 * revisions the comparators fired when the timer value was equal to.
+	 */
+	if ((read_cpuid_id() & 0xf0000f) < 0x200000) {
+		pr_warn("global-timer: non support for this cpu version.\n");
+		return;
+	}
+
+	gt_ppi = irq_of_parse_and_map(np, 0);
+	if (!gt_ppi) {
+		pr_warn("global-timer: unable to parse irq\n");
+		return;
+	}
+
+	gt_base = of_iomap(np, 0);
+	if (!gt_base) {
+		pr_warn("global-timer: invalid base address\n");
+		return;
+	}
+
+	gt_clk = of_clk_get(np, 0);
+	if (!IS_ERR(gt_clk)) {
+		err = clk_prepare_enable(gt_clk);
+		if (err)
+			goto out_unmap;
+	} else {
+		pr_warn("global-timer: clk not found\n");
+		err = -EINVAL;
+		goto out_unmap;
+	}
+
+	gt_clk_rate = clk_get_rate(gt_clk);
+	gt_evt = alloc_percpu(struct clock_event_device);
+	if (!gt_evt) {
+		pr_warn("global-timer: can't allocate memory\n");
+		err = -ENOMEM;
+		goto out_clk;
+	}
+
+	err = request_percpu_irq(gt_ppi, gt_clockevent_interrupt,
+				 "gt", gt_evt);
+	if (err) {
+		pr_warn("global-timer: can't register interrupt %d (%d)\n",
+			gt_ppi, err);
+		goto out_free;
+	}
+
+	err = register_cpu_notifier(&gt_cpu_nb);
+	if (err) {
+		pr_warn("global-timer: unable to register cpu notifier.\n");
+		goto out_irq;
+	}
+
+	/* Immediately configure the timer on the boot CPU */
+	gt_clocksource_init();
+	gt_clockevents_init(this_cpu_ptr(gt_evt));
+
+	return;
+
+out_irq:
+	free_percpu_irq(gt_ppi, gt_evt);
+out_free:
+	free_percpu(gt_evt);
+out_clk:
+	clk_disable_unprepare(gt_clk);
+out_unmap:
+	iounmap(gt_base);
+	WARN(err, "ARM Global timer register failed (%d)\n", err);
+}
+
+/* Only tested on r2p2 and r3p0 */
+CLOCKSOURCE_OF_DECLARE(arm_gt, "arm,cortex-a9-global-timer",
+			global_timer_of_register);
diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c
new file mode 100644
index 000000000000..ecbeb6810215
--- /dev/null
+++ b/drivers/clocksource/time-orion.c
@@ -0,0 +1,150 @@
+/*
+ * Marvell Orion SoC timer handling.
+ *
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Timer 0 is used as free-running clocksource, while timer 1 is
+ * used as clock_event_device.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+#include <asm/sched_clock.h>
+
+#define TIMER_CTRL		0x00
+#define  TIMER0_EN		BIT(0)
+#define  TIMER0_RELOAD_EN	BIT(1)
+#define  TIMER1_EN		BIT(2)
+#define  TIMER1_RELOAD_EN	BIT(3)
+#define TIMER0_RELOAD		0x10
+#define TIMER0_VAL		0x14
+#define TIMER1_RELOAD		0x18
+#define TIMER1_VAL		0x1c
+
+#define ORION_ONESHOT_MIN	1
+#define ORION_ONESHOT_MAX	0xfffffffe
+
+static void __iomem *timer_base;
+static DEFINE_SPINLOCK(timer_ctrl_lock);
+
+/*
+ * Thread-safe access to TIMER_CTRL register
+ * (shared with watchdog timer)
+ */
+void orion_timer_ctrl_clrset(u32 clr, u32 set)
+{
+	spin_lock(&timer_ctrl_lock);
+	writel((readl(timer_base + TIMER_CTRL) & ~clr) | set,
+		timer_base + TIMER_CTRL);
+	spin_unlock(&timer_ctrl_lock);
+}
+EXPORT_SYMBOL(orion_timer_ctrl_clrset);
+
+/*
+ * Free-running clocksource handling.
+ */
+static u32 notrace orion_read_sched_clock(void)
+{
+	return ~readl(timer_base + TIMER0_VAL);
+}
+
+/*
+ * Clockevent handling.
+ */
+static u32 ticks_per_jiffy;
+
+static int orion_clkevt_next_event(unsigned long delta,
+				   struct clock_event_device *dev)
+{
+	/* setup and enable one-shot timer */
+	writel(delta, timer_base + TIMER1_VAL);
+	orion_timer_ctrl_clrset(TIMER1_RELOAD_EN, TIMER1_EN);
+
+	return 0;
+}
+
+static void orion_clkevt_mode(enum clock_event_mode mode,
+			      struct clock_event_device *dev)
+{
+	if (mode == CLOCK_EVT_MODE_PERIODIC) {
+		/* setup and enable periodic timer at 1/HZ intervals */
+		writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD);
+		writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL);
+		orion_timer_ctrl_clrset(0, TIMER1_RELOAD_EN | TIMER1_EN);
+	} else {
+		/* disable timer */
+		orion_timer_ctrl_clrset(TIMER1_RELOAD_EN | TIMER1_EN, 0);
+	}
+}
+
+static struct clock_event_device orion_clkevt = {
+	.name		= "orion_event",
+	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+	.shift		= 32,
+	.rating		= 300,
+	.set_next_event	= orion_clkevt_next_event,
+	.set_mode	= orion_clkevt_mode,
+};
+
+static irqreturn_t orion_clkevt_irq_handler(int irq, void *dev_id)
+{
+	orion_clkevt.event_handler(&orion_clkevt);
+	return IRQ_HANDLED;
+}
+
+static struct irqaction orion_clkevt_irq = {
+	.name		= "orion_event",
+	.flags		= IRQF_TIMER,
+	.handler	= orion_clkevt_irq_handler,
+};
+
+static void __init orion_timer_init(struct device_node *np)
+{
+	struct clk *clk;
+	int irq;
+
+	/* timer registers are shared with watchdog timer */
+	timer_base = of_iomap(np, 0);
+	if (!timer_base)
+		panic("%s: unable to map resource\n", np->name);
+
+	clk = of_clk_get(np, 0);
+	if (IS_ERR(clk))
+		panic("%s: unable to get clk\n", np->name);
+	clk_prepare_enable(clk);
+
+	/* we are only interested in timer1 irq */
+	irq = irq_of_parse_and_map(np, 1);
+	if (irq <= 0)
+		panic("%s: unable to parse timer1 irq\n", np->name);
+
+	/* setup timer0 as free-running clocksource */
+	writel(~0, timer_base + TIMER0_VAL);
+	writel(~0, timer_base + TIMER0_RELOAD);
+	orion_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | TIMER0_EN);
+	clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
+			      clk_get_rate(clk), 300, 32,
+			      clocksource_mmio_readl_down);
+	setup_sched_clock(orion_read_sched_clock, 32, clk_get_rate(clk));
+
+	/* setup timer1 as clockevent timer */
+	if (setup_irq(irq, &orion_clkevt_irq))
+		panic("%s: unable to setup irq\n", np->name);
+
+	ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ;
+	orion_clkevt.cpumask = cpumask_of(0);
+	orion_clkevt.irq = irq;
+	clockevents_config_and_register(&orion_clkevt, clk_get_rate(clk),
+					ORION_ONESHOT_MIN, ORION_ONESHOT_MAX);
+}
+CLOCKSOURCE_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init);
150CLOCKSOURCE_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init);
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index db50840e6355..6a45fb583ff1 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -46,7 +46,7 @@ static inline bool trigger_all_cpu_backtrace(void)
 #ifdef CONFIG_LOCKUP_DETECTOR
 int hw_nmi_is_cpu_stuck(struct pt_regs *);
 u64 hw_nmi_get_sample_period(int watchdog_thresh);
-extern int watchdog_enabled;
+extern int watchdog_user_enabled;
 extern int watchdog_thresh;
 struct ctl_table;
 extern int proc_dowatchdog(struct ctl_table *, int ,
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index e5b31aff67aa..ac09d98490aa 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -807,7 +807,7 @@ static struct ctl_table kern_table[] = {
 #if defined(CONFIG_LOCKUP_DETECTOR)
 	{
 		.procname       = "watchdog",
-		.data           = &watchdog_enabled,
+		.data           = &watchdog_user_enabled,
 		.maxlen         = sizeof (int),
 		.mode           = 0644,
 		.proc_handler   = proc_dowatchdog,
@@ -834,7 +834,7 @@ static struct ctl_table kern_table[] = {
 	},
 	{
 		.procname       = "nmi_watchdog",
-		.data           = &watchdog_enabled,
+		.data           = &watchdog_user_enabled,
 		.maxlen         = sizeof (int),
 		.mode           = 0644,
 		.proc_handler   = proc_dowatchdog,
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 6d3f91631de6..218bcb565fed 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -157,7 +157,10 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
 		dev->event_handler = tick_handle_periodic;
 		tick_device_setup_broadcast_func(dev);
 		cpumask_set_cpu(cpu, tick_broadcast_mask);
-		tick_broadcast_start_periodic(bc);
+		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
+			tick_broadcast_start_periodic(bc);
+		else
+			tick_broadcast_setup_oneshot(bc);
 		ret = 1;
 	} else {
 		/*
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 0cf1c1453181..69601726a745 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -178,6 +178,11 @@ static bool can_stop_full_tick(void)
 	 */
 	if (!sched_clock_stable) {
 		trace_tick_stop(0, "unstable sched clock\n");
+		/*
+		 * Don't allow the user to think they can get
+		 * full NO_HZ with this machine.
+		 */
+		WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock");
 		return false;
 	}
 #endif
@@ -346,16 +351,6 @@ void __init tick_nohz_init(void)
 	}
 
 	cpu_notifier(tick_nohz_cpu_down_callback, 0);
-
-	/* Make sure full dynticks CPU are also RCU nocbs */
-	for_each_cpu(cpu, nohz_full_mask) {
-		if (!rcu_is_nocb_cpu(cpu)) {
-			pr_warning("NO_HZ: CPU %d is not RCU nocb: "
-				   "cleared from nohz_full range", cpu);
-			cpumask_clear_cpu(cpu, nohz_full_mask);
-		}
-	}
-
 	cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), nohz_full_mask);
 	pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf);
 }
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 05039e348f07..1241d8c91d5e 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -29,9 +29,9 @@
 #include <linux/kvm_para.h>
 #include <linux/perf_event.h>
 
-int watchdog_enabled = 1;
+int watchdog_user_enabled = 1;
 int __read_mostly watchdog_thresh = 10;
-static int __read_mostly watchdog_disabled;
+static int __read_mostly watchdog_running;
 static u64 __read_mostly sample_period;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -63,7 +63,7 @@ static int __init hardlockup_panic_setup(char *str)
 	else if (!strncmp(str, "nopanic", 7))
 		hardlockup_panic = 0;
 	else if (!strncmp(str, "0", 1))
-		watchdog_enabled = 0;
+		watchdog_user_enabled = 0;
 	return 1;
 }
 __setup("nmi_watchdog=", hardlockup_panic_setup);
@@ -82,7 +82,7 @@ __setup("softlockup_panic=", softlockup_panic_setup);
 
 static int __init nowatchdog_setup(char *str)
 {
-	watchdog_enabled = 0;
+	watchdog_user_enabled = 0;
 	return 1;
 }
 __setup("nowatchdog", nowatchdog_setup);
@@ -90,7 +90,7 @@ __setup("nowatchdog", nowatchdog_setup);
 /* deprecated */
 static int __init nosoftlockup_setup(char *str)
 {
-	watchdog_enabled = 0;
+	watchdog_user_enabled = 0;
 	return 1;
 }
 __setup("nosoftlockup", nosoftlockup_setup);
@@ -158,7 +158,7 @@ void touch_all_softlockup_watchdogs(void)
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 void touch_nmi_watchdog(void)
 {
-	if (watchdog_enabled) {
+	if (watchdog_user_enabled) {
 		unsigned cpu;
 
 		for_each_present_cpu(cpu) {
@@ -347,11 +347,6 @@ static void watchdog_enable(unsigned int cpu)
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer->function = watchdog_timer_fn;
 
-	if (!watchdog_enabled) {
-		kthread_park(current);
-		return;
-	}
-
 	/* Enable the perf event */
 	watchdog_nmi_enable(cpu);
 
@@ -374,6 +369,11 @@ static void watchdog_disable(unsigned int cpu)
 	watchdog_nmi_disable(cpu);
 }
 
+static void watchdog_cleanup(unsigned int cpu, bool online)
+{
+	watchdog_disable(cpu);
+}
+
 static int watchdog_should_run(unsigned int cpu)
 {
 	return __this_cpu_read(hrtimer_interrupts) !=
@@ -475,28 +475,40 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
 static void watchdog_nmi_disable(unsigned int cpu) { return; }
 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
 
-/* prepare/enable/disable routines */
-/* sysctl functions */
-#ifdef CONFIG_SYSCTL
-static void watchdog_enable_all_cpus(void)
+static struct smp_hotplug_thread watchdog_threads = {
+	.store			= &softlockup_watchdog,
+	.thread_should_run	= watchdog_should_run,
+	.thread_fn		= watchdog,
+	.thread_comm		= "watchdog/%u",
+	.setup			= watchdog_enable,
+	.cleanup		= watchdog_cleanup,
+	.park			= watchdog_disable,
+	.unpark			= watchdog_enable,
+};
+
+static int watchdog_enable_all_cpus(void)
 {
-	unsigned int cpu;
+	int err = 0;
 
-	if (watchdog_disabled) {
-		watchdog_disabled = 0;
-		for_each_online_cpu(cpu)
-			kthread_unpark(per_cpu(softlockup_watchdog, cpu));
+	if (!watchdog_running) {
+		err = smpboot_register_percpu_thread(&watchdog_threads);
+		if (err)
+			pr_err("Failed to create watchdog threads, disabled\n");
+		else
+			watchdog_running = 1;
 	}
+
+	return err;
 }
 
+/* prepare/enable/disable routines */
+/* sysctl functions */
+#ifdef CONFIG_SYSCTL
 static void watchdog_disable_all_cpus(void)
 {
-	unsigned int cpu;
-
-	if (!watchdog_disabled) {
-		watchdog_disabled = 1;
-		for_each_online_cpu(cpu)
-			kthread_park(per_cpu(softlockup_watchdog, cpu));
+	if (watchdog_running) {
+		watchdog_running = 0;
+		smpboot_unregister_percpu_thread(&watchdog_threads);
 	}
 }
 
@@ -507,45 +519,48 @@ static void watchdog_disable_all_cpus(void)
 int proc_dowatchdog(struct ctl_table *table, int write,
 		    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int ret;
+	int err, old_thresh, old_enabled;
 
-	if (watchdog_disabled < 0)
-		return -ENODEV;
+	old_thresh = ACCESS_ONCE(watchdog_thresh);
+	old_enabled = ACCESS_ONCE(watchdog_user_enabled);
 
-	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-	if (ret || !write)
-		return ret;
+	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (err || !write)
+		return err;
 
 	set_sample_period();
 	/*
 	 * Watchdog threads shouldn't be enabled if they are
-	 * disabled. The 'watchdog_disabled' variable check in
+	 * disabled. The 'watchdog_running' variable check in
 	 * watchdog_*_all_cpus() function takes care of this.
 	 */
-	if (watchdog_enabled && watchdog_thresh)
-		watchdog_enable_all_cpus();
+	if (watchdog_user_enabled && watchdog_thresh)
+		err = watchdog_enable_all_cpus();
 	else
 		watchdog_disable_all_cpus();
 
-	return ret;
+	/* Restore old values on failure */
+	if (err) {
+		watchdog_thresh = old_thresh;
+		watchdog_user_enabled = old_enabled;
+	}
+
+	return err;
 }
 #endif /* CONFIG_SYSCTL */
 
-static struct smp_hotplug_thread watchdog_threads = {
-	.store			= &softlockup_watchdog,
-	.thread_should_run	= watchdog_should_run,
-	.thread_fn		= watchdog,
-	.thread_comm		= "watchdog/%u",
-	.setup			= watchdog_enable,
-	.park			= watchdog_disable,
-	.unpark			= watchdog_enable,
-};
-
 void __init lockup_detector_init(void)
 {
 	set_sample_period();
-	if (smpboot_register_percpu_thread(&watchdog_threads)) {
-		pr_err("Failed to create watchdog threads, disabled\n");
-		watchdog_disabled = -ENODEV;
+
+#ifdef CONFIG_NO_HZ_FULL
+	if (watchdog_user_enabled) {
+		watchdog_user_enabled = 0;
+		pr_warning("Disabled lockup detectors by default for full dynticks\n");
+		pr_warning("You can reactivate it with 'sysctl -w kernel.watchdog=1'\n");
+	}
+#endif
+
+	if (watchdog_user_enabled)
+		watchdog_enable_all_cpus();
 }
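
Usage note (not part of the commit): with the watchdog changes above, lockup_detector_init() boot-disables the detector on NO_HZ_FULL kernels and suggests re-enabling it via 'sysctl -w kernel.watchdog=1', which goes through the kernel.watchdog sysctl entry and proc_dowatchdog() shown in this diff. A minimal sketch of doing the same from user space in C, assuming the usual /proc/sys mount and root privileges; the helper program itself is hypothetical and only illustrates the knob introduced here:

	/* Hypothetical user-space helper, equivalent to `sysctl -w kernel.watchdog=1`.
	 * Writing "1" is handled by proc_dowatchdog(), which calls
	 * watchdog_enable_all_cpus() as long as watchdog_thresh is non-zero. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/watchdog", "w");

		if (!f) {
			perror("open /proc/sys/kernel/watchdog");
			return 1;
		}
		fputs("1\n", f);
		fclose(f);
		return 0;
	}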