author     Olof Johansson <olof@lixom.net>  2013-07-23 17:51:34 -0400
committer  Olof Johansson <olof@lixom.net>  2013-07-23 19:54:15 -0400
commit     47dcd3563e45fc5a59bf7f3326ef56087be8bebe (patch)
tree       5cebf3d803be1ad0ac8914332da1472cdc0e2652 /drivers/clocksource
parent     3b2f64d00c46e1e4e9bd0bb9bb12619adac27a4b (diff)
parent     060fd3043e5e3488504b9e70182e188dd9113aea (diff)
Merge tag 'remove-local-timers' of git://git.kernel.org/pub/scm/linux/kernel/git/davidb/linux-msm into next/cleanup
From Stephen Boyd:
Now that we have a generic arch hook for broadcast we can remove the
local timer API entirely. Doing so will reduce code in ARM core, reduce
the architecture dependencies of our timer drivers, and simplify the code
because we no longer go through an architecture layer that is essentially
a hotplug notifier.
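
Each driver below follows the same shape: the per-cpu clockevent is set up and torn down from a CPU notifier's CPU_STARTING/CPU_DYING callbacks instead of through local_timer_register(). A rough sketch of that pattern, with hypothetical my_* names standing in for each driver's own per-cpu clockevent and setup/stop callbacks:

  #include <linux/clockchips.h>
  #include <linux/cpu.h>
  #include <linux/init.h>
  #include <linux/notifier.h>
  #include <linux/percpu.h>

  static struct clock_event_device __percpu *my_evt;

  /* Real drivers implement these; they were the local_timer_ops hooks. */
  static int my_timer_setup(struct clock_event_device *evt);
  static void my_timer_stop(struct clock_event_device *evt);

  static int my_timer_cpu_notify(struct notifier_block *self,
  				 unsigned long action, void *hcpu)
  {
  	switch (action & ~CPU_TASKS_FROZEN) {
  	case CPU_STARTING:	/* CPU coming up: program its local timer */
  		my_timer_setup(this_cpu_ptr(my_evt));
  		break;
  	case CPU_DYING:		/* CPU going down: quiesce its local timer */
  		my_timer_stop(this_cpu_ptr(my_evt));
  		break;
  	}
  	return NOTIFY_OK;
  }

  static struct notifier_block my_timer_cpu_nb = {
  	.notifier_call = my_timer_cpu_notify,
  };

  static void __init my_timer_init(void)
  {
  	my_evt = alloc_percpu(struct clock_event_device);

  	/* Replaces local_timer_register(&my_local_timer_ops). */
  	register_cpu_notifier(&my_timer_cpu_nb);

  	/* The boot CPU never gets CPU_STARTING, so configure it now. */
  	my_timer_setup(this_cpu_ptr(my_evt));
  }
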
* tag 'remove-local-timers' of git://git.kernel.org/pub/scm/linux/kernel/git/davidb/linux-msm:
ARM: smp: Remove local timer API
clocksource: time-armada-370-xp: Divorce from local timer API
clocksource: time-armada-370-xp: Fix sparse warning
ARM: msm: Divorce msm_timer from local timer API
ARM: PRIMA2: Divorce timer-marco from local timer API
ARM: EXYNOS4: Divorce mct from local timer API
ARM: OMAP2+: Divorce from local timer API
ARM: smp_twd: Divorce smp_twd from local timer API
ARM: smp: Remove duplicate dummy timer implementation
Resolved a large number of conflicts due to __cpuinit cleanups, etc.
Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'drivers/clocksource')

-rw-r--r--  drivers/clocksource/exynos_mct.c          | 58
-rw-r--r--  drivers/clocksource/time-armada-370-xp.c  | 92
-rw-r--r--  drivers/clocksource/timer-marco.c         | 98

3 files changed, 138 insertions, 110 deletions
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index b2bbc415f120..5b34768f4d7c 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -16,6 +16,7 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/clockchips.h>
+#include <linux/cpu.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/percpu.h>
@@ -24,7 +25,6 @@
 #include <linux/of_address.h>
 #include <linux/clocksource.h>

-#include <asm/localtimer.h>
 #include <asm/mach/time.h>

 #define EXYNOS4_MCTREG(x)	(x)
@@ -80,7 +80,7 @@ static unsigned int mct_int_type;
 static int mct_irqs[MCT_NR_IRQS];

 struct mct_clock_event_device {
-	struct clock_event_device *evt;
+	struct clock_event_device evt;
 	unsigned long base;
 	char name[10];
 };
@@ -295,8 +295,6 @@ static void exynos4_clockevent_init(void)
 	setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
 }

-#ifdef CONFIG_LOCAL_TIMERS
-
 static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);

 /* Clock event handling */
@@ -369,7 +367,7 @@ static inline void exynos4_tick_set_mode(enum clock_event_mode mode,

 static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
 {
-	struct clock_event_device *evt = mevt->evt;
+	struct clock_event_device *evt = &mevt->evt;

 	/*
 	 * This is for supporting oneshot mode.
@@ -391,7 +389,7 @@ static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
 static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
 {
 	struct mct_clock_event_device *mevt = dev_id;
-	struct clock_event_device *evt = mevt->evt;
+	struct clock_event_device *evt = &mevt->evt;

 	exynos4_mct_tick_clear(mevt);

@@ -405,8 +403,7 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
 	struct mct_clock_event_device *mevt;
 	unsigned int cpu = smp_processor_id();

-	mevt = this_cpu_ptr(&percpu_mct_tick);
-	mevt->evt = evt;
+	mevt = container_of(evt, struct mct_clock_event_device, evt);

 	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
 	sprintf(mevt->name, "mct_tick%d", cpu);
@@ -448,14 +445,37 @@ static void exynos4_local_timer_stop(struct clock_event_device *evt)
 	disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
 }

-static struct local_timer_ops exynos4_mct_tick_ops = {
-	.setup	= exynos4_local_timer_setup,
-	.stop	= exynos4_local_timer_stop,
+static int exynos4_mct_cpu_notify(struct notifier_block *self,
+				  unsigned long action, void *hcpu)
+{
+	struct mct_clock_event_device *mevt;
+
+	/*
+	 * Grab cpu pointer in each case to avoid spurious
+	 * preemptible warnings
+	 */
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_STARTING:
+		mevt = this_cpu_ptr(&percpu_mct_tick);
+		exynos4_local_timer_setup(&mevt->evt);
+		break;
+	case CPU_DYING:
+		mevt = this_cpu_ptr(&percpu_mct_tick);
+		exynos4_local_timer_stop(&mevt->evt);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block exynos4_mct_cpu_nb = {
+	.notifier_call = exynos4_mct_cpu_notify,
 };
-#endif /* CONFIG_LOCAL_TIMERS */

 static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
 {
+	int err;
+	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
 	struct clk *mct_clk, *tick_clk;

 	tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
@@ -473,9 +493,7 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
 	if (!reg_base)
 		panic("%s: unable to ioremap mct address space\n", __func__);

-#ifdef CONFIG_LOCAL_TIMERS
 	if (mct_int_type == MCT_INT_PPI) {
-		int err;

 		err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
 					 exynos4_mct_tick_isr, "MCT",
@@ -484,8 +502,16 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
 			       mct_irqs[MCT_L0_IRQ], err);
 	}

-	local_timer_register(&exynos4_mct_tick_ops);
-#endif /* CONFIG_LOCAL_TIMERS */
+	err = register_cpu_notifier(&exynos4_mct_cpu_nb);
+	if (err)
+		goto out_irq;
+
+	/* Immediately configure the timer on the boot CPU */
+	exynos4_local_timer_setup(&mevt->evt);
+	return;
+
+out_irq:
+	free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
 }

 void __init mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1)
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 1b04b7e1d39b..847cab6f6e31 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -19,6 +19,7 @@
 #include <linux/platform_device.h>
 #include <linux/kernel.h>
 #include <linux/clk.h>
+#include <linux/cpu.h>
 #include <linux/timer.h>
 #include <linux/clockchips.h>
 #include <linux/interrupt.h>
@@ -28,9 +29,9 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/sched_clock.h>
-
-#include <asm/localtimer.h>
 #include <linux/percpu.h>
+#include <linux/time-armada-370-xp.h>
+
 /*
  * Timer block registers.
  */
@@ -69,7 +70,7 @@ static bool timer25Mhz = true;
  */
 static u32 ticks_per_jiffy;

-static struct clock_event_device __percpu **percpu_armada_370_xp_evt;
+static struct clock_event_device __percpu *armada_370_xp_evt;

 static u32 notrace armada_370_xp_read_sched_clock(void)
 {
@@ -142,21 +143,14 @@ armada_370_xp_clkevt_mode(enum clock_event_mode mode,
 	}
 }

-static struct clock_event_device armada_370_xp_clkevt = {
-	.name		= "armada_370_xp_per_cpu_tick",
-	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
-	.shift		= 32,
-	.rating		= 300,
-	.set_next_event	= armada_370_xp_clkevt_next_event,
-	.set_mode	= armada_370_xp_clkevt_mode,
-};
+static int armada_370_xp_clkevt_irq;

 static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
 {
 	/*
 	 * ACK timer interrupt and call event handler.
 	 */
-	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+	struct clock_event_device *evt = dev_id;

 	writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
 	evt->event_handler(evt);
@@ -172,42 +166,55 @@ static int armada_370_xp_timer_setup(struct clock_event_device *evt)
 	u32 u;
 	int cpu = smp_processor_id();

-	/* Use existing clock_event for cpu 0 */
-	if (!smp_processor_id())
-		return 0;
-
 	u = readl(local_base + TIMER_CTRL_OFF);
 	if (timer25Mhz)
 		writel(u | TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
 	else
 		writel(u & ~TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);

-	evt->name = armada_370_xp_clkevt.name;
-	evt->irq = armada_370_xp_clkevt.irq;
-	evt->features = armada_370_xp_clkevt.features;
-	evt->shift = armada_370_xp_clkevt.shift;
-	evt->rating = armada_370_xp_clkevt.rating,
+	evt->name = "armada_370_xp_per_cpu_tick",
+	evt->features = CLOCK_EVT_FEAT_ONESHOT |
+			CLOCK_EVT_FEAT_PERIODIC;
+	evt->shift = 32,
+	evt->rating = 300,
 	evt->set_next_event = armada_370_xp_clkevt_next_event,
 	evt->set_mode = armada_370_xp_clkevt_mode,
+	evt->irq = armada_370_xp_clkevt_irq;
 	evt->cpumask = cpumask_of(cpu);

-	*__this_cpu_ptr(percpu_armada_370_xp_evt) = evt;
-
 	clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe);
 	enable_percpu_irq(evt->irq, 0);

 	return 0;
 }

 static void armada_370_xp_timer_stop(struct clock_event_device *evt)
 {
 	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
 	disable_percpu_irq(evt->irq);
 }

-static struct local_timer_ops armada_370_xp_local_timer_ops = {
-	.setup	= armada_370_xp_timer_setup,
-	.stop	= armada_370_xp_timer_stop,
+static int armada_370_xp_timer_cpu_notify(struct notifier_block *self,
+					  unsigned long action, void *hcpu)
+{
+	/*
+	 * Grab cpu pointer in each case to avoid spurious
+	 * preemptible warnings
+	 */
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_STARTING:
+		armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
+		break;
+	case CPU_DYING:
+		armada_370_xp_timer_stop(this_cpu_ptr(armada_370_xp_evt));
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block armada_370_xp_timer_cpu_nb = {
+	.notifier_call = armada_370_xp_timer_cpu_notify,
 };

 void __init armada_370_xp_timer_init(void)
@@ -223,9 +230,6 @@ void __init armada_370_xp_timer_init(void)

 	if (of_find_property(np, "marvell,timer-25Mhz", NULL)) {
 		/* The fixed 25MHz timer is available so let's use it */
-		u = readl(local_base + TIMER_CTRL_OFF);
-		writel(u | TIMER0_25MHZ,
-		       local_base + TIMER_CTRL_OFF);
 		u = readl(timer_base + TIMER_CTRL_OFF);
 		writel(u | TIMER0_25MHZ,
 		       timer_base + TIMER_CTRL_OFF);
@@ -235,9 +239,6 @@ void __init armada_370_xp_timer_init(void)
 		struct clk *clk = of_clk_get(np, 0);
 		WARN_ON(IS_ERR(clk));
 		rate = clk_get_rate(clk);
-		u = readl(local_base + TIMER_CTRL_OFF);
-		writel(u & ~(TIMER0_25MHZ),
-		       local_base + TIMER_CTRL_OFF);

 		u = readl(timer_base + TIMER_CTRL_OFF);
 		writel(u & ~(TIMER0_25MHZ),
@@ -251,7 +252,7 @@ void __init armada_370_xp_timer_init(void)
 	 * We use timer 0 as clocksource, and private(local) timer 0
 	 * for clockevents
 	 */
-	armada_370_xp_clkevt.irq = irq_of_parse_and_map(np, 4);
+	armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4);

 	ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;

@@ -276,26 +277,19 @@ void __init armada_370_xp_timer_init(void)
 			      "armada_370_xp_clocksource",
 			      timer_clk, 300, 32, clocksource_mmio_readl_down);

-	/* Register the clockevent on the private timer of CPU 0 */
-	armada_370_xp_clkevt.cpumask = cpumask_of(0);
-	clockevents_config_and_register(&armada_370_xp_clkevt,
-					timer_clk, 1, 0xfffffffe);
+	register_cpu_notifier(&armada_370_xp_timer_cpu_nb);

-	percpu_armada_370_xp_evt = alloc_percpu(struct clock_event_device *);
+	armada_370_xp_evt = alloc_percpu(struct clock_event_device);


 	/*
 	 * Setup clockevent timer (interrupt-driven).
 	 */
-	*__this_cpu_ptr(percpu_armada_370_xp_evt) = &armada_370_xp_clkevt;
-	res = request_percpu_irq(armada_370_xp_clkevt.irq,
+	res = request_percpu_irq(armada_370_xp_clkevt_irq,
 				 armada_370_xp_timer_interrupt,
-				 armada_370_xp_clkevt.name,
-				 percpu_armada_370_xp_evt);
-	if (!res) {
-		enable_percpu_irq(armada_370_xp_clkevt.irq, 0);
-#ifdef CONFIG_LOCAL_TIMERS
-		local_timer_register(&armada_370_xp_local_timer_ops);
-#endif
-	}
+				 "armada_370_xp_per_cpu_tick",
+				 armada_370_xp_evt);
+	/* Immediately configure the timer on the boot CPU */
+	if (!res)
+		armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
 }
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c
index 62876baa3ab9..09a17d9a6594 100644
--- a/drivers/clocksource/timer-marco.c
+++ b/drivers/clocksource/timer-marco.c
@@ -10,6 +10,7 @@
 #include <linux/interrupt.h>
 #include <linux/clockchips.h>
 #include <linux/clocksource.h>
+#include <linux/cpu.h>
 #include <linux/bitops.h>
 #include <linux/irq.h>
 #include <linux/clk.h>
@@ -18,7 +19,6 @@
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
 #include <linux/sched_clock.h>
-#include <asm/localtimer.h>
 #include <asm/mach/time.h>

 #define SIRFSOC_TIMER_32COUNTER_0_CTRL	0x0000
@@ -151,13 +151,7 @@ static void sirfsoc_clocksource_resume(struct clocksource *cs)
 		BIT(1) | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
 }

-static struct clock_event_device sirfsoc_clockevent = {
-	.name = "sirfsoc_clockevent",
-	.rating = 200,
-	.features = CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode = sirfsoc_timer_set_mode,
-	.set_next_event = sirfsoc_timer_set_next_event,
-};
+static struct clock_event_device __percpu *sirfsoc_clockevent;

 static struct clocksource sirfsoc_clocksource = {
 	.name = "sirfsoc_clocksource",
@@ -173,11 +167,8 @@ static struct irqaction sirfsoc_timer_irq = {
 	.name = "sirfsoc_timer0",
 	.flags = IRQF_TIMER | IRQF_NOBALANCING,
 	.handler = sirfsoc_timer_interrupt,
-	.dev_id = &sirfsoc_clockevent,
 };

-#ifdef CONFIG_LOCAL_TIMERS
-
 static struct irqaction sirfsoc_timer1_irq = {
 	.name = "sirfsoc_timer1",
 	.flags = IRQF_TIMER | IRQF_NOBALANCING,
@@ -186,24 +177,28 @@ static struct irqaction sirfsoc_timer1_irq = {

 static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
 {
-	/* Use existing clock_event for cpu 0 */
-	if (!smp_processor_id())
-		return 0;
+	int cpu = smp_processor_id();
+	struct irqaction *action;
+
+	if (cpu == 0)
+		action = &sirfsoc_timer_irq;
+	else
+		action = &sirfsoc_timer1_irq;

-	ce->irq = sirfsoc_timer1_irq.irq;
+	ce->irq = action->irq;
 	ce->name = "local_timer";
-	ce->features = sirfsoc_clockevent.features;
-	ce->rating = sirfsoc_clockevent.rating;
+	ce->features = CLOCK_EVT_FEAT_ONESHOT;
+	ce->rating = 200;
 	ce->set_mode = sirfsoc_timer_set_mode;
 	ce->set_next_event = sirfsoc_timer_set_next_event;
-	ce->shift = sirfsoc_clockevent.shift;
-	ce->mult = sirfsoc_clockevent.mult;
-	ce->max_delta_ns = sirfsoc_clockevent.max_delta_ns;
-	ce->min_delta_ns = sirfsoc_clockevent.min_delta_ns;
+	clockevents_calc_mult_shift(ce, CLOCK_TICK_RATE, 60);
+	ce->max_delta_ns = clockevent_delta2ns(-2, ce);
+	ce->min_delta_ns = clockevent_delta2ns(2, ce);
+	ce->cpumask = cpumask_of(cpu);

-	sirfsoc_timer1_irq.dev_id = ce;
-	BUG_ON(setup_irq(ce->irq, &sirfsoc_timer1_irq));
-	irq_set_affinity(sirfsoc_timer1_irq.irq, cpumask_of(1));
+	action->dev_id = ce;
+	BUG_ON(setup_irq(ce->irq, action));
+	irq_set_affinity(action->irq, cpumask_of(cpu));

 	clockevents_register_device(ce);
 	return 0;
@@ -211,31 +206,48 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)

 static void sirfsoc_local_timer_stop(struct clock_event_device *ce)
 {
+	int cpu = smp_processor_id();
+
 	sirfsoc_timer_count_disable(1);

-	remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq);
+	if (cpu == 0)
+		remove_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
+	else
+		remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq);
 }

-static struct local_timer_ops sirfsoc_local_timer_ops = {
-	.setup	= sirfsoc_local_timer_setup,
-	.stop	= sirfsoc_local_timer_stop,
+static int sirfsoc_cpu_notify(struct notifier_block *self,
+			      unsigned long action, void *hcpu)
+{
+	/*
+	 * Grab cpu pointer in each case to avoid spurious
+	 * preemptible warnings
+	 */
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_STARTING:
+		sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
+		break;
+	case CPU_DYING:
+		sirfsoc_local_timer_stop(this_cpu_ptr(sirfsoc_clockevent));
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block sirfsoc_cpu_nb = {
+	.notifier_call = sirfsoc_cpu_notify,
 };
-#endif /* CONFIG_LOCAL_TIMERS */

 static void __init sirfsoc_clockevent_init(void)
 {
-	clockevents_calc_mult_shift(&sirfsoc_clockevent, CLOCK_TICK_RATE, 60);
-
-	sirfsoc_clockevent.max_delta_ns =
-		clockevent_delta2ns(-2, &sirfsoc_clockevent);
-	sirfsoc_clockevent.min_delta_ns =
-		clockevent_delta2ns(2, &sirfsoc_clockevent);
-
-	sirfsoc_clockevent.cpumask = cpumask_of(0);
-	clockevents_register_device(&sirfsoc_clockevent);
-#ifdef CONFIG_LOCAL_TIMERS
-	local_timer_register(&sirfsoc_local_timer_ops);
-#endif
+	sirfsoc_clockevent = alloc_percpu(struct clock_event_device);
+	BUG_ON(!sirfsoc_clockevent);
+
+	BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb));
+
+	/* Immediately configure the timer on the boot CPU */
+	sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
 }

 /* initialize the kernel jiffy timer source */
@@ -273,8 +285,6 @@ static void __init sirfsoc_marco_timer_init(void)

 	BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE));

-	BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq));
-
 	sirfsoc_clockevent_init();
 }

@@ -288,11 +298,9 @@ static void __init sirfsoc_of_timer_init(struct device_node *np)
 	if (!sirfsoc_timer_irq.irq)
 		panic("No irq passed for timer0 via DT\n");

-#ifdef CONFIG_LOCAL_TIMERS
 	sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1);
 	if (!sirfsoc_timer1_irq.irq)
 		panic("No irq passed for timer1 via DT\n");
-#endif

 	sirfsoc_marco_timer_init();
 }