-rw-r--r--   arch/arm/Kconfig                          |  12
-rw-r--r--   arch/arm/include/asm/localtimer.h         |  34
-rw-r--r--   arch/arm/kernel/smp.c                     |  87
-rw-r--r--   arch/arm/kernel/smp_twd.c                 |  64
-rw-r--r--   arch/arm/mach-highbank/Kconfig            |   2
-rw-r--r--   arch/arm/mach-imx/Kconfig                 |   3
-rw-r--r--   arch/arm/mach-msm/timer.c                 | 124
-rw-r--r--   arch/arm/mach-omap2/Kconfig               |   3
-rw-r--r--   arch/arm/mach-omap2/timer.c               |   7
-rw-r--r--   arch/arm/mach-realview/Kconfig            |   8
-rw-r--r--   arch/arm/mach-spear/Kconfig               |   2
-rw-r--r--   arch/arm/mach-tegra/Kconfig               |   2
-rw-r--r--   arch/arm/mach-ux500/Kconfig               |   2
-rw-r--r--   arch/arm/mach-vexpress/Kconfig            |   2
-rw-r--r--   arch/arm/mach-zynq/Kconfig                |   2
-rw-r--r--   drivers/clocksource/exynos_mct.c          |  58
-rw-r--r--   drivers/clocksource/time-armada-370-xp.c  |  92
-rw-r--r--   drivers/clocksource/timer-marco.c         |  98
-rw-r--r--   include/linux/time-armada-370-xp.h        |   4
19 files changed, 265 insertions(+), 341 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ba412e02ec0c..baf43de26a54 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -645,7 +645,7 @@ config ARCH_SHMOBILE | |||
645 | select CLKDEV_LOOKUP | 645 | select CLKDEV_LOOKUP |
646 | select GENERIC_CLOCKEVENTS | 646 | select GENERIC_CLOCKEVENTS |
647 | select HAVE_ARM_SCU if SMP | 647 | select HAVE_ARM_SCU if SMP |
648 | select HAVE_ARM_TWD if LOCAL_TIMERS | 648 | select HAVE_ARM_TWD if SMP |
649 | select HAVE_CLK | 649 | select HAVE_CLK |
650 | select HAVE_MACH_CLKDEV | 650 | select HAVE_MACH_CLKDEV |
651 | select HAVE_SMP | 651 | select HAVE_SMP |
@@ -1584,16 +1584,6 @@ config ARM_PSCI | |||
1584 | 0022A ("Power State Coordination Interface System Software on | 1584 | 0022A ("Power State Coordination Interface System Software on |
1585 | ARM processors"). | 1585 | ARM processors"). |
1586 | 1586 | ||
1587 | config LOCAL_TIMERS | ||
1588 | bool "Use local timer interrupts" | ||
1589 | depends on SMP | ||
1590 | default y | ||
1591 | help | ||
1592 | Enable support for local timers on SMP platforms, rather then the | ||
1593 | legacy IPI broadcast method. Local timers allows the system | ||
1594 | accounting to be spread across the timer interval, preventing a | ||
1595 | "thundering herd" at every timer tick. | ||
1596 | |||
1597 | # The GPIO number here must be sorted by descending number. In case of | 1587 | # The GPIO number here must be sorted by descending number. In case of |
1598 | # a multiplatform kernel, we just want the highest value required by the | 1588 | # a multiplatform kernel, we just want the highest value required by the |
1599 | # selected platforms. | 1589 | # selected platforms. |
diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h
deleted file mode 100644
index f77ffc1eb0c2..000000000000
--- a/arch/arm/include/asm/localtimer.h
+++ /dev/null
@@ -1,34 +0,0 @@ | |||
1 | /* | ||
2 | * arch/arm/include/asm/localtimer.h | ||
3 | * | ||
4 | * Copyright (C) 2004-2005 ARM Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #ifndef __ASM_ARM_LOCALTIMER_H | ||
11 | #define __ASM_ARM_LOCALTIMER_H | ||
12 | |||
13 | #include <linux/errno.h> | ||
14 | |||
15 | struct clock_event_device; | ||
16 | |||
17 | struct local_timer_ops { | ||
18 | int (*setup)(struct clock_event_device *); | ||
19 | void (*stop)(struct clock_event_device *); | ||
20 | }; | ||
21 | |||
22 | #ifdef CONFIG_LOCAL_TIMERS | ||
23 | /* | ||
24 | * Register a local timer driver | ||
25 | */ | ||
26 | int local_timer_register(struct local_timer_ops *); | ||
27 | #else | ||
28 | static inline int local_timer_register(struct local_timer_ops *ops) | ||
29 | { | ||
30 | return -ENXIO; | ||
31 | } | ||
32 | #endif | ||
33 | |||
34 | #endif | ||
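For reference, this is roughly how a driver used the interface deleted above: it filled in a struct local_timer_ops and registered it once, after which the ARM SMP core called ->setup() on each CPU as it came online and ->stop() on hot-unplug. A minimal sketch of the old pattern (the my_* names are illustrative, not from this tree):

    /* Old-style per-CPU timer driver, prior to this patch (sketch only). */
    #include <asm/localtimer.h>

    static int my_timer_setup(struct clock_event_device *evt)
    {
        /* program this CPU's timer and register evt as its clockevent */
        return 0;
    }

    static void my_timer_stop(struct clock_event_device *evt)
    {
        /* quiesce this CPU's timer on hot-unplug */
    }

    static struct local_timer_ops my_lt_ops = {
        .setup = my_timer_setup,
        .stop  = my_timer_stop,
    };

    static int __init my_timer_init(void)
    {
        /* one call at probe time; the ARM SMP core then invokes
         * ->setup()/->stop() per CPU on its own schedule */
        return local_timer_register(&my_lt_ops);
    }

The rest of the series converts every such driver to register its own CPU hotplug notifier, which is why this ARM-only registration point can go away.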
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c2b4f8f0be9a..3a98192a3118 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -41,7 +41,6 @@ | |||
41 | #include <asm/sections.h> | 41 | #include <asm/sections.h> |
42 | #include <asm/tlbflush.h> | 42 | #include <asm/tlbflush.h> |
43 | #include <asm/ptrace.h> | 43 | #include <asm/ptrace.h> |
44 | #include <asm/localtimer.h> | ||
45 | #include <asm/smp_plat.h> | 44 | #include <asm/smp_plat.h> |
46 | #include <asm/virt.h> | 45 | #include <asm/virt.h> |
47 | #include <asm/mach/arch.h> | 46 | #include <asm/mach/arch.h> |
@@ -146,8 +145,6 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle) | |||
146 | } | 145 | } |
147 | 146 | ||
148 | #ifdef CONFIG_HOTPLUG_CPU | 147 | #ifdef CONFIG_HOTPLUG_CPU |
149 | static void percpu_timer_stop(void); | ||
150 | |||
151 | static int platform_cpu_kill(unsigned int cpu) | 148 | static int platform_cpu_kill(unsigned int cpu) |
152 | { | 149 | { |
153 | if (smp_ops.cpu_kill) | 150 | if (smp_ops.cpu_kill) |
@@ -191,11 +188,6 @@ int __cpu_disable(void) | |||
191 | migrate_irqs(); | 188 | migrate_irqs(); |
192 | 189 | ||
193 | /* | 190 | /* |
194 | * Stop the local timer for this CPU. | ||
195 | */ | ||
196 | percpu_timer_stop(); | ||
197 | |||
198 | /* | ||
199 | * Flush user cache and TLB mappings, and then remove this CPU | 191 | * Flush user cache and TLB mappings, and then remove this CPU |
200 | * from the vm mask set of all processes. | 192 | * from the vm mask set of all processes. |
201 | * | 193 | * |
@@ -316,8 +308,6 @@ static void smp_store_cpu_info(unsigned int cpuid) | |||
316 | store_cpu_topology(cpuid); | 308 | store_cpu_topology(cpuid); |
317 | } | 309 | } |
318 | 310 | ||
319 | static void percpu_timer_setup(void); | ||
320 | |||
321 | /* | 311 | /* |
322 | * This is the secondary CPU boot entry. We're using this CPUs | 312 | * This is the secondary CPU boot entry. We're using this CPUs |
323 | * idle thread stack, but a set of temporary page tables. | 313 | * idle thread stack, but a set of temporary page tables. |
@@ -372,11 +362,6 @@ asmlinkage void secondary_start_kernel(void) | |||
372 | set_cpu_online(cpu, true); | 362 | set_cpu_online(cpu, true); |
373 | complete(&cpu_running); | 363 | complete(&cpu_running); |
374 | 364 | ||
375 | /* | ||
376 | * Setup the percpu timer for this CPU. | ||
377 | */ | ||
378 | percpu_timer_setup(); | ||
379 | |||
380 | local_irq_enable(); | 365 | local_irq_enable(); |
381 | local_fiq_enable(); | 366 | local_fiq_enable(); |
382 | 367 | ||
@@ -423,12 +408,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
423 | max_cpus = ncores; | 408 | max_cpus = ncores; |
424 | if (ncores > 1 && max_cpus) { | 409 | if (ncores > 1 && max_cpus) { |
425 | /* | 410 | /* |
426 | * Enable the local timer or broadcast device for the | ||
427 | * boot CPU, but only if we have more than one CPU. | ||
428 | */ | ||
429 | percpu_timer_setup(); | ||
430 | |||
431 | /* | ||
432 | * Initialise the present map, which describes the set of CPUs | 411 | * Initialise the present map, which describes the set of CPUs |
433 | * actually populated at the present time. A platform should | 412 | * actually populated at the present time. A platform should |
434 | * re-initialize the map in the platforms smp_prepare_cpus() | 413 | * re-initialize the map in the platforms smp_prepare_cpus() |
@@ -504,11 +483,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu) | |||
504 | return sum; | 483 | return sum; |
505 | } | 484 | } |
506 | 485 | ||
507 | /* | ||
508 | * Timer (local or broadcast) support | ||
509 | */ | ||
510 | static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent); | ||
511 | |||
512 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST | 486 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST |
513 | void tick_broadcast(const struct cpumask *mask) | 487 | void tick_broadcast(const struct cpumask *mask) |
514 | { | 488 | { |
@@ -516,67 +490,6 @@ void tick_broadcast(const struct cpumask *mask) | |||
516 | } | 490 | } |
517 | #endif | 491 | #endif |
518 | 492 | ||
519 | static void broadcast_timer_set_mode(enum clock_event_mode mode, | ||
520 | struct clock_event_device *evt) | ||
521 | { | ||
522 | } | ||
523 | |||
524 | static void broadcast_timer_setup(struct clock_event_device *evt) | ||
525 | { | ||
526 | evt->name = "dummy_timer"; | ||
527 | evt->features = CLOCK_EVT_FEAT_ONESHOT | | ||
528 | CLOCK_EVT_FEAT_PERIODIC | | ||
529 | CLOCK_EVT_FEAT_DUMMY; | ||
530 | evt->rating = 100; | ||
531 | evt->mult = 1; | ||
532 | evt->set_mode = broadcast_timer_set_mode; | ||
533 | |||
534 | clockevents_register_device(evt); | ||
535 | } | ||
536 | |||
537 | static struct local_timer_ops *lt_ops; | ||
538 | |||
539 | #ifdef CONFIG_LOCAL_TIMERS | ||
540 | int local_timer_register(struct local_timer_ops *ops) | ||
541 | { | ||
542 | if (!is_smp() || !setup_max_cpus) | ||
543 | return -ENXIO; | ||
544 | |||
545 | if (lt_ops) | ||
546 | return -EBUSY; | ||
547 | |||
548 | lt_ops = ops; | ||
549 | return 0; | ||
550 | } | ||
551 | #endif | ||
552 | |||
553 | static void percpu_timer_setup(void) | ||
554 | { | ||
555 | unsigned int cpu = smp_processor_id(); | ||
556 | struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); | ||
557 | |||
558 | evt->cpumask = cpumask_of(cpu); | ||
559 | |||
560 | if (!lt_ops || lt_ops->setup(evt)) | ||
561 | broadcast_timer_setup(evt); | ||
562 | } | ||
563 | |||
564 | #ifdef CONFIG_HOTPLUG_CPU | ||
565 | /* | ||
566 | * The generic clock events code purposely does not stop the local timer | ||
567 | * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it | ||
568 | * manually here. | ||
569 | */ | ||
570 | static void percpu_timer_stop(void) | ||
571 | { | ||
572 | unsigned int cpu = smp_processor_id(); | ||
573 | struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); | ||
574 | |||
575 | if (lt_ops) | ||
576 | lt_ops->stop(evt); | ||
577 | } | ||
578 | #endif | ||
579 | |||
580 | static DEFINE_RAW_SPINLOCK(stop_lock); | 493 | static DEFINE_RAW_SPINLOCK(stop_lock); |
581 | 494 | ||
582 | /* | 495 | /* |
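Dropping percpu_timer_setup() does not leave a CPU without a clockevent when no local timer driver probes: by this point an equivalent per-CPU dummy broadcast device is expected to come from generic code (drivers/clocksource/dummy_timer.c in kernels of this vintage), registered from a CPU notifier much like the drivers converted below. A sketch of that fallback, mirroring the broadcast_timer_setup() removed above (assumption about the generic driver's behaviour, not part of this patch):

    static DEFINE_PER_CPU(struct clock_event_device, dummy_evt);

    static void dummy_timer_set_mode(enum clock_event_mode mode,
                                     struct clock_event_device *evt)
    {
        /* the dummy device relies on broadcast; nothing to program */
    }

    static void dummy_timer_setup(void)
    {
        int cpu = smp_processor_id();
        struct clock_event_device *evt = __this_cpu_ptr(&dummy_evt);

        evt->name     = "dummy_timer";
        evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
                        CLOCK_EVT_FEAT_DUMMY;
        evt->rating   = 100;   /* low rating: any real local timer wins */
        evt->mult     = 1;
        evt->set_mode = dummy_timer_set_mode;
        evt->cpumask  = cpumask_of(cpu);

        clockevents_register_device(evt);
    }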
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 25956204ef23..2985c9f0905d 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/clk.h> | 13 | #include <linux/clk.h> |
14 | #include <linux/cpu.h> | ||
14 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
15 | #include <linux/device.h> | 16 | #include <linux/device.h> |
16 | #include <linux/err.h> | 17 | #include <linux/err.h> |
@@ -24,7 +25,6 @@ | |||
24 | 25 | ||
25 | #include <asm/smp_plat.h> | 26 | #include <asm/smp_plat.h> |
26 | #include <asm/smp_twd.h> | 27 | #include <asm/smp_twd.h> |
27 | #include <asm/localtimer.h> | ||
28 | 28 | ||
29 | /* set up by the platform code */ | 29 | /* set up by the platform code */ |
30 | static void __iomem *twd_base; | 30 | static void __iomem *twd_base; |
@@ -33,7 +33,7 @@ static struct clk *twd_clk; | |||
33 | static unsigned long twd_timer_rate; | 33 | static unsigned long twd_timer_rate; |
34 | static DEFINE_PER_CPU(bool, percpu_setup_called); | 34 | static DEFINE_PER_CPU(bool, percpu_setup_called); |
35 | 35 | ||
36 | static struct clock_event_device __percpu **twd_evt; | 36 | static struct clock_event_device __percpu *twd_evt; |
37 | static int twd_ppi; | 37 | static int twd_ppi; |
38 | 38 | ||
39 | static void twd_set_mode(enum clock_event_mode mode, | 39 | static void twd_set_mode(enum clock_event_mode mode, |
@@ -90,8 +90,10 @@ static int twd_timer_ack(void) | |||
90 | return 0; | 90 | return 0; |
91 | } | 91 | } |
92 | 92 | ||
93 | static void twd_timer_stop(struct clock_event_device *clk) | 93 | static void twd_timer_stop(void) |
94 | { | 94 | { |
95 | struct clock_event_device *clk = __this_cpu_ptr(twd_evt); | ||
96 | |||
95 | twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk); | 97 | twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk); |
96 | disable_percpu_irq(clk->irq); | 98 | disable_percpu_irq(clk->irq); |
97 | } | 99 | } |
@@ -106,7 +108,7 @@ static void twd_update_frequency(void *new_rate) | |||
106 | { | 108 | { |
107 | twd_timer_rate = *((unsigned long *) new_rate); | 109 | twd_timer_rate = *((unsigned long *) new_rate); |
108 | 110 | ||
109 | clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate); | 111 | clockevents_update_freq(__this_cpu_ptr(twd_evt), twd_timer_rate); |
110 | } | 112 | } |
111 | 113 | ||
112 | static int twd_rate_change(struct notifier_block *nb, | 114 | static int twd_rate_change(struct notifier_block *nb, |
@@ -132,7 +134,7 @@ static struct notifier_block twd_clk_nb = { | |||
132 | 134 | ||
133 | static int twd_clk_init(void) | 135 | static int twd_clk_init(void) |
134 | { | 136 | { |
135 | if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk)) | 137 | if (twd_evt && __this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk)) |
136 | return clk_notifier_register(twd_clk, &twd_clk_nb); | 138 | return clk_notifier_register(twd_clk, &twd_clk_nb); |
137 | 139 | ||
138 | return 0; | 140 | return 0; |
@@ -151,7 +153,7 @@ static void twd_update_frequency(void *data) | |||
151 | { | 153 | { |
152 | twd_timer_rate = clk_get_rate(twd_clk); | 154 | twd_timer_rate = clk_get_rate(twd_clk); |
153 | 155 | ||
154 | clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate); | 156 | clockevents_update_freq(__this_cpu_ptr(twd_evt), twd_timer_rate); |
155 | } | 157 | } |
156 | 158 | ||
157 | static int twd_cpufreq_transition(struct notifier_block *nb, | 159 | static int twd_cpufreq_transition(struct notifier_block *nb, |
@@ -177,7 +179,7 @@ static struct notifier_block twd_cpufreq_nb = { | |||
177 | 179 | ||
178 | static int twd_cpufreq_init(void) | 180 | static int twd_cpufreq_init(void) |
179 | { | 181 | { |
180 | if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk)) | 182 | if (twd_evt && __this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk)) |
181 | return cpufreq_register_notifier(&twd_cpufreq_nb, | 183 | return cpufreq_register_notifier(&twd_cpufreq_nb, |
182 | CPUFREQ_TRANSITION_NOTIFIER); | 184 | CPUFREQ_TRANSITION_NOTIFIER); |
183 | 185 | ||
@@ -228,7 +230,7 @@ static void twd_calibrate_rate(void) | |||
228 | 230 | ||
229 | static irqreturn_t twd_handler(int irq, void *dev_id) | 231 | static irqreturn_t twd_handler(int irq, void *dev_id) |
230 | { | 232 | { |
231 | struct clock_event_device *evt = *(struct clock_event_device **)dev_id; | 233 | struct clock_event_device *evt = dev_id; |
232 | 234 | ||
233 | if (twd_timer_ack()) { | 235 | if (twd_timer_ack()) { |
234 | evt->event_handler(evt); | 236 | evt->event_handler(evt); |
@@ -265,9 +267,9 @@ static void twd_get_clock(struct device_node *np) | |||
265 | /* | 267 | /* |
266 | * Setup the local clock events for a CPU. | 268 | * Setup the local clock events for a CPU. |
267 | */ | 269 | */ |
268 | static int twd_timer_setup(struct clock_event_device *clk) | 270 | static void twd_timer_setup(void) |
269 | { | 271 | { |
270 | struct clock_event_device **this_cpu_clk; | 272 | struct clock_event_device *clk = __this_cpu_ptr(twd_evt); |
271 | int cpu = smp_processor_id(); | 273 | int cpu = smp_processor_id(); |
272 | 274 | ||
273 | /* | 275 | /* |
@@ -276,9 +278,9 @@ static int twd_timer_setup(struct clock_event_device *clk) | |||
276 | */ | 278 | */ |
277 | if (per_cpu(percpu_setup_called, cpu)) { | 279 | if (per_cpu(percpu_setup_called, cpu)) { |
278 | __raw_writel(0, twd_base + TWD_TIMER_CONTROL); | 280 | __raw_writel(0, twd_base + TWD_TIMER_CONTROL); |
279 | clockevents_register_device(*__this_cpu_ptr(twd_evt)); | 281 | clockevents_register_device(clk); |
280 | enable_percpu_irq(clk->irq, 0); | 282 | enable_percpu_irq(clk->irq, 0); |
281 | return 0; | 283 | return; |
282 | } | 284 | } |
283 | per_cpu(percpu_setup_called, cpu) = true; | 285 | per_cpu(percpu_setup_called, cpu) = true; |
284 | 286 | ||
@@ -297,27 +299,37 @@ static int twd_timer_setup(struct clock_event_device *clk) | |||
297 | clk->set_mode = twd_set_mode; | 299 | clk->set_mode = twd_set_mode; |
298 | clk->set_next_event = twd_set_next_event; | 300 | clk->set_next_event = twd_set_next_event; |
299 | clk->irq = twd_ppi; | 301 | clk->irq = twd_ppi; |
300 | 302 | clk->cpumask = cpumask_of(cpu); | |
301 | this_cpu_clk = __this_cpu_ptr(twd_evt); | ||
302 | *this_cpu_clk = clk; | ||
303 | 303 | ||
304 | clockevents_config_and_register(clk, twd_timer_rate, | 304 | clockevents_config_and_register(clk, twd_timer_rate, |
305 | 0xf, 0xffffffff); | 305 | 0xf, 0xffffffff); |
306 | enable_percpu_irq(clk->irq, 0); | 306 | enable_percpu_irq(clk->irq, 0); |
307 | } | ||
307 | 308 | ||
308 | return 0; | 309 | static int twd_timer_cpu_notify(struct notifier_block *self, |
310 | unsigned long action, void *hcpu) | ||
311 | { | ||
312 | switch (action & ~CPU_TASKS_FROZEN) { | ||
313 | case CPU_STARTING: | ||
314 | twd_timer_setup(); | ||
315 | break; | ||
316 | case CPU_DYING: | ||
317 | twd_timer_stop(); | ||
318 | break; | ||
319 | } | ||
320 | |||
321 | return NOTIFY_OK; | ||
309 | } | 322 | } |
310 | 323 | ||
311 | static struct local_timer_ops twd_lt_ops = { | 324 | static struct notifier_block twd_timer_cpu_nb = { |
312 | .setup = twd_timer_setup, | 325 | .notifier_call = twd_timer_cpu_notify, |
313 | .stop = twd_timer_stop, | ||
314 | }; | 326 | }; |
315 | 327 | ||
316 | static int __init twd_local_timer_common_register(struct device_node *np) | 328 | static int __init twd_local_timer_common_register(struct device_node *np) |
317 | { | 329 | { |
318 | int err; | 330 | int err; |
319 | 331 | ||
320 | twd_evt = alloc_percpu(struct clock_event_device *); | 332 | twd_evt = alloc_percpu(struct clock_event_device); |
321 | if (!twd_evt) { | 333 | if (!twd_evt) { |
322 | err = -ENOMEM; | 334 | err = -ENOMEM; |
323 | goto out_free; | 335 | goto out_free; |
@@ -329,12 +341,22 @@ static int __init twd_local_timer_common_register(struct device_node *np) | |||
329 | goto out_free; | 341 | goto out_free; |
330 | } | 342 | } |
331 | 343 | ||
332 | err = local_timer_register(&twd_lt_ops); | 344 | err = register_cpu_notifier(&twd_timer_cpu_nb); |
333 | if (err) | 345 | if (err) |
334 | goto out_irq; | 346 | goto out_irq; |
335 | 347 | ||
336 | twd_get_clock(np); | 348 | twd_get_clock(np); |
337 | 349 | ||
350 | /* | ||
351 | * Immediately configure the timer on the boot CPU, unless we need | ||
352 | * jiffies to be incrementing to calibrate the rate in which case | ||
353 | * setup the timer in late_time_init. | ||
354 | */ | ||
355 | if (twd_timer_rate) | ||
356 | twd_timer_setup(); | ||
357 | else | ||
358 | late_time_init = twd_timer_setup; | ||
359 | |||
338 | return 0; | 360 | return 0; |
339 | 361 | ||
340 | out_irq: | 362 | out_irq: |
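The TWD conversion is the template the rest of the patch repeats: the per-CPU clock_event_device moves into percpu storage, a CPU notifier programs it at CPU_STARTING and quiesces it at CPU_DYING, and the boot CPU (which never receives CPU_STARTING) is configured directly at registration time. Boiled down, with the driver-specific setup elided and the timer_* names used only for illustration:

    static struct clock_event_device __percpu *evt;

    static void timer_setup_this_cpu(void)
    {
        /* fill in this CPU's slot of 'evt', call
         * clockevents_config_and_register(), enable its per-CPU IRQ */
    }

    static void timer_stop_this_cpu(void)
    {
        /* set CLOCK_EVT_MODE_UNUSED and disable this CPU's IRQ */
    }

    static int timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
    {
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_STARTING:      /* runs on the incoming CPU, irqs off */
            timer_setup_this_cpu();
            break;
        case CPU_DYING:         /* runs on the outgoing CPU */
            timer_stop_this_cpu();
            break;
        }
        return NOTIFY_OK;
    }

    static struct notifier_block timer_cpu_nb = {
        .notifier_call = timer_cpu_notify,
    };

    static int __init timer_driver_init(void)
    {
        evt = alloc_percpu(struct clock_event_device);
        if (!evt)
            return -ENOMEM;
        if (register_cpu_notifier(&timer_cpu_nb))
            return -EBUSY;
        /* the boot CPU never sees CPU_STARTING, so configure it here */
        timer_setup_this_cpu();
        return 0;
    }

The TWD adds one wrinkle to this template: when the clock rate still has to be calibrated against jiffies, the boot-CPU setup is deferred to late_time_init instead of running immediately, as the hunk above shows.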
diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig
index cd9fcb1cd7ab..6acbdabf6222 100644
--- a/arch/arm/mach-highbank/Kconfig
+++ b/arch/arm/mach-highbank/Kconfig
@@ -12,7 +12,7 @@ config ARCH_HIGHBANK | |||
12 | select CPU_V7 | 12 | select CPU_V7 |
13 | select GENERIC_CLOCKEVENTS | 13 | select GENERIC_CLOCKEVENTS |
14 | select HAVE_ARM_SCU | 14 | select HAVE_ARM_SCU |
15 | select HAVE_ARM_TWD if LOCAL_TIMERS | 15 | select HAVE_ARM_TWD if SMP |
16 | select HAVE_SMP | 16 | select HAVE_SMP |
17 | select MAILBOX | 17 | select MAILBOX |
18 | select PL320_MBOX | 18 | select PL320_MBOX |
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index f54656091a9d..21fa9fa3d54e 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -793,7 +793,8 @@ config SOC_IMX6Q | |||
793 | select COMMON_CLK | 793 | select COMMON_CLK |
794 | select CPU_V7 | 794 | select CPU_V7 |
795 | select HAVE_ARM_SCU if SMP | 795 | select HAVE_ARM_SCU if SMP |
796 | select HAVE_ARM_TWD if LOCAL_TIMERS | 796 | select HAVE_ARM_TWD if SMP |
797 | select HAVE_CAN_FLEXCAN if CAN | ||
797 | select HAVE_IMX_ANATOP | 798 | select HAVE_IMX_ANATOP |
798 | select HAVE_IMX_GPC | 799 | select HAVE_IMX_GPC |
799 | select HAVE_IMX_MMDC | 800 | select HAVE_IMX_MMDC |
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 8697cfc0d0b6..a7afbacae61a 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <linux/clocksource.h> | 17 | #include <linux/clocksource.h> |
18 | #include <linux/clockchips.h> | 18 | #include <linux/clockchips.h> |
19 | #include <linux/cpu.h> | ||
19 | #include <linux/init.h> | 20 | #include <linux/init.h> |
20 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
21 | #include <linux/irq.h> | 22 | #include <linux/irq.h> |
@@ -26,7 +27,6 @@ | |||
26 | #include <linux/sched_clock.h> | 27 | #include <linux/sched_clock.h> |
27 | 28 | ||
28 | #include <asm/mach/time.h> | 29 | #include <asm/mach/time.h> |
29 | #include <asm/localtimer.h> | ||
30 | 30 | ||
31 | #include "common.h" | 31 | #include "common.h" |
32 | 32 | ||
@@ -49,7 +49,7 @@ static void __iomem *sts_base; | |||
49 | 49 | ||
50 | static irqreturn_t msm_timer_interrupt(int irq, void *dev_id) | 50 | static irqreturn_t msm_timer_interrupt(int irq, void *dev_id) |
51 | { | 51 | { |
52 | struct clock_event_device *evt = *(struct clock_event_device **)dev_id; | 52 | struct clock_event_device *evt = dev_id; |
53 | /* Stop the timer tick */ | 53 | /* Stop the timer tick */ |
54 | if (evt->mode == CLOCK_EVT_MODE_ONESHOT) { | 54 | if (evt->mode == CLOCK_EVT_MODE_ONESHOT) { |
55 | u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); | 55 | u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); |
@@ -101,18 +101,7 @@ static void msm_timer_set_mode(enum clock_event_mode mode, | |||
101 | writel_relaxed(ctrl, event_base + TIMER_ENABLE); | 101 | writel_relaxed(ctrl, event_base + TIMER_ENABLE); |
102 | } | 102 | } |
103 | 103 | ||
104 | static struct clock_event_device msm_clockevent = { | 104 | static struct clock_event_device __percpu *msm_evt; |
105 | .name = "gp_timer", | ||
106 | .features = CLOCK_EVT_FEAT_ONESHOT, | ||
107 | .rating = 200, | ||
108 | .set_next_event = msm_timer_set_next_event, | ||
109 | .set_mode = msm_timer_set_mode, | ||
110 | }; | ||
111 | |||
112 | static union { | ||
113 | struct clock_event_device *evt; | ||
114 | struct clock_event_device * __percpu *percpu_evt; | ||
115 | } msm_evt; | ||
116 | 105 | ||
117 | static void __iomem *source_base; | 106 | static void __iomem *source_base; |
118 | 107 | ||
@@ -138,23 +127,34 @@ static struct clocksource msm_clocksource = { | |||
138 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 127 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
139 | }; | 128 | }; |
140 | 129 | ||
141 | #ifdef CONFIG_LOCAL_TIMERS | 130 | static int msm_timer_irq; |
131 | static int msm_timer_has_ppi; | ||
132 | |||
142 | static int msm_local_timer_setup(struct clock_event_device *evt) | 133 | static int msm_local_timer_setup(struct clock_event_device *evt) |
143 | { | 134 | { |
144 | /* Use existing clock_event for cpu 0 */ | 135 | int cpu = smp_processor_id(); |
145 | if (!smp_processor_id()) | 136 | int err; |
146 | return 0; | 137 | |
147 | 138 | evt->irq = msm_timer_irq; | |
148 | evt->irq = msm_clockevent.irq; | 139 | evt->name = "msm_timer"; |
149 | evt->name = "local_timer"; | 140 | evt->features = CLOCK_EVT_FEAT_ONESHOT; |
150 | evt->features = msm_clockevent.features; | 141 | evt->rating = 200; |
151 | evt->rating = msm_clockevent.rating; | ||
152 | evt->set_mode = msm_timer_set_mode; | 142 | evt->set_mode = msm_timer_set_mode; |
153 | evt->set_next_event = msm_timer_set_next_event; | 143 | evt->set_next_event = msm_timer_set_next_event; |
144 | evt->cpumask = cpumask_of(cpu); | ||
145 | |||
146 | clockevents_config_and_register(evt, GPT_HZ, 4, 0xffffffff); | ||
147 | |||
148 | if (msm_timer_has_ppi) { | ||
149 | enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING); | ||
150 | } else { | ||
151 | err = request_irq(evt->irq, msm_timer_interrupt, | ||
152 | IRQF_TIMER | IRQF_NOBALANCING | | ||
153 | IRQF_TRIGGER_RISING, "gp_timer", evt); | ||
154 | if (err) | ||
155 | pr_err("request_irq failed\n"); | ||
156 | } | ||
154 | 157 | ||
155 | *__this_cpu_ptr(msm_evt.percpu_evt) = evt; | ||
156 | clockevents_config_and_register(evt, GPT_HZ, 4, 0xf0000000); | ||
157 | enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING); | ||
158 | return 0; | 158 | return 0; |
159 | } | 159 | } |
160 | 160 | ||
@@ -164,11 +164,28 @@ static void msm_local_timer_stop(struct clock_event_device *evt) | |||
164 | disable_percpu_irq(evt->irq); | 164 | disable_percpu_irq(evt->irq); |
165 | } | 165 | } |
166 | 166 | ||
167 | static struct local_timer_ops msm_local_timer_ops = { | 167 | static int msm_timer_cpu_notify(struct notifier_block *self, |
168 | .setup = msm_local_timer_setup, | 168 | unsigned long action, void *hcpu) |
169 | .stop = msm_local_timer_stop, | 169 | { |
170 | /* | ||
171 | * Grab cpu pointer in each case to avoid spurious | ||
172 | * preemptible warnings | ||
173 | */ | ||
174 | switch (action & ~CPU_TASKS_FROZEN) { | ||
175 | case CPU_STARTING: | ||
176 | msm_local_timer_setup(this_cpu_ptr(msm_evt)); | ||
177 | break; | ||
178 | case CPU_DYING: | ||
179 | msm_local_timer_stop(this_cpu_ptr(msm_evt)); | ||
180 | break; | ||
181 | } | ||
182 | |||
183 | return NOTIFY_OK; | ||
184 | } | ||
185 | |||
186 | static struct notifier_block msm_timer_cpu_nb = { | ||
187 | .notifier_call = msm_timer_cpu_notify, | ||
170 | }; | 188 | }; |
171 | #endif /* CONFIG_LOCAL_TIMERS */ | ||
172 | 189 | ||
173 | static notrace u32 msm_sched_clock_read(void) | 190 | static notrace u32 msm_sched_clock_read(void) |
174 | { | 191 | { |
@@ -178,38 +195,35 @@ static notrace u32 msm_sched_clock_read(void) | |||
178 | static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq, | 195 | static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq, |
179 | bool percpu) | 196 | bool percpu) |
180 | { | 197 | { |
181 | struct clock_event_device *ce = &msm_clockevent; | ||
182 | struct clocksource *cs = &msm_clocksource; | 198 | struct clocksource *cs = &msm_clocksource; |
183 | int res; | 199 | int res = 0; |
200 | |||
201 | msm_timer_irq = irq; | ||
202 | msm_timer_has_ppi = percpu; | ||
203 | |||
204 | msm_evt = alloc_percpu(struct clock_event_device); | ||
205 | if (!msm_evt) { | ||
206 | pr_err("memory allocation failed for clockevents\n"); | ||
207 | goto err; | ||
208 | } | ||
184 | 209 | ||
185 | ce->cpumask = cpumask_of(0); | 210 | if (percpu) |
186 | ce->irq = irq; | 211 | res = request_percpu_irq(irq, msm_timer_interrupt, |
212 | "gp_timer", msm_evt); | ||
187 | 213 | ||
188 | clockevents_config_and_register(ce, GPT_HZ, 4, 0xffffffff); | 214 | if (res) { |
189 | if (percpu) { | 215 | pr_err("request_percpu_irq failed\n"); |
190 | msm_evt.percpu_evt = alloc_percpu(struct clock_event_device *); | 216 | } else { |
191 | if (!msm_evt.percpu_evt) { | 217 | res = register_cpu_notifier(&msm_timer_cpu_nb); |
192 | pr_err("memory allocation failed for %s\n", ce->name); | 218 | if (res) { |
219 | free_percpu_irq(irq, msm_evt); | ||
193 | goto err; | 220 | goto err; |
194 | } | 221 | } |
195 | *__this_cpu_ptr(msm_evt.percpu_evt) = ce; | 222 | |
196 | res = request_percpu_irq(ce->irq, msm_timer_interrupt, | 223 | /* Immediately configure the timer on the boot CPU */ |
197 | ce->name, msm_evt.percpu_evt); | 224 | msm_local_timer_setup(__this_cpu_ptr(msm_evt)); |
198 | if (!res) { | ||
199 | enable_percpu_irq(ce->irq, IRQ_TYPE_EDGE_RISING); | ||
200 | #ifdef CONFIG_LOCAL_TIMERS | ||
201 | local_timer_register(&msm_local_timer_ops); | ||
202 | #endif | ||
203 | } | ||
204 | } else { | ||
205 | msm_evt.evt = ce; | ||
206 | res = request_irq(ce->irq, msm_timer_interrupt, | ||
207 | IRQF_TIMER | IRQF_NOBALANCING | | ||
208 | IRQF_TRIGGER_RISING, ce->name, &msm_evt.evt); | ||
209 | } | 225 | } |
210 | 226 | ||
211 | if (res) | ||
212 | pr_err("request_irq failed for %s\n", ce->name); | ||
213 | err: | 227 | err: |
214 | writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE); | 228 | writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE); |
215 | res = clocksource_register_hz(cs, dgt_hz); | 229 | res = clocksource_register_hz(cs, dgt_hz); |
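Two details of the MSM conversion are worth calling out. First, the interrupt handler now receives the clock_event_device directly as dev_id: request_percpu_irq() is given the percpu pointer itself, and for a per-CPU (PPI) interrupt the IRQ core hands each CPU its own slot, so the old pointer-to-pointer dance is gone. Second, on SoCs where the event timer is not a PPI, each CPU's setup falls back to a plain request_irq(). A sketch of the dev_id plumbing (timer_isr and timer_irq_init are illustrative names; the percpu-IRQ semantics are as provided by the kernel of this era):

    static struct clock_event_device __percpu *evt;

    static irqreturn_t timer_isr(int irq, void *dev_id)
    {
        /* for a percpu IRQ, dev_id is this CPU's slot of 'evt' */
        struct clock_event_device *ce = dev_id;

        /* ack/stop the hardware as needed, then ... */
        ce->event_handler(ce);
        return IRQ_HANDLED;
    }

    static int __init timer_irq_init(int irq)
    {
        int err;

        /* registered once with the percpu cookie ... */
        err = request_percpu_irq(irq, timer_isr, "gp_timer", evt);
        if (err)
            return err;
        /* ... and each CPU later enables its own copy of the PPI */
        enable_percpu_irq(irq, IRQ_TYPE_EDGE_RISING);
        return 0;
    }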
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 627fa7e41fba..3d6ee149d3d7 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -37,9 +37,8 @@ config ARCH_OMAP4 | |||
37 | select CACHE_L2X0 | 37 | select CACHE_L2X0 |
38 | select CPU_V7 | 38 | select CPU_V7 |
39 | select HAVE_ARM_SCU if SMP | 39 | select HAVE_ARM_SCU if SMP |
40 | select HAVE_ARM_TWD if LOCAL_TIMERS | 40 | select HAVE_ARM_TWD if SMP |
41 | select HAVE_SMP | 41 | select HAVE_SMP |
42 | select LOCAL_TIMERS if SMP | ||
43 | select OMAP_INTERCONNECT | 42 | select OMAP_INTERCONNECT |
44 | select PL310_ERRATA_588369 | 43 | select PL310_ERRATA_588369 |
45 | select PL310_ERRATA_727915 | 44 | select PL310_ERRATA_727915 |
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index b37e1fcbad56..8e99ca368e07 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -600,7 +600,6 @@ static OMAP_SYS_32K_TIMER_INIT(4, 1, "timer_32k_ck", "ti,timer-alwon", | |||
600 | #endif | 600 | #endif |
601 | 601 | ||
602 | #ifdef CONFIG_ARCH_OMAP4 | 602 | #ifdef CONFIG_ARCH_OMAP4 |
603 | #ifdef CONFIG_LOCAL_TIMERS | ||
604 | static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, OMAP44XX_LOCAL_TWD_BASE, 29); | 603 | static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, OMAP44XX_LOCAL_TWD_BASE, 29); |
605 | void __init omap4_local_timer_init(void) | 604 | void __init omap4_local_timer_init(void) |
606 | { | 605 | { |
@@ -619,12 +618,6 @@ void __init omap4_local_timer_init(void) | |||
619 | pr_err("twd_local_timer_register failed %d\n", err); | 618 | pr_err("twd_local_timer_register failed %d\n", err); |
620 | } | 619 | } |
621 | } | 620 | } |
622 | #else /* CONFIG_LOCAL_TIMERS */ | ||
623 | void __init omap4_local_timer_init(void) | ||
624 | { | ||
625 | omap4_sync32k_timer_init(); | ||
626 | } | ||
627 | #endif /* CONFIG_LOCAL_TIMERS */ | ||
628 | #endif /* CONFIG_ARCH_OMAP4 */ | 621 | #endif /* CONFIG_ARCH_OMAP4 */ |
629 | 622 | ||
630 | #ifdef CONFIG_SOC_OMAP5 | 623 | #ifdef CONFIG_SOC_OMAP5 |
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index d210c0f9c2c4..9db2029aa632 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -13,7 +13,7 @@ config REALVIEW_EB_A9MP | |||
13 | depends on MACH_REALVIEW_EB | 13 | depends on MACH_REALVIEW_EB |
14 | select CPU_V7 | 14 | select CPU_V7 |
15 | select HAVE_ARM_SCU if SMP | 15 | select HAVE_ARM_SCU if SMP |
16 | select HAVE_ARM_TWD if LOCAL_TIMERS | 16 | select HAVE_ARM_TWD if SMP |
17 | select HAVE_SMP | 17 | select HAVE_SMP |
18 | select MIGHT_HAVE_CACHE_L2X0 | 18 | select MIGHT_HAVE_CACHE_L2X0 |
19 | help | 19 | help |
@@ -26,7 +26,7 @@ config REALVIEW_EB_ARM11MP | |||
26 | select ARCH_HAS_BARRIERS if SMP | 26 | select ARCH_HAS_BARRIERS if SMP |
27 | select CPU_V6K | 27 | select CPU_V6K |
28 | select HAVE_ARM_SCU if SMP | 28 | select HAVE_ARM_SCU if SMP |
29 | select HAVE_ARM_TWD if LOCAL_TIMERS | 29 | select HAVE_ARM_TWD if SMP |
30 | select HAVE_SMP | 30 | select HAVE_SMP |
31 | select MIGHT_HAVE_CACHE_L2X0 | 31 | select MIGHT_HAVE_CACHE_L2X0 |
32 | help | 32 | help |
@@ -48,7 +48,7 @@ config MACH_REALVIEW_PB11MP | |||
48 | select ARM_GIC | 48 | select ARM_GIC |
49 | select CPU_V6K | 49 | select CPU_V6K |
50 | select HAVE_ARM_SCU if SMP | 50 | select HAVE_ARM_SCU if SMP |
51 | select HAVE_ARM_TWD if LOCAL_TIMERS | 51 | select HAVE_ARM_TWD if SMP |
52 | select HAVE_PATA_PLATFORM | 52 | select HAVE_PATA_PLATFORM |
53 | select HAVE_SMP | 53 | select HAVE_SMP |
54 | select MIGHT_HAVE_CACHE_L2X0 | 54 | select MIGHT_HAVE_CACHE_L2X0 |
@@ -92,7 +92,7 @@ config MACH_REALVIEW_PBX | |||
92 | select ARCH_SPARSEMEM_ENABLE if CPU_V7 && !REALVIEW_HIGH_PHYS_OFFSET | 92 | select ARCH_SPARSEMEM_ENABLE if CPU_V7 && !REALVIEW_HIGH_PHYS_OFFSET |
93 | select ARM_GIC | 93 | select ARM_GIC |
94 | select HAVE_ARM_SCU if SMP | 94 | select HAVE_ARM_SCU if SMP |
95 | select HAVE_ARM_TWD if LOCAL_TIMERS | 95 | select HAVE_ARM_TWD if SMP |
96 | select HAVE_PATA_PLATFORM | 96 | select HAVE_PATA_PLATFORM |
97 | select HAVE_SMP | 97 | select HAVE_SMP |
98 | select MIGHT_HAVE_CACHE_L2X0 | 98 | select MIGHT_HAVE_CACHE_L2X0 |
diff --git a/arch/arm/mach-spear/Kconfig b/arch/arm/mach-spear/Kconfig
index 442917eedff3..df0d59afeb40 100644
--- a/arch/arm/mach-spear/Kconfig
+++ b/arch/arm/mach-spear/Kconfig
@@ -23,7 +23,7 @@ config ARCH_SPEAR13XX | |||
23 | select CPU_V7 | 23 | select CPU_V7 |
24 | select GPIO_SPEAR_SPICS | 24 | select GPIO_SPEAR_SPICS |
25 | select HAVE_ARM_SCU if SMP | 25 | select HAVE_ARM_SCU if SMP |
26 | select HAVE_ARM_TWD if LOCAL_TIMERS | 26 | select HAVE_ARM_TWD if SMP |
27 | select HAVE_SMP | 27 | select HAVE_SMP |
28 | select MIGHT_HAVE_CACHE_L2X0 | 28 | select MIGHT_HAVE_CACHE_L2X0 |
29 | select PINCTRL | 29 | select PINCTRL |
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index ef3a8da49b2d..59925cc896fb 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -8,7 +8,7 @@ config ARCH_TEGRA | |||
8 | select COMMON_CLK | 8 | select COMMON_CLK |
9 | select GENERIC_CLOCKEVENTS | 9 | select GENERIC_CLOCKEVENTS |
10 | select HAVE_ARM_SCU if SMP | 10 | select HAVE_ARM_SCU if SMP |
11 | select HAVE_ARM_TWD if LOCAL_TIMERS | 11 | select HAVE_ARM_TWD if SMP |
12 | select HAVE_CLK | 12 | select HAVE_CLK |
13 | select HAVE_SMP | 13 | select HAVE_SMP |
14 | select MIGHT_HAVE_CACHE_L2X0 | 14 | select MIGHT_HAVE_CACHE_L2X0 |
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index b19b07204aaf..99a28d628297 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -8,7 +8,7 @@ config ARCH_U8500 | |||
8 | select CPU_V7 | 8 | select CPU_V7 |
9 | select GENERIC_CLOCKEVENTS | 9 | select GENERIC_CLOCKEVENTS |
10 | select HAVE_ARM_SCU if SMP | 10 | select HAVE_ARM_SCU if SMP |
11 | select HAVE_ARM_TWD if LOCAL_TIMERS | 11 | select HAVE_ARM_TWD if SMP |
12 | select HAVE_SMP | 12 | select HAVE_SMP |
13 | select MIGHT_HAVE_CACHE_L2X0 | 13 | select MIGHT_HAVE_CACHE_L2X0 |
14 | help | 14 | help |
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index b8bbabec6310..83c8677bb181 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -10,7 +10,7 @@ config ARCH_VEXPRESS | |||
10 | select CPU_V7 | 10 | select CPU_V7 |
11 | select GENERIC_CLOCKEVENTS | 11 | select GENERIC_CLOCKEVENTS |
12 | select HAVE_ARM_SCU if SMP | 12 | select HAVE_ARM_SCU if SMP |
13 | select HAVE_ARM_TWD if LOCAL_TIMERS | 13 | select HAVE_ARM_TWD if SMP |
14 | select HAVE_CLK | 14 | select HAVE_CLK |
15 | select HAVE_PATA_PLATFORM | 15 | select HAVE_PATA_PLATFORM |
16 | select HAVE_SMP | 16 | select HAVE_SMP |
diff --git a/arch/arm/mach-zynq/Kconfig b/arch/arm/mach-zynq/Kconfig
index c1d61f281e68..04f8a4a6e755 100644
--- a/arch/arm/mach-zynq/Kconfig
+++ b/arch/arm/mach-zynq/Kconfig
@@ -6,7 +6,7 @@ config ARCH_ZYNQ | |||
6 | select CPU_V7 | 6 | select CPU_V7 |
7 | select GENERIC_CLOCKEVENTS | 7 | select GENERIC_CLOCKEVENTS |
8 | select HAVE_ARM_SCU if SMP | 8 | select HAVE_ARM_SCU if SMP |
9 | select HAVE_ARM_TWD if LOCAL_TIMERS | 9 | select HAVE_ARM_TWD if SMP |
10 | select ICST | 10 | select ICST |
11 | select MIGHT_HAVE_CACHE_L2X0 | 11 | select MIGHT_HAVE_CACHE_L2X0 |
12 | select USE_OF | 12 | select USE_OF |
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index b2bbc415f120..5b34768f4d7c 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/clk.h> | 17 | #include <linux/clk.h> |
18 | #include <linux/clockchips.h> | 18 | #include <linux/clockchips.h> |
19 | #include <linux/cpu.h> | ||
19 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
20 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
21 | #include <linux/percpu.h> | 22 | #include <linux/percpu.h> |
@@ -24,7 +25,6 @@ | |||
24 | #include <linux/of_address.h> | 25 | #include <linux/of_address.h> |
25 | #include <linux/clocksource.h> | 26 | #include <linux/clocksource.h> |
26 | 27 | ||
27 | #include <asm/localtimer.h> | ||
28 | #include <asm/mach/time.h> | 28 | #include <asm/mach/time.h> |
29 | 29 | ||
30 | #define EXYNOS4_MCTREG(x) (x) | 30 | #define EXYNOS4_MCTREG(x) (x) |
@@ -80,7 +80,7 @@ static unsigned int mct_int_type; | |||
80 | static int mct_irqs[MCT_NR_IRQS]; | 80 | static int mct_irqs[MCT_NR_IRQS]; |
81 | 81 | ||
82 | struct mct_clock_event_device { | 82 | struct mct_clock_event_device { |
83 | struct clock_event_device *evt; | 83 | struct clock_event_device evt; |
84 | unsigned long base; | 84 | unsigned long base; |
85 | char name[10]; | 85 | char name[10]; |
86 | }; | 86 | }; |
@@ -295,8 +295,6 @@ static void exynos4_clockevent_init(void) | |||
295 | setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq); | 295 | setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq); |
296 | } | 296 | } |
297 | 297 | ||
298 | #ifdef CONFIG_LOCAL_TIMERS | ||
299 | |||
300 | static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick); | 298 | static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick); |
301 | 299 | ||
302 | /* Clock event handling */ | 300 | /* Clock event handling */ |
@@ -369,7 +367,7 @@ static inline void exynos4_tick_set_mode(enum clock_event_mode mode, | |||
369 | 367 | ||
370 | static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) | 368 | static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) |
371 | { | 369 | { |
372 | struct clock_event_device *evt = mevt->evt; | 370 | struct clock_event_device *evt = &mevt->evt; |
373 | 371 | ||
374 | /* | 372 | /* |
375 | * This is for supporting oneshot mode. | 373 | * This is for supporting oneshot mode. |
@@ -391,7 +389,7 @@ static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) | |||
391 | static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) | 389 | static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) |
392 | { | 390 | { |
393 | struct mct_clock_event_device *mevt = dev_id; | 391 | struct mct_clock_event_device *mevt = dev_id; |
394 | struct clock_event_device *evt = mevt->evt; | 392 | struct clock_event_device *evt = &mevt->evt; |
395 | 393 | ||
396 | exynos4_mct_tick_clear(mevt); | 394 | exynos4_mct_tick_clear(mevt); |
397 | 395 | ||
@@ -405,8 +403,7 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt) | |||
405 | struct mct_clock_event_device *mevt; | 403 | struct mct_clock_event_device *mevt; |
406 | unsigned int cpu = smp_processor_id(); | 404 | unsigned int cpu = smp_processor_id(); |
407 | 405 | ||
408 | mevt = this_cpu_ptr(&percpu_mct_tick); | 406 | mevt = container_of(evt, struct mct_clock_event_device, evt); |
409 | mevt->evt = evt; | ||
410 | 407 | ||
411 | mevt->base = EXYNOS4_MCT_L_BASE(cpu); | 408 | mevt->base = EXYNOS4_MCT_L_BASE(cpu); |
412 | sprintf(mevt->name, "mct_tick%d", cpu); | 409 | sprintf(mevt->name, "mct_tick%d", cpu); |
@@ -448,14 +445,37 @@ static void exynos4_local_timer_stop(struct clock_event_device *evt) | |||
448 | disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); | 445 | disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); |
449 | } | 446 | } |
450 | 447 | ||
451 | static struct local_timer_ops exynos4_mct_tick_ops = { | 448 | static int exynos4_mct_cpu_notify(struct notifier_block *self, |
452 | .setup = exynos4_local_timer_setup, | 449 | unsigned long action, void *hcpu) |
453 | .stop = exynos4_local_timer_stop, | 450 | { |
451 | struct mct_clock_event_device *mevt; | ||
452 | |||
453 | /* | ||
454 | * Grab cpu pointer in each case to avoid spurious | ||
455 | * preemptible warnings | ||
456 | */ | ||
457 | switch (action & ~CPU_TASKS_FROZEN) { | ||
458 | case CPU_STARTING: | ||
459 | mevt = this_cpu_ptr(&percpu_mct_tick); | ||
460 | exynos4_local_timer_setup(&mevt->evt); | ||
461 | break; | ||
462 | case CPU_DYING: | ||
463 | mevt = this_cpu_ptr(&percpu_mct_tick); | ||
464 | exynos4_local_timer_stop(&mevt->evt); | ||
465 | break; | ||
466 | } | ||
467 | |||
468 | return NOTIFY_OK; | ||
469 | } | ||
470 | |||
471 | static struct notifier_block exynos4_mct_cpu_nb = { | ||
472 | .notifier_call = exynos4_mct_cpu_notify, | ||
454 | }; | 473 | }; |
455 | #endif /* CONFIG_LOCAL_TIMERS */ | ||
456 | 474 | ||
457 | static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base) | 475 | static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base) |
458 | { | 476 | { |
477 | int err; | ||
478 | struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); | ||
459 | struct clk *mct_clk, *tick_clk; | 479 | struct clk *mct_clk, *tick_clk; |
460 | 480 | ||
461 | tick_clk = np ? of_clk_get_by_name(np, "fin_pll") : | 481 | tick_clk = np ? of_clk_get_by_name(np, "fin_pll") : |
@@ -473,9 +493,7 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem | |||
473 | if (!reg_base) | 493 | if (!reg_base) |
474 | panic("%s: unable to ioremap mct address space\n", __func__); | 494 | panic("%s: unable to ioremap mct address space\n", __func__); |
475 | 495 | ||
476 | #ifdef CONFIG_LOCAL_TIMERS | ||
477 | if (mct_int_type == MCT_INT_PPI) { | 496 | if (mct_int_type == MCT_INT_PPI) { |
478 | int err; | ||
479 | 497 | ||
480 | err = request_percpu_irq(mct_irqs[MCT_L0_IRQ], | 498 | err = request_percpu_irq(mct_irqs[MCT_L0_IRQ], |
481 | exynos4_mct_tick_isr, "MCT", | 499 | exynos4_mct_tick_isr, "MCT", |
@@ -484,8 +502,16 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem | |||
484 | mct_irqs[MCT_L0_IRQ], err); | 502 | mct_irqs[MCT_L0_IRQ], err); |
485 | } | 503 | } |
486 | 504 | ||
487 | local_timer_register(&exynos4_mct_tick_ops); | 505 | err = register_cpu_notifier(&exynos4_mct_cpu_nb); |
488 | #endif /* CONFIG_LOCAL_TIMERS */ | 506 | if (err) |
507 | goto out_irq; | ||
508 | |||
509 | /* Immediately configure the timer on the boot CPU */ | ||
510 | exynos4_local_timer_setup(&mevt->evt); | ||
511 | return; | ||
512 | |||
513 | out_irq: | ||
514 | free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick); | ||
489 | } | 515 | } |
490 | 516 | ||
491 | void __init mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1) | 517 | void __init mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1) |
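The exynos_mct change also inverts the ownership: instead of the per-CPU wrapper holding a pointer to an externally supplied clock_event_device, the device is embedded in the wrapper and the per-CPU state is recovered from the event with container_of(). That removes the mevt->evt back-pointer assignment and any chance of using it before it is set. A sketch of the relationship (struct and field names as in the driver; mct_example is a hypothetical caller):

    struct mct_clock_event_device {
        struct clock_event_device evt;  /* embedded, not a pointer */
        unsigned long base;
        char name[10];
    };

    static void mct_example(struct clock_event_device *evt)
    {
        /* recover the per-CPU wrapper from the embedded member */
        struct mct_clock_event_device *mevt =
            container_of(evt, struct mct_clock_event_device, evt);

        /* mevt->base and mevt->name are now usable without a back-pointer */
    }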
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 1b04b7e1d39b..847cab6f6e31 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/clk.h> | 21 | #include <linux/clk.h> |
22 | #include <linux/cpu.h> | ||
22 | #include <linux/timer.h> | 23 | #include <linux/timer.h> |
23 | #include <linux/clockchips.h> | 24 | #include <linux/clockchips.h> |
24 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
@@ -28,9 +29,9 @@ | |||
28 | #include <linux/irq.h> | 29 | #include <linux/irq.h> |
29 | #include <linux/module.h> | 30 | #include <linux/module.h> |
30 | #include <linux/sched_clock.h> | 31 | #include <linux/sched_clock.h> |
31 | |||
32 | #include <asm/localtimer.h> | ||
33 | #include <linux/percpu.h> | 32 | #include <linux/percpu.h> |
33 | #include <linux/time-armada-370-xp.h> | ||
34 | |||
34 | /* | 35 | /* |
35 | * Timer block registers. | 36 | * Timer block registers. |
36 | */ | 37 | */ |
@@ -69,7 +70,7 @@ static bool timer25Mhz = true; | |||
69 | */ | 70 | */ |
70 | static u32 ticks_per_jiffy; | 71 | static u32 ticks_per_jiffy; |
71 | 72 | ||
72 | static struct clock_event_device __percpu **percpu_armada_370_xp_evt; | 73 | static struct clock_event_device __percpu *armada_370_xp_evt; |
73 | 74 | ||
74 | static u32 notrace armada_370_xp_read_sched_clock(void) | 75 | static u32 notrace armada_370_xp_read_sched_clock(void) |
75 | { | 76 | { |
@@ -142,21 +143,14 @@ armada_370_xp_clkevt_mode(enum clock_event_mode mode, | |||
142 | } | 143 | } |
143 | } | 144 | } |
144 | 145 | ||
145 | static struct clock_event_device armada_370_xp_clkevt = { | 146 | static int armada_370_xp_clkevt_irq; |
146 | .name = "armada_370_xp_per_cpu_tick", | ||
147 | .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, | ||
148 | .shift = 32, | ||
149 | .rating = 300, | ||
150 | .set_next_event = armada_370_xp_clkevt_next_event, | ||
151 | .set_mode = armada_370_xp_clkevt_mode, | ||
152 | }; | ||
153 | 147 | ||
154 | static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id) | 148 | static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id) |
155 | { | 149 | { |
156 | /* | 150 | /* |
157 | * ACK timer interrupt and call event handler. | 151 | * ACK timer interrupt and call event handler. |
158 | */ | 152 | */ |
159 | struct clock_event_device *evt = *(struct clock_event_device **)dev_id; | 153 | struct clock_event_device *evt = dev_id; |
160 | 154 | ||
161 | writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS); | 155 | writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS); |
162 | evt->event_handler(evt); | 156 | evt->event_handler(evt); |
@@ -172,42 +166,55 @@ static int armada_370_xp_timer_setup(struct clock_event_device *evt) | |||
172 | u32 u; | 166 | u32 u; |
173 | int cpu = smp_processor_id(); | 167 | int cpu = smp_processor_id(); |
174 | 168 | ||
175 | /* Use existing clock_event for cpu 0 */ | ||
176 | if (!smp_processor_id()) | ||
177 | return 0; | ||
178 | |||
179 | u = readl(local_base + TIMER_CTRL_OFF); | 169 | u = readl(local_base + TIMER_CTRL_OFF); |
180 | if (timer25Mhz) | 170 | if (timer25Mhz) |
181 | writel(u | TIMER0_25MHZ, local_base + TIMER_CTRL_OFF); | 171 | writel(u | TIMER0_25MHZ, local_base + TIMER_CTRL_OFF); |
182 | else | 172 | else |
183 | writel(u & ~TIMER0_25MHZ, local_base + TIMER_CTRL_OFF); | 173 | writel(u & ~TIMER0_25MHZ, local_base + TIMER_CTRL_OFF); |
184 | 174 | ||
185 | evt->name = armada_370_xp_clkevt.name; | 175 | evt->name = "armada_370_xp_per_cpu_tick", |
186 | evt->irq = armada_370_xp_clkevt.irq; | 176 | evt->features = CLOCK_EVT_FEAT_ONESHOT | |
187 | evt->features = armada_370_xp_clkevt.features; | 177 | CLOCK_EVT_FEAT_PERIODIC; |
188 | evt->shift = armada_370_xp_clkevt.shift; | 178 | evt->shift = 32, |
189 | evt->rating = armada_370_xp_clkevt.rating, | 179 | evt->rating = 300, |
190 | evt->set_next_event = armada_370_xp_clkevt_next_event, | 180 | evt->set_next_event = armada_370_xp_clkevt_next_event, |
191 | evt->set_mode = armada_370_xp_clkevt_mode, | 181 | evt->set_mode = armada_370_xp_clkevt_mode, |
182 | evt->irq = armada_370_xp_clkevt_irq; | ||
192 | evt->cpumask = cpumask_of(cpu); | 183 | evt->cpumask = cpumask_of(cpu); |
193 | 184 | ||
194 | *__this_cpu_ptr(percpu_armada_370_xp_evt) = evt; | ||
195 | |||
196 | clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe); | 185 | clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe); |
197 | enable_percpu_irq(evt->irq, 0); | 186 | enable_percpu_irq(evt->irq, 0); |
198 | 187 | ||
199 | return 0; | 188 | return 0; |
200 | } | 189 | } |
201 | 190 | ||
202 | static void armada_370_xp_timer_stop(struct clock_event_device *evt) | 191 | static void armada_370_xp_timer_stop(struct clock_event_device *evt) |
203 | { | 192 | { |
204 | evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); | 193 | evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); |
205 | disable_percpu_irq(evt->irq); | 194 | disable_percpu_irq(evt->irq); |
206 | } | 195 | } |
207 | 196 | ||
208 | static struct local_timer_ops armada_370_xp_local_timer_ops = { | 197 | static int armada_370_xp_timer_cpu_notify(struct notifier_block *self, |
209 | .setup = armada_370_xp_timer_setup, | 198 | unsigned long action, void *hcpu) |
210 | .stop = armada_370_xp_timer_stop, | 199 | { |
200 | /* | ||
201 | * Grab cpu pointer in each case to avoid spurious | ||
202 | * preemptible warnings | ||
203 | */ | ||
204 | switch (action & ~CPU_TASKS_FROZEN) { | ||
205 | case CPU_STARTING: | ||
206 | armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt)); | ||
207 | break; | ||
208 | case CPU_DYING: | ||
209 | armada_370_xp_timer_stop(this_cpu_ptr(armada_370_xp_evt)); | ||
210 | break; | ||
211 | } | ||
212 | |||
213 | return NOTIFY_OK; | ||
214 | } | ||
215 | |||
216 | static struct notifier_block armada_370_xp_timer_cpu_nb = { | ||
217 | .notifier_call = armada_370_xp_timer_cpu_notify, | ||
211 | }; | 218 | }; |
212 | 219 | ||
213 | void __init armada_370_xp_timer_init(void) | 220 | void __init armada_370_xp_timer_init(void) |
@@ -223,9 +230,6 @@ void __init armada_370_xp_timer_init(void) | |||
223 | 230 | ||
224 | if (of_find_property(np, "marvell,timer-25Mhz", NULL)) { | 231 | if (of_find_property(np, "marvell,timer-25Mhz", NULL)) { |
225 | /* The fixed 25MHz timer is available so let's use it */ | 232 | /* The fixed 25MHz timer is available so let's use it */ |
226 | u = readl(local_base + TIMER_CTRL_OFF); | ||
227 | writel(u | TIMER0_25MHZ, | ||
228 | local_base + TIMER_CTRL_OFF); | ||
229 | u = readl(timer_base + TIMER_CTRL_OFF); | 233 | u = readl(timer_base + TIMER_CTRL_OFF); |
230 | writel(u | TIMER0_25MHZ, | 234 | writel(u | TIMER0_25MHZ, |
231 | timer_base + TIMER_CTRL_OFF); | 235 | timer_base + TIMER_CTRL_OFF); |
@@ -235,9 +239,6 @@ void __init armada_370_xp_timer_init(void) | |||
235 | struct clk *clk = of_clk_get(np, 0); | 239 | struct clk *clk = of_clk_get(np, 0); |
236 | WARN_ON(IS_ERR(clk)); | 240 | WARN_ON(IS_ERR(clk)); |
237 | rate = clk_get_rate(clk); | 241 | rate = clk_get_rate(clk); |
238 | u = readl(local_base + TIMER_CTRL_OFF); | ||
239 | writel(u & ~(TIMER0_25MHZ), | ||
240 | local_base + TIMER_CTRL_OFF); | ||
241 | 242 | ||
242 | u = readl(timer_base + TIMER_CTRL_OFF); | 243 | u = readl(timer_base + TIMER_CTRL_OFF); |
243 | writel(u & ~(TIMER0_25MHZ), | 244 | writel(u & ~(TIMER0_25MHZ), |
@@ -251,7 +252,7 @@ void __init armada_370_xp_timer_init(void) | |||
251 | * We use timer 0 as clocksource, and private(local) timer 0 | 252 | * We use timer 0 as clocksource, and private(local) timer 0 |
252 | * for clockevents | 253 | * for clockevents |
253 | */ | 254 | */ |
254 | armada_370_xp_clkevt.irq = irq_of_parse_and_map(np, 4); | 255 | armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4); |
255 | 256 | ||
256 | ticks_per_jiffy = (timer_clk + HZ / 2) / HZ; | 257 | ticks_per_jiffy = (timer_clk + HZ / 2) / HZ; |
257 | 258 | ||
@@ -276,26 +277,19 @@ void __init armada_370_xp_timer_init(void) | |||
276 | "armada_370_xp_clocksource", | 277 | "armada_370_xp_clocksource", |
277 | timer_clk, 300, 32, clocksource_mmio_readl_down); | 278 | timer_clk, 300, 32, clocksource_mmio_readl_down); |
278 | 279 | ||
279 | /* Register the clockevent on the private timer of CPU 0 */ | 280 | register_cpu_notifier(&armada_370_xp_timer_cpu_nb); |
280 | armada_370_xp_clkevt.cpumask = cpumask_of(0); | ||
281 | clockevents_config_and_register(&armada_370_xp_clkevt, | ||
282 | timer_clk, 1, 0xfffffffe); | ||
283 | 281 | ||
284 | percpu_armada_370_xp_evt = alloc_percpu(struct clock_event_device *); | 282 | armada_370_xp_evt = alloc_percpu(struct clock_event_device); |
285 | 283 | ||
286 | 284 | ||
287 | /* | 285 | /* |
288 | * Setup clockevent timer (interrupt-driven). | 286 | * Setup clockevent timer (interrupt-driven). |
289 | */ | 287 | */ |
290 | *__this_cpu_ptr(percpu_armada_370_xp_evt) = &armada_370_xp_clkevt; | 288 | res = request_percpu_irq(armada_370_xp_clkevt_irq, |
291 | res = request_percpu_irq(armada_370_xp_clkevt.irq, | ||
292 | armada_370_xp_timer_interrupt, | 289 | armada_370_xp_timer_interrupt, |
293 | armada_370_xp_clkevt.name, | 290 | "armada_370_xp_per_cpu_tick", |
294 | percpu_armada_370_xp_evt); | 291 | armada_370_xp_evt); |
295 | if (!res) { | 292 | /* Immediately configure the timer on the boot CPU */ |
296 | enable_percpu_irq(armada_370_xp_clkevt.irq, 0); | 293 | if (!res) |
297 | #ifdef CONFIG_LOCAL_TIMERS | 294 | armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt)); |
298 | local_timer_register(&armada_370_xp_local_timer_ops); | ||
299 | #endif | ||
300 | } | ||
301 | } | 295 | } |
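As on MSM, the Armada 370/XP conversion switches from alloc_percpu(struct clock_event_device *), where each slot had to be pointed at a shared static device by hand, to alloc_percpu(struct clock_event_device): every CPU now owns an independent device, and the static armada_370_xp_clkevt template disappears in favour of filling the fields in the per-CPU setup path. The storage difference, boiled down (static_clkevt and the old_/new_ helpers are illustrative only):

    static struct clock_event_device static_clkevt;
    static struct clock_event_device __percpu **old_percpu_evt;
    static struct clock_event_device __percpu *new_percpu_evt;

    static void old_style(void)
    {
        struct clock_event_device *ce;

        old_percpu_evt = alloc_percpu(struct clock_event_device *);
        /* each CPU's slot had to be wired to a shared device by hand ... */
        *__this_cpu_ptr(old_percpu_evt) = &static_clkevt;
        /* ... and every later access paid an extra dereference */
        ce = *__this_cpu_ptr(old_percpu_evt);
    }

    static void new_style(void)
    {
        struct clock_event_device *ce;

        new_percpu_evt = alloc_percpu(struct clock_event_device);
        /* each CPU owns an embedded device: no wiring step, one less hop */
        ce = this_cpu_ptr(new_percpu_evt);
    }

This is also why the TIMER0_25MHZ programming of local_base moves out of armada_370_xp_timer_init(): the boot CPU now goes through the same armada_370_xp_timer_setup() path as the secondaries, and that path programs the per-CPU control register itself.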
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c
index 62876baa3ab9..09a17d9a6594 100644
--- a/drivers/clocksource/timer-marco.c
+++ b/drivers/clocksource/timer-marco.c
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
11 | #include <linux/clockchips.h> | 11 | #include <linux/clockchips.h> |
12 | #include <linux/clocksource.h> | 12 | #include <linux/clocksource.h> |
13 | #include <linux/cpu.h> | ||
13 | #include <linux/bitops.h> | 14 | #include <linux/bitops.h> |
14 | #include <linux/irq.h> | 15 | #include <linux/irq.h> |
15 | #include <linux/clk.h> | 16 | #include <linux/clk.h> |
@@ -18,7 +19,6 @@ | |||
18 | #include <linux/of_irq.h> | 19 | #include <linux/of_irq.h> |
19 | #include <linux/of_address.h> | 20 | #include <linux/of_address.h> |
20 | #include <linux/sched_clock.h> | 21 | #include <linux/sched_clock.h> |
21 | #include <asm/localtimer.h> | ||
22 | #include <asm/mach/time.h> | 22 | #include <asm/mach/time.h> |
23 | 23 | ||
24 | #define SIRFSOC_TIMER_32COUNTER_0_CTRL 0x0000 | 24 | #define SIRFSOC_TIMER_32COUNTER_0_CTRL 0x0000 |
@@ -151,13 +151,7 @@ static void sirfsoc_clocksource_resume(struct clocksource *cs) | |||
151 | BIT(1) | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL); | 151 | BIT(1) | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL); |
152 | } | 152 | } |
153 | 153 | ||
154 | static struct clock_event_device sirfsoc_clockevent = { | 154 | static struct clock_event_device __percpu *sirfsoc_clockevent; |
155 | .name = "sirfsoc_clockevent", | ||
156 | .rating = 200, | ||
157 | .features = CLOCK_EVT_FEAT_ONESHOT, | ||
158 | .set_mode = sirfsoc_timer_set_mode, | ||
159 | .set_next_event = sirfsoc_timer_set_next_event, | ||
160 | }; | ||
161 | 155 | ||
162 | static struct clocksource sirfsoc_clocksource = { | 156 | static struct clocksource sirfsoc_clocksource = { |
163 | .name = "sirfsoc_clocksource", | 157 | .name = "sirfsoc_clocksource", |
@@ -173,11 +167,8 @@ static struct irqaction sirfsoc_timer_irq = { | |||
173 | .name = "sirfsoc_timer0", | 167 | .name = "sirfsoc_timer0", |
174 | .flags = IRQF_TIMER | IRQF_NOBALANCING, | 168 | .flags = IRQF_TIMER | IRQF_NOBALANCING, |
175 | .handler = sirfsoc_timer_interrupt, | 169 | .handler = sirfsoc_timer_interrupt, |
176 | .dev_id = &sirfsoc_clockevent, | ||
177 | }; | 170 | }; |
178 | 171 | ||
179 | #ifdef CONFIG_LOCAL_TIMERS | ||
180 | |||
181 | static struct irqaction sirfsoc_timer1_irq = { | 172 | static struct irqaction sirfsoc_timer1_irq = { |
182 | .name = "sirfsoc_timer1", | 173 | .name = "sirfsoc_timer1", |
183 | .flags = IRQF_TIMER | IRQF_NOBALANCING, | 174 | .flags = IRQF_TIMER | IRQF_NOBALANCING, |
@@ -186,24 +177,28 @@ static struct irqaction sirfsoc_timer1_irq = { | |||
186 | 177 | ||
187 | static int sirfsoc_local_timer_setup(struct clock_event_device *ce) | 178 | static int sirfsoc_local_timer_setup(struct clock_event_device *ce) |
188 | { | 179 | { |
189 | /* Use existing clock_event for cpu 0 */ | 180 | int cpu = smp_processor_id(); |
190 | if (!smp_processor_id()) | 181 | struct irqaction *action; |
191 | return 0; | 182 | |
183 | if (cpu == 0) | ||
184 | action = &sirfsoc_timer_irq; | ||
185 | else | ||
186 | action = &sirfsoc_timer1_irq; | ||
192 | 187 | ||
193 | ce->irq = sirfsoc_timer1_irq.irq; | 188 | ce->irq = action->irq; |
194 | ce->name = "local_timer"; | 189 | ce->name = "local_timer"; |
195 | ce->features = sirfsoc_clockevent.features; | 190 | ce->features = CLOCK_EVT_FEAT_ONESHOT; |
196 | ce->rating = sirfsoc_clockevent.rating; | 191 | ce->rating = 200; |
197 | ce->set_mode = sirfsoc_timer_set_mode; | 192 | ce->set_mode = sirfsoc_timer_set_mode; |
198 | ce->set_next_event = sirfsoc_timer_set_next_event; | 193 | ce->set_next_event = sirfsoc_timer_set_next_event; |
199 | ce->shift = sirfsoc_clockevent.shift; | 194 | clockevents_calc_mult_shift(ce, CLOCK_TICK_RATE, 60); |
200 | ce->mult = sirfsoc_clockevent.mult; | 195 | ce->max_delta_ns = clockevent_delta2ns(-2, ce); |
201 | ce->max_delta_ns = sirfsoc_clockevent.max_delta_ns; | 196 | ce->min_delta_ns = clockevent_delta2ns(2, ce); |
202 | ce->min_delta_ns = sirfsoc_clockevent.min_delta_ns; | 197 | ce->cpumask = cpumask_of(cpu); |
203 | 198 | ||
204 | sirfsoc_timer1_irq.dev_id = ce; | 199 | action->dev_id = ce; |
205 | BUG_ON(setup_irq(ce->irq, &sirfsoc_timer1_irq)); | 200 | BUG_ON(setup_irq(ce->irq, action)); |
206 | irq_set_affinity(sirfsoc_timer1_irq.irq, cpumask_of(1)); | 201 | irq_set_affinity(action->irq, cpumask_of(cpu)); |
207 | 202 | ||
208 | clockevents_register_device(ce); | 203 | clockevents_register_device(ce); |
209 | return 0; | 204 | return 0; |
@@ -211,31 +206,48 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce) | |||
211 | 206 | ||
212 | static void sirfsoc_local_timer_stop(struct clock_event_device *ce) | 207 | static void sirfsoc_local_timer_stop(struct clock_event_device *ce) |
213 | { | 208 | { |
209 | int cpu = smp_processor_id(); | ||
210 | |||
214 | sirfsoc_timer_count_disable(1); | 211 | sirfsoc_timer_count_disable(1); |
215 | 212 | ||
216 | remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq); | 213 | if (cpu == 0) |
214 | remove_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq); | ||
215 | else | ||
216 | remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq); | ||
217 | } | 217 | } |
218 | 218 | ||
219 | static struct local_timer_ops sirfsoc_local_timer_ops = { | 219 | static int sirfsoc_cpu_notify(struct notifier_block *self, |
220 | .setup = sirfsoc_local_timer_setup, | 220 | unsigned long action, void *hcpu) |
221 | .stop = sirfsoc_local_timer_stop, | 221 | { |
222 | /* | ||
223 | * Grab cpu pointer in each case to avoid spurious | ||
224 | * preemptible warnings | ||
225 | */ | ||
226 | switch (action & ~CPU_TASKS_FROZEN) { | ||
227 | case CPU_STARTING: | ||
228 | sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent)); | ||
229 | break; | ||
230 | case CPU_DYING: | ||
231 | sirfsoc_local_timer_stop(this_cpu_ptr(sirfsoc_clockevent)); | ||
232 | break; | ||
233 | } | ||
234 | |||
235 | return NOTIFY_OK; | ||
236 | } | ||
237 | |||
238 | static struct notifier_block sirfsoc_cpu_nb = { | ||
239 | .notifier_call = sirfsoc_cpu_notify, | ||
222 | }; | 240 | }; |
223 | #endif /* CONFIG_LOCAL_TIMERS */ | ||
224 | 241 | ||
225 | static void __init sirfsoc_clockevent_init(void) | 242 | static void __init sirfsoc_clockevent_init(void) |
226 | { | 243 | { |
227 | clockevents_calc_mult_shift(&sirfsoc_clockevent, CLOCK_TICK_RATE, 60); | 244 | sirfsoc_clockevent = alloc_percpu(struct clock_event_device); |
228 | 245 | BUG_ON(!sirfsoc_clockevent); | |
229 | sirfsoc_clockevent.max_delta_ns = | 246 | |
230 | clockevent_delta2ns(-2, &sirfsoc_clockevent); | 247 | BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb)); |
231 | sirfsoc_clockevent.min_delta_ns = | 248 | |
232 | clockevent_delta2ns(2, &sirfsoc_clockevent); | 249 | /* Immediately configure the timer on the boot CPU */ |
233 | 250 | sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent)); | |
234 | sirfsoc_clockevent.cpumask = cpumask_of(0); | ||
235 | clockevents_register_device(&sirfsoc_clockevent); | ||
236 | #ifdef CONFIG_LOCAL_TIMERS | ||
237 | local_timer_register(&sirfsoc_local_timer_ops); | ||
238 | #endif | ||
239 | } | 251 | } |
240 | 252 | ||
241 | /* initialize the kernel jiffy timer source */ | 253 | /* initialize the kernel jiffy timer source */ |
@@ -273,8 +285,6 @@ static void __init sirfsoc_marco_timer_init(void) | |||
273 | 285 | ||
274 | BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE)); | 286 | BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE)); |
275 | 287 | ||
276 | BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq)); | ||
277 | |||
278 | sirfsoc_clockevent_init(); | 288 | sirfsoc_clockevent_init(); |
279 | } | 289 | } |
280 | 290 | ||
@@ -288,11 +298,9 @@ static void __init sirfsoc_of_timer_init(struct device_node *np) | |||
288 | if (!sirfsoc_timer_irq.irq) | 298 | if (!sirfsoc_timer_irq.irq) |
289 | panic("No irq passed for timer0 via DT\n"); | 299 | panic("No irq passed for timer0 via DT\n"); |
290 | 300 | ||
291 | #ifdef CONFIG_LOCAL_TIMERS | ||
292 | sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1); | 301 | sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1); |
293 | if (!sirfsoc_timer1_irq.irq) | 302 | if (!sirfsoc_timer1_irq.irq) |
294 | panic("No irq passed for timer1 via DT\n"); | 303 | panic("No irq passed for timer1 via DT\n"); |
295 | #endif | ||
296 | 304 | ||
297 | sirfsoc_marco_timer_init(); | 305 | sirfsoc_marco_timer_init(); |
298 | } | 306 | } |
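The Marco/SiRF timer cannot use a percpu IRQ, so it keeps two ordinary irqactions: timer0 stays with the boot CPU and timer1 is claimed by the secondary, each pinned with irq_set_affinity() and pointed at that CPU's clock_event_device through dev_id. The mult/shift arithmetic that used to be done once on the shared static device now runs in the per-CPU setup. An annotated excerpt of that computation, as it appears in sirfsoc_local_timer_setup() (the interpretation of the -2 argument is an assumption, not stated in the patch):

    /* CLOCK_TICK_RATE is the timer input clock; 60 bounds the range,
     * in seconds, over which the mult/shift conversion must stay exact */
    clockevents_calc_mult_shift(ce, CLOCK_TICK_RATE, 60);

    /* clockevent_delta2ns() takes an unsigned latch count: -2 is
     * 0xfffffffe on this 32-bit SoC, presumably the widest delta the
     * 32-bit counter can be programmed with; 2 ticks is the narrowest */
    ce->max_delta_ns = clockevent_delta2ns(-2, ce);
    ce->min_delta_ns = clockevent_delta2ns(2, ce);
    ce->cpumask = cpumask_of(cpu);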
diff --git a/include/linux/time-armada-370-xp.h b/include/linux/time-armada-370-xp.h
index dfdfdc03115b..6fb0856b9405 100644
--- a/include/linux/time-armada-370-xp.h
+++ b/include/linux/time-armada-370-xp.h
@@ -11,8 +11,6 @@ | |||
11 | #ifndef __TIME_ARMADA_370_XPPRCMU_H | 11 | #ifndef __TIME_ARMADA_370_XPPRCMU_H |
12 | #define __TIME_ARMADA_370_XPPRCMU_H | 12 | #define __TIME_ARMADA_370_XPPRCMU_H |
13 | 13 | ||
14 | #include <linux/init.h> | 14 | void armada_370_xp_timer_init(void); |
15 | |||
16 | void __init armada_370_xp_timer_init(void); | ||
17 | 15 | ||
18 | #endif | 16 | #endif |