diff options
author | Mark Rutland <mark.rutland@arm.com> | 2012-11-12 12:29:43 -0500 |
---|---|---|
committer | Mark Rutland <mark.rutland@arm.com> | 2013-01-31 10:51:38 -0500 |
commit | 1ba1cefc277865a0ac222f53bbbf2ebacad1559a (patch) | |
tree | 2db2038390a6ae9bf654e2473c7fbc0e8f2a051f | |
parent | 45801042225c66a66fb2cb50fae6ff71883a99d6 (diff) |
arm: arch_timer: divorce from local_timer api
Currently, the arch_timer driver is tied to the arm port, as it relies
on code in arch/arm/smp.c to setup and teardown timers as cores are
hotplugged on and off. The timer is registered through an arm-specific
registration mechanism, preventing sharing the driver with the arm64
port.
This patch moves the driver to using a cpu notifier instead, making it
easier to port.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
-rw-r--r-- | arch/arm/kernel/arch_timer.c | 52 |
1 file changed, 29 insertions(+), 23 deletions(-)
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c index e973cc0eaad1..c8dfec052f2d 100644 --- a/arch/arm/kernel/arch_timer.c +++ b/arch/arm/kernel/arch_timer.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/io.h> | 21 | #include <linux/io.h> |
22 | 22 | ||
23 | #include <asm/delay.h> | 23 | #include <asm/delay.h> |
24 | #include <asm/localtimer.h> | ||
25 | #include <asm/arch_timer.h> | 24 | #include <asm/arch_timer.h> |
26 | #include <asm/sched_clock.h> | 25 | #include <asm/sched_clock.h> |
27 | 26 | ||
@@ -37,7 +36,7 @@ enum ppi_nr { | |||
37 | 36 | ||
38 | static int arch_timer_ppi[MAX_TIMER_PPI]; | 37 | static int arch_timer_ppi[MAX_TIMER_PPI]; |
39 | 38 | ||
40 | static struct clock_event_device __percpu **arch_timer_evt; | 39 | static struct clock_event_device __percpu *arch_timer_evt; |
41 | static struct delay_timer arch_delay_timer; | 40 | static struct delay_timer arch_delay_timer; |
42 | 41 | ||
43 | static bool arch_timer_use_virtual = true; | 42 | static bool arch_timer_use_virtual = true; |
@@ -63,14 +62,14 @@ static irqreturn_t inline timer_handler(const int access, | |||
63 | 62 | ||
64 | static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id) | 63 | static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id) |
65 | { | 64 | { |
66 | struct clock_event_device *evt = *(struct clock_event_device **)dev_id; | 65 | struct clock_event_device *evt = dev_id; |
67 | 66 | ||
68 | return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt); | 67 | return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt); |
69 | } | 68 | } |
70 | 69 | ||
71 | static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id) | 70 | static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id) |
72 | { | 71 | { |
73 | struct clock_event_device *evt = *(struct clock_event_device **)dev_id; | 72 | struct clock_event_device *evt = dev_id; |
74 | 73 | ||
75 | return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt); | 74 | return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt); |
76 | } | 75 | } |
@@ -141,13 +140,13 @@ static int __cpuinit arch_timer_setup(struct clock_event_device *clk) | |||
141 | clk->set_next_event = arch_timer_set_next_event_phys; | 140 | clk->set_next_event = arch_timer_set_next_event_phys; |
142 | } | 141 | } |
143 | 142 | ||
143 | clk->cpumask = cpumask_of(smp_processor_id()); | ||
144 | |||
144 | clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL); | 145 | clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL); |
145 | 146 | ||
146 | clockevents_config_and_register(clk, arch_timer_rate, | 147 | clockevents_config_and_register(clk, arch_timer_rate, |
147 | 0xf, 0x7fffffff); | 148 | 0xf, 0x7fffffff); |
148 | 149 | ||
149 | *__this_cpu_ptr(arch_timer_evt) = clk; | ||
150 | |||
151 | if (arch_timer_use_virtual) | 150 | if (arch_timer_use_virtual) |
152 | enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0); | 151 | enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0); |
153 | else { | 152 | else { |
@@ -251,12 +250,26 @@ static void __cpuinit arch_timer_stop(struct clock_event_device *clk) | |||
251 | clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk); | 250 | clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk); |
252 | } | 251 | } |
253 | 252 | ||
254 | static struct local_timer_ops arch_timer_ops __cpuinitdata = { | 253 | static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self, |
255 | .setup = arch_timer_setup, | 254 | unsigned long action, void *hcpu) |
256 | .stop = arch_timer_stop, | 255 | { |
257 | }; | 256 | struct clock_event_device *evt = this_cpu_ptr(arch_timer_evt); |
257 | |||
258 | switch (action & ~CPU_TASKS_FROZEN) { | ||
259 | case CPU_STARTING: | ||
260 | arch_timer_setup(evt); | ||
261 | break; | ||
262 | case CPU_DYING: | ||
263 | arch_timer_stop(evt); | ||
264 | break; | ||
265 | } | ||
266 | |||
267 | return NOTIFY_OK; | ||
268 | } | ||
258 | 269 | ||
259 | static struct clock_event_device arch_timer_global_evt; | 270 | static struct notifier_block arch_timer_cpu_nb __cpuinitdata = { |
271 | .notifier_call = arch_timer_cpu_notify, | ||
272 | }; | ||
260 | 273 | ||
261 | static int __init arch_timer_register(void) | 274 | static int __init arch_timer_register(void) |
262 | { | 275 | { |
@@ -267,7 +280,7 @@ static int __init arch_timer_register(void) | |||
267 | if (err) | 280 | if (err) |
268 | goto out; | 281 | goto out; |
269 | 282 | ||
270 | arch_timer_evt = alloc_percpu(struct clock_event_device *); | 283 | arch_timer_evt = alloc_percpu(struct clock_event_device); |
271 | if (!arch_timer_evt) { | 284 | if (!arch_timer_evt) { |
272 | err = -ENOMEM; | 285 | err = -ENOMEM; |
273 | goto out; | 286 | goto out; |
@@ -303,20 +316,13 @@ static int __init arch_timer_register(void) | |||
303 | goto out_free; | 316 | goto out_free; |
304 | } | 317 | } |
305 | 318 | ||
306 | err = local_timer_register(&arch_timer_ops); | 319 | err = register_cpu_notifier(&arch_timer_cpu_nb); |
307 | if (err) { | ||
308 | /* | ||
309 | * We couldn't register as a local timer (could be | ||
310 | * because we're on a UP platform, or because some | ||
311 | * other local timer is already present...). Try as a | ||
312 | * global timer instead. | ||
313 | */ | ||
314 | arch_timer_global_evt.cpumask = cpumask_of(0); | ||
315 | err = arch_timer_setup(&arch_timer_global_evt); | ||
316 | } | ||
317 | if (err) | 320 | if (err) |
318 | goto out_free_irq; | 321 | goto out_free_irq; |
319 | 322 | ||
323 | /* Immediately configure the timer on the boot CPU */ | ||
324 | arch_timer_setup(this_cpu_ptr(arch_timer_evt)); | ||
325 | |||
320 | /* Use the architected timer for the delay loop. */ | 326 | /* Use the architected timer for the delay loop. */ |
321 | arch_delay_timer.read_current_timer = &arch_timer_read_current_timer; | 327 | arch_delay_timer.read_current_timer = &arch_timer_read_current_timer; |
322 | arch_delay_timer.freq = arch_timer_rate; | 328 | arch_delay_timer.freq = arch_timer_rate; |