path: root/arch/arm/kernel/arch_timer.c
author		Mark Rutland <mark.rutland@arm.com>	2012-11-12 09:33:44 -0500
committer	Mark Rutland <mark.rutland@arm.com>	2013-01-31 10:51:49 -0500
commit		8a4da6e36c582ff746191eca85b6c1c068dbfbd6 (patch)
tree		9c4be7e6853d33f35580e6f7c64c3d410dfe4aaf /arch/arm/kernel/arch_timer.c
parent		b2deabe3ba664a1ec47400c0ca285e951874e0cc (diff)
arm: arch_timer: move core to drivers/clocksource
The core functionality of the arch_timer driver is not directly tied
to anything under arch/arm, and can be split out.

This patch factors out the core of the arch_timer driver so that it
can be shared with other architectures. A couple of functions are
added so that architecture-specific code can interact with the driver
without needing to touch its internals.

The ARM_ARCH_TIMER config variable is moved out to
drivers/clocksource/Kconfig, and existing uses in arch/arm are
replaced with HAVE_ARM_ARCH_TIMER, which selects it.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
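For illustration only (this note is not part of the commit): a minimal sketch of how architecture code can now drive the shared driver purely through its public entry points, arch_timer_init(), arch_timer_get_rate() and arch_timer_read_counter(), all declared in <clocksource/arm_arch_timer.h>, mirroring the calls the rewritten arch/arm/kernel/arch_timer.c makes in the diff below. The function name example_arch_timer_setup() is hypothetical.

#include <linux/init.h>
#include <linux/kernel.h>

#include <clocksource/arm_arch_timer.h>

/* Hypothetical caller; mirrors the calls made by the new arch_timer.c. */
static int __init example_arch_timer_setup(void)
{
	int ret;

	/* Probe and register the timer; the internals stay in drivers/clocksource. */
	ret = arch_timer_init();
	if (ret)
		return ret;

	/* The rate and raw counter are exposed via accessors rather than globals. */
	pr_info("arch timer at %u Hz, counter at %llu\n",
		arch_timer_get_rate(),
		(unsigned long long)arch_timer_read_counter());

	return 0;
}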
Diffstat (limited to 'arch/arm/kernel/arch_timer.c')
-rw-r--r--	arch/arm/kernel/arch_timer.c	386
1 file changed, 18 insertions(+), 368 deletions(-)
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index 94f503394c5c..36ebcf4b516f 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -9,402 +9,52 @@
  * published by the Free Software Foundation.
  */
 #include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/smp.h>
-#include <linux/cpu.h>
-#include <linux/jiffies.h>
-#include <linux/clockchips.h>
-#include <linux/interrupt.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
+#include <linux/types.h>
 
 #include <asm/delay.h>
-#include <asm/arch_timer.h>
 #include <asm/sched_clock.h>
 
-static u32 arch_timer_rate;
+#include <clocksource/arm_arch_timer.h>
 
-enum ppi_nr {
-	PHYS_SECURE_PPI,
-	PHYS_NONSECURE_PPI,
-	VIRT_PPI,
-	HYP_PPI,
-	MAX_TIMER_PPI
-};
-
-static int arch_timer_ppi[MAX_TIMER_PPI];
-
-static struct clock_event_device __percpu *arch_timer_evt;
-static struct delay_timer arch_delay_timer;
-
-static bool arch_timer_use_virtual = true;
-
-/*
- * Architected system timer support.
- */
-
-static irqreturn_t inline timer_handler(const int access,
-					struct clock_event_device *evt)
-{
-	unsigned long ctrl;
-	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
-	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
-		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
-		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
-		evt->event_handler(evt);
-		return IRQ_HANDLED;
-	}
-
-	return IRQ_NONE;
-}
-
-static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
-{
-	struct clock_event_device *evt = dev_id;
-
-	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
-}
-
-static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
-{
-	struct clock_event_device *evt = dev_id;
-
-	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
-}
-
-static inline void timer_set_mode(const int access, int mode)
-{
-	unsigned long ctrl;
-	switch (mode) {
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
-		ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
-		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
-		break;
-	default:
-		break;
-	}
-}
-
-static void arch_timer_set_mode_virt(enum clock_event_mode mode,
-				     struct clock_event_device *clk)
-{
-	timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
-}
-
-static void arch_timer_set_mode_phys(enum clock_event_mode mode,
-				     struct clock_event_device *clk)
-{
-	timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
-}
-
-static inline void set_next_event(const int access, unsigned long evt)
-{
-	unsigned long ctrl;
-	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
-	ctrl |= ARCH_TIMER_CTRL_ENABLE;
-	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
-	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
-	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
-}
-
-static int arch_timer_set_next_event_virt(unsigned long evt,
-					  struct clock_event_device *unused)
-{
-	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
-	return 0;
-}
-
-static int arch_timer_set_next_event_phys(unsigned long evt,
-					  struct clock_event_device *unused)
-{
-	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
-	return 0;
-}
-
-static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
-{
-	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
-	clk->name = "arch_sys_timer";
-	clk->rating = 450;
-	if (arch_timer_use_virtual) {
-		clk->irq = arch_timer_ppi[VIRT_PPI];
-		clk->set_mode = arch_timer_set_mode_virt;
-		clk->set_next_event = arch_timer_set_next_event_virt;
-	} else {
-		clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
-		clk->set_mode = arch_timer_set_mode_phys;
-		clk->set_next_event = arch_timer_set_next_event_phys;
-	}
-
-	clk->cpumask = cpumask_of(smp_processor_id());
-
-	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);
-
-	clockevents_config_and_register(clk, arch_timer_rate,
-					0xf, 0x7fffffff);
-
-	if (arch_timer_use_virtual)
-		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
-	else {
-		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
-		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
-			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
-	}
-
-	arch_counter_set_user_access();
-
-	return 0;
-}
-
-static int arch_timer_available(void)
-{
-	u32 freq;
-
-	if (arch_timer_rate == 0) {
-		freq = arch_timer_get_cntfrq();
-
-		/* Check the timer frequency. */
-		if (freq == 0) {
-			pr_warn("Architected timer frequency not available\n");
-			return -EINVAL;
-		}
-
-		arch_timer_rate = freq;
-	}
-
-	pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
-		     (unsigned long)arch_timer_rate / 1000000,
-		     (unsigned long)(arch_timer_rate / 10000) % 100,
-		     arch_timer_use_virtual ? "virt" : "phys");
-	return 0;
-}
-
-/*
- * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
- * call it before it has been initialised. Rather than incur a performance
- * penalty checking for initialisation, provide a default implementation that
- * won't lead to time appearing to jump backwards.
- */
-static u64 arch_timer_read_zero(void)
-{
-	return 0;
-}
-
-u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;
-
-static u32 arch_timer_read_counter32(void)
-{
-	return arch_timer_read_counter();
-}
-
-static cycle_t arch_counter_read(struct clocksource *cs)
+static unsigned long arch_timer_read_counter_long(void)
 {
 	return arch_timer_read_counter();
 }
 
-static unsigned long arch_timer_read_current_timer(void)
+static u32 arch_timer_read_counter_u32(void)
 {
 	return arch_timer_read_counter();
 }
 
-static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
-{
-	return arch_timer_read_counter();
-}
-
-static struct clocksource clocksource_counter = {
-	.name	= "arch_sys_counter",
-	.rating	= 400,
-	.read	= arch_counter_read,
-	.mask	= CLOCKSOURCE_MASK(56),
-	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static struct cyclecounter cyclecounter = {
-	.read	= arch_counter_read_cc,
-	.mask	= CLOCKSOURCE_MASK(56),
-};
-
-static struct timecounter timecounter;
-
-struct timecounter *arch_timer_get_timecounter(void)
-{
-	return &timecounter;
-}
-
-static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
-{
-	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
-		 clk->irq, smp_processor_id());
-
-	if (arch_timer_use_virtual)
-		disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
-	else {
-		disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
-		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
-			disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
-	}
-
-	clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
-}
-
-static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
-					   unsigned long action, void *hcpu)
-{
-	struct clock_event_device *evt = this_cpu_ptr(arch_timer_evt);
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		arch_timer_setup(evt);
-		break;
-	case CPU_DYING:
-		arch_timer_stop(evt);
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
-	.notifier_call = arch_timer_cpu_notify,
-};
+static struct delay_timer arch_delay_timer;
 
-static int __init arch_timer_register(void)
+static void __init arch_timer_delay_timer_register(void)
 {
-	int err;
-	int ppi;
-
-	err = arch_timer_available();
-	if (err)
-		goto out;
-
-	arch_timer_evt = alloc_percpu(struct clock_event_device);
-	if (!arch_timer_evt) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
-	cyclecounter.mult = clocksource_counter.mult;
-	cyclecounter.shift = clocksource_counter.shift;
-	timecounter_init(&timecounter, &cyclecounter,
-			 arch_counter_get_cntpct());
-
-	if (arch_timer_use_virtual) {
-		ppi = arch_timer_ppi[VIRT_PPI];
-		err = request_percpu_irq(ppi, arch_timer_handler_virt,
-					 "arch_timer", arch_timer_evt);
-	} else {
-		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
-		err = request_percpu_irq(ppi, arch_timer_handler_phys,
-					 "arch_timer", arch_timer_evt);
-		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
-			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
-			err = request_percpu_irq(ppi, arch_timer_handler_phys,
-						 "arch_timer", arch_timer_evt);
-			if (err)
-				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
-						arch_timer_evt);
-		}
-	}
-
-	if (err) {
-		pr_err("arch_timer: can't register interrupt %d (%d)\n",
-		       ppi, err);
-		goto out_free;
-	}
-
-	err = register_cpu_notifier(&arch_timer_cpu_nb);
-	if (err)
-		goto out_free_irq;
-
-	/* Immediately configure the timer on the boot CPU */
-	arch_timer_setup(this_cpu_ptr(arch_timer_evt));
-
 	/* Use the architected timer for the delay loop. */
-	arch_delay_timer.read_current_timer = &arch_timer_read_current_timer;
-	arch_delay_timer.freq = arch_timer_rate;
+	arch_delay_timer.read_current_timer = arch_timer_read_counter_long;
+	arch_delay_timer.freq = arch_timer_get_rate();
 	register_current_timer_delay(&arch_delay_timer);
-	return 0;
-
-out_free_irq:
-	if (arch_timer_use_virtual)
-		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
-	else {
-		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
-				arch_timer_evt);
-		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
-			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
-					arch_timer_evt);
-	}
-
-out_free:
-	free_percpu(arch_timer_evt);
-out:
-	return err;
 }
 
-static const struct of_device_id arch_timer_of_match[] __initconst = {
-	{ .compatible	= "arm,armv7-timer",	},
-	{},
-};
-
 int __init arch_timer_of_register(void)
 {
-	struct device_node *np;
-	u32 freq;
-	int i;
-
-	np = of_find_matching_node(NULL, arch_timer_of_match);
-	if (!np) {
-		pr_err("arch_timer: can't find DT node\n");
-		return -ENODEV;
-	}
+	int ret;
 
-	/* Try to determine the frequency from the device tree or CNTFRQ */
-	if (!of_property_read_u32(np, "clock-frequency", &freq))
-		arch_timer_rate = freq;
+	ret = arch_timer_init();
+	if (ret)
+		return ret;
 
-	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
-		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
+	arch_timer_delay_timer_register();
 
-	of_node_put(np);
-
-	/*
-	 * If no interrupt provided for virtual timer, we'll have to
-	 * stick to the physical timer. It'd better be accessible...
-	 */
-	if (!arch_timer_ppi[VIRT_PPI]) {
-		arch_timer_use_virtual = false;
-
-		if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
-		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
-			pr_warn("arch_timer: No interrupt available, giving up\n");
-			return -EINVAL;
-		}
-	}
-
-	if (arch_timer_use_virtual)
-		arch_timer_read_counter = arch_counter_get_cntvct;
-	else
-		arch_timer_read_counter = arch_counter_get_cntpct;
-
-	return arch_timer_register();
+	return 0;
 }
 
 int __init arch_timer_sched_clock_init(void)
 {
-	int err;
-
-	err = arch_timer_available();
-	if (err)
-		return err;
+	if (arch_timer_get_rate() == 0)
+		return -ENXIO;
 
-	setup_sched_clock(arch_timer_read_counter32,
-			  32, arch_timer_rate);
+	setup_sched_clock(arch_timer_read_counter_u32,
+			  32, arch_timer_get_rate());
 	return 0;
 }