author     Mark Rutland <mark.rutland@arm.com>  2012-11-12 09:33:44 -0500
committer  Mark Rutland <mark.rutland@arm.com>  2013-01-31 10:51:49 -0500
commit     8a4da6e36c582ff746191eca85b6c1c068dbfbd6
tree       9c4be7e6853d33f35580e6f7c64c3d410dfe4aaf
parent     b2deabe3ba664a1ec47400c0ca285e951874e0cc
arm: arch_timer: move core to drivers/clocksource
The core functionality of the arch_timer driver is not directly tied to anything
under arch/arm, and can be split out.

This patch factors out the core of the arch_timer driver so that it can be shared
with other architectures. A couple of functions are added so that
architecture-specific code can interact with the driver without needing to touch
its internals.

The ARM_ARCH_TIMER config variable is moved out to drivers/clocksource/Kconfig;
existing uses in arch/arm are replaced with HAVE_ARM_ARCH_TIMER, which selects it.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
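[Editor's note: the sketch below is not part of the patch. It is a minimal illustration of how architecture glue code might consume the shared driver through the interfaces declared in the new include/clocksource/arm_arch_timer.h header. The wrapper name my_arch_timer_glue_init() and the printouts are assumptions; the real arch/arm wrapper added by this patch is arch_timer_of_register(), shown in the diff.]

/*
 * Illustrative sketch only -- not part of this patch. It uses only the
 * interfaces exported by <clocksource/arm_arch_timer.h>: arch_timer_init(),
 * arch_timer_get_rate() and the arch_timer_read_counter hook. How and when
 * the architecture calls this (e.g. from its time_init() path) is omitted.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <clocksource/arm_arch_timer.h>

int __init my_arch_timer_glue_init(void)
{
        int ret;

        /* Probe the timer from the device tree and register it. */
        ret = arch_timer_init();
        if (ret)
                return ret;

        /* The core exports the counter frequency... */
        pr_info("arch timer running at %u Hz\n", arch_timer_get_rate());

        /* ...and a 64-bit counter read hook, usable for sched_clock or delay loops. */
        pr_info("counter is currently %llu\n",
                (unsigned long long)arch_timer_read_counter());

        return 0;
}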
-rw-r--r--  arch/arm/Kconfig                        3
-rw-r--r--  arch/arm/include/asm/arch_timer.h      19
-rw-r--r--  arch/arm/kernel/arch_timer.c          386
-rw-r--r--  arch/arm/mach-omap2/Kconfig             2
-rw-r--r--  drivers/clocksource/Kconfig             3
-rw-r--r--  drivers/clocksource/Makefile            1
-rw-r--r--  drivers/clocksource/arm_arch_timer.c  385
-rw-r--r--  include/clocksource/arm_arch_timer.h   63
8 files changed, 476 insertions(+), 386 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 67874b82a4ed..e1162f52f2b5 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1572,9 +1572,10 @@ config HAVE_ARM_SCU
         help
           This option enables support for the ARM system coherency unit
 
-config ARM_ARCH_TIMER
+config HAVE_ARM_ARCH_TIMER
         bool "Architected timer support"
         depends on CPU_V7
+        select ARM_ARCH_TIMER
         help
           This option enables support for the ARM architected timer
 
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 729f6d98df86..7ade91d8cc6f 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -4,22 +4,14 @@
 #include <asm/barrier.h>
 #include <asm/errno.h>
 #include <linux/clocksource.h>
+#include <linux/init.h>
 #include <linux/types.h>
 
+#include <clocksource/arm_arch_timer.h>
+
 #ifdef CONFIG_ARM_ARCH_TIMER
 int arch_timer_of_register(void);
 int arch_timer_sched_clock_init(void);
-struct timecounter *arch_timer_get_timecounter(void);
-
-#define ARCH_TIMER_CTRL_ENABLE          (1 << 0)
-#define ARCH_TIMER_CTRL_IT_MASK         (1 << 1)
-#define ARCH_TIMER_CTRL_IT_STAT         (1 << 2)
-
-#define ARCH_TIMER_REG_CTRL             0
-#define ARCH_TIMER_REG_TVAL             1
-
-#define ARCH_TIMER_PHYS_ACCESS          0
-#define ARCH_TIMER_VIRT_ACCESS          1
 
 /*
  * These register accessors are marked inline so the compiler can
@@ -128,11 +120,6 @@ static inline int arch_timer_sched_clock_init(void)
 {
         return -ENXIO;
 }
-
-static inline struct timecounter *arch_timer_get_timecounter(void)
-{
-        return NULL;
-}
 #endif
 
 #endif
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index 94f503394c5c..36ebcf4b516f 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -9,402 +9,52 @@
  * published by the Free Software Foundation.
  */
 #include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/smp.h>
-#include <linux/cpu.h>
-#include <linux/jiffies.h>
-#include <linux/clockchips.h>
-#include <linux/interrupt.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
+#include <linux/types.h>
 
 #include <asm/delay.h>
-#include <asm/arch_timer.h>
 #include <asm/sched_clock.h>
 
-static u32 arch_timer_rate;
+#include <clocksource/arm_arch_timer.h>
 
-enum ppi_nr {
-        PHYS_SECURE_PPI,
-        PHYS_NONSECURE_PPI,
-        VIRT_PPI,
-        HYP_PPI,
-        MAX_TIMER_PPI
-};
-
-static int arch_timer_ppi[MAX_TIMER_PPI];
-
-static struct clock_event_device __percpu *arch_timer_evt;
-static struct delay_timer arch_delay_timer;
-
-static bool arch_timer_use_virtual = true;
-
-/*
- * Architected system timer support.
- */
-
-static irqreturn_t inline timer_handler(const int access,
-                                        struct clock_event_device *evt)
-{
-        unsigned long ctrl;
-        ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
-        if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
-                ctrl |= ARCH_TIMER_CTRL_IT_MASK;
-                arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
-                evt->event_handler(evt);
-                return IRQ_HANDLED;
-        }
-
-        return IRQ_NONE;
-}
-
-static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
-{
-        struct clock_event_device *evt = dev_id;
-
-        return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
-}
-
-static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
-{
-        struct clock_event_device *evt = dev_id;
-
-        return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
-}
-
-static inline void timer_set_mode(const int access, int mode)
-{
-        unsigned long ctrl;
-        switch (mode) {
-        case CLOCK_EVT_MODE_UNUSED:
-        case CLOCK_EVT_MODE_SHUTDOWN:
-                ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
-                ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
-                arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
-                break;
-        default:
-                break;
-        }
-}
-
-static void arch_timer_set_mode_virt(enum clock_event_mode mode,
-                                     struct clock_event_device *clk)
-{
-        timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
-}
-
-static void arch_timer_set_mode_phys(enum clock_event_mode mode,
-                                     struct clock_event_device *clk)
-{
-        timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
-}
-
-static inline void set_next_event(const int access, unsigned long evt)
-{
-        unsigned long ctrl;
-        ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
-        ctrl |= ARCH_TIMER_CTRL_ENABLE;
-        ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
-        arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
-        arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
-}
-
-static int arch_timer_set_next_event_virt(unsigned long evt,
-                                          struct clock_event_device *unused)
-{
-        set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
-        return 0;
-}
-
-static int arch_timer_set_next_event_phys(unsigned long evt,
-                                          struct clock_event_device *unused)
-{
-        set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
-        return 0;
-}
-
-static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
-{
-        clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
-        clk->name = "arch_sys_timer";
-        clk->rating = 450;
-        if (arch_timer_use_virtual) {
-                clk->irq = arch_timer_ppi[VIRT_PPI];
-                clk->set_mode = arch_timer_set_mode_virt;
-                clk->set_next_event = arch_timer_set_next_event_virt;
-        } else {
-                clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
-                clk->set_mode = arch_timer_set_mode_phys;
-                clk->set_next_event = arch_timer_set_next_event_phys;
-        }
-
-        clk->cpumask = cpumask_of(smp_processor_id());
-
-        clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);
-
-        clockevents_config_and_register(clk, arch_timer_rate,
-                                        0xf, 0x7fffffff);
-
-        if (arch_timer_use_virtual)
-                enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
-        else {
-                enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
-                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
-                        enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
-        }
-
-        arch_counter_set_user_access();
-
-        return 0;
-}
-
-static int arch_timer_available(void)
-{
-        u32 freq;
-
-        if (arch_timer_rate == 0) {
-                freq = arch_timer_get_cntfrq();
-
-                /* Check the timer frequency. */
-                if (freq == 0) {
-                        pr_warn("Architected timer frequency not available\n");
-                        return -EINVAL;
-                }
-
-                arch_timer_rate = freq;
-        }
-
-        pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
-                     (unsigned long)arch_timer_rate / 1000000,
-                     (unsigned long)(arch_timer_rate / 10000) % 100,
-                     arch_timer_use_virtual ? "virt" : "phys");
-        return 0;
-}
-
-/*
- * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
- * call it before it has been initialised. Rather than incur a performance
- * penalty checking for initialisation, provide a default implementation that
- * won't lead to time appearing to jump backwards.
- */
-static u64 arch_timer_read_zero(void)
-{
-        return 0;
-}
-
-u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;
-
-static u32 arch_timer_read_counter32(void)
-{
-        return arch_timer_read_counter();
-}
-
-static cycle_t arch_counter_read(struct clocksource *cs)
+static unsigned long arch_timer_read_counter_long(void)
 {
         return arch_timer_read_counter();
 }
 
-static unsigned long arch_timer_read_current_timer(void)
+static u32 arch_timer_read_counter_u32(void)
 {
         return arch_timer_read_counter();
 }
 
-static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
-{
-        return arch_timer_read_counter();
-}
-
-static struct clocksource clocksource_counter = {
-        .name   = "arch_sys_counter",
-        .rating = 400,
-        .read   = arch_counter_read,
-        .mask   = CLOCKSOURCE_MASK(56),
-        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static struct cyclecounter cyclecounter = {
-        .read   = arch_counter_read_cc,
-        .mask   = CLOCKSOURCE_MASK(56),
-};
-
-static struct timecounter timecounter;
-
-struct timecounter *arch_timer_get_timecounter(void)
-{
-        return &timecounter;
-}
-
-static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
-{
-        pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
-                 clk->irq, smp_processor_id());
-
-        if (arch_timer_use_virtual)
-                disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
-        else {
-                disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
-                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
-                        disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
-        }
-
-        clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
-}
-
-static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
-                                           unsigned long action, void *hcpu)
-{
-        struct clock_event_device *evt = this_cpu_ptr(arch_timer_evt);
-
-        switch (action & ~CPU_TASKS_FROZEN) {
-        case CPU_STARTING:
-                arch_timer_setup(evt);
-                break;
-        case CPU_DYING:
-                arch_timer_stop(evt);
-                break;
-        }
-
-        return NOTIFY_OK;
-}
-
-static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
-        .notifier_call = arch_timer_cpu_notify,
-};
+static struct delay_timer arch_delay_timer;
 
-static int __init arch_timer_register(void)
+static void __init arch_timer_delay_timer_register(void)
 {
-        int err;
-        int ppi;
-
-        err = arch_timer_available();
-        if (err)
-                goto out;
-
-        arch_timer_evt = alloc_percpu(struct clock_event_device);
-        if (!arch_timer_evt) {
-                err = -ENOMEM;
-                goto out;
-        }
-
-        clocksource_register_hz(&clocksource_counter, arch_timer_rate);
-        cyclecounter.mult = clocksource_counter.mult;
-        cyclecounter.shift = clocksource_counter.shift;
-        timecounter_init(&timecounter, &cyclecounter,
-                         arch_counter_get_cntpct());
-
-        if (arch_timer_use_virtual) {
-                ppi = arch_timer_ppi[VIRT_PPI];
-                err = request_percpu_irq(ppi, arch_timer_handler_virt,
-                                         "arch_timer", arch_timer_evt);
-        } else {
-                ppi = arch_timer_ppi[PHYS_SECURE_PPI];
-                err = request_percpu_irq(ppi, arch_timer_handler_phys,
-                                         "arch_timer", arch_timer_evt);
-                if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
-                        ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
-                        err = request_percpu_irq(ppi, arch_timer_handler_phys,
-                                                 "arch_timer", arch_timer_evt);
-                        if (err)
-                                free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
-                                                arch_timer_evt);
-                }
-        }
-
-        if (err) {
-                pr_err("arch_timer: can't register interrupt %d (%d)\n",
-                       ppi, err);
-                goto out_free;
-        }
-
-        err = register_cpu_notifier(&arch_timer_cpu_nb);
-        if (err)
-                goto out_free_irq;
-
-        /* Immediately configure the timer on the boot CPU */
-        arch_timer_setup(this_cpu_ptr(arch_timer_evt));
-
         /* Use the architected timer for the delay loop. */
-        arch_delay_timer.read_current_timer = &arch_timer_read_current_timer;
-        arch_delay_timer.freq = arch_timer_rate;
+        arch_delay_timer.read_current_timer = arch_timer_read_counter_long;
+        arch_delay_timer.freq = arch_timer_get_rate();
         register_current_timer_delay(&arch_delay_timer);
-        return 0;
-
-out_free_irq:
-        if (arch_timer_use_virtual)
-                free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
-        else {
-                free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
-                                arch_timer_evt);
-                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
-                        free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
-                                        arch_timer_evt);
-        }
-
-out_free:
-        free_percpu(arch_timer_evt);
-out:
-        return err;
 }
 
-static const struct of_device_id arch_timer_of_match[] __initconst = {
-        { .compatible = "arm,armv7-timer", },
-        {},
-};
-
 int __init arch_timer_of_register(void)
 {
-        struct device_node *np;
-        u32 freq;
-        int i;
-
-        np = of_find_matching_node(NULL, arch_timer_of_match);
-        if (!np) {
-                pr_err("arch_timer: can't find DT node\n");
-                return -ENODEV;
-        }
+        int ret;
 
-        /* Try to determine the frequency from the device tree or CNTFRQ */
-        if (!of_property_read_u32(np, "clock-frequency", &freq))
-                arch_timer_rate = freq;
+        ret = arch_timer_init();
+        if (ret)
+                return ret;
 
-        for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
-                arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
+        arch_timer_delay_timer_register();
 
-        of_node_put(np);
-
-        /*
-         * If no interrupt provided for virtual timer, we'll have to
-         * stick to the physical timer. It'd better be accessible...
-         */
-        if (!arch_timer_ppi[VIRT_PPI]) {
-                arch_timer_use_virtual = false;
-
-                if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
-                    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
-                        pr_warn("arch_timer: No interrupt available, giving up\n");
-                        return -EINVAL;
-                }
-        }
-
-        if (arch_timer_use_virtual)
-                arch_timer_read_counter = arch_counter_get_cntvct;
-        else
-                arch_timer_read_counter = arch_counter_get_cntpct;
-
-        return arch_timer_register();
+        return 0;
 }
 
 int __init arch_timer_sched_clock_init(void)
 {
-        int err;
-
-        err = arch_timer_available();
-        if (err)
-                return err;
+        if (arch_timer_get_rate() == 0)
+                return -ENXIO;
 
-        setup_sched_clock(arch_timer_read_counter32,
-                          32, arch_timer_rate);
+        setup_sched_clock(arch_timer_read_counter_u32,
+                          32, arch_timer_get_rate());
         return 0;
 }
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 41b581fd0213..9d7909e58980 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -76,12 +76,12 @@ config ARCH_OMAP4
 
 config SOC_OMAP5
         bool "TI OMAP5"
-        select ARM_ARCH_TIMER
         select ARM_CPU_SUSPEND if PM
         select ARM_GIC
         select CPU_V7
         select HAVE_SMP
         select COMMON_CLK
+        select HAVE_ARM_ARCH_TIMER
 
 comment "OMAP Core Type"
         depends on ARCH_OMAP2
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 7fdcbd3f4da5..dbb085ac64d4 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -58,3 +58,6 @@ config CLKSRC_ARM_GENERIC
         def_bool y if ARM64
         help
           This option enables support for the ARM generic timer.
+
+config ARM_ARCH_TIMER
+        bool
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index f93453d01673..32f858c8eecc 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o
 obj-$(CONFIG_SUNXI_TIMER)               += sunxi_timer.o
 
 obj-$(CONFIG_CLKSRC_ARM_GENERIC)        += arm_generic.o
+obj-$(CONFIG_ARM_ARCH_TIMER)            += arm_arch_timer.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
new file mode 100644
index 000000000000..3e4739df0e82
--- /dev/null
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -0,0 +1,385 @@
+/*
+ * linux/drivers/clocksource/arm_arch_timer.c
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+
+#include <asm/arch_timer.h>
+
+#include <clocksource/arm_arch_timer.h>
+
+static u32 arch_timer_rate;
+
+enum ppi_nr {
+        PHYS_SECURE_PPI,
+        PHYS_NONSECURE_PPI,
+        VIRT_PPI,
+        HYP_PPI,
+        MAX_TIMER_PPI
+};
+
+static int arch_timer_ppi[MAX_TIMER_PPI];
+
+static struct clock_event_device __percpu *arch_timer_evt;
+
+static bool arch_timer_use_virtual = true;
+
+/*
+ * Architected system timer support.
+ */
+
+static inline irqreturn_t timer_handler(const int access,
+                                        struct clock_event_device *evt)
+{
+        unsigned long ctrl;
+        ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+        if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
+                ctrl |= ARCH_TIMER_CTRL_IT_MASK;
+                arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+                evt->event_handler(evt);
+                return IRQ_HANDLED;
+        }
+
+        return IRQ_NONE;
+}
+
+static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
+{
+        struct clock_event_device *evt = dev_id;
+
+        return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
+}
+
+static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
+{
+        struct clock_event_device *evt = dev_id;
+
+        return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
+}
+
+static inline void timer_set_mode(const int access, int mode)
+{
+        unsigned long ctrl;
+        switch (mode) {
+        case CLOCK_EVT_MODE_UNUSED:
+        case CLOCK_EVT_MODE_SHUTDOWN:
+                ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+                ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+                arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+                break;
+        default:
+                break;
+        }
+}
+
+static void arch_timer_set_mode_virt(enum clock_event_mode mode,
+                                     struct clock_event_device *clk)
+{
+        timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
+}
+
+static void arch_timer_set_mode_phys(enum clock_event_mode mode,
+                                     struct clock_event_device *clk)
+{
+        timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
+}
+
+static inline void set_next_event(const int access, unsigned long evt)
+{
+        unsigned long ctrl;
+        ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+        ctrl |= ARCH_TIMER_CTRL_ENABLE;
+        ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+        arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
+        arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+}
+
+static int arch_timer_set_next_event_virt(unsigned long evt,
+                                          struct clock_event_device *unused)
+{
+        set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
+        return 0;
+}
+
+static int arch_timer_set_next_event_phys(unsigned long evt,
+                                          struct clock_event_device *unused)
+{
+        set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
+        return 0;
+}
+
+static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
+{
+        clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
+        clk->name = "arch_sys_timer";
+        clk->rating = 450;
+        if (arch_timer_use_virtual) {
+                clk->irq = arch_timer_ppi[VIRT_PPI];
+                clk->set_mode = arch_timer_set_mode_virt;
+                clk->set_next_event = arch_timer_set_next_event_virt;
+        } else {
+                clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
+                clk->set_mode = arch_timer_set_mode_phys;
+                clk->set_next_event = arch_timer_set_next_event_phys;
+        }
+
+        clk->cpumask = cpumask_of(smp_processor_id());
+
+        clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);
+
+        clockevents_config_and_register(clk, arch_timer_rate,
+                                        0xf, 0x7fffffff);
+
+        if (arch_timer_use_virtual)
+                enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
+        else {
+                enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
+                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+                        enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
+        }
+
+        arch_counter_set_user_access();
+
+        return 0;
+}
+
+static int arch_timer_available(void)
+{
+        u32 freq;
+
+        if (arch_timer_rate == 0) {
+                freq = arch_timer_get_cntfrq();
+
+                /* Check the timer frequency. */
+                if (freq == 0) {
+                        pr_warn("Architected timer frequency not available\n");
+                        return -EINVAL;
+                }
+
+                arch_timer_rate = freq;
+        }
+
+        pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
+                     (unsigned long)arch_timer_rate / 1000000,
+                     (unsigned long)(arch_timer_rate / 10000) % 100,
+                     arch_timer_use_virtual ? "virt" : "phys");
+        return 0;
+}
+
+u32 arch_timer_get_rate(void)
+{
+        return arch_timer_rate;
+}
+
+/*
+ * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
+ * call it before it has been initialised. Rather than incur a performance
+ * penalty checking for initialisation, provide a default implementation that
+ * won't lead to time appearing to jump backwards.
+ */
+static u64 arch_timer_read_zero(void)
+{
+        return 0;
+}
+
+u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;
+
+static cycle_t arch_counter_read(struct clocksource *cs)
+{
+        return arch_timer_read_counter();
+}
+
+static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
+{
+        return arch_timer_read_counter();
+}
+
+static struct clocksource clocksource_counter = {
+        .name   = "arch_sys_counter",
+        .rating = 400,
+        .read   = arch_counter_read,
+        .mask   = CLOCKSOURCE_MASK(56),
+        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static struct cyclecounter cyclecounter = {
+        .read   = arch_counter_read_cc,
+        .mask   = CLOCKSOURCE_MASK(56),
+};
+
+static struct timecounter timecounter;
+
+struct timecounter *arch_timer_get_timecounter(void)
+{
+        return &timecounter;
+}
+
+static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
+{
+        pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
+                 clk->irq, smp_processor_id());
+
+        if (arch_timer_use_virtual)
+                disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
+        else {
+                disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
+                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+                        disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
+        }
+
+        clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+}
+
+static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
+                                           unsigned long action, void *hcpu)
+{
+        struct clock_event_device *evt = this_cpu_ptr(arch_timer_evt);
+
+        switch (action & ~CPU_TASKS_FROZEN) {
+        case CPU_STARTING:
+                arch_timer_setup(evt);
+                break;
+        case CPU_DYING:
+                arch_timer_stop(evt);
+                break;
+        }
+
+        return NOTIFY_OK;
+}
+
+static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
+        .notifier_call = arch_timer_cpu_notify,
+};
+
+static int __init arch_timer_register(void)
+{
+        int err;
+        int ppi;
+
+        err = arch_timer_available();
+        if (err)
+                goto out;
+
+        arch_timer_evt = alloc_percpu(struct clock_event_device);
+        if (!arch_timer_evt) {
+                err = -ENOMEM;
+                goto out;
+        }
+
+        clocksource_register_hz(&clocksource_counter, arch_timer_rate);
+        cyclecounter.mult = clocksource_counter.mult;
+        cyclecounter.shift = clocksource_counter.shift;
+        timecounter_init(&timecounter, &cyclecounter,
+                         arch_counter_get_cntpct());
+
+        if (arch_timer_use_virtual) {
+                ppi = arch_timer_ppi[VIRT_PPI];
+                err = request_percpu_irq(ppi, arch_timer_handler_virt,
+                                         "arch_timer", arch_timer_evt);
+        } else {
+                ppi = arch_timer_ppi[PHYS_SECURE_PPI];
+                err = request_percpu_irq(ppi, arch_timer_handler_phys,
+                                         "arch_timer", arch_timer_evt);
+                if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
+                        ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
+                        err = request_percpu_irq(ppi, arch_timer_handler_phys,
+                                                 "arch_timer", arch_timer_evt);
+                        if (err)
+                                free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
+                                                arch_timer_evt);
+                }
+        }
+
+        if (err) {
+                pr_err("arch_timer: can't register interrupt %d (%d)\n",
+                       ppi, err);
+                goto out_free;
+        }
+
+        err = register_cpu_notifier(&arch_timer_cpu_nb);
+        if (err)
+                goto out_free_irq;
+
+        /* Immediately configure the timer on the boot CPU */
+        arch_timer_setup(this_cpu_ptr(arch_timer_evt));
+
+        return 0;
+
+out_free_irq:
+        if (arch_timer_use_virtual)
+                free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
+        else {
+                free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
+                                arch_timer_evt);
+                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+                        free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
+                                        arch_timer_evt);
+        }
+
+out_free:
+        free_percpu(arch_timer_evt);
+out:
+        return err;
+}
+
+static const struct of_device_id arch_timer_of_match[] __initconst = {
+        { .compatible = "arm,armv7-timer", },
+        {},
+};
+
+int __init arch_timer_init(void)
+{
+        struct device_node *np;
+        u32 freq;
+        int i;
+
+        np = of_find_matching_node(NULL, arch_timer_of_match);
+        if (!np) {
+                pr_err("arch_timer: can't find DT node\n");
+                return -ENODEV;
+        }
+
+        /* Try to determine the frequency from the device tree or CNTFRQ */
+        if (!of_property_read_u32(np, "clock-frequency", &freq))
+                arch_timer_rate = freq;
+
+        for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
+                arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
+
+        of_node_put(np);
+
+        /*
+         * If no interrupt provided for virtual timer, we'll have to
+         * stick to the physical timer. It'd better be accessible...
+         */
+        if (!arch_timer_ppi[VIRT_PPI]) {
+                arch_timer_use_virtual = false;
+
+                if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
+                    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
+                        pr_warn("arch_timer: No interrupt available, giving up\n");
+                        return -EINVAL;
+                }
+        }
+
+        if (arch_timer_use_virtual)
+                arch_timer_read_counter = arch_counter_get_cntvct;
+        else
+                arch_timer_read_counter = arch_counter_get_cntpct;
+
+        return arch_timer_register();
+}
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
new file mode 100644
index 000000000000..b61f9961b0cc
--- /dev/null
+++ b/include/clocksource/arm_arch_timer.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __CLKSOURCE_ARM_ARCH_TIMER_H
+#define __CLKSOURCE_ARM_ARCH_TIMER_H
+
+#include <linux/clocksource.h>
+#include <linux/types.h>
+
+#define ARCH_TIMER_CTRL_ENABLE          (1 << 0)
+#define ARCH_TIMER_CTRL_IT_MASK         (1 << 1)
+#define ARCH_TIMER_CTRL_IT_STAT         (1 << 2)
+
+#define ARCH_TIMER_REG_CTRL             0
+#define ARCH_TIMER_REG_TVAL             1
+
+#define ARCH_TIMER_PHYS_ACCESS          0
+#define ARCH_TIMER_VIRT_ACCESS          1
+
+#ifdef CONFIG_ARM_ARCH_TIMER
+
+extern int arch_timer_init(void);
+extern u32 arch_timer_get_rate(void);
+extern u64 (*arch_timer_read_counter)(void);
+extern struct timecounter *arch_timer_get_timecounter(void);
+
+#else
+
+static inline int arch_timer_init(void)
+{
+        return -ENXIO;
+}
+
+static inline u32 arch_timer_get_rate(void)
+{
+        return 0;
+}
+
+static inline u64 arch_timer_read_counter(void)
+{
+        return 0;
+}
+
+static struct timecounter *arch_timer_get_timecounter(void)
+{
+        return NULL;
+}
+
+#endif
+
+#endif