Diffstat (limited to 'drivers/clocksource')
 drivers/clocksource/Kconfig          |   6 +-
 drivers/clocksource/Makefile         |   2 +-
 drivers/clocksource/arm_arch_timer.c | 391 ++++++++++++++++++++++++++++++++
 drivers/clocksource/arm_generic.c    | 232 --------------------
 4 files changed, 394 insertions(+), 237 deletions(-)
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 7d978c1bd528..e920cbe519fa 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -60,7 +60,5 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
 	help
 	  Use the always on PRCMU Timer as sched_clock
 
-config CLKSRC_ARM_GENERIC
-	def_bool y if ARM64
-	help
-	  This option enables support for the ARM generic timer.
+config ARM_ARCH_TIMER
+	bool
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 596c45c2f192..7d671b85a98e 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -20,4 +20,4 @@ obj-$(CONFIG_SUNXI_TIMER)	+= sunxi_timer.o
 obj-$(CONFIG_ARCH_TEGRA)	+= tegra20_timer.o
 obj-$(CONFIG_VT8500_TIMER)	+= vt8500_timer.o
 
-obj-$(CONFIG_CLKSRC_ARM_GENERIC)	+= arm_generic.o
+obj-$(CONFIG_ARM_ARCH_TIMER)	+= arm_arch_timer.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
new file mode 100644
index 000000000000..d7ad425ab9b3
--- /dev/null
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -0,0 +1,391 @@
/*
 * linux/drivers/clocksource/arm_arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/io.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

static u32 arch_timer_rate;

enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

static bool arch_timer_use_virtual = true;

/*
 * Architected system timer support.
 */

static inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static inline void timer_set_mode(const int access, int mode)
{
	unsigned long ctrl;
	switch (mode) {
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
		ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
		break;
	default:
		break;
	}
}

static void arch_timer_set_mode_virt(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
}

static void arch_timer_set_mode_phys(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
}

static inline void set_next_event(const int access, unsigned long evt)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *unused)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *unused)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
	return 0;
}

static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
	clk->name = "arch_sys_timer";
	clk->rating = 450;
	if (arch_timer_use_virtual) {
		clk->irq = arch_timer_ppi[VIRT_PPI];
		clk->set_mode = arch_timer_set_mode_virt;
		clk->set_next_event = arch_timer_set_next_event_virt;
	} else {
		clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
		clk->set_mode = arch_timer_set_mode_phys;
		clk->set_next_event = arch_timer_set_next_event_phys;
	}

	clk->cpumask = cpumask_of(smp_processor_id());

	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);

	clockevents_config_and_register(clk, arch_timer_rate,
					0xf, 0x7fffffff);

	if (arch_timer_use_virtual)
		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
	else {
		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
	}

	arch_counter_set_user_access();

	return 0;
}

static int arch_timer_available(void)
{
	u32 freq;

	if (arch_timer_rate == 0) {
		freq = arch_timer_get_cntfrq();

		/* Check the timer frequency. */
		if (freq == 0) {
			pr_warn("Architected timer frequency not available\n");
			return -EINVAL;
		}

		arch_timer_rate = freq;
	}

	pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
		     (unsigned long)arch_timer_rate / 1000000,
		     (unsigned long)(arch_timer_rate / 10000) % 100,
		     arch_timer_use_virtual ? "virt" : "phys");
	return 0;
}

u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}

/*
 * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
 * call it before it has been initialised. Rather than incur a performance
 * penalty checking for initialisation, provide a default implementation that
 * won't lead to time appearing to jump backwards.
 */
static u64 arch_timer_read_zero(void)
{
	return 0;
}

u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;

static cycle_t arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

static struct timecounter timecounter;

struct timecounter *arch_timer_get_timecounter(void)
{
	return &timecounter;
}

static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	if (arch_timer_use_virtual)
		disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
	else {
		disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
	}

	clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}

static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
					   unsigned long action, void *hcpu)
{
	struct clock_event_device *evt = this_cpu_ptr(arch_timer_evt);

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		arch_timer_setup(evt);
		break;
	case CPU_DYING:
		arch_timer_stop(evt);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
	.notifier_call = arch_timer_cpu_notify,
};

static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	err = arch_timer_available();
	if (err)
		goto out;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&timecounter, &cyclecounter,
			 arch_counter_get_cntpct());

	if (arch_timer_use_virtual) {
		ppi = arch_timer_ppi[VIRT_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
	} else {
		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&arch_timer_cpu_nb);
	if (err)
		goto out_free_irq;

	/* Immediately configure the timer on the boot CPU */
	arch_timer_setup(this_cpu_ptr(arch_timer_evt));

	return 0;

out_free_irq:
	if (arch_timer_use_virtual)
		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
	else {
		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
				arch_timer_evt);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
					arch_timer_evt);
	}

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer", },
	{ .compatible = "arm,armv8-timer", },
	{},
};

int __init arch_timer_init(void)
{
	struct device_node *np;
	u32 freq;
	int i;

	np = of_find_matching_node(NULL, arch_timer_of_match);
	if (!np) {
		pr_err("arch_timer: can't find DT node\n");
		return -ENODEV;
	}

	/* Try to determine the frequency from the device tree or CNTFRQ */
	if (!of_property_read_u32(np, "clock-frequency", &freq))
		arch_timer_rate = freq;

	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	of_node_put(np);

	/*
	 * If HYP mode is available, we know that the physical timer
	 * has been configured to be accessible from PL1. Use it, so
	 * that a guest can use the virtual timer instead.
	 *
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 */
	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
		arch_timer_use_virtual = false;

		if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return -EINVAL;
		}
	}

	if (arch_timer_use_virtual)
		arch_timer_read_counter = arch_counter_get_cntvct;
	else
		arch_timer_read_counter = arch_counter_get_cntpct;

	return arch_timer_register();
}
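Note that arch_timer_init() only exports the raw hooks (the arch_timer_read_counter function pointer, arch_timer_get_rate(), and the registered clocksource/clockevents); the sched_clock() implementation that the deleted arm_generic.c carried now has to live in architecture code. Below is a minimal sketch of what that arch-side glue might look like, assuming the hooks are declared via <asm/arch_timer.h> and <clocksource/arm_arch_timer.h>; the placement in time_init() and the cached sched_clock_mult mirror the removed driver and are illustrative, not part of this patch.

/*
 * Hypothetical arch-side glue (not part of this patch): build sched_clock()
 * on top of the hooks exported by arm_arch_timer.c.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/time.h>			/* NSEC_PER_SEC */
#include <asm/arch_timer.h>		/* assumed home of arch_timer_read_counter */
#include <clocksource/arm_arch_timer.h>	/* arch_timer_init(), arch_timer_get_rate() */

static u64 sched_clock_mult __read_mostly;	/* ns per counter tick, cached */

unsigned long long notrace sched_clock(void)
{
	/* Safe even before arch_timer_init(): the default read hook returns 0. */
	return arch_timer_read_counter() * sched_clock_mult;
}

void __init time_init(void)
{
	if (arch_timer_init())
		panic("Unable to initialise architected timer.\n");

	/*
	 * Cache the multiplier to avoid a divide in the hot path, as the
	 * removed arm_generic.c did.
	 */
	sched_clock_mult = DIV_ROUND_CLOSEST(NSEC_PER_SEC,
					     arch_timer_get_rate());
}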
diff --git a/drivers/clocksource/arm_generic.c b/drivers/clocksource/arm_generic.c
deleted file mode 100644
index 8ae1a61523ff..000000000000
--- a/drivers/clocksource/arm_generic.c
+++ /dev/null
@@ -1,232 +0,0 @@
/*
 * Generic timers support
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/of_irq.h>
#include <linux/io.h>

#include <clocksource/arm_generic.h>

#include <asm/arm_generic.h>

static u32 arch_timer_rate;
static u64 sched_clock_mult __read_mostly;
static DEFINE_PER_CPU(struct clock_event_device, arch_timer_evt);
static int arch_timer_ppi;

static irqreturn_t arch_timer_handle_irq(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
	if (ctrl & ARCH_TIMER_CTRL_ISTATUS) {
		ctrl |= ARCH_TIMER_CTRL_IMASK;
		arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void arch_timer_stop(void)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
}

static void arch_timer_set_mode(enum clock_event_mode mode,
				struct clock_event_device *clk)
{
	switch (mode) {
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		arch_timer_stop();
		break;
	default:
		break;
	}
}

static int arch_timer_set_next_event(unsigned long evt,
				     struct clock_event_device *unused)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IMASK;

	arch_timer_reg_write(ARCH_TIMER_REG_TVAL, evt);
	arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);

	return 0;
}

static void __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
	/* Let's make sure the timer is off before doing anything else */
	arch_timer_stop();

	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
	clk->name = "arch_sys_timer";
	clk->rating = 400;
	clk->set_mode = arch_timer_set_mode;
	clk->set_next_event = arch_timer_set_next_event;
	clk->irq = arch_timer_ppi;
	clk->cpumask = cpumask_of(smp_processor_id());

	clockevents_config_and_register(clk, arch_timer_rate,
					0xf, 0x7fffffff);

	enable_percpu_irq(clk->irq, 0);

	/* Ensure the virtual counter is visible to userspace for the vDSO. */
	arch_counter_enable_user_access();
}

static void __init arch_timer_calibrate(void)
{
	if (arch_timer_rate == 0) {
		arch_timer_reg_write(ARCH_TIMER_REG_CTRL, 0);
		arch_timer_rate = arch_timer_reg_read(ARCH_TIMER_REG_FREQ);

		/* Check the timer frequency. */
		if (arch_timer_rate == 0)
			panic("Architected timer frequency is set to zero.\n"
			      "You must set this in your .dts file\n");
	}

	/* Cache the sched_clock multiplier to save a divide in the hot path. */

	sched_clock_mult = DIV_ROUND_CLOSEST(NSEC_PER_SEC, arch_timer_rate);

	pr_info("Architected local timer running at %u.%02uMHz.\n",
		arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100);
}

static cycle_t arch_counter_read(struct clocksource *cs)
{
	return arch_counter_get_cntpct();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= (CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_VALID_FOR_HRES),
};

int read_current_timer(unsigned long *timer_value)
{
	*timer_value = arch_counter_get_cntpct();
	return 0;
}

unsigned long long notrace sched_clock(void)
{
	return arch_counter_get_cntvct() * sched_clock_mult;
}

static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
					   unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	struct clock_event_device *clk = per_cpu_ptr(&arch_timer_evt, cpu);

	switch(action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		arch_timer_setup(clk);
		break;

	case CPU_DYING:
	case CPU_DYING_FROZEN:
		pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
			 clk->irq, cpu);
		disable_percpu_irq(clk->irq);
		arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
	.notifier_call = arch_timer_cpu_notify,
};

static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible = "arm,armv8-timer" },
	{},
};

int __init arm_generic_timer_init(void)
{
	struct device_node *np;
	int err;
	u32 freq;

	np = of_find_matching_node(NULL, arch_timer_of_match);
	if (!np) {
		pr_err("arch_timer: can't find DT node\n");
		return -ENODEV;
	}

	/* Try to determine the frequency from the device tree or CNTFRQ */
	if (!of_property_read_u32(np, "clock-frequency", &freq))
		arch_timer_rate = freq;
	arch_timer_calibrate();

	arch_timer_ppi = irq_of_parse_and_map(np, 0);
	pr_info("arch_timer: found %s irq %d\n", np->name, arch_timer_ppi);

	err = request_percpu_irq(arch_timer_ppi, arch_timer_handle_irq,
				 np->name, &arch_timer_evt);
	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       arch_timer_ppi, err);
		return err;
	}

	clocksource_register_hz(&clocksource_counter, arch_timer_rate);

	/* Calibrate the delay loop directly */
	lpj_fine = DIV_ROUND_CLOSEST(arch_timer_rate, HZ);

	/* Immediately configure the timer on the boot CPU */
	arch_timer_setup(this_cpu_ptr(&arch_timer_evt));

	register_cpu_notifier(&arch_timer_cpu_nb);

	return 0;
}
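One thing the new driver exports that the deleted one did not is a timecounter wrapped around the counter (arch_timer_get_timecounter() above), giving interested subsystems a monotonic nanosecond view of the same hardware. A minimal sketch of a consumer follows, assuming the accessor is declared in <clocksource/arm_arch_timer.h> and using the generic timecounter_read() helper, which at this point lives in <linux/clocksource.h>; the function name is made up for illustration.

/*
 * Hypothetical consumer of the exported timecounter; measure_elapsed_ns()
 * is illustrative only.
 */
#include <linux/clocksource.h>		/* struct timecounter, timecounter_read() */
#include <clocksource/arm_arch_timer.h>	/* arch_timer_get_timecounter() */

static u64 measure_elapsed_ns(void (*work)(void))
{
	struct timecounter *tc = arch_timer_get_timecounter();
	u64 start, end;

	start = timecounter_read(tc);	/* ns since timecounter_init() */
	work();				/* the operation being timed */
	end = timecounter_read(tc);

	return end - start;
}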