aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm
diff options
context:
space:
mode:
authorMarc Zyngier <Marc.Zyngier@arm.com>2012-09-07 13:09:57 -0400
committerRussell King <rmk+kernel@arm.linux.org.uk>2012-09-15 16:53:32 -0400
commitf48b5f12799dbabba4a9c799a9bef0775b2f977d (patch)
tree088d7b6a3a21d3e69bc7b23fb746923b86ca0843 /arch/arm
parent55d512e245bc7699a8800e23df1a24195dd08217 (diff)
ARM: 7523/1: arch_timers: enable the use of the virtual timer
At the moment, the arch_timer driver only uses the physical timer, which can cause problems if PL2 hasn't enabled PL1 access in CNTHCTL, which is likely in a virtualized environment. Instead, the virtual timer is always available. This patch enables the use of the virtual timer, unless no interrupt is provided in the DT for it, in which case it falls back to the physical timer. Signed-off-by: Marc Zyngier <marc.zyngier@arm.com> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm')
-rw-r--r--arch/arm/kernel/arch_timer.c343
1 file changed, 241 insertions(+), 102 deletions(-)
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index cf258807160d..c4e20b69197a 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -27,13 +27,23 @@
27#include <asm/sched_clock.h> 27#include <asm/sched_clock.h>
28 28
29static unsigned long arch_timer_rate; 29static unsigned long arch_timer_rate;
30static int arch_timer_ppi; 30
31static int arch_timer_ppi2; 31enum ppi_nr {
32 PHYS_SECURE_PPI,
33 PHYS_NONSECURE_PPI,
34 VIRT_PPI,
35 HYP_PPI,
36 MAX_TIMER_PPI
37};
38
39static int arch_timer_ppi[MAX_TIMER_PPI];
32 40
33static struct clock_event_device __percpu **arch_timer_evt; 41static struct clock_event_device __percpu **arch_timer_evt;
34 42
35extern void init_current_timer_delay(unsigned long freq); 43extern void init_current_timer_delay(unsigned long freq);
36 44
45static bool arch_timer_use_virtual = true;
46
37/* 47/*
38 * Architected system timer support. 48 * Architected system timer support.
39 */ 49 */
@@ -46,50 +56,104 @@ extern void init_current_timer_delay(unsigned long freq);
46#define ARCH_TIMER_REG_FREQ 1 56#define ARCH_TIMER_REG_FREQ 1
47#define ARCH_TIMER_REG_TVAL 2 57#define ARCH_TIMER_REG_TVAL 2
48 58
49static void arch_timer_reg_write(int reg, u32 val) 59#define ARCH_TIMER_PHYS_ACCESS 0
60#define ARCH_TIMER_VIRT_ACCESS 1
61
62/*
63 * These register accessors are marked inline so the compiler can
64 * nicely work out which register we want, and chuck away the rest of
65 * the code. At least it does so with a recent GCC (4.6.3).
66 */
67static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
50{ 68{
51 switch (reg) { 69 if (access == ARCH_TIMER_PHYS_ACCESS) {
52 case ARCH_TIMER_REG_CTRL: 70 switch (reg) {
53 asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val)); 71 case ARCH_TIMER_REG_CTRL:
54 break; 72 asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
55 case ARCH_TIMER_REG_TVAL: 73 break;
56 asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val)); 74 case ARCH_TIMER_REG_TVAL:
57 break; 75 asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
76 break;
77 }
78 }
79
80 if (access == ARCH_TIMER_VIRT_ACCESS) {
81 switch (reg) {
82 case ARCH_TIMER_REG_CTRL:
83 asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
84 break;
85 case ARCH_TIMER_REG_TVAL:
86 asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
87 break;
88 }
58 } 89 }
59 90
60 isb(); 91 isb();
61} 92}
62 93
63static u32 arch_timer_reg_read(int reg) 94static inline u32 arch_timer_reg_read(const int access, const int reg)
64{ 95{
65 u32 val; 96 u32 val = 0;
97
98 if (access == ARCH_TIMER_PHYS_ACCESS) {
99 switch (reg) {
100 case ARCH_TIMER_REG_CTRL:
101 asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
102 break;
103 case ARCH_TIMER_REG_TVAL:
104 asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
105 break;
106 case ARCH_TIMER_REG_FREQ:
107 asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
108 break;
109 }
110 }
66 111
67 switch (reg) { 112 if (access == ARCH_TIMER_VIRT_ACCESS) {
68 case ARCH_TIMER_REG_CTRL: 113 switch (reg) {
69 asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val)); 114 case ARCH_TIMER_REG_CTRL:
70 break; 115 asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
71 case ARCH_TIMER_REG_FREQ: 116 break;
72 asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val)); 117 case ARCH_TIMER_REG_TVAL:
73 break; 118 asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
74 case ARCH_TIMER_REG_TVAL: 119 break;
75 asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val)); 120 }
76 break;
77 default:
78 BUG();
79 } 121 }
80 122
81 return val; 123 return val;
82} 124}
83 125
84static irqreturn_t arch_timer_handler(int irq, void *dev_id) 126static inline cycle_t arch_timer_counter_read(const int access)
85{ 127{
86 struct clock_event_device *evt = *(struct clock_event_device **)dev_id; 128 cycle_t cval = 0;
87 unsigned long ctrl; 129
130 if (access == ARCH_TIMER_PHYS_ACCESS)
131 asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
132
133 if (access == ARCH_TIMER_VIRT_ACCESS)
134 asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
135
136 return cval;
137}
88 138
89 ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL); 139static inline cycle_t arch_counter_get_cntpct(void)
140{
141 return arch_timer_counter_read(ARCH_TIMER_PHYS_ACCESS);
142}
143
144static inline cycle_t arch_counter_get_cntvct(void)
145{
146 return arch_timer_counter_read(ARCH_TIMER_VIRT_ACCESS);
147}
148
149static irqreturn_t inline timer_handler(const int access,
150 struct clock_event_device *evt)
151{
152 unsigned long ctrl;
153 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
90 if (ctrl & ARCH_TIMER_CTRL_IT_STAT) { 154 if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
91 ctrl |= ARCH_TIMER_CTRL_IT_MASK; 155 ctrl |= ARCH_TIMER_CTRL_IT_MASK;
92 arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl); 156 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
93 evt->event_handler(evt); 157 evt->event_handler(evt);
94 return IRQ_HANDLED; 158 return IRQ_HANDLED;
95 } 159 }
@@ -97,63 +161,100 @@ static irqreturn_t arch_timer_handler(int irq, void *dev_id)
97 return IRQ_NONE; 161 return IRQ_NONE;
98} 162}
99 163
100static void arch_timer_disable(void) 164static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
101{ 165{
102 unsigned long ctrl; 166 struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
103 167
104 ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL); 168 return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
105 ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
106 arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
107} 169}
108 170
109static void arch_timer_set_mode(enum clock_event_mode mode, 171static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
110 struct clock_event_device *clk)
111{ 172{
173 struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
174
175 return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
176}
177
178static inline void timer_set_mode(const int access, int mode)
179{
180 unsigned long ctrl;
112 switch (mode) { 181 switch (mode) {
113 case CLOCK_EVT_MODE_UNUSED: 182 case CLOCK_EVT_MODE_UNUSED:
114 case CLOCK_EVT_MODE_SHUTDOWN: 183 case CLOCK_EVT_MODE_SHUTDOWN:
115 arch_timer_disable(); 184 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
185 ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
186 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
116 break; 187 break;
117 default: 188 default:
118 break; 189 break;
119 } 190 }
120} 191}
121 192
122static int arch_timer_set_next_event(unsigned long evt, 193static void arch_timer_set_mode_virt(enum clock_event_mode mode,
123 struct clock_event_device *unused) 194 struct clock_event_device *clk)
124{ 195{
125 unsigned long ctrl; 196 timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
197}
198
199static void arch_timer_set_mode_phys(enum clock_event_mode mode,
200 struct clock_event_device *clk)
201{
202 timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
203}
126 204
127 ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL); 205static inline void set_next_event(const int access, unsigned long evt)
206{
207 unsigned long ctrl;
208 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
128 ctrl |= ARCH_TIMER_CTRL_ENABLE; 209 ctrl |= ARCH_TIMER_CTRL_ENABLE;
129 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; 210 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
211 arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
212 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
213}
130 214
131 arch_timer_reg_write(ARCH_TIMER_REG_TVAL, evt); 215static int arch_timer_set_next_event_virt(unsigned long evt,
132 arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl); 216 struct clock_event_device *unused)
217{
218 set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
219 return 0;
220}
133 221
222static int arch_timer_set_next_event_phys(unsigned long evt,
223 struct clock_event_device *unused)
224{
225 set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
134 return 0; 226 return 0;
135} 227}
136 228
137static int __cpuinit arch_timer_setup(struct clock_event_device *clk) 229static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
138{ 230{
139 /* Be safe... */
140 arch_timer_disable();
141
142 clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP; 231 clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
143 clk->name = "arch_sys_timer"; 232 clk->name = "arch_sys_timer";
144 clk->rating = 450; 233 clk->rating = 450;
145 clk->set_mode = arch_timer_set_mode; 234 if (arch_timer_use_virtual) {
146 clk->set_next_event = arch_timer_set_next_event; 235 clk->irq = arch_timer_ppi[VIRT_PPI];
147 clk->irq = arch_timer_ppi; 236 clk->set_mode = arch_timer_set_mode_virt;
237 clk->set_next_event = arch_timer_set_next_event_virt;
238 } else {
239 clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
240 clk->set_mode = arch_timer_set_mode_phys;
241 clk->set_next_event = arch_timer_set_next_event_phys;
242 }
243
244 clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);
148 245
149 clockevents_config_and_register(clk, arch_timer_rate, 246 clockevents_config_and_register(clk, arch_timer_rate,
150 0xf, 0x7fffffff); 247 0xf, 0x7fffffff);
151 248
152 *__this_cpu_ptr(arch_timer_evt) = clk; 249 *__this_cpu_ptr(arch_timer_evt) = clk;
153 250
154 enable_percpu_irq(clk->irq, 0); 251 if (arch_timer_use_virtual)
155 if (arch_timer_ppi2) 252 enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
156 enable_percpu_irq(arch_timer_ppi2, 0); 253 else {
254 enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
255 if (arch_timer_ppi[PHYS_NONSECURE_PPI])
256 enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
257 }
157 258
158 return 0; 259 return 0;
159} 260}
@@ -173,8 +274,8 @@ static int arch_timer_available(void)
173 return -ENXIO; 274 return -ENXIO;
174 275
175 if (arch_timer_rate == 0) { 276 if (arch_timer_rate == 0) {
176 arch_timer_reg_write(ARCH_TIMER_REG_CTRL, 0); 277 freq = arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS,
177 freq = arch_timer_reg_read(ARCH_TIMER_REG_FREQ); 278 ARCH_TIMER_REG_FREQ);
178 279
179 /* Check the timer frequency. */ 280 /* Check the timer frequency. */
180 if (freq == 0) { 281 if (freq == 0) {
@@ -185,43 +286,42 @@ static int arch_timer_available(void)
185 arch_timer_rate = freq; 286 arch_timer_rate = freq;
186 } 287 }
187 288
188 pr_info_once("Architected local timer running at %lu.%02luMHz.\n", 289 pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
189 arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100); 290 arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100,
291 arch_timer_use_virtual ? "virt" : "phys");
190 return 0; 292 return 0;
191} 293}
192 294
193static inline cycle_t arch_counter_get_cntpct(void) 295static u32 notrace arch_counter_get_cntpct32(void)
194{ 296{
195 u32 cvall, cvalh; 297 cycle_t cnt = arch_counter_get_cntpct();
196
197 asm volatile("mrrc p15, 0, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
198 298
199 return ((cycle_t) cvalh << 32) | cvall; 299 /*
200} 300 * The sched_clock infrastructure only knows about counters
201 301 * with at most 32bits. Forget about the upper 24 bits for the
202static inline cycle_t arch_counter_get_cntvct(void) 302 * time being...
203{ 303 */
204 u32 cvall, cvalh; 304 return (u32)cnt;
205
206 asm volatile("mrrc p15, 1, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
207
208 return ((cycle_t) cvalh << 32) | cvall;
209} 305}
210 306
211static u32 notrace arch_counter_get_cntvct32(void) 307static u32 notrace arch_counter_get_cntvct32(void)
212{ 308{
213 cycle_t cntvct = arch_counter_get_cntvct(); 309 cycle_t cnt = arch_counter_get_cntvct();
214 310
215 /* 311 /*
216 * The sched_clock infrastructure only knows about counters 312 * The sched_clock infrastructure only knows about counters
217 * with at most 32bits. Forget about the upper 24 bits for the 313 * with at most 32bits. Forget about the upper 24 bits for the
218 * time being... 314 * time being...
219 */ 315 */
220 return (u32)(cntvct & (u32)~0); 316 return (u32)cnt;
221} 317}
222 318
223static cycle_t arch_counter_read(struct clocksource *cs) 319static cycle_t arch_counter_read(struct clocksource *cs)
224{ 320{
321 /*
322 * Always use the physical counter for the clocksource.
323 * CNTHCTL.PL1PCTEN must be set to 1.
324 */
225 return arch_counter_get_cntpct(); 325 return arch_counter_get_cntpct();
226} 326}
227 327
@@ -245,10 +345,16 @@ static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
245{ 345{
246 pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n", 346 pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
247 clk->irq, smp_processor_id()); 347 clk->irq, smp_processor_id());
248 disable_percpu_irq(clk->irq); 348
249 if (arch_timer_ppi2) 349 if (arch_timer_use_virtual)
250 disable_percpu_irq(arch_timer_ppi2); 350 disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
251 arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk); 351 else {
352 disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
353 if (arch_timer_ppi[PHYS_NONSECURE_PPI])
354 disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
355 }
356
357 clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
252} 358}
253 359
254static struct local_timer_ops arch_timer_ops __cpuinitdata = { 360static struct local_timer_ops arch_timer_ops __cpuinitdata = {
@@ -261,36 +367,44 @@ static struct clock_event_device arch_timer_global_evt;
261static int __init arch_timer_register(void) 367static int __init arch_timer_register(void)
262{ 368{
263 int err; 369 int err;
370 int ppi;
264 371
265 err = arch_timer_available(); 372 err = arch_timer_available();
266 if (err) 373 if (err)
267 return err; 374 goto out;
268 375
269 arch_timer_evt = alloc_percpu(struct clock_event_device *); 376 arch_timer_evt = alloc_percpu(struct clock_event_device *);
270 if (!arch_timer_evt) 377 if (!arch_timer_evt) {
271 return -ENOMEM; 378 err = -ENOMEM;
379 goto out;
380 }
272 381
273 clocksource_register_hz(&clocksource_counter, arch_timer_rate); 382 clocksource_register_hz(&clocksource_counter, arch_timer_rate);
274 383
275 err = request_percpu_irq(arch_timer_ppi, arch_timer_handler, 384 if (arch_timer_use_virtual) {
276 "arch_timer", arch_timer_evt); 385 ppi = arch_timer_ppi[VIRT_PPI];
386 err = request_percpu_irq(ppi, arch_timer_handler_virt,
387 "arch_timer", arch_timer_evt);
388 } else {
389 ppi = arch_timer_ppi[PHYS_SECURE_PPI];
390 err = request_percpu_irq(ppi, arch_timer_handler_phys,
391 "arch_timer", arch_timer_evt);
392 if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
393 ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
394 err = request_percpu_irq(ppi, arch_timer_handler_phys,
395 "arch_timer", arch_timer_evt);
396 if (err)
397 free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
398 arch_timer_evt);
399 }
400 }
401
277 if (err) { 402 if (err) {
278 pr_err("arch_timer: can't register interrupt %d (%d)\n", 403 pr_err("arch_timer: can't register interrupt %d (%d)\n",
279 arch_timer_ppi, err); 404 ppi, err);
280 goto out_free; 405 goto out_free;
281 } 406 }
282 407
283 if (arch_timer_ppi2) {
284 err = request_percpu_irq(arch_timer_ppi2, arch_timer_handler,
285 "arch_timer", arch_timer_evt);
286 if (err) {
287 pr_err("arch_timer: can't register interrupt %d (%d)\n",
288 arch_timer_ppi2, err);
289 arch_timer_ppi2 = 0;
290 goto out_free_irq;
291 }
292 }
293
294 err = local_timer_register(&arch_timer_ops); 408 err = local_timer_register(&arch_timer_ops);
295 if (err) { 409 if (err) {
296 /* 410 /*
@@ -310,13 +424,19 @@ static int __init arch_timer_register(void)
310 return 0; 424 return 0;
311 425
312out_free_irq: 426out_free_irq:
313 free_percpu_irq(arch_timer_ppi, arch_timer_evt); 427 if (arch_timer_use_virtual)
314 if (arch_timer_ppi2) 428 free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
315 free_percpu_irq(arch_timer_ppi2, arch_timer_evt); 429 else {
430 free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
431 arch_timer_evt);
432 if (arch_timer_ppi[PHYS_NONSECURE_PPI])
433 free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
434 arch_timer_evt);
435 }
316 436
317out_free: 437out_free:
318 free_percpu(arch_timer_evt); 438 free_percpu(arch_timer_evt);
319 439out:
320 return err; 440 return err;
321} 441}
322 442
@@ -329,6 +449,7 @@ int __init arch_timer_of_register(void)
329{ 449{
330 struct device_node *np; 450 struct device_node *np;
331 u32 freq; 451 u32 freq;
452 int i;
332 453
333 np = of_find_matching_node(NULL, arch_timer_of_match); 454 np = of_find_matching_node(NULL, arch_timer_of_match);
334 if (!np) { 455 if (!np) {
@@ -340,22 +461,40 @@ int __init arch_timer_of_register(void)
340 if (!of_property_read_u32(np, "clock-frequency", &freq)) 461 if (!of_property_read_u32(np, "clock-frequency", &freq))
341 arch_timer_rate = freq; 462 arch_timer_rate = freq;
342 463
343 arch_timer_ppi = irq_of_parse_and_map(np, 0); 464 for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
344 arch_timer_ppi2 = irq_of_parse_and_map(np, 1); 465 arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
345 pr_info("arch_timer: found %s irqs %d %d\n", 466
346 np->name, arch_timer_ppi, arch_timer_ppi2); 467 /*
468 * If no interrupt provided for virtual timer, we'll have to
469 * stick to the physical timer. It'd better be accessible...
470 */
471 if (!arch_timer_ppi[VIRT_PPI]) {
472 arch_timer_use_virtual = false;
473
474 if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
475 !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
476 pr_warn("arch_timer: No interrupt available, giving up\n");
477 return -EINVAL;
478 }
479 }
347 480
348 return arch_timer_register(); 481 return arch_timer_register();
349} 482}
350 483
351int __init arch_timer_sched_clock_init(void) 484int __init arch_timer_sched_clock_init(void)
352{ 485{
486 u32 (*cnt32)(void);
353 int err; 487 int err;
354 488
355 err = arch_timer_available(); 489 err = arch_timer_available();
356 if (err) 490 if (err)
357 return err; 491 return err;
358 492
359 setup_sched_clock(arch_counter_get_cntvct32, 32, arch_timer_rate); 493 if (arch_timer_use_virtual)
494 cnt32 = arch_counter_get_cntvct32;
495 else
496 cnt32 = arch_counter_get_cntpct32;
497
498 setup_sched_clock(cnt32, 32, arch_timer_rate);
360 return 0; 499 return 0;
361} 500}