Diffstat (limited to 'arch/arm/kernel/arch_timer.c')
 arch/arm/kernel/arch_timer.c | 383 ++++++++++++++++++++++++++++++-----------
 1 file changed, 273 insertions(+), 110 deletions(-)
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index cf258807160d..c8ef20747ee7 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -21,18 +21,28 @@
 #include <linux/io.h>
 
 #include <asm/cputype.h>
+#include <asm/delay.h>
 #include <asm/localtimer.h>
 #include <asm/arch_timer.h>
 #include <asm/system_info.h>
 #include <asm/sched_clock.h>
 
 static unsigned long arch_timer_rate;
-static int arch_timer_ppi;
-static int arch_timer_ppi2;
+
+enum ppi_nr {
+	PHYS_SECURE_PPI,
+	PHYS_NONSECURE_PPI,
+	VIRT_PPI,
+	HYP_PPI,
+	MAX_TIMER_PPI
+};
+
+static int arch_timer_ppi[MAX_TIMER_PPI];
 
 static struct clock_event_device __percpu **arch_timer_evt;
+static struct delay_timer arch_delay_timer;
 
-extern void init_current_timer_delay(unsigned long freq);
+static bool arch_timer_use_virtual = true;
 
 /*
  * Architected system timer support.
@@ -46,50 +56,104 @@ extern void init_current_timer_delay(unsigned long freq);
 #define ARCH_TIMER_REG_FREQ		1
 #define ARCH_TIMER_REG_TVAL		2
 
-static void arch_timer_reg_write(int reg, u32 val)
+#define ARCH_TIMER_PHYS_ACCESS		0
+#define ARCH_TIMER_VIRT_ACCESS		1
+
+/*
+ * These register accessors are marked inline so the compiler can
+ * nicely work out which register we want, and chuck away the rest of
+ * the code. At least it does so with a recent GCC (4.6.3).
+ */
+static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
 {
-	switch (reg) {
-	case ARCH_TIMER_REG_CTRL:
-		asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
-		break;
-	case ARCH_TIMER_REG_TVAL:
-		asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
-		break;
+	if (access == ARCH_TIMER_PHYS_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
+			break;
+		}
+	}
+
+	if (access == ARCH_TIMER_VIRT_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
+			break;
+		}
 	}
 
 	isb();
 }
 
-static u32 arch_timer_reg_read(int reg)
+static inline u32 arch_timer_reg_read(const int access, const int reg)
 {
-	u32 val;
+	u32 val = 0;
+
+	if (access == ARCH_TIMER_PHYS_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
+			break;
+		case ARCH_TIMER_REG_FREQ:
+			asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
+			break;
+		}
+	}
 
-	switch (reg) {
-	case ARCH_TIMER_REG_CTRL:
-		asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
-		break;
-	case ARCH_TIMER_REG_FREQ:
-		asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
-		break;
-	case ARCH_TIMER_REG_TVAL:
-		asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
-		break;
-	default:
-		BUG();
+	if (access == ARCH_TIMER_VIRT_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
+			break;
+		}
 	}
 
 	return val;
 }
 
-static irqreturn_t arch_timer_handler(int irq, void *dev_id)
+static inline cycle_t arch_timer_counter_read(const int access)
 {
-	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
-	unsigned long ctrl;
+	cycle_t cval = 0;
+
+	if (access == ARCH_TIMER_PHYS_ACCESS)
+		asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
+
+	if (access == ARCH_TIMER_VIRT_ACCESS)
+		asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
+
+	return cval;
+}
+
+static inline cycle_t arch_counter_get_cntpct(void)
+{
+	return arch_timer_counter_read(ARCH_TIMER_PHYS_ACCESS);
+}
 
-	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
+static inline cycle_t arch_counter_get_cntvct(void)
+{
+	return arch_timer_counter_read(ARCH_TIMER_VIRT_ACCESS);
+}
+
+static irqreturn_t inline timer_handler(const int access,
+					struct clock_event_device *evt)
+{
+	unsigned long ctrl;
+	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
 	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
 		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
-		arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
 		evt->event_handler(evt);
 		return IRQ_HANDLED;
 	}
@@ -97,63 +161,100 @@ static irqreturn_t arch_timer_handler(int irq, void *dev_id)
 	return IRQ_NONE;
 }
 
-static void arch_timer_disable(void)
+static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
 {
-	unsigned long ctrl;
+	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
 
-	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
-	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
-	arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
 }
 
-static void arch_timer_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *clk)
+static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
 {
+	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+
+	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
+}
+
+static inline void timer_set_mode(const int access, int mode)
+{
+	unsigned long ctrl;
 	switch (mode) {
 	case CLOCK_EVT_MODE_UNUSED:
 	case CLOCK_EVT_MODE_SHUTDOWN:
-		arch_timer_disable();
+		ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+		ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
 		break;
 	default:
 		break;
 	}
 }
 
-static int arch_timer_set_next_event(unsigned long evt,
-				     struct clock_event_device *unused)
+static void arch_timer_set_mode_virt(enum clock_event_mode mode,
+				     struct clock_event_device *clk)
 {
-	unsigned long ctrl;
+	timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
+}
 
-	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
+static void arch_timer_set_mode_phys(enum clock_event_mode mode,
+				     struct clock_event_device *clk)
+{
+	timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
+}
+
+static inline void set_next_event(const int access, unsigned long evt)
+{
+	unsigned long ctrl;
+	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
 	ctrl |= ARCH_TIMER_CTRL_ENABLE;
 	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
+	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+}
 
-	arch_timer_reg_write(ARCH_TIMER_REG_TVAL, evt);
-	arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+static int arch_timer_set_next_event_virt(unsigned long evt,
+					  struct clock_event_device *unused)
+{
+	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
+	return 0;
+}
 
+static int arch_timer_set_next_event_phys(unsigned long evt,
+					  struct clock_event_device *unused)
+{
+	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
 	return 0;
 }
 
 static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
 {
-	/* Be safe... */
-	arch_timer_disable();
-
 	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
 	clk->name = "arch_sys_timer";
 	clk->rating = 450;
-	clk->set_mode = arch_timer_set_mode;
-	clk->set_next_event = arch_timer_set_next_event;
-	clk->irq = arch_timer_ppi;
+	if (arch_timer_use_virtual) {
+		clk->irq = arch_timer_ppi[VIRT_PPI];
+		clk->set_mode = arch_timer_set_mode_virt;
+		clk->set_next_event = arch_timer_set_next_event_virt;
+	} else {
+		clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
+		clk->set_mode = arch_timer_set_mode_phys;
+		clk->set_next_event = arch_timer_set_next_event_phys;
+	}
+
+	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);
 
 	clockevents_config_and_register(clk, arch_timer_rate,
 					0xf, 0x7fffffff);
 
 	*__this_cpu_ptr(arch_timer_evt) = clk;
 
-	enable_percpu_irq(clk->irq, 0);
-	if (arch_timer_ppi2)
-		enable_percpu_irq(arch_timer_ppi2, 0);
+	if (arch_timer_use_virtual)
+		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
+	else {
+		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
+		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
+	}
 
 	return 0;
 }
@@ -173,8 +274,8 @@ static int arch_timer_available(void)
 		return -ENXIO;
 
 	if (arch_timer_rate == 0) {
-		arch_timer_reg_write(ARCH_TIMER_REG_CTRL, 0);
-		freq = arch_timer_reg_read(ARCH_TIMER_REG_FREQ);
+		freq = arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS,
+					   ARCH_TIMER_REG_FREQ);
 
 		/* Check the timer frequency. */
 		if (freq == 0) {
@@ -185,52 +286,57 @@ static int arch_timer_available(void)
 		arch_timer_rate = freq;
 	}
 
-	pr_info_once("Architected local timer running at %lu.%02luMHz.\n",
-		     arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100);
+	pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
+		     arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100,
+		     arch_timer_use_virtual ? "virt" : "phys");
 	return 0;
 }
 
-static inline cycle_t arch_counter_get_cntpct(void)
+static u32 notrace arch_counter_get_cntpct32(void)
 {
-	u32 cvall, cvalh;
-
-	asm volatile("mrrc p15, 0, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
+	cycle_t cnt = arch_counter_get_cntpct();
 
-	return ((cycle_t) cvalh << 32) | cvall;
-}
-
-static inline cycle_t arch_counter_get_cntvct(void)
-{
-	u32 cvall, cvalh;
-
-	asm volatile("mrrc p15, 1, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
-
-	return ((cycle_t) cvalh << 32) | cvall;
+	/*
+	 * The sched_clock infrastructure only knows about counters
+	 * with at most 32bits. Forget about the upper 24 bits for the
+	 * time being...
+	 */
+	return (u32)cnt;
 }
 
 static u32 notrace arch_counter_get_cntvct32(void)
 {
-	cycle_t cntvct = arch_counter_get_cntvct();
+	cycle_t cnt = arch_counter_get_cntvct();
 
 	/*
 	 * The sched_clock infrastructure only knows about counters
 	 * with at most 32bits. Forget about the upper 24 bits for the
 	 * time being...
 	 */
-	return (u32)(cntvct & (u32)~0);
+	return (u32)cnt;
 }
 
 static cycle_t arch_counter_read(struct clocksource *cs)
 {
+	/*
+	 * Always use the physical counter for the clocksource.
+	 * CNTHCTL.PL1PCTEN must be set to 1.
+	 */
 	return arch_counter_get_cntpct();
 }
 
-int read_current_timer(unsigned long *timer_val)
+static unsigned long arch_timer_read_current_timer(void)
 {
-	if (!arch_timer_rate)
-		return -ENXIO;
-	*timer_val = arch_counter_get_cntpct();
-	return 0;
+	return arch_counter_get_cntpct();
+}
+
+static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
+{
+	/*
+	 * Always use the physical counter for the clocksource.
+	 * CNTHCTL.PL1PCTEN must be set to 1.
+	 */
+	return arch_counter_get_cntpct();
 }
 
 static struct clocksource clocksource_counter = {
@@ -241,14 +347,32 @@ static struct clocksource clocksource_counter = {
 	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+static struct cyclecounter cyclecounter = {
+	.read	= arch_counter_read_cc,
+	.mask	= CLOCKSOURCE_MASK(56),
+};
+
+static struct timecounter timecounter;
+
+struct timecounter *arch_timer_get_timecounter(void)
+{
+	return &timecounter;
+}
+
 static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
 {
 	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
 		 clk->irq, smp_processor_id());
-	disable_percpu_irq(clk->irq);
-	if (arch_timer_ppi2)
-		disable_percpu_irq(arch_timer_ppi2);
-	arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+
+	if (arch_timer_use_virtual)
+		disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
+	else {
+		disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
+		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+			disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
+	}
+
+	clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
 }
 
 static struct local_timer_ops arch_timer_ops __cpuinitdata = {
@@ -261,36 +385,48 @@ static struct clock_event_device arch_timer_global_evt;
 static int __init arch_timer_register(void)
 {
 	int err;
+	int ppi;
 
 	err = arch_timer_available();
 	if (err)
-		return err;
+		goto out;
 
 	arch_timer_evt = alloc_percpu(struct clock_event_device *);
-	if (!arch_timer_evt)
-		return -ENOMEM;
+	if (!arch_timer_evt) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
+	cyclecounter.mult = clocksource_counter.mult;
+	cyclecounter.shift = clocksource_counter.shift;
+	timecounter_init(&timecounter, &cyclecounter,
+			 arch_counter_get_cntpct());
+
+	if (arch_timer_use_virtual) {
+		ppi = arch_timer_ppi[VIRT_PPI];
+		err = request_percpu_irq(ppi, arch_timer_handler_virt,
+					 "arch_timer", arch_timer_evt);
+	} else {
+		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
+		err = request_percpu_irq(ppi, arch_timer_handler_phys,
+					 "arch_timer", arch_timer_evt);
+		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
+			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
+			err = request_percpu_irq(ppi, arch_timer_handler_phys,
+						 "arch_timer", arch_timer_evt);
+			if (err)
+				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
+						arch_timer_evt);
+		}
+	}
 
-	err = request_percpu_irq(arch_timer_ppi, arch_timer_handler,
-				 "arch_timer", arch_timer_evt);
 	if (err) {
 		pr_err("arch_timer: can't register interrupt %d (%d)\n",
-		       arch_timer_ppi, err);
+		       ppi, err);
 		goto out_free;
 	}
 
-	if (arch_timer_ppi2) {
-		err = request_percpu_irq(arch_timer_ppi2, arch_timer_handler,
-					 "arch_timer", arch_timer_evt);
-		if (err) {
-			pr_err("arch_timer: can't register interrupt %d (%d)\n",
-			       arch_timer_ppi2, err);
-			arch_timer_ppi2 = 0;
-			goto out_free_irq;
-		}
-	}
-
 	err = local_timer_register(&arch_timer_ops);
 	if (err) {
 		/*
@@ -302,21 +438,29 @@ static int __init arch_timer_register(void)
 		arch_timer_global_evt.cpumask = cpumask_of(0);
 		err = arch_timer_setup(&arch_timer_global_evt);
 	}
-
 	if (err)
 		goto out_free_irq;
 
-	init_current_timer_delay(arch_timer_rate);
+	/* Use the architected timer for the delay loop. */
+	arch_delay_timer.read_current_timer = &arch_timer_read_current_timer;
+	arch_delay_timer.freq = arch_timer_rate;
+	register_current_timer_delay(&arch_delay_timer);
 	return 0;
 
 out_free_irq:
-	free_percpu_irq(arch_timer_ppi, arch_timer_evt);
-	if (arch_timer_ppi2)
-		free_percpu_irq(arch_timer_ppi2, arch_timer_evt);
+	if (arch_timer_use_virtual)
+		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
+	else {
+		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
+				arch_timer_evt);
+		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
+					arch_timer_evt);
+	}
 
 out_free:
 	free_percpu(arch_timer_evt);
-
+out:
 	return err;
 }
 
@@ -329,6 +473,7 @@ int __init arch_timer_of_register(void)
 {
 	struct device_node *np;
 	u32 freq;
+	int i;
 
 	np = of_find_matching_node(NULL, arch_timer_of_match);
 	if (!np) {
@@ -340,22 +485,40 @@ int __init arch_timer_of_register(void)
 	if (!of_property_read_u32(np, "clock-frequency", &freq))
 		arch_timer_rate = freq;
 
-	arch_timer_ppi = irq_of_parse_and_map(np, 0);
-	arch_timer_ppi2 = irq_of_parse_and_map(np, 1);
-	pr_info("arch_timer: found %s irqs %d %d\n",
-		np->name, arch_timer_ppi, arch_timer_ppi2);
+	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
+		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
+
+	/*
+	 * If no interrupt provided for virtual timer, we'll have to
+	 * stick to the physical timer. It'd better be accessible...
+	 */
+	if (!arch_timer_ppi[VIRT_PPI]) {
+		arch_timer_use_virtual = false;
+
+		if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
+		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
+			pr_warn("arch_timer: No interrupt available, giving up\n");
+			return -EINVAL;
+		}
+	}
 
 	return arch_timer_register();
 }
 
 int __init arch_timer_sched_clock_init(void)
 {
+	u32 (*cnt32)(void);
 	int err;
 
 	err = arch_timer_available();
 	if (err)
 		return err;
 
-	setup_sched_clock(arch_counter_get_cntvct32, 32, arch_timer_rate);
+	if (arch_timer_use_virtual)
+		cnt32 = arch_counter_get_cntvct32;
+	else
+		cnt32 = arch_counter_get_cntpct32;
+
+	setup_sched_clock(cnt32, 32, arch_timer_rate);
 	return 0;
 }
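
For illustration only, not part of the patch: a minimal sketch of how a client might consume the timecounter exported above. arch_timer_get_timecounter() is the accessor added by this change and timecounter_read() is the generic kernel timecounter helper; the function name example_elapsed_ns is hypothetical.

	/*
	 * Hypothetical consumer sketch -- assumes only the patch above plus
	 * the generic timecounter API from <linux/clocksource.h>.
	 */
	#include <linux/clocksource.h>
	#include <asm/arch_timer.h>

	static u64 example_elapsed_ns(void)
	{
		struct timecounter *tc = arch_timer_get_timecounter();

		/*
		 * Converts the 56-bit physical counter (masked via
		 * CLOCKSOURCE_MASK(56) above) to nanoseconds elapsed since
		 * timecounter_init() ran in arch_timer_register().
		 */
		return timecounter_read(tc);
	}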