diff options
author | Richard Henderson <rth@twiddle.net> | 2013-07-14 12:55:08 -0400 |
---|---|---|
committer | Matt Turner <mattst88@gmail.com> | 2013-11-16 19:33:18 -0500 |
commit | db2d3260617ae8c9076ef12e6de06bd5b3d82cd3 (patch) | |
tree | 700ee620482cdee9aef58c3d4c8bde7336394c5d /arch/alpha | |
parent | 85d0b3a573d8b711ee0c96199ac24a0f3283ed68 (diff) |
alpha: Enable the rpcc clocksource for single processor
Don't depend on SMP, just check the number of processors online.
This allows a single distribution kernel to use the clocksource
when run on a single processor machine. Do depend on whether or
not we're using WTINT.
Signed-off-by: Richard Henderson <rth@twiddle.net>
Diffstat (limited to 'arch/alpha')
-rw-r--r-- | arch/alpha/kernel/time.c | 67 |
1 file changed, 37 insertions, 30 deletions
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index 4c6c0fe47a7b..0d72e2df4b0e 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c | |||
@@ -184,6 +184,37 @@ common_init_rtc(void) | |||
184 | init_rtc_irq(); | 184 | init_rtc_irq(); |
185 | } | 185 | } |
186 | 186 | ||
187 | |||
188 | #ifndef CONFIG_ALPHA_WTINT | ||
189 | /* | ||
190 | * The RPCC as a clocksource primitive. | ||
191 | * | ||
192 | * While we have free-running timecounters running on all CPUs, and we make | ||
193 | * a half-hearted attempt in init_rtc_rpcc_info to sync the timecounter | ||
194 | * with the wall clock, that initialization isn't kept up-to-date across | ||
195 | * different time counters in SMP mode. Therefore we can only use this | ||
196 | * method when there's only one CPU enabled. | ||
197 | * | ||
198 | * When using the WTINT PALcall, the RPCC may shift to a lower frequency, | ||
199 | * or stop altogether, while waiting for the interrupt. Therefore we cannot | ||
200 | * use this method when WTINT is in use. | ||
201 | */ | ||
202 | |||
203 | static cycle_t read_rpcc(struct clocksource *cs) | ||
204 | { | ||
205 | return rpcc(); | ||
206 | } | ||
207 | |||
208 | static struct clocksource clocksource_rpcc = { | ||
209 | .name = "rpcc", | ||
210 | .rating = 300, | ||
211 | .read = read_rpcc, | ||
212 | .mask = CLOCKSOURCE_MASK(32), | ||
213 | .flags = CLOCK_SOURCE_IS_CONTINUOUS | ||
214 | }; | ||
215 | #endif /* ALPHA_WTINT */ | ||
216 | |||
217 | |||
187 | /* Validate a computed cycle counter result against the known bounds for | 218 | /* Validate a computed cycle counter result against the known bounds for |
188 | the given processor core. There's too much brokenness in the way of | 219 | the given processor core. There's too much brokenness in the way of |
189 | timing hardware for any one method to work everywhere. :-( | 220 | timing hardware for any one method to work everywhere. :-( |
@@ -294,33 +325,6 @@ rpcc_after_update_in_progress(void) | |||
294 | return rpcc(); | 325 | return rpcc(); |
295 | } | 326 | } |
296 | 327 | ||
297 | #ifndef CONFIG_SMP | ||
298 | /* Until and unless we figure out how to get cpu cycle counters | ||
299 | in sync and keep them there, we can't use the rpcc. */ | ||
300 | static cycle_t read_rpcc(struct clocksource *cs) | ||
301 | { | ||
302 | cycle_t ret = (cycle_t)rpcc(); | ||
303 | return ret; | ||
304 | } | ||
305 | |||
306 | static struct clocksource clocksource_rpcc = { | ||
307 | .name = "rpcc", | ||
308 | .rating = 300, | ||
309 | .read = read_rpcc, | ||
310 | .mask = CLOCKSOURCE_MASK(32), | ||
311 | .flags = CLOCK_SOURCE_IS_CONTINUOUS | ||
312 | }; | ||
313 | |||
314 | static inline void register_rpcc_clocksource(long cycle_freq) | ||
315 | { | ||
316 | clocksource_register_hz(&clocksource_rpcc, cycle_freq); | ||
317 | } | ||
318 | #else /* !CONFIG_SMP */ | ||
319 | static inline void register_rpcc_clocksource(long cycle_freq) | ||
320 | { | ||
321 | } | ||
322 | #endif /* !CONFIG_SMP */ | ||
323 | |||
324 | void __init | 328 | void __init |
325 | time_init(void) | 329 | time_init(void) |
326 | { | 330 | { |
@@ -362,20 +366,23 @@ time_init(void) | |||
362 | "and unable to estimate a proper value!\n"); | 366 | "and unable to estimate a proper value!\n"); |
363 | } | 367 | } |
364 | 368 | ||
369 | /* See above for restrictions on using clocksource_rpcc. */ | ||
370 | #ifndef CONFIG_ALPHA_WTINT | ||
371 | if (hwrpb->nr_processors == 1) | ||
372 | clocksource_register_hz(&clocksource_rpcc, cycle_freq); | ||
373 | #endif | ||
374 | |||
365 | /* From John Bowman <bowman@math.ualberta.ca>: allow the values | 375 | /* From John Bowman <bowman@math.ualberta.ca>: allow the values |
366 | to settle, as the Update-In-Progress bit going low isn't good | 376 | to settle, as the Update-In-Progress bit going low isn't good |
367 | enough on some hardware. 2ms is our guess; we haven't found | 377 | enough on some hardware. 2ms is our guess; we haven't found |
368 | bogomips yet, but this is close on a 500Mhz box. */ | 378 | bogomips yet, but this is close on a 500Mhz box. */ |
369 | __delay(1000000); | 379 | __delay(1000000); |
370 | 380 | ||
371 | |||
372 | if (HZ > (1<<16)) { | 381 | if (HZ > (1<<16)) { |
373 | extern void __you_loose (void); | 382 | extern void __you_loose (void); |
374 | __you_loose(); | 383 | __you_loose(); |
375 | } | 384 | } |
376 | 385 | ||
377 | register_rpcc_clocksource(cycle_freq); | ||
378 | |||
379 | state.last_time = cc1; | 386 | state.last_time = cc1; |
380 | state.scaled_ticks_per_cycle | 387 | state.scaled_ticks_per_cycle |
381 | = ((unsigned long) HZ << FIX_SHIFT) / cycle_freq; | 388 | = ((unsigned long) HZ << FIX_SHIFT) / cycle_freq; |