author     John Stultz <johnstul@us.ibm.com>              2010-03-19 12:23:57 -0400
committer  Matt Turner <mattst88@monolith.freenet-rz.de>  2010-05-25 18:40:27 -0400
commit     9ce34c8f4466608bc67630a42d04f4aaf0443d9b
tree       6a5822670410f567ebef147c8a69c0c0b66d5b05 /arch/alpha
parent     ec96e2fe954c23a54bfdf2673437a39e193a1822
Convert alpha to use clocksources instead of arch_gettimeoffset
Alpha has a TSC-like rpcc counter that it uses to manage time. This can be converted to an actual clocksource instead of using the arch_gettimeoffset method, which really only exists for legacy systems with no continuous counter. Further cleanups could be made if alpha converted to the clockevent model.

CC: Thomas Gleixner <tglx@linutronix.de>
CC: Richard Henderson <rth@twiddle.net>
Acked-by: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Tested-by: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Signed-off-by: Matt Turner <mattst88@gmail.com>
Signed-off-by: John Stultz <johnstul@us.ibm.com>
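Once the rpcc counter is registered as a clocksource, the timekeeping core converts raw counter deltas with the mult/shift pair set up by clocksource_calc_mult_shift(): ns = (cycles * mult) >> shift. The standalone C sketch below (not part of the patch) mimics that arithmetic for an assumed 500 MHz rpcc and an illustrative shift value, just to show the kind of conversion that replaces the hand-rolled math removed from arch_gettimeoffset() further down.

/*
 * Userspace sketch only, not kernel code: mimics the cycles-to-nanoseconds
 * conversion the clocksource core performs, ns = (cycles * mult) >> shift.
 * The 500 MHz frequency and the shift value are assumptions chosen for the
 * example; the kernel derives both via clocksource_calc_mult_shift().
 */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t cycle_freq = 500000000;   /* assumed 500 MHz rpcc */
	uint32_t shift = 22;               /* illustrative shift choice */

	/* mult such that (cycles * mult) >> shift ~= cycles * 1e9 / freq */
	uint32_t mult = (uint32_t)(((NSEC_PER_SEC << shift) + cycle_freq / 2)
				   / cycle_freq);

	uint32_t delta_cycles = 500000;    /* about 1 ms worth of rpcc ticks */
	uint64_t ns = ((uint64_t)delta_cycles * mult) >> shift;

	printf("mult=%u shift=%u: %u cycles -> %llu ns\n",
	       mult, shift, delta_cycles, (unsigned long long)ns);
	return 0;
}

With these assumed values the program prints about 1000000 ns for 500000 cycles; the real kernel picks mult and shift to balance conversion precision against multiply overflow over the counter's wrap interval.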
Diffstat (limited to 'arch/alpha')
 arch/alpha/Kconfig       |  4 -
 arch/alpha/kernel/time.c | 69 +-
 2 files changed, 31 insertions(+), 42 deletions(-)
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index b7193986cbf9..24efdfe277fc 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -51,10 +51,6 @@ config GENERIC_TIME
 	bool
 	default y
 
-config ARCH_USES_GETTIMEOFFSET
-	bool
-	default y
-
 config GENERIC_CMOS_UPDATE
 	def_bool y
 
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 5465e932e568..1efbed82c0fd 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -51,6 +51,7 @@
 #include <linux/mc146818rtc.h>
 #include <linux/time.h>
 #include <linux/timex.h>
+#include <linux/clocksource.h>
 
 #include "proto.h"
 #include "irq_impl.h"
@@ -332,6 +333,34 @@ rpcc_after_update_in_progress(void)
 	return rpcc();
 }
 
+#ifndef CONFIG_SMP
+/* Until and unless we figure out how to get cpu cycle counters
+   in sync and keep them there, we can't use the rpcc. */
+static cycle_t read_rpcc(struct clocksource *cs)
+{
+	cycle_t ret = (cycle_t)rpcc();
+	return ret;
+}
+
+static struct clocksource clocksource_rpcc = {
+	.name		= "rpcc",
+	.rating		= 300,
+	.read		= read_rpcc,
+	.mask		= CLOCKSOURCE_MASK(32),
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS
+};
+
+static inline void register_rpcc_clocksource(long cycle_freq)
+{
+	clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4);
+	clocksource_register(&clocksource_rpcc);
+}
+#else /* !CONFIG_SMP */
+static inline void register_rpcc_clocksource(long cycle_freq)
+{
+}
+#endif /* !CONFIG_SMP */
+
 void __init
 time_init(void)
 {
@@ -385,6 +414,8 @@ time_init(void)
 		__you_loose();
 	}
 
+	register_rpcc_clocksource(cycle_freq);
+
 	state.last_time = cc1;
 	state.scaled_ticks_per_cycle
 		= ((unsigned long) HZ << FIX_SHIFT) / cycle_freq;
@@ -395,44 +426,6 @@ time_init(void)
 }
 
 /*
- * Use the cycle counter to estimate an displacement from the last time
- * tick. Unfortunately the Alpha designers made only the low 32-bits of
- * the cycle counter active, so we overflow on 8.2 seconds on a 500MHz
- * part. So we can't do the "find absolute time in terms of cycles" thing
- * that the other ports do.
- */
-u32 arch_gettimeoffset(void)
-{
-#ifdef CONFIG_SMP
-	/* Until and unless we figure out how to get cpu cycle counters
-	   in sync and keep them there, we can't use the rpcc tricks. */
-	return 0;
-#else
-	unsigned long delta_cycles, delta_usec, partial_tick;
-
-	delta_cycles = rpcc() - state.last_time;
-	partial_tick = state.partial_tick;
-	/*
-	 * usec = cycles * ticks_per_cycle * 2**48 * 1e6 / (2**48 * ticks)
-	 *	= cycles * (s_t_p_c) * 1e6 / (2**48 * ticks)
-	 *	= cycles * (s_t_p_c) * 15625 / (2**42 * ticks)
-	 *
-	 * which, given a 600MHz cycle and a 1024Hz tick, has a
-	 * dynamic range of about 1.7e17, which is less than the
-	 * 1.8e19 in an unsigned long, so we are safe from overflow.
-	 *
-	 * Round, but with .5 up always, since .5 to even is harder
-	 * with no clear gain.
-	 */
-
-	delta_usec = (delta_cycles * state.scaled_ticks_per_cycle
-		      + partial_tick) * 15625;
-	delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
-	return delta_usec * 1000;
-#endif
-}
-
-/*
  * In order to set the CMOS clock precisely, set_rtc_mmss has to be
  * called 500 ms after the second nowtime has started, because when
  * nowtime is written into the registers of the CMOS clock, it will