author     Paul Mackerras <paulus@samba.org>    2005-10-23 03:14:56 -0400
committer  Paul Mackerras <paulus@samba.org>    2005-10-23 03:14:56 -0400
commit     96c44507601d64f29b8ccc867637292e326c7019 (patch)
tree       1fbdb6f4a3c2d99afaa6a244161518ec78f31048 /arch/powerpc/kernel/time.c
parent     985990137e81ca9fd6561cd0f7d1a9695ec57d5a (diff)
powerpc: Fix time code for 601 processors
The 601 doesn't have the timebase register; instead it has an RTCL
register that counts nanoseconds and wraps at 1000000000, and an
RTCU register that counts seconds. This makes the necessary changes
for the merged time code to use the RTCL/U registers when the kernel
is running on a 601.
Signed-off-by: Paul Mackerras <paulus@samba.org>
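
For reference, and not part of the patch itself: the code below relies on the existing get_rtc()/get_rtcl() helpers rather than the timebase accessors. A minimal sketch of the usual way a consistent seconds/nanoseconds pair is read on the 601, assuming the mfrtcu/mfrtcl mnemonics for the RTC registers (the in-tree helpers may differ in detail):

```c
/* Hypothetical sketch, not taken from this patch: read RTCU (seconds)
 * and RTCL (nanoseconds, wrapping at 1000000000) consistently by
 * re-reading RTCU to detect a wrap between the two reads. */
static inline unsigned long long read_601_rtc(void)
{
	unsigned int hi, lo, hi2;

	do {
		asm volatile("mfrtcu %0; mfrtcl %1; mfrtcu %2"
			     : "=r" (hi), "=r" (lo), "=r" (hi2));
	} while (hi2 != hi);	/* RTCL wrapped between the reads: retry */
	return (unsigned long long)hi * 1000000000 + lo;
}
```

Because RTCL wraps back to zero every second, re-reading RTCU is what guarantees the two halves belong to the same second.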
Diffstat (limited to 'arch/powerpc/kernel/time.c')
-rw-r--r--  arch/powerpc/kernel/time.c | 79
1 file changed, 56 insertions(+), 23 deletions(-)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index b635c7de6698..ad501d62aa6e 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -126,6 +126,16 @@ unsigned long ppc_tb_freq;
 #define boot_cpuid 0
 #endif
 
+u64 tb_last_jiffy __cacheline_aligned_in_smp;
+unsigned long tb_last_stamp;
+
+/*
+ * Note that on ppc32 this only stores the bottom 32 bits of
+ * the timebase value, but that's enough to tell when a jiffy
+ * has passed.
+ */
+DEFINE_PER_CPU(unsigned long, last_jiffy);
+
 static __inline__ void timer_check_rtc(void)
 {
         /*
@@ -191,6 +201,26 @@ static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val)
 
 void do_gettimeofday(struct timeval *tv)
 {
+        if (__USE_RTC()) {
+                /* do this the old way */
+                unsigned long flags, seq;
+                unsigned int sec, nsec, usec, lost;
+
+                do {
+                        seq = read_seqbegin_irqsave(&xtime_lock, flags);
+                        sec = xtime.tv_sec;
+                        nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);
+                        lost = jiffies - wall_jiffies;
+                } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+                usec = nsec / 1000 + lost * (1000000 / HZ);
+                while (usec >= 1000000) {
+                        usec -= 1000000;
+                        ++sec;
+                }
+                tv->tv_sec = sec;
+                tv->tv_usec = usec;
+                return;
+        }
         __do_gettimeofday(tv, get_tb());
 }
 
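
The __USE_RTC() branch above restores the pre-merge ppc32 behaviour: take a seqlock-protected snapshot of xtime, add the ticks elapsed since the last processed jiffy plus any lost jiffies, then fold whole seconds out of the microsecond value with the while loop (the sum can exceed one second once the lost-jiffy correction is added). On a 601, tb_ticks_since() has to cope with RTCL wrapping at 10^9; a sketch of that wrap handling, under the assumption that this is roughly how the helper behaves (it is not shown in this diff):

```c
/* Hypothetical sketch of the RTC case of tb_ticks_since(): a negative
 * delta means RTCL wrapped past 1000000000 since tstamp was taken. */
static inline unsigned long rtc_ticks_since(unsigned long tstamp)
{
	int delta = (int)(get_rtcl() - tstamp);

	return delta < 0 ? delta + 1000000000 : delta;
}
```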
@@ -272,6 +302,8 @@ static __inline__ void timer_recalc_offset(u64 cur_tb)
         unsigned long offset;
         u64 new_stamp_xsec;
 
+        if (__USE_RTC())
+                return;
         offset = cur_tb - do_gtod.varp->tb_orig_stamp;
         if ((offset & 0x80000000u) == 0)
                 return;
@@ -357,15 +389,6 @@ static void iSeries_tb_recal(void)
  * call will not be needed)
  */
 
-u64 tb_last_stamp __cacheline_aligned_in_smp;
-
-/*
- * Note that on ppc32 this only stores the bottom 32 bits of
- * the timebase value, but that's enough to tell when a jiffy
- * has passed.
- */
-DEFINE_PER_CPU(unsigned long, last_jiffy);
-
 /*
  * timer_interrupt - gets called when the decrementer overflows,
  * with interrupts disabled.
@@ -415,10 +438,11 @@ void timer_interrupt(struct pt_regs * regs)
                         continue;
 
                 write_seqlock(&xtime_lock);
-                tb_last_stamp += tb_ticks_per_jiffy;
-                timer_recalc_offset(tb_last_stamp);
+                tb_last_jiffy += tb_ticks_per_jiffy;
+                tb_last_stamp = per_cpu(last_jiffy, cpu);
+                timer_recalc_offset(tb_last_jiffy);
                 do_timer(regs);
-                timer_sync_xtime(tb_last_stamp);
+                timer_sync_xtime(tb_last_jiffy);
                 timer_check_rtc();
                 write_sequnlock(&xtime_lock);
                 if (adjusting_time && (time_adjust == 0))
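
Note the split this hunk introduces: tb_last_jiffy is the 64-bit, timebase-domain stamp of the most recently processed jiffy and keeps feeding timer_recalc_offset(), timer_sync_xtime() and the GTOD data, while tb_last_stamp is refreshed from the boot CPU's per-CPU last_jiffy value. That keeps tb_last_stamp in the same units the per-CPU code uses (the low 32 bits of the timebase on ppc32, or RTCL on a 601), which appears to be exactly what the RTC path of do_gettimeofday() measures against via tb_ticks_since().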
@@ -453,7 +477,7 @@ void wakeup_decrementer(void)
          * We don't expect this to be called on a machine with a 601,
          * so using get_tbl is fine.
          */
-        tb_last_stamp = get_tb();
+        tb_last_stamp = tb_last_jiffy = get_tb();
         for_each_cpu(i)
                 per_cpu(last_jiffy, i) = tb_last_stamp;
 }
@@ -483,6 +507,8 @@ void __init smp_space_timers(unsigned int max_cpus)
  */
 unsigned long long sched_clock(void)
 {
+        if (__USE_RTC())
+                return get_rtc();
         return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;
 }
 
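
On the 601 this lets sched_clock() return get_rtc() directly, since the RTC pair already encodes elapsed time in nanoseconds (RTCU * 10^9 + RTCL). The timebase path instead has to scale timebase ticks to nanoseconds, roughly ns = (tb * tb_to_ns_scale) >> tb_to_ns_shift, which is what the mulhdu() line computes.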
@@ -534,7 +560,7 @@ int do_settimeofday(struct timespec *tv)
         new_xsec = (u64)new_nsec * XSEC_PER_SEC;
         do_div(new_xsec, NSEC_PER_SEC);
         new_xsec += (u64)new_sec * XSEC_PER_SEC;
-        update_gtod(tb_last_stamp, new_xsec, do_gtod.varp->tb_to_xs);
+        update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);
 
 #ifdef CONFIG_PPC64
         systemcfg->tz_minuteswest = sys_tz.tz_minuteswest;
@@ -616,12 +642,20 @@ void __init time_init(void)
         if (ppc_md.time_init != NULL)
                 timezone_offset = ppc_md.time_init();
 
-        ppc_md.calibrate_decr();
-
-        printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
-               ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
-        printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n",
-               ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
+        if (__USE_RTC()) {
+                /* 601 processor: dec counts down by 128 every 128ns */
+                ppc_tb_freq = 1000000000;
+                tb_last_stamp = get_rtcl();
+                tb_last_jiffy = tb_last_stamp;
+        } else {
+                /* Normal PowerPC with timebase register */
+                ppc_md.calibrate_decr();
+                printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
+                       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
+                printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n",
+                       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
+                tb_last_stamp = tb_last_jiffy = get_tb();
+        }
 
         tb_ticks_per_jiffy = ppc_tb_freq / HZ;
         tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
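
With ppc_tb_freq forced to 10^9 in the RTC branch (RTCL advances once per nanosecond), the shared code that follows needs no special-casing: tb_ticks_per_jiffy works out to 10^9 / HZ, for example 10,000,000 ticks, i.e. 10 ms, per jiffy if HZ is 100 (an illustrative value, not one taken from this patch), and tb_ticks_per_sec comes back out as 10^9.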
@@ -661,17 +695,16 @@ void __init time_init(void)
         write_seqlock_irqsave(&xtime_lock, flags);
         xtime.tv_sec = tm;
         xtime.tv_nsec = 0;
-        tb_last_stamp = get_tb();
         do_gtod.varp = &do_gtod.vars[0];
         do_gtod.var_idx = 0;
-        do_gtod.varp->tb_orig_stamp = tb_last_stamp;
+        do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
         __get_cpu_var(last_jiffy) = tb_last_stamp;
         do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
         do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
         do_gtod.varp->tb_to_xs = tb_to_xs;
         do_gtod.tb_to_us = tb_to_us;
 #ifdef CONFIG_PPC64
-        systemcfg->tb_orig_stamp = tb_last_stamp;
+        systemcfg->tb_orig_stamp = tb_last_jiffy;
         systemcfg->tb_update_count = 0;
         systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
         systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;