Diffstat (limited to 'arch/powerpc/kernel/time.c')
-rw-r--r--  arch/powerpc/kernel/time.c  136
1 file changed, 3 insertions, 133 deletions
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 5adebaf47f13..ccb8759c8532 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -149,16 +149,6 @@ unsigned long tb_ticks_per_usec = 100; /* sane default */
 EXPORT_SYMBOL(tb_ticks_per_usec);
 unsigned long tb_ticks_per_sec;
 EXPORT_SYMBOL(tb_ticks_per_sec);   /* for cputime_t conversions */
-u64 tb_to_xs;
-unsigned tb_to_us;
-
-#define TICKLEN_SCALE   NTP_SCALE_SHIFT
-static u64 last_tick_len;       /* units are ns / 2^TICKLEN_SCALE */
-static u64 ticklen_to_xs;       /* 0.64 fraction */
-
-/* If last_tick_len corresponds to about 1/HZ seconds, then
-   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
-#define TICKLEN_SHIFT   (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
 
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL_GPL(rtc_lock);
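
The removed TICKLEN_SHIFT definition encodes a small derivation worth spelling
out: last_tick_len is kept in units of ns / 2^TICKLEN_SCALE, so a tick of about
1/HZ seconds gives last_tick_len ~= (10^9 / HZ) << TICKLEN_SCALE ~=
2^(30 - SHIFT_HZ + TICKLEN_SCALE), using 10^9 ~= 2^30 and HZ = 2^SHIFT_HZ.
Shifting left by TICKLEN_SHIFT = 63 - 30 - TICKLEN_SCALE + SHIFT_HZ then lands
at about 2^63, as the removed comment states. The "0.64 fraction" annotation on
ticklen_to_xs means the u64 holds f/2^64; multiplying by such a fraction takes
the high 64 bits of a 64x64-bit product, which is what the powerpc mulhdu()
primitive returns. A minimal userspace sketch of that convention (not kernel
code; the unsigned __int128 arithmetic and the 512 MHz timebase are assumptions
for the demo):

    #include <stdint.h>
    #include <stdio.h>

    /* Portable stand-in for the powerpc mulhdu(): high 64 bits of a*b. */
    static uint64_t mulhdu(uint64_t a, uint64_t b)
    {
            return (uint64_t)(((unsigned __int128)a * b) >> 64);
    }

    int main(void)
    {
            uint64_t tb_freq = 512000000;   /* assumed example timebase, Hz */
            /* ticks -> xsec as a 0.64 fraction: (2^20 / tb_freq) * 2^64 */
            uint64_t tb_to_xs = (uint64_t)(((unsigned __int128)1 << 84) / tb_freq);

            /* One second of ticks gives ~2^20 xsec; prints 1048575 because
             * the division above rounds the fraction down. */
            printf("%llu\n", (unsigned long long)mulhdu(tb_freq, tb_to_xs));
            return 0;
    }
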
@@ -174,7 +164,6 @@ unsigned long ppc_proc_freq;
 EXPORT_SYMBOL(ppc_proc_freq);
 unsigned long ppc_tb_freq;
 
-static u64 tb_last_jiffy __cacheline_aligned_in_smp;
 static DEFINE_PER_CPU(u64, last_jiffy);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@ -446,7 +435,6 @@ EXPORT_SYMBOL(profile_pc);
 
 static int __init iSeries_tb_recal(void)
 {
-        struct div_result divres;
         unsigned long titan, tb;
 
         /* Make sure we only run on iSeries */
@@ -477,10 +465,7 @@ static int __init iSeries_tb_recal(void)
                 tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
                 tb_ticks_per_sec = new_tb_ticks_per_sec;
                 calc_cputime_factors();
-                div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
-                tb_to_xs = divres.result_low;
                 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
-                vdso_data->tb_to_xs = tb_to_xs;
                 setup_cputime_one_jiffy();
         }
         else {
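
For reference, the deleted div128_by_32() call computed tb_to_xs as a 0.64
binary fraction: dividing the 128-bit value XSEC_PER_SEC * 2^64 by the tick
rate leaves divres.result_low = 2^84 / tb_ticks_per_sec, since XSEC_PER_SEC
is 2^20. A portable sketch of what div128_by_32() does, assuming a compiler
with unsigned __int128 (the kernel's own version, kept later in this file,
works in 32-bit halves instead):

    #include <stdint.h>

    struct div_result {
            uint64_t result_high;
            uint64_t result_low;
    };

    /* Divide the 128-bit value (high:low) by a 32-bit divisor,
     * leaving a 128-bit result, as the powerpc helper does. */
    static void div128_by_32(uint64_t high, uint64_t low, uint32_t divisor,
                             struct div_result *dr)
    {
            unsigned __int128 n = ((unsigned __int128)high << 64) | low;
            unsigned __int128 q = n / divisor;

            dr->result_high = (uint64_t)(q >> 64);
            dr->result_low = (uint64_t)q;
    }
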
@@ -643,27 +628,9 @@ void timer_interrupt(struct pt_regs * regs)
         trace_timer_interrupt_exit(regs);
 }
 
-void wakeup_decrementer(void)
-{
-        unsigned long ticks;
-
-        /*
-         * The timebase gets saved on sleep and restored on wakeup,
-         * so all we need to do is to reset the decrementer.
-         */
-        ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
-        if (ticks < tb_ticks_per_jiffy)
-                ticks = tb_ticks_per_jiffy - ticks;
-        else
-                ticks = 1;
-        set_dec(ticks);
-}
-
 #ifdef CONFIG_SUSPEND
-void generic_suspend_disable_irqs(void)
+static void generic_suspend_disable_irqs(void)
 {
-        preempt_disable();
-
         /* Disable the decrementer, so that it doesn't interfere
          * with suspending.
          */
@@ -673,12 +640,9 @@ void generic_suspend_disable_irqs(void)
         set_dec(0x7fffffff);
 }
 
-void generic_suspend_enable_irqs(void)
+static void generic_suspend_enable_irqs(void)
 {
-        wakeup_decrementer();
-
         local_irq_enable();
-        preempt_enable();
 }
 
 /* Overrides the weak version in kernel/power/main.c */
@@ -698,23 +662,6 @@ void arch_suspend_enable_irqs(void)
 }
 #endif
 
-#ifdef CONFIG_SMP
-void __init smp_space_timers(unsigned int max_cpus)
-{
-        int i;
-        u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);
-
-        /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
-        previous_tb -= tb_ticks_per_jiffy;
-
-        for_each_possible_cpu(i) {
-                if (i == boot_cpuid)
-                        continue;
-                per_cpu(last_jiffy, i) = previous_tb;
-        }
-}
-#endif
-
 /*
  * Scheduler clock - returns current time in nanosec units.
  *
@@ -1014,15 +961,13 @@ void secondary_cpu_time_init(void)
 /* This function is only called on the boot processor */
 void __init time_init(void)
 {
-        unsigned long flags;
         struct div_result res;
-        u64 scale, x;
+        u64 scale;
         unsigned shift;
 
         if (__USE_RTC()) {
                 /* 601 processor: dec counts down by 128 every 128ns */
                 ppc_tb_freq = 1000000000;
-                tb_last_jiffy = get_rtcl();
         } else {
                 /* Normal PowerPC with timebase register */
                 ppc_md.calibrate_decr();
@@ -1030,50 +975,15 @@ void __init time_init(void)
                ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
         printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
                ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
-        tb_last_jiffy = get_tb();
         }
 
         tb_ticks_per_jiffy = ppc_tb_freq / HZ;
         tb_ticks_per_sec = ppc_tb_freq;
         tb_ticks_per_usec = ppc_tb_freq / 1000000;
-        tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
         calc_cputime_factors();
         setup_cputime_one_jiffy();
 
         /*
-         * Calculate the length of each tick in ns.  It will not be
-         * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
-         * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
-         * rounded up.
-         */
-        x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
-        do_div(x, ppc_tb_freq);
-        tick_nsec = x;
-        last_tick_len = x << TICKLEN_SCALE;
-
-        /*
-         * Compute ticklen_to_xs, which is a factor which gets multiplied
-         * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
-         * It is computed as:
-         *   ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
-         * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
-         * which turns out to be N = 51 - SHIFT_HZ.
-         * This gives the result as a 0.64 fixed-point fraction.
-         * That value is reduced by an offset amounting to 1 xsec per
-         * 2^31 timebase ticks to avoid problems with time going backwards
-         * by 1 xsec when we do timer_recalc_offset due to losing the
-         * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
-         * since there are 2^20 xsec in a second.
-         */
-        div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
-                     tb_ticks_per_jiffy << SHIFT_HZ, &res);
-        div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
-        ticklen_to_xs = res.result_low;
-
-        /* Compute tb_to_xs from tick_nsec */
-        tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
-
-        /*
          * Compute scale factor for sched_clock.
          * The calibrate_decr() function has set tb_ticks_per_sec,
          * which is the timebase frequency.
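
The arithmetic in the block removed above does check out: substituting
TICKLEN_SHIFT = 63 - 30 - TICKLEN_SCALE + SHIFT_HZ into
N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT gives
N = 64 + 20 - 63 + 30 - SHIFT_HZ = 51 - SHIFT_HZ, as the comment claimed.
The rounded-up tick length is also easy to try in isolation; a userspace
sketch with plain 64-bit division standing in for do_div() (HZ = 250 and the
512 MHz timebase are assumed example values):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
            uint64_t hz = 250;                      /* assumed HZ */
            uint64_t ppc_tb_freq = 512000000;       /* assumed timebase, Hz */
            uint64_t tb_ticks_per_jiffy = ppc_tb_freq / hz;

            /* ceil(1e9 * tb_ticks_per_jiffy / ppc_tb_freq), as the removed
             * code computed tick_nsec. */
            uint64_t tick_nsec = (NSEC_PER_SEC * tb_ticks_per_jiffy
                                  + ppc_tb_freq - 1) / ppc_tb_freq;

            printf("tick_nsec = %llu\n", (unsigned long long)tick_nsec);
            return 0;
    }

This prints 4000000, exactly 10^9 / 250, because 512 MHz divides evenly by
HZ; a frequency that does not divide evenly rounds up to the next nanosecond.
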
@@ -1094,21 +1004,14 @@ void __init time_init(void)
         /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
         boot_tb = get_tb_or_rtc();
 
-        write_seqlock_irqsave(&xtime_lock, flags);
-
         /* If platform provided a timezone (pmac), we correct the time */
         if (timezone_offset) {
                 sys_tz.tz_minuteswest = -timezone_offset / 60;
                 sys_tz.tz_dsttime = 0;
         }
 
-        vdso_data->tb_orig_stamp = tb_last_jiffy;
         vdso_data->tb_update_count = 0;
         vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
-        vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
-        vdso_data->tb_to_xs = tb_to_xs;
-
-        write_sequnlock_irqrestore(&xtime_lock, flags);
 
         /* Start the decrementer on CPUs that have manual control
          * such as BookE
@@ -1202,39 +1105,6 @@ void to_tm(int tim, struct rtc_time * tm)
         GregorianDay(tm);
 }
 
-/* Auxiliary function to compute scaling factors */
-/* Actually the choice of a timebase running at 1/4 of the bus
- * frequency giving resolution of a few tens of nanoseconds is quite nice.
- * It makes this computation very precise (27-28 bits typically) which
- * is optimistic considering the stability of most processor clock
- * oscillators and the precision with which the timebase frequency
- * is measured but does not harm.
- */
-unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
-{
-        unsigned mlt=0, tmp, err;
-        /* No concern for performance, it's done once: use a stupid
-         * but safe and compact method to find the multiplier.
-         */
-
-        for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
-                if (mulhwu(inscale, mlt|tmp) < outscale)
-                        mlt |= tmp;
-        }
-
-        /* We might still be off by 1 for the best approximation.
-         * A side effect of this is that if outscale is too large
-         * the returned value will be zero.
-         * Many corner cases have been checked and seem to work,
-         * some might have been forgotten in the test however.
-         */
-
-        err = inscale * (mlt+1);
-        if (err <= inscale/2)
-                mlt++;
-        return mlt;
-}
-
 /*
  * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
  * result.
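
Since this removal drops mulhwu_scale_factor() together with its only user
(tb_to_us), a self-contained copy is worth keeping for reference. The sketch
below reproduces the deleted bit-at-a-time search for the multiplier
mlt ~= 2^32 * outscale / inscale, with a portable mulhwu() standing in for
the powerpc instruction (the 512 MHz timebase in main() is an assumed
example):

    #include <stdint.h>
    #include <stdio.h>

    /* High 32 bits of a 32x32-bit multiply, like the powerpc mulhwu insn. */
    static uint32_t mulhwu(uint32_t a, uint32_t b)
    {
            return (uint32_t)(((uint64_t)a * b) >> 32);
    }

    static uint32_t mulhwu_scale_factor(uint32_t inscale, uint32_t outscale)
    {
            uint32_t mlt = 0, tmp, err;

            /* Set one bit at a time, keeping mulhwu(inscale, mlt) below
             * outscale; performance is irrelevant, it runs once. */
            for (tmp = 1U << 31; tmp != 0; tmp >>= 1)
                    if (mulhwu(inscale, mlt | tmp) < outscale)
                            mlt |= tmp;

            /* We may still be one short of the best approximation; err is
             * the low 32 bits of inscale * (mlt + 1), as in the original. */
            err = inscale * (mlt + 1);
            if (err <= inscale / 2)
                    mlt++;
            return mlt;
    }

    int main(void)
    {
            uint32_t tb_to_us = mulhwu_scale_factor(512000000, 1000000);

            /* 10^6 ticks at 512 MHz is 1/512 s, so this prints 1953 us. */
            printf("%u us\n", mulhwu(1000000, tb_to_us));
            return 0;
    }
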