author	Thomas Gleixner <tglx@linutronix.de>	2010-07-28 15:49:22 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2010-07-28 15:49:22 -0400
commit	47916be4e28c3d6fdb97dd8fb887d1d9b3145b9d (patch)
tree	3b2259ee965cbe70c4ce9325d0e0def9bc061d97	/arch/powerpc/kernel/time.c
parent	852db46d55e85b475a72e665ca08d3317769ceef (diff)
parent	d75d68cfef4936ddf38d2694ae2f7d1f7c45db05 (diff)
Merge branch 'powerpc.cherry-picks' into timers/clocksource
Conflicts:
	arch/powerpc/kernel/time.c

Reason: The powerpc next tree contains two commits which conflict with
the timekeeping changes:

  8fd63a9e powerpc: Rework VDSO gettimeofday to prevent time going backwards
  c1aa687d powerpc: Clean up obsolete code relating to decrementer and timebase

John Stultz identified them and provided the conflict resolution.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/powerpc/kernel/time.c')
-rw-r--r--	arch/powerpc/kernel/time.c	142
1 file changed, 9 insertions, 133 deletions
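
The most notable addition in this diff is the new stamp_sec_fraction value exported to the VDSO: update_vsyscall() now stores tv_nsec as a 0.32 binary fraction of a second, computed as ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32. Since 18446744073 is floor(2^64 / 10^9), the multiply-and-shift is equivalent to (tv_nsec / 1e9) * 2^32 without a division. Below is a minimal standalone sketch of that conversion; the helper name and test program are illustrative only, not part of the kernel code.

#include <stdint.h>
#include <stdio.h>

/* tv_nsec (0..999999999) -> 0.32 binary fraction of a second.
 * 18446744073 == floor(2^64 / 1e9), so nsec * 18446744073 is
 * (nsec / 1e9) in 32.32 fixed point; shifting right by 32 keeps
 * the fractional 32 bits.  The product stays below 2^64 for any
 * valid tv_nsec, so the 64-bit multiply cannot overflow. */
static uint32_t nsec_to_frac_sec(uint32_t nsec)
{
	return (uint32_t)(((uint64_t)nsec * 18446744073ULL) >> 32);
}

int main(void)
{
	/* half a second should map to roughly 0.5 * 2^32 */
	printf("%u (expect about %u)\n",
	       nsec_to_frac_sec(500000000), 1u << 31);
	return 0;
}
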
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index e215f76bba1c..ce53dfa7130d 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -149,16 +149,6 @@ unsigned long tb_ticks_per_usec = 100; /* sane default */
 EXPORT_SYMBOL(tb_ticks_per_usec);
 unsigned long tb_ticks_per_sec;
 EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
-u64 tb_to_xs;
-unsigned tb_to_us;
-
-#define TICKLEN_SCALE	NTP_SCALE_SHIFT
-static u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
-static u64 ticklen_to_xs;	/* 0.64 fraction */
-
-/* If last_tick_len corresponds to about 1/HZ seconds, then
-   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
-#define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
 
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL_GPL(rtc_lock);
@@ -174,7 +164,6 @@ unsigned long ppc_proc_freq;
 EXPORT_SYMBOL(ppc_proc_freq);
 unsigned long ppc_tb_freq;
 
-static u64 tb_last_jiffy __cacheline_aligned_in_smp;
 static DEFINE_PER_CPU(u64, last_jiffy);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@ -446,7 +435,6 @@ EXPORT_SYMBOL(profile_pc);
 
 static int __init iSeries_tb_recal(void)
 {
-	struct div_result divres;
 	unsigned long titan, tb;
 
 	/* Make sure we only run on iSeries */
@@ -477,10 +465,7 @@ static int __init iSeries_tb_recal(void)
 			tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
 			tb_ticks_per_sec = new_tb_ticks_per_sec;
 			calc_cputime_factors();
-			div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
-			tb_to_xs = divres.result_low;
 			vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
-			vdso_data->tb_to_xs = tb_to_xs;
 			setup_cputime_one_jiffy();
 		}
 		else {
@@ -643,27 +628,9 @@ void timer_interrupt(struct pt_regs * regs)
 	trace_timer_interrupt_exit(regs);
 }
 
-void wakeup_decrementer(void)
-{
-	unsigned long ticks;
-
-	/*
-	 * The timebase gets saved on sleep and restored on wakeup,
-	 * so all we need to do is to reset the decrementer.
-	 */
-	ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
-	if (ticks < tb_ticks_per_jiffy)
-		ticks = tb_ticks_per_jiffy - ticks;
-	else
-		ticks = 1;
-	set_dec(ticks);
-}
-
 #ifdef CONFIG_SUSPEND
-void generic_suspend_disable_irqs(void)
+static void generic_suspend_disable_irqs(void)
 {
-	preempt_disable();
-
 	/* Disable the decrementer, so that it doesn't interfere
 	 * with suspending.
 	 */
@@ -673,12 +640,9 @@ void generic_suspend_disable_irqs(void)
 	set_dec(0x7fffffff);
 }
 
-void generic_suspend_enable_irqs(void)
+static void generic_suspend_enable_irqs(void)
 {
-	wakeup_decrementer();
-
 	local_irq_enable();
-	preempt_enable();
 }
 
 /* Overrides the weak version in kernel/power/main.c */
@@ -698,23 +662,6 @@ void arch_suspend_enable_irqs(void)
 }
 #endif
 
-#ifdef CONFIG_SMP
-void __init smp_space_timers(unsigned int max_cpus)
-{
-	int i;
-	u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);
-
-	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
-	previous_tb -= tb_ticks_per_jiffy;
-
-	for_each_possible_cpu(i) {
-		if (i == boot_cpuid)
-			continue;
-		per_cpu(last_jiffy, i) = previous_tb;
-	}
-}
-#endif
-
 /*
  * Scheduler clock - returns current time in nanosec units.
  *
@@ -853,6 +800,7 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 		struct clocksource *clock, u32 mult)
 {
 	u64 new_tb_to_xs, new_stamp_xsec;
+	u32 frac_sec;
 
 	if (clock != &clocksource_timebase)
 		return;
@@ -868,6 +816,10 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 	do_div(new_stamp_xsec, 1000000000);
 	new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;
 
+	BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
+	/* this is tv_nsec / 1e9 as a 0.32 fraction */
+	frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;
+
 	/*
 	 * tb_update_count is used to allow the userspace gettimeofday code
 	 * to assure itself that it sees a consistent view of the tb_to_xs and
@@ -885,6 +837,7 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 	vdso_data->wtom_clock_sec = wtm->tv_sec;
 	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
 	vdso_data->stamp_xtime = *wall_time;
+	vdso_data->stamp_sec_fraction = frac_sec;
 	smp_wmb();
 	++(vdso_data->tb_update_count);
 }
@@ -1002,15 +955,13 @@ void secondary_cpu_time_init(void)
 /* This function is only called on the boot processor */
 void __init time_init(void)
 {
-	unsigned long flags;
 	struct div_result res;
-	u64 scale, x;
+	u64 scale;
 	unsigned shift;
 
 	if (__USE_RTC()) {
 		/* 601 processor: dec counts down by 128 every 128ns */
 		ppc_tb_freq = 1000000000;
-		tb_last_jiffy = get_rtcl();
 	} else {
 		/* Normal PowerPC with timebase register */
 		ppc_md.calibrate_decr();
@@ -1018,50 +969,15 @@ void __init time_init(void)
 	       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
 	printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
 	       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
-		tb_last_jiffy = get_tb();
 	}
 
 	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
 	tb_ticks_per_sec = ppc_tb_freq;
 	tb_ticks_per_usec = ppc_tb_freq / 1000000;
-	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
 	calc_cputime_factors();
 	setup_cputime_one_jiffy();
 
 	/*
-	 * Calculate the length of each tick in ns.  It will not be
-	 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
-	 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
-	 * rounded up.
-	 */
-	x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
-	do_div(x, ppc_tb_freq);
-	tick_nsec = x;
-	last_tick_len = x << TICKLEN_SCALE;
-
-	/*
-	 * Compute ticklen_to_xs, which is a factor which gets multiplied
-	 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
-	 * It is computed as:
-	 *	ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
-	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
-	 * which turns out to be N = 51 - SHIFT_HZ.
-	 * This gives the result as a 0.64 fixed-point fraction.
-	 * That value is reduced by an offset amounting to 1 xsec per
-	 * 2^31 timebase ticks to avoid problems with time going backwards
-	 * by 1 xsec when we do timer_recalc_offset due to losing the
-	 * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
-	 * since there are 2^20 xsec in a second.
-	 */
-	div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
-		     tb_ticks_per_jiffy << SHIFT_HZ, &res);
-	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
-	ticklen_to_xs = res.result_low;
-
-	/* Compute tb_to_xs from tick_nsec */
-	tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
-
-	/*
 	 * Compute scale factor for sched_clock.
 	 * The calibrate_decr() function has set tb_ticks_per_sec,
 	 * which is the timebase frequency.
@@ -1082,21 +998,14 @@ void __init time_init(void)
 	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
 	boot_tb = get_tb_or_rtc();
 
-	write_seqlock_irqsave(&xtime_lock, flags);
-
 	/* If platform provided a timezone (pmac), we correct the time */
 	if (timezone_offset) {
 		sys_tz.tz_minuteswest = -timezone_offset / 60;
 		sys_tz.tz_dsttime = 0;
 	}
 
-	vdso_data->tb_orig_stamp = tb_last_jiffy;
 	vdso_data->tb_update_count = 0;
 	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
-	vdso_data->stamp_xsec = (u64) get_seconds() * XSEC_PER_SEC;
-	vdso_data->tb_to_xs = tb_to_xs;
-
-	write_sequnlock_irqrestore(&xtime_lock, flags);
 
 	/* Start the decrementer on CPUs that have manual control
 	 * such as BookE
@@ -1190,39 +1099,6 @@ void to_tm(int tim, struct rtc_time * tm)
 	GregorianDay(tm);
 }
 
-/* Auxiliary function to compute scaling factors */
-/* Actually the choice of a timebase running at 1/4 the of the bus
- * frequency giving resolution of a few tens of nanoseconds is quite nice.
- * It makes this computation very precise (27-28 bits typically) which
- * is optimistic considering the stability of most processor clock
- * oscillators and the precision with which the timebase frequency
- * is measured but does not harm.
- */
-unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
-{
-	unsigned mlt=0, tmp, err;
-	/* No concern for performance, it's done once: use a stupid
-	 * but safe and compact method to find the multiplier.
-	 */
-
-	for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
-		if (mulhwu(inscale, mlt|tmp) < outscale)
-			mlt |= tmp;
-	}
-
-	/* We might still be off by 1 for the best approximation.
-	 * A side effect of this is that if outscale is too large
-	 * the returned value will be zero.
-	 * Many corner cases have been checked and seem to work,
-	 * some might have been forgotten in the test however.
-	 */
-
-	err = inscale * (mlt+1);
-	if (err <= inscale/2)
-		mlt++;
-	return mlt;
-}
-
 /*
  * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
  * result.