Diffstat (limited to 'arch/powerpc/kernel/time.c')
-rw-r--r--  arch/powerpc/kernel/time.c | 197
1 file changed, 37 insertions(+), 160 deletions(-)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 0441bbdadbd1..ccb8759c8532 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -149,16 +149,6 @@ unsigned long tb_ticks_per_usec = 100; /* sane default */
 EXPORT_SYMBOL(tb_ticks_per_usec);
 unsigned long tb_ticks_per_sec;
 EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
-u64 tb_to_xs;
-unsigned tb_to_us;
-
-#define TICKLEN_SCALE	NTP_SCALE_SHIFT
-static u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
-static u64 ticklen_to_xs;	/* 0.64 fraction */
-
-/* If last_tick_len corresponds to about 1/HZ seconds, then
-   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
-#define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
 
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL_GPL(rtc_lock);
@@ -174,7 +164,6 @@ unsigned long ppc_proc_freq;
 EXPORT_SYMBOL(ppc_proc_freq);
 unsigned long ppc_tb_freq;
 
-static u64 tb_last_jiffy __cacheline_aligned_in_smp;
 static DEFINE_PER_CPU(u64, last_jiffy);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@ -423,30 +412,6 @@ void udelay(unsigned long usecs)
 }
 EXPORT_SYMBOL(udelay);
 
-static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
-			       u64 new_tb_to_xs)
-{
-	/*
-	 * tb_update_count is used to allow the userspace gettimeofday code
-	 * to assure itself that it sees a consistent view of the tb_to_xs and
-	 * stamp_xsec variables.  It reads the tb_update_count, then reads
-	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
-	 * the two values of tb_update_count match and are even then the
-	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
-	 * loops back and reads them again until this criteria is met.
-	 * We expect the caller to have done the first increment of
-	 * vdso_data->tb_update_count already.
-	 */
-	vdso_data->tb_orig_stamp = new_tb_stamp;
-	vdso_data->stamp_xsec = new_stamp_xsec;
-	vdso_data->tb_to_xs = new_tb_to_xs;
-	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
-	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
-	vdso_data->stamp_xtime = xtime;
-	smp_wmb();
-	++(vdso_data->tb_update_count);
-}
-
 #ifdef CONFIG_SMP
 unsigned long profile_pc(struct pt_regs *regs)
 {
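The comment in the removed update_gtod() describes a classic even/odd seqcount protocol. As a rough userspace illustration of the reader side it protects (the real consumer is the powerpc VDSO gettimeofday code; the type and function names below are invented for this sketch, and C11 atomics stand in for the kernel's barriers):

	#include <stdatomic.h>
	#include <stdint.h>

	struct gtod_vars {
		_Atomic uint32_t tb_update_count; /* odd while an update is in flight */
		uint64_t stamp_xsec;
		uint64_t tb_to_xs;
	};

	/* Retry until two reads of the count match and are even, i.e. no
	 * update ran while stamp_xsec and tb_to_xs were being read. */
	static void read_gtod_consistent(const struct gtod_vars *v,
					 uint64_t *xsec, uint64_t *t2x)
	{
		uint32_t c1, c2;

		do {
			c1 = atomic_load_explicit(&v->tb_update_count,
						  memory_order_acquire);
			*xsec = v->stamp_xsec;
			*t2x = v->tb_to_xs;
			atomic_thread_fence(memory_order_acquire);
			c2 = atomic_load_explicit(&v->tb_update_count,
						  memory_order_relaxed);
		} while (c1 != c2 || (c1 & 1));
	}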
@@ -470,7 +435,6 @@ EXPORT_SYMBOL(profile_pc);
 
 static int __init iSeries_tb_recal(void)
 {
-	struct div_result divres;
 	unsigned long titan, tb;
 
 	/* Make sure we only run on iSeries */
@@ -501,10 +465,7 @@ static int __init iSeries_tb_recal(void)
 			tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
 			tb_ticks_per_sec = new_tb_ticks_per_sec;
 			calc_cputime_factors();
-			div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
-			tb_to_xs = divres.result_low;
 			vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
-			vdso_data->tb_to_xs = tb_to_xs;
 			setup_cputime_one_jiffy();
 		}
 		else {
@@ -667,27 +628,9 @@ void timer_interrupt(struct pt_regs * regs)
 	trace_timer_interrupt_exit(regs);
 }
 
-void wakeup_decrementer(void)
-{
-	unsigned long ticks;
-
-	/*
-	 * The timebase gets saved on sleep and restored on wakeup,
-	 * so all we need to do is to reset the decrementer.
-	 */
-	ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
-	if (ticks < tb_ticks_per_jiffy)
-		ticks = tb_ticks_per_jiffy - ticks;
-	else
-		ticks = 1;
-	set_dec(ticks);
-}
-
 #ifdef CONFIG_SUSPEND
-void generic_suspend_disable_irqs(void)
+static void generic_suspend_disable_irqs(void)
 {
-	preempt_disable();
-
 	/* Disable the decrementer, so that it doesn't interfere
 	 * with suspending.
 	 */
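The logic in the removed wakeup_decrementer() is worth restating: the timebase is saved on sleep and restored on wakeup, so only the decrementer needs rearming. As a pure-function sketch with a hypothetical name:

	#include <stdint.h>

	/* Arm the decrementer for the remainder of the current jiffy, or
	 * fire (almost) immediately if the jiffy has already elapsed.
	 * E.g. with ticks_per_jiffy = 4000000 and 1500000 ticks already
	 * gone, the decrementer gets 2500000. */
	static inline uint64_t dec_ticks_after_wakeup(uint64_t ticks_elapsed,
						      uint64_t ticks_per_jiffy)
	{
		if (ticks_elapsed < ticks_per_jiffy)
			return ticks_per_jiffy - ticks_elapsed;
		return 1;
	}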
@@ -697,12 +640,9 @@ void generic_suspend_disable_irqs(void)
 	set_dec(0x7fffffff);
 }
 
-void generic_suspend_enable_irqs(void)
+static void generic_suspend_enable_irqs(void)
 {
-	wakeup_decrementer();
-
 	local_irq_enable();
-	preempt_enable();
 }
 
 /* Overrides the weak version in kernel/power/main.c */
@@ -722,23 +662,6 @@ void arch_suspend_enable_irqs(void)
 }
 #endif
 
-#ifdef CONFIG_SMP
-void __init smp_space_timers(unsigned int max_cpus)
-{
-	int i;
-	u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);
-
-	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
-	previous_tb -= tb_ticks_per_jiffy;
-
-	for_each_possible_cpu(i) {
-		if (i == boot_cpuid)
-			continue;
-		per_cpu(last_jiffy, i) = previous_tb;
-	}
-}
-#endif
-
 /*
  * Scheduler clock - returns current time in nanosec units.
  *
@@ -873,10 +796,37 @@ static cycle_t timebase_read(struct clocksource *cs)
 	return (cycle_t)get_tb();
 }
 
+static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
+			       u64 new_tb_to_xs, struct timespec *now,
+			       u32 frac_sec)
+{
+	/*
+	 * tb_update_count is used to allow the userspace gettimeofday code
+	 * to assure itself that it sees a consistent view of the tb_to_xs and
+	 * stamp_xsec variables.  It reads the tb_update_count, then reads
+	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
+	 * the two values of tb_update_count match and are even then the
+	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
+	 * loops back and reads them again until this criteria is met.
+	 * We expect the caller to have done the first increment of
+	 * vdso_data->tb_update_count already.
+	 */
+	vdso_data->tb_orig_stamp = new_tb_stamp;
+	vdso_data->stamp_xsec = new_stamp_xsec;
+	vdso_data->tb_to_xs = new_tb_to_xs;
+	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
+	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
+	vdso_data->stamp_xtime = *now;
+	vdso_data->stamp_sec_fraction = frac_sec;
+	smp_wmb();
+	++(vdso_data->tb_update_count);
+}
+
 void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
 		     u32 mult)
 {
 	u64 t2x, stamp_xsec;
+	u32 frac_sec;
 
 	if (clock != &clocksource_timebase)
 		return;
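Note the split the re-added comment insists on: update_gtod() performs only the second half of the write protocol (the stores, smp_wmb(), and the even-making increment); the first increment, which makes the count odd and sends VDSO readers into their retry loop, is the caller's job. Schematically, with invented names and C11 fences standing in for the kernel's barriers (a sketch, not the kernel code):

	#include <stdatomic.h>
	#include <stdint.h>

	struct gtod_vars {	/* same layout as in the reader sketch above */
		_Atomic uint32_t tb_update_count;
		uint64_t stamp_xsec;
		uint64_t tb_to_xs;
	};

	static void gtod_publish(struct gtod_vars *v, uint64_t xsec, uint64_t t2x)
	{
		/* caller's half: count becomes odd, readers spin */
		atomic_fetch_add_explicit(&v->tb_update_count, 1,
					  memory_order_relaxed);
		atomic_thread_fence(memory_order_release);

		/* update_gtod()'s half: publish the new values ... */
		v->stamp_xsec = xsec;
		v->tb_to_xs = t2x;

		/* ... then the smp_wmb() analogue and the evening increment */
		atomic_thread_fence(memory_order_release);
		atomic_fetch_add_explicit(&v->tb_update_count, 1,
					  memory_order_relaxed);
	}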
@@ -888,10 +838,14 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
 	/* XXX this assumes clock->shift == 22 */
 	/* 4611686018 ~= 2^(20+64-22) / 1e9 */
 	t2x = (u64) mult * 4611686018ULL;
-	stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
+	stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
 	do_div(stamp_xsec, 1000000000);
-	stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
-	update_gtod(clock->cycle_last, stamp_xsec, t2x);
+	stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;
+
+	BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
+	/* this is tv_nsec / 1e9 as a 0.32 fraction */
+	frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;
+	update_gtod(clock->cycle_last, stamp_xsec, t2x, wall_time, frac_sec);
 }
 
 void update_vsyscall_tz(void)
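Both magic constants in this hunk are fixed-point scale factors that can be checked by hand. With clock->shift == 22, mult converts timebase ticks to nanoseconds as ns = (ticks * mult) >> 22, and tb_to_xs is a 0.64 binary fraction of xsec per tick (1 xsec = 2^-20 s), so the factor is mult * 2^(20+64-22)/1e9 = mult * 2^62/1e9, and 2^62/1e9 = 4611686018.427..., truncated to 4611686018. Likewise tv_nsec/1e9 as a 0.32 fraction is (tv_nsec << 32)/1e9, computed as a multiply by floor(2^64/1e9) = 18446744073 followed by a 32-bit shift. A throwaway verification (unsigned __int128 is a GCC/Clang extension, used here only for the check):

	#include <stdio.h>

	int main(void)
	{
		unsigned __int128 one = 1;

		/* 4611686018: tb_to_xs contribution per unit of mult */
		printf("%llu\n", (unsigned long long)((one << 62) / 1000000000));
		/* 18446744073: tv_nsec -> 0.32 fraction multiplier */
		printf("%llu\n", (unsigned long long)((one << 64) / 1000000000));
		return 0;
	}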
@@ -1007,15 +961,13 @@ void secondary_cpu_time_init(void)
 /* This function is only called on the boot processor */
 void __init time_init(void)
 {
-	unsigned long flags;
 	struct div_result res;
-	u64 scale, x;
+	u64 scale;
 	unsigned shift;
 
 	if (__USE_RTC()) {
 		/* 601 processor: dec counts down by 128 every 128ns */
 		ppc_tb_freq = 1000000000;
-		tb_last_jiffy = get_rtcl();
 	} else {
 		/* Normal PowerPC with timebase register */
 		ppc_md.calibrate_decr();
@@ -1023,50 +975,15 @@ void __init time_init(void)
 		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
 		printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
 		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
-		tb_last_jiffy = get_tb();
 	}
 
 	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
 	tb_ticks_per_sec = ppc_tb_freq;
 	tb_ticks_per_usec = ppc_tb_freq / 1000000;
-	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
 	calc_cputime_factors();
 	setup_cputime_one_jiffy();
 
 	/*
-	 * Calculate the length of each tick in ns.  It will not be
-	 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
-	 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
-	 * rounded up.
-	 */
-	x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
-	do_div(x, ppc_tb_freq);
-	tick_nsec = x;
-	last_tick_len = x << TICKLEN_SCALE;
-
-	/*
-	 * Compute ticklen_to_xs, which is a factor which gets multiplied
-	 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
-	 * It is computed as:
-	 *   ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
-	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
-	 * which turns out to be N = 51 - SHIFT_HZ.
-	 * This gives the result as a 0.64 fixed-point fraction.
-	 * That value is reduced by an offset amounting to 1 xsec per
-	 * 2^31 timebase ticks to avoid problems with time going backwards
-	 * by 1 xsec when we do timer_recalc_offset due to losing the
-	 * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
-	 * since there are 2^20 xsec in a second.
-	 */
-	div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
-		     tb_ticks_per_jiffy << SHIFT_HZ, &res);
-	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
-	ticklen_to_xs = res.result_low;
-
-	/* Compute tb_to_xs from tick_nsec */
-	tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
-
-	/*
 	 * Compute scale factor for sched_clock.
 	 * The calibrate_decr() function has set tb_ticks_per_sec,
 	 * which is the timebase frequency.
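The "N = 51 - SHIFT_HZ" step in the removed comment follows from the TICKLEN_SHIFT definition deleted in the first hunk of this diff:

	N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
	  = 64 + 20 - TICKLEN_SCALE - (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
	  = 84 - 33 - SHIFT_HZ
	  = 51 - SHIFT_HZ

The removed div128_by_32() call realizes the 2^-SHIFT_HZ part of that exponent by shifting the divisor instead: it divides by tb_ticks_per_jiffy << SHIFT_HZ, which is roughly tb_ticks_per_sec since HZ ~= 2^SHIFT_HZ.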
@@ -1087,21 +1004,14 @@ void __init time_init(void)
 	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
 	boot_tb = get_tb_or_rtc();
 
-	write_seqlock_irqsave(&xtime_lock, flags);
-
 	/* If platform provided a timezone (pmac), we correct the time */
 	if (timezone_offset) {
 		sys_tz.tz_minuteswest = -timezone_offset / 60;
 		sys_tz.tz_dsttime = 0;
 	}
 
-	vdso_data->tb_orig_stamp = tb_last_jiffy;
 	vdso_data->tb_update_count = 0;
 	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
-	vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
-	vdso_data->tb_to_xs = tb_to_xs;
-
-	write_sequnlock_irqrestore(&xtime_lock, flags);
 
 	/* Start the decrementer on CPUs that have manual control
 	 * such as BookE
@@ -1195,39 +1105,6 @@ void to_tm(int tim, struct rtc_time * tm)
 	GregorianDay(tm);
 }
 
-/* Auxiliary function to compute scaling factors */
-/* Actually the choice of a timebase running at 1/4 the of the bus
- * frequency giving resolution of a few tens of nanoseconds is quite nice.
- * It makes this computation very precise (27-28 bits typically) which
- * is optimistic considering the stability of most processor clock
- * oscillators and the precision with which the timebase frequency
- * is measured but does not harm.
- */
-unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
-{
-	unsigned mlt=0, tmp, err;
-	/* No concern for performance, it's done once: use a stupid
-	 * but safe and compact method to find the multiplier.
-	 */
-
-	for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
-		if (mulhwu(inscale, mlt|tmp) < outscale)
-			mlt |= tmp;
-	}
-
-	/* We might still be off by 1 for the best approximation.
-	 * A side effect of this is that if outscale is too large
-	 * the returned value will be zero.
-	 * Many corner cases have been checked and seem to work,
-	 * some might have been forgotten in the test however.
-	 */
-
-	err = inscale * (mlt+1);
-	if (err <= inscale/2)
-		mlt++;
-	return mlt;
-}
-
 /*
  * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
  * result.
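For reference, the removed mulhwu_scale_factor() does a bit-by-bit binary search for the largest mlt with mulhwu(inscale, mlt) < outscale, where mulhwu(a, b) is the high 32 bits of the 32x32-bit product; in effect it approximates outscale * 2^32 / inscale, with the final err test nudging the result toward the nearest value. Where 64-bit division is cheap, the same factor has a closed form — a sketch under that assumption, with a hypothetical helper name, and ignoring the outscale >= inscale corner case the original comment mentions:

	#include <stdint.h>

	/* Returns m such that out_units = (in_units * m) >> 32, e.g. the
	 * removed tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000)
	 * setup for converting timebase ticks to microseconds. */
	static uint32_t scale_factor(uint32_t inscale, uint32_t outscale)
	{
		return (uint32_t)(((uint64_t)outscale << 32) / inscale);
	}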