author		Paul Mackerras <paulus@samba.org>	2006-02-19 18:38:56 -0500
committer	Paul Mackerras <paulus@samba.org>	2006-02-19 18:38:56 -0500
commit		092b8f3488a3e50a4ab5f2f3f7c8bbf56b3144e1 (patch)
tree		fe9aa2dc2de1ed23109ef77ce8bd38120c2d643d /arch/powerpc
parent		bd71c2b17468a2531fb4c81ec1d73520845e97e1 (diff)
powerpc: Keep xtime and gettimeofday in sync
This fixes a regression which was introduced by moving ppc32 to use the
same sort of lockless gettimeofday as ppc64 has been using for some
time.  This involves getting the timebase and performing some simple
arithmetic to convert it to seconds and microseconds.  However, the
factor and offset used there weren't being updated when NTP varied the
tick length using adjtimex.

64-bit didn't notice the problem because it had a hook in the 32-bit
adjtimex compat routine that attempted to work out what the generic
timekeeping code would do and alter the factor and offset to match.
However, that code was very complex, and it wasn't clear that it still
matched what the generic code would do.

Now we use the generic current_tick_length() routine that was recently
added to check that the current tick will be as long as we expect; if
not, we recompute the factor and offset.  This keeps gettimeofday and
xtime in sync.  In addition, on each timer interrupt we check that
gettimeofday hasn't got ahead of xtime; if it has, we resync.

Signed-off-by: Paul Mackerras <paulus@samba.org>
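As context for the change below: the lockless path converts a timebase delta to time of day with a 64-bit fixed-point multiply, using a factor (tb_to_xs) and offset (stamp_xsec) that the timer interrupt publishes. What follows is a minimal user-space sketch of that conversion, not the kernel code itself; the struct and function names, the 512 MHz timebase frequency, and the use of GCC's unsigned __int128 are illustrative assumptions.

/*
 * Sketch of a lockless timebase -> time-of-day conversion.
 * "xsec" = 1/2^20 second units, as in the powerpc code; everything
 * else (names, sample numbers) is made up for illustration.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define XSEC_PER_SEC	(1024 * 1024)

/* Snapshot published by the timer interrupt. */
struct gtod_vars {
	uint64_t tb_orig_stamp;	/* timebase value at the reference point */
	uint64_t stamp_xsec;	/* wall-clock time at that point, in xsec */
	uint64_t tb_to_xs;	/* 0.64 fixed point: xsec per timebase tick */
};

/* High 64 bits of a 64x64 -> 128-bit multiply (what mulhdu computes). */
static uint64_t mulhdu(uint64_t a, uint64_t b)
{
	return (uint64_t)(((unsigned __int128)a * b) >> 64);
}

/* Convert a raw timebase reading to seconds and microseconds. */
static void tb_to_timeval(const struct gtod_vars *v, uint64_t tb,
			  uint64_t *sec, uint64_t *usec)
{
	uint64_t xsec = v->stamp_xsec + mulhdu(tb - v->tb_orig_stamp, v->tb_to_xs);

	*sec = xsec / XSEC_PER_SEC;
	*usec = (xsec % XSEC_PER_SEC) * 1000000 / XSEC_PER_SEC;
}

int main(void)
{
	/* Assume a 512 MHz timebase: tb_to_xs = 2^64 * 2^20 / 512e6. */
	struct gtod_vars v = {
		.tb_orig_stamp = 1000000,
		.stamp_xsec = (uint64_t)1140000000 * XSEC_PER_SEC,
		.tb_to_xs = (uint64_t)((((unsigned __int128)XSEC_PER_SEC) << 64)
				       / 512000000),
	};
	uint64_t sec, usec;

	/* 256,000,000 ticks after the stamp = 0.5 s at 512 MHz. */
	tb_to_timeval(&v, v.tb_orig_stamp + 256000000, &sec, &usec);
	printf("%" PRIu64 ".%06" PRIu64 "\n", sec, usec);	/* ~1140000000.500000 */
	return 0;
}

When NTP stretches or shrinks the tick, only tb_to_xs and stamp_xsec need to be republished, which is what the reworked timer_recalc_offset() in the patch below does.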
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/kernel/sys_ppc32.c	  4
-rw-r--r--	arch/powerpc/kernel/time.c	282
2 files changed, 99 insertions(+), 187 deletions(-)
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index 475249dc2350..cd75ab2908fa 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -176,7 +176,6 @@ struct timex32 {
 };
 
 extern int do_adjtimex(struct timex *);
-extern void ppc_adjtimex(void);
 
 asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp)
 {
@@ -209,9 +208,6 @@ asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp)
 
 	ret = do_adjtimex(&txc);
 
-	/* adjust the conversion of TB to time of day to track adjtimex */
-	ppc_adjtimex();
-
 	if(put_user(txc.modes, &utp->modes) ||
 	   __put_user(txc.offset, &utp->offset) ||
 	   __put_user(txc.freq, &utp->freq) ||
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 1886045a2fd8..2a7ddc579379 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -50,6 +50,7 @@
 #include <linux/security.h>
 #include <linux/percpu.h>
 #include <linux/rtc.h>
+#include <linux/jiffies.h>
 
 #include <asm/io.h>
 #include <asm/processor.h>
@@ -99,7 +100,15 @@ EXPORT_SYMBOL(tb_ticks_per_usec);
 unsigned long tb_ticks_per_sec;
 u64 tb_to_xs;
 unsigned tb_to_us;
-unsigned long processor_freq;
+
+#define TICKLEN_SCALE	(SHIFT_SCALE - 10)
+u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
+u64 ticklen_to_xs;	/* 0.64 fraction */
+
+/* If last_tick_len corresponds to about 1/HZ seconds, then
+   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
+#define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
+
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL_GPL(rtc_lock);
 
@@ -113,10 +122,6 @@ extern unsigned long wall_jiffies;
 extern struct timezone sys_tz;
 static long timezone_offset;
 
-void ppc_adjtimex(void);
-
-static unsigned adjusting_time = 0;
-
 unsigned long ppc_proc_freq;
 unsigned long ppc_tb_freq;
 
@@ -178,8 +183,7 @@ static __inline__ void timer_check_rtc(void)
 	 */
 	if (ppc_md.set_rtc_time && ntp_synced() &&
 	    xtime.tv_sec - last_rtc_update >= 659 &&
-	    abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&
-	    jiffies - wall_jiffies == 1) {
+	    abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) {
 		struct rtc_time tm;
 		to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
 		tm.tm_year -= 1900;
@@ -226,15 +230,14 @@ void do_gettimeofday(struct timeval *tv)
 	if (__USE_RTC()) {
 		/* do this the old way */
 		unsigned long flags, seq;
-		unsigned int sec, nsec, usec, lost;
+		unsigned int sec, nsec, usec;
 
 		do {
 			seq = read_seqbegin_irqsave(&xtime_lock, flags);
 			sec = xtime.tv_sec;
 			nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);
-			lost = jiffies - wall_jiffies;
 		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-		usec = nsec / 1000 + lost * (1000000 / HZ);
+		usec = nsec / 1000;
 		while (usec >= 1000000) {
 			usec -= 1000000;
 			++sec;
@@ -248,23 +251,6 @@ void do_gettimeofday(struct timeval *tv)
 
 EXPORT_SYMBOL(do_gettimeofday);
 
-/* Synchronize xtime with do_gettimeofday */
-
-static inline void timer_sync_xtime(unsigned long cur_tb)
-{
-#ifdef CONFIG_PPC64
-	/* why do we do this? */
-	struct timeval my_tv;
-
-	__do_gettimeofday(&my_tv, cur_tb);
-
-	if (xtime.tv_sec <= my_tv.tv_sec) {
-		xtime.tv_sec = my_tv.tv_sec;
-		xtime.tv_nsec = my_tv.tv_usec * 1000;
-	}
-#endif
-}
-
 /*
  * There are two copies of tb_to_xs and stamp_xsec so that no
  * lock is needed to access and use these values in
@@ -323,15 +309,30 @@ static __inline__ void timer_recalc_offset(u64 cur_tb)
 {
 	unsigned long offset;
 	u64 new_stamp_xsec;
+	u64 tlen, t2x;
 
 	if (__USE_RTC())
 		return;
+	tlen = current_tick_length();
 	offset = cur_tb - do_gtod.varp->tb_orig_stamp;
-	if ((offset & 0x80000000u) == 0)
-		return;
-	new_stamp_xsec = do_gtod.varp->stamp_xsec
-		+ mulhdu(offset, do_gtod.varp->tb_to_xs);
-	update_gtod(cur_tb, new_stamp_xsec, do_gtod.varp->tb_to_xs);
+	if (tlen == last_tick_len && offset < 0x80000000u) {
+		/* check that we're still in sync; if not, resync */
+		struct timeval tv;
+		__do_gettimeofday(&tv, cur_tb);
+		if (tv.tv_sec <= xtime.tv_sec &&
+		    (tv.tv_sec < xtime.tv_sec ||
+		     tv.tv_usec * 1000 <= xtime.tv_nsec))
+			return;
+	}
+	if (tlen != last_tick_len) {
+		t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
+		last_tick_len = tlen;
+	} else
+		t2x = do_gtod.varp->tb_to_xs;
+	new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
+	do_div(new_stamp_xsec, 1000000000);
+	new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
+	update_gtod(cur_tb, new_stamp_xsec, t2x);
 }
 
 #ifdef CONFIG_SMP
@@ -462,13 +463,10 @@ void timer_interrupt(struct pt_regs * regs)
 		write_seqlock(&xtime_lock);
 		tb_last_jiffy += tb_ticks_per_jiffy;
 		tb_last_stamp = per_cpu(last_jiffy, cpu);
-		timer_recalc_offset(tb_last_jiffy);
 		do_timer(regs);
-		timer_sync_xtime(tb_last_jiffy);
+		timer_recalc_offset(tb_last_jiffy);
 		timer_check_rtc();
 		write_sequnlock(&xtime_lock);
-		if (adjusting_time && (time_adjust == 0))
-			ppc_adjtimex();
 	}
 
 	next_dec = tb_ticks_per_jiffy - ticks;
@@ -492,16 +490,18 @@ void timer_interrupt(struct pt_regs * regs)
 
 void wakeup_decrementer(void)
 {
-	int i;
+	unsigned long ticks;
 
-	set_dec(tb_ticks_per_jiffy);
 	/*
-	 * We don't expect this to be called on a machine with a 601,
-	 * so using get_tbl is fine.
+	 * The timebase gets saved on sleep and restored on wakeup,
+	 * so all we need to do is to reset the decrementer.
 	 */
-	tb_last_stamp = tb_last_jiffy = get_tb();
-	for_each_cpu(i)
-		per_cpu(last_jiffy, i) = tb_last_stamp;
+	ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
+	if (ticks < tb_ticks_per_jiffy)
+		ticks = tb_ticks_per_jiffy - ticks;
+	else
+		ticks = 1;
+	set_dec(ticks);
 }
 
 #ifdef CONFIG_SMP
@@ -541,8 +541,8 @@ int do_settimeofday(struct timespec *tv)
 	time_t wtm_sec, new_sec = tv->tv_sec;
 	long wtm_nsec, new_nsec = tv->tv_nsec;
 	unsigned long flags;
-	long int tb_delta;
-	u64 new_xsec, tb_delta_xs;
+	u64 new_xsec;
+	unsigned long tb_delta;
 
 	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
@@ -563,9 +563,19 @@ int do_settimeofday(struct timespec *tv)
 		first_settimeofday = 0;
 	}
 #endif
+
+	/*
+	 * Subtract off the number of nanoseconds since the
+	 * beginning of the last tick.
+	 * Note that since we don't increment jiffies_64 anywhere other
+	 * than in do_timer (since we don't have a lost tick problem),
+	 * wall_jiffies will always be the same as jiffies,
+	 * and therefore the (jiffies - wall_jiffies) computation
+	 * has been removed.
+	 */
 	tb_delta = tb_ticks_since(tb_last_stamp);
-	tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
-	tb_delta_xs = mulhdu(tb_delta, do_gtod.varp->tb_to_xs);
+	tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs);	/* in xsec */
+	new_nsec -= SCALE_XSEC(tb_delta, 1000000000);
 
 	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
 	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);
@@ -580,12 +590,12 @@ int do_settimeofday(struct timespec *tv)
 
 	ntp_clear();
 
-	new_xsec = 0;
-	if (new_nsec != 0) {
-		new_xsec = (u64)new_nsec * XSEC_PER_SEC;
+	new_xsec = xtime.tv_nsec;
+	if (new_xsec != 0) {
+		new_xsec *= XSEC_PER_SEC;
 		do_div(new_xsec, NSEC_PER_SEC);
 	}
-	new_xsec += (u64)new_sec * XSEC_PER_SEC - tb_delta_xs;
+	new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC;
 	update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);
 
 	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
@@ -671,7 +681,7 @@ void __init time_init(void)
 	unsigned long flags;
 	unsigned long tm = 0;
 	struct div_result res;
-	u64 scale;
+	u64 scale, x;
 	unsigned shift;
 
 	if (ppc_md.time_init != NULL)
@@ -693,11 +703,36 @@ void __init time_init(void)
 	}
 
 	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
-	tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
+	tb_ticks_per_sec = ppc_tb_freq;
 	tb_ticks_per_usec = ppc_tb_freq / 1000000;
 	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
-	div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res);
-	tb_to_xs = res.result_low;
+
+	/*
+	 * Calculate the length of each tick in ns.  It will not be
+	 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
+	 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
+	 * rounded up.
+	 */
+	x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
+	do_div(x, ppc_tb_freq);
+	tick_nsec = x;
+	last_tick_len = x << TICKLEN_SCALE;
+
+	/*
+	 * Compute ticklen_to_xs, which is a factor which gets multiplied
+	 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
+	 * It is computed as:
+	 *   ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
+	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
+	 * so as to give the result as a 0.64 fixed-point fraction.
+	 */
+	div128_by_32(1ULL << (64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT), 0,
+		     tb_ticks_per_jiffy, &res);
+	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
+	ticklen_to_xs = res.result_low;
+
+	/* Compute tb_to_xs from tick_nsec */
+	tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
 
 	/*
 	 * Compute scale factor for sched_clock.
@@ -724,6 +759,14 @@ void __init time_init(void)
 		tm = get_boot_time();
 
 	write_seqlock_irqsave(&xtime_lock, flags);
+
+	/* If platform provided a timezone (pmac), we correct the time */
+	if (timezone_offset) {
+		sys_tz.tz_minuteswest = -timezone_offset / 60;
+		sys_tz.tz_dsttime = 0;
+		tm -= timezone_offset;
+	}
+
 	xtime.tv_sec = tm;
 	xtime.tv_nsec = 0;
 	do_gtod.varp = &do_gtod.vars[0];
@@ -738,18 +781,11 @@ void __init time_init(void)
 	vdso_data->tb_orig_stamp = tb_last_jiffy;
 	vdso_data->tb_update_count = 0;
 	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
-	vdso_data->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
+	vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
 	vdso_data->tb_to_xs = tb_to_xs;
 
 	time_freq = 0;
 
-	/* If platform provided a timezone (pmac), we correct the time */
-	if (timezone_offset) {
-		sys_tz.tz_minuteswest = -timezone_offset / 60;
-		sys_tz.tz_dsttime = 0;
-		xtime.tv_sec -= timezone_offset;
-	}
-
 	last_rtc_update = xtime.tv_sec;
 	set_normalized_timespec(&wall_to_monotonic,
 				-xtime.tv_sec, -xtime.tv_nsec);
@@ -759,126 +795,6 @@ void __init time_init(void)
 	set_dec(tb_ticks_per_jiffy);
 }
 
-/*
- * After adjtimex is called, adjust the conversion of tb ticks
- * to microseconds to keep do_gettimeofday synchronized
- * with ntpd.
- *
- * Use the time_adjust, time_freq and time_offset computed by adjtimex to
- * adjust the frequency.
- */
-
-/* #define DEBUG_PPC_ADJTIMEX 1 */
-
-void ppc_adjtimex(void)
-{
-#ifdef CONFIG_PPC64
-	unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec,
-		new_tb_to_xs, new_xsec, new_stamp_xsec;
-	unsigned long tb_ticks_per_sec_delta;
-	long delta_freq, ltemp;
-	struct div_result divres;
-	unsigned long flags;
-	long singleshot_ppm = 0;
-
-	/*
-	 * Compute parts per million frequency adjustment to
-	 * accomplish the time adjustment implied by time_offset to be
-	 * applied over the elapsed time indicated by time_constant.
-	 * Use SHIFT_USEC to get it into the same units as
-	 * time_freq.
-	 */
-	if ( time_offset < 0 ) {
-		ltemp = -time_offset;
-		ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
-		ltemp >>= SHIFT_KG + time_constant;
-		ltemp = -ltemp;
-	} else {
-		ltemp = time_offset;
-		ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
-		ltemp >>= SHIFT_KG + time_constant;
-	}
-
-	/* If there is a single shot time adjustment in progress */
-	if ( time_adjust ) {
-#ifdef DEBUG_PPC_ADJTIMEX
-		printk("ppc_adjtimex: ");
-		if ( adjusting_time == 0 )
-			printk("starting ");
-		printk("single shot time_adjust = %ld\n", time_adjust);
-#endif
-
-		adjusting_time = 1;
-
-		/*
-		 * Compute parts per million frequency adjustment
-		 * to match time_adjust
-		 */
-		singleshot_ppm = tickadj * HZ;
-		/*
-		 * The adjustment should be tickadj*HZ to match the code in
-		 * linux/kernel/timer.c, but experiments show that this is too
-		 * large. 3/4 of tickadj*HZ seems about right
-		 */
-		singleshot_ppm -= singleshot_ppm / 4;
-		/* Use SHIFT_USEC to get it into the same units as time_freq */
-		singleshot_ppm <<= SHIFT_USEC;
-		if ( time_adjust < 0 )
-			singleshot_ppm = -singleshot_ppm;
-	}
-	else {
-#ifdef DEBUG_PPC_ADJTIMEX
-		if ( adjusting_time )
-			printk("ppc_adjtimex: ending single shot time_adjust\n");
-#endif
-		adjusting_time = 0;
-	}
-
-	/* Add up all of the frequency adjustments */
-	delta_freq = time_freq + ltemp + singleshot_ppm;
-
-	/*
-	 * Compute a new value for tb_ticks_per_sec based on
-	 * the frequency adjustment
-	 */
-	den = 1000000 * (1 << (SHIFT_USEC - 8));
-	if ( delta_freq < 0 ) {
-		tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den;
-		new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta;
-	}
-	else {
-		tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den;
-		new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta;
-	}
-
-#ifdef DEBUG_PPC_ADJTIMEX
-	printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm);
-	printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec);
-#endif
-
-	/*
-	 * Compute a new value of tb_to_xs (used to convert tb to
-	 * microseconds) and a new value of stamp_xsec which is the
-	 * time (in 1/2^20 second units) corresponding to
-	 * tb_orig_stamp.  This new value of stamp_xsec compensates
-	 * for the change in frequency (implied by the new tb_to_xs)
-	 * which guarantees that the current time remains the same.
-	 */
-	write_seqlock_irqsave( &xtime_lock, flags );
-	tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp;
-	div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres);
-	new_tb_to_xs = divres.result_low;
-	new_xsec = mulhdu(tb_ticks, new_tb_to_xs);
-
-	old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs);
-	new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec;
-
-	update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs);
-
-	write_sequnlock_irqrestore( &xtime_lock, flags );
-#endif /* CONFIG_PPC64 */
-}
-
 
 #define FEBRUARY	2
 #define STARTOFTIME	1970