Diffstat (limited to 'arch/powerpc/kernel/time.c')
-rw-r--r--	arch/powerpc/kernel/time.c	545
1 file changed, 359 insertions(+), 186 deletions(-)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 1886045a2fd8..4a27218a086c 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -50,6 +50,8 @@
 #include <linux/security.h>
 #include <linux/percpu.h>
 #include <linux/rtc.h>
+#include <linux/jiffies.h>
+#include <linux/posix-timers.h>
 
 #include <asm/io.h>
 #include <asm/processor.h>
@@ -97,9 +99,18 @@ unsigned long tb_ticks_per_jiffy;
 unsigned long tb_ticks_per_usec = 100;	/* sane default */
 EXPORT_SYMBOL(tb_ticks_per_usec);
 unsigned long tb_ticks_per_sec;
+EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
 u64 tb_to_xs;
 unsigned tb_to_us;
-unsigned long processor_freq;
+
+#define TICKLEN_SCALE	(SHIFT_SCALE - 10)
+u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
+u64 ticklen_to_xs;	/* 0.64 fraction */
+
+/* If last_tick_len corresponds to about 1/HZ seconds, then
+   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
+#define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
+
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL_GPL(rtc_lock);
 
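A quick sanity check of the TICKLEN arithmetic above, assuming SHIFT_SCALE = 22 (its value in the 2.6-era <linux/timex.h>, which makes TICKLEN_SCALE = 12) and SHIFT_HZ roughly log2(HZ): last_tick_len is about 10^9/HZ nanoseconds scaled by 2^TICKLEN_SCALE, so

$$ \log_2(\mathrm{last\_tick\_len}) \approx 30 - \mathrm{SHIFT\_HZ} + \mathrm{TICKLEN\_SCALE}, \qquad \mathrm{TICKLEN\_SHIFT} = 63 - \log_2(\mathrm{last\_tick\_len}), $$

which places last_tick_len << TICKLEN_SHIFT near 2^63, exactly as the comment claims.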
@@ -113,10 +124,6 @@ extern unsigned long wall_jiffies;
 extern struct timezone sys_tz;
 static long timezone_offset;
 
-void ppc_adjtimex(void);
-
-static unsigned adjusting_time = 0;
-
 unsigned long ppc_proc_freq;
 unsigned long ppc_tb_freq;
 
@@ -130,6 +137,224 @@ unsigned long tb_last_stamp;
  */
 DEFINE_PER_CPU(unsigned long, last_jiffy);
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/*
+ * Factors for converting from cputime_t (timebase ticks) to
+ * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
+ * These are all stored as 0.64 fixed-point binary fractions.
+ */
+u64 __cputime_jiffies_factor;
+EXPORT_SYMBOL(__cputime_jiffies_factor);
+u64 __cputime_msec_factor;
+EXPORT_SYMBOL(__cputime_msec_factor);
+u64 __cputime_sec_factor;
+EXPORT_SYMBOL(__cputime_sec_factor);
+u64 __cputime_clockt_factor;
+EXPORT_SYMBOL(__cputime_clockt_factor);
+
+static void calc_cputime_factors(void)
+{
+	struct div_result res;
+
+	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
+	__cputime_jiffies_factor = res.result_low;
+	div128_by_32(1000, 0, tb_ticks_per_sec, &res);
+	__cputime_msec_factor = res.result_low;
+	div128_by_32(1, 0, tb_ticks_per_sec, &res);
+	__cputime_sec_factor = res.result_low;
+	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
+	__cputime_clockt_factor = res.result_low;
+}
+
+/*
+ * Read the PURR on systems that have it, otherwise the timebase.
+ */
+static u64 read_purr(void)
+{
+	if (cpu_has_feature(CPU_FTR_PURR))
+		return mfspr(SPRN_PURR);
+	return mftb();
+}
+
+/*
+ * Account time for a transition between system, hard irq
+ * or soft irq state.
+ */
+void account_system_vtime(struct task_struct *tsk)
+{
+	u64 now, delta;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	now = read_purr();
+	delta = now - get_paca()->startpurr;
+	get_paca()->startpurr = now;
+	if (!in_interrupt()) {
+		delta += get_paca()->system_time;
+		get_paca()->system_time = 0;
+	}
+	account_system_time(tsk, 0, delta);
+	local_irq_restore(flags);
+}
+
+/*
+ * Transfer the user and system times accumulated in the paca
+ * by the exception entry and exit code to the generic process
+ * user and system time records.
+ * Must be called with interrupts disabled.
+ */
+void account_process_vtime(struct task_struct *tsk)
+{
+	cputime_t utime;
+
+	utime = get_paca()->user_time;
+	get_paca()->user_time = 0;
+	account_user_time(tsk, utime);
+}
+
+static void account_process_time(struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+
+	account_process_vtime(current);
+	run_local_timers();
+	if (rcu_pending(cpu))
+		rcu_check_callbacks(cpu, user_mode(regs));
+	scheduler_tick();
+	run_posix_cpu_timers(current);
+}
+
+#ifdef CONFIG_PPC_SPLPAR
+/*
+ * Stuff for accounting stolen time.
+ */
+struct cpu_purr_data {
+	int	initialized;	/* thread is running */
+	u64	tb0;		/* timebase at origin time */
+	u64	purr0;		/* PURR at origin time */
+	u64	tb;		/* last TB value read */
+	u64	purr;		/* last PURR value read */
+	u64	stolen;		/* stolen time so far */
+	spinlock_t lock;
+};
+
+static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
+
+static void snapshot_tb_and_purr(void *data)
+{
+	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
+
+	p->tb0 = mftb();
+	p->purr0 = mfspr(SPRN_PURR);
+	p->tb = p->tb0;
+	p->purr = 0;
+	wmb();
+	p->initialized = 1;
+}
+
+/*
+ * Called during boot when all cpus have come up.
+ */
+void snapshot_timebases(void)
+{
+	int cpu;
+
+	if (!cpu_has_feature(CPU_FTR_PURR))
+		return;
+	for_each_cpu(cpu)
+		spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock);
+	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
+}
+
+void calculate_steal_time(void)
+{
+	u64 tb, purr, t0;
+	s64 stolen;
+	struct cpu_purr_data *p0, *pme, *phim;
+	int cpu;
+
+	if (!cpu_has_feature(CPU_FTR_PURR))
+		return;
+	cpu = smp_processor_id();
+	pme = &per_cpu(cpu_purr_data, cpu);
+	if (!pme->initialized)
+		return;		/* this can happen in early boot */
+	p0 = &per_cpu(cpu_purr_data, cpu & ~1);
+	phim = &per_cpu(cpu_purr_data, cpu ^ 1);
+	spin_lock(&p0->lock);
+	tb = mftb();
+	purr = mfspr(SPRN_PURR) - pme->purr0;
+	if (!phim->initialized || !cpu_online(cpu ^ 1)) {
+		stolen = (tb - pme->tb) - (purr - pme->purr);
+	} else {
+		t0 = pme->tb0;
+		if (phim->tb0 < t0)
+			t0 = phim->tb0;
+		stolen = phim->tb - t0 - phim->purr - purr - p0->stolen;
+	}
+	if (stolen > 0) {
+		account_steal_time(current, stolen);
+		p0->stolen += stolen;
+	}
+	pme->tb = tb;
+	pme->purr = purr;
+	spin_unlock(&p0->lock);
+}
+
+/*
+ * Must be called before the cpu is added to the online map when
+ * a cpu is being brought up at runtime.
+ */
+static void snapshot_purr(void)
+{
+	int cpu;
+	u64 purr;
+	struct cpu_purr_data *p0, *pme, *phim;
+	unsigned long flags;
+
+	if (!cpu_has_feature(CPU_FTR_PURR))
+		return;
+	cpu = smp_processor_id();
+	pme = &per_cpu(cpu_purr_data, cpu);
+	p0 = &per_cpu(cpu_purr_data, cpu & ~1);
+	phim = &per_cpu(cpu_purr_data, cpu ^ 1);
+	spin_lock_irqsave(&p0->lock, flags);
+	pme->tb = pme->tb0 = mftb();
+	purr = mfspr(SPRN_PURR);
+	if (!phim->initialized) {
+		pme->purr = 0;
+		pme->purr0 = purr;
+	} else {
+		/* set p->purr and p->purr0 for no change in p0->stolen */
+		pme->purr = phim->tb - phim->tb0 - phim->purr - p0->stolen;
+		pme->purr0 = purr - pme->purr;
+	}
+	pme->initialized = 1;
+	spin_unlock_irqrestore(&p0->lock, flags);
+}
+
+#endif /* CONFIG_PPC_SPLPAR */
+
+#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
+#define calc_cputime_factors()
+#define account_process_time(regs)	update_process_times(user_mode(regs))
+#define calculate_steal_time()		do { } while (0)
+#endif
+
+#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
+#define snapshot_purr()			do { } while (0)
+#endif
+
+/*
+ * Called when a cpu comes up after the system has finished booting,
+ * i.e. as a result of a hotplug cpu action.
+ */
+void snapshot_timebase(void)
+{
+	__get_cpu_var(last_jiffy) = get_tb();
+	snapshot_purr();
+}
+
 void __delay(unsigned long loops)
 {
 	unsigned long start;
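The cputime factors added in this hunk are 0.64 binary fractions (rate / tb_ticks_per_sec scaled by 2^64) that are later consumed with a multiply that keeps only the high 64 bits of the product. A minimal userspace sketch of that fixed-point technique, with GCC/Clang's unsigned __int128 standing in for the kernel's div128_by_32() and mulhdu() helpers; the frequencies are illustrative, not values from this patch:

```c
#include <stdint.h>
#include <stdio.h>

/* 0.64 fixed-point fraction numer/denom, i.e. (numer << 64) / denom,
 * standing in for div128_by_32(numer, 0, denom, &res). */
static uint64_t frac64(uint64_t numer, uint32_t denom)
{
	return (uint64_t)(((unsigned __int128)numer << 64) / denom);
}

/* High 64 bits of a 64x64-bit multiply, standing in for mulhdu(). */
static uint64_t mulhi64(uint64_t a, uint64_t b)
{
	return (uint64_t)(((unsigned __int128)a * b) >> 64);
}

int main(void)
{
	uint64_t tb_ticks_per_sec = 512000000;	/* illustrative timebase freq */
	unsigned hz = 250;			/* illustrative HZ */

	/* factor = HZ / tb_ticks_per_sec as a 0.64 fraction */
	uint64_t jiffies_factor = frac64(hz, (uint32_t)tb_ticks_per_sec);

	/* one second of timebase ticks converts to ~HZ jiffies
	 * (the double rounding can lose at most one unit) */
	printf("%llu\n",
	       (unsigned long long)mulhi64(tb_ticks_per_sec, jiffies_factor));
	return 0;
}
```

The appeal of this representation, and presumably why the patch uses it, is that every conversion is a single multiply-high with no division on the hot path.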
@@ -178,8 +403,7 @@ static __inline__ void timer_check_rtc(void)
 	 */
 	if (ppc_md.set_rtc_time && ntp_synced() &&
 	    xtime.tv_sec - last_rtc_update >= 659 &&
-	    abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&
-	    jiffies - wall_jiffies == 1) {
+	    abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) {
 		struct rtc_time tm;
 		to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
 		tm.tm_year -= 1900;
@@ -226,15 +450,14 @@ void do_gettimeofday(struct timeval *tv)
 	if (__USE_RTC()) {
 		/* do this the old way */
 		unsigned long flags, seq;
-		unsigned int sec, nsec, usec, lost;
+		unsigned int sec, nsec, usec;
 
 		do {
 			seq = read_seqbegin_irqsave(&xtime_lock, flags);
 			sec = xtime.tv_sec;
 			nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);
-			lost = jiffies - wall_jiffies;
 		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-		usec = nsec / 1000 + lost * (1000000 / HZ);
+		usec = nsec / 1000;
 		while (usec >= 1000000) {
 			usec -= 1000000;
 			++sec;
@@ -248,23 +471,6 @@ void do_gettimeofday(struct timeval *tv)
 
 EXPORT_SYMBOL(do_gettimeofday);
 
-/* Synchronize xtime with do_gettimeofday */
-
-static inline void timer_sync_xtime(unsigned long cur_tb)
-{
-#ifdef CONFIG_PPC64
-	/* why do we do this? */
-	struct timeval my_tv;
-
-	__do_gettimeofday(&my_tv, cur_tb);
-
-	if (xtime.tv_sec <= my_tv.tv_sec) {
-		xtime.tv_sec = my_tv.tv_sec;
-		xtime.tv_nsec = my_tv.tv_usec * 1000;
-	}
-#endif
-}
-
 /*
  * There are two copies of tb_to_xs and stamp_xsec so that no
  * lock is needed to access and use these values in
@@ -297,9 +503,9 @@ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
 	 * the two values of tb_update_count match and are even then the
 	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
 	 * loops back and reads them again until this criteria is met.
+	 * We expect the caller to have done the first increment of
+	 * vdso_data->tb_update_count already.
 	 */
-	++(vdso_data->tb_update_count);
-	smp_wmb();
 	vdso_data->tb_orig_stamp = new_tb_stamp;
 	vdso_data->stamp_xsec = new_stamp_xsec;
 	vdso_data->tb_to_xs = new_tb_to_xs;
@@ -323,15 +529,40 @@ static __inline__ void timer_recalc_offset(u64 cur_tb)
 {
 	unsigned long offset;
 	u64 new_stamp_xsec;
+	u64 tlen, t2x;
+	u64 tb, xsec_old, xsec_new;
+	struct gettimeofday_vars *varp;
 
 	if (__USE_RTC())
 		return;
+	tlen = current_tick_length();
 	offset = cur_tb - do_gtod.varp->tb_orig_stamp;
-	if ((offset & 0x80000000u) == 0)
+	if (tlen == last_tick_len && offset < 0x80000000u)
 		return;
-	new_stamp_xsec = do_gtod.varp->stamp_xsec
-		+ mulhdu(offset, do_gtod.varp->tb_to_xs);
-	update_gtod(cur_tb, new_stamp_xsec, do_gtod.varp->tb_to_xs);
+	if (tlen != last_tick_len) {
+		t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
+		last_tick_len = tlen;
+	} else
+		t2x = do_gtod.varp->tb_to_xs;
+	new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
+	do_div(new_stamp_xsec, 1000000000);
+	new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
+
+	++vdso_data->tb_update_count;
+	smp_mb();
+
+	/*
+	 * Make sure time doesn't go backwards for userspace gettimeofday.
+	 */
+	tb = get_tb();
+	varp = do_gtod.varp;
+	xsec_old = mulhdu(tb - varp->tb_orig_stamp, varp->tb_to_xs)
+		+ varp->stamp_xsec;
+	xsec_new = mulhdu(tb - cur_tb, t2x) + new_stamp_xsec;
+	if (xsec_new < xsec_old)
+		new_stamp_xsec += xsec_old - xsec_new;
+
+	update_gtod(cur_tb, new_stamp_xsec, t2x);
 }
 
 #ifdef CONFIG_SMP
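For reference, an xsec in this code is 1/2^20 of a second (2^20 xsec per second, per the XSEC_PER_SEC arithmetic in time_init() below), and mulhdu(a, b) yields the high 64 bits of the 128-bit product, so a time computed from a (stamp, factor) pair is

$$ t(tb) = \mathrm{stamp\_xsec} + \left\lfloor \frac{(tb - \mathrm{tb\_orig\_stamp}) \cdot \mathrm{tb\_to\_xs}}{2^{64}} \right\rfloor \ \mathrm{xsec}. $$

The new code evaluates this once with the old pair (xsec_old) and once with the proposed pair (xsec_new) at the same timebase value, and bumps new_stamp_xsec by any shortfall, so switching factors can never make gettimeofday run backwards.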
@@ -381,6 +612,7 @@ static void iSeries_tb_recal(void)
 				new_tb_ticks_per_jiffy, sign, tick_diff );
 		tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
 		tb_ticks_per_sec   = new_tb_ticks_per_sec;
+		calc_cputime_factors();
 		div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
 		do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
 		tb_to_xs = divres.result_low;
@@ -429,6 +661,7 @@ void timer_interrupt(struct pt_regs * regs)
 	irq_enter();
 
 	profile_tick(CPU_PROFILING, regs);
+	calculate_steal_time();
 
 #ifdef CONFIG_PPC_ISERIES
 	get_lppaca()->int_dword.fields.decr_int = 0;
@@ -450,7 +683,7 @@ void timer_interrupt(struct pt_regs * regs)
 		 * is the case.
 		 */
 		if (!cpu_is_offline(cpu))
-			update_process_times(user_mode(regs));
+			account_process_time(regs);
 
 		/*
 		 * No need to check whether cpu is offline here; boot_cpuid
@@ -462,13 +695,10 @@ void timer_interrupt(struct pt_regs * regs)
 		write_seqlock(&xtime_lock);
 		tb_last_jiffy += tb_ticks_per_jiffy;
 		tb_last_stamp = per_cpu(last_jiffy, cpu);
-		timer_recalc_offset(tb_last_jiffy);
 		do_timer(regs);
-		timer_sync_xtime(tb_last_jiffy);
+		timer_recalc_offset(tb_last_jiffy);
 		timer_check_rtc();
 		write_sequnlock(&xtime_lock);
-		if (adjusting_time && (time_adjust == 0))
-			ppc_adjtimex();
 	}
 
 	next_dec = tb_ticks_per_jiffy - ticks;
@@ -492,29 +722,45 @@ void timer_interrupt(struct pt_regs * regs)
 
 void wakeup_decrementer(void)
 {
-	int i;
+	unsigned long ticks;
 
-	set_dec(tb_ticks_per_jiffy);
 	/*
-	 * We don't expect this to be called on a machine with a 601,
-	 * so using get_tbl is fine.
+	 * The timebase gets saved on sleep and restored on wakeup,
+	 * so all we need to do is to reset the decrementer.
 	 */
-	tb_last_stamp = tb_last_jiffy = get_tb();
-	for_each_cpu(i)
-		per_cpu(last_jiffy, i) = tb_last_stamp;
+	ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
+	if (ticks < tb_ticks_per_jiffy)
+		ticks = tb_ticks_per_jiffy - ticks;
+	else
+		ticks = 1;
+	set_dec(ticks);
 }
 
 #ifdef CONFIG_SMP
 void __init smp_space_timers(unsigned int max_cpus)
 {
 	int i;
+	unsigned long half = tb_ticks_per_jiffy / 2;
 	unsigned long offset = tb_ticks_per_jiffy / max_cpus;
 	unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);
 
 	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
 	previous_tb -= tb_ticks_per_jiffy;
+	/*
+	 * The stolen time calculation for POWER5 shared-processor LPAR
+	 * systems works better if the two threads' timebase interrupts
+	 * are staggered by half a jiffy with respect to each other.
+	 */
 	for_each_cpu(i) {
-		if (i != boot_cpuid) {
+		if (i == boot_cpuid)
+			continue;
+		if (i == (boot_cpuid ^ 1))
+			per_cpu(last_jiffy, i) =
+				per_cpu(last_jiffy, boot_cpuid) - half;
+		else if (i & 1)
+			per_cpu(last_jiffy, i) =
+				per_cpu(last_jiffy, i ^ 1) + half;
+		else {
 			previous_tb += offset;
 			per_cpu(last_jiffy, i) = previous_tb;
 		}
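Worked through for a hypothetical four-thread box (boot_cpuid = 0, max_cpus = 4; all values invented for illustration), the loop above leaves the boot cpu's stamp alone, backdates its SMT sibling by half a jiffy, and staggers each remaining even/odd pair the same way. A standalone trace of that placement:

```c
#include <stdio.h>

int main(void)
{
	unsigned long tb_ticks_per_jiffy = 5000000;	/* illustrative */
	unsigned long half = tb_ticks_per_jiffy / 2;
	unsigned long offset = tb_ticks_per_jiffy / 4;	/* max_cpus = 4 */
	unsigned long boot_tb = 1000000000UL;	/* last_jiffy of cpu 0 */
	unsigned long previous_tb = boot_tb - tb_ticks_per_jiffy;
	unsigned long last_jiffy[4];
	int i;

	last_jiffy[0] = boot_tb;		/* boot cpu keeps its stamp */
	last_jiffy[1] = boot_tb - half;		/* boot cpu's sibling */
	previous_tb += offset;
	last_jiffy[2] = previous_tb;		/* next even thread */
	last_jiffy[3] = last_jiffy[2] + half;	/* its sibling */

	for (i = 0; i < 4; i++)
		printf("cpu%d: %lu\n", i, last_jiffy[i]);
	return 0;
}
```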
@@ -541,8 +787,8 @@ int do_settimeofday(struct timespec *tv)
 	time_t wtm_sec, new_sec = tv->tv_sec;
 	long wtm_nsec, new_nsec = tv->tv_nsec;
 	unsigned long flags;
-	long int tb_delta;
-	u64 new_xsec, tb_delta_xs;
+	u64 new_xsec;
+	unsigned long tb_delta;
 
 	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
@@ -563,9 +809,23 @@ int do_settimeofday(struct timespec *tv)
 		first_settimeofday = 0;
 	}
 #endif
+
+	/* Make userspace gettimeofday spin until we're done. */
+	++vdso_data->tb_update_count;
+	smp_mb();
+
+	/*
+	 * Subtract off the number of nanoseconds since the
+	 * beginning of the last tick.
+	 * Note that since we don't increment jiffies_64 anywhere other
+	 * than in do_timer (since we don't have a lost tick problem),
+	 * wall_jiffies will always be the same as jiffies,
+	 * and therefore the (jiffies - wall_jiffies) computation
+	 * has been removed.
+	 */
 	tb_delta = tb_ticks_since(tb_last_stamp);
-	tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
-	tb_delta_xs = mulhdu(tb_delta, do_gtod.varp->tb_to_xs);
+	tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs);	/* in xsec */
+	new_nsec -= SCALE_XSEC(tb_delta, 1000000000);
 
 	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
 	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);
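SCALE_XSEC(v, m), as used here, scales an xsec quantity by m/2^20, so the subtraction converts the partial-tick xsec count into nanoseconds:

$$ \mathrm{ns} = \frac{\mathrm{tb\_delta}_{\mathrm{xsec}} \cdot 10^9}{2^{20}}. $$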
@@ -580,12 +840,12 @@ int do_settimeofday(struct timespec *tv)
 
 	ntp_clear();
 
-	new_xsec = 0;
-	if (new_nsec != 0) {
-		new_xsec = (u64)new_nsec * XSEC_PER_SEC;
+	new_xsec = xtime.tv_nsec;
+	if (new_xsec != 0) {
+		new_xsec *= XSEC_PER_SEC;
 		do_div(new_xsec, NSEC_PER_SEC);
 	}
-	new_xsec += (u64)new_sec * XSEC_PER_SEC - tb_delta_xs;
+	new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC;
 	update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);
 
 	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
@@ -671,7 +931,7 @@ void __init time_init(void)
 	unsigned long flags;
 	unsigned long tm = 0;
 	struct div_result res;
-	u64 scale;
+	u64 scale, x;
 	unsigned shift;
 
 	if (ppc_md.time_init != NULL)
@@ -693,11 +953,43 @@ void __init time_init(void)
 	}
 
 	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
-	tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
+	tb_ticks_per_sec = ppc_tb_freq;
 	tb_ticks_per_usec = ppc_tb_freq / 1000000;
 	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
-	div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res);
-	tb_to_xs = res.result_low;
+	calc_cputime_factors();
+
+	/*
+	 * Calculate the length of each tick in ns.  It will not be
+	 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
+	 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
+	 * rounded up.
+	 */
+	x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
+	do_div(x, ppc_tb_freq);
+	tick_nsec = x;
+	last_tick_len = x << TICKLEN_SCALE;
+
+	/*
+	 * Compute ticklen_to_xs, which is a factor which gets multiplied
+	 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
+	 * It is computed as:
+	 * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
+	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
+	 * which turns out to be N = 51 - SHIFT_HZ.
+	 * This gives the result as a 0.64 fixed-point fraction.
+	 * That value is reduced by an offset amounting to 1 xsec per
+	 * 2^31 timebase ticks to avoid problems with time going backwards
+	 * by 1 xsec when we do timer_recalc_offset due to losing the
+	 * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
+	 * since there are 2^20 xsec in a second.
+	 */
+	div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
+		     tb_ticks_per_jiffy << SHIFT_HZ, &res);
+	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
+	ticklen_to_xs = res.result_low;
+
+	/* Compute tb_to_xs from tick_nsec */
+	tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
 
 	/*
 	 * Compute scale factor for sched_clock.
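The exponent bookkeeping in the ticklen_to_xs comment checks out; substituting TICKLEN_SHIFT from its definition,

$$ N = 64 + 20 - \mathrm{TICKLEN\_SCALE} - \mathrm{TICKLEN\_SHIFT} = 84 - \mathrm{TICKLEN\_SCALE} - (63 - 30 - \mathrm{TICKLEN\_SCALE} + \mathrm{SHIFT\_HZ}) = 51 - \mathrm{SHIFT\_HZ}, $$

and the two chained div128_by_32() calls evaluate exactly (2^51 - ppc_tb_freq) * 2^64 / ((tb_ticks_per_jiffy << SHIFT_HZ) * 10^9), i.e. the 0.64 representation of 2^N / (tb_ticks_per_jiffy * 10^9) with the backwards-time guard folded into the numerator.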
@@ -724,6 +1016,14 @@ void __init time_init(void)
 		tm = get_boot_time();
 
 	write_seqlock_irqsave(&xtime_lock, flags);
+
+	/* If platform provided a timezone (pmac), we correct the time */
+	if (timezone_offset) {
+		sys_tz.tz_minuteswest = -timezone_offset / 60;
+		sys_tz.tz_dsttime = 0;
+		tm -= timezone_offset;
+	}
+
 	xtime.tv_sec = tm;
 	xtime.tv_nsec = 0;
 	do_gtod.varp = &do_gtod.vars[0];
@@ -738,18 +1038,11 @@ void __init time_init(void)
 	vdso_data->tb_orig_stamp = tb_last_jiffy;
 	vdso_data->tb_update_count = 0;
 	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
-	vdso_data->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
+	vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
 	vdso_data->tb_to_xs = tb_to_xs;
 
 	time_freq = 0;
 
-	/* If platform provided a timezone (pmac), we correct the time */
-	if (timezone_offset) {
-		sys_tz.tz_minuteswest = -timezone_offset / 60;
-		sys_tz.tz_dsttime = 0;
-		xtime.tv_sec -= timezone_offset;
-	}
-
 	last_rtc_update = xtime.tv_sec;
 	set_normalized_timespec(&wall_to_monotonic,
 				-xtime.tv_sec, -xtime.tv_nsec);
@@ -759,126 +1052,6 @@ void __init time_init(void)
 	set_dec(tb_ticks_per_jiffy);
 }
 
-/*
- * After adjtimex is called, adjust the conversion of tb ticks
- * to microseconds to keep do_gettimeofday synchronized
- * with ntpd.
- *
- * Use the time_adjust, time_freq and time_offset computed by adjtimex to
- * adjust the frequency.
- */
-
-/* #define DEBUG_PPC_ADJTIMEX 1 */
-
-void ppc_adjtimex(void)
-{
-#ifdef CONFIG_PPC64
-	unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec,
-		new_tb_to_xs, new_xsec, new_stamp_xsec;
-	unsigned long tb_ticks_per_sec_delta;
-	long delta_freq, ltemp;
-	struct div_result divres;
-	unsigned long flags;
-	long singleshot_ppm = 0;
-
-	/*
-	 * Compute parts per million frequency adjustment to
-	 * accomplish the time adjustment implied by time_offset to be
-	 * applied over the elapsed time indicated by time_constant.
-	 * Use SHIFT_USEC to get it into the same units as
-	 * time_freq.
-	 */
-	if ( time_offset < 0 ) {
-		ltemp = -time_offset;
-		ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
-		ltemp >>= SHIFT_KG + time_constant;
-		ltemp = -ltemp;
-	} else {
-		ltemp = time_offset;
-		ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
-		ltemp >>= SHIFT_KG + time_constant;
-	}
-
-	/* If there is a single shot time adjustment in progress */
-	if ( time_adjust ) {
-#ifdef DEBUG_PPC_ADJTIMEX
-		printk("ppc_adjtimex: ");
-		if ( adjusting_time == 0 )
-			printk("starting ");
-		printk("single shot time_adjust = %ld\n", time_adjust);
-#endif
-
-		adjusting_time = 1;
-
-		/*
-		 * Compute parts per million frequency adjustment
-		 * to match time_adjust
-		 */
-		singleshot_ppm = tickadj * HZ;
-		/*
-		 * The adjustment should be tickadj*HZ to match the code in
-		 * linux/kernel/timer.c, but experiments show that this is too
-		 * large. 3/4 of tickadj*HZ seems about right
-		 */
-		singleshot_ppm -= singleshot_ppm / 4;
-		/* Use SHIFT_USEC to get it into the same units as time_freq */
-		singleshot_ppm <<= SHIFT_USEC;
-		if ( time_adjust < 0 )
-			singleshot_ppm = -singleshot_ppm;
-	}
-	else {
-#ifdef DEBUG_PPC_ADJTIMEX
-		if ( adjusting_time )
-			printk("ppc_adjtimex: ending single shot time_adjust\n");
-#endif
-		adjusting_time = 0;
-	}
-
-	/* Add up all of the frequency adjustments */
-	delta_freq = time_freq + ltemp + singleshot_ppm;
-
-	/*
-	 * Compute a new value for tb_ticks_per_sec based on
-	 * the frequency adjustment
-	 */
-	den = 1000000 * (1 << (SHIFT_USEC - 8));
-	if ( delta_freq < 0 ) {
-		tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den;
-		new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta;
-	}
-	else {
-		tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den;
-		new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta;
-	}
-
-#ifdef DEBUG_PPC_ADJTIMEX
-	printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm);
-	printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld  new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec);
-#endif
-
-	/*
-	 * Compute a new value of tb_to_xs (used to convert tb to
-	 * microseconds) and a new value of stamp_xsec which is the
-	 * time (in 1/2^20 second units) corresponding to
-	 * tb_orig_stamp.  This new value of stamp_xsec compensates
-	 * for the change in frequency (implied by the new tb_to_xs)
-	 * which guarantees that the current time remains the same.
-	 */
-	write_seqlock_irqsave( &xtime_lock, flags );
-	tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp;
-	div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres);
-	new_tb_to_xs = divres.result_low;
-	new_xsec = mulhdu(tb_ticks, new_tb_to_xs);
-
-	old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs);
-	new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec;
-
-	update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs);
-
-	write_sequnlock_irqrestore( &xtime_lock, flags );
-#endif /* CONFIG_PPC64 */
-}
-
 
 #define FEBRUARY	2
 #define STARTOFTIME	1970