Diffstat (limited to 'arch/powerpc/kernel/time.c')
-rw-r--r--	arch/powerpc/kernel/time.c	503
1 file changed, 227 insertions(+), 276 deletions(-)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index c627cf86d1e3..9368da371f36 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -65,24 +65,68 @@
 #include <asm/div64.h>
 #include <asm/smp.h>
 #include <asm/vdso_datapage.h>
-#ifdef CONFIG_PPC64
 #include <asm/firmware.h>
-#endif
 #ifdef CONFIG_PPC_ISERIES
 #include <asm/iseries/it_lp_queue.h>
 #include <asm/iseries/hv_call_xm.h>
 #endif
-#include <asm/smp.h>
 
-/* keep track of when we need to update the rtc */
-time_t last_rtc_update;
+/* powerpc clocksource/clockevent code */
+
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+
+static cycle_t rtc_read(void);
+static struct clocksource clocksource_rtc = {
+	.name         = "rtc",
+	.rating       = 400,
+	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
+	.mask         = CLOCKSOURCE_MASK(64),
+	.shift        = 22,
+	.mult         = 0,	/* To be filled in */
+	.read         = rtc_read,
+};
+
+static cycle_t timebase_read(void);
+static struct clocksource clocksource_timebase = {
+	.name         = "timebase",
+	.rating       = 400,
+	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
+	.mask         = CLOCKSOURCE_MASK(64),
+	.shift        = 22,
+	.mult         = 0,	/* To be filled in */
+	.read         = timebase_read,
+};
+
+#define DECREMENTER_MAX	0x7fffffff
+
+static int decrementer_set_next_event(unsigned long evt,
+				      struct clock_event_device *dev);
+static void decrementer_set_mode(enum clock_event_mode mode,
+				 struct clock_event_device *dev);
+
+static struct clock_event_device decrementer_clockevent = {
+	.name           = "decrementer",
+	.rating         = 200,
+	.shift          = 16,
+	.mult           = 0,	/* To be filled in */
+	.irq            = 0,
+	.set_next_event = decrementer_set_next_event,
+	.set_mode       = decrementer_set_mode,
+	.features       = CLOCK_EVT_FEAT_ONESHOT,
+};
+
+static DEFINE_PER_CPU(struct clock_event_device, decrementers);
+void init_decrementer_clockevent(void);
+static DEFINE_PER_CPU(u64, decrementer_next_tb);
+
 #ifdef CONFIG_PPC_ISERIES
 static unsigned long __initdata iSeries_recal_titan;
 static signed long __initdata iSeries_recal_tb;
-#endif
 
-/* The decrementer counts down by 128 every 128ns on a 601. */
-#define DECREMENTER_COUNT_601	(1000000000 / HZ)
+/* Forward declaration is only needed for iSeries compiles */
+void __init clocksource_init(void);
+#endif
 
 #define XSEC_PER_SEC (1024*1024)
 
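The clocksource definitions above deliberately leave .mult at zero with .shift fixed at 22; clocksource_init() later in this patch fills in mult from the measured timebase frequency. As a rough, self-contained sketch of the fixed-point conversion this sets up (mirroring the arithmetic of clocksource_hz2mult(); the 512 MHz timebase frequency below is an assumed example, not a value from this patch):

```c
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Same idea as the kernel's clocksource_hz2mult():
 * mult = (NSEC_PER_SEC << shift) / hz, rounded to nearest,
 * so that ns = (cycles * mult) >> shift.
 */
static uint32_t hz2mult(uint32_t hz, uint32_t shift)
{
	uint64_t tmp = NSEC_PER_SEC << shift;

	tmp += hz / 2;			/* round to nearest */
	return (uint32_t)(tmp / hz);
}

int main(void)
{
	uint32_t hz = 512000000;	/* assumed example timebase, in Hz */
	uint32_t shift = 22;		/* matches .shift above */
	uint32_t mult = hz2mult(hz, shift);
	uint64_t one_sec_of_cycles = hz;

	printf("mult = %u\n", mult);
	printf("1s of cycles -> %llu ns\n",
	       (unsigned long long)((one_sec_of_cycles * mult) >> shift));
	return 0;
}
```

With the shift fixed at 22, the resulting multiplier stays within 32 bits for any plausible timebase frequency (roughly 1 MHz and up), which is presumably why the patch can hard-code the shift rather than compute it.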
@@ -349,98 +393,6 @@ void udelay(unsigned long usecs)
 }
 EXPORT_SYMBOL(udelay);
 
-static __inline__ void timer_check_rtc(void)
-{
-	/*
-	 * update the rtc when needed, this should be performed on the
-	 * right fraction of a second. Half or full second ?
-	 * Full second works on mk48t59 clocks, others need testing.
-	 * Note that this update is basically only used through
-	 * the adjtimex system calls. Setting the HW clock in
-	 * any other way is a /dev/rtc and userland business.
-	 * This is still wrong by -0.5/+1.5 jiffies because of the
-	 * timer interrupt resolution and possible delay, but here we
-	 * hit a quantization limit which can only be solved by higher
-	 * resolution timers and decoupling time management from timer
-	 * interrupts. This is also wrong on the clocks
-	 * which require being written at the half second boundary.
-	 * We should have an rtc call that only sets the minutes and
-	 * seconds like on Intel to avoid problems with non UTC clocks.
-	 */
-	if (ppc_md.set_rtc_time && ntp_synced() &&
-	    xtime.tv_sec - last_rtc_update >= 659 &&
-	    abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) {
-		struct rtc_time tm;
-		to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
-		tm.tm_year -= 1900;
-		tm.tm_mon -= 1;
-		if (ppc_md.set_rtc_time(&tm) == 0)
-			last_rtc_update = xtime.tv_sec + 1;
-		else
-			/* Try again one minute later */
-			last_rtc_update += 60;
-	}
-}
-
-/*
- * This version of gettimeofday has microsecond resolution.
- */
-static inline void __do_gettimeofday(struct timeval *tv)
-{
-	unsigned long sec, usec;
-	u64 tb_ticks, xsec;
-	struct gettimeofday_vars *temp_varp;
-	u64 temp_tb_to_xs, temp_stamp_xsec;
-
-	/*
-	 * These calculations are faster (gets rid of divides)
-	 * if done in units of 1/2^20 rather than microseconds.
-	 * The conversion to microseconds at the end is done
-	 * without a divide (and in fact, without a multiply)
-	 */
-	temp_varp = do_gtod.varp;
-
-	/* Sampling the time base must be done after loading
-	 * do_gtod.varp in order to avoid racing with update_gtod.
-	 */
-	data_barrier(temp_varp);
-	tb_ticks = get_tb() - temp_varp->tb_orig_stamp;
-	temp_tb_to_xs = temp_varp->tb_to_xs;
-	temp_stamp_xsec = temp_varp->stamp_xsec;
-	xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
-	sec = xsec / XSEC_PER_SEC;
-	usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);
-	usec = SCALE_XSEC(usec, 1000000);
-
-	tv->tv_sec = sec;
-	tv->tv_usec = usec;
-}
-
-void do_gettimeofday(struct timeval *tv)
-{
-	if (__USE_RTC()) {
-		/* do this the old way */
-		unsigned long flags, seq;
-		unsigned int sec, nsec, usec;
-
-		do {
-			seq = read_seqbegin_irqsave(&xtime_lock, flags);
-			sec = xtime.tv_sec;
-			nsec = xtime.tv_nsec + tb_ticks_since(tb_last_jiffy);
-		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-		usec = nsec / 1000;
-		while (usec >= 1000000) {
-			usec -= 1000000;
-			++sec;
-		}
-		tv->tv_sec = sec;
-		tv->tv_usec = usec;
-		return;
-	}
-	__do_gettimeofday(tv);
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
 
 /*
  * There are two copies of tb_to_xs and stamp_xsec so that no
@@ -486,56 +438,6 @@ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
 	++(vdso_data->tb_update_count);
 }
 
-/*
- * When the timebase - tb_orig_stamp gets too big, we do a manipulation
- * between tb_orig_stamp and stamp_xsec. The goal here is to keep the
- * difference tb - tb_orig_stamp small enough to always fit inside a
- * 32 bits number. This is a requirement of our fast 32 bits userland
- * implementation in the vdso. If we "miss" a call to this function
- * (interrupt latency, CPU locked in a spinlock, ...) and we end up
- * with a too big difference, then the vdso will fallback to calling
- * the syscall
- */
-static __inline__ void timer_recalc_offset(u64 cur_tb)
-{
-	unsigned long offset;
-	u64 new_stamp_xsec;
-	u64 tlen, t2x;
-	u64 tb, xsec_old, xsec_new;
-	struct gettimeofday_vars *varp;
-
-	if (__USE_RTC())
-		return;
-	tlen = current_tick_length();
-	offset = cur_tb - do_gtod.varp->tb_orig_stamp;
-	if (tlen == last_tick_len && offset < 0x80000000u)
-		return;
-	if (tlen != last_tick_len) {
-		t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
-		last_tick_len = tlen;
-	} else
-		t2x = do_gtod.varp->tb_to_xs;
-	new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
-	do_div(new_stamp_xsec, 1000000000);
-	new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
-
-	++vdso_data->tb_update_count;
-	smp_mb();
-
-	/*
-	 * Make sure time doesn't go backwards for userspace gettimeofday.
-	 */
-	tb = get_tb();
-	varp = do_gtod.varp;
-	xsec_old = mulhdu(tb - varp->tb_orig_stamp, varp->tb_to_xs)
-		+ varp->stamp_xsec;
-	xsec_new = mulhdu(tb - cur_tb, t2x) + new_stamp_xsec;
-	if (xsec_new < xsec_old)
-		new_stamp_xsec += xsec_old - xsec_new;
-
-	update_gtod(cur_tb, new_stamp_xsec, t2x);
-}
-
 #ifdef CONFIG_SMP
 unsigned long profile_pc(struct pt_regs *regs)
 {
@@ -607,6 +509,8 @@ static int __init iSeries_tb_recal(void)
 	iSeries_recal_titan = titan;
 	iSeries_recal_tb = tb;
 
+	/* Called here as now we know accurate values for the timebase */
+	clocksource_init();
 	return 0;
 }
 late_initcall(iSeries_tb_recal);
@@ -636,20 +540,30 @@ void __init iSeries_time_init_early(void)
 void timer_interrupt(struct pt_regs * regs)
 {
 	struct pt_regs *old_regs;
-	int next_dec;
 	int cpu = smp_processor_id();
-	unsigned long ticks;
-	u64 tb_next_jiffy;
+	struct clock_event_device *evt = &per_cpu(decrementers, cpu);
+	u64 now;
+
+	/* Ensure a positive value is written to the decrementer, or else
+	 * some CPUs will continue to take decrementer exceptions */
+	set_dec(DECREMENTER_MAX);
 
 #ifdef CONFIG_PPC32
 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
 		do_IRQ(regs);
 #endif
 
+	now = get_tb_or_rtc();
+	if (now < per_cpu(decrementer_next_tb, cpu)) {
+		/* not time for this event yet */
+		now = per_cpu(decrementer_next_tb, cpu) - now;
+		if (now <= DECREMENTER_MAX)
+			set_dec((unsigned int)now - 1);
+		return;
+	}
 	old_regs = set_irq_regs(regs);
 	irq_enter();
 
-	profile_tick(CPU_PROFILING);
 	calculate_steal_time();
 
 #ifdef CONFIG_PPC_ISERIES
@@ -657,46 +571,20 @@ void timer_interrupt(struct pt_regs * regs)
 		get_lppaca()->int_dword.fields.decr_int = 0;
 #endif
 
-	while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
-	       >= tb_ticks_per_jiffy) {
-		/* Update last_jiffy */
-		per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
-		/* Handle RTCL overflow on 601 */
-		if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
-			per_cpu(last_jiffy, cpu) -= 1000000000;
-
-		/*
-		 * We cannot disable the decrementer, so in the period
-		 * between this cpu's being marked offline in cpu_online_map
-		 * and calling stop-self, it is taking timer interrupts.
-		 * Avoid calling into the scheduler rebalancing code if this
-		 * is the case.
-		 */
-		if (!cpu_is_offline(cpu))
-			account_process_time(regs);
-
-		/*
-		 * No need to check whether cpu is offline here; boot_cpuid
-		 * should have been fixed up by now.
-		 */
-		if (cpu != boot_cpuid)
-			continue;
+	/*
+	 * We cannot disable the decrementer, so in the period
+	 * between this cpu's being marked offline in cpu_online_map
+	 * and calling stop-self, it is taking timer interrupts.
+	 * Avoid calling into the scheduler rebalancing code if this
+	 * is the case.
+	 */
+	if (!cpu_is_offline(cpu))
+		account_process_time(regs);
 
-		write_seqlock(&xtime_lock);
-		tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
-		if (__USE_RTC() && tb_next_jiffy >= 1000000000)
-			tb_next_jiffy -= 1000000000;
-		if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
-			tb_last_jiffy = tb_next_jiffy;
-			do_timer(1);
-			timer_recalc_offset(tb_last_jiffy);
-			timer_check_rtc();
-		}
-		write_sequnlock(&xtime_lock);
-	}
-
-	next_dec = tb_ticks_per_jiffy - ticks;
-	set_dec(next_dec);
+	if (evt->event_handler)
+		evt->event_handler(evt);
+	else
+		evt->set_next_event(DECREMENTER_MAX, evt);
 
 #ifdef CONFIG_PPC_ISERIES
 	if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
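The reworked timer_interrupt() above no longer advances jiffies itself; it only re-arms the decrementer and hands off to the registered clockevent handler. The early-exit branch covers the case where this interrupt fired before the next scheduled event (for instance because the 31-bit decrementer could not span the full interval and was previously loaded with a clamped value), so the remaining delta is simply programmed again. A small host-side model of that clamping decision (the function name is illustrative; DECREMENTER_MAX mirrors the value defined earlier in the patch):

```c
#include <stdint.h>
#include <stdio.h>

#define DECREMENTER_MAX 0x7fffffff	/* largest positive signed 32-bit count */

/* Model of the re-arm decision: given the current timebase and the
 * absolute timebase value of the next pending event, decide what to
 * load into the decrementer.  The hardware raises the exception on
 * the 0 -> -1 transition, hence the "- 1" when a count is set.
 */
static void rearm(uint64_t now, uint64_t next_tb)
{
	if (now >= next_tb) {
		printf("event due: run the clockevent handler\n");
		return;
	}

	uint64_t delta = next_tb - now;

	if (delta <= DECREMENTER_MAX)
		printf("re-arm decrementer with %u\n", (unsigned)(delta - 1));
	else
		printf("delta too large: keep DECREMENTER_MAX and retry on "
		       "the next interrupt\n");
}

int main(void)
{
	rearm(1000, 900);		  /* already due */
	rearm(1000, 5000);		  /* short delta: programs 3999 */
	rearm(1000, 1000 + (1ULL << 40)); /* beyond 31 bits: clamp and wait */
	return 0;
}
```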
@@ -762,71 +650,6 @@ unsigned long long sched_clock(void)
 	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
 }
 
-int do_settimeofday(struct timespec *tv)
-{
-	time_t wtm_sec, new_sec = tv->tv_sec;
-	long wtm_nsec, new_nsec = tv->tv_nsec;
-	unsigned long flags;
-	u64 new_xsec;
-	unsigned long tb_delta;
-
-	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-		return -EINVAL;
-
-	write_seqlock_irqsave(&xtime_lock, flags);
-
-	/*
-	 * Updating the RTC is not the job of this code. If the time is
-	 * stepped under NTP, the RTC will be updated after STA_UNSYNC
-	 * is cleared. Tools like clock/hwclock either copy the RTC
-	 * to the system time, in which case there is no point in writing
-	 * to the RTC again, or write to the RTC but then they don't call
-	 * settimeofday to perform this operation.
-	 */
-
-	/* Make userspace gettimeofday spin until we're done. */
-	++vdso_data->tb_update_count;
-	smp_mb();
-
-	/*
-	 * Subtract off the number of nanoseconds since the
-	 * beginning of the last tick.
-	 */
-	tb_delta = tb_ticks_since(tb_last_jiffy);
-	tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
-	new_nsec -= SCALE_XSEC(tb_delta, 1000000000);
-
-	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);
-
-	set_normalized_timespec(&xtime, new_sec, new_nsec);
-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
-	/* In case of a large backwards jump in time with NTP, we want the
-	 * clock to be updated as soon as the PLL is again in lock.
-	 */
-	last_rtc_update = new_sec - 658;
-
-	ntp_clear();
-
-	new_xsec = xtime.tv_nsec;
-	if (new_xsec != 0) {
-		new_xsec *= XSEC_PER_SEC;
-		do_div(new_xsec, NSEC_PER_SEC);
-	}
-	new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC;
-	update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);
-
-	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
-	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
-
-	write_sequnlock_irqrestore(&xtime_lock, flags);
-	clock_was_set();
-	return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-
 static int __init get_freq(char *name, int cells, unsigned long *val)
 {
 	struct device_node *cpu;
@@ -869,7 +692,7 @@ void __init generic_calibrate_decr(void)
 		       "(not found)\n");
 	}
 
-#ifdef CONFIG_BOOKE
+#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
 	/* Set the time base to zero */
 	mtspr(SPRN_TBWL, 0);
 	mtspr(SPRN_TBWU, 0);
@@ -882,12 +705,35 @@ void __init generic_calibrate_decr(void)
 #endif
 }
 
-unsigned long get_boot_time(void)
+int update_persistent_clock(struct timespec now)
 {
 	struct rtc_time tm;
 
-	if (ppc_md.get_boot_time)
-		return ppc_md.get_boot_time();
+	if (!ppc_md.set_rtc_time)
+		return 0;
+
+	to_tm(now.tv_sec + 1 + timezone_offset, &tm);
+	tm.tm_year -= 1900;
+	tm.tm_mon -= 1;
+
+	return ppc_md.set_rtc_time(&tm);
+}
+
+unsigned long read_persistent_clock(void)
+{
+	struct rtc_time tm;
+	static int first = 1;
+
+	/* XXX this is a little fragile but will work okay in the short term */
+	if (first) {
+		first = 0;
+		if (ppc_md.time_init)
+			timezone_offset = ppc_md.time_init();
+
+		/* get_boot_time() isn't guaranteed to be safe to call late */
+		if (ppc_md.get_boot_time)
+			return ppc_md.get_boot_time() - timezone_offset;
+	}
 	if (!ppc_md.get_rtc_time)
 		return 0;
 	ppc_md.get_rtc_time(&tm);
@@ -895,18 +741,128 @@ unsigned long get_boot_time(void)
 		      tm.tm_hour, tm.tm_min, tm.tm_sec);
 }
 
+/* clocksource code */
+static cycle_t rtc_read(void)
+{
+	return (cycle_t)get_rtc();
+}
+
+static cycle_t timebase_read(void)
+{
+	return (cycle_t)get_tb();
+}
+
+void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
+{
+	u64 t2x, stamp_xsec;
+
+	if (clock != &clocksource_timebase)
+		return;
+
+	/* Make userspace gettimeofday spin until we're done. */
+	++vdso_data->tb_update_count;
+	smp_mb();
+
+	/* XXX this assumes clock->shift == 22 */
+	/* 4611686018 ~= 2^(20+64-22) / 1e9 */
+	t2x = (u64) clock->mult * 4611686018ULL;
+	stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
+	do_div(stamp_xsec, 1000000000);
+	stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
+	update_gtod(clock->cycle_last, stamp_xsec, t2x);
+}
+
+void update_vsyscall_tz(void)
+{
+	/* Make userspace gettimeofday spin until we're done. */
+	++vdso_data->tb_update_count;
+	smp_mb();
+	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
+	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
+	smp_mb();
+	++vdso_data->tb_update_count;
+}
+
+void __init clocksource_init(void)
+{
+	struct clocksource *clock;
+
+	if (__USE_RTC())
+		clock = &clocksource_rtc;
+	else
+		clock = &clocksource_timebase;
+
+	clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift);
+
+	if (clocksource_register(clock)) {
+		printk(KERN_ERR "clocksource: %s is already registered\n",
+		       clock->name);
+		return;
+	}
+
+	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
+	       clock->name, clock->mult, clock->shift);
+}
+
+static int decrementer_set_next_event(unsigned long evt,
+				      struct clock_event_device *dev)
+{
+	__get_cpu_var(decrementer_next_tb) = get_tb_or_rtc() + evt;
+	/* The decrementer interrupts on the 0 -> -1 transition */
+	if (evt)
+		--evt;
+	set_dec(evt);
+	return 0;
+}
+
+static void decrementer_set_mode(enum clock_event_mode mode,
+				 struct clock_event_device *dev)
+{
+	if (mode != CLOCK_EVT_MODE_ONESHOT)
+		decrementer_set_next_event(DECREMENTER_MAX, dev);
+}
+
+static void register_decrementer_clockevent(int cpu)
+{
+	struct clock_event_device *dec = &per_cpu(decrementers, cpu);
+
+	*dec = decrementer_clockevent;
+	dec->cpumask = cpumask_of_cpu(cpu);
+
+	printk(KERN_ERR "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
+	       dec->name, dec->mult, dec->shift, cpu);
+
+	clockevents_register_device(dec);
+}
+
+void init_decrementer_clockevent(void)
+{
+	int cpu = smp_processor_id();
+
+	decrementer_clockevent.mult = div_sc(ppc_tb_freq, NSEC_PER_SEC,
+					     decrementer_clockevent.shift);
+	decrementer_clockevent.max_delta_ns =
+		clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
+	decrementer_clockevent.min_delta_ns = 1000;
+
+	register_decrementer_clockevent(cpu);
+}
+
+void secondary_cpu_time_init(void)
+{
+	/* FIXME: Should make unrelated change to move snapshot_timebase
+	 * call here! */
+	register_decrementer_clockevent(smp_processor_id());
+}
+
 /* This function is only called on the boot processor */
 void __init time_init(void)
 {
 	unsigned long flags;
-	unsigned long tm = 0;
 	struct div_result res;
 	u64 scale, x;
 	unsigned shift;
 
-	if (ppc_md.time_init != NULL)
-		timezone_offset = ppc_md.time_init();
-
 	if (__USE_RTC()) {
 		/* 601 processor: dec counts down by 128 every 128ns */
 		ppc_tb_freq = 1000000000;
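The magic constant in the update_vsyscall() code added above converts the clocksource mult (nanoseconds per timebase tick, scaled by 2^22) into the legacy tb_to_xs format that the vdso expects (xsec per tick, scaled by 2^64, where 1 xsec = 2^-20 s). Working that out: tb_to_xs = mult * 2^(20 + 64 - 22) / 10^9 = mult * (2^62 / 10^9), and floor(2^62 / 10^9) = 4611686018, matching the in-line comment. A quick host-side check of that arithmetic:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* mult is ns-per-tick scaled by 2^22 (clock->shift == 22);
	 * tb_to_xs is xsec-per-tick scaled by 2^64, with 1 xsec = 2^-20 s,
	 * so tb_to_xs = mult * 2^(20 + 64 - 22) / 10^9 = mult * (2^62 / 10^9).
	 */
	uint64_t factor = (1ULL << 62) / 1000000000ULL;

	/* prints 4611686018, the constant used in update_vsyscall() */
	printf("2^62 / 1e9 = %llu\n", (unsigned long long)factor);
	return 0;
}
```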
@@ -981,19 +937,14 @@ void __init time_init(void)
 	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
 	boot_tb = get_tb_or_rtc();
 
-	tm = get_boot_time();
-
 	write_seqlock_irqsave(&xtime_lock, flags);
 
 	/* If platform provided a timezone (pmac), we correct the time */
 	if (timezone_offset) {
 		sys_tz.tz_minuteswest = -timezone_offset / 60;
 		sys_tz.tz_dsttime = 0;
-		tm -= timezone_offset;
 	}
 
-	xtime.tv_sec = tm;
-	xtime.tv_nsec = 0;
 	do_gtod.varp = &do_gtod.vars[0];
 	do_gtod.var_idx = 0;
 	do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
@@ -1011,13 +962,13 @@ void __init time_init(void)
 
 	time_freq = 0;
 
-	last_rtc_update = xtime.tv_sec;
-	set_normalized_timespec(&wall_to_monotonic,
-				-xtime.tv_sec, -xtime.tv_nsec);
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 
-	/* Not exact, but the timer interrupt takes care of this */
-	set_dec(tb_ticks_per_jiffy);
+	/* Register the clocksource, if we're not running on iSeries */
+	if (!firmware_has_feature(FW_FEATURE_ISERIES))
+		clocksource_init();
+
+	init_decrementer_clockevent();
 }
 
 
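For the clockevent registered by init_decrementer_clockevent(), the conversion runs in the opposite direction from the clocksource: mult is decrementer ticks per nanosecond scaled by 2^16, so the generic tick layer can turn a requested delay in ns into ticks with (ns * mult) >> shift, and clockevent_delta2ns() roughly inverts that to report the largest programmable delay. A rough sketch of both computations (the 512 MHz timebase is an assumed example; div_sc() and clockevent_delta2ns() are the kernel helpers the patch calls, approximated here without their rounding/clamping details):

```c
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define DECREMENTER_MAX	0x7fffffffULL

int main(void)
{
	uint64_t tb_freq = 512000000;	/* assumed example timebase, in Hz */
	unsigned int shift = 16;	/* decrementer_clockevent.shift */

	/* div_sc(): ticks per nanosecond, scaled by 2^shift */
	uint64_t mult = (tb_freq << shift) / NSEC_PER_SEC;

	/* nanoseconds -> decrementer ticks, as the tick layer computes it */
	uint64_t ns = 10000000;		/* ask for a 10 ms event */
	uint64_t ticks = (ns * mult) >> shift;

	/* clockevent_delta2ns(): largest delay the 31-bit count can express */
	uint64_t max_ns = (DECREMENTER_MAX << shift) / mult;

	printf("mult = %llu, 10ms -> %llu ticks, max_delta ~ %llu ns\n",
	       (unsigned long long)mult, (unsigned long long)ticks,
	       (unsigned long long)max_ns);
	return 0;
}
```

At 512 MHz this gives a maximum one-shot interval of roughly 4.2 seconds (2^31 / tb_freq), which is why timer_interrupt() has to be able to re-arm the decrementer part-way through longer intervals.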