Diffstat (limited to 'kernel/timer.c')
-rw-r--r--  kernel/timer.c | 342
1 file changed, 138 insertions, 204 deletions
diff --git a/kernel/timer.c b/kernel/timer.c
index 6a2e5f8dc725..fd74268d8663 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -46,6 +46,10 @@ static void time_interpolator_update(long delta_nsec);
 #define time_interpolator_update(x)
 #endif
 
+u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
+
 /*
  * per-CPU timer vector definitions:
  */
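The hunk above moves the definition and export of the master 64-bit tick counter, jiffies_64, into kernel/timer.c. As background only (this is not part of the diff), readers are expected to go through get_jiffies_64() rather than loading jiffies_64 directly, because a 32-bit machine cannot read the full counter atomically; a minimal sketch under that assumption:

#include <linux/jiffies.h>

/*
 * Sketch (assumption, not from this diff): sampling the 64-bit jiffies
 * counter.  get_jiffies_64() hides whatever locking a 32-bit build needs
 * to read the counter consistently; on 64-bit it is a plain load.
 */
static u64 sample_jiffies64(void)
{
	return get_jiffies_64();
}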
@@ -91,30 +95,6 @@ static inline void set_running_timer(tvec_base_t *base,
 #endif
 }
 
-static void check_timer_failed(struct timer_list *timer)
-{
-	static int whine_count;
-	if (whine_count < 16) {
-		whine_count++;
-		printk("Uninitialised timer!\n");
-		printk("This is just a warning. Your computer is OK\n");
-		printk("function=0x%p, data=0x%lx\n",
-			timer->function, timer->data);
-		dump_stack();
-	}
-	/*
-	 * Now fix it up
-	 */
-	timer->magic = TIMER_MAGIC;
-}
-
-static inline void check_timer(struct timer_list *timer)
-{
-	if (timer->magic != TIMER_MAGIC)
-		check_timer_failed(timer);
-}
-
-
 static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
 {
 	unsigned long expires = timer->expires;
@@ -177,7 +157,6 @@ void fastcall init_timer(struct timer_list *timer)
 {
 	timer->entry.next = NULL;
 	timer->base = &per_cpu(tvec_bases, raw_smp_processor_id()).t_base;
-	timer->magic = TIMER_MAGIC;
 }
 EXPORT_SYMBOL(init_timer);
 
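With the magic field gone, init_timer() only has to seed the entry and base pointers, and callers are unchanged. For orientation, a hypothetical caller of the era's timer API, using only functions and fields that appear in this diff (my_timer and my_timeout are illustrative names):

/* Hypothetical driver-side usage; not part of this diff. */
static struct timer_list my_timer;

static void my_timeout(unsigned long data)
{
	/* runs when the timer expires */
}

static void arm_my_timer(void)
{
	init_timer(&my_timer);			/* no timer->magic to initialise any more */
	my_timer.function = my_timeout;
	my_timer.data = 0;
	mod_timer(&my_timer, jiffies + HZ);	/* fire roughly one second from now */
}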
@@ -230,7 +209,6 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 	int ret = 0;
 
 	BUG_ON(!timer->function);
-	check_timer(timer);
 
 	base = lock_timer_base(timer, &flags);
 
@@ -283,9 +261,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
 	unsigned long flags;
 
 	BUG_ON(timer_pending(timer) || !timer->function);
-
-	check_timer(timer);
-
 	spin_lock_irqsave(&base->t_base.lock, flags);
 	timer->base = &base->t_base;
 	internal_add_timer(base, timer);
@@ -316,8 +291,6 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
 {
 	BUG_ON(!timer->function);
 
-	check_timer(timer);
-
 	/*
 	 * This is a common optimization triggered by the
 	 * networking code - if the timer is re-modified
@@ -348,8 +321,6 @@ int del_timer(struct timer_list *timer)
 	unsigned long flags;
 	int ret = 0;
 
-	check_timer(timer);
-
 	if (timer_pending(timer)) {
 		base = lock_timer_base(timer, &flags);
 		if (timer_pending(timer)) {
@@ -412,8 +383,6 @@ out:
  */
 int del_timer_sync(struct timer_list *timer)
 {
-	check_timer(timer);
-
 	for (;;) {
 		int ret = try_to_del_timer_sync(timer);
 		if (ret >= 0)
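What survives in del_timer_sync() is the retry loop visible in the context lines: try_to_del_timer_sync() presumably reports a negative value while the callback is still running on another CPU, so the caller simply retries until it gets a definite answer. A sketch of that pattern under those assumptions (the cpu_relax() call is an assumption and is not shown in this hunk):

/* Sketch of the retry pattern used by del_timer_sync() after this change. */
static int del_timer_sync_sketch(struct timer_list *timer)
{
	for (;;) {
		int ret = try_to_del_timer_sync(timer);

		if (ret >= 0)		/* 0: was not pending, 1: deactivated */
			return ret;
		cpu_relax();		/* assumed: back off while the handler runs elsewhere */
	}
}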
@@ -632,143 +601,118 @@ long time_next_adjust;
  */
 static void second_overflow(void)
 {
 	long ltemp;
 
 	/* Bump the maxerror field */
 	time_maxerror += time_tolerance >> SHIFT_USEC;
-	if ( time_maxerror > NTP_PHASE_LIMIT ) {
+	if (time_maxerror > NTP_PHASE_LIMIT) {
 		time_maxerror = NTP_PHASE_LIMIT;
 		time_status |= STA_UNSYNC;
-	}
-
-	/*
-	 * Leap second processing. If in leap-insert state at
-	 * the end of the day, the system clock is set back one
-	 * second; if in leap-delete state, the system clock is
-	 * set ahead one second. The microtime() routine or
-	 * external clock driver will insure that reported time
-	 * is always monotonic. The ugly divides should be
-	 * replaced.
-	 */
-	switch (time_state) {
-
-	case TIME_OK:
-		if (time_status & STA_INS)
-			time_state = TIME_INS;
-		else if (time_status & STA_DEL)
-			time_state = TIME_DEL;
-		break;
-
-	case TIME_INS:
-		if (xtime.tv_sec % 86400 == 0) {
-			xtime.tv_sec--;
-			wall_to_monotonic.tv_sec++;
-			/* The timer interpolator will make time change gradually instead
-			 * of an immediate jump by one second.
-			 */
-			time_interpolator_update(-NSEC_PER_SEC);
-			time_state = TIME_OOP;
-			clock_was_set();
-			printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
 	}
-	break;
-
-	case TIME_DEL:
-		if ((xtime.tv_sec + 1) % 86400 == 0) {
-			xtime.tv_sec++;
-			wall_to_monotonic.tv_sec--;
-			/* Use of time interpolator for a gradual change of time */
-			time_interpolator_update(NSEC_PER_SEC);
-			time_state = TIME_WAIT;
-			clock_was_set();
-			printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
+
+	/*
+	 * Leap second processing. If in leap-insert state at the end of the
+	 * day, the system clock is set back one second; if in leap-delete
+	 * state, the system clock is set ahead one second. The microtime()
+	 * routine or external clock driver will insure that reported time is
+	 * always monotonic. The ugly divides should be replaced.
+	 */
+	switch (time_state) {
+	case TIME_OK:
+		if (time_status & STA_INS)
+			time_state = TIME_INS;
+		else if (time_status & STA_DEL)
+			time_state = TIME_DEL;
+		break;
+	case TIME_INS:
+		if (xtime.tv_sec % 86400 == 0) {
+			xtime.tv_sec--;
+			wall_to_monotonic.tv_sec++;
+			/*
+			 * The timer interpolator will make time change
+			 * gradually instead of an immediate jump by one second
+			 */
+			time_interpolator_update(-NSEC_PER_SEC);
+			time_state = TIME_OOP;
+			clock_was_set();
+			printk(KERN_NOTICE "Clock: inserting leap second "
+					"23:59:60 UTC\n");
+		}
+		break;
+	case TIME_DEL:
+		if ((xtime.tv_sec + 1) % 86400 == 0) {
+			xtime.tv_sec++;
+			wall_to_monotonic.tv_sec--;
+			/*
+			 * Use of time interpolator for a gradual change of
+			 * time
+			 */
+			time_interpolator_update(NSEC_PER_SEC);
+			time_state = TIME_WAIT;
+			clock_was_set();
+			printk(KERN_NOTICE "Clock: deleting leap second "
+					"23:59:59 UTC\n");
+		}
+		break;
+	case TIME_OOP:
+		time_state = TIME_WAIT;
+		break;
+	case TIME_WAIT:
+		if (!(time_status & (STA_INS | STA_DEL)))
+			time_state = TIME_OK;
 	}
-	break;
-
-	case TIME_OOP:
-		time_state = TIME_WAIT;
-		break;
-
-	case TIME_WAIT:
-		if (!(time_status & (STA_INS | STA_DEL)))
-			time_state = TIME_OK;
-	}
-
-	/*
-	 * Compute the phase adjustment for the next second. In
-	 * PLL mode, the offset is reduced by a fixed factor
-	 * times the time constant. In FLL mode the offset is
-	 * used directly. In either mode, the maximum phase
-	 * adjustment for each second is clamped so as to spread
-	 * the adjustment over not more than the number of
-	 * seconds between updates.
-	 */
-	if (time_offset < 0) {
-		ltemp = -time_offset;
-		if (!(time_status & STA_FLL))
-			ltemp >>= SHIFT_KG + time_constant;
-		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
-			ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
-		time_offset += ltemp;
-		time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
-	} else {
+
+	/*
+	 * Compute the phase adjustment for the next second. In PLL mode, the
+	 * offset is reduced by a fixed factor times the time constant. In FLL
+	 * mode the offset is used directly. In either mode, the maximum phase
+	 * adjustment for each second is clamped so as to spread the adjustment
+	 * over not more than the number of seconds between updates.
+	 */
 	ltemp = time_offset;
 	if (!(time_status & STA_FLL))
-		ltemp >>= SHIFT_KG + time_constant;
-	if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
-		ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
+		ltemp = shift_right(ltemp, SHIFT_KG + time_constant);
+	ltemp = min(ltemp, (MAXPHASE / MINSEC) << SHIFT_UPDATE);
+	ltemp = max(ltemp, -(MAXPHASE / MINSEC) << SHIFT_UPDATE);
 	time_offset -= ltemp;
 	time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
-	}
-
-	/*
-	 * Compute the frequency estimate and additional phase
-	 * adjustment due to frequency error for the next
-	 * second. When the PPS signal is engaged, gnaw on the
-	 * watchdog counter and update the frequency computed by
-	 * the pll and the PPS signal.
-	 */
-	pps_valid++;
-	if (pps_valid == PPS_VALID) {	/* PPS signal lost */
-		pps_jitter = MAXTIME;
-		pps_stabil = MAXFREQ;
-		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
-				 STA_PPSWANDER | STA_PPSERROR);
-	}
-	ltemp = time_freq + pps_freq;
-	if (ltemp < 0)
-		time_adj -= -ltemp >>
-			(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
-	else
-		time_adj += ltemp >>
-			(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
+
+	/*
+	 * Compute the frequency estimate and additional phase adjustment due
+	 * to frequency error for the next second. When the PPS signal is
+	 * engaged, gnaw on the watchdog counter and update the frequency
+	 * computed by the pll and the PPS signal.
+	 */
+	pps_valid++;
+	if (pps_valid == PPS_VALID) {	/* PPS signal lost */
+		pps_jitter = MAXTIME;
+		pps_stabil = MAXFREQ;
+		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
+				 STA_PPSWANDER | STA_PPSERROR);
+	}
+	ltemp = time_freq + pps_freq;
+	time_adj += shift_right(ltemp,(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE));
 
 #if HZ == 100
-	/* Compensate for (HZ==100) != (1 << SHIFT_HZ).
-	 * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14)
-	 */
-	if (time_adj < 0)
-		time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
-	else
-		time_adj += (time_adj >> 2) + (time_adj >> 5);
+	/*
+	 * Compensate for (HZ==100) != (1 << SHIFT_HZ). Add 25% and 3.125% to
+	 * get 128.125; => only 0.125% error (p. 14)
+	 */
+	time_adj += shift_right(time_adj, 2) + shift_right(time_adj, 5);
 #endif
 #if HZ == 250
-	/* Compensate for (HZ==250) != (1 << SHIFT_HZ).
-	 * Add 1.5625% and 0.78125% to get 255.85938; => only 0.05% error (p. 14)
-	 */
-	if (time_adj < 0)
-		time_adj -= (-time_adj >> 6) + (-time_adj >> 7);
-	else
-		time_adj += (time_adj >> 6) + (time_adj >> 7);
+	/*
+	 * Compensate for (HZ==250) != (1 << SHIFT_HZ). Add 1.5625% and
+	 * 0.78125% to get 255.85938; => only 0.05% error (p. 14)
+	 */
+	time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
 #endif
 #if HZ == 1000
-	/* Compensate for (HZ==1000) != (1 << SHIFT_HZ).
-	 * Add 1.5625% and 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
-	 */
-	if (time_adj < 0)
-		time_adj -= (-time_adj >> 6) + (-time_adj >> 7);
-	else
-		time_adj += (time_adj >> 6) + (time_adj >> 7);
+	/*
+	 * Compensate for (HZ==1000) != (1 << SHIFT_HZ). Add 1.5625% and
+	 * 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
+	 */
+	time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
 #endif
 }
 
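Most of the churn in second_overflow() comes from collapsing the old sign-branching pairs ("if (x < 0) ... else ...") into single expressions built on shift_right() plus min()/max(). The helper itself is not shown in this diff; the semantics it is assumed to provide are a right shift that stays correct for negative arguments, roughly along these lines (illustrative definition only, not quoted from the tree):

/*
 * Assumed behaviour of shift_right(): shift toward zero regardless of sign,
 * so "x >> s" and "-(-x >> s)" no longer need separate branches.
 */
#define shift_right(x, s) ({			\
	__typeof__(x) __x = (x);		\
	__typeof__(s) __s = (s);		\
	__x < 0 ? -(-__x >> __s) : __x >> __s;	\
})

With a helper like that, a single line such as time_adj += shift_right(ltemp, SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE) covers both the positive and negative paths that the old code spelled out separately.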
@@ -777,23 +721,20 @@ static void update_wall_time_one_tick(void)
 {
 	long time_adjust_step, delta_nsec;
 
-	if ( (time_adjust_step = time_adjust) != 0 ) {
-		/* We are doing an adjtime thing.
-		 *
-		 * Prepare time_adjust_step to be within bounds.
-		 * Note that a positive time_adjust means we want the clock
-		 * to run faster.
-		 *
-		 * Limit the amount of the step to be in the range
-		 * -tickadj .. +tickadj
-		 */
-		if (time_adjust > tickadj)
-			time_adjust_step = tickadj;
-		else if (time_adjust < -tickadj)
-			time_adjust_step = -tickadj;
-
-		/* Reduce by this step the amount of time left */
-		time_adjust -= time_adjust_step;
+	if ((time_adjust_step = time_adjust) != 0 ) {
+		/*
+		 * We are doing an adjtime thing. Prepare time_adjust_step to
+		 * be within bounds. Note that a positive time_adjust means we
+		 * want the clock to run faster.
+		 *
+		 * Limit the amount of the step to be in the range
+		 * -tickadj .. +tickadj
+		 */
+		time_adjust_step = min(time_adjust_step, (long)tickadj);
+		time_adjust_step = max(time_adjust_step, (long)-tickadj);
+
+		/* Reduce by this step the amount of time left */
+		time_adjust -= time_adjust_step;
 	}
 	delta_nsec = tick_nsec + time_adjust_step * 1000;
 	/*
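The adjtime path gets the same treatment: the if/else ladder that capped time_adjust_step at plus or minus tickadj becomes a min()/max() clamp. A worked example of that clamp under assumed values (tickadj = 500, requested adjustment 1200), shown only to illustrate the equivalence:

/* Sketch: clamping a requested step to +/-tickadj, as the hunk above does. */
static long clamp_adjust_step(long requested, long tickadj)
{
	long step = requested;

	step = min(step, tickadj);	/* 1200 -> 500 */
	step = max(step, -tickadj);	/* a request of -1200 would become -500 */
	return step;
}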
@@ -801,13 +742,8 @@ static void update_wall_time_one_tick(void)
 	 * advance the tick more.
 	 */
 	time_phase += time_adj;
-	if (time_phase <= -FINENSEC) {
-		long ltemp = -time_phase >> (SHIFT_SCALE - 10);
-		time_phase += ltemp << (SHIFT_SCALE - 10);
-		delta_nsec -= ltemp;
-	}
-	else if (time_phase >= FINENSEC) {
-		long ltemp = time_phase >> (SHIFT_SCALE - 10);
+	if ((time_phase >= FINENSEC) || (time_phase <= -FINENSEC)) {
+		long ltemp = shift_right(time_phase, (SHIFT_SCALE - 10));
 		time_phase -= ltemp << (SHIFT_SCALE - 10);
 		delta_nsec += ltemp;
 	}
@@ -1137,8 +1073,8 @@ fastcall signed long __sched schedule_timeout(signed long timeout)
 		if (timeout < 0)
 		{
 			printk(KERN_ERR "schedule_timeout: wrong timeout "
-				"value %lx from %p\n", timeout,
-				__builtin_return_address(0));
+			       "value %lx from %p\n", timeout,
+			       __builtin_return_address(0));
 			current->state = TASK_RUNNING;
 			goto out;
 		}
@@ -1146,12 +1082,8 @@ fastcall signed long __sched schedule_timeout(signed long timeout)
 
 	expire = timeout + jiffies;
 
-	init_timer(&timer);
-	timer.expires = expire;
-	timer.data = (unsigned long) current;
-	timer.function = process_timeout;
-
-	add_timer(&timer);
+	setup_timer(&timer, process_timeout, (unsigned long)current);
+	__mod_timer(&timer, expire);
 	schedule();
 	del_singleshot_timer_sync(&timer);
 
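schedule_timeout() now uses setup_timer() instead of open-coding the callback and data fields; __mod_timer(&timer, expire) then both sets the expiry and queues the timer, replacing the separate expires assignment and add_timer() call. setup_timer() itself is not shown in this diff; it is assumed to be roughly equivalent to the removed lines, along the lines of:

/* Assumed shape of the helper; illustrative only, not quoted from the headers. */
static inline void setup_timer_sketch(struct timer_list *timer,
				      void (*function)(unsigned long),
				      unsigned long data)
{
	timer->function = function;
	timer->data = data;
	init_timer(timer);
}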
@@ -1168,15 +1100,15 @@ EXPORT_SYMBOL(schedule_timeout);
  */
 signed long __sched schedule_timeout_interruptible(signed long timeout)
 {
-       __set_current_state(TASK_INTERRUPTIBLE);
-       return schedule_timeout(timeout);
+	__set_current_state(TASK_INTERRUPTIBLE);
+	return schedule_timeout(timeout);
 }
 EXPORT_SYMBOL(schedule_timeout_interruptible);
 
 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
 {
-       __set_current_state(TASK_UNINTERRUPTIBLE);
-       return schedule_timeout(timeout);
+	__set_current_state(TASK_UNINTERRUPTIBLE);
+	return schedule_timeout(timeout);
 }
 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
 
@@ -1516,16 +1448,18 @@ static void time_interpolator_update(long delta_nsec)
 	if (!time_interpolator)
 		return;
 
-	/* The interpolator compensates for late ticks by accumulating
-	 * the late time in time_interpolator->offset. A tick earlier than
-	 * expected will lead to a reset of the offset and a corresponding
-	 * jump of the clock forward. Again this only works if the
-	 * interpolator clock is running slightly slower than the regular clock
-	 * and the tuning logic insures that.
-	 */
+	/*
+	 * The interpolator compensates for late ticks by accumulating the late
+	 * time in time_interpolator->offset. A tick earlier than expected will
+	 * lead to a reset of the offset and a corresponding jump of the clock
+	 * forward. Again this only works if the interpolator clock is running
+	 * slightly slower than the regular clock and the tuning logic insures
+	 * that.
+	 */
 
 	counter = time_interpolator_get_counter(1);
-	offset = time_interpolator->offset + GET_TI_NSECS(counter, time_interpolator);
+	offset = time_interpolator->offset +
+			GET_TI_NSECS(counter, time_interpolator);
 
 	if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
 		time_interpolator->offset = offset - delta_nsec;