Diffstat (limited to 'kernel')
-rw-r--r--   kernel/timer.c   252
1 files changed, 126 insertions, 126 deletions
diff --git a/kernel/timer.c b/kernel/timer.c
index 6b94adb45b03..cc18857601e2 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -632,77 +632,74 @@ long time_next_adjust;
  */
 static void second_overflow(void)
 {
 	long ltemp;
 
 	/* Bump the maxerror field */
 	time_maxerror += time_tolerance >> SHIFT_USEC;
-	if ( time_maxerror > NTP_PHASE_LIMIT ) {
+	if (time_maxerror > NTP_PHASE_LIMIT) {
 		time_maxerror = NTP_PHASE_LIMIT;
 		time_status |= STA_UNSYNC;
 	}
 
 	/*
-	 * Leap second processing. If in leap-insert state at
-	 * the end of the day, the system clock is set back one
-	 * second; if in leap-delete state, the system clock is
-	 * set ahead one second. The microtime() routine or
-	 * external clock driver will insure that reported time
-	 * is always monotonic. The ugly divides should be
-	 * replaced.
+	 * Leap second processing. If in leap-insert state at the end of the
+	 * day, the system clock is set back one second; if in leap-delete
+	 * state, the system clock is set ahead one second. The microtime()
+	 * routine or external clock driver will insure that reported time is
+	 * always monotonic. The ugly divides should be replaced.
 	 */
 	switch (time_state) {
-
 	case TIME_OK:
 		if (time_status & STA_INS)
 			time_state = TIME_INS;
 		else if (time_status & STA_DEL)
 			time_state = TIME_DEL;
 		break;
-
 	case TIME_INS:
 		if (xtime.tv_sec % 86400 == 0) {
 			xtime.tv_sec--;
 			wall_to_monotonic.tv_sec++;
-			/* The timer interpolator will make time change gradually instead
-			 * of an immediate jump by one second.
+			/*
+			 * The timer interpolator will make time change
+			 * gradually instead of an immediate jump by one second
 			 */
 			time_interpolator_update(-NSEC_PER_SEC);
 			time_state = TIME_OOP;
 			clock_was_set();
-			printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
+			printk(KERN_NOTICE "Clock: inserting leap second "
+					"23:59:60 UTC\n");
 		}
 		break;
-
 	case TIME_DEL:
 		if ((xtime.tv_sec + 1) % 86400 == 0) {
 			xtime.tv_sec++;
 			wall_to_monotonic.tv_sec--;
-			/* Use of time interpolator for a gradual change of time */
+			/*
+			 * Use of time interpolator for a gradual change of
+			 * time
+			 */
 			time_interpolator_update(NSEC_PER_SEC);
 			time_state = TIME_WAIT;
 			clock_was_set();
-			printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
+			printk(KERN_NOTICE "Clock: deleting leap second "
+					"23:59:59 UTC\n");
 		}
 		break;
-
 	case TIME_OOP:
 		time_state = TIME_WAIT;
 		break;
-
 	case TIME_WAIT:
 		if (!(time_status & (STA_INS | STA_DEL)))
 			time_state = TIME_OK;
 	}
 
 	/*
-	 * Compute the phase adjustment for the next second. In
-	 * PLL mode, the offset is reduced by a fixed factor
-	 * times the time constant. In FLL mode the offset is
-	 * used directly. In either mode, the maximum phase
-	 * adjustment for each second is clamped so as to spread
-	 * the adjustment over not more than the number of
-	 * seconds between updates.
+	 * Compute the phase adjustment for the next second. In PLL mode, the
+	 * offset is reduced by a fixed factor times the time constant. In FLL
+	 * mode the offset is used directly. In either mode, the maximum phase
+	 * adjustment for each second is clamped so as to spread the adjustment
+	 * over not more than the number of seconds between updates.
 	 */
 	ltemp = time_offset;
 	if (!(time_status & STA_FLL))
 		ltemp = shift_right(ltemp, SHIFT_KG + time_constant);
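The TIME_INS and TIME_DEL branches above key off xtime.tv_sec modulo 86400: a UTC day is 86400 seconds long, so the remainder reaches zero exactly at the day boundary where a leap second is inserted, and (tv_sec + 1) % 86400 == 0 marks 23:59:59, the second deleted in leap-delete state. These are the "ugly divides" the comment wants replaced. A minimal standalone sketch of the same tests, with hypothetical helper names and not taken from the kernel:

#include <stdbool.h>
#include <time.h>

/* Illustration only: the day-boundary tests used by TIME_INS / TIME_DEL. */
bool at_utc_day_boundary(time_t sec)
{
	return sec % 86400 == 0;	/* 00:00:00 UTC, insertion point */
}

bool last_second_of_utc_day(time_t sec)
{
	return (sec + 1) % 86400 == 0;	/* 23:59:59 UTC, deleted in TIME_DEL */
}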
@@ -711,40 +708,42 @@ static void second_overflow(void)
 	time_offset -= ltemp;
 	time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
 
 	/*
-	 * Compute the frequency estimate and additional phase
-	 * adjustment due to frequency error for the next
-	 * second. When the PPS signal is engaged, gnaw on the
-	 * watchdog counter and update the frequency computed by
-	 * the pll and the PPS signal.
+	 * Compute the frequency estimate and additional phase adjustment due
+	 * to frequency error for the next second. When the PPS signal is
+	 * engaged, gnaw on the watchdog counter and update the frequency
+	 * computed by the pll and the PPS signal.
 	 */
 	pps_valid++;
 	if (pps_valid == PPS_VALID) {	/* PPS signal lost */
 		pps_jitter = MAXTIME;
 		pps_stabil = MAXFREQ;
 		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
 				 STA_PPSWANDER | STA_PPSERROR);
 	}
 	ltemp = time_freq + pps_freq;
 	time_adj += shift_right(ltemp,(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE));
 
 #if HZ == 100
-	/* Compensate for (HZ==100) != (1 << SHIFT_HZ).
-	 * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14)
+	/*
+	 * Compensate for (HZ==100) != (1 << SHIFT_HZ). Add 25% and 3.125% to
+	 * get 128.125; => only 0.125% error (p. 14)
 	 */
 	time_adj += shift_right(time_adj, 2) + shift_right(time_adj, 5);
 #endif
 #if HZ == 250
-	/* Compensate for (HZ==250) != (1 << SHIFT_HZ).
-	 * Add 1.5625% and 0.78125% to get 255.85938; => only 0.05% error (p. 14)
+	/*
+	 * Compensate for (HZ==250) != (1 << SHIFT_HZ). Add 1.5625% and
+	 * 0.78125% to get 255.85938; => only 0.05% error (p. 14)
 	 */
 	time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
 #endif
 #if HZ == 1000
-	/* Compensate for (HZ==1000) != (1 << SHIFT_HZ).
-	 * Add 1.5625% and 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
+	/*
+	 * Compensate for (HZ==1000) != (1 << SHIFT_HZ). Add 1.5625% and
+	 * 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
 	 */
 	time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
 #endif
 }
 
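The reflowed #if HZ blocks encode simple shift arithmetic: shift_right(x, 2) is x/4 (25%) and shift_right(x, 5) is x/32 (3.125%). time_adj was computed as if the adjustment were spread over 1 << SHIFT_HZ = 128 ticks per second, so with only 100 real ticks each one is scaled up by 28.125%, treating HZ == 100 as 128.125. A small userspace sketch of that arithmetic, purely illustrative and using plain doubles where the kernel uses integer shifts on time_adj:

#include <stdio.h>

int main(void)
{
	double hz = 100.0;
	/* x + x/4 + x/32: the same factors as shift_right(x, 2) + shift_right(x, 5) */
	double scaled = hz + hz / 4 + hz / 32;

	printf("HZ=100 scaled to %.3f against 1 << SHIFT_HZ = 128\n", scaled);
	return 0;
}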
@@ -753,21 +752,20 @@ static void update_wall_time_one_tick(void)
 {
 	long time_adjust_step, delta_nsec;
 
-	if ( (time_adjust_step = time_adjust) != 0 ) {
-		/* We are doing an adjtime thing.
-		 *
-		 * Prepare time_adjust_step to be within bounds.
-		 * Note that a positive time_adjust means we want the clock
-		 * to run faster.
+	if ((time_adjust_step = time_adjust) != 0 ) {
+		/*
+		 * We are doing an adjtime thing. Prepare time_adjust_step to
+		 * be within bounds. Note that a positive time_adjust means we
+		 * want the clock to run faster.
 		 *
 		 * Limit the amount of the step to be in the range
 		 * -tickadj .. +tickadj
 		 */
 		time_adjust_step = min(time_adjust_step, (long)tickadj);
 		time_adjust_step = max(time_adjust_step, (long)-tickadj);
 
 		/* Reduce by this step the amount of time left */
 		time_adjust -= time_adjust_step;
 	}
 	delta_nsec = tick_nsec + time_adjust_step * 1000;
 	/*
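The clamping above is what makes adjtime() slew rather than step: at most tickadj microseconds of the pending adjustment, in either direction, is folded into the length of a single tick, and the remainder stays in time_adjust for later ticks. A standalone sketch of that logic, with hypothetical names and for illustration only:

/* Apply at most +/- tickadj_us of the pending adjustment per tick. */
long apply_one_tick(long *pending_us, long tickadj_us)
{
	long step = *pending_us;

	if (step > tickadj_us)
		step = tickadj_us;
	if (step < -tickadj_us)
		step = -tickadj_us;

	*pending_us -= step;	/* reduce the amount of adjustment still owed */
	return step;		/* microseconds added to this tick's length */
}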
@@ -1106,8 +1104,8 @@ fastcall signed long __sched schedule_timeout(signed long timeout)
 		if (timeout < 0)
 		{
 			printk(KERN_ERR "schedule_timeout: wrong timeout "
-				"value %lx from %p\n", timeout,
-				__builtin_return_address(0));
+			       "value %lx from %p\n", timeout,
+			       __builtin_return_address(0));
 			current->state = TASK_RUNNING;
 			goto out;
 		}
@@ -1133,15 +1131,15 @@ EXPORT_SYMBOL(schedule_timeout);
  */
 signed long __sched schedule_timeout_interruptible(signed long timeout)
 {
-       __set_current_state(TASK_INTERRUPTIBLE);
-       return schedule_timeout(timeout);
+	__set_current_state(TASK_INTERRUPTIBLE);
+	return schedule_timeout(timeout);
 }
 EXPORT_SYMBOL(schedule_timeout_interruptible);
 
 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
 {
-       __set_current_state(TASK_UNINTERRUPTIBLE);
-       return schedule_timeout(timeout);
+	__set_current_state(TASK_UNINTERRUPTIBLE);
+	return schedule_timeout(timeout);
 }
 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
 
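schedule_timeout_interruptible() and schedule_timeout_uninterruptible() only set the task state before delegating to schedule_timeout(), which returns the number of jiffies left when the task woke. A hypothetical caller, not taken from this file, that sleeps for about one second and reports an early wakeup:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

/* Hypothetical example: sleep ~1s, return -EINTR if woken before the timeout. */
static int example_wait_one_second(void)
{
	signed long left = schedule_timeout_interruptible(HZ);

	return left ? -EINTR : 0;
}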
@@ -1481,16 +1479,18 @@ static void time_interpolator_update(long delta_nsec)
 	if (!time_interpolator)
 		return;
 
-	/* The interpolator compensates for late ticks by accumulating
-	 * the late time in time_interpolator->offset. A tick earlier than
-	 * expected will lead to a reset of the offset and a corresponding
-	 * jump of the clock forward. Again this only works if the
-	 * interpolator clock is running slightly slower than the regular clock
-	 * and the tuning logic insures that.
+	/*
+	 * The interpolator compensates for late ticks by accumulating the late
+	 * time in time_interpolator->offset. A tick earlier than expected will
+	 * lead to a reset of the offset and a corresponding jump of the clock
+	 * forward. Again this only works if the interpolator clock is running
+	 * slightly slower than the regular clock and the tuning logic insures
+	 * that.
 	 */
 
 	counter = time_interpolator_get_counter(1);
-	offset = time_interpolator->offset + GET_TI_NSECS(counter, time_interpolator);
+	offset = time_interpolator->offset +
+			GET_TI_NSECS(counter, time_interpolator);
 
 	if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
 		time_interpolator->offset = offset - delta_nsec;
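Here offset is how far the interpolating clock has advanced since the last update, and the guarded assignment keeps any surplus in time_interpolator->offset so reported time never runs backwards; a negative delta_nsec is how the leap-second insertion in second_overflow() is absorbed gradually. A simplified standalone model of that bookkeeping, with a hypothetical name; the branch not shown in this hunk resets the offset when a tick arrives early, per the comment above:

/*
 * Simplified model: delta_nsec is the nominal tick length, offset the time
 * the interpolator has accumulated. Surplus (or an inserted second) is
 * carried forward; otherwise the offset is reset and the clock jumps ahead.
 */
unsigned long carry_offset(long delta_nsec, unsigned long offset)
{
	if (delta_nsec < 0 || (unsigned long)delta_nsec < offset)
		return offset - delta_nsec;	/* keep the late/extra time */
	return 0;				/* reset, clock jumps forward */
}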