Diffstat (limited to 'kernel/timer.c')

 kernel/timer.c | 151
 1 file changed, 109 insertions(+), 42 deletions(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index 890a56937cfa..5bb6b7976eec 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -770,7 +770,7 @@ static void update_ntp_one_tick(void)
  * specified number of bits to the right of the binary point.
  * This function has no side-effects.
  */
-u64 current_tick_length(long shift)
+u64 current_tick_length(void)
 {
 	long delta_nsec;
 	u64 ret;
@@ -779,14 +779,8 @@ u64 current_tick_length(long shift)
 	 * ie: nanosecond value shifted by (SHIFT_SCALE - 10)
 	 */
 	delta_nsec = tick_nsec + adjtime_adjustment() * 1000;
-	ret = ((u64) delta_nsec << (SHIFT_SCALE - 10)) + time_adj;
-
-	/* convert from (SHIFT_SCALE - 10) to specified shift scale: */
-	shift = shift - (SHIFT_SCALE - 10);
-	if (shift < 0)
-		ret >>= -shift;
-	else
-		ret <<= shift;
+	ret = (u64)delta_nsec << TICK_LENGTH_SHIFT;
+	ret += (s64)time_adj << (TICK_LENGTH_SHIFT - (SHIFT_SCALE - 10));
 
 	return ret;
 }
@@ -794,7 +788,6 @@ u64 current_tick_length(long shift)
 /* XXX - all of this timekeeping code should be later moved to time.c */
 #include <linux/clocksource.h>
 static struct clocksource *clock; /* pointer to current clocksource */
-static cycle_t last_clock_cycle; /* cycle value at last update_wall_time */
 
 #ifdef CONFIG_GENERIC_TIME
 /**
@@ -813,7 +806,7 @@ static inline s64 __get_nsec_offset(void)
 	cycle_now = clocksource_read(clock);
 
 	/* calculate the delta since the last update_wall_time: */
-	cycle_delta = (cycle_now - last_clock_cycle) & clock->mask;
+	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
 
 	/* convert to nanoseconds: */
 	ns_offset = cyc2ns(clock, cycle_delta);
@@ -927,7 +920,7 @@ static int change_clocksource(void)
 	timespec_add_ns(&xtime, nsec);
 
 	clock = new;
-	last_clock_cycle = now;
+	clock->cycle_last = now;
 	printk(KERN_INFO "Time: %s clocksource has been installed.\n",
 	       clock->name);
 	return 1;
@@ -968,7 +961,7 @@ void __init timekeeping_init(void)
 	write_seqlock_irqsave(&xtime_lock, flags);
 	clock = clocksource_get_next();
 	clocksource_calculate_interval(clock, tick_nsec);
-	last_clock_cycle = clocksource_read(clock);
+	clock->cycle_last = clocksource_read(clock);
 	ntp_clear();
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 }
@@ -988,7 +981,7 @@ static int timekeeping_resume(struct sys_device *dev)
 
 	write_seqlock_irqsave(&xtime_lock, flags);
 	/* restart the last cycle value */
-	last_clock_cycle = clocksource_read(clock);
+	clock->cycle_last = clocksource_read(clock);
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 	return 0;
 }
@@ -1015,60 +1008,134 @@ static int __init timekeeping_init_device(void)
 device_initcall(timekeeping_init_device);
 
 /*
+ * If the error is already larger, we look ahead another tick,
+ * to compensate for late or lost adjustments.
+ */
+static __always_inline int clocksource_bigadjust(int sign, s64 error, s64 *interval, s64 *offset)
+{
+	int adj;
+
+	/*
+	 * As soon as the machine is synchronized to the external time
+	 * source this should be the common case.
+	 */
+	error >>= 2;
+	if (likely(sign > 0 ? error <= *interval : error >= *interval))
+		return sign;
+
+	/*
+	 * An extra look ahead dampens the effect of the current error,
+	 * which can grow quite large with continously late updates, as
+	 * it would dominate the adjustment value and can lead to
+	 * oscillation.
+	 */
+	error += current_tick_length() >> (TICK_LENGTH_SHIFT - clock->shift + 1);
+	error -= clock->xtime_interval >> 1;
+
+	adj = 0;
+	while (1) {
+		error >>= 1;
+		if (sign > 0 ? error <= *interval : error >= *interval)
+			break;
+		adj++;
+	}
+
+	/*
+	 * Add the current adjustments to the error and take the offset
+	 * into account, the latter can cause the error to be hardly
+	 * reduced at the next tick. Check the error again if there's
+	 * room for another adjustment, thus further reducing the error
+	 * which otherwise had to be corrected at the next update.
+	 */
+	error = (error << 1) - *interval + *offset;
+	if (sign > 0 ? error > *interval : error < *interval)
+		adj++;
+
+	*interval <<= adj;
+	*offset <<= adj;
+	return sign << adj;
+}
+
+/*
+ * Adjust the multiplier to reduce the error value,
+ * this is optimized for the most common adjustments of -1,0,1,
+ * for other values we can do a bit more work.
+ */
+static void clocksource_adjust(struct clocksource *clock, s64 offset)
+{
+	s64 error, interval = clock->cycle_interval;
+	int adj;
+
+	error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
+	if (error > interval) {
+		adj = clocksource_bigadjust(1, error, &interval, &offset);
+	} else if (error < -interval) {
+		interval = -interval;
+		offset = -offset;
+		adj = clocksource_bigadjust(-1, error, &interval, &offset);
+	} else
+		return;
+
+	clock->mult += adj;
+	clock->xtime_interval += interval;
+	clock->xtime_nsec -= offset;
+	clock->error -= (interval - offset) << (TICK_LENGTH_SHIFT - clock->shift);
+}
+
+/*
  * update_wall_time - Uses the current clocksource to increment the wall time
  *
  * Called from the timer interrupt, must hold a write on xtime_lock.
  */
 static void update_wall_time(void)
 {
-	static s64 remainder_snsecs, error;
-	s64 snsecs_per_sec;
-	cycle_t now, offset;
+	cycle_t offset;
 
-	snsecs_per_sec = (s64)NSEC_PER_SEC << clock->shift;
-	remainder_snsecs += (s64)xtime.tv_nsec << clock->shift;
+	clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
 
-	now = clocksource_read(clock);
-	offset = (now - last_clock_cycle)&clock->mask;
+#ifdef CONFIG_GENERIC_TIME
+	offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
+#else
+	offset = clock->cycle_interval;
+#endif
 
 	/* normally this loop will run just once, however in the
 	 * case of lost or late ticks, it will accumulate correctly.
 	 */
-	while (offset > clock->interval_cycles) {
-		/* get the ntp interval in clock shifted nanoseconds */
-		s64 ntp_snsecs = current_tick_length(clock->shift);
-
+	while (offset >= clock->cycle_interval) {
 		/* accumulate one interval */
-		remainder_snsecs += clock->interval_snsecs;
-		last_clock_cycle += clock->interval_cycles;
-		offset -= clock->interval_cycles;
+		clock->xtime_nsec += clock->xtime_interval;
+		clock->cycle_last += clock->cycle_interval;
+		offset -= clock->cycle_interval;
+
+		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
+			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
+			xtime.tv_sec++;
+			second_overflow();
+		}
 
 		/* interpolator bits */
-		time_interpolator_update(clock->interval_snsecs
+		time_interpolator_update(clock->xtime_interval
 						>> clock->shift);
 		/* increment the NTP state machine */
 		update_ntp_one_tick();
 
 		/* accumulate error between NTP and clock interval */
-		error += (ntp_snsecs - (s64)clock->interval_snsecs);
-
-		/* correct the clock when NTP error is too big */
-		remainder_snsecs += make_ntp_adj(clock, offset, &error);
-
-		if (remainder_snsecs >= snsecs_per_sec) {
-			remainder_snsecs -= snsecs_per_sec;
-			xtime.tv_sec++;
-			second_overflow();
-		}
+		clock->error += current_tick_length();
+		clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
 	}
+
+	/* correct the clock when NTP error is too big */
+	clocksource_adjust(clock, offset);
+
 	/* store full nanoseconds into xtime */
-	xtime.tv_nsec = remainder_snsecs >> clock->shift;
-	remainder_snsecs -= (s64)xtime.tv_nsec << clock->shift;
+	xtime.tv_nsec = clock->xtime_nsec >> clock->shift;
+	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
 
 	/* check to see if there is a new clocksource to use */
 	if (change_clocksource()) {
-		error = 0;
-		remainder_snsecs = 0;
+		clock->error = 0;
+		clock->xtime_nsec = 0;
 		clocksource_calculate_interval(clock, tick_nsec);
 	}
 }
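For readers skimming the diff: the patch replaces the old static remainder_snsecs/error bookkeeping with per-clocksource state (clock->xtime_nsec, clock->cycle_last, clock->error) and feeds the accumulated NTP error back into clock->mult via clocksource_adjust(). The sketch below is a stand-alone user-space illustration of that feedback loop under simplified assumptions, not kernel code: the struct, the SIM_SHIFT/SIM_TICK_LENGTH_SHIFT constants, and the plus-or-minus-one-only adjustment are illustrative stand-ins, and the offset handling and clocksource_bigadjust() look-ahead of the real patch are omitted for brevity.

/*
 * sim_clocksource.c - user-space sketch of the error-feedback idea
 * in this patch (illustrative only; names and constants are assumed).
 *
 * Build: cc -std=c99 -o sim_clocksource sim_clocksource.c
 */
#include <stdio.h>
#include <stdint.h>

#define SIM_SHIFT             10   /* clocksource shift (assumed) */
#define SIM_TICK_LENGTH_SHIFT 20   /* NTP tick length shift (assumed) */

struct sim_clocksource {
	uint32_t mult;            /* converts cycles to (ns << SIM_SHIFT) */
	uint64_t cycle_interval;  /* counter cycles per tick */
	int64_t  xtime_interval;  /* cycle_interval * mult, in ns << SIM_SHIFT */
	int64_t  error;           /* accumulated error, in ns << SIM_TICK_LENGTH_SHIFT */
};

/* Nudge mult by +/-1 when the accumulated error exceeds one cycle interval
 * (the kernel's clocksource_adjust() additionally scales the step via
 * clocksource_bigadjust(); the offset term is dropped here for brevity). */
static void sim_adjust(struct sim_clocksource *cs)
{
	int64_t interval = cs->cycle_interval;
	int64_t error = cs->error >> (SIM_TICK_LENGTH_SHIFT - SIM_SHIFT - 1);
	int adj;

	if (error > interval)
		adj = 1;
	else if (error < -interval)
		adj = -1;
	else
		return;

	cs->mult += adj;
	cs->xtime_interval += adj * (int64_t)cs->cycle_interval;
	cs->error -= (adj * (int64_t)cs->cycle_interval)
			<< (SIM_TICK_LENGTH_SHIFT - SIM_SHIFT);
}

int main(void)
{
	/* 1 MHz counter, 1000 Hz tick: 1000 cycles per tick,
	 * ideal mult = 1000 << SIM_SHIFT = 1024000; start 5 low on purpose. */
	struct sim_clocksource cs = {
		.mult = 1023995,
		.cycle_interval = 1000,
	};
	/* what NTP says one tick should be: 1,000,000 ns, shifted */
	int64_t tick_length = (int64_t)1000000 << SIM_TICK_LENGTH_SHIFT;

	cs.xtime_interval = (int64_t)cs.cycle_interval * cs.mult;

	for (int tick = 0; tick < 8; tick++) {
		/* accumulate error between the NTP interval and the clock interval */
		cs.error += tick_length;
		cs.error -= cs.xtime_interval << (SIM_TICK_LENGTH_SHIFT - SIM_SHIFT);
		sim_adjust(&cs);
		printf("tick %d: mult=%u error=%lld\n",
		       tick, cs.mult, (long long)cs.error);
	}
	return 0;
}

Running the sketch shows mult stepping from 1023995 up to, and briefly past, the ideal 1024000 while the accumulated error is worked off; the slow, oscillation-prone convergence of this plus-or-minus-one scheme is exactly what the patch's clocksource_bigadjust() look-ahead is there to improve.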
