 kernel/timer.c | 85
 1 file changed, 47 insertions(+), 38 deletions(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index 396a3c024c2c..2a87430a58d4 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -891,6 +891,7 @@ int do_settimeofday(struct timespec *tv)
 	set_normalized_timespec(&xtime, sec, nsec);
 	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
 
+	clock->error = 0;
 	ntp_clear();
 
 	write_sequnlock_irqrestore(&xtime_lock, flags);
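The added clock->error = 0 pairs with ntp_clear(): once do_settimeofday() has rewritten xtime, the error accumulated against the old time base no longer describes anything real, so it is discarded before clocksource_adjust() can try to compensate for it. A minimal compilable sketch of that idea, with hypothetical types and names (clocksource_state, set_wall_time) standing in for the kernel state:

#include <stdio.h>

/* Hypothetical stand-ins for the timekeeping state touched by the patch. */
struct clocksource_state {
	long long error;	/* accumulated deviation from NTP time, scaled */
};

struct wall_clock {
	long long sec, nsec;
};

/* Sketch of a settimeofday-style update: new time in, stale error out. */
static void set_wall_time(struct wall_clock *xtime,
			  struct clocksource_state *clock,
			  long long sec, long long nsec)
{
	xtime->sec = sec;
	xtime->nsec = nsec;
	clock->error = 0;	/* old error was measured against the old time base */
}

int main(void)
{
	struct wall_clock xtime = { 0, 0 };
	struct clocksource_state clock = { .error = 123456 };

	set_wall_time(&xtime, &clock, 1000000, 0);
	printf("error after settimeofday: %lld\n", clock.error);
	return 0;
}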
@@ -1008,52 +1009,52 @@ static int __init timekeeping_init_device(void)
 device_initcall(timekeeping_init_device);
 
 /*
- * If the error is already larger, we look ahead another tick,
+ * If the error is already larger, we look ahead even further
  * to compensate for late or lost adjustments.
  */
-static __always_inline int clocksource_bigadjust(int sign, s64 error, s64 *interval, s64 *offset)
+static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, s64 *offset)
 {
-	int adj;
+	s64 tick_error, i;
+	u32 look_ahead, adj;
+	s32 error2, mult;
 
 	/*
-	 * As soon as the machine is synchronized to the external time
-	 * source this should be the common case.
+	 * Use the current error value to determine how much to look ahead.
+	 * The larger the error the slower we adjust for it to avoid problems
+	 * with losing too many ticks, otherwise we would overadjust and
+	 * produce an even larger error. The smaller the adjustment the
+	 * faster we try to adjust for it, as lost ticks can do less harm
+	 * here. This is tuned so that an error of about 1 msec is adjusted
+	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
 	 */
-	error >>= 2;
-	if (likely(sign > 0 ? error <= *interval : error >= *interval))
-		return sign;
+	error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
+	error2 = abs(error2);
+	for (look_ahead = 0; error2 > 0; look_ahead++)
+		error2 >>= 2;
 
 	/*
-	 * An extra look ahead dampens the effect of the current error,
-	 * which can grow quite large with continuously late updates, as
-	 * it would dominate the adjustment value and can lead to
-	 * oscillation.
+	 * Now calculate the error in (1 << look_ahead) ticks, but first
+	 * remove the single look ahead already included in the error.
 	 */
-	error += current_tick_length() >> (TICK_LENGTH_SHIFT - clock->shift + 1);
-	error -= clock->xtime_interval >> 1;
-
-	adj = 0;
-	while (1) {
-		error >>= 1;
-		if (sign > 0 ? error <= *interval : error >= *interval)
-			break;
-		adj++;
+	tick_error = current_tick_length() >> (TICK_LENGTH_SHIFT - clock->shift + 1);
+	tick_error -= clock->xtime_interval >> 1;
+	error = ((error - tick_error) >> look_ahead) + tick_error;
+
+	/* Finally calculate the adjustment shift value. */
+	i = *interval;
+	mult = 1;
+	if (error < 0) {
+		error = -error;
+		*interval = -*interval;
+		*offset = -*offset;
+		mult = -1;
 	}
-
-	/*
-	 * Add the current adjustments to the error and take the offset
-	 * into account, the latter can cause the error to be hardly
-	 * reduced at the next tick. Check the error again if there's
-	 * room for another adjustment, thus further reducing the error
-	 * which otherwise had to be corrected at the next update.
-	 */
-	error = (error << 1) - *interval + *offset;
-	if (sign > 0 ? error > *interval : error < *interval)
-		adj++;
+	for (adj = 0; error > i; adj++)
+		error >>= 1;
 
 	*interval <<= adj;
 	*offset <<= adj;
-	return sign << adj;
+	return mult << adj;
 }
 
 /*
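The rewritten clocksource_bigadjust() derives a look-ahead window from the accumulated error (the larger the error, the more cautiously it is corrected), re-expresses the error over that window, and then finds the power-of-two shift that brings it back under one interval. Below is a compilable userspace model of that arithmetic, not the kernel function itself: the TICK_LENGTH_SHIFT and SHIFT_HZ values are assumptions chosen for illustration, and accumulated_error/tick_error are plain parameters standing in for the kernel's clock->error and the current_tick_length()-derived terms.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TICK_LENGTH_SHIFT 32	/* assumed scaling of the tick length, for illustration */
#define SHIFT_HZ 8		/* assumed log2(HZ), e.g. HZ=256 */

/*
 * Model of the rewritten clocksource_bigadjust(): derive a look-ahead window
 * from the accumulated error, re-express the error over that window, then
 * find the power-of-two shift that brings it back under one interval.
 */
static int bigadjust_model(int64_t accumulated_error, int64_t tick_error,
			   int64_t error, int64_t *interval, int64_t *offset)
{
	int64_t i;
	uint32_t look_ahead, adj;
	int32_t error2, mult;

	/* The larger the accumulated error, the further ahead we look. */
	error2 = (int32_t)(accumulated_error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ));
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/* Error over (1 << look_ahead) ticks, minus the single look-ahead already counted. */
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Shift until the (absolute) error fits into a single interval. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

int main(void)
{
	int64_t interval = 1 << 20;	/* assumed per-tick interval in shifted ns */
	int64_t offset = 0;
	int64_t error = (int64_t)1 << 26;	/* a large positive error */

	int adj = bigadjust_model(error << 12, interval / 2, error,
				  &interval, &offset);
	printf("adjustment: %d, scaled interval: %lld\n",
	       adj, (long long)interval);
	return 0;
}

With the sample numbers in main() this prints an adjustment of 64 and an interval scaled by the same factor, i.e. the error is corrected over many ticks instead of all at once.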
@@ -1068,11 +1069,19 @@ static void clocksource_adjust(struct clocksource *clock, s64 offset)
 
 	error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
 	if (error > interval) {
-		adj = clocksource_bigadjust(1, error, &interval, &offset);
+		error >>= 2;
+		if (likely(error <= interval))
+			adj = 1;
+		else
+			adj = clocksource_bigadjust(error, &interval, &offset);
 	} else if (error < -interval) {
-		interval = -interval;
-		offset = -offset;
-		adj = clocksource_bigadjust(-1, error, &interval, &offset);
+		error >>= 2;
+		if (likely(error >= -interval)) {
+			adj = -1;
+			interval = -interval;
+			offset = -offset;
+		} else
+			adj = clocksource_bigadjust(error, &interval, &offset);
 	} else
 		return;
 
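The clocksource_adjust() hunk puts a fast path in front of that: the error is first given the single-tick look-ahead (error >>= 2), and if it then fits within one interval the adjustment is simply +1 or -1; only larger errors pay for the full clocksource_bigadjust() computation. A small compilable sketch of that decision follows; pick_adjustment and bigadjust_stub are illustrative names, and the stub's return value is a placeholder for the real slow path.

#include <stdint.h>
#include <stdio.h>

/* Stub for the slow path; the real code calls clocksource_bigadjust(). */
static int bigadjust_stub(int64_t error, int64_t *interval, int64_t *offset)
{
	(void)error; (void)interval; (void)offset;
	return 8;	/* placeholder power-of-two adjustment */
}

/* Mirror of the fast/slow split the patch introduces in clocksource_adjust(). */
static int pick_adjustment(int64_t error, int64_t *interval, int64_t *offset)
{
	if (error > *interval) {
		error >>= 2;
		if (error <= *interval)		/* likely: small positive error */
			return 1;
		return bigadjust_stub(error, interval, offset);
	} else if (error < -*interval) {
		error >>= 2;
		if (error >= -*interval) {	/* likely: small negative error */
			*interval = -*interval;
			*offset = -*offset;
			return -1;
		}
		return bigadjust_stub(error, interval, offset);
	}
	return 0;	/* within one interval: the kernel returns early here */
}

int main(void)
{
	int64_t interval = 1000, offset = 0;
	printf("small positive error -> adj %d\n",
	       pick_adjustment(2500, &interval, &offset));
	printf("large positive error -> adj %d\n",
	       pick_adjustment(50000, &interval, &offset));
	return 0;
}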
@@ -1129,7 +1138,7 @@ static void update_wall_time(void)
 	clocksource_adjust(clock, offset);
 
 	/* store full nanoseconds into xtime */
-	xtime.tv_nsec = clock->xtime_nsec >> clock->shift;
+	xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
 	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
 
 	/* check to see if there is a new clocksource to use */
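The last hunk casts xtime_nsec to s64 before the shift. Assuming, as in this timekeeping code, that xtime_nsec is an unsigned 64-bit accumulator, the adjustment applied just above can leave it transiently "negative" as a wrapped bit pattern; an unsigned shift would then hand xtime.tv_nsec a huge bogus value, while the signed cast gives an arithmetic shift that preserves the sign (gcc implements signed right shift as arithmetic, which the kernel relies on). A standalone demonstration with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/*
	 * Illustrative values: a small negative nanosecond remainder stored
	 * in an unsigned 64-bit accumulator, scaled by a shift of 10.
	 */
	const unsigned int shift = 10;
	uint64_t xtime_nsec = (uint64_t)(int64_t)-3072;	/* "-3 ns" scaled by 2^10 */

	printf("unsigned shift: %llu\n",
	       (unsigned long long)(xtime_nsec >> shift));	/* huge wrapped value */
	printf("signed shift:   %lld\n",
	       (long long)((int64_t)xtime_nsec >> shift));	/* -3, as intended */
	return 0;
}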