author    Linus Torvalds <torvalds@linux-foundation.org>    2012-03-29 17:16:48 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-03-29 17:16:48 -0400
commit    bcd550745fc54f789c14e7526e0633222c505faa (patch)
tree      c3fe11a6503b7ffdd4406a9fece5c40b3e2a3f6d
parent    93f378883cecb9dcb2cf5b51d9d24175906659da (diff)
parent    646783a389828e76e813f50791f7999429c821bc (diff)
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer core updates from Thomas Gleixner.

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  ia64: vsyscall: Add missing paranthesis
  alarmtimer: Don't call rtc_timer_init() when CONFIG_RTC_CLASS=n
  x86: vdso: Put declaration before code
  x86-64: Inline vdso clock_gettime helpers
  x86-64: Simplify and optimize vdso clock_gettime monotonic variants
  kernel-time: fix s/then/than/ spelling errors
  time: remove no_sync_cmos_clock
  time: Avoid scary backtraces when warning of > 11% adj
  alarmtimer: Make sure we initialize the rtctimer
  ntp: Fix leap-second hrtimer livelock
  x86, tsc: Skip refined tsc calibration on systems with reliable TSC
  rtc: Provide flag for rtc devices that don't support UIE
  ia64: vsyscall: Use seqcount instead of seqlock
  x86: vdso: Use seqcount instead of seqlock
  x86: vdso: Remove bogus locking in update_vsyscall_tz()
  time: Remove bogus comments
  time: Fix change_clocksource locking
  time: x86: Fix race switching from vsyscall to non-vsyscall clock
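The common thread in this series is the conversion of the vsyscall/fsyscall
time data from a seqlock to a bare seqcount: the update side is already
serialized by the timekeeping core, and a userspace reader could never take
the kernel's spinlock anyway, so only the sequence counter is needed. A
minimal sketch of the resulting write/read pattern, assuming kernel context
(<linux/seqlock.h>); the struct and helper names below are illustrative,
not symbols from these patches:

#include <linux/seqlock.h>

/* Illustrative stand-in for the per-arch gtod data. A zeroed seqcount_t
 * is a valid initial state, which is why the patches can drop the
 * __SEQLOCK_UNLOCKED initializers entirely. */
struct gtod_sample {
        seqcount_t seq;         /* odd while an update is in flight */
        time_t wall_time_sec;
        u32 wall_time_nsec;
};

static struct gtod_sample gtod_sample;

/* Writer (hypothetical helper): no lock, no irq fiddling; callers are
 * already serialized by the timekeeper. */
static void gtod_sample_update(time_t sec, u32 nsec)
{
        write_seqcount_begin(&gtod_sample.seq);
        gtod_sample.wall_time_sec = sec;
        gtod_sample.wall_time_nsec = nsec;
        write_seqcount_end(&gtod_sample.seq);
}

/* Reader (hypothetical helper): lockless; retries until it observes a
 * snapshot taken under an even, unchanged sequence count. */
static void gtod_sample_read(time_t *sec, u32 *nsec)
{
        unsigned seq;

        do {
                seq = read_seqcount_begin(&gtod_sample.seq);
                *sec = gtod_sample.wall_time_sec;
                *nsec = gtod_sample.wall_time_nsec;
        } while (read_seqcount_retry(&gtod_sample.seq, seq));
}

Readers never block the writer; they simply loop again if the counter moved
(or was odd) while they were copying, which is exactly the behaviour the
vdso functions in the diff below rely on.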
-rw-r--r--  arch/ia64/kernel/asm-offsets.c        |   4
-rw-r--r--  arch/ia64/kernel/fsys.S               |   2
-rw-r--r--  arch/ia64/kernel/fsyscall_gtod_data.h |   2
-rw-r--r--  arch/ia64/kernel/time.c               |  10
-rw-r--r--  arch/x86/include/asm/vgtod.h          |  17
-rw-r--r--  arch/x86/kernel/tsc.c                 |  10
-rw-r--r--  arch/x86/kernel/vsyscall_64.c         |  25
-rw-r--r--  arch/x86/vdso/vclock_gettime.c        | 135
-rw-r--r--  drivers/rtc/interface.c               |   5
-rw-r--r--  drivers/rtc/rtc-mpc5121.c             |   2
-rw-r--r--  include/linux/rtc.h                   |   3
-rw-r--r--  include/linux/time.h                  |   1
-rw-r--r--  include/linux/timex.h                 |   2
-rw-r--r--  kernel/time.c                         |   6
-rw-r--r--  kernel/time/alarmtimer.c              |   8
-rw-r--r--  kernel/time/clocksource.c             |   2
-rw-r--r--  kernel/time/ntp.c                     | 134
-rw-r--r--  kernel/time/timekeeping.c             |  51
18 files changed, 193 insertions(+), 226 deletions(-)
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index af565016904..a48bd9a9927 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -269,8 +269,8 @@ void foo(void)
         BLANK();
 
         /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
-        DEFINE(IA64_GTOD_LOCK_OFFSET,
-                offsetof (struct fsyscall_gtod_data_t, lock));
+        DEFINE(IA64_GTOD_SEQ_OFFSET,
+                offsetof (struct fsyscall_gtod_data_t, seq));
         DEFINE(IA64_GTOD_WALL_TIME_OFFSET,
                 offsetof (struct fsyscall_gtod_data_t, wall_time));
         DEFINE(IA64_GTOD_MONO_TIME_OFFSET,
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index f15d8601827..cc26edac0ec 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -173,7 +173,7 @@ ENTRY(fsys_set_tid_address)
         FSYS_RETURN
 END(fsys_set_tid_address)
 
-#if IA64_GTOD_LOCK_OFFSET !=0
+#if IA64_GTOD_SEQ_OFFSET !=0
 #error fsys_gettimeofday incompatible with changes to struct fsyscall_gtod_data_t
 #endif
 #if IA64_ITC_JITTER_OFFSET !=0
diff --git a/arch/ia64/kernel/fsyscall_gtod_data.h b/arch/ia64/kernel/fsyscall_gtod_data.h
index 57d2ee6c83e..146b15b5fec 100644
--- a/arch/ia64/kernel/fsyscall_gtod_data.h
+++ b/arch/ia64/kernel/fsyscall_gtod_data.h
@@ -6,7 +6,7 @@
  */
 
 struct fsyscall_gtod_data_t {
-        seqlock_t lock;
+        seqcount_t seq;
         struct timespec wall_time;
         struct timespec monotonic_time;
         cycle_t clk_mask;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index aa94bdda9de..ecc904b33c5 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -34,9 +34,7 @@
 
 static cycle_t itc_get_cycles(struct clocksource *cs);
 
-struct fsyscall_gtod_data_t fsyscall_gtod_data = {
-        .lock = __SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock),
-};
+struct fsyscall_gtod_data_t fsyscall_gtod_data;
 
 struct itc_jitter_data_t itc_jitter_data;
 
@@ -459,9 +457,7 @@ void update_vsyscall_tz(void)
 void update_vsyscall(struct timespec *wall, struct timespec *wtm,
                         struct clocksource *c, u32 mult)
 {
-        unsigned long flags;
-
-        write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
+        write_seqcount_begin(&fsyscall_gtod_data.seq);
 
         /* copy fsyscall clock data */
         fsyscall_gtod_data.clk_mask = c->mask;
@@ -484,6 +480,6 @@ void update_vsyscall(struct timespec *wall, struct timespec *wtm,
                 fsyscall_gtod_data.monotonic_time.tv_sec++;
         }
 
-        write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
+        write_seqcount_end(&fsyscall_gtod_data.seq);
 }
 
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 815285bcace..8b38be2de9e 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -5,13 +5,8 @@
 #include <linux/clocksource.h>
 
 struct vsyscall_gtod_data {
-        seqlock_t lock;
+        seqcount_t seq;
 
-        /* open coded 'struct timespec' */
-        time_t wall_time_sec;
-        u32 wall_time_nsec;
-
-        struct timezone sys_tz;
         struct { /* extract of a clocksource struct */
                 int vclock_mode;
                 cycle_t cycle_last;
@@ -19,8 +14,16 @@ struct vsyscall_gtod_data {
                 u32 mult;
                 u32 shift;
         } clock;
-        struct timespec wall_to_monotonic;
+
+        /* open coded 'struct timespec' */
+        time_t wall_time_sec;
+        u32 wall_time_nsec;
+        u32 monotonic_time_nsec;
+        time_t monotonic_time_sec;
+
+        struct timezone sys_tz;
         struct timespec wall_time_coarse;
+        struct timespec monotonic_time_coarse;
 };
 extern struct vsyscall_gtod_data vsyscall_gtod_data;
 
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 899a03f2d18..fc0a147e372 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -933,6 +933,16 @@ static int __init init_tsc_clocksource(void)
                 clocksource_tsc.rating = 0;
                 clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
         }
+
+        /*
+         * Trust the results of the earlier calibration on systems
+         * exporting a reliable TSC.
+         */
+        if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
+                clocksource_register_khz(&clocksource_tsc, tsc_khz);
+                return 0;
+        }
+
         schedule_delayed_work(&tsc_irqwork, 0);
         return 0;
 }
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index b07ba939356..d5c69860b52 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -52,10 +52,7 @@
 #include "vsyscall_trace.h"
 
 DEFINE_VVAR(int, vgetcpu_mode);
-DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
-{
-        .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
-};
+DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
 
 static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
 
@@ -80,20 +77,15 @@ early_param("vsyscall", vsyscall_setup);
 
 void update_vsyscall_tz(void)
 {
-        unsigned long flags;
-
-        write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
-        /* sys_tz has changed */
         vsyscall_gtod_data.sys_tz = sys_tz;
-        write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
 }
 
 void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
                         struct clocksource *clock, u32 mult)
 {
-        unsigned long flags;
+        struct timespec monotonic;
 
-        write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+        write_seqcount_begin(&vsyscall_gtod_data.seq);
 
         /* copy vsyscall data */
         vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode;
@@ -101,12 +93,19 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
         vsyscall_gtod_data.clock.mask = clock->mask;
         vsyscall_gtod_data.clock.mult = mult;
         vsyscall_gtod_data.clock.shift = clock->shift;
+
         vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
         vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
-        vsyscall_gtod_data.wall_to_monotonic = *wtm;
+
+        monotonic = timespec_add(*wall_time, *wtm);
+        vsyscall_gtod_data.monotonic_time_sec = monotonic.tv_sec;
+        vsyscall_gtod_data.monotonic_time_nsec = monotonic.tv_nsec;
+
         vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
+        vsyscall_gtod_data.monotonic_time_coarse =
+                timespec_add(vsyscall_gtod_data.wall_time_coarse, *wtm);
 
-        write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+        write_seqcount_end(&vsyscall_gtod_data.seq);
 }
 
 static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
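One design choice worth noting in update_vsyscall() above: the
CLOCK_MONOTONIC sum (wall time + wall_to_monotonic) is now precomputed once
per timekeeping update, so the vdso read side no longer does the addition
and nanosecond-carry handling on every call. A self-contained sketch of the
carry logic that timespec_add() performs; the local type and function names
are illustrative, not the kernel's:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

struct ts { long tv_sec; long tv_nsec; };

/* Sum two normalized timespecs and renormalize tv_nsec into [0, 1s). */
static struct ts ts_add(struct ts a, struct ts b)
{
        struct ts r = { a.tv_sec + b.tv_sec, a.tv_nsec + b.tv_nsec };

        while (r.tv_nsec >= NSEC_PER_SEC) {
                r.tv_nsec -= NSEC_PER_SEC;
                r.tv_sec++;
        }
        return r;
}

int main(void)
{
        struct ts wall = { 1000, 900000000 };   /* 1000.9 s */
        struct ts wtm  = { 5, 200000000 };      /*    5.2 s */
        struct ts mono = ts_add(wall, wtm);

        printf("%ld.%09ld\n", mono.tv_sec, mono.tv_nsec); /* 1006.100000000 */
        return 0;
}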
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 6bc0e723b6e..885eff49d6a 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -70,100 +70,98 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
         return ret;
 }
 
+notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
+{
+        long ret;
+
+        asm("syscall" : "=a" (ret) :
+            "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+        return ret;
+}
+
+
 notrace static inline long vgetns(void)
 {
         long v;
         cycles_t cycles;
         if (gtod->clock.vclock_mode == VCLOCK_TSC)
                 cycles = vread_tsc();
-        else
+        else if (gtod->clock.vclock_mode == VCLOCK_HPET)
                 cycles = vread_hpet();
+        else
+                return 0;
         v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
         return (v * gtod->clock.mult) >> gtod->clock.shift;
 }
 
-notrace static noinline int do_realtime(struct timespec *ts)
+/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
+notrace static int __always_inline do_realtime(struct timespec *ts)
 {
         unsigned long seq, ns;
+        int mode;
+
         do {
-                seq = read_seqbegin(&gtod->lock);
+                seq = read_seqcount_begin(&gtod->seq);
+                mode = gtod->clock.vclock_mode;
                 ts->tv_sec = gtod->wall_time_sec;
                 ts->tv_nsec = gtod->wall_time_nsec;
                 ns = vgetns();
-        } while (unlikely(read_seqretry(&gtod->lock, seq)));
+        } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
+
         timespec_add_ns(ts, ns);
-        return 0;
+        return mode;
 }
 
-notrace static noinline int do_monotonic(struct timespec *ts)
+notrace static int do_monotonic(struct timespec *ts)
 {
-        unsigned long seq, ns, secs;
+        unsigned long seq, ns;
+        int mode;
+
         do {
-                seq = read_seqbegin(&gtod->lock);
-                secs = gtod->wall_time_sec;
-                ns = gtod->wall_time_nsec + vgetns();
-                secs += gtod->wall_to_monotonic.tv_sec;
-                ns += gtod->wall_to_monotonic.tv_nsec;
-        } while (unlikely(read_seqretry(&gtod->lock, seq)));
-
-        /* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
-         * are all guaranteed to be nonnegative.
-         */
-        while (ns >= NSEC_PER_SEC) {
-                ns -= NSEC_PER_SEC;
-                ++secs;
-        }
-        ts->tv_sec = secs;
-        ts->tv_nsec = ns;
+                seq = read_seqcount_begin(&gtod->seq);
+                mode = gtod->clock.vclock_mode;
+                ts->tv_sec = gtod->monotonic_time_sec;
+                ts->tv_nsec = gtod->monotonic_time_nsec;
+                ns = vgetns();
+        } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
+        timespec_add_ns(ts, ns);
 
-        return 0;
+        return mode;
 }
 
-notrace static noinline int do_realtime_coarse(struct timespec *ts)
+notrace static int do_realtime_coarse(struct timespec *ts)
 {
         unsigned long seq;
         do {
-                seq = read_seqbegin(&gtod->lock);
+                seq = read_seqcount_begin(&gtod->seq);
                 ts->tv_sec = gtod->wall_time_coarse.tv_sec;
                 ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
-        } while (unlikely(read_seqretry(&gtod->lock, seq)));
+        } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
         return 0;
 }
 
-notrace static noinline int do_monotonic_coarse(struct timespec *ts)
+notrace static int do_monotonic_coarse(struct timespec *ts)
 {
-        unsigned long seq, ns, secs;
+        unsigned long seq;
         do {
-                seq = read_seqbegin(&gtod->lock);
-                secs = gtod->wall_time_coarse.tv_sec;
-                ns = gtod->wall_time_coarse.tv_nsec;
-                secs += gtod->wall_to_monotonic.tv_sec;
-                ns += gtod->wall_to_monotonic.tv_nsec;
-        } while (unlikely(read_seqretry(&gtod->lock, seq)));
-
-        /* wall_time_nsec and wall_to_monotonic.tv_nsec are
-         * guaranteed to be between 0 and NSEC_PER_SEC.
-         */
-        if (ns >= NSEC_PER_SEC) {
-                ns -= NSEC_PER_SEC;
-                ++secs;
-        }
-        ts->tv_sec = secs;
-        ts->tv_nsec = ns;
+                seq = read_seqcount_begin(&gtod->seq);
+                ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
+                ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
+        } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
 
         return 0;
 }
 
 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 {
+        int ret = VCLOCK_NONE;
+
         switch (clock) {
         case CLOCK_REALTIME:
-                if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
-                        return do_realtime(ts);
+                ret = do_realtime(ts);
                 break;
         case CLOCK_MONOTONIC:
-                if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
-                        return do_monotonic(ts);
+                ret = do_monotonic(ts);
                 break;
         case CLOCK_REALTIME_COARSE:
                 return do_realtime_coarse(ts);
@@ -171,32 +169,33 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
                 return do_monotonic_coarse(ts);
         }
 
-        return vdso_fallback_gettime(clock, ts);
+        if (ret == VCLOCK_NONE)
+                return vdso_fallback_gettime(clock, ts);
+        return 0;
 }
 int clock_gettime(clockid_t, struct timespec *)
         __attribute__((weak, alias("__vdso_clock_gettime")));
 
 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 {
-        long ret;
-        if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) {
-                if (likely(tv != NULL)) {
-                        BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
-                                     offsetof(struct timespec, tv_nsec) ||
-                                     sizeof(*tv) != sizeof(struct timespec));
-                        do_realtime((struct timespec *)tv);
-                        tv->tv_usec /= 1000;
-                }
-                if (unlikely(tz != NULL)) {
-                        /* Avoid memcpy. Some old compilers fail to inline it */
-                        tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
-                        tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
-                }
-                return 0;
+        long ret = VCLOCK_NONE;
+
+        if (likely(tv != NULL)) {
+                BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
+                             offsetof(struct timespec, tv_nsec) ||
+                             sizeof(*tv) != sizeof(struct timespec));
+                ret = do_realtime((struct timespec *)tv);
+                tv->tv_usec /= 1000;
         }
-        asm("syscall" : "=a" (ret) :
-            "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
-        return ret;
+        if (unlikely(tz != NULL)) {
+                /* Avoid memcpy. Some old compilers fail to inline it */
+                tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
+                tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
+        }
+
+        if (ret == VCLOCK_NONE)
+                return vdso_fallback_gtod(tv, tz);
+        return 0;
 }
 int gettimeofday(struct timeval *, struct timezone *)
         __attribute__((weak, alias("__vdso_gettimeofday")));
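For callers nothing changes: glibc routes clock_gettime() and
gettimeofday() through these __vdso_* entry points, and when the current
clocksource has no usable vclock mode the VCLOCK_NONE fallback above
reissues the real syscall transparently. A minimal userspace check of the
fast path (standard POSIX API; link with -lrt on older glibc):

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        /* Resolved to __vdso_clock_gettime(); whether the TSC/HPET fast
         * path or the syscall fallback runs is invisible here. */
        if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
                printf("monotonic: %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
        return 0;
}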
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index dc87eda6581..eb415bd7649 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -458,6 +458,11 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
         if (rtc->uie_rtctimer.enabled == enabled)
                 goto out;
 
+        if (rtc->uie_unsupported) {
+                err = -EINVAL;
+                goto out;
+        }
+
         if (enabled) {
                 struct rtc_time tm;
                 ktime_t now, onesec;
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index e954a759ba8..42f5f829b3e 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -360,6 +360,8 @@ static int __devinit mpc5121_rtc_probe(struct platform_device *op)
                                         &mpc5200_rtc_ops, THIS_MODULE);
         }
 
+        rtc->rtc->uie_unsupported = 1;
+
         if (IS_ERR(rtc->rtc)) {
                 err = PTR_ERR(rtc->rtc);
                 goto out_free_irq;
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 93f4d035076..fcabfb4873c 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -202,7 +202,8 @@ struct rtc_device
         struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
         int pie_enabled;
         struct work_struct irqwork;
-
+        /* Some hardware can't support UIE mode */
+        int uie_unsupported;
 
 #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
         struct work_struct uie_task;
diff --git a/include/linux/time.h b/include/linux/time.h
index b3061782dec..97734e9409c 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -116,7 +116,6 @@ static inline struct timespec timespec_sub(struct timespec lhs,
 extern void read_persistent_clock(struct timespec *ts);
 extern void read_boot_clock(struct timespec *ts);
 extern int update_persistent_clock(struct timespec now);
-extern int no_sync_cmos_clock __read_mostly;
 void timekeeping_init(void);
 extern int timekeeping_suspended;
 
diff --git a/include/linux/timex.h b/include/linux/timex.h
index b75e1864ed1..99bc88b1fc0 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -252,7 +252,7 @@ extern void ntp_clear(void);
 /* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
 extern u64 ntp_tick_length(void);
 
-extern void second_overflow(void);
+extern int second_overflow(unsigned long secs);
 extern int do_adjtimex(struct timex *);
 extern void hardpps(const struct timespec *, const struct timespec *);
 
diff --git a/kernel/time.c b/kernel/time.c
index 73e416db0a1..ba744cf8069 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -163,7 +163,6 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
                 return error;
 
         if (tz) {
-                /* SMP safe, global irq locking makes it work. */
                 sys_tz = *tz;
                 update_vsyscall_tz();
                 if (firsttime) {
@@ -173,12 +172,7 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
                 }
         }
         if (tv)
-        {
-                /* SMP safe, again the code in arch/foo/time.c should
-                 * globally block out interrupts when it runs.
-                 */
                 return do_settimeofday(tv);
-        }
         return 0;
 }
 
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 8a46f5d6450..8a538c55fc7 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -96,6 +96,11 @@ static int alarmtimer_rtc_add_device(struct device *dev,
         return 0;
 }
 
+static inline void alarmtimer_rtc_timer_init(void)
+{
+        rtc_timer_init(&rtctimer, NULL, NULL);
+}
+
 static struct class_interface alarmtimer_rtc_interface = {
         .add_dev = &alarmtimer_rtc_add_device,
 };
@@ -117,6 +122,7 @@ static inline struct rtc_device *alarmtimer_get_rtcdev(void)
 #define rtcdev (NULL)
 static inline int alarmtimer_rtc_interface_setup(void) { return 0; }
 static inline void alarmtimer_rtc_interface_remove(void) { }
+static inline void alarmtimer_rtc_timer_init(void) { }
 #endif
 
 /**
@@ -783,6 +789,8 @@ static int __init alarmtimer_init(void)
                 .nsleep = alarm_timer_nsleep,
         };
 
+        alarmtimer_rtc_timer_init();
+
         posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock);
         posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock);
 
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index a45ca167ab2..c9583382141 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -500,7 +500,7 @@ static u32 clocksource_max_adjustment(struct clocksource *cs)
 {
         u64 ret;
         /*
-         * We won't try to correct for more then 11% adjustments (110,000 ppm),
+         * We won't try to correct for more than 11% adjustments (110,000 ppm),
          */
         ret = (u64)cs->mult * 11;
         do_div(ret,100);
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 6e039b144da..f03fd83b170 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -34,8 +34,6 @@ unsigned long tick_nsec;
 static u64 tick_length;
 static u64 tick_length_base;
 
-static struct hrtimer leap_timer;
-
 #define MAX_TICKADJ 500LL /* usecs */
 #define MAX_TICKADJ_SCALED \
         (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
@@ -381,70 +379,63 @@ u64 ntp_tick_length(void)
 
 
 /*
- * Leap second processing. If in leap-insert state at the end of the
- * day, the system clock is set back one second; if in leap-delete
- * state, the system clock is set ahead one second.
+ * this routine handles the overflow of the microsecond field
+ *
+ * The tricky bits of code to handle the accurate clock support
+ * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
+ * They were originally developed for SUN and DEC kernels.
+ * All the kudos should go to Dave for this stuff.
+ *
+ * Also handles leap second processing, and returns leap offset
  */
-static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
+int second_overflow(unsigned long secs)
 {
-        enum hrtimer_restart res = HRTIMER_NORESTART;
-        unsigned long flags;
+        s64 delta;
         int leap = 0;
+        unsigned long flags;
 
         spin_lock_irqsave(&ntp_lock, flags);
+
+        /*
+         * Leap second processing. If in leap-insert state at the end of the
+         * day, the system clock is set back one second; if in leap-delete
+         * state, the system clock is set ahead one second.
+         */
         switch (time_state) {
         case TIME_OK:
+                if (time_status & STA_INS)
+                        time_state = TIME_INS;
+                else if (time_status & STA_DEL)
+                        time_state = TIME_DEL;
                 break;
         case TIME_INS:
-                leap = -1;
-                time_state = TIME_OOP;
-                printk(KERN_NOTICE
-                        "Clock: inserting leap second 23:59:60 UTC\n");
-                hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
-                res = HRTIMER_RESTART;
+                if (secs % 86400 == 0) {
+                        leap = -1;
+                        time_state = TIME_OOP;
+                        printk(KERN_NOTICE
+                                "Clock: inserting leap second 23:59:60 UTC\n");
+                }
                 break;
         case TIME_DEL:
-                leap = 1;
-                time_tai--;
-                time_state = TIME_WAIT;
-                printk(KERN_NOTICE
-                        "Clock: deleting leap second 23:59:59 UTC\n");
+                if ((secs + 1) % 86400 == 0) {
+                        leap = 1;
+                        time_tai--;
+                        time_state = TIME_WAIT;
+                        printk(KERN_NOTICE
+                                "Clock: deleting leap second 23:59:59 UTC\n");
+                }
                 break;
         case TIME_OOP:
                 time_tai++;
                 time_state = TIME_WAIT;
-                /* fall through */
+                break;
+
         case TIME_WAIT:
                 if (!(time_status & (STA_INS | STA_DEL)))
                         time_state = TIME_OK;
                 break;
         }
-        spin_unlock_irqrestore(&ntp_lock, flags);
 
-        /*
-         * We have to call this outside of the ntp_lock to keep
-         * the proper locking hierarchy
-         */
-        if (leap)
-                timekeeping_leap_insert(leap);
-
-        return res;
-}
-
-/*
- * this routine handles the overflow of the microsecond field
- *
- * The tricky bits of code to handle the accurate clock support
- * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
- * They were originally developed for SUN and DEC kernels.
- * All the kudos should go to Dave for this stuff.
- */
-void second_overflow(void)
-{
-        s64 delta;
-        unsigned long flags;
-
-        spin_lock_irqsave(&ntp_lock, flags);
 
         /* Bump the maxerror field */
         time_maxerror += MAXFREQ / NSEC_PER_USEC;
@@ -481,15 +472,17 @@ void second_overflow(void)
         tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
                                                         << NTP_SCALE_SHIFT;
         time_adjust = 0;
+
+
+
 out:
         spin_unlock_irqrestore(&ntp_lock, flags);
+
+        return leap;
 }
 
 #ifdef CONFIG_GENERIC_CMOS_UPDATE
 
-/* Disable the cmos update - used by virtualization and embedded */
-int no_sync_cmos_clock __read_mostly;
-
 static void sync_cmos_clock(struct work_struct *work);
 
 static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
@@ -536,35 +529,13 @@ static void sync_cmos_clock(struct work_struct *work)
 
 static void notify_cmos_timer(void)
 {
-        if (!no_sync_cmos_clock)
-                schedule_delayed_work(&sync_cmos_work, 0);
+        schedule_delayed_work(&sync_cmos_work, 0);
 }
 
 #else
 static inline void notify_cmos_timer(void) { }
 #endif
 
-/*
- * Start the leap seconds timer:
- */
-static inline void ntp_start_leap_timer(struct timespec *ts)
-{
-        long now = ts->tv_sec;
-
-        if (time_status & STA_INS) {
-                time_state = TIME_INS;
-                now += 86400 - now % 86400;
-                hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
-
-                return;
-        }
-
-        if (time_status & STA_DEL) {
-                time_state = TIME_DEL;
-                now += 86400 - (now + 1) % 86400;
-                hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
-        }
-}
 
 /*
  * Propagate a new txc->status value into the NTP state:
@@ -589,22 +560,6 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts)
         time_status &= STA_RONLY;
         time_status |= txc->status & ~STA_RONLY;
 
-        switch (time_state) {
-        case TIME_OK:
-                ntp_start_leap_timer(ts);
-                break;
-        case TIME_INS:
-        case TIME_DEL:
-                time_state = TIME_OK;
-                ntp_start_leap_timer(ts);
-        case TIME_WAIT:
-                if (!(time_status & (STA_INS | STA_DEL)))
-                        time_state = TIME_OK;
-                break;
-        case TIME_OOP:
-                hrtimer_restart(&leap_timer);
-                break;
-        }
 }
 /*
  * Called with the xtime lock held, so we can access and modify
@@ -686,9 +641,6 @@ int do_adjtimex(struct timex *txc)
                     (txc->tick < 900000/USER_HZ ||
                      txc->tick > 1100000/USER_HZ))
                         return -EINVAL;
-
-                if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
-                        hrtimer_cancel(&leap_timer);
         }
 
         if (txc->modes & ADJ_SETOFFSET) {
@@ -1010,6 +962,4 @@ __setup("ntp_tick_adj=", ntp_tick_adj_setup);
 void __init ntp_init(void)
 {
         ntp_clear();
-        hrtimer_init(&leap_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
-        leap_timer.function = ntp_leap_second;
 }
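With the leap hrtimer gone, second_overflow() decides purely by arithmetic
on the wall-clock second count: every UTC midnight is a multiple of 86400,
so an insertion fires when secs crosses a day boundary and a deletion one
second before it. A small standalone sketch of that boundary test (the
epoch value chosen is the 2012-06-30/07-01 boundary, the next scheduled
leap second at the time of this merge):

#include <stdio.h>

int main(void)
{
        unsigned long secs = 1341100800UL; /* 2012-07-01 00:00:00 UTC */

        /* TIME_INS applies the leap at the day boundary itself... */
        printf("insert here: %d\n", secs % 86400 == 0);         /* 1 */
        /* ...TIME_DEL one second before it. */
        printf("delete here: %d\n", (secs + 1) % 86400 == 0);   /* 0 */
        return 0;
}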
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 15be32e19c6..d66b21308f7 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -184,18 +184,6 @@ static void timekeeping_update(bool clearntp)
 }
 
 
-void timekeeping_leap_insert(int leapsecond)
-{
-        unsigned long flags;
-
-        write_seqlock_irqsave(&timekeeper.lock, flags);
-        timekeeper.xtime.tv_sec += leapsecond;
-        timekeeper.wall_to_monotonic.tv_sec -= leapsecond;
-        timekeeping_update(false);
-        write_sequnlock_irqrestore(&timekeeper.lock, flags);
-
-}
-
 /**
  * timekeeping_forward_now - update clock to the current time
  *
@@ -448,9 +436,12 @@ EXPORT_SYMBOL(timekeeping_inject_offset);
 static int change_clocksource(void *data)
 {
         struct clocksource *new, *old;
+        unsigned long flags;
 
         new = (struct clocksource *) data;
 
+        write_seqlock_irqsave(&timekeeper.lock, flags);
+
         timekeeping_forward_now();
         if (!new->enable || new->enable(new) == 0) {
                 old = timekeeper.clock;
@@ -458,6 +449,10 @@ static int change_clocksource(void *data)
                 if (old->disable)
                         old->disable(old);
         }
+        timekeeping_update(true);
+
+        write_sequnlock_irqrestore(&timekeeper.lock, flags);
+
         return 0;
 }
 
@@ -827,7 +822,7 @@ static void timekeeping_adjust(s64 offset)
         int adj;
 
         /*
-         * The point of this is to check if the error is greater then half
+         * The point of this is to check if the error is greater than half
          * an interval.
          *
          * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
@@ -835,7 +830,7 @@ static void timekeeping_adjust(s64 offset)
          * Note we subtract one in the shift, so that error is really error*2.
          * This "saves" dividing(shifting) interval twice, but keeps the
          * (error > interval) comparison as still measuring if error is
-         * larger then half an interval.
+         * larger than half an interval.
          *
          * Note: It does not "save" on aggravation when reading the code.
          */
@@ -843,7 +838,7 @@ static void timekeeping_adjust(s64 offset)
         if (error > interval) {
                 /*
                  * We now divide error by 4(via shift), which checks if
-                 * the error is greater then twice the interval.
+                 * the error is greater than twice the interval.
                  * If it is greater, we need a bigadjust, if its smaller,
                  * we can adjust by 1.
                  */
@@ -874,13 +869,15 @@ static void timekeeping_adjust(s64 offset)
         } else /* No adjustment needed */
                 return;
 
-        WARN_ONCE(timekeeper.clock->maxadj &&
-                (timekeeper.mult + adj > timekeeper.clock->mult +
-                        timekeeper.clock->maxadj),
-                "Adjusting %s more then 11%% (%ld vs %ld)\n",
+        if (unlikely(timekeeper.clock->maxadj &&
+                (timekeeper.mult + adj >
+                        timekeeper.clock->mult + timekeeper.clock->maxadj))) {
+                printk_once(KERN_WARNING
+                        "Adjusting %s more than 11%% (%ld vs %ld)\n",
                 timekeeper.clock->name, (long)timekeeper.mult + adj,
                 (long)timekeeper.clock->mult +
                         timekeeper.clock->maxadj);
+        }
         /*
          * So the following can be confusing.
          *
@@ -952,7 +949,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
         u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
         u64 raw_nsecs;
 
-        /* If the offset is smaller then a shifted interval, do nothing */
+        /* If the offset is smaller than a shifted interval, do nothing */
         if (offset < timekeeper.cycle_interval<<shift)
                 return offset;
 
@@ -962,9 +959,11 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 
         timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
         while (timekeeper.xtime_nsec >= nsecps) {
+                int leap;
                 timekeeper.xtime_nsec -= nsecps;
                 timekeeper.xtime.tv_sec++;
-                second_overflow();
+                leap = second_overflow(timekeeper.xtime.tv_sec);
+                timekeeper.xtime.tv_sec += leap;
         }
 
         /* Accumulate raw time */
@@ -1018,13 +1017,13 @@ static void update_wall_time(void)
          * With NO_HZ we may have to accumulate many cycle_intervals
          * (think "ticks") worth of time at once. To do this efficiently,
          * we calculate the largest doubling multiple of cycle_intervals
-         * that is smaller then the offset. We then accumulate that
+         * that is smaller than the offset. We then accumulate that
          * chunk in one go, and then try to consume the next smaller
          * doubled multiple.
          */
         shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
         shift = max(0, shift);
-        /* Bound shift to one less then what overflows tick_length */
+        /* Bound shift to one less than what overflows tick_length */
         maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
         shift = min(shift, maxshift);
         while (offset >= timekeeper.cycle_interval) {
@@ -1072,12 +1071,14 @@ static void update_wall_time(void)
 
         /*
          * Finally, make sure that after the rounding
-         * xtime.tv_nsec isn't larger then NSEC_PER_SEC
+         * xtime.tv_nsec isn't larger than NSEC_PER_SEC
          */
         if (unlikely(timekeeper.xtime.tv_nsec >= NSEC_PER_SEC)) {
+                int leap;
                 timekeeper.xtime.tv_nsec -= NSEC_PER_SEC;
                 timekeeper.xtime.tv_sec++;
-                second_overflow();
+                leap = second_overflow(timekeeper.xtime.tv_sec);
+                timekeeper.xtime.tv_sec += leap;
         }
 
         timekeeping_update(false);