author     Linus Torvalds <torvalds@linux-foundation.org>  2012-03-29 17:16:48 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-03-29 17:16:48 -0400
commit     bcd550745fc54f789c14e7526e0633222c505faa
tree       c3fe11a6503b7ffdd4406a9fece5c40b3e2a3f6d
parent     93f378883cecb9dcb2cf5b51d9d24175906659da
parent     646783a389828e76e813f50791f7999429c821bc
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer core updates from Thomas Gleixner.

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  ia64: vsyscall: Add missing paranthesis
  alarmtimer: Don't call rtc_timer_init() when CONFIG_RTC_CLASS=n
  x86: vdso: Put declaration before code
  x86-64: Inline vdso clock_gettime helpers
  x86-64: Simplify and optimize vdso clock_gettime monotonic variants
  kernel-time: fix s/then/than/ spelling errors
  time: remove no_sync_cmos_clock
  time: Avoid scary backtraces when warning of > 11% adj
  alarmtimer: Make sure we initialize the rtctimer
  ntp: Fix leap-second hrtimer livelock
  x86, tsc: Skip refined tsc calibration on systems with reliable TSC
  rtc: Provide flag for rtc devices that don't support UIE
  ia64: vsyscall: Use seqcount instead of seqlock
  x86: vdso: Use seqcount instead of seqlock
  x86: vdso: Remove bogus locking in update_vsyscall_tz()
  time: Remove bogus comments
  time: Fix change_clocksource locking
  time: x86: Fix race switching from vsyscall to non-vsyscall clock
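The recurring theme on the x86 side of this pull is the switch from a seqlock to a raw seqcount for the vsyscall time data: the writer bumps a sequence counter around each update, and lockless readers retry if the counter changed underneath them. A minimal userspace sketch of that read/retry protocol, using C11 atomics as a stand-in for the kernel's seqcount_t (the helper names mirror read_seqcount_begin()/read_seqcount_retry(), but the implementation here is illustrative, not the kernel's):

/*
 * Userspace sketch of the seqcount read/retry protocol; an
 * illustration under stated assumptions, not the kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>

struct gtod_data {
	atomic_uint seq;	/* even: stable, odd: writer in progress */
	long wall_time_sec;
	long wall_time_nsec;
};

static unsigned read_begin(struct gtod_data *d)
{
	unsigned s;

	/* Wait until no write is in progress (sequence is even). */
	while ((s = atomic_load_explicit(&d->seq, memory_order_acquire)) & 1)
		;
	return s;
}

static int read_retry(struct gtod_data *d, unsigned start)
{
	/* Retry if a writer ran while we were reading. */
	return atomic_load_explicit(&d->seq, memory_order_acquire) != start;
}

static void write_update(struct gtod_data *d, long sec, long nsec)
{
	atomic_fetch_add_explicit(&d->seq, 1, memory_order_acq_rel); /* odd */
	d->wall_time_sec = sec;
	d->wall_time_nsec = nsec;
	atomic_fetch_add_explicit(&d->seq, 1, memory_order_release); /* even */
}

int main(void)
{
	struct gtod_data d = { 0 };
	unsigned seq;
	long sec, nsec;

	write_update(&d, 1333048608, 500);
	do {
		seq = read_begin(&d);
		sec = d.wall_time_sec;
		nsec = d.wall_time_nsec;
	} while (read_retry(&d, seq));
	printf("%ld.%09ld\n", sec, nsec);
	return 0;
}

The even/odd convention is the load-bearing part: an odd count means a writer is mid-update so readers spin, and any change of the count between begin and retry invalidates the snapshot.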
Diffstat (limited to 'arch/x86')
 -rw-r--r--  arch/x86/include/asm/vgtod.h    |  17
 -rw-r--r--  arch/x86/kernel/tsc.c           |  10
 -rw-r--r--  arch/x86/kernel/vsyscall_64.c   |  25
 -rw-r--r--  arch/x86/vdso/vclock_gettime.c  | 135
 4 files changed, 99 insertions, 88 deletions
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 815285bcaceb..8b38be2de9e1 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -5,13 +5,8 @@
 #include <linux/clocksource.h>
 
 struct vsyscall_gtod_data {
-	seqlock_t	lock;
+	seqcount_t	seq;
 
-	/* open coded 'struct timespec' */
-	time_t		wall_time_sec;
-	u32		wall_time_nsec;
-
-	struct timezone sys_tz;
 	struct { /* extract of a clocksource struct */
 		int vclock_mode;
 		cycle_t	cycle_last;
@@ -19,8 +14,16 @@ struct vsyscall_gtod_data {
 		u32	mult;
 		u32	shift;
 	} clock;
-	struct timespec	wall_to_monotonic;
+
+	/* open coded 'struct timespec' */
+	time_t		wall_time_sec;
+	u32		wall_time_nsec;
+	u32		monotonic_time_nsec;
+	time_t		monotonic_time_sec;
+
+	struct timezone sys_tz;
 	struct timespec wall_time_coarse;
+	struct timespec	monotonic_time_coarse;
 };
 extern struct vsyscall_gtod_data vsyscall_gtod_data;
 
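The struct reshuffle above is in service of precomputation: rather than storing wall_to_monotonic and making every reader add and renormalize it, update_vsyscall() now publishes ready-made monotonic_time_sec/monotonic_time_nsec (plus a coarse counterpart). A self-contained sketch of the normalized addition the writer performs, mirroring the semantics of the kernel's timespec_add(); the helper name here is invented:

#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

/* Add two normalized timespecs, carrying nanoseconds into seconds.
 * Both inputs are normalized, so at most one carry is needed. */
static struct timespec timespec_add_norm(struct timespec a, struct timespec b)
{
	struct timespec r;

	r.tv_sec = a.tv_sec + b.tv_sec;
	r.tv_nsec = a.tv_nsec + b.tv_nsec;
	if (r.tv_nsec >= NSEC_PER_SEC) {
		r.tv_nsec -= NSEC_PER_SEC;
		r.tv_sec++;
	}
	return r;
}

int main(void)
{
	/* Illustrative values: wall time plus a wall_to_monotonic
	 * offset (negative seconds, positive nanoseconds). */
	struct timespec wall = { .tv_sec = 1333048608, .tv_nsec = 900000000 };
	struct timespec wtm  = { .tv_sec = -1333000000, .tv_nsec = 250000000 };
	struct timespec mono = timespec_add_norm(wall, wtm);

	printf("monotonic base: %ld.%09ld\n",
	       (long)mono.tv_sec, (long)mono.tv_nsec);
	return 0;
}

Doing this once on the writer side is what lets the reader-side carry loops in the old do_monotonic()/do_monotonic_coarse() below disappear.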
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 899a03f2d181..fc0a147e3727 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -933,6 +933,16 @@ static int __init init_tsc_clocksource(void)
 		clocksource_tsc.rating = 0;
 		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
 	}
+
+	/*
+	 * Trust the results of the earlier calibration on systems
+	 * exporting a reliable TSC.
+	 */
+	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
+		clocksource_register_khz(&clocksource_tsc, tsc_khz);
+		return 0;
+	}
+
 	schedule_delayed_work(&tsc_irqwork, 0);
 	return 0;
 }
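The early return added above trusts the boot-time calibration whenever the CPU is flagged X86_FEATURE_TSC_RELIABLE (set on some virtualized and vendor-validated platforms, or forced with tsc=reliable), registering the clocksource immediately instead of scheduling the refined-calibration workqueue. As a hypothetical userspace check, assuming the feature is exported to /proc/cpuinfo as the tsc_reliable flag:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/cpuinfo", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Scan the "flags" line for the tsc_reliable feature bit. */
	while (fgets(line, sizeof(line), f)) {
		if (strncmp(line, "flags", 5) == 0 &&
		    strstr(line, " tsc_reliable")) {
			puts("TSC marked reliable: refined calibration skipped");
			fclose(f);
			return 0;
		}
	}
	fclose(f);
	puts("no tsc_reliable flag: refined calibration runs via workqueue");
	return 0;
}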
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index b07ba9393564..d5c69860b524 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -52,10 +52,7 @@
 #include "vsyscall_trace.h"
 
 DEFINE_VVAR(int, vgetcpu_mode);
-DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
-{
-	.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
-};
+DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
 
 static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
 
@@ -80,20 +77,15 @@ early_param("vsyscall", vsyscall_setup);
 
 void update_vsyscall_tz(void)
 {
-	unsigned long flags;
-
-	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
-	/* sys_tz has changed */
 	vsyscall_gtod_data.sys_tz = sys_tz;
-	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
 }
 
 void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 			struct clocksource *clock, u32 mult)
 {
-	unsigned long flags;
+	struct timespec monotonic;
 
-	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+	write_seqcount_begin(&vsyscall_gtod_data.seq);
 
 	/* copy vsyscall data */
 	vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode;
@@ -101,12 +93,19 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 	vsyscall_gtod_data.clock.mask = clock->mask;
 	vsyscall_gtod_data.clock.mult = mult;
 	vsyscall_gtod_data.clock.shift = clock->shift;
+
 	vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
 	vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
-	vsyscall_gtod_data.wall_to_monotonic = *wtm;
+
+	monotonic = timespec_add(*wall_time, *wtm);
+	vsyscall_gtod_data.monotonic_time_sec = monotonic.tv_sec;
+	vsyscall_gtod_data.monotonic_time_nsec = monotonic.tv_nsec;
+
 	vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
+	vsyscall_gtod_data.monotonic_time_coarse =
+		timespec_add(vsyscall_gtod_data.wall_time_coarse, *wtm);
 
-	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+	write_seqcount_end(&vsyscall_gtod_data.seq);
 }
 
 static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
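Note that update_vsyscall_tz() now runs with no locking at all, per the merged commit 'x86: vdso: Remove bogus locking in update_vsyscall_tz()': the vDSO reader copies tz_minuteswest and tz_dsttime as two independent int loads with no consistency requirement against the time values, so a racing update is harmless. The reader side can be exercised with a plain gettimeofday() call:

#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	struct timeval tv;
	struct timezone tz;

	/* The tz fields come straight from the sys_tz words that
	 * update_vsyscall_tz() stores without any lock. */
	if (gettimeofday(&tv, &tz) != 0) {
		perror("gettimeofday");
		return 1;
	}
	printf("minuteswest=%d dsttime=%d\n",
	       tz.tz_minuteswest, tz.tz_dsttime);
	return 0;
}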
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 6bc0e723b6e8..885eff49d6ab 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -70,100 +70,98 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 	return ret;
 }
 
+notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
+{
+	long ret;
+
+	asm("syscall" : "=a" (ret) :
+	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+	return ret;
+}
+
+
 notrace static inline long vgetns(void)
 {
 	long v;
 	cycles_t cycles;
 	if (gtod->clock.vclock_mode == VCLOCK_TSC)
 		cycles = vread_tsc();
-	else
+	else if (gtod->clock.vclock_mode == VCLOCK_HPET)
 		cycles = vread_hpet();
+	else
+		return 0;
 	v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
 	return (v * gtod->clock.mult) >> gtod->clock.shift;
 }
 
-notrace static noinline int do_realtime(struct timespec *ts)
+/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
+notrace static int __always_inline do_realtime(struct timespec *ts)
 {
 	unsigned long seq, ns;
+	int mode;
+
 	do {
-		seq = read_seqbegin(&gtod->lock);
+		seq = read_seqcount_begin(&gtod->seq);
+		mode = gtod->clock.vclock_mode;
 		ts->tv_sec = gtod->wall_time_sec;
 		ts->tv_nsec = gtod->wall_time_nsec;
 		ns = vgetns();
-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
+	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
+
 	timespec_add_ns(ts, ns);
-	return 0;
+	return mode;
 }
 
-notrace static noinline int do_monotonic(struct timespec *ts)
+notrace static int do_monotonic(struct timespec *ts)
 {
-	unsigned long seq, ns, secs;
+	unsigned long seq, ns;
+	int mode;
+
 	do {
-		seq = read_seqbegin(&gtod->lock);
-		secs = gtod->wall_time_sec;
-		ns = gtod->wall_time_nsec + vgetns();
-		secs += gtod->wall_to_monotonic.tv_sec;
-		ns += gtod->wall_to_monotonic.tv_nsec;
-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
-
-	/* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
-	 * are all guaranteed to be nonnegative.
-	 */
-	while (ns >= NSEC_PER_SEC) {
-		ns -= NSEC_PER_SEC;
-		++secs;
-	}
-	ts->tv_sec = secs;
-	ts->tv_nsec = ns;
+		seq = read_seqcount_begin(&gtod->seq);
+		mode = gtod->clock.vclock_mode;
+		ts->tv_sec = gtod->monotonic_time_sec;
+		ts->tv_nsec = gtod->monotonic_time_nsec;
+		ns = vgetns();
+	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
+	timespec_add_ns(ts, ns);
 
-	return 0;
+	return mode;
 }
 
-notrace static noinline int do_realtime_coarse(struct timespec *ts)
+notrace static int do_realtime_coarse(struct timespec *ts)
 {
 	unsigned long seq;
 	do {
-		seq = read_seqbegin(&gtod->lock);
+		seq = read_seqcount_begin(&gtod->seq);
 		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
 		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
+	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
 	return 0;
 }
 
-notrace static noinline int do_monotonic_coarse(struct timespec *ts)
+notrace static int do_monotonic_coarse(struct timespec *ts)
 {
-	unsigned long seq, ns, secs;
+	unsigned long seq;
 	do {
-		seq = read_seqbegin(&gtod->lock);
-		secs = gtod->wall_time_coarse.tv_sec;
-		ns = gtod->wall_time_coarse.tv_nsec;
-		secs += gtod->wall_to_monotonic.tv_sec;
-		ns += gtod->wall_to_monotonic.tv_nsec;
-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
-
-	/* wall_time_nsec and wall_to_monotonic.tv_nsec are
-	 * guaranteed to be between 0 and NSEC_PER_SEC.
-	 */
-	if (ns >= NSEC_PER_SEC) {
-		ns -= NSEC_PER_SEC;
-		++secs;
-	}
-	ts->tv_sec = secs;
-	ts->tv_nsec = ns;
+		seq = read_seqcount_begin(&gtod->seq);
+		ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
+		ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
+	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
 
 	return 0;
 }
 
 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 {
+	int ret = VCLOCK_NONE;
+
 	switch (clock) {
 	case CLOCK_REALTIME:
-		if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
-			return do_realtime(ts);
+		ret = do_realtime(ts);
 		break;
 	case CLOCK_MONOTONIC:
-		if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
-			return do_monotonic(ts);
+		ret = do_monotonic(ts);
 		break;
 	case CLOCK_REALTIME_COARSE:
 		return do_realtime_coarse(ts);
@@ -171,32 +169,33 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 		return do_monotonic_coarse(ts);
 	}
 
-	return vdso_fallback_gettime(clock, ts);
+	if (ret == VCLOCK_NONE)
+		return vdso_fallback_gettime(clock, ts);
+	return 0;
 }
 int clock_gettime(clockid_t, struct timespec *)
 	__attribute__((weak, alias("__vdso_clock_gettime")));
 
 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 {
-	long ret;
-	if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) {
-		if (likely(tv != NULL)) {
-			BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
-				     offsetof(struct timespec, tv_nsec) ||
-				     sizeof(*tv) != sizeof(struct timespec));
-			do_realtime((struct timespec *)tv);
-			tv->tv_usec /= 1000;
-		}
-		if (unlikely(tz != NULL)) {
-			/* Avoid memcpy. Some old compilers fail to inline it */
-			tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
-			tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
-		}
-		return 0;
+	long ret = VCLOCK_NONE;
+
+	if (likely(tv != NULL)) {
+		BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
+			     offsetof(struct timespec, tv_nsec) ||
+			     sizeof(*tv) != sizeof(struct timespec));
+		ret = do_realtime((struct timespec *)tv);
+		tv->tv_usec /= 1000;
 	}
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
-	return ret;
+	if (unlikely(tz != NULL)) {
+		/* Avoid memcpy. Some old compilers fail to inline it */
+		tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
+		tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
+	}
+
+	if (ret == VCLOCK_NONE)
+		return vdso_fallback_gtod(tv, tz);
+	return 0;
 }
 int gettimeofday(struct timeval *, struct timezone *)
 	__attribute__((weak, alias("__vdso_gettimeofday")));
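Taken together, the rewritten vclock_gettime.c serves all four clock IDs in userspace whenever a usable vclock mode is present, and falls back to the real syscall otherwise. A small harness touching each path (clock availability and which path is taken will vary by machine; on 2012-era glibc, link with -lrt):

#include <stdio.h>
#include <time.h>

static void show(const char *name, clockid_t id)
{
	struct timespec ts;

	if (clock_gettime(id, &ts) == 0)
		printf("%-22s %ld.%09ld\n", name,
		       (long)ts.tv_sec, (long)ts.tv_nsec);
	else
		perror(name);
}

int main(void)
{
	/* The first two may use the TSC/HPET vclock read; the coarse
	 * variants always come from the precomputed coarse fields. */
	show("CLOCK_REALTIME", CLOCK_REALTIME);
	show("CLOCK_MONOTONIC", CLOCK_MONOTONIC);
	show("CLOCK_REALTIME_COARSE", CLOCK_REALTIME_COARSE);
	show("CLOCK_MONOTONIC_COARSE", CLOCK_MONOTONIC_COARSE);
	return 0;
}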