author    Andy Lutomirski <luto@MIT.EDU>  2011-06-05 13:50:20 -0400
committer Ingo Molnar <mingo@elte.hu>     2011-06-05 15:30:33 -0400
commit    0d7b8547fb67d5c2a7d954c56b3715b0e708be4a
tree      f2d8b6c9c06b751f1b2c1b0665e761ce8e28a358  /arch/x86/vdso
parent    9fd67b4ed0714ab718f1f9bd14c344af336a6df7
x86-64: Remove kernel.vsyscall64 sysctl
It's unnecessary overhead in code that's supposed to be highly
optimized. Removing it allows us to remove one of the two syscall
instructions in the vsyscall page.

The only sensible use for it is for UML users, and it doesn't fully
address inconsistent vsyscall results on UML. The real fix for UML
is to stop using vsyscalls entirely.

Signed-off-by: Andy Lutomirski <luto@mit.edu>
Cc: Jesper Juhl <jj@chaosbits.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Jan Beulich <JBeulich@novell.com>
Cc: richard -rw- weinberger <richard.weinberger@gmail.com>
Cc: Mikael Pettersson <mikpe@it.uu.se>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Louis Rilling <Louis.Rilling@kerlabs.com>
Cc: Valdis.Kletnieks@vt.edu
Cc: pageexec@freemail.hu
Link: http://lkml.kernel.org/r/973ae803fe76f712da4b2740e66dccf452d3b1e4.1307292171.git.luto@mit.edu
Signed-off-by: Ingo Molnar <mingo@elte.hu>
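For context, the check being removed sits on the hot path of every time
call that goes through the vDSO. The sketch below is not part of the
commit; it assumes glibc dispatches clock_gettime() to
__vdso_clock_gettime() whenever the current clocksource provides a vread
routine, so the loop executes no syscall instruction at all. (On glibc
older than 2.17, link with -lrt.)

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        /* Before this patch, every iteration also branched on
         * gtod->sysctl_enabled; afterwards the switch on the clock id
         * runs directly. */
        for (int i = 0; i < 1000000; i++)
                clock_gettime(CLOCK_MONOTONIC, &ts);

        printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}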
Diffstat (limited to 'arch/x86/vdso')
-rw-r--r--  arch/x86/vdso/vclock_gettime.c  55
1 file changed, 21 insertions(+), 34 deletions(-)
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index a724905fdae7..cf54813ac527 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -116,21 +116,21 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
 
 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 {
-        if (likely(gtod->sysctl_enabled))
-                switch (clock) {
-                case CLOCK_REALTIME:
-                        if (likely(gtod->clock.vread))
-                                return do_realtime(ts);
-                        break;
-                case CLOCK_MONOTONIC:
-                        if (likely(gtod->clock.vread))
-                                return do_monotonic(ts);
-                        break;
-                case CLOCK_REALTIME_COARSE:
-                        return do_realtime_coarse(ts);
-                case CLOCK_MONOTONIC_COARSE:
-                        return do_monotonic_coarse(ts);
-                }
+        switch (clock) {
+        case CLOCK_REALTIME:
+                if (likely(gtod->clock.vread))
+                        return do_realtime(ts);
+                break;
+        case CLOCK_MONOTONIC:
+                if (likely(gtod->clock.vread))
+                        return do_monotonic(ts);
+                break;
+        case CLOCK_REALTIME_COARSE:
+                return do_realtime_coarse(ts);
+        case CLOCK_MONOTONIC_COARSE:
+                return do_monotonic_coarse(ts);
+        }
+
         return vdso_fallback_gettime(clock, ts);
 }
 int clock_gettime(clockid_t, struct timespec *)
@@ -139,7 +139,7 @@ int clock_gettime(clockid_t, struct timespec *)
 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 {
         long ret;
-        if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
+        if (likely(gtod->clock.vread)) {
                 if (likely(tv != NULL)) {
                         BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
                                      offsetof(struct timespec, tv_nsec) ||
@@ -161,27 +161,14 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 int gettimeofday(struct timeval *, struct timezone *)
         __attribute__((weak, alias("__vdso_gettimeofday")));
 
-/* This will break when the xtime seconds get inaccurate, but that is
- * unlikely */
-
-static __always_inline long time_syscall(long *t)
-{
-        long secs;
-        asm volatile("syscall"
-                     : "=a" (secs)
-                     : "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory");
-        return secs;
-}
-
+/*
+ * This will break when the xtime seconds get inaccurate, but that is
+ * unlikely
+ */
 notrace time_t __vdso_time(time_t *t)
 {
-        time_t result;
-
-        if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
-                return time_syscall(t);
-
         /* This is atomic on x86_64 so we don't need any locks. */
-        result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
+        time_t result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
 
         if (t)
                 *t = result;
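Anyone who used kernel.vsyscall64=0 to force the kernel path (what the
removed time_syscall() fallback provided) can still reach it explicitly.
A minimal sketch, again not part of the commit, using the raw syscall
number from <sys/syscall.h>:

#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
        struct timespec vdso_ts, sys_ts;

        /* Fast path: glibc dispatches to __vdso_clock_gettime(). */
        clock_gettime(CLOCK_MONOTONIC, &vdso_ts);

        /* Explicit kernel path: the same route vdso_fallback_gettime()
         * takes when the clocksource has no vread routine. */
        syscall(SYS_clock_gettime, CLOCK_MONOTONIC, &sys_ts);

        printf("vdso %ld.%09ld  syscall %ld.%09ld\n",
               (long)vdso_ts.tv_sec, vdso_ts.tv_nsec,
               (long)sys_ts.tv_sec, sys_ts.tv_nsec);
        return 0;
}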