Diffstat (limited to 'arch/x86/vdso')
-rw-r--r--  arch/x86/vdso/Makefile                 |  16
-rw-r--r--  arch/x86/vdso/vclock_gettime.c         | 256
-rw-r--r--  arch/x86/vdso/vdso-layout.lds.S        |  29
-rw-r--r--  arch/x86/vdso/vdso.S                   |  22
-rw-r--r--  arch/x86/vdso/vdso32-setup.c           | 301
-rw-r--r--  arch/x86/vdso/vdso32.S                 |  21
-rw-r--r--  arch/x86/vdso/vdso32/vclock_gettime.c  |  30
-rw-r--r--  arch/x86/vdso/vdso32/vdso32.lds.S      |  15
-rw-r--r--  arch/x86/vdso/vdsox32.S                |  22
-rw-r--r--  arch/x86/vdso/vma.c                    |  20
10 files changed, 349 insertions(+), 383 deletions(-)
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 9206ac7961a5..c580d1210ffe 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -23,7 +23,8 @@ vobjs-$(VDSOX32-y) += $(vobjx32s-compat)
 vobj64s := $(filter-out $(vobjx32s-compat),$(vobjs-y))
 
 # files to link into kernel
-obj-$(VDSO64-y)			+= vma.o vdso.o
+obj-y				+= vma.o
+obj-$(VDSO64-y)			+= vdso.o
 obj-$(VDSOX32-y)		+= vdsox32.o
 obj-$(VDSO32-y)			+= vdso32.o vdso32-setup.o
 
@@ -138,7 +139,7 @@ override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
 
 targets += vdso32/vdso32.lds
 targets += $(vdso32-images) $(vdso32-images:=.dbg)
-targets += vdso32/note.o $(vdso32.so-y:%=vdso32/%.o)
+targets += vdso32/note.o vdso32/vclock_gettime.o $(vdso32.so-y:%=vdso32/%.o)
 
 extra-y	+= $(vdso32-images)
 
@@ -148,8 +149,19 @@ KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
 $(vdso32-images:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
 $(vdso32-images:%=$(obj)/%.dbg): asflags-$(CONFIG_X86_64) += -m32
 
+KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
+KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
+KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
+KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
+$(vdso32-images:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
+
 $(vdso32-images:%=$(obj)/%.dbg): $(obj)/vdso32-%.so.dbg: FORCE \
 				 $(obj)/vdso32/vdso32.lds \
+				 $(obj)/vdso32/vclock_gettime.o \
 				 $(obj)/vdso32/note.o \
 				 $(obj)/vdso32/%.o
 	$(call if_changed,vdso)
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index eb5d7a56f8d4..16d686171e9a 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -4,6 +4,9 @@
  *
  * Fast user context implementation of clock_gettime, gettimeofday, and time.
  *
+ * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net>
+ *  sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
+ *
  * The code should have no internal unresolved relocations.
  * Check with readelf after changing.
  */
@@ -11,56 +14,55 @@
 /* Disable profiling for userspace code: */
 #define DISABLE_BRANCH_PROFILING
 
-#include <linux/kernel.h>
-#include <linux/posix-timers.h>
-#include <linux/time.h>
-#include <linux/string.h>
-#include <asm/vsyscall.h>
-#include <asm/fixmap.h>
+#include <uapi/linux/time.h>
 #include <asm/vgtod.h>
-#include <asm/timex.h>
 #include <asm/hpet.h>
+#include <asm/vvar.h>
 #include <asm/unistd.h>
-#include <asm/io.h>
-#include <asm/pvclock.h>
+#include <asm/msr.h>
+#include <linux/math64.h>
+#include <linux/time.h>
 
 #define gtod (&VVAR(vsyscall_gtod_data))
 
-notrace static cycle_t vread_tsc(void)
+extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts);
+extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
+extern time_t __vdso_time(time_t *t);
+
+#ifdef CONFIG_HPET_TIMER
+static inline u32 read_hpet_counter(const volatile void *addr)
 {
-	cycle_t ret;
-	u64 last;
+	return *(const volatile u32 *) (addr + HPET_COUNTER);
+}
+#endif
 
-	/*
-	 * Empirically, a fence (of type that depends on the CPU)
-	 * before rdtsc is enough to ensure that rdtsc is ordered
-	 * with respect to loads. The various CPU manuals are unclear
-	 * as to whether rdtsc can be reordered with later loads,
-	 * but no one has ever seen it happen.
-	 */
-	rdtsc_barrier();
-	ret = (cycle_t)vget_cycles();
+#ifndef BUILD_VDSO32
 
-	last = VVAR(vsyscall_gtod_data).clock.cycle_last;
+#include <linux/kernel.h>
+#include <asm/vsyscall.h>
+#include <asm/fixmap.h>
+#include <asm/pvclock.h>
 
-	if (likely(ret >= last))
-		return ret;
+static notrace cycle_t vread_hpet(void)
+{
+	return read_hpet_counter((const void *)fix_to_virt(VSYSCALL_HPET));
+}
 
-	/*
-	 * GCC likes to generate cmov here, but this branch is extremely
-	 * predictable (it's just a funciton of time and the likely is
-	 * very likely) and there's a data dependence, so force GCC
-	 * to generate a branch instead.  I don't barrier() because
-	 * we don't actually need a barrier, and if this function
-	 * ever gets inlined it will generate worse code.
-	 */
-	asm volatile ("");
-	return last;
+notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
+{
+	long ret;
+	asm("syscall" : "=a" (ret) :
+	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+	return ret;
 }
 
-static notrace cycle_t vread_hpet(void)
+notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
-	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER);
+	long ret;
+
+	asm("syscall" : "=a" (ret) :
+	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+	return ret;
 }
 
 #ifdef CONFIG_PARAVIRT_CLOCK
@@ -124,7 +126,7 @@ static notrace cycle_t vread_pvclock(int *mode)
 		*mode = VCLOCK_NONE;
 
 	/* refer to tsc.c read_tsc() comment for rationale */
-	last = VVAR(vsyscall_gtod_data).clock.cycle_last;
+	last = gtod->cycle_last;
 
 	if (likely(ret >= last))
 		return ret;
@@ -133,11 +135,30 @@ static notrace cycle_t vread_pvclock(int *mode)
 }
 #endif
 
+#else
+
+extern u8 hpet_page
+	__attribute__((visibility("hidden")));
+
+#ifdef CONFIG_HPET_TIMER
+static notrace cycle_t vread_hpet(void)
+{
+	return read_hpet_counter((const void *)(&hpet_page));
+}
+#endif
+
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
 	long ret;
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
+
+	asm(
+		"mov %%ebx, %%edx \n"
+		"mov %2, %%ebx \n"
+		"call VDSO32_vsyscall \n"
+		"mov %%edx, %%ebx \n"
+		: "=a" (ret)
+		: "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
+		: "memory", "edx");
 	return ret;
 }
 
@@ -145,28 +166,79 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
 	long ret;
 
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+	asm(
+		"mov %%ebx, %%edx \n"
+		"mov %2, %%ebx \n"
+		"call VDSO32_vsyscall \n"
+		"mov %%edx, %%ebx \n"
+		: "=a" (ret)
+		: "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
+		: "memory", "edx");
 	return ret;
 }
 
+#ifdef CONFIG_PARAVIRT_CLOCK
+
+static notrace cycle_t vread_pvclock(int *mode)
+{
+	*mode = VCLOCK_NONE;
+	return 0;
+}
+#endif
+
+#endif
+
+notrace static cycle_t vread_tsc(void)
+{
+	cycle_t ret;
+	u64 last;
+
+	/*
+	 * Empirically, a fence (of type that depends on the CPU)
+	 * before rdtsc is enough to ensure that rdtsc is ordered
+	 * with respect to loads. The various CPU manuals are unclear
+	 * as to whether rdtsc can be reordered with later loads,
+	 * but no one has ever seen it happen.
+	 */
+	rdtsc_barrier();
+	ret = (cycle_t)__native_read_tsc();
+
+	last = gtod->cycle_last;
+
+	if (likely(ret >= last))
+		return ret;
+
+	/*
+	 * GCC likes to generate cmov here, but this branch is extremely
+	 * predictable (it's just a funciton of time and the likely is
+	 * very likely) and there's a data dependence, so force GCC
+	 * to generate a branch instead.  I don't barrier() because
+	 * we don't actually need a barrier, and if this function
+	 * ever gets inlined it will generate worse code.
+	 */
+	asm volatile ("");
+	return last;
+}
 
 notrace static inline u64 vgetsns(int *mode)
 {
-	long v;
+	u64 v;
 	cycles_t cycles;
-	if (gtod->clock.vclock_mode == VCLOCK_TSC)
+
+	if (gtod->vclock_mode == VCLOCK_TSC)
 		cycles = vread_tsc();
-	else if (gtod->clock.vclock_mode == VCLOCK_HPET)
+#ifdef CONFIG_HPET_TIMER
+	else if (gtod->vclock_mode == VCLOCK_HPET)
 		cycles = vread_hpet();
+#endif
 #ifdef CONFIG_PARAVIRT_CLOCK
-	else if (gtod->clock.vclock_mode == VCLOCK_PVCLOCK)
+	else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
 		cycles = vread_pvclock(mode);
 #endif
 	else
 		return 0;
-	v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
-	return v * gtod->clock.mult;
+	v = (cycles - gtod->cycle_last) & gtod->mask;
+	return v * gtod->mult;
 }
 
 /* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
@@ -176,106 +248,102 @@ notrace static int __always_inline do_realtime(struct timespec *ts)
 	u64 ns;
 	int mode;
 
-	ts->tv_nsec = 0;
 	do {
-		seq = raw_read_seqcount_begin(&gtod->seq);
-		mode = gtod->clock.vclock_mode;
+		seq = gtod_read_begin(gtod);
+		mode = gtod->vclock_mode;
 		ts->tv_sec = gtod->wall_time_sec;
 		ns = gtod->wall_time_snsec;
 		ns += vgetsns(&mode);
-		ns >>= gtod->clock.shift;
-	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
+		ns >>= gtod->shift;
+	} while (unlikely(gtod_read_retry(gtod, seq)));
+
+	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+	ts->tv_nsec = ns;
 
-	timespec_add_ns(ts, ns);
 	return mode;
 }
 
-notrace static int do_monotonic(struct timespec *ts)
+notrace static int __always_inline do_monotonic(struct timespec *ts)
 {
 	unsigned long seq;
 	u64 ns;
 	int mode;
 
-	ts->tv_nsec = 0;
 	do {
-		seq = raw_read_seqcount_begin(&gtod->seq);
-		mode = gtod->clock.vclock_mode;
+		seq = gtod_read_begin(gtod);
+		mode = gtod->vclock_mode;
 		ts->tv_sec = gtod->monotonic_time_sec;
 		ns = gtod->monotonic_time_snsec;
 		ns += vgetsns(&mode);
-		ns >>= gtod->clock.shift;
-	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
-	timespec_add_ns(ts, ns);
+		ns >>= gtod->shift;
+	} while (unlikely(gtod_read_retry(gtod, seq)));
+
+	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+	ts->tv_nsec = ns;
 
 	return mode;
 }
 
-notrace static int do_realtime_coarse(struct timespec *ts)
+notrace static void do_realtime_coarse(struct timespec *ts)
 {
 	unsigned long seq;
 	do {
-		seq = raw_read_seqcount_begin(&gtod->seq);
-		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
-		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
-	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
-	return 0;
+		seq = gtod_read_begin(gtod);
+		ts->tv_sec = gtod->wall_time_coarse_sec;
+		ts->tv_nsec = gtod->wall_time_coarse_nsec;
+	} while (unlikely(gtod_read_retry(gtod, seq)));
 }
 
-notrace static int do_monotonic_coarse(struct timespec *ts)
+notrace static void do_monotonic_coarse(struct timespec *ts)
 {
 	unsigned long seq;
 	do {
-		seq = raw_read_seqcount_begin(&gtod->seq);
-		ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
-		ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
-	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
-
-	return 0;
+		seq = gtod_read_begin(gtod);
+		ts->tv_sec = gtod->monotonic_time_coarse_sec;
+		ts->tv_nsec = gtod->monotonic_time_coarse_nsec;
+	} while (unlikely(gtod_read_retry(gtod, seq)));
 }
 
 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 {
-	int ret = VCLOCK_NONE;
-
 	switch (clock) {
 	case CLOCK_REALTIME:
-		ret = do_realtime(ts);
+		if (do_realtime(ts) == VCLOCK_NONE)
+			goto fallback;
 		break;
 	case CLOCK_MONOTONIC:
-		ret = do_monotonic(ts);
+		if (do_monotonic(ts) == VCLOCK_NONE)
+			goto fallback;
 		break;
 	case CLOCK_REALTIME_COARSE:
-		return do_realtime_coarse(ts);
+		do_realtime_coarse(ts);
+		break;
 	case CLOCK_MONOTONIC_COARSE:
-		return do_monotonic_coarse(ts);
+		do_monotonic_coarse(ts);
+		break;
+	default:
+		goto fallback;
 	}
 
-	if (ret == VCLOCK_NONE)
-		return vdso_fallback_gettime(clock, ts);
 	return 0;
+fallback:
+	return vdso_fallback_gettime(clock, ts);
 }
 int clock_gettime(clockid_t, struct timespec *)
 	__attribute__((weak, alias("__vdso_clock_gettime")));
 
 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 {
-	long ret = VCLOCK_NONE;
-
 	if (likely(tv != NULL)) {
-		BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
-			     offsetof(struct timespec, tv_nsec) ||
-			     sizeof(*tv) != sizeof(struct timespec));
-		ret = do_realtime((struct timespec *)tv);
+		if (unlikely(do_realtime((struct timespec *)tv) == VCLOCK_NONE))
+			return vdso_fallback_gtod(tv, tz);
 		tv->tv_usec /= 1000;
 	}
 	if (unlikely(tz != NULL)) {
-		/* Avoid memcpy. Some old compilers fail to inline it */
-		tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
-		tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
+		tz->tz_minuteswest = gtod->tz_minuteswest;
+		tz->tz_dsttime = gtod->tz_dsttime;
 	}
 
-	if (ret == VCLOCK_NONE)
-		return vdso_fallback_gtod(tv, tz);
 	return 0;
 }
 int gettimeofday(struct timeval *, struct timezone *)
@@ -287,8 +355,8 @@ int gettimeofday(struct timeval *, struct timezone *)
  */
 notrace time_t __vdso_time(time_t *t)
 {
-	/* This is atomic on x86_64 so we don't need any locks. */
-	time_t result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
+	/* This is atomic on x86 so we don't need any locks. */
+	time_t result = ACCESS_ONCE(gtod->wall_time_sec);
 
 	if (t)
 		*t = result;
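
For context: do_realtime() and do_monotonic() above are classic seqcount readers against the shared vsyscall_gtod_data page. Below is a minimal userspace sketch of the same retry pattern; gtod_shadow, read_begin and read_retry are illustrative names, not kernel API.

/* C11 sketch of the lockless read loop do_realtime() relies on */
#include <stdatomic.h>
#include <time.h>

struct gtod_shadow {
	atomic_uint seq;		/* even = stable, odd = writer active */
	time_t wall_sec;
	unsigned long long wall_snsec;	/* nanoseconds, left-shifted by 'shift' */
	unsigned shift;
};

static unsigned read_begin(struct gtod_shadow *g)
{
	unsigned s;

	/* spin until no write is in flight */
	while ((s = atomic_load_explicit(&g->seq, memory_order_acquire)) & 1)
		;
	return s;
}

static int read_retry(struct gtod_shadow *g, unsigned s)
{
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&g->seq, memory_order_relaxed) != s;
}

static void shadow_realtime(struct gtod_shadow *g, struct timespec *ts)
{
	unsigned seq;
	unsigned long long ns;

	do {
		seq = read_begin(g);
		ts->tv_sec = g->wall_sec;
		ns = g->wall_snsec >> g->shift;
	} while (read_retry(g, seq));

	/* same job as the patch's __iter_div_u64_rem() call */
	ts->tv_sec += ns / 1000000000ULL;
	ts->tv_nsec = ns % 1000000000ULL;
}

The patch can drop the old "ts->tv_nsec = 0" pre-write because tv_nsec is now stored unconditionally after the loop instead of being accumulated by timespec_add_ns().
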
diff --git a/arch/x86/vdso/vdso-layout.lds.S b/arch/x86/vdso/vdso-layout.lds.S
index 634a2cf62046..2e263f367b13 100644
--- a/arch/x86/vdso/vdso-layout.lds.S
+++ b/arch/x86/vdso/vdso-layout.lds.S
@@ -6,7 +6,25 @@
 
 SECTIONS
 {
-	. = VDSO_PRELINK + SIZEOF_HEADERS;
+#ifdef BUILD_VDSO32
+#include <asm/vdso32.h>
+
+	.hpet_sect : {
+		hpet_page = . - VDSO_OFFSET(VDSO_HPET_PAGE);
+	} :text :hpet_sect
+
+	.vvar_sect : {
+		vvar = . - VDSO_OFFSET(VDSO_VVAR_PAGE);
+
+	/* Place all vvars at the offsets in asm/vvar.h. */
+#define EMIT_VVAR(name, offset) vvar_ ## name = vvar + offset;
+#define __VVAR_KERNEL_LDS
+#include <asm/vvar.h>
+#undef __VVAR_KERNEL_LDS
+#undef EMIT_VVAR
+	} :text :vvar_sect
+#endif
+	. = SIZEOF_HEADERS;
 
 	.hash : { *(.hash) } :text
 	.gnu.hash : { *(.gnu.hash) }
@@ -44,6 +62,11 @@ SECTIONS
 	. = ALIGN(0x100);
 
 	.text : { *(.text*) } :text =0x90909090
+
+	/DISCARD/ : {
+		*(.discard)
+		*(.discard.*)
+	}
 }
 
 /*
@@ -61,4 +84,8 @@ PHDRS
 	dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
 	note PT_NOTE FLAGS(4); /* PF_R */
 	eh_frame_hdr PT_GNU_EH_FRAME;
+#ifdef BUILD_VDSO32
+	vvar_sect PT_NULL FLAGS(4); /* PF_R */
+	hpet_sect PT_NULL FLAGS(4); /* PF_R */
+#endif
 }
diff --git a/arch/x86/vdso/vdso.S b/arch/x86/vdso/vdso.S
index 1e13eb8c9656..be3f23b09af5 100644
--- a/arch/x86/vdso/vdso.S
+++ b/arch/x86/vdso/vdso.S
@@ -1,21 +1,3 @@
-#include <asm/page_types.h>
-#include <linux/linkage.h>
+#include <asm/vdso.h>
 
-__PAGE_ALIGNED_DATA
-
-	.globl vdso_start, vdso_end
-	.align PAGE_SIZE
-vdso_start:
-	.incbin "arch/x86/vdso/vdso.so"
-vdso_end:
-	.align PAGE_SIZE /* extra data here leaks to userspace. */
-
-.previous
-
-	.globl vdso_pages
-	.bss
-	.align 8
-	.type vdso_pages, @object
-vdso_pages:
-	.zero (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE * 8
-	.size vdso_pages, .-vdso_pages
+DEFINE_VDSO_IMAGE(vdso, "arch/x86/vdso/vdso.so")
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index d6bfb876cfb0..00348980a3a6 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -16,6 +16,7 @@
 #include <linux/mm.h>
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 
 #include <asm/cpufeature.h>
 #include <asm/msr.h>
@@ -25,17 +26,14 @@
 #include <asm/tlbflush.h>
 #include <asm/vdso.h>
 #include <asm/proto.h>
-
-enum {
-	VDSO_DISABLED = 0,
-	VDSO_ENABLED = 1,
-	VDSO_COMPAT = 2,
-};
+#include <asm/fixmap.h>
+#include <asm/hpet.h>
+#include <asm/vvar.h>
 
 #ifdef CONFIG_COMPAT_VDSO
-#define VDSO_DEFAULT	VDSO_COMPAT
+#define VDSO_DEFAULT	0
 #else
-#define VDSO_DEFAULT	VDSO_ENABLED
+#define VDSO_DEFAULT	1
 #endif
 
 #ifdef CONFIG_X86_64
@@ -44,13 +42,6 @@ enum {
 #endif
 
 /*
- * This is the difference between the prelinked addresses in the vDSO images
- * and the VDSO_HIGH_BASE address where CONFIG_COMPAT_VDSO places the vDSO
- * in the user address space.
- */
-#define VDSO_ADDR_ADJUST	(VDSO_HIGH_BASE - (unsigned long)VDSO32_PRELINK)
-
-/*
  * Should the kernel map a VDSO page into processes and pass its
  * address down to glibc upon exec()?
  */
@@ -60,6 +51,9 @@ static int __init vdso_setup(char *s)
 {
 	vdso_enabled = simple_strtoul(s, NULL, 0);
 
+	if (vdso_enabled > 1)
+		pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
+
 	return 1;
 }
 
@@ -76,124 +70,8 @@ __setup_param("vdso=", vdso32_setup, vdso_setup, 0);
 EXPORT_SYMBOL_GPL(vdso_enabled);
 #endif
 
-static __init void reloc_symtab(Elf32_Ehdr *ehdr,
-				unsigned offset, unsigned size)
-{
-	Elf32_Sym *sym = (void *)ehdr + offset;
-	unsigned nsym = size / sizeof(*sym);
-	unsigned i;
-
-	for(i = 0; i < nsym; i++, sym++) {
-		if (sym->st_shndx == SHN_UNDEF ||
-		    sym->st_shndx == SHN_ABS)
-			continue;  /* skip */
-
-		if (sym->st_shndx > SHN_LORESERVE) {
-			printk(KERN_INFO "VDSO: unexpected st_shndx %x\n",
-			       sym->st_shndx);
-			continue;
-		}
-
-		switch(ELF_ST_TYPE(sym->st_info)) {
-		case STT_OBJECT:
-		case STT_FUNC:
-		case STT_SECTION:
-		case STT_FILE:
-			sym->st_value += VDSO_ADDR_ADJUST;
-		}
-	}
-}
-
-static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset)
-{
-	Elf32_Dyn *dyn = (void *)ehdr + offset;
-
-	for(; dyn->d_tag != DT_NULL; dyn++)
-		switch(dyn->d_tag) {
-		case DT_PLTGOT:
-		case DT_HASH:
-		case DT_STRTAB:
-		case DT_SYMTAB:
-		case DT_RELA:
-		case DT_INIT:
-		case DT_FINI:
-		case DT_REL:
-		case DT_DEBUG:
-		case DT_JMPREL:
-		case DT_VERSYM:
-		case DT_VERDEF:
-		case DT_VERNEED:
-		case DT_ADDRRNGLO ... DT_ADDRRNGHI:
-			/* definitely pointers needing relocation */
-			dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
-			break;
-
-		case DT_ENCODING ... OLD_DT_LOOS-1:
-		case DT_LOOS ... DT_HIOS-1:
-			/* Tags above DT_ENCODING are pointers if
-			   they're even */
-			if (dyn->d_tag >= DT_ENCODING &&
-			    (dyn->d_tag & 1) == 0)
-				dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
-			break;
-
-		case DT_VERDEFNUM:
-		case DT_VERNEEDNUM:
-		case DT_FLAGS_1:
-		case DT_RELACOUNT:
-		case DT_RELCOUNT:
-		case DT_VALRNGLO ... DT_VALRNGHI:
-			/* definitely not pointers */
-			break;
-
-		case OLD_DT_LOOS ... DT_LOOS-1:
-		case DT_HIOS ... DT_VALRNGLO-1:
-		default:
-			if (dyn->d_tag > DT_ENCODING)
-				printk(KERN_INFO "VDSO: unexpected DT_tag %x\n",
-				       dyn->d_tag);
-			break;
-		}
-}
-
-static __init void relocate_vdso(Elf32_Ehdr *ehdr)
-{
-	Elf32_Phdr *phdr;
-	Elf32_Shdr *shdr;
-	int i;
-
-	BUG_ON(memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0 ||
-	       !elf_check_arch_ia32(ehdr) ||
-	       ehdr->e_type != ET_DYN);
-
-	ehdr->e_entry += VDSO_ADDR_ADJUST;
-
-	/* rebase phdrs */
-	phdr = (void *)ehdr + ehdr->e_phoff;
-	for (i = 0; i < ehdr->e_phnum; i++) {
-		phdr[i].p_vaddr += VDSO_ADDR_ADJUST;
-
-		/* relocate dynamic stuff */
-		if (phdr[i].p_type == PT_DYNAMIC)
-			reloc_dyn(ehdr, phdr[i].p_offset);
-	}
-
-	/* rebase sections */
-	shdr = (void *)ehdr + ehdr->e_shoff;
-	for(i = 0; i < ehdr->e_shnum; i++) {
-		if (!(shdr[i].sh_flags & SHF_ALLOC))
-			continue;
-
-		shdr[i].sh_addr += VDSO_ADDR_ADJUST;
-
-		if (shdr[i].sh_type == SHT_SYMTAB ||
-		    shdr[i].sh_type == SHT_DYNSYM)
-			reloc_symtab(ehdr, shdr[i].sh_offset,
-				     shdr[i].sh_size);
-	}
-}
-
-static struct page *vdso32_pages[1];
+static struct page **vdso32_pages;
+static unsigned vdso32_size;
 
 #ifdef CONFIG_X86_64
 
@@ -212,12 +90,6 @@ void syscall32_cpu_init(void)
 	wrmsrl(MSR_CSTAR, ia32_cstar_target);
 }
 
-#define compat_uses_vma		1
-
-static inline void map_compat_vdso(int map)
-{
-}
-
 #else  /* CONFIG_X86_32 */
 
 #define vdso32_sysenter()	(boot_cpu_has(X86_FEATURE_SEP))
@@ -241,64 +113,36 @@ void enable_sep_cpu(void)
 	put_cpu();
 }
 
-static struct vm_area_struct gate_vma;
-
-static int __init gate_vma_init(void)
-{
-	gate_vma.vm_mm = NULL;
-	gate_vma.vm_start = FIXADDR_USER_START;
-	gate_vma.vm_end = FIXADDR_USER_END;
-	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
-	gate_vma.vm_page_prot = __P101;
-
-	return 0;
-}
-
-#define compat_uses_vma		0
-
-static void map_compat_vdso(int map)
-{
-	static int vdso_mapped;
-
-	if (map == vdso_mapped)
-		return;
-
-	vdso_mapped = map;
-
-	__set_fixmap(FIX_VDSO, page_to_pfn(vdso32_pages[0]) << PAGE_SHIFT,
-		     map ? PAGE_READONLY_EXEC : PAGE_NONE);
-
-	/* flush stray tlbs */
-	flush_tlb_all();
-}
-
 #endif	/* CONFIG_X86_64 */
 
 int __init sysenter_setup(void)
 {
-	void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
-	const void *vsyscall;
-	size_t vsyscall_len;
-
-	vdso32_pages[0] = virt_to_page(syscall_page);
-
-#ifdef CONFIG_X86_32
-	gate_vma_init();
-#endif
+	char *vdso32_start, *vdso32_end;
+	int npages, i;
 
+#ifdef CONFIG_COMPAT
 	if (vdso32_syscall()) {
-		vsyscall = &vdso32_syscall_start;
-		vsyscall_len = &vdso32_syscall_end - &vdso32_syscall_start;
-	} else if (vdso32_sysenter()){
-		vsyscall = &vdso32_sysenter_start;
-		vsyscall_len = &vdso32_sysenter_end - &vdso32_sysenter_start;
+		vdso32_start = vdso32_syscall_start;
+		vdso32_end = vdso32_syscall_end;
+		vdso32_pages = vdso32_syscall_pages;
+	} else
+#endif
+	if (vdso32_sysenter()) {
+		vdso32_start = vdso32_sysenter_start;
+		vdso32_end = vdso32_sysenter_end;
+		vdso32_pages = vdso32_sysenter_pages;
 	} else {
-		vsyscall = &vdso32_int80_start;
-		vsyscall_len = &vdso32_int80_end - &vdso32_int80_start;
+		vdso32_start = vdso32_int80_start;
+		vdso32_end = vdso32_int80_end;
+		vdso32_pages = vdso32_int80_pages;
 	}
 
-	memcpy(syscall_page, vsyscall, vsyscall_len);
-	relocate_vdso(syscall_page);
+	npages = ((vdso32_end - vdso32_start) + PAGE_SIZE - 1) / PAGE_SIZE;
+	vdso32_size = npages << PAGE_SHIFT;
+	for (i = 0; i < npages; i++)
+		vdso32_pages[i] = virt_to_page(vdso32_start + i*PAGE_SIZE);
+
+	patch_vdso32(vdso32_start, vdso32_size);
 
 	return 0;
 }
@@ -309,48 +153,73 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	struct mm_struct *mm = current->mm;
 	unsigned long addr;
 	int ret = 0;
-	bool compat;
+	struct vm_area_struct *vma;
 
 #ifdef CONFIG_X86_X32_ABI
 	if (test_thread_flag(TIF_X32))
 		return x32_setup_additional_pages(bprm, uses_interp);
 #endif
 
-	if (vdso_enabled == VDSO_DISABLED)
+	if (vdso_enabled != 1)  /* Other values all mean "disabled" */
 		return 0;
 
 	down_write(&mm->mmap_sem);
 
-	/* Test compat mode once here, in case someone
-	   changes it via sysctl */
-	compat = (vdso_enabled == VDSO_COMPAT);
+	addr = get_unmapped_area(NULL, 0, vdso32_size + VDSO_OFFSET(VDSO_PREV_PAGES), 0, 0);
+	if (IS_ERR_VALUE(addr)) {
+		ret = addr;
+		goto up_fail;
+	}
+
+	addr += VDSO_OFFSET(VDSO_PREV_PAGES);
 
-	map_compat_vdso(compat);
+	current->mm->context.vdso = (void *)addr;
 
-	if (compat)
-		addr = VDSO_HIGH_BASE;
-	else {
-		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
-		if (IS_ERR_VALUE(addr)) {
-			ret = addr;
-			goto up_fail;
-		}
+	/*
+	 * MAYWRITE to allow gdb to COW and set breakpoints
+	 */
+	ret = install_special_mapping(mm,
+			addr,
+			vdso32_size,
+			VM_READ|VM_EXEC|
+			VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+			vdso32_pages);
+
+	if (ret)
+		goto up_fail;
+
+	vma = _install_special_mapping(mm,
+			addr - VDSO_OFFSET(VDSO_PREV_PAGES),
+			VDSO_OFFSET(VDSO_PREV_PAGES),
+			VM_READ,
+			NULL);
+
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto up_fail;
 	}
 
-	current->mm->context.vdso = (void *)addr;
+	ret = remap_pfn_range(vma,
+			addr - VDSO_OFFSET(VDSO_VVAR_PAGE),
+			__pa_symbol(&__vvar_page) >> PAGE_SHIFT,
+			PAGE_SIZE,
+			PAGE_READONLY);
+
+	if (ret)
+		goto up_fail;
 
-	if (compat_uses_vma || !compat) {
-		/*
-		 * MAYWRITE to allow gdb to COW and set breakpoints
-		 */
-		ret = install_special_mapping(mm, addr, PAGE_SIZE,
-					      VM_READ|VM_EXEC|
-					      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-					      vdso32_pages);
+#ifdef CONFIG_HPET_TIMER
+	if (hpet_address) {
+		ret = io_remap_pfn_range(vma,
+			addr - VDSO_OFFSET(VDSO_HPET_PAGE),
+			hpet_address >> PAGE_SHIFT,
+			PAGE_SIZE,
+			pgprot_noncached(PAGE_READONLY));
 
 		if (ret)
 			goto up_fail;
 	}
+#endif
 
 	current_thread_info()->sysenter_return =
 		VDSO32_SYMBOL(addr, SYSENTER_RETURN);
@@ -411,20 +280,12 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 
 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
-	/*
-	 * Check to see if the corresponding task was created in compat vdso
-	 * mode.
-	 */
-	if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
-		return &gate_vma;
 	return NULL;
 }
 
 int in_gate_area(struct mm_struct *mm, unsigned long addr)
 {
-	const struct vm_area_struct *vma = get_gate_vma(mm);
-
-	return vma && addr >= vma->vm_start && addr < vma->vm_end;
+	return 0;
 }
 
 int in_gate_area_no_mm(unsigned long addr)
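
For context: install_special_mapping() above is what makes the 32-bit vDSO visible in each process. A small sketch (not part of the patch) that a process can use to see the result:

/* print the [vdso] line that arch_setup_additional_pages() created */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "[vdso]"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}

With this patch applied, the vvar page (and, when an HPET is present, the HPET page) sits at fixed negative offsets directly below that mapping.
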
diff --git a/arch/x86/vdso/vdso32.S b/arch/x86/vdso/vdso32.S
index 2ce5f82c333b..018bcd9f97b4 100644
--- a/arch/x86/vdso/vdso32.S
+++ b/arch/x86/vdso/vdso32.S
@@ -1,22 +1,9 @@
-#include <linux/init.h>
+#include <asm/vdso.h>
 
-__INITDATA
+DEFINE_VDSO_IMAGE(vdso32_int80, "arch/x86/vdso/vdso32-int80.so")
 
-	.globl vdso32_int80_start, vdso32_int80_end
-vdso32_int80_start:
-	.incbin "arch/x86/vdso/vdso32-int80.so"
-vdso32_int80_end:
-
-	.globl vdso32_syscall_start, vdso32_syscall_end
-vdso32_syscall_start:
 #ifdef CONFIG_COMPAT
-	.incbin "arch/x86/vdso/vdso32-syscall.so"
+DEFINE_VDSO_IMAGE(vdso32_syscall, "arch/x86/vdso/vdso32-syscall.so")
 #endif
-vdso32_syscall_end:
-
-	.globl vdso32_sysenter_start, vdso32_sysenter_end
-vdso32_sysenter_start:
-	.incbin "arch/x86/vdso/vdso32-sysenter.so"
-vdso32_sysenter_end:
 
-__FINIT
+DEFINE_VDSO_IMAGE(vdso32_sysenter, "arch/x86/vdso/vdso32-sysenter.so")
diff --git a/arch/x86/vdso/vdso32/vclock_gettime.c b/arch/x86/vdso/vdso32/vclock_gettime.c
new file mode 100644
index 000000000000..175cc72c0f68
--- /dev/null
+++ b/arch/x86/vdso/vdso32/vclock_gettime.c
@@ -0,0 +1,30 @@
+#define BUILD_VDSO32
+
+#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
+#undef CONFIG_OPTIMIZE_INLINING
+#endif
+
+#undef CONFIG_X86_PPRO_FENCE
+
+#ifdef CONFIG_X86_64
+
+/*
+ * in case of a 32 bit VDSO for a 64 bit kernel fake a 32 bit kernel
+ * configuration
+ */
+#undef CONFIG_64BIT
+#undef CONFIG_X86_64
+#undef CONFIG_ILLEGAL_POINTER_VALUE
+#undef CONFIG_SPARSEMEM_VMEMMAP
+#undef CONFIG_NR_CPUS
+
+#define CONFIG_X86_32 1
+#define CONFIG_PAGE_OFFSET 0
+#define CONFIG_ILLEGAL_POINTER_VALUE 0
+#define CONFIG_NR_CPUS 1
+
+#define BUILD_VDSO32_64
+
+#endif
+
+#include "../vclock_gettime.c"
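
For context: compiling vclock_gettime.c into the 32-bit vDSO means clock_gettime() can complete without entering the kernel. A rough benchmark sketch (not part of the patch; iteration count arbitrary) comparing the libc path, which uses the vDSO when it exports the symbol, with a forced syscall:

#include <stdio.h>
#include <time.h>
#include <sys/syscall.h>
#include <unistd.h>

#define N 1000000

static long delta_ns(struct timespec a, struct timespec b)
{
	return (b.tv_sec - a.tv_sec) * 1000000000L + (b.tv_nsec - a.tv_nsec);
}

int main(void)
{
	struct timespec ts, t0, t1;
	int i;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (i = 0; i < N; i++)
		clock_gettime(CLOCK_MONOTONIC, &ts);	/* vDSO path, no trap */
	clock_gettime(CLOCK_MONOTONIC, &t1);
	printf("libc/vDSO: %ld ns\n", delta_ns(t0, t1));

	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (i = 0; i < N; i++)
		syscall(SYS_clock_gettime, CLOCK_MONOTONIC, &ts);	/* always traps */
	clock_gettime(CLOCK_MONOTONIC, &t1);
	printf("syscall:   %ld ns\n", delta_ns(t0, t1));
	return 0;
}
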
diff --git a/arch/x86/vdso/vdso32/vdso32.lds.S b/arch/x86/vdso/vdso32/vdso32.lds.S
index 976124bb5f92..aadb8b9994cd 100644
--- a/arch/x86/vdso/vdso32/vdso32.lds.S
+++ b/arch/x86/vdso/vdso32/vdso32.lds.S
@@ -8,7 +8,11 @@
  * values visible using the asm-x86/vdso.h macros from the kernel proper.
  */
 
+#include <asm/page.h>
+
+#define BUILD_VDSO32
 #define VDSO_PRELINK 0
+
 #include "../vdso-layout.lds.S"
 
 /* The ELF entry point can be used to set the AT_SYSINFO value. */
@@ -19,6 +23,13 @@ ENTRY(__kernel_vsyscall);
  */
 VERSION
 {
+	LINUX_2.6 {
+	global:
+		__vdso_clock_gettime;
+		__vdso_gettimeofday;
+		__vdso_time;
+	};
+
 	LINUX_2.5 {
 	global:
 		__kernel_vsyscall;
@@ -31,7 +42,9 @@ VERSION
 /*
  * Symbols we define here called VDSO* get their values into vdso32-syms.h.
  */
-VDSO32_PRELINK = VDSO_PRELINK;
 VDSO32_vsyscall = __kernel_vsyscall;
 VDSO32_sigreturn = __kernel_sigreturn;
 VDSO32_rt_sigreturn = __kernel_rt_sigreturn;
+VDSO32_clock_gettime = clock_gettime;
+VDSO32_gettimeofday = gettimeofday;
+VDSO32_time = time;
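
For context: the new LINUX_2.6 version node is how a 32-bit libc finds the fast entry points. A probe sketch using glibc's dlvsym(); whether the vDSO is reachable through RTLD_DEFAULT is a libc implementation detail and is assumed here purely for illustration:

#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>
#include <time.h>

typedef int (*vgettime_t)(clockid_t, struct timespec *);

int main(void)
{
	/* look up the symbol at the version node this linker script adds */
	vgettime_t fn = (vgettime_t)dlvsym(RTLD_DEFAULT,
					   "__vdso_clock_gettime", "LINUX_2.6");
	struct timespec ts;

	if (fn && fn(CLOCK_MONOTONIC, &ts) == 0)
		printf("vdso: %ld.%09ld\n", (long)ts.tv_sec, (long)ts.tv_nsec);
	else
		puts("symbol not reachable; libc falls back to the syscall");
	return 0;
}
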
diff --git a/arch/x86/vdso/vdsox32.S b/arch/x86/vdso/vdsox32.S
index 295f1c7543d8..f4aa34e7f370 100644
--- a/arch/x86/vdso/vdsox32.S
+++ b/arch/x86/vdso/vdsox32.S
@@ -1,21 +1,3 @@
-#include <asm/page_types.h>
-#include <linux/linkage.h>
+#include <asm/vdso.h>
 
-__PAGE_ALIGNED_DATA
-
-	.globl vdsox32_start, vdsox32_end
-	.align PAGE_SIZE
-vdsox32_start:
-	.incbin "arch/x86/vdso/vdsox32.so"
-vdsox32_end:
-	.align PAGE_SIZE /* extra data here leaks to userspace. */
-
-.previous
-
-	.globl vdsox32_pages
-	.bss
-	.align 8
-	.type vdsox32_pages, @object
-vdsox32_pages:
-	.zero (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE * 8
-	.size vdsox32_pages, .-vdsox32_pages
+DEFINE_VDSO_IMAGE(vdsox32, "arch/x86/vdso/vdsox32.so")
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 431e87544411..1ad102613127 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -16,20 +16,22 @@
 #include <asm/vdso.h>
 #include <asm/page.h>
 
+#if defined(CONFIG_X86_64)
 unsigned int __read_mostly vdso_enabled = 1;
 
-extern char vdso_start[], vdso_end[];
+DECLARE_VDSO_IMAGE(vdso);
 extern unsigned short vdso_sync_cpuid;
-
-extern struct page *vdso_pages[];
 static unsigned vdso_size;
 
 #ifdef CONFIG_X86_X32_ABI
-extern char vdsox32_start[], vdsox32_end[];
-extern struct page *vdsox32_pages[];
+DECLARE_VDSO_IMAGE(vdsox32);
 static unsigned vdsox32_size;
+#endif
+#endif
 
-static void __init patch_vdsox32(void *vdso, size_t len)
+#if defined(CONFIG_X86_32) || defined(CONFIG_X86_X32_ABI) || \
+	defined(CONFIG_COMPAT)
+void __init patch_vdso32(void *vdso, size_t len)
 {
 	Elf32_Ehdr *hdr = vdso;
 	Elf32_Shdr *sechdrs, *alt_sec = 0;
@@ -52,7 +54,7 @@ static void __init patch_vdsox32(void *vdso, size_t len)
 	}
 
 	/* If we get here, it's probably a bug. */
-	pr_warning("patch_vdsox32: .altinstructions not found\n");
+	pr_warning("patch_vdso32: .altinstructions not found\n");
 	return;  /* nothing to patch */
 
 found:
@@ -61,6 +63,7 @@ found:
 }
 #endif
 
+#if defined(CONFIG_X86_64)
 static void __init patch_vdso64(void *vdso, size_t len)
 {
 	Elf64_Ehdr *hdr = vdso;
@@ -104,7 +107,7 @@ static int __init init_vdso(void)
 		vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
 
 #ifdef CONFIG_X86_X32_ABI
-	patch_vdsox32(vdsox32_start, vdsox32_end - vdsox32_start);
+	patch_vdso32(vdsox32_start, vdsox32_end - vdsox32_start);
 	npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
 	vdsox32_size = npages << PAGE_SHIFT;
 	for (i = 0; i < npages; i++)
@@ -204,3 +207,4 @@ static __init int vdso_setup(char *s)
 	return 0;
 }
 __setup("vdso=", vdso_setup);
+#endif
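
For context: patch_vdso32() above walks the vDSO's ELF section headers in place before the image is handed out. Userspace receives the address of that same image via the aux vector, so the starting point of such a walk can be sketched as follows (not part of the patch):

#include <elf.h>
#include <link.h>	/* ElfW() */
#include <stdio.h>
#include <string.h>
#include <sys/auxv.h>

int main(void)
{
	ElfW(Ehdr) *ehdr = (ElfW(Ehdr) *)getauxval(AT_SYSINFO_EHDR);

	if (!ehdr || memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) {
		puts("no vDSO in the aux vector");
		return 1;
	}
	printf("vDSO at %p: %d-bit, %u section headers\n", (void *)ehdr,
	       ehdr->e_ident[EI_CLASS] == ELFCLASS64 ? 64 : 32,
	       (unsigned)ehdr->e_shnum);
	return 0;
}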