author		Roland McGrath <roland@redhat.com>	2008-01-30 07:30:43 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:30:43 -0500
commit		af65d64845a90c8f2fc90b97e2148ff74672e979
tree		e70a57a9635acaf8154c150f95e11dcb51937fd8
parent		00f8b1bc0e44ba94fb33e1fbd8ac82841d7cc570
x86 vDSO: consolidate vdso32
This makes x86_64's ia32 emulation support share the sources used in the 32-bit kernel for the 32-bit vDSO and much of its setup code.

The 32-bit vDSO mapping now behaves the same on x86_64 as on native 32-bit. The abi.syscall32 sysctl on x86_64 now takes the same values that vm.vdso_enabled takes on the 32-bit kernel. That is, 1 means a randomized vDSO location, 2 means the fixed old address. The CONFIG_COMPAT_VDSO option is now available to make this the default setting, the same meaning it has for the 32-bit kernel. (This does not affect the 64-bit vDSO.)

The argument vdso32=[012] can be used on both 32-bit and 64-bit kernels to set this parameter at boot time. The vdso=[012] argument still does this same thing on the 32-bit kernel.

Signed-off-by: Roland McGrath <roland@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
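As a quick way to observe the behavior described above, a userspace process can read the AT_SYSINFO_EHDR entry that ARCH_DLINFO places in the auxiliary vector. The sketch below is illustrative only and is not part of this patch; it uses glibc's getauxval() helper, which is newer than this change.

/*
 * Illustrative only -- not part of this patch.  Prints where the kernel
 * mapped the vDSO for this process, via the AT_SYSINFO_EHDR auxv entry.
 * Built as a 32-bit binary, the address varies per exec with vdso32=1 and
 * stays at the old fixed address with vdso32=2.
 */
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (vdso == 0) {
		puts("no vDSO mapped (e.g. booted with vdso32=0)");
		return 1;
	}
	printf("vDSO ELF header at %#lx\n", vdso);
	return 0;
}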
-rw-r--r--	Documentation/kernel-parameters.txt	5
-rw-r--r--	arch/x86/Kconfig	4
-rw-r--r--	arch/x86/ia32/Makefile	2
-rw-r--r--	arch/x86/ia32/ia32_binfmt.c	9
-rw-r--r--	arch/x86/ia32/ia32_signal.c	22
-rw-r--r--	arch/x86/vdso/Makefile	2
-rw-r--r--	arch/x86/vdso/vdso32-setup.c	119
-rw-r--r--	arch/x86/vdso/vdso32.S	16
-rw-r--r--	arch/x86/xen/setup.c	4
-rw-r--r--	include/asm-x86/elf.h	23
10 files changed, 146 insertions(+), 60 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b427b7c0e5d0..b3f20beea411 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1988,6 +1988,11 @@ and is between 256 and 4096 characters. It is defined in the file
 			vdso=1: enable VDSO (default)
 			vdso=0: disable VDSO mapping
 
+	vdso32=		[X86-32,X86-64]
+			vdso32=2: enable compat VDSO (default with COMPAT_VDSO)
+			vdso32=1: enable 32-bit VDSO (default)
+			vdso32=0: disable 32-bit VDSO mapping
+
 	vector=		[IA-64,SMP]
 			vector=percpu: enable percpu vector domain
 
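For reference, a small userspace check (not part of this patch) can confirm which vdso32= value the kernel was booted with by scanning /proc/cmdline; the parameter name comes from the hunk above.

/* Illustrative only: report the vdso32= boot parameter, if any. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char cmdline[4096];
	FILE *f = fopen("/proc/cmdline", "r");
	char *p;

	if (!f || !fgets(cmdline, sizeof(cmdline), f)) {
		perror("/proc/cmdline");
		return 1;
	}
	fclose(f);

	p = strstr(cmdline, "vdso32=");
	if (p)
		printf("booted with %.8s\n", p);	/* e.g. "vdso32=0" */
	else
		puts("vdso32= not given; kernel default applies");
	return 0;
}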
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 80b7ba4056db..2f4d88babd36 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1191,9 +1191,9 @@ config HOTPLUG_CPU
 config COMPAT_VDSO
 	bool "Compat VDSO support"
 	default y
-	depends on X86_32
+	depends on X86_32 || IA32_EMULATION
 	help
-	  Map the VDSO to the predictable old-style address too.
+	  Map the 32-bit VDSO to the predictable old-style address too.
 	---help---
 	  Say N here if you are running a sufficiently recent glibc
 	  version (2.3.3 or later), to remove the high-mapped
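The help text above keys the decision on the installed glibc version; a trivial check (again illustrative, not part of the patch) can print it via glibc's gnu_get_libc_version():

/* Illustrative only: print the running glibc version (2.3.3 or later
 * means the high-mapped compat vDSO is no longer needed). */
#include <stdio.h>
#include <gnu/libc-version.h>

int main(void)
{
	printf("glibc %s\n", gnu_get_libc_version());
	return 0;
}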
diff --git a/arch/x86/ia32/Makefile b/arch/x86/ia32/Makefile
index a3c997e9f39a..1f58a21a41dc 100644
--- a/arch/x86/ia32/Makefile
+++ b/arch/x86/ia32/Makefile
@@ -3,7 +3,7 @@
 #
 
 obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o tls32.o \
-	ia32_binfmt.o fpu32.o ptrace32.o syscall32.o syscall32_syscall.o
+	ia32_binfmt.o fpu32.o ptrace32.o
 
 sysv-$(CONFIG_SYSVIPC) := ipc32.o
 obj-$(CONFIG_IA32_EMULATION) += $(sysv-y)
diff --git a/arch/x86/ia32/ia32_binfmt.c b/arch/x86/ia32/ia32_binfmt.c
index 55822d2cf053..e32974c3dd3b 100644
--- a/arch/x86/ia32/ia32_binfmt.c
+++ b/arch/x86/ia32/ia32_binfmt.c
@@ -26,7 +26,7 @@
 #include <asm/i387.h>
 #include <asm/uaccess.h>
 #include <asm/ia32.h>
-#include <asm/vsyscall32.h>
+#include <asm/vdso.h>
 
 #undef ELF_ARCH
 #undef ELF_CLASS
@@ -47,14 +47,13 @@
 #define AT_SYSINFO		32
 #define AT_SYSINFO_EHDR		33
 
-int sysctl_vsyscall32 = 1;
+extern int sysctl_vsyscall32;
 
 #undef ARCH_DLINFO
 #define ARCH_DLINFO do {  \
 	if (sysctl_vsyscall32) { \
-		current->mm->context.vdso = (void *)VSYSCALL32_BASE;	\
-		NEW_AUX_ENT(AT_SYSINFO, (u32)(u64)VSYSCALL32_VSYSCALL); \
-		NEW_AUX_ENT(AT_SYSINFO_EHDR, VSYSCALL32_BASE);	\
+		NEW_AUX_ENT(AT_SYSINFO, (u32)VDSO_ENTRY); \
+		NEW_AUX_ENT(AT_SYSINFO_EHDR, (u32)VDSO_CURRENT_BASE); \
 	} \
 } while(0)
 
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 0fc5d8563e19..39356a756b28 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -31,7 +31,7 @@
 #include <asm/sigcontext32.h>
 #include <asm/fpu32.h>
 #include <asm/proto.h>
-#include <asm/vsyscall32.h>
+#include <asm/vdso.h>
 
 #define DEBUG_SIG 0
 
@@ -465,13 +465,16 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
 		goto give_sigsegv;
 	}
 
-	/* Return stub is in 32bit vsyscall page */
-	if (current->binfmt->hasvdso)
-		restorer = VSYSCALL32_SIGRETURN;
-	else
-		restorer = (void *)&frame->retcode;
-	if (ka->sa.sa_flags & SA_RESTORER)
+	if (ka->sa.sa_flags & SA_RESTORER) {
 		restorer = ka->sa.sa_restorer;
+	} else {
+		/* Return stub is in 32bit vsyscall page */
+		if (current->binfmt->hasvdso)
+			restorer = VDSO32_SYMBOL(current->mm->context.vdso,
+						 sigreturn);
+		else
+			restorer = (void *)&frame->retcode;
+	}
 	err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
 
 	/*
@@ -519,7 +522,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 {
 	struct rt_sigframe __user *frame;
 	struct exec_domain *ed = current_thread_info()->exec_domain;
-	void __user *restorer = VSYSCALL32_RTSIGRETURN;
+	void __user *restorer;
 	int err = 0;
 
 	/* __copy_to_user optimizes that into a single 8 byte store */
@@ -564,6 +567,9 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 
 	if (ka->sa.sa_flags & SA_RESTORER)
 		restorer = ka->sa.sa_restorer;
+	else
+		restorer = VDSO32_SYMBOL(current->mm->context.vdso,
+					 rt_sigreturn);
 	err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
 
 	/*
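Both hunks above pick a sigreturn trampoline only when the handler did not register its own restorer. The sketch below (illustrative, not part of the patch) installs a handler without naming a restorer itself; whether the kernel then falls back to the vDSO sigreturn stub or to the on-stack retcode depends on what the C library filled into SA_RESTORER.

/* Illustrative only: install a handler without supplying a restorer.
 * glibc may still set SA_RESTORER behind the scenes; when it does not,
 * the patched kernel points the return path at the vDSO sigreturn stub. */
#include <signal.h>
#include <stdio.h>
#include <string.h>

static void on_usr1(int sig)
{
	(void)sig;	/* nothing to do; the interesting part is the return path */
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_usr1;
	sigemptyset(&sa.sa_mask);

	if (sigaction(SIGUSR1, &sa, NULL) != 0) {
		perror("sigaction");
		return 1;
	}
	raise(SIGUSR1);		/* handler runs, then returns via the trampoline */
	puts("returned from handler");
	return 0;
}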
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 1127c716df02..47bc2760e6a8 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -15,7 +15,7 @@ vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vvar.o
 
 # files to link into kernel
 obj-$(VDSO64-y)			+= vma.o vdso.o
-obj-$(CONFIG_X86_32)		+= vdso32.o vdso32-setup.o
+obj-$(VDSO32-y)			+= vdso32.o vdso32-setup.o
 
 vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
 
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index fb71a93c5dce..d97a6d7d062b 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -24,6 +24,7 @@
 #include <asm/elf.h>
 #include <asm/tlbflush.h>
 #include <asm/vdso.h>
+#include <asm/proto.h>
 
 enum {
 	VDSO_DISABLED = 0,
@@ -37,14 +38,24 @@ enum {
 #define VDSO_DEFAULT	VDSO_ENABLED
 #endif
 
+#ifdef CONFIG_X86_64
+#define vdso_enabled			sysctl_vsyscall32
+#define arch_setup_additional_pages	syscall32_setup_pages
+#endif
+
+/*
+ * This is the difference between the prelinked addresses in the vDSO images
+ * and the VDSO_HIGH_BASE address where CONFIG_COMPAT_VDSO places the vDSO
+ * in the user address space.
+ */
+#define VDSO_ADDR_ADJUST (VDSO_HIGH_BASE - (unsigned long)VDSO32_PRELINK)
+
 /*
  * Should the kernel map a VDSO page into processes and pass its
  * address down to glibc upon exec()?
  */
 unsigned int __read_mostly vdso_enabled = VDSO_DEFAULT;
 
-EXPORT_SYMBOL_GPL(vdso_enabled);
-
 static int __init vdso_setup(char *s)
 {
 	vdso_enabled = simple_strtoul(s, NULL, 0);
@@ -52,9 +63,18 @@ static int __init vdso_setup(char *s)
 	return 1;
 }
 
-__setup("vdso=", vdso_setup);
+/*
+ * For consistency, the argument vdso32=[012] affects the 32-bit vDSO
+ * behavior on both 64-bit and 32-bit kernels.
+ * On 32-bit kernels, vdso=[012] means the same thing.
+ */
+__setup("vdso32=", vdso_setup);
 
-extern asmlinkage void ia32_sysenter_target(void);
+#ifdef CONFIG_X86_32
+__setup_param("vdso=", vdso32_setup, vdso_setup, 0);
+
+EXPORT_SYMBOL_GPL(vdso_enabled);
+#endif
 
 static __init void reloc_symtab(Elf32_Ehdr *ehdr,
 				unsigned offset, unsigned size)
@@ -79,7 +99,7 @@ static __init void reloc_symtab(Elf32_Ehdr *ehdr,
 		case STT_FUNC:
 		case STT_SECTION:
 		case STT_FILE:
-			sym->st_value += VDSO_HIGH_BASE;
+			sym->st_value += VDSO_ADDR_ADJUST;
 		}
 	}
 }
@@ -105,7 +125,7 @@ static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset)
 		case DT_VERNEED:
 		case DT_ADDRRNGLO ... DT_ADDRRNGHI:
 			/* definitely pointers needing relocation */
-			dyn->d_un.d_ptr += VDSO_HIGH_BASE;
+			dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
 			break;
 
 		case DT_ENCODING ... OLD_DT_LOOS-1:
@@ -114,7 +134,7 @@ static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset)
 			   they're even */
 			if (dyn->d_tag >= DT_ENCODING &&
 			    (dyn->d_tag & 1) == 0)
-				dyn->d_un.d_ptr += VDSO_HIGH_BASE;
+				dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
 			break;
 
 		case DT_VERDEFNUM:
@@ -143,15 +163,15 @@ static __init void relocate_vdso(Elf32_Ehdr *ehdr)
 	int i;
 
 	BUG_ON(memcmp(ehdr->e_ident, ELFMAG, 4) != 0 ||
-	       !elf_check_arch(ehdr) ||
+	       !elf_check_arch_ia32(ehdr) ||
 	       ehdr->e_type != ET_DYN);
 
-	ehdr->e_entry += VDSO_HIGH_BASE;
+	ehdr->e_entry += VDSO_ADDR_ADJUST;
 
 	/* rebase phdrs */
 	phdr = (void *)ehdr + ehdr->e_phoff;
 	for (i = 0; i < ehdr->e_phnum; i++) {
-		phdr[i].p_vaddr += VDSO_HIGH_BASE;
+		phdr[i].p_vaddr += VDSO_ADDR_ADJUST;
 
 		/* relocate dynamic stuff */
 		if (phdr[i].p_type == PT_DYNAMIC)
@@ -164,7 +184,7 @@ static __init void relocate_vdso(Elf32_Ehdr *ehdr)
 		if (!(shdr[i].sh_flags & SHF_ALLOC))
 			continue;
 
-		shdr[i].sh_addr += VDSO_HIGH_BASE;
+		shdr[i].sh_addr += VDSO_ADDR_ADJUST;
 
 		if (shdr[i].sh_type == SHT_SYMTAB ||
 		    shdr[i].sh_type == SHT_DYNSYM)
@@ -173,6 +193,45 @@ static __init void relocate_vdso(Elf32_Ehdr *ehdr)
 	}
 }
 
+/*
+ * These symbols are defined by vdso32.S to mark the bounds
+ * of the ELF DSO images included therein.
+ */
+extern const char vdso32_default_start, vdso32_default_end;
+extern const char vdso32_sysenter_start, vdso32_sysenter_end;
+static struct page *vdso32_pages[1];
+
+#ifdef CONFIG_X86_64
+
+static int use_sysenter __read_mostly = -1;
+
+#define	vdso32_sysenter()	(use_sysenter > 0)
+
+/* May not be __init: called during resume */
+void syscall32_cpu_init(void)
+{
+	if (use_sysenter < 0)
+		use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
+
+	/* Load these always in case some future AMD CPU supports
+	   SYSENTER from compat mode too. */
+	checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+	checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
+	checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
+
+	wrmsrl(MSR_CSTAR, ia32_cstar_target);
+}
+
+#define compat_uses_vma 1
+
+static inline void map_compat_vdso(int map)
+{
+}
+
+#else  /* CONFIG_X86_32 */
+
+#define vdso32_sysenter()	(boot_cpu_has(X86_FEATURE_SEP))
+
 void enable_sep_cpu(void)
 {
 	int cpu = get_cpu();
@@ -210,13 +269,7 @@ static int __init gate_vma_init(void)
 	return 0;
 }
 
-/*
- * These symbols are defined by vsyscall.o to mark the bounds
- * of the ELF DSO images included therein.
- */
-extern const char vsyscall_int80_start, vsyscall_int80_end;
-extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
-static struct page *syscall_pages[1];
+#define compat_uses_vma 0
 
 static void map_compat_vdso(int map)
 {
@@ -227,31 +280,35 @@ static void map_compat_vdso(int map)
 
 	vdso_mapped = map;
 
-	__set_fixmap(FIX_VDSO, page_to_pfn(syscall_pages[0]) << PAGE_SHIFT,
+	__set_fixmap(FIX_VDSO, page_to_pfn(vdso32_pages[0]) << PAGE_SHIFT,
 		     map ? PAGE_READONLY_EXEC : PAGE_NONE);
 
 	/* flush stray tlbs */
 	flush_tlb_all();
 }
 
+#endif	/* CONFIG_X86_64 */
+
 int __init sysenter_setup(void)
 {
 	void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
 	const void *vsyscall;
 	size_t vsyscall_len;
 
-	syscall_pages[0] = virt_to_page(syscall_page);
+	vdso32_pages[0] = virt_to_page(syscall_page);
 
+#ifdef CONFIG_X86_32
 	gate_vma_init();
 
 	printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
+#endif
 
-	if (!boot_cpu_has(X86_FEATURE_SEP)) {
-		vsyscall = &vsyscall_int80_start;
-		vsyscall_len = &vsyscall_int80_end - &vsyscall_int80_start;
+	if (!vdso32_sysenter()) {
+		vsyscall = &vdso32_default_start;
+		vsyscall_len = &vdso32_default_end - &vdso32_default_start;
 	} else {
-		vsyscall = &vsyscall_sysenter_start;
-		vsyscall_len = &vsyscall_sysenter_end - &vsyscall_sysenter_start;
+		vsyscall = &vdso32_sysenter_start;
+		vsyscall_len = &vdso32_sysenter_end - &vdso32_sysenter_start;
 	}
 
 	memcpy(syscall_page, vsyscall, vsyscall_len);
@@ -284,7 +341,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
 			ret = addr;
 			goto up_fail;
 		}
+	}
 
+	if (compat_uses_vma || !compat) {
 		/*
 		 * MAYWRITE to allow gdb to COW and set breakpoints
 		 *
@@ -298,7 +357,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
 				      VM_READ|VM_EXEC|
 				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
 				      VM_ALWAYSDUMP,
-				      syscall_pages);
+				      vdso32_pages);
 
 		if (ret)
 			goto up_fail;
@@ -314,6 +373,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
 	return ret;
 }
 
+#ifdef CONFIG_X86_64
+
+__initcall(sysenter_setup);
+
+#else  /* CONFIG_X86_32 */
+
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
 	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
@@ -342,3 +407,5 @@ int in_gate_area_no_task(unsigned long addr)
 {
 	return 0;
 }
+
+#endif	/* CONFIG_X86_64 */
diff --git a/arch/x86/vdso/vdso32.S b/arch/x86/vdso/vdso32.S
index cab020c99c3d..1e36f72cab86 100644
--- a/arch/x86/vdso/vdso32.S
+++ b/arch/x86/vdso/vdso32.S
@@ -2,14 +2,18 @@
 
 __INITDATA
 
-	.globl vsyscall_int80_start, vsyscall_int80_end
-vsyscall_int80_start:
+	.globl vdso32_default_start, vdso32_default_end
+vdso32_default_start:
+#ifdef CONFIG_X86_32
 	.incbin "arch/x86/vdso/vdso32-int80.so"
-vsyscall_int80_end:
+#else
+	.incbin "arch/x86/vdso/vdso32-syscall.so"
+#endif
+vdso32_default_end:
 
-	.globl vsyscall_sysenter_start, vsyscall_sysenter_end
-vsyscall_sysenter_start:
+	.globl vdso32_sysenter_start, vdso32_sysenter_end
+vdso32_sysenter_start:
 	.incbin "arch/x86/vdso/vdso32-sysenter.so"
-vsyscall_sysenter_end:
+vdso32_sysenter_end:
 
 __FINIT
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index fd91568090f4..7d6d0ef55890 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -62,8 +62,8 @@ static void xen_idle(void)
  */
 static void fiddle_vdso(void)
 {
-	extern char vsyscall_int80_start;
-	u32 *mask = VDSO32_SYMBOL(&vsyscall_int80_start, NOTE_MASK);
+	extern const char vdso32_default_start;
+	u32 *mask = VDSO32_SYMBOL(&vdso32_default_start, NOTE_MASK);
 	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
 }
 
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h
index 70edff2d5671..60f5101d9483 100644
--- a/include/asm-x86/elf.h
+++ b/include/asm-x86/elf.h
@@ -74,17 +74,19 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
 
 #ifdef __KERNEL__
 
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch_ia32(x) \
+	(((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
+
 #ifdef CONFIG_X86_32
 #include <asm/processor.h>
 #include <asm/system.h>		/* for savesegment */
 #include <asm/desc.h>
 #include <asm/vdso.h>
 
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) \
-	(((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
+#define elf_check_arch(x)	elf_check_arch_ia32(x)
 
 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
    contains a pointer to a function which might be registered using `atexit'.
@@ -247,10 +249,6 @@ extern int dump_task_extended_fpu (struct task_struct *,
 #define ELF_CORE_XFPREG_TYPE	NT_PRXFPREG
 
 #define VDSO_HIGH_BASE		(__fix_to_virt(FIX_VDSO))
-#define VDSO_CURRENT_BASE	((unsigned long)current->mm->context.vdso)
-
-#define VDSO_ENTRY \
-	((unsigned long) VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
 
 /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
 
@@ -262,6 +260,8 @@ do if (vdso_enabled) { \
 
 #else /* CONFIG_X86_32 */
 
+#define VDSO_HIGH_BASE		0xffffe000U	/* CONFIG_COMPAT_VDSO address */
+
 /* 1GB for 64bit, 8MB for 32bit */
 #define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
 
@@ -272,6 +272,11 @@ do if (vdso_enabled) { \
 
 #endif /* !CONFIG_X86_32 */
 
+#define VDSO_CURRENT_BASE	((unsigned long)current->mm->context.vdso)
+
+#define VDSO_ENTRY \
+	((unsigned long) VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
+
 struct linux_binprm;
 
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1