author		Roland McGrath <roland@redhat.com>	2008-01-30 07:30:43 -0500
committer	Ingo Molnar <mingo@elte.hu>		2008-01-30 07:30:43 -0500
commit		af65d64845a90c8f2fc90b97e2148ff74672e979 (patch)
tree		e70a57a9635acaf8154c150f95e11dcb51937fd8 /arch/x86/vdso
parent		00f8b1bc0e44ba94fb33e1fbd8ac82841d7cc570 (diff)
x86 vDSO: consolidate vdso32
This makes x86_64's ia32 emulation support share the sources used in the
32-bit kernel for the 32-bit vDSO and much of its setup code.
The 32-bit vDSO mapping now behaves the same on x86_64 as on native 32-bit.
The abi.syscall32 sysctl on x86_64 now takes the same values that
vm.vdso_enabled takes on the 32-bit kernel. That is, 1 means a randomized
vDSO location, 2 means the fixed old address. The CONFIG_COMPAT_VDSO
option is now available to make this the default setting, the same meaning
it has for the 32-bit kernel. (This does not affect the 64-bit vDSO.)
The argument vdso32=[012] can be used on both 32-bit and 64-bit kernels to
set this parameter at boot time. The vdso=[012] argument still does this
same thing on the 32-bit kernel.
Signed-off-by: Roland McGrath <roland@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/vdso')
-rw-r--r--	arch/x86/vdso/Makefile		|   2
-rw-r--r--	arch/x86/vdso/vdso32-setup.c	| 119
-rw-r--r--	arch/x86/vdso/vdso32.S		|  16
3 files changed, 104 insertions, 33 deletions
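
For orientation, the sketch below condenses the vdso32=[012] semantics described in the commit message into a small standalone C program. It is an illustration only, not code taken from this patch: main() and parse_vdso32_arg() are hypothetical, and the VDSO_COMPAT name for value 2 is assumed; in the patch itself the value is parsed by vdso_setup() with simple_strtoul() and registered via __setup("vdso32=", vdso_setup).

/*
 * Standalone illustration (not kernel code) of the vdso32=[012] values.
 * Enum names mirror vdso32-setup.c where visible; VDSO_COMPAT is assumed.
 */
#include <stdio.h>
#include <stdlib.h>

enum {
	VDSO_DISABLED = 0,	/* no 32-bit vDSO mapped at all          */
	VDSO_ENABLED  = 1,	/* vDSO mapped at a randomized address   */
	VDSO_COMPAT   = 2,	/* vDSO mapped at the fixed old address  */
};

static unsigned int vdso_enabled = VDSO_ENABLED;	/* VDSO_DEFAULT */

/* Mimics vdso_setup(): the kernel parses the digit with simple_strtoul(). */
static void parse_vdso32_arg(const char *s)
{
	vdso_enabled = strtoul(s, NULL, 0);
}

int main(void)
{
	parse_vdso32_arg("2");	/* e.g. booting with vdso32=2 */

	switch (vdso_enabled) {
	case VDSO_DISABLED:
		printf("32-bit vDSO disabled\n");
		break;
	case VDSO_COMPAT:
		printf("32-bit vDSO at the fixed legacy address (COMPAT_VDSO)\n");
		break;
	default:
		printf("32-bit vDSO at a randomized address\n");
	}
	return 0;
}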
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 1127c716df02..47bc2760e6a8 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -15,7 +15,7 @@ vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vvar.o
 
 # files to link into kernel
 obj-$(VDSO64-y) += vma.o vdso.o
-obj-$(CONFIG_X86_32) += vdso32.o vdso32-setup.o
+obj-$(VDSO32-y) += vdso32.o vdso32-setup.o
 
 vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
 
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index fb71a93c5dce..d97a6d7d062b 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -24,6 +24,7 @@
 #include <asm/elf.h>
 #include <asm/tlbflush.h>
 #include <asm/vdso.h>
+#include <asm/proto.h>
 
 enum {
 	VDSO_DISABLED = 0,
@@ -37,14 +38,24 @@ enum {
 #define VDSO_DEFAULT VDSO_ENABLED
 #endif
 
+#ifdef CONFIG_X86_64
+#define vdso_enabled sysctl_vsyscall32
+#define arch_setup_additional_pages syscall32_setup_pages
+#endif
+
+/*
+ * This is the difference between the prelinked addresses in the vDSO images
+ * and the VDSO_HIGH_BASE address where CONFIG_COMPAT_VDSO places the vDSO
+ * in the user address space.
+ */
+#define VDSO_ADDR_ADJUST (VDSO_HIGH_BASE - (unsigned long)VDSO32_PRELINK)
+
 /*
  * Should the kernel map a VDSO page into processes and pass its
  * address down to glibc upon exec()?
  */
 unsigned int __read_mostly vdso_enabled = VDSO_DEFAULT;
 
-EXPORT_SYMBOL_GPL(vdso_enabled);
-
 static int __init vdso_setup(char *s)
 {
 	vdso_enabled = simple_strtoul(s, NULL, 0);
@@ -52,9 +63,18 @@ static int __init vdso_setup(char *s)
 	return 1;
 }
 
-__setup("vdso=", vdso_setup);
+/*
+ * For consistency, the argument vdso32=[012] affects the 32-bit vDSO
+ * behavior on both 64-bit and 32-bit kernels.
+ * On 32-bit kernels, vdso=[012] means the same thing.
+ */
+__setup("vdso32=", vdso_setup);
 
-extern asmlinkage void ia32_sysenter_target(void);
+#ifdef CONFIG_X86_32
+__setup_param("vdso=", vdso32_setup, vdso_setup, 0);
+
+EXPORT_SYMBOL_GPL(vdso_enabled);
+#endif
 
 static __init void reloc_symtab(Elf32_Ehdr *ehdr,
 				unsigned offset, unsigned size)
@@ -79,7 +99,7 @@ static __init void reloc_symtab(Elf32_Ehdr *ehdr,
 		case STT_FUNC:
 		case STT_SECTION:
 		case STT_FILE:
-			sym->st_value += VDSO_HIGH_BASE;
+			sym->st_value += VDSO_ADDR_ADJUST;
 		}
 	}
 }
@@ -105,7 +125,7 @@ static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset)
 		case DT_VERNEED:
 		case DT_ADDRRNGLO ... DT_ADDRRNGHI:
 			/* definitely pointers needing relocation */
-			dyn->d_un.d_ptr += VDSO_HIGH_BASE;
+			dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
 			break;
 
 		case DT_ENCODING ... OLD_DT_LOOS-1:
@@ -114,7 +134,7 @@ static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset)
 			   they're even */
 			if (dyn->d_tag >= DT_ENCODING &&
 			    (dyn->d_tag & 1) == 0)
-				dyn->d_un.d_ptr += VDSO_HIGH_BASE;
+				dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
 			break;
 
 		case DT_VERDEFNUM:
@@ -143,15 +163,15 @@ static __init void relocate_vdso(Elf32_Ehdr *ehdr)
 	int i;
 
 	BUG_ON(memcmp(ehdr->e_ident, ELFMAG, 4) != 0 ||
-	       !elf_check_arch(ehdr) ||
+	       !elf_check_arch_ia32(ehdr) ||
 	       ehdr->e_type != ET_DYN);
 
-	ehdr->e_entry += VDSO_HIGH_BASE;
+	ehdr->e_entry += VDSO_ADDR_ADJUST;
 
 	/* rebase phdrs */
 	phdr = (void *)ehdr + ehdr->e_phoff;
 	for (i = 0; i < ehdr->e_phnum; i++) {
-		phdr[i].p_vaddr += VDSO_HIGH_BASE;
+		phdr[i].p_vaddr += VDSO_ADDR_ADJUST;
 
 		/* relocate dynamic stuff */
 		if (phdr[i].p_type == PT_DYNAMIC)
@@ -164,7 +184,7 @@ static __init void relocate_vdso(Elf32_Ehdr *ehdr)
 		if (!(shdr[i].sh_flags & SHF_ALLOC))
 			continue;
 
-		shdr[i].sh_addr += VDSO_HIGH_BASE;
+		shdr[i].sh_addr += VDSO_ADDR_ADJUST;
 
 		if (shdr[i].sh_type == SHT_SYMTAB ||
 		    shdr[i].sh_type == SHT_DYNSYM)
@@ -173,6 +193,45 @@ static __init void relocate_vdso(Elf32_Ehdr *ehdr)
 	}
 }
 
+/*
+ * These symbols are defined by vdso32.S to mark the bounds
+ * of the ELF DSO images included therein.
+ */
+extern const char vdso32_default_start, vdso32_default_end;
+extern const char vdso32_sysenter_start, vdso32_sysenter_end;
+static struct page *vdso32_pages[1];
+
+#ifdef CONFIG_X86_64
+
+static int use_sysenter __read_mostly = -1;
+
+#define vdso32_sysenter() (use_sysenter > 0)
+
+/* May not be __init: called during resume */
+void syscall32_cpu_init(void)
+{
+	if (use_sysenter < 0)
+		use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
+
+	/* Load these always in case some future AMD CPU supports
+	   SYSENTER from compat mode too. */
+	checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+	checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
+	checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
+
+	wrmsrl(MSR_CSTAR, ia32_cstar_target);
+}
+
+#define compat_uses_vma 1
+
+static inline void map_compat_vdso(int map)
+{
+}
+
+#else /* CONFIG_X86_32 */
+
+#define vdso32_sysenter() (boot_cpu_has(X86_FEATURE_SEP))
+
 void enable_sep_cpu(void)
 {
 	int cpu = get_cpu();
@@ -210,13 +269,7 @@ static int __init gate_vma_init(void)
 	return 0;
 }
 
-/*
- * These symbols are defined by vsyscall.o to mark the bounds
- * of the ELF DSO images included therein.
- */
-extern const char vsyscall_int80_start, vsyscall_int80_end;
-extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
-static struct page *syscall_pages[1];
+#define compat_uses_vma 0
 
 static void map_compat_vdso(int map)
 {
@@ -227,31 +280,35 @@ static void map_compat_vdso(int map)
 
 	vdso_mapped = map;
 
-	__set_fixmap(FIX_VDSO, page_to_pfn(syscall_pages[0]) << PAGE_SHIFT,
+	__set_fixmap(FIX_VDSO, page_to_pfn(vdso32_pages[0]) << PAGE_SHIFT,
		     map ? PAGE_READONLY_EXEC : PAGE_NONE);
 
 	/* flush stray tlbs */
 	flush_tlb_all();
 }
 
+#endif /* CONFIG_X86_64 */
+
 int __init sysenter_setup(void)
 {
 	void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
 	const void *vsyscall;
 	size_t vsyscall_len;
 
-	syscall_pages[0] = virt_to_page(syscall_page);
+	vdso32_pages[0] = virt_to_page(syscall_page);
 
+#ifdef CONFIG_X86_32
 	gate_vma_init();
 
 	printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
+#endif
 
-	if (!boot_cpu_has(X86_FEATURE_SEP)) {
-		vsyscall = &vsyscall_int80_start;
-		vsyscall_len = &vsyscall_int80_end - &vsyscall_int80_start;
+	if (!vdso32_sysenter()) {
+		vsyscall = &vdso32_default_start;
+		vsyscall_len = &vdso32_default_end - &vdso32_default_start;
 	} else {
-		vsyscall = &vsyscall_sysenter_start;
-		vsyscall_len = &vsyscall_sysenter_end - &vsyscall_sysenter_start;
+		vsyscall = &vdso32_sysenter_start;
+		vsyscall_len = &vdso32_sysenter_end - &vdso32_sysenter_start;
 	}
 
 	memcpy(syscall_page, vsyscall, vsyscall_len);
@@ -284,7 +341,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
 			ret = addr;
 			goto up_fail;
 		}
+	}
 
+	if (compat_uses_vma || !compat) {
 		/*
 		 * MAYWRITE to allow gdb to COW and set breakpoints
 		 *
@@ -298,7 +357,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				      VM_ALWAYSDUMP,
-				      syscall_pages);
+				      vdso32_pages);
 
 		if (ret)
 			goto up_fail;
@@ -314,6 +373,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
 	return ret;
 }
 
+#ifdef CONFIG_X86_64
+
+__initcall(sysenter_setup);
+
+#else /* CONFIG_X86_32 */
+
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
 	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
@@ -342,3 +407,5 @@ int in_gate_area_no_task(unsigned long addr)
 {
 	return 0;
 }
+
+#endif /* CONFIG_X86_64 */
diff --git a/arch/x86/vdso/vdso32.S b/arch/x86/vdso/vdso32.S
index cab020c99c3d..1e36f72cab86 100644
--- a/arch/x86/vdso/vdso32.S
+++ b/arch/x86/vdso/vdso32.S
@@ -2,14 +2,18 @@
 
 __INITDATA
 
-	.globl vsyscall_int80_start, vsyscall_int80_end
-vsyscall_int80_start:
+	.globl vdso32_default_start, vdso32_default_end
+vdso32_default_start:
+#ifdef CONFIG_X86_32
 	.incbin "arch/x86/vdso/vdso32-int80.so"
-vsyscall_int80_end:
+#else
+	.incbin "arch/x86/vdso/vdso32-syscall.so"
+#endif
+vdso32_default_end:
 
-	.globl vsyscall_sysenter_start, vsyscall_sysenter_end
-vsyscall_sysenter_start:
+	.globl vdso32_sysenter_start, vdso32_sysenter_end
+vdso32_sysenter_start:
 	.incbin "arch/x86/vdso/vdso32-sysenter.so"
-vsyscall_sysenter_end:
+vdso32_sysenter_end:
 
 __FINIT