author:    Andy Lutomirski <luto@amacapital.net>    2014-03-13 19:01:26 -0400
committer: H. Peter Anvin <hpa@linux.intel.com>     2014-03-13 19:20:09 -0400
commit:    b0b49f2673f011cad7deeabf7a683b388c351278 (patch)
tree:      2a3bfe9ccc51d4b2072381fe4ebd4f3630a38444 /arch/x86/vdso/vdso32-setup.c
parent:    fa389e220254c69ffae0d403eac4146171062d08 (diff)
x86, vdso: Remove compat vdso support
The compat vDSO is a complicated hack that's needed to maintain
compatibility with a small range of glibc versions.
This removes it and replaces it with a much simpler hack: a config
option to disable the 32-bit vDSO by default.
This also changes the default value of CONFIG_COMPAT_VDSO to n --
users configuring kernels from scratch almost certainly want that
choice.
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Link: http://lkml.kernel.org/r/4bb4690899106eb11430b1186d5cc66ca9d1660c.1394751608.git.luto@amacapital.net
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
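
[Editorial note, not part of the patch: the fixed-address hack is removable because modern 32-bit userspace never hard-codes the vDSO's location; it reads the AT_SYSINFO_EHDR entry from the auxiliary vector the kernel supplies at exec(). A minimal user-space sketch of that lookup, using glibc's getauxval() (available since glibc 2.16):

#include <elf.h>        /* AT_SYSINFO_EHDR */
#include <stdio.h>
#include <sys/auxv.h>   /* getauxval(), glibc >= 2.16 */

int main(void)
{
        /*
         * AT_SYSINFO_EHDR carries the base address of the vDSO's ELF
         * header; it reads as 0 when the kernel mapped no vDSO (for
         * example, when the 32-bit vDSO is disabled with vdso=0).
         */
        unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

        if (vdso)
                printf("vDSO mapped at %#lx\n", vdso);
        else
                printf("no vDSO mapped\n");

        return 0;
}

Anything newer than the small range of glibc versions mentioned above locates the vDSO this way, which is what makes the fixed-address mapping safe to drop.]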
Diffstat (limited to 'arch/x86/vdso/vdso32-setup.c')
-rw-r--r--  arch/x86/vdso/vdso32-setup.c | 232
1 file changed, 21 insertions(+), 211 deletions(-)
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index d6bfb876cfb0..ab20c04b688a 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -26,16 +26,10 @@
 #include <asm/vdso.h>
 #include <asm/proto.h>
 
-enum {
-        VDSO_DISABLED = 0,
-        VDSO_ENABLED = 1,
-        VDSO_COMPAT = 2,
-};
-
 #ifdef CONFIG_COMPAT_VDSO
-#define VDSO_DEFAULT    VDSO_COMPAT
+#define VDSO_DEFAULT    0
 #else
-#define VDSO_DEFAULT    VDSO_ENABLED
+#define VDSO_DEFAULT    1
 #endif
 
 #ifdef CONFIG_X86_64
@@ -44,13 +38,6 @@ enum {
 #endif
 
 /*
- * This is the difference between the prelinked addresses in the vDSO images
- * and the VDSO_HIGH_BASE address where CONFIG_COMPAT_VDSO places the vDSO
- * in the user address space.
- */
-#define VDSO_ADDR_ADJUST        (VDSO_HIGH_BASE - (unsigned long)VDSO32_PRELINK)
-
-/*
  * Should the kernel map a VDSO page into processes and pass its
  * address down to glibc upon exec()?
  */
@@ -60,6 +47,9 @@ static int __init vdso_setup(char *s)
 {
         vdso_enabled = simple_strtoul(s, NULL, 0);
 
+        if (vdso_enabled > 1)
+                pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
+
         return 1;
 }
 
@@ -76,123 +66,6 @@ __setup_param("vdso=", vdso32_setup, vdso_setup, 0);
 EXPORT_SYMBOL_GPL(vdso_enabled);
 #endif
 
-static __init void reloc_symtab(Elf32_Ehdr *ehdr,
-                                unsigned offset, unsigned size)
-{
-        Elf32_Sym *sym = (void *)ehdr + offset;
-        unsigned nsym = size / sizeof(*sym);
-        unsigned i;
-
-        for(i = 0; i < nsym; i++, sym++) {
-                if (sym->st_shndx == SHN_UNDEF ||
-                    sym->st_shndx == SHN_ABS)
-                        continue;  /* skip */
-
-                if (sym->st_shndx > SHN_LORESERVE) {
-                        printk(KERN_INFO "VDSO: unexpected st_shndx %x\n",
-                               sym->st_shndx);
-                        continue;
-                }
-
-                switch(ELF_ST_TYPE(sym->st_info)) {
-                case STT_OBJECT:
-                case STT_FUNC:
-                case STT_SECTION:
-                case STT_FILE:
-                        sym->st_value += VDSO_ADDR_ADJUST;
-                }
-        }
-}
-
-static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset)
-{
-        Elf32_Dyn *dyn = (void *)ehdr + offset;
-
-        for(; dyn->d_tag != DT_NULL; dyn++)
-                switch(dyn->d_tag) {
-                case DT_PLTGOT:
-                case DT_HASH:
-                case DT_STRTAB:
-                case DT_SYMTAB:
-                case DT_RELA:
-                case DT_INIT:
-                case DT_FINI:
-                case DT_REL:
-                case DT_DEBUG:
-                case DT_JMPREL:
-                case DT_VERSYM:
-                case DT_VERDEF:
-                case DT_VERNEED:
-                case DT_ADDRRNGLO ... DT_ADDRRNGHI:
-                        /* definitely pointers needing relocation */
-                        dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
-                        break;
-
-                case DT_ENCODING ... OLD_DT_LOOS-1:
-                case DT_LOOS ... DT_HIOS-1:
-                        /* Tags above DT_ENCODING are pointers if
-                           they're even */
-                        if (dyn->d_tag >= DT_ENCODING &&
-                            (dyn->d_tag & 1) == 0)
-                                dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
-                        break;
-
-                case DT_VERDEFNUM:
-                case DT_VERNEEDNUM:
-                case DT_FLAGS_1:
-                case DT_RELACOUNT:
-                case DT_RELCOUNT:
-                case DT_VALRNGLO ... DT_VALRNGHI:
-                        /* definitely not pointers */
-                        break;
-
-                case OLD_DT_LOOS ... DT_LOOS-1:
-                case DT_HIOS ... DT_VALRNGLO-1:
-                default:
-                        if (dyn->d_tag > DT_ENCODING)
-                                printk(KERN_INFO "VDSO: unexpected DT_tag %x\n",
-                                       dyn->d_tag);
-                        break;
-                }
-}
-
-static __init void relocate_vdso(Elf32_Ehdr *ehdr)
-{
-        Elf32_Phdr *phdr;
-        Elf32_Shdr *shdr;
-        int i;
-
-        BUG_ON(memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0 ||
-               !elf_check_arch_ia32(ehdr) ||
-               ehdr->e_type != ET_DYN);
-
-        ehdr->e_entry += VDSO_ADDR_ADJUST;
-
-        /* rebase phdrs */
-        phdr = (void *)ehdr + ehdr->e_phoff;
-        for (i = 0; i < ehdr->e_phnum; i++) {
-                phdr[i].p_vaddr += VDSO_ADDR_ADJUST;
-
-                /* relocate dynamic stuff */
-                if (phdr[i].p_type == PT_DYNAMIC)
-                        reloc_dyn(ehdr, phdr[i].p_offset);
-        }
-
-        /* rebase sections */
-        shdr = (void *)ehdr + ehdr->e_shoff;
-        for(i = 0; i < ehdr->e_shnum; i++) {
-                if (!(shdr[i].sh_flags & SHF_ALLOC))
-                        continue;
-
-                shdr[i].sh_addr += VDSO_ADDR_ADJUST;
-
-                if (shdr[i].sh_type == SHT_SYMTAB ||
-                    shdr[i].sh_type == SHT_DYNSYM)
-                        reloc_symtab(ehdr, shdr[i].sh_offset,
-                                     shdr[i].sh_size);
-        }
-}
-
 static struct page *vdso32_pages[1];
 
 #ifdef CONFIG_X86_64
@@ -212,12 +85,6 @@ void syscall32_cpu_init(void)
         wrmsrl(MSR_CSTAR, ia32_cstar_target);
 }
 
-#define compat_uses_vma 1
-
-static inline void map_compat_vdso(int map)
-{
-}
-
 #else /* CONFIG_X86_32 */
 
 #define vdso32_sysenter()       (boot_cpu_has(X86_FEATURE_SEP))
@@ -241,37 +108,6 @@ void enable_sep_cpu(void)
         put_cpu();
 }
 
-static struct vm_area_struct gate_vma;
-
-static int __init gate_vma_init(void)
-{
-        gate_vma.vm_mm = NULL;
-        gate_vma.vm_start = FIXADDR_USER_START;
-        gate_vma.vm_end = FIXADDR_USER_END;
-        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
-        gate_vma.vm_page_prot = __P101;
-
-        return 0;
-}
-
-#define compat_uses_vma 0
-
-static void map_compat_vdso(int map)
-{
-        static int vdso_mapped;
-
-        if (map == vdso_mapped)
-                return;
-
-        vdso_mapped = map;
-
-        __set_fixmap(FIX_VDSO, page_to_pfn(vdso32_pages[0]) << PAGE_SHIFT,
-                     map ? PAGE_READONLY_EXEC : PAGE_NONE);
-
-        /* flush stray tlbs */
-        flush_tlb_all();
-}
-
 #endif /* CONFIG_X86_64 */
 
 int __init sysenter_setup(void)
@@ -282,10 +118,6 @@ int __init sysenter_setup(void)
 
         vdso32_pages[0] = virt_to_page(syscall_page);
 
-#ifdef CONFIG_X86_32
-        gate_vma_init();
-#endif
-
         if (vdso32_syscall()) {
                 vsyscall = &vdso32_syscall_start;
                 vsyscall_len = &vdso32_syscall_end - &vdso32_syscall_start;
@@ -298,7 +130,6 @@ int __init sysenter_setup(void)
         }
 
         memcpy(syscall_page, vsyscall, vsyscall_len);
-        relocate_vdso(syscall_page);
 
         return 0;
 }
@@ -309,48 +140,35 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
         struct mm_struct *mm = current->mm;
         unsigned long addr;
         int ret = 0;
-        bool compat;
 
 #ifdef CONFIG_X86_X32_ABI
         if (test_thread_flag(TIF_X32))
                 return x32_setup_additional_pages(bprm, uses_interp);
 #endif
 
-        if (vdso_enabled == VDSO_DISABLED)
+        if (vdso_enabled != 1)  /* Other values all mean "disabled" */
                 return 0;
 
         down_write(&mm->mmap_sem);
 
-        /* Test compat mode once here, in case someone
-           changes it via sysctl */
-        compat = (vdso_enabled == VDSO_COMPAT);
-
-        map_compat_vdso(compat);
-
-        if (compat)
-                addr = VDSO_HIGH_BASE;
-        else {
-                addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
-                if (IS_ERR_VALUE(addr)) {
-                        ret = addr;
-                        goto up_fail;
-                }
+        addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+        if (IS_ERR_VALUE(addr)) {
+                ret = addr;
+                goto up_fail;
         }
 
         current->mm->context.vdso = (void *)addr;
 
-        if (compat_uses_vma || !compat) {
-                /*
-                 * MAYWRITE to allow gdb to COW and set breakpoints
-                 */
-                ret = install_special_mapping(mm, addr, PAGE_SIZE,
-                                              VM_READ|VM_EXEC|
-                                              VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-                                              vdso32_pages);
-
-                if (ret)
-                        goto up_fail;
-        }
+        /*
+         * MAYWRITE to allow gdb to COW and set breakpoints
+         */
+        ret = install_special_mapping(mm, addr, PAGE_SIZE,
+                                      VM_READ|VM_EXEC|
+                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+                                      vdso32_pages);
+
+        if (ret)
+                goto up_fail;
 
         current_thread_info()->sysenter_return =
                 VDSO32_SYMBOL(addr, SYSENTER_RETURN);
@@ -411,20 +229,12 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 
 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
-        /*
-         * Check to see if the corresponding task was created in compat vdso
-         * mode.
-         */
-        if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
-                return &gate_vma;
         return NULL;
 }
 
 int in_gate_area(struct mm_struct *mm, unsigned long addr)
 {
-        const struct vm_area_struct *vma = get_gate_vma(mm);
-
-        return vma && addr >= vma->vm_start && addr < vma->vm_end;
+        return 0;
 }
 
 int in_gate_area_no_mm(unsigned long addr)
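
[Editorial illustration, not from the patch: after this change the 32-bit vDSO address always comes from get_unmapped_area(), so it appears as an ordinary mapping in /proc/self/maps rather than at the fixed VDSO_HIGH_BASE fixmap address. A small program to observe this:

#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *maps = fopen("/proc/self/maps", "r");
        char line[256];

        if (!maps) {
                perror("/proc/self/maps");
                return 1;
        }

        /* Print this process's own vDSO mapping, if any. */
        while (fgets(line, sizeof(line), maps))
                if (strstr(line, "[vdso]"))
                        fputs(line, stdout);

        fclose(maps);
        return 0;
}

With ASLR enabled the printed range should differ from run to run; booted with the 32-bit vDSO disabled, the line is absent entirely.]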