-rw-r--r--  arch/x86/include/asm/elf.h       |   8
-rw-r--r--  arch/x86/include/asm/vdso.h      |   4
-rw-r--r--  arch/x86/include/asm/vdso32.h    |  11
-rw-r--r--  arch/x86/vdso/vdso-layout.lds.S  |  42
-rw-r--r--  arch/x86/vdso/vdso2c.c           |  14
-rw-r--r--  arch/x86/vdso/vdso2c.h           |  17
-rw-r--r--  arch/x86/vdso/vdso32-setup.c     | 115
-rw-r--r--  arch/x86/vdso/vma.c              | 128
8 files changed, 173 insertions(+), 166 deletions(-)
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 65b21bcbe9f7..1a055c81d864 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -333,11 +333,9 @@ struct linux_binprm;
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp);
-extern int x32_setup_additional_pages(struct linux_binprm *bprm,
-				      int uses_interp);
-
-extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
-#define compat_arch_setup_additional_pages syscall32_setup_pages
+extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
+					      int uses_interp);
+#define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
 
 extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 #define arch_randomize_brk arch_randomize_brk
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index 389fe2ca27c2..d0a2c909c72d 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -14,6 +14,10 @@ struct vdso_image {
 
 	unsigned long alt, alt_len;
 
+	unsigned long sym_end_mapping;  /* Total size of the mapping */
+
+	unsigned long sym_vvar_page;
+	unsigned long sym_hpet_page;
 	unsigned long sym_VDSO32_NOTE_MASK;
 	unsigned long sym___kernel_sigreturn;
 	unsigned long sym___kernel_rt_sigreturn;
diff --git a/arch/x86/include/asm/vdso32.h b/arch/x86/include/asm/vdso32.h
deleted file mode 100644
index 7efb7018406e..000000000000
--- a/arch/x86/include/asm/vdso32.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _ASM_X86_VDSO32_H
-#define _ASM_X86_VDSO32_H
-
-#define VDSO_BASE_PAGE	0
-#define VDSO_VVAR_PAGE	1
-#define VDSO_HPET_PAGE	2
-#define VDSO_PAGES	3
-#define VDSO_PREV_PAGES	2
-#define VDSO_OFFSET(x)	((x) * PAGE_SIZE)
-
-#endif
diff --git a/arch/x86/vdso/vdso-layout.lds.S b/arch/x86/vdso/vdso-layout.lds.S
index 9df017ab2285..e177c08bb4bc 100644
--- a/arch/x86/vdso/vdso-layout.lds.S
+++ b/arch/x86/vdso/vdso-layout.lds.S
@@ -1,3 +1,5 @@
+#include <asm/vdso.h>
+
 /*
  * Linker script for vDSO. This is an ELF shared object prelinked to
  * its virtual address, and with only one read-only segment.
@@ -6,20 +8,6 @@
 
 SECTIONS
 {
-#ifdef BUILD_VDSO32
-#include <asm/vdso32.h>
-
-	hpet_page = . - VDSO_OFFSET(VDSO_HPET_PAGE);
-
-	vvar = . - VDSO_OFFSET(VDSO_VVAR_PAGE);
-
-	/* Place all vvars at the offsets in asm/vvar.h. */
-#define EMIT_VVAR(name, offset) vvar_ ## name = vvar + offset;
-#define __VVAR_KERNEL_LDS
-#include <asm/vvar.h>
-#undef __VVAR_KERNEL_LDS
-#undef EMIT_VVAR
-#endif
 	. = SIZEOF_HEADERS;
 
 	.hash : { *(.hash) } :text
@@ -59,11 +47,33 @@ SECTIONS
 
 	.text : { *(.text*) } :text =0x90909090,
 
-	/*
-	 * The comma above works around a bug in gold:
-	 * https://sourceware.org/bugzilla/show_bug.cgi?id=16804
-	 */
+#ifdef BUILD_VDSO32
+	/*
+	 * The remainder of the vDSO consists of special pages that are
+	 * shared between the kernel and userspace.  It needs to be at the
+	 * end so that it doesn't overlap the mapping of the actual
+	 * vDSO image.
+	 */
 
+	. = ALIGN(PAGE_SIZE);
+	vvar_page = .;
+
+	/* Place all vvars at the offsets in asm/vvar.h. */
+#define EMIT_VVAR(name, offset) vvar_ ## name = vvar_page + offset;
+#define __VVAR_KERNEL_LDS
+#include <asm/vvar.h>
+#undef __VVAR_KERNEL_LDS
+#undef EMIT_VVAR
+
+	. = vvar_page + PAGE_SIZE;
+
+	hpet_page = .;
+	. = . + PAGE_SIZE;
+#endif
+
+	. = ALIGN(PAGE_SIZE);
+	end_mapping = .;
+
 	/DISCARD/ : {
 		*(.discard)
 		*(.discard.*)
diff --git a/arch/x86/vdso/vdso2c.c b/arch/x86/vdso/vdso2c.c
index 976e8e4ced92..81edd1ec9df8 100644
--- a/arch/x86/vdso/vdso2c.c
+++ b/arch/x86/vdso/vdso2c.c
@@ -15,7 +15,21 @@
 #include <linux/types.h>
 
 /* Symbols that we need in vdso2c. */
+enum {
+	sym_vvar_page,
+	sym_hpet_page,
+	sym_end_mapping,
+};
+
+const int special_pages[] = {
+	sym_vvar_page,
+	sym_hpet_page,
+};
+
 char const * const required_syms[] = {
+	[sym_vvar_page] = "vvar_page",
+	[sym_hpet_page] = "hpet_page",
+	[sym_end_mapping] = "end_mapping",
 	"VDSO32_NOTE_MASK",
 	"VDSO32_SYSENTER_RETURN",
 	"__kernel_vsyscall",
diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
index 9276e5207620..ed2e894e89ab 100644
--- a/arch/x86/vdso/vdso2c.h
+++ b/arch/x86/vdso/vdso2c.h
@@ -87,6 +87,23 @@ static int GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
 		}
 	}
 
+	/* Validate mapping addresses. */
+	for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) {
+		if (!syms[i])
+			continue;  /* The mapping isn't used; ignore it. */
+
+		if (syms[i] % 4096)
+			fail("%s must be a multiple of 4096\n",
+			     required_syms[i]);
+		if (syms[i] < data_size)
+			fail("%s must be after the text mapping\n",
+			     required_syms[i]);
+		if (syms[sym_end_mapping] < syms[i] + 4096)
+			fail("%s overruns end_mapping\n", required_syms[i]);
+	}
+	if (syms[sym_end_mapping] % 4096)
+		fail("end_mapping must be a multiple of 4096\n");
+
 	/* Remove sections. */
 	hdr->e_shoff = 0;
 	hdr->e_shentsize = 0;
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index d41460118a28..c3ed708e50f4 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -8,28 +8,12 @@
 
 #include <linux/init.h>
 #include <linux/smp.h>
-#include <linux/thread_info.h>
-#include <linux/sched.h>
-#include <linux/gfp.h>
-#include <linux/string.h>
-#include <linux/elf.h>
-#include <linux/mm.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/mm_types.h>
 
 #include <asm/cpufeature.h>
-#include <asm/msr.h>
-#include <asm/pgtable.h>
-#include <asm/unistd.h>
-#include <asm/elf.h>
-#include <asm/tlbflush.h>
+#include <asm/processor.h>
 #include <asm/vdso.h>
-#include <asm/proto.h>
-#include <asm/fixmap.h>
-#include <asm/hpet.h>
-#include <asm/vvar.h>
-#include <asm/vdso32.h>
 
 #ifdef CONFIG_COMPAT_VDSO
 #define VDSO_DEFAULT 0
@@ -37,10 +21,6 @@
 #define VDSO_DEFAULT 1
 #endif
 
-#ifdef CONFIG_X86_64
-#define arch_setup_additional_pages syscall32_setup_pages
-#endif
-
 /*
  * Should the kernel map a VDSO page into processes and pass its
  * address down to glibc upon exec()?
@@ -101,95 +81,6 @@ int __init sysenter_setup(void)
 	return 0;
 }
 
-/* Setup a VMA at program startup for the vsyscall page */
-int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long addr;
-	int ret = 0;
-	struct vm_area_struct *vma;
-	unsigned long vdso32_size = selected_vdso32->size;
-
-#ifdef CONFIG_X86_X32_ABI
-	if (test_thread_flag(TIF_X32))
-		return x32_setup_additional_pages(bprm, uses_interp);
-#endif
-
-	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
-		return 0;
-
-	down_write(&mm->mmap_sem);
-
-	addr = get_unmapped_area(NULL, 0, vdso32_size + VDSO_OFFSET(VDSO_PREV_PAGES), 0, 0);
-	if (IS_ERR_VALUE(addr)) {
-		ret = addr;
-		goto up_fail;
-	}
-
-	addr += VDSO_OFFSET(VDSO_PREV_PAGES);
-
-	current->mm->context.vdso = (void __user *)addr;
-
-	/*
-	 * MAYWRITE to allow gdb to COW and set breakpoints
-	 */
-	ret = install_special_mapping(mm,
-			addr,
-			vdso32_size,
-			VM_READ|VM_EXEC|
-			VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-			selected_vdso32->pages);
-
-	if (ret)
-		goto up_fail;
-
-	vma = _install_special_mapping(mm,
-			addr - VDSO_OFFSET(VDSO_PREV_PAGES),
-			VDSO_OFFSET(VDSO_PREV_PAGES),
-			VM_READ,
-			NULL);
-
-	if (IS_ERR(vma)) {
-		ret = PTR_ERR(vma);
-		goto up_fail;
-	}
-
-	ret = remap_pfn_range(vma,
-		addr - VDSO_OFFSET(VDSO_VVAR_PAGE),
-		__pa_symbol(&__vvar_page) >> PAGE_SHIFT,
-		PAGE_SIZE,
-		PAGE_READONLY);
-
-	if (ret)
-		goto up_fail;
-
-#ifdef CONFIG_HPET_TIMER
-	if (hpet_address) {
-		ret = io_remap_pfn_range(vma,
-			addr - VDSO_OFFSET(VDSO_HPET_PAGE),
-			hpet_address >> PAGE_SHIFT,
-			PAGE_SIZE,
-			pgprot_noncached(PAGE_READONLY));
-
-		if (ret)
-			goto up_fail;
-	}
-#endif
-
-	if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
-		current_thread_info()->sysenter_return =
-			current->mm->context.vdso +
-			selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
-
-up_fail:
-	if (ret)
-		current->mm->context.vdso = NULL;
-
-	up_write(&mm->mmap_sem);
-
-	return ret;
-}
-
 #ifdef CONFIG_X86_64
 
 subsys_initcall(sysenter_setup);
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index cf217626fb47..e915eaec4f96 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -15,6 +15,7 @@
 #include <asm/proto.h>
 #include <asm/vdso.h>
 #include <asm/page.h>
+#include <asm/hpet.h>
 
 #if defined(CONFIG_X86_64)
 unsigned int __read_mostly vdso64_enabled = 1;
@@ -36,7 +37,6 @@ void __init init_vdso_image(const struct vdso_image *image)
 			image->alt_len));
 }
 
-
 #if defined(CONFIG_X86_64)
 static int __init init_vdso(void)
 {
@@ -49,13 +49,16 @@ static int __init init_vdso(void)
 	return 0;
 }
 subsys_initcall(init_vdso);
+#endif
 
 struct linux_binprm;
 
 /* Put the vdso above the (randomized) stack with another randomized offset.
    This way there is no hole in the middle of address space.
    To save memory make sure it is still in the same PTE as the stack top.
-   This doesn't give that many random bits */
+   This doesn't give that many random bits.
+
+   Only used for the 64-bit and x32 vdsos. */
 static unsigned long vdso_addr(unsigned long start, unsigned len)
 {
 	unsigned long addr, end;
@@ -81,23 +84,23 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 	return addr;
 }
 
-/* Setup a VMA at program startup for the vsyscall page.
-   Not called for compat tasks */
-static int setup_additional_pages(struct linux_binprm *bprm,
-				  int uses_interp,
-				  struct page **pages,
-				  unsigned size)
+static int map_vdso(const struct vdso_image *image, bool calculate_addr)
 {
 	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
 	unsigned long addr;
-	int ret;
+	int ret = 0;
 
-	if (!vdso64_enabled)
-		return 0;
+	if (calculate_addr) {
+		addr = vdso_addr(current->mm->start_stack,
+				 image->sym_end_mapping);
+	} else {
+		addr = 0;
+	}
 
 	down_write(&mm->mmap_sem);
-	addr = vdso_addr(mm->start_stack, size);
-	addr = get_unmapped_area(NULL, addr, size, 0, 0);
+
+	addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, 0);
 	if (IS_ERR_VALUE(addr)) {
 		ret = addr;
 		goto up_fail;
@@ -105,34 +108,115 @@ static int setup_additional_pages(struct linux_binprm *bprm,
 
 	current->mm->context.vdso = (void __user *)addr;
 
-	ret = install_special_mapping(mm, addr, size,
+	/*
+	 * MAYWRITE to allow gdb to COW and set breakpoints
+	 */
+	ret = install_special_mapping(mm,
+				      addr,
+				      image->size,
 				      VM_READ|VM_EXEC|
 				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-				      pages);
-	if (ret) {
-		current->mm->context.vdso = NULL;
+				      image->pages);
+
+	if (ret)
+		goto up_fail;
+
+	vma = _install_special_mapping(mm,
+				       addr + image->size,
+				       image->sym_end_mapping - image->size,
+				       VM_READ,
+				       NULL);
+
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto up_fail;
 	}
 
+	if (image->sym_vvar_page)
+		ret = remap_pfn_range(vma,
+				      addr + image->sym_vvar_page,
+				      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
+				      PAGE_SIZE,
+				      PAGE_READONLY);
+
+	if (ret)
+		goto up_fail;
+
+#ifdef CONFIG_HPET_TIMER
+	if (hpet_address && image->sym_hpet_page) {
+		ret = io_remap_pfn_range(vma,
+				addr + image->sym_hpet_page,
+				hpet_address >> PAGE_SHIFT,
+				PAGE_SIZE,
+				pgprot_noncached(PAGE_READONLY));
+
+		if (ret)
+			goto up_fail;
+	}
+#endif
+
 up_fail:
+	if (ret)
+		current->mm->context.vdso = NULL;
+
 	up_write(&mm->mmap_sem);
 	return ret;
 }
 
+#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
+static int load_vdso32(void)
+{
+	int ret;
+
+	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
+		return 0;
+
+	ret = map_vdso(selected_vdso32, false);
+	if (ret)
+		return ret;
+
+	if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
+		current_thread_info()->sysenter_return =
+			current->mm->context.vdso +
+			selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_X86_64
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
-	return setup_additional_pages(bprm, uses_interp, vdso_image_64.pages,
-				      vdso_image_64.size);
+	if (!vdso64_enabled)
+		return 0;
+
+	return map_vdso(&vdso_image_64, true);
 }
 
+#ifdef CONFIG_COMPAT
+int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
+				       int uses_interp)
+{
 #ifdef CONFIG_X86_X32_ABI
-int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+	if (test_thread_flag(TIF_X32)) {
+		if (!vdso64_enabled)
+			return 0;
+
+		return map_vdso(&vdso_image_x32, true);
+	}
+#endif
+
+	return load_vdso32();
+}
+#endif
+#else
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
-	return setup_additional_pages(bprm, uses_interp, vdso_image_x32.pages,
-				      vdso_image_x32.size);
+	return load_vdso32();
 }
 #endif
 
+#ifdef CONFIG_X86_64
 static __init int vdso_setup(char *s)
 {
 	vdso64_enabled = simple_strtoul(s, NULL, 0);