Diffstat (limited to 'arch/x86/vdso/vma.c')
-rw-r--r--  arch/x86/vdso/vma.c | 236
1 file changed, 130 insertions(+), 106 deletions(-)
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 1ad102613127..e1513c47872a 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -15,115 +15,51 @@
 #include <asm/proto.h>
 #include <asm/vdso.h>
 #include <asm/page.h>
+#include <asm/hpet.h>
 
 #if defined(CONFIG_X86_64)
-unsigned int __read_mostly vdso_enabled = 1;
+unsigned int __read_mostly vdso64_enabled = 1;
 
-DECLARE_VDSO_IMAGE(vdso);
 extern unsigned short vdso_sync_cpuid;
-static unsigned vdso_size;
-
-#ifdef CONFIG_X86_X32_ABI
-DECLARE_VDSO_IMAGE(vdsox32);
-static unsigned vdsox32_size;
-#endif
 #endif
 
-#if defined(CONFIG_X86_32) || defined(CONFIG_X86_X32_ABI) || \
-	defined(CONFIG_COMPAT)
-void __init patch_vdso32(void *vdso, size_t len)
+void __init init_vdso_image(const struct vdso_image *image)
 {
-	Elf32_Ehdr *hdr = vdso;
-	Elf32_Shdr *sechdrs, *alt_sec = 0;
-	char *secstrings;
-	void *alt_data;
 	int i;
+	int npages = (image->size) / PAGE_SIZE;
 
-	BUG_ON(len < sizeof(Elf32_Ehdr));
-	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
-
-	sechdrs = (void *)hdr + hdr->e_shoff;
-	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-
-	for (i = 1; i < hdr->e_shnum; i++) {
-		Elf32_Shdr *shdr = &sechdrs[i];
-		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
-			alt_sec = shdr;
-			goto found;
-		}
-	}
-
-	/* If we get here, it's probably a bug. */
-	pr_warning("patch_vdso32: .altinstructions not found\n");
-	return;  /* nothing to patch */
+	BUG_ON(image->size % PAGE_SIZE != 0);
+	for (i = 0; i < npages; i++)
+		image->text_mapping.pages[i] =
+			virt_to_page(image->data + i*PAGE_SIZE);
 
-found:
-	alt_data = (void *)hdr + alt_sec->sh_offset;
-	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
+	apply_alternatives((struct alt_instr *)(image->data + image->alt),
+			   (struct alt_instr *)(image->data + image->alt +
+						image->alt_len));
 }
-#endif
 
 #if defined(CONFIG_X86_64)
-static void __init patch_vdso64(void *vdso, size_t len)
-{
-	Elf64_Ehdr *hdr = vdso;
-	Elf64_Shdr *sechdrs, *alt_sec = 0;
-	char *secstrings;
-	void *alt_data;
-	int i;
-
-	BUG_ON(len < sizeof(Elf64_Ehdr));
-	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
-
-	sechdrs = (void *)hdr + hdr->e_shoff;
-	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-
-	for (i = 1; i < hdr->e_shnum; i++) {
-		Elf64_Shdr *shdr = &sechdrs[i];
-		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
-			alt_sec = shdr;
-			goto found;
-		}
-	}
-
-	/* If we get here, it's probably a bug. */
-	pr_warning("patch_vdso64: .altinstructions not found\n");
-	return;  /* nothing to patch */
-
-found:
-	alt_data = (void *)hdr + alt_sec->sh_offset;
-	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
-}
-
 static int __init init_vdso(void)
 {
-	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
-	int i;
-
-	patch_vdso64(vdso_start, vdso_end - vdso_start);
-
-	vdso_size = npages << PAGE_SHIFT;
-	for (i = 0; i < npages; i++)
-		vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
+	init_vdso_image(&vdso_image_64);
 
 #ifdef CONFIG_X86_X32_ABI
-	patch_vdso32(vdsox32_start, vdsox32_end - vdsox32_start);
-	npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
-	vdsox32_size = npages << PAGE_SHIFT;
-	for (i = 0; i < npages; i++)
-		vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE);
+	init_vdso_image(&vdso_image_x32);
 #endif
 
 	return 0;
 }
 subsys_initcall(init_vdso);
+#endif
 
 struct linux_binprm;
 
 /* Put the vdso above the (randomized) stack with another randomized offset.
    This way there is no hole in the middle of address space.
    To save memory make sure it is still in the same PTE as the stack top.
-   This doesn't give that many random bits */
+   This doesn't give that many random bits.
+
+   Only used for the 64-bit and x32 vdsos. */
 static unsigned long vdso_addr(unsigned long start, unsigned len)
 {
 	unsigned long addr, end;
@@ -149,61 +85,149 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 	return addr;
 }
 
-/* Setup a VMA at program startup for the vsyscall page.
-   Not called for compat tasks */
-static int setup_additional_pages(struct linux_binprm *bprm,
-				  int uses_interp,
-				  struct page **pages,
-				  unsigned size)
+static int map_vdso(const struct vdso_image *image, bool calculate_addr)
 {
 	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
 	unsigned long addr;
-	int ret;
-
-	if (!vdso_enabled)
-		return 0;
+	int ret = 0;
+	static struct page *no_pages[] = {NULL};
+	static struct vm_special_mapping vvar_mapping = {
+		.name = "[vvar]",
+		.pages = no_pages,
+	};
+
+	if (calculate_addr) {
+		addr = vdso_addr(current->mm->start_stack,
+				 image->sym_end_mapping);
+	} else {
+		addr = 0;
+	}
 
 	down_write(&mm->mmap_sem);
-	addr = vdso_addr(mm->start_stack, size);
-	addr = get_unmapped_area(NULL, addr, size, 0, 0);
+
+	addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, 0);
 	if (IS_ERR_VALUE(addr)) {
 		ret = addr;
 		goto up_fail;
 	}
 
-	current->mm->context.vdso = (void *)addr;
+	current->mm->context.vdso = (void __user *)addr;
 
-	ret = install_special_mapping(mm, addr, size,
-				      VM_READ|VM_EXEC|
-				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-				      pages);
-	if (ret) {
-		current->mm->context.vdso = NULL;
+	/*
+	 * MAYWRITE to allow gdb to COW and set breakpoints
+	 */
+	vma = _install_special_mapping(mm,
+				       addr,
+				       image->size,
+				       VM_READ|VM_EXEC|
+				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+				       &image->text_mapping);
+
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto up_fail;
 	}
 
+	vma = _install_special_mapping(mm,
+				       addr + image->size,
+				       image->sym_end_mapping - image->size,
+				       VM_READ,
+				       &vvar_mapping);
+
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto up_fail;
+	}
+
+	if (image->sym_vvar_page)
+		ret = remap_pfn_range(vma,
+				      addr + image->sym_vvar_page,
+				      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
+				      PAGE_SIZE,
+				      PAGE_READONLY);
+
+	if (ret)
+		goto up_fail;
+
+#ifdef CONFIG_HPET_TIMER
+	if (hpet_address && image->sym_hpet_page) {
+		ret = io_remap_pfn_range(vma,
+			addr + image->sym_hpet_page,
+			hpet_address >> PAGE_SHIFT,
+			PAGE_SIZE,
+			pgprot_noncached(PAGE_READONLY));
+
+		if (ret)
+			goto up_fail;
+	}
+#endif
+
 up_fail:
+	if (ret)
+		current->mm->context.vdso = NULL;
+
 	up_write(&mm->mmap_sem);
 	return ret;
 }
 
+#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
+static int load_vdso32(void)
+{
+	int ret;
+
+	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
+		return 0;
+
+	ret = map_vdso(selected_vdso32, false);
+	if (ret)
+		return ret;
+
+	if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
+		current_thread_info()->sysenter_return =
+			current->mm->context.vdso +
+			selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_X86_64
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
-	return setup_additional_pages(bprm, uses_interp, vdso_pages,
-				      vdso_size);
+	if (!vdso64_enabled)
+		return 0;
+
+	return map_vdso(&vdso_image_64, true);
 }
 
+#ifdef CONFIG_COMPAT
+int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
+				       int uses_interp)
+{
 #ifdef CONFIG_X86_X32_ABI
-int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+	if (test_thread_flag(TIF_X32)) {
+		if (!vdso64_enabled)
+			return 0;
+
+		return map_vdso(&vdso_image_x32, true);
+	}
+#endif
+
+	return load_vdso32();
+}
+#endif
+#else
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
-	return setup_additional_pages(bprm, uses_interp, vdsox32_pages,
-				      vdsox32_size);
+	return load_vdso32();
 }
 #endif
 
+#ifdef CONFIG_X86_64
 static __init int vdso_setup(char *s)
 {
-	vdso_enabled = simple_strtoul(s, NULL, 0);
+	vdso64_enabled = simple_strtoul(s, NULL, 0);
 	return 0;
 }
 __setup("vdso=", vdso_setup);
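
The new code keys everything off a per-image descriptor instead of per-flavor globals. A rough sketch of that descriptor, inferred only from the fields this diff dereferences (the authoritative definition lives in arch/x86/include/asm/vdso.h and has more members than shown here):

/* Sketch only -- field set inferred from the uses above, not copied from the header. */
struct vdso_image {
	void *data;			/* the vdso ELF image, linked into the kernel */
	unsigned long size;		/* text size; init_vdso_image() requires a page multiple */

	unsigned long alt, alt_len;	/* .altinstructions, as offset/length into data */

	struct vm_special_mapping text_mapping;	/* pages backing the vdso text VMA */

	/* offsets from the start of the mapping; 0 means "not present" */
	unsigned long sym_vvar_page;	/* where the vvar page gets remapped */
	unsigned long sym_hpet_page;	/* where the HPET MMIO page gets remapped */
	unsigned long sym_end_mapping;	/* total span: text plus the vvar/hpet area */
	unsigned long sym_VDSO32_SYSENTER_RETURN;	/* 32-bit images only */
};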
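The effect is visible from userspace: map_vdso() now installs a separate read-only "[vvar]" mapping adjacent to the vdso text (at addr + image->size). A minimal check, ordinary userspace C and not part of the patch, using the mapping names the kernel reports in /proc/<pid>/maps:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");	/* per-process mapping list */

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* print the vdso text mapping and the vvar data mapping */
		if (strstr(line, "[vdso]") || strstr(line, "[vvar]"))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}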