Diffstat (limited to 'arch/x86/kernel/sysenter_32.c')
 -rw-r--r--  arch/x86/kernel/sysenter_32.c | 348
 1 file changed, 348 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/sysenter_32.c b/arch/x86/kernel/sysenter_32.c
new file mode 100644
index 000000000000..4eb2e408764f
--- /dev/null
+++ b/arch/x86/kernel/sysenter_32.c
@@ -0,0 +1,348 @@
/*
 * linux/arch/i386/kernel/sysenter.c
 *
 * (C) Copyright 2002 Linus Torvalds
 * Portions based on the vdso-randomization code from exec-shield:
 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 *
 * This file contains the needed initializations to support sysenter.
 */

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>

#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/elf.h>
#include <asm/tlbflush.h>

enum {
        VDSO_DISABLED = 0,
        VDSO_ENABLED = 1,
        VDSO_COMPAT = 2,
};

#ifdef CONFIG_COMPAT_VDSO
#define VDSO_DEFAULT    VDSO_COMPAT
#else
#define VDSO_DEFAULT    VDSO_ENABLED
#endif

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = VDSO_DEFAULT;

EXPORT_SYMBOL_GPL(vdso_enabled);

static int __init vdso_setup(char *s)
{
        vdso_enabled = simple_strtoul(s, NULL, 0);

        return 1;
}

__setup("vdso=", vdso_setup);

extern asmlinkage void sysenter_entry(void);

static __init void reloc_symtab(Elf32_Ehdr *ehdr,
                                unsigned offset, unsigned size)
{
        Elf32_Sym *sym = (void *)ehdr + offset;
        unsigned nsym = size / sizeof(*sym);
        unsigned i;

        for(i = 0; i < nsym; i++, sym++) {
                if (sym->st_shndx == SHN_UNDEF ||
                    sym->st_shndx == SHN_ABS)
                        continue;  /* skip */

                if (sym->st_shndx > SHN_LORESERVE) {
                        printk(KERN_INFO "VDSO: unexpected st_shndx %x\n",
                               sym->st_shndx);
                        continue;
                }

                switch(ELF_ST_TYPE(sym->st_info)) {
                case STT_OBJECT:
                case STT_FUNC:
                case STT_SECTION:
                case STT_FILE:
                        sym->st_value += VDSO_HIGH_BASE;
                }
        }
}

static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset)
{
        Elf32_Dyn *dyn = (void *)ehdr + offset;

        for(; dyn->d_tag != DT_NULL; dyn++)
                switch(dyn->d_tag) {
                case DT_PLTGOT:
                case DT_HASH:
                case DT_STRTAB:
                case DT_SYMTAB:
                case DT_RELA:
                case DT_INIT:
                case DT_FINI:
                case DT_REL:
                case DT_DEBUG:
                case DT_JMPREL:
                case DT_VERSYM:
                case DT_VERDEF:
                case DT_VERNEED:
                case DT_ADDRRNGLO ... DT_ADDRRNGHI:
                        /* definitely pointers needing relocation */
                        dyn->d_un.d_ptr += VDSO_HIGH_BASE;
                        break;

                case DT_ENCODING ... OLD_DT_LOOS-1:
                case DT_LOOS ... DT_HIOS-1:
                        /* Tags above DT_ENCODING are pointers if
                           they're even */
                        if (dyn->d_tag >= DT_ENCODING &&
                            (dyn->d_tag & 1) == 0)
                                dyn->d_un.d_ptr += VDSO_HIGH_BASE;
                        break;

                case DT_VERDEFNUM:
                case DT_VERNEEDNUM:
                case DT_FLAGS_1:
                case DT_RELACOUNT:
                case DT_RELCOUNT:
                case DT_VALRNGLO ... DT_VALRNGHI:
                        /* definitely not pointers */
                        break;

                case OLD_DT_LOOS ... DT_LOOS-1:
                case DT_HIOS ... DT_VALRNGLO-1:
                default:
                        if (dyn->d_tag > DT_ENCODING)
                                printk(KERN_INFO "VDSO: unexpected DT_tag %x\n",
                                       dyn->d_tag);
                        break;
                }
}

static __init void relocate_vdso(Elf32_Ehdr *ehdr)
{
        Elf32_Phdr *phdr;
        Elf32_Shdr *shdr;
        int i;

        BUG_ON(memcmp(ehdr->e_ident, ELFMAG, 4) != 0 ||
               !elf_check_arch(ehdr) ||
               ehdr->e_type != ET_DYN);

        ehdr->e_entry += VDSO_HIGH_BASE;

        /* rebase phdrs */
        phdr = (void *)ehdr + ehdr->e_phoff;
        for (i = 0; i < ehdr->e_phnum; i++) {
                phdr[i].p_vaddr += VDSO_HIGH_BASE;

                /* relocate dynamic stuff */
                if (phdr[i].p_type == PT_DYNAMIC)
                        reloc_dyn(ehdr, phdr[i].p_offset);
        }

        /* rebase sections */
        shdr = (void *)ehdr + ehdr->e_shoff;
        for(i = 0; i < ehdr->e_shnum; i++) {
                if (!(shdr[i].sh_flags & SHF_ALLOC))
                        continue;

                shdr[i].sh_addr += VDSO_HIGH_BASE;

                if (shdr[i].sh_type == SHT_SYMTAB ||
                    shdr[i].sh_type == SHT_DYNSYM)
                        reloc_symtab(ehdr, shdr[i].sh_offset,
                                     shdr[i].sh_size);
        }
}

void enable_sep_cpu(void)
{
        int cpu = get_cpu();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);

        if (!boot_cpu_has(X86_FEATURE_SEP)) {
                put_cpu();
                return;
        }

        tss->x86_tss.ss1 = __KERNEL_CS;
        tss->x86_tss.esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
        wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
        wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.esp1, 0);
        wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
        put_cpu();
}

static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
        gate_vma.vm_mm = NULL;
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
        gate_vma.vm_page_prot = __P101;
        /*
         * Make sure the vDSO gets into every core dump.
         * Dumping its contents makes post-mortem fully interpretable later
         * without matching up the same kernel and hardware config to see
         * what PC values meant.
         */
        gate_vma.vm_flags |= VM_ALWAYSDUMP;
        return 0;
}

/*
 * These symbols are defined by vsyscall.o to mark the bounds
 * of the ELF DSO images included therein.
 */
extern const char vsyscall_int80_start, vsyscall_int80_end;
extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
static struct page *syscall_pages[1];

static void map_compat_vdso(int map)
{
        static int vdso_mapped;

        if (map == vdso_mapped)
                return;

        vdso_mapped = map;

        __set_fixmap(FIX_VDSO, page_to_pfn(syscall_pages[0]) << PAGE_SHIFT,
                     map ? PAGE_READONLY_EXEC : PAGE_NONE);

        /* flush stray tlbs */
        flush_tlb_all();
}

int __init sysenter_setup(void)
{
        void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
        const void *vsyscall;
        size_t vsyscall_len;

        syscall_pages[0] = virt_to_page(syscall_page);

        gate_vma_init();

        printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));

        if (!boot_cpu_has(X86_FEATURE_SEP)) {
                vsyscall = &vsyscall_int80_start;
                vsyscall_len = &vsyscall_int80_end - &vsyscall_int80_start;
        } else {
                vsyscall = &vsyscall_sysenter_start;
                vsyscall_len = &vsyscall_sysenter_end - &vsyscall_sysenter_start;
        }

        memcpy(syscall_page, vsyscall, vsyscall_len);
        relocate_vdso(syscall_page);

        return 0;
}

/* Defined in vsyscall-sysenter.S */
extern void SYSENTER_RETURN;

/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;
        bool compat;

        down_write(&mm->mmap_sem);

        /* Test compat mode once here, in case someone
           changes it via sysctl */
        compat = (vdso_enabled == VDSO_COMPAT);

        map_compat_vdso(compat);

        if (compat)
                addr = VDSO_HIGH_BASE;
        else {
                addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
                if (IS_ERR_VALUE(addr)) {
                        ret = addr;
                        goto up_fail;
                }

                /*
                 * MAYWRITE to allow gdb to COW and set breakpoints
                 *
                 * Make sure the vDSO gets into every core dump.
                 * Dumping its contents makes post-mortem fully
                 * interpretable later without matching up the same
                 * kernel and hardware config to see what PC values
                 * meant.
                 */
                ret = install_special_mapping(mm, addr, PAGE_SIZE,
                                              VM_READ|VM_EXEC|
                                              VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
                                              VM_ALWAYSDUMP,
                                              syscall_pages);

                if (ret)
                        goto up_fail;
        }

        current->mm->context.vdso = (void *)addr;
        current_thread_info()->sysenter_return =
                (void *)VDSO_SYM(&SYSENTER_RETURN);

  up_fail:
        up_write(&mm->mmap_sem);

        return ret;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";
        return NULL;
}

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
        struct mm_struct *mm = tsk->mm;

        /* Check to see if this task was created in compat vdso mode */
        if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
                return &gate_vma;
        return NULL;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        const struct vm_area_struct *vma = get_gate_vma(task);

        return vma && addr >= vma->vm_start && addr < vma->vm_end;
}

int in_gate_area_no_task(unsigned long addr)
{
        return 0;
}