author     Thomas Gleixner <tglx@linutronix.de>   2007-10-11 05:17:10 -0400
committer  Thomas Gleixner <tglx@linutronix.de>   2007-10-11 05:17:10 -0400
commit     7648b1330c335601b7c09c25f77a03cda128fcab
tree       8b92b501dc746b135bf9019472b425e8ef052714 /arch/x86/vdso/vma.c
parent     185f3d38900f750a4566f87cde6a178f3595a115
x86_64: move vdso
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/vdso/vma.c')
-rw-r--r--  arch/x86/vdso/vma.c  140
1 file changed, 140 insertions, 0 deletions
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
new file mode 100644
index 000000000000..ff9333e5fb08
--- /dev/null
+++ b/arch/x86/vdso/vma.c
@@ -0,0 +1,140 @@
+/*
+ * Set up the VMAs to tell the VM about the vDSO.
+ * Copyright 2007 Andi Kleen, SUSE Labs.
+ * Subject to the GPL, v.2
+ */
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <asm/vsyscall.h>
+#include <asm/vgtod.h>
+#include <asm/proto.h>
+#include "voffset.h"
+
+int vdso_enabled = 1;
+
+#define VEXTERN(x) extern typeof(__ ## x) *vdso_ ## x;
+#include "vextern.h"
+#undef VEXTERN
+
+extern char vdso_kernel_start[], vdso_start[], vdso_end[];
+extern unsigned short vdso_sync_cpuid;
+
+struct page **vdso_pages;
+
+static inline void *var_ref(void *vbase, char *var, char *name)
+{
+        unsigned offset = var - &vdso_kernel_start[0] + VDSO_TEXT_OFFSET;
+        void *p = vbase + offset;
+        if (*(void **)p != (void *)VMAGIC) {
+                printk("VDSO: variable %s broken\n", name);
+                vdso_enabled = 0;
+        }
+        return p;
+}
+
+static int __init init_vdso_vars(void)
+{
+        int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
+        int i;
+        char *vbase;
+
+        vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
+        if (!vdso_pages)
+                goto oom;
+        for (i = 0; i < npages; i++) {
+                struct page *p;
+                p = alloc_page(GFP_KERNEL);
+                if (!p)
+                        goto oom;
+                vdso_pages[i] = p;
+                copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
+        }
+
+        vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL);
+        if (!vbase)
+                goto oom;
+
+        if (memcmp(vbase, "\177ELF", 4)) {
+                printk("VDSO: I'm broken; not ELF\n");
+                vdso_enabled = 0;
+        }
+
+#define V(x) *(typeof(x) *) var_ref(vbase, (char *)RELOC_HIDE(&x, 0), #x)
+#define VEXTERN(x) \
+        V(vdso_ ## x) = &__ ## x;
+#include "vextern.h"
+#undef VEXTERN
+        return 0;
+
+ oom:
+        printk("Cannot allocate vdso\n");
+        vdso_enabled = 0;
+        return -ENOMEM;
+}
+__initcall(init_vdso_vars);
+
+struct linux_binprm;
+
+/* Put the vdso above the (randomized) stack with another randomized offset.
+   This way there is no hole in the middle of address space.
+   To save memory make sure it is still in the same PTE as the stack top.
+   This doesn't give that many random bits */
+static unsigned long vdso_addr(unsigned long start, unsigned len)
+{
+        unsigned long addr, end;
+        unsigned offset;
+        end = (start + PMD_SIZE - 1) & PMD_MASK;
+        if (end >= TASK_SIZE64)
+                end = TASK_SIZE64;
+        end -= len;
+        /* This loses some more bits than a modulo, but is cheaper */
+        offset = get_random_int() & (PTRS_PER_PTE - 1);
+        addr = start + (offset << PAGE_SHIFT);
+        if (addr >= end)
+                addr = end;
+        return addr;
+}
+
+/* Setup a VMA at program startup for the vsyscall page.
+   Not called for compat tasks */
+int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
+{
+        struct mm_struct *mm = current->mm;
+        unsigned long addr;
+        int ret;
+        unsigned len = round_up(vdso_end - vdso_start, PAGE_SIZE);
+
+        if (!vdso_enabled)
+                return 0;
+
+        down_write(&mm->mmap_sem);
+        addr = vdso_addr(mm->start_stack, len);
+        addr = get_unmapped_area(NULL, addr, len, 0, 0);
+        if (IS_ERR_VALUE(addr)) {
+                ret = addr;
+                goto up_fail;
+        }
+
+        ret = install_special_mapping(mm, addr, len,
+                                      VM_READ|VM_EXEC|
+                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
+                                      VM_ALWAYSDUMP,
+                                      vdso_pages);
+        if (ret)
+                goto up_fail;
+
+        current->mm->context.vdso = (void *)addr;
+ up_fail:
+        up_write(&mm->mmap_sem);
+        return ret;
+}
+
+static __init int vdso_setup(char *s)
+{
+        vdso_enabled = simple_strtoul(s, NULL, 0);
+        return 0;
+}
+__setup("vdso=", vdso_setup);
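
The only non-obvious arithmetic in this patch is the address selection in vdso_addr(). As a side note for readers who want to try that computation outside the kernel, the following is a minimal user-space sketch of the same steps. It assumes x86_64 constants (4 KiB pages, 512 PTEs per page table, 2 MiB PMDs, TASK_SIZE64 just under the 47-bit boundary) and substitutes rand() for get_random_int(); the pick_vdso_addr() name, the constant definitions, and the main() harness are illustrative only, not kernel API.

    /* Illustrative re-implementation of the vdso_addr() arithmetic in
     * user space. All constants below are assumptions matching x86_64
     * defaults, not taken from kernel headers. */
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT   12
    #define PTRS_PER_PTE 512UL
    #define PMD_SIZE     (1UL << 21)            /* 2 MiB */
    #define PMD_MASK     (~(PMD_SIZE - 1))
    #define TASK_SIZE64  0x00007ffffffff000UL   /* illustrative user-space limit */

    static unsigned long pick_vdso_addr(unsigned long stack_top, unsigned long len)
    {
        /* Round the stack top up to the end of its 2 MiB PMD region,
         * clamp to the end of user space, and leave room for the vdso. */
        unsigned long end = (stack_top + PMD_SIZE - 1) & PMD_MASK;
        unsigned long offset, addr;

        if (end >= TASK_SIZE64)
            end = TASK_SIZE64;
        end -= len;

        /* Pick a random page offset in [0, 511] above the stack top.
         * Masking with PTRS_PER_PTE - 1 is cheaper than a modulo but
         * discards a few bits, as the original comment notes. */
        offset = (unsigned long)rand() & (PTRS_PER_PTE - 1);
        addr = stack_top + (offset << PAGE_SHIFT);
        if (addr >= end)
            addr = end;
        return addr;
    }

    int main(void)
    {
        unsigned long stack_top = 0x00007fffffffe000UL;  /* example value */
        printf("vdso candidate: %#lx\n", pick_vdso_addr(stack_top, 2 * 4096));
        return 0;
    }

Because the clamp keeps the result below the PMD-aligned boundary above the stack top, the chosen address normally falls in the same last-level page table as the top of the stack, which is the memory-saving property the comment in vdso_addr() describes.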