 arch/x86/entry/vdso/vma.c | 98 ++++++++++++++++++++++++--------------------------
 1 file changed, 48 insertions(+), 50 deletions(-)
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 4459e73e234d..23c881caabd1 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -37,54 +37,6 @@ void __init init_vdso_image(const struct vdso_image *image)
 
 struct linux_binprm;
 
-/*
- * Put the vdso above the (randomized) stack with another randomized
- * offset. This way there is no hole in the middle of address space.
- * To save memory make sure it is still in the same PTE as the stack
- * top. This doesn't give that many random bits.
- *
- * Note that this algorithm is imperfect: the distribution of the vdso
- * start address within a PMD is biased toward the end.
- *
- * Only used for the 64-bit and x32 vdsos.
- */
-static unsigned long vdso_addr(unsigned long start, unsigned len)
-{
-#ifdef CONFIG_X86_32
-        return 0;
-#else
-        unsigned long addr, end;
-        unsigned offset;
-
-        /*
-         * Round up the start address. It can start out unaligned as a result
-         * of stack start randomization.
-         */
-        start = PAGE_ALIGN(start);
-
-        /* Round the lowest possible end address up to a PMD boundary. */
-        end = (start + len + PMD_SIZE - 1) & PMD_MASK;
-        if (end >= TASK_SIZE_MAX)
-                end = TASK_SIZE_MAX;
-        end -= len;
-
-        if (end > start) {
-                offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
-                addr = start + (offset << PAGE_SHIFT);
-        } else {
-                addr = start;
-        }
-
-        /*
-         * Forcibly align the final address in case we have a hardware
-         * issue that requires alignment for performance reasons.
-         */
-        addr = align_vdso_addr(addr);
-
-        return addr;
-#endif
-}
-
 static int vdso_fault(const struct vm_special_mapping *sm,
                       struct vm_area_struct *vma, struct vm_fault *vmf)
 {
@@ -249,12 +201,58 @@ up_fail:
         return ret;
 }
 
+#ifdef CONFIG_X86_64
+/*
+ * Put the vdso above the (randomized) stack with another randomized
+ * offset. This way there is no hole in the middle of address space.
+ * To save memory make sure it is still in the same PTE as the stack
+ * top. This doesn't give that many random bits.
+ *
+ * Note that this algorithm is imperfect: the distribution of the vdso
+ * start address within a PMD is biased toward the end.
+ *
+ * Only used for the 64-bit and x32 vdsos.
+ */
+static unsigned long vdso_addr(unsigned long start, unsigned len)
+{
+        unsigned long addr, end;
+        unsigned offset;
+
+        /*
+         * Round up the start address. It can start out unaligned as a result
+         * of stack start randomization.
+         */
+        start = PAGE_ALIGN(start);
+
+        /* Round the lowest possible end address up to a PMD boundary. */
+        end = (start + len + PMD_SIZE - 1) & PMD_MASK;
+        if (end >= TASK_SIZE_MAX)
+                end = TASK_SIZE_MAX;
+        end -= len;
+
+        if (end > start) {
+                offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+                addr = start + (offset << PAGE_SHIFT);
+        } else {
+                addr = start;
+        }
+
+        /*
+         * Forcibly align the final address in case we have a hardware
+         * issue that requires alignment for performance reasons.
+         */
+        addr = align_vdso_addr(addr);
+
+        return addr;
+}
+
 static int map_vdso_randomized(const struct vdso_image *image)
 {
-        unsigned long addr = vdso_addr(current->mm->start_stack,
-                                       image->size - image->sym_vvar_start);
+        unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);
+
         return map_vdso(image, addr);
 }
+#endif
 
 int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 {
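
For readers following the vdso_addr() code that this patch moves under #ifdef CONFIG_X86_64, the address-selection arithmetic can be tried out in isolation. Below is a minimal stand-alone C sketch of that logic, not the kernel implementation: the PAGE_SIZE/PMD_SIZE/TASK_SIZE_MAX values are assumed typical x86-64 constants, rand() stands in for the kernel's get_random_int(), the align_vdso_addr() hardware-alignment step is omitted, and the stack-top value in main() is made up for illustration.

/*
 * Sketch of the vdso_addr() address-selection arithmetic, user-space only.
 * Constants and the sample stack address are assumptions for illustration.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)             /* 4 KiB */
#define PMD_SIZE        (1UL << 21)                     /* 2 MiB */
#define PMD_MASK        (~(PMD_SIZE - 1))
#define TASK_SIZE_MAX   0x00007ffffffff000UL            /* assumed user VA limit */

#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned long pick_vdso_addr(unsigned long start, unsigned long len)
{
        unsigned long addr, end, offset;

        /* Stack start may be unaligned because of stack randomization. */
        start = PAGE_ALIGN(start);

        /* Round the lowest possible end address up to a PMD boundary. */
        end = (start + len + PMD_SIZE - 1) & PMD_MASK;
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;

        if (end > start) {
                /* Pick a random page-granular offset within [start, end]. */
                offset = rand() % (((end - start) >> PAGE_SHIFT) + 1);
                addr = start + (offset << PAGE_SHIFT);
        } else {
                addr = start;
        }
        return addr;
}

int main(void)
{
        unsigned long stack_top = 0x00007ffc12345678UL; /* made-up stack top */

        srand(time(NULL));
        printf("vdso would land at 0x%lx\n",
               pick_vdso_addr(stack_top, 3 * PAGE_SIZE));
        return 0;
}

Because the candidate end address is rounded up to a PMD boundary before the random offset is chosen, the vdso tends to share a page-table page with the stack top; this is also why the comment in the patch notes that the resulting distribution within a PMD is biased toward the end.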