author     Ingo Molnar <mingo@kernel.org>  2014-12-21 05:16:49 -0500
committer  Ingo Molnar <mingo@kernel.org>  2014-12-21 05:16:49 -0500
commit     fbe1bf140671619508dfa575d74a185ae53c5dbb (patch)
tree       628721c854003ce977435dd4064521e0c9fefa4c /arch
parent     97bf6af1f928216fd6c5a66e8a57bfa95a659672 (diff)
parent     394f56fe480140877304d342dec46d50dc823d46 (diff)
Merge tag 'pr-20141220-x86-vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/luto/linux into x86/urgent
Pull a VDSO fix from Andy Lutomirski:

  "One vdso fix for a longstanding ASLR bug that's been in the news
   lately.  The vdso base address has always been randomized, and I
   don't think there's anything particularly wrong with the range over
   which it's randomized, but the implementation seems to have been
   buggy since the very beginning.

   This fixes the implementation to remove a large bias that caused a
   small fraction of possible vdso load addresses to be vastly more
   likely than the rest of the possible addresses."

Signed-off-by: Ingo Molnar <mingo@kernel.org>
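To make the bias concrete, here is a minimal user-space sketch (not kernel code) of the pre-fix selection step. It assumes illustrative values: 4 KiB pages, 2 MiB PMDs, 512 PTEs per PMD, a made-up stack top 300 pages into its PMD, and rand() standing in for get_random_int(). Because every random page offset that would land past 'end' is clamped onto 'end', that single address collects a large share of the probability mass.

/*
 * Hedged sketch of the old, biased vdso address pick.  All constants and
 * the example stack address are illustrative, not taken from the kernel.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT   12
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define PMD_SIZE     (1UL << 21)            /* 2 MiB */
#define PMD_MASK     (~(PMD_SIZE - 1))
#define PTRS_PER_PTE 512

int main(void)
{
	/* Illustrative: a stack top 300 pages into its PMD, an 8 KiB vdso. */
	unsigned long start = 0x7ffd00000000UL + 300 * PAGE_SIZE;
	unsigned long len = 2 * PAGE_SIZE;
	unsigned long end = ((start + PMD_SIZE - 1) & PMD_MASK) - len;
	long hits_at_end = 0, trials = 1000000;

	for (long i = 0; i < trials; i++) {
		/* Old scheme: uniform over 512 pages, then clamp to 'end'. */
		unsigned long offset = rand() & (PTRS_PER_PTE - 1);
		unsigned long addr = start + (offset << PAGE_SHIFT);
		if (addr >= end)
			addr = end;
		if (addr == end)
			hits_at_end++;
	}
	printf("addr == end in %.1f%% of trials (uniform would be ~0.5%%)\n",
	       100.0 * hits_at_end / trials);
	return 0;
}

Under those assumptions the clamp lands on 'end' in roughly 59% of trials, whereas a uniform choice over the 211 candidate slots would give each address only about 0.5%.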
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/vdso/vma.c  |  45
1 file changed, 29 insertions(+), 16 deletions(-)
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 009495b9ab4b..1c9f750c3859 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -41,12 +41,17 @@ void __init init_vdso_image(const struct vdso_image *image)
 
 struct linux_binprm;
 
-/* Put the vdso above the (randomized) stack with another randomized offset.
-   This way there is no hole in the middle of address space.
-   To save memory make sure it is still in the same PTE as the stack top.
-   This doesn't give that many random bits.
-
-   Only used for the 64-bit and x32 vdsos. */
+/*
+ * Put the vdso above the (randomized) stack with another randomized
+ * offset.  This way there is no hole in the middle of address space.
+ * To save memory make sure it is still in the same PTE as the stack
+ * top.  This doesn't give that many random bits.
+ *
+ * Note that this algorithm is imperfect: the distribution of the vdso
+ * start address within a PMD is biased toward the end.
+ *
+ * Only used for the 64-bit and x32 vdsos.
+ */
 static unsigned long vdso_addr(unsigned long start, unsigned len)
 {
 #ifdef CONFIG_X86_32
@@ -54,22 +59,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 #else
 	unsigned long addr, end;
 	unsigned offset;
-	end = (start + PMD_SIZE - 1) & PMD_MASK;
+
+	/*
+	 * Round up the start address.  It can start out unaligned as a result
+	 * of stack start randomization.
+	 */
+	start = PAGE_ALIGN(start);
+
+	/* Round the lowest possible end address up to a PMD boundary. */
+	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
 	if (end >= TASK_SIZE_MAX)
 		end = TASK_SIZE_MAX;
 	end -= len;
-	/* This loses some more bits than a modulo, but is cheaper */
-	offset = get_random_int() & (PTRS_PER_PTE - 1);
-	addr = start + (offset << PAGE_SHIFT);
-	if (addr >= end)
-		addr = end;
+
+	if (end > start) {
+		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+		addr = start + (offset << PAGE_SHIFT);
+	} else {
+		addr = start;
+	}
 
 	/*
-	 * page-align it here so that get_unmapped_area doesn't
-	 * align it wrongfully again to the next page. addr can come in 4K
-	 * unaligned here as a result of stack start randomization.
+	 * Forcibly align the final address in case we have a hardware
+	 * issue that requires alignment for performance reasons.
 	 */
-	addr = PAGE_ALIGN(addr);
 	addr = align_vdso_addr(addr);
 
 	return addr;
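For comparison, here is a similarly hedged user-space sketch of just the replacement selection step, again with illustrative constants and rand() in place of get_random_int(). It shows that the modulo form draws uniformly over every page-aligned slot in [start, end], both endpoints included, instead of piling probability onto the last one.

/*
 * Hedged sketch of the new, unbiased pick.  The window size and base
 * address are made up for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	/* Illustrative window: 211 page-aligned candidate addresses. */
	unsigned long start = 0x7ffd00000000UL;
	unsigned long end = start + 210 * PAGE_SIZE;
	unsigned long slots = ((end - start) >> PAGE_SHIFT) + 1;   /* == 211 */
	long counts[211] = { 0 };

	for (long i = 0; i < 1000000; i++) {
		/* New scheme: modulo over the slot count, no clamping needed. */
		unsigned long offset = rand() % slots;
		counts[offset]++;
	}
	printf("%lu slots; first got %ld hits, last got %ld hits (roughly equal)\n",
	       slots, counts[0], counts[slots - 1]);
	return 0;
}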