author      Andy Lutomirski <luto@amacapital.net>    2014-12-19 19:04:11 -0500
committer   Andy Lutomirski <luto@amacapital.net>    2014-12-20 19:56:57 -0500
commit      394f56fe480140877304d342dec46d50dc823d46 (patch)
tree        7a5aaa30bdeb3bbcfb6bd08003bf3d1bd4bef1bf /arch
parent      e589c9e13aeb0c5539bf1314b3a78442ea8fc0c2 (diff)
x86_64, vdso: Fix the vdso address randomization algorithm
The theory behind vdso randomization is that it's mapped at a random offset above the top of the stack. To avoid wasting a page of memory for an extra page table, the vdso isn't supposed to extend past the lowest PMD into which it can fit. Other than that, the address should be a uniformly distributed address that meets all of the alignment requirements.

The current algorithm is buggy: the vdso has about a 50% probability of being at the very end of a PMD. The current algorithm also has a decent chance of failing outright due to incorrect handling of the case where the top of the stack is near the top of its PMD.

This fixes the implementation. The paxtest estimate of vdso "randomisation" improves from 11 bits to 18 bits. (Disclaimer: I don't know what the paxtest code is actually calculating.)

It's worth noting that this algorithm is inherently biased: the vdso is more likely to end up near the end of its PMD than near the beginning. Ideally we would either nix the PMD sharing requirement or jointly randomize the vdso and the stack to reduce the bias. In the meantime, this is a considerable improvement with basically no risk of compatibility issues, since the allowed outputs of the algorithm are unchanged.

As an easy test, doing this:

for i in `seq 10000`
  do grep -P vdso /proc/self/maps |cut -d- -f1
done |sort |uniq -d

used to produce lots of output (1445 lines on my most recent run). A tiny subset looks like this:

    7fffdfffe000
    7fffe01fe000
    7fffe05fe000
    7fffe07fe000
    7fffe09fe000
    7fffe0bfe000
    7fffe0dfe000

Note the suspicious fe000 endings. With the fix, I get a much more palatable 76 repeated addresses.

Reviewed-by: Kees Cook <keescook@chromium.org>
Cc: stable@vger.kernel.org
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
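To make the end-of-PMD clamping concrete, here is a minimal userspace sketch (not part of the commit) that replays the old offset selection and counts how often the result is clamped to the end of its PMD. The constants mirror common x86_64 values, rand() is only an illustrative stand-in for get_random_int(), the TASK_SIZE_MAX check is omitted, and the stack-top base address is made up.

/*
 * Userspace sketch of the old vdso_addr() offset selection (not kernel
 * code).  rand() stands in for get_random_int(); TASK_SIZE_MAX handling
 * is omitted and the stack-top addresses are invented for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT   12
#define PMD_SIZE     (1UL << 21)
#define PMD_MASK     (~(PMD_SIZE - 1))
#define PTRS_PER_PTE 512

int main(void)
{
        unsigned long len = 2UL << PAGE_SHIFT;  /* hypothetical vdso size */
        unsigned long clamped = 0, trials = 100000;

        for (unsigned long i = 0; i < trials; i++) {
                /* a random stack top somewhere inside a PMD */
                unsigned long start = 0x7fffe0000000UL +
                                      ((unsigned long)rand() % PMD_SIZE);
                unsigned long end = ((start + PMD_SIZE - 1) & PMD_MASK) - len;

                /* old algorithm: random page offset, then clamp to end */
                unsigned long offset = (unsigned long)rand() & (PTRS_PER_PTE - 1);
                unsigned long addr = start + (offset << PAGE_SHIFT);
                if (addr >= end) {
                        addr = end;     /* the clamp that causes the bias */
                        clamped++;
                }
        }
        printf("clamped to end of PMD in %lu of %lu trials (~%.0f%%)\n",
               clamped, trials, 100.0 * clamped / trials);
        return 0;
}

On a typical run this reports roughly half of the trials clamped, which is where the "about a 50% probability of being at the very end of a PMD" figure comes from.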
Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/vdso/vma.c   45
1 file changed, 29 insertions(+), 16 deletions(-)
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 009495b9ab4b..1c9f750c3859 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -41,12 +41,17 @@ void __init init_vdso_image(const struct vdso_image *image)
 
 struct linux_binprm;
 
-/* Put the vdso above the (randomized) stack with another randomized offset.
-   This way there is no hole in the middle of address space.
-   To save memory make sure it is still in the same PTE as the stack top.
-   This doesn't give that many random bits.
-
-   Only used for the 64-bit and x32 vdsos. */
+/*
+ * Put the vdso above the (randomized) stack with another randomized
+ * offset.  This way there is no hole in the middle of address space.
+ * To save memory make sure it is still in the same PTE as the stack
+ * top.  This doesn't give that many random bits.
+ *
+ * Note that this algorithm is imperfect: the distribution of the vdso
+ * start address within a PMD is biased toward the end.
+ *
+ * Only used for the 64-bit and x32 vdsos.
+ */
 static unsigned long vdso_addr(unsigned long start, unsigned len)
 {
 #ifdef CONFIG_X86_32
@@ -54,22 +59,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 #else
         unsigned long addr, end;
         unsigned offset;
-        end = (start + PMD_SIZE - 1) & PMD_MASK;
+
+        /*
+         * Round up the start address.  It can start out unaligned as a result
+         * of stack start randomization.
+         */
+        start = PAGE_ALIGN(start);
+
+        /* Round the lowest possible end address up to a PMD boundary. */
+        end = (start + len + PMD_SIZE - 1) & PMD_MASK;
         if (end >= TASK_SIZE_MAX)
                 end = TASK_SIZE_MAX;
         end -= len;
-        /* This loses some more bits than a modulo, but is cheaper */
-        offset = get_random_int() & (PTRS_PER_PTE - 1);
-        addr = start + (offset << PAGE_SHIFT);
-        if (addr >= end)
-                addr = end;
+
+        if (end > start) {
+                offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+                addr = start + (offset << PAGE_SHIFT);
+        } else {
+                addr = start;
+        }
 
         /*
-         * page-align it here so that get_unmapped_area doesn't
-         * align it wrongfully again to the next page. addr can come in 4K
-         * unaligned here as a result of stack start randomization.
+         * Forcibly align the final address in case we have a hardware
+         * issue that requires alignment for performance reasons.
          */
-        addr = PAGE_ALIGN(addr);
         addr = align_vdso_addr(addr);
 
         return addr;
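
As a sanity check on the new bounds computation, here is a small userspace snippet (not from the commit; the start address and the two-page vdso length are made-up illustrative values) that works through the case the old code mishandled, where the page-aligned stack top sits one page below a PMD boundary.

/*
 * Userspace check of the boundary case (not kernel code).  The addresses
 * and the two-page vdso length are illustrative assumptions.
 */
#include <assert.h>

#define PAGE_SHIFT 12
#define PMD_SIZE   (1UL << 21)
#define PMD_MASK   (~(PMD_SIZE - 1))

int main(void)
{
        unsigned long start = 0x7fffe03ff000UL; /* one page below a PMD boundary */
        unsigned long len = 0x2000UL;           /* hypothetical two-page vdso */

        /* Old code: the candidate range collapses below the stack top. */
        unsigned long old_end = ((start + PMD_SIZE - 1) & PMD_MASK) - len;
        assert(old_end < start);

        /* New code: the lowest possible end is rounded up to the next PMD boundary. */
        unsigned long new_end = ((start + len + PMD_SIZE - 1) & PMD_MASK) - len;
        assert(new_end > start);
        assert(((new_end - start) >> PAGE_SHIFT) + 1 == 512); /* 512 candidate pages */

        return 0;
}

With the old expression the usable range is empty and the clamp would have placed the vdso below the stack top; with the new expression the vdso gets 512 candidate page positions starting at the stack top.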