 arch/x86/vdso/vma.c | 45 +++++++++++++++++++++++++++++-----------------
 1 file changed, 29 insertions(+), 16 deletions(-)
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 009495b9ab4b..1c9f750c3859 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -41,12 +41,17 @@ void __init init_vdso_image(const struct vdso_image *image)
 
 struct linux_binprm;
 
-/* Put the vdso above the (randomized) stack with another randomized offset.
-   This way there is no hole in the middle of address space.
-   To save memory make sure it is still in the same PTE as the stack top.
-   This doesn't give that many random bits.
-
-   Only used for the 64-bit and x32 vdsos. */
+/*
+ * Put the vdso above the (randomized) stack with another randomized
+ * offset.  This way there is no hole in the middle of address space.
+ * To save memory make sure it is still in the same PTE as the stack
+ * top.  This doesn't give that many random bits.
+ *
+ * Note that this algorithm is imperfect: the distribution of the vdso
+ * start address within a PMD is biased toward the end.
+ *
+ * Only used for the 64-bit and x32 vdsos.
+ */
 static unsigned long vdso_addr(unsigned long start, unsigned len)
 {
 #ifdef CONFIG_X86_32
@@ -54,22 +59,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 #else
 	unsigned long addr, end;
 	unsigned offset;
-	end = (start + PMD_SIZE - 1) & PMD_MASK;
+
+	/*
+	 * Round up the start address.  It can start out unaligned as a result
+	 * of stack start randomization.
+	 */
+	start = PAGE_ALIGN(start);
+
+	/* Round the lowest possible end address up to a PMD boundary. */
+	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
 	if (end >= TASK_SIZE_MAX)
 		end = TASK_SIZE_MAX;
 	end -= len;
-	/* This loses some more bits than a modulo, but is cheaper */
-	offset = get_random_int() & (PTRS_PER_PTE - 1);
-	addr = start + (offset << PAGE_SHIFT);
-	if (addr >= end)
-		addr = end;
+
+	if (end > start) {
+		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+		addr = start + (offset << PAGE_SHIFT);
+	} else {
+		addr = start;
+	}
 
 	/*
-	 * page-align it here so that get_unmapped_area doesn't
-	 * align it wrongfully again to the next page. addr can come in 4K
-	 * unaligned here as a result of stack start randomization.
+	 * Forcibly align the final address in case we have a hardware
+	 * issue that requires alignment for performance reasons.
 	 */
-	addr = PAGE_ALIGN(addr);
 	addr = align_vdso_addr(addr);
 
 	return addr;

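For context on the change above: the old scheme drew a 9-bit page offset with a mask and clamped any overshoot onto the last valid address, so whenever the window between start and end is smaller than PTRS_PER_PTE pages, most draws collapse onto end. The new scheme takes a modulo over the number of valid page offsets, which is uniform across the window. The following is a minimal user-space sketch of that difference, not kernel code: it assumes PAGE_SHIFT = 12, PTRS_PER_PTE = 512, an illustrative 64-page window, and uses rand() as a stand-in for get_random_int().

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT   12   /* assumed: 4 KiB pages */
#define PTRS_PER_PTE 512  /* assumed: x86-64 value */

int main(void)
{
	const unsigned long start = 0;                /* page-aligned window start */
	const unsigned long end = 64UL << PAGE_SHIFT; /* 64 pages above start */
	const int trials = 1000000;
	long hits_old = 0, hits_new = 0;
	int i;

	srand(1);
	for (i = 0; i < trials; i++) {
		/* Old scheme: mask to 9 random bits, then clamp overshoots to end. */
		unsigned long offset = (unsigned long)rand() & (PTRS_PER_PTE - 1);
		unsigned long addr = start + (offset << PAGE_SHIFT);
		if (addr >= end)
			addr = end;
		if (addr == end)
			hits_old++;

		/* New scheme: uniform modulo over the valid page offsets. */
		offset = (unsigned long)rand() % (((end - start) >> PAGE_SHIFT) + 1);
		if (start + (offset << PAGE_SHIFT) == end)
			hits_new++;
	}

	printf("P(addr == end): old %.4f  new %.4f\n",
	       (double)hits_old / trials, (double)hits_new / trials);
	return 0;
}

With this 64-page window, the old scheme lands on end for 448 of the 512 possible draws (about 87%), while the new scheme gives each of the 65 candidate pages equal probability (about 1.5%). The residual bias within a PMD mentioned in the patch's new comment is a separate, smaller imperfection and is not modeled here.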