author     Kees Cook <keescook@chromium.org>      2014-02-25 19:59:17 -0500
committer  H. Peter Anvin <hpa@linux.intel.com>   2014-02-25 20:07:26 -0500
commit     e2b32e6785138d92d2a40e0d0473575c8c7310a2 (patch)
tree       1522d169745c58697194f3656bb437363bdfcd78 /arch/x86/kernel/module.c
parent     cfbf8d4857c26a8a307fb7cd258074c9dcd8c691 (diff)
x86, kaslr: randomize module base load address
Randomize the load address of modules in the kernel to make kASLR effective
for modules.

Modules can only be loaded within a particular range of virtual address
space. This patch adds 10 bits of entropy to the load address by adding
1-1024 * PAGE_SIZE to the beginning range where modules are loaded.

The single base offset was chosen because randomizing each module load ends
up wasting/fragmenting memory too much. Prior approaches to minimizing
fragmentation while doing randomization tend to result in worse entropy than
just doing a single base address offset.

Example kASLR boot without this change, with a single module loaded:
---[ Modules ]---
0xffffffffc0000000-0xffffffffc0001000      4K     ro   GLB x   pte
0xffffffffc0001000-0xffffffffc0002000      4K     ro   GLB NX  pte
0xffffffffc0002000-0xffffffffc0004000      8K     RW   GLB NX  pte
0xffffffffc0004000-0xffffffffc0200000   2032K                  pte
0xffffffffc0200000-0xffffffffff000000   1006M                  pmd
---[ End Modules ]---

Example kASLR boot after this change, same module loaded:
---[ Modules ]---
0xffffffffc0000000-0xffffffffc0200000      2M                  pmd
0xffffffffc0200000-0xffffffffc03bf000   1788K                  pte
0xffffffffc03bf000-0xffffffffc03c0000      4K     ro   GLB x   pte
0xffffffffc03c0000-0xffffffffc03c1000      4K     ro   GLB NX  pte
0xffffffffc03c1000-0xffffffffc03c3000      8K     RW   GLB NX  pte
0xffffffffc03c3000-0xffffffffc0400000    244K                  pte
0xffffffffc0400000-0xffffffffff000000   1004M                  pmd
---[ End Modules ]---

Signed-off-by: Andy Honig <ahonig@google.com>
Link: http://lkml.kernel.org/r/20140226005916.GA27083@www.outflux.net
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/kernel/module.c')
-rw-r--r--   arch/x86/kernel/module.c   43
1 file changed, 40 insertions, 3 deletions
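For illustration only (not part of the patch): a minimal userspace sketch of the offset formula the diff below introduces, using rand() as a stand-in for the kernel's get_random_int() and assuming a 4 KiB PAGE_SIZE.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SIZE 4096UL	/* assumed x86 page size */

/*
 * Same formula as the patch: pick one of 1024 page-aligned slots, so the
 * module area base shifts by PAGE_SIZE..1024*PAGE_SIZE (~10 bits of
 * entropy); the kernel computes this once and reuses it until reboot.
 */
static unsigned long get_module_load_offset(void)
{
	/* rand() stands in for the kernel's get_random_int() */
	return ((unsigned long)rand() % 1024 + 1) * PAGE_SIZE;
}

int main(void)
{
	srand(time(NULL));
	unsigned long off = get_module_load_offset();
	printf("module base offset: %#lx (+%lu pages)\n", off, off / PAGE_SIZE);
	return 0;
}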
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 18be189368bb..49483137371f 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -28,6 +28,7 @@
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/jump_label.h>
+#include <linux/random.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -43,13 +44,49 @@ do { \
 } while (0)
 #endif
 
+#ifdef CONFIG_RANDOMIZE_BASE
+static unsigned long module_load_offset;
+static int randomize_modules = 1;
+
+static int __init parse_nokaslr(char *p)
+{
+	randomize_modules = 0;
+	return 0;
+}
+early_param("nokaslr", parse_nokaslr);
+
+static unsigned long int get_module_load_offset(void)
+{
+	if (randomize_modules) {
+		mutex_lock(&module_mutex);
+		/*
+		 * Calculate the module_load_offset the first time this
+		 * code is called. Once calculated it stays the same until
+		 * reboot.
+		 */
+		if (module_load_offset == 0)
+			module_load_offset =
+				(get_random_int() % 1024 + 1) * PAGE_SIZE;
+		mutex_unlock(&module_mutex);
+	}
+	return module_load_offset;
+}
+#else
+static unsigned long int get_module_load_offset(void)
+{
+	return 0;
+}
+#endif
+
 void *module_alloc(unsigned long size)
 {
 	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
-	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
-				NUMA_NO_NODE, __builtin_return_address(0));
+	return __vmalloc_node_range(size, 1,
+				    MODULES_VADDR + get_module_load_offset(),
+				    MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
+				    PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+				    __builtin_return_address(0));
 }
 
 #ifdef CONFIG_X86_32