author		Borislav Petkov <bp@suse.de>	2015-04-01 06:49:52 -0400
committer	Ingo Molnar <mingo@kernel.org>	2015-04-03 09:26:15 -0400
commit		78cac48c0434c82e860fade3cd0420a7a4adbb08 (patch)
tree		76e1afefb1a0f9baad05487738de9f1b4a126aa7 /arch/x86/kernel
parent		47091e3c5b072daca29a15d2a3caf40359b0d140 (diff)
x86/mm/KASLR: Propagate KASLR status to kernel proper
Commit

  e2b32e678513 ("x86, kaslr: randomize module base load address")

made module base address randomization unconditional and did not take into
account that KASLR may already have been disabled, either because of
CONFIG_HIBERNATION or because of the "nokaslr" command line option. For more
information, see the (now reverted) commit

  f47233c2d34f ("x86/mm/ASLR: Propagate base load address calculation")

In order to propagate KASLR status to kernel proper, we need a single bit in
boot_params.hdr.loadflags, and we've chosen bit 1, thus leaving the top-down
allocated bits free for use by the bootloader.

Originally-From: Jiri Kosina <jkosina@suse.cz>
Suggested-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
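The helper this patch switches to, kaslr_enabled(), is defined outside
arch/x86/kernel and therefore does not appear in the diffstat below. Going by
the commit message, it simply tests bit 1 of boot_params.hdr.loadflags; a
minimal sketch, with the flag macro assumed from the companion bootparam.h
change that is not part of this diff:

	/* Sketch only: the real definitions live in <asm/bootparam.h> and
	 * <asm/setup.h>, outside this diff. Bit 1 of loadflags carries the
	 * KASLR status, per the commit message above. */
	#define KASLR_FLAG	(1 << 1)

	static inline bool kaslr_enabled(void)
	{
		return !!(boot_params.hdr.loadflags & KASLR_FLAG);
	}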
Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/module.c | 11 ++---------
 arch/x86/kernel/setup.c  | 13 +++++++++----
 2 files changed, 11 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index d1ac80b72c72..005c03e93fc5 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -33,6 +33,7 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/setup.h>
 
 #if 0
 #define DEBUGP(fmt, ...) \
@@ -47,21 +48,13 @@ do { \
 
 #ifdef CONFIG_RANDOMIZE_BASE
 static unsigned long module_load_offset;
-static int randomize_modules = 1;
 
 /* Mutex protects the module_load_offset. */
 static DEFINE_MUTEX(module_kaslr_mutex);
 
-static int __init parse_nokaslr(char *p)
-{
-	randomize_modules = 0;
-	return 0;
-}
-early_param("nokaslr", parse_nokaslr);
-
 static unsigned long int get_module_load_offset(void)
 {
-	if (randomize_modules) {
+	if (kaslr_enabled()) {
 		mutex_lock(&module_kaslr_mutex);
 		/*
 		 * Calculate the module_load_offset the first time this
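With parse_nokaslr() removed here, "nokaslr" is handled before kernel proper
runs and the result is handed over via the loadflags bit. A sketch of the
producer side in the boot/decompressor code (outside arch/x86/kernel, so not
shown in this diff; the function name and placement are assumptions):

	/* Sketch only: once the boot path decides randomization actually
	 * happened (i.e., not disabled by "nokaslr" or hibernation support),
	 * it records that decision for kernel proper to query later. */
	static void record_kaslr_status(struct boot_params *boot_params)
	{
		boot_params->hdr.loadflags |= KASLR_FLAG;	/* bit 1 */
	}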
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 0a2421cca01f..014466b152b5 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -832,10 +832,15 @@ static void __init trim_low_memory_range(void)
 static int
 dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
 {
-	pr_emerg("Kernel Offset: 0x%lx from 0x%lx "
-		 "(relocation range: 0x%lx-0x%lx)\n",
-		 (unsigned long)&_text - __START_KERNEL, __START_KERNEL,
-		 __START_KERNEL_map, MODULES_VADDR-1);
+	if (kaslr_enabled()) {
+		pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
+			 (unsigned long)&_text - __START_KERNEL,
+			 __START_KERNEL,
+			 __START_KERNEL_map,
+			 MODULES_VADDR-1);
+	} else {
+		pr_emerg("Kernel Offset: disabled\n");
+	}
 
 	return 0;
 }
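For context, dump_kernel_offset() is a panic notifier: it runs on panic so the
randomization offset (or, after this patch, "disabled") appears in the oops
output. Its registration lives elsewhere in setup.c and is untouched by this
patch; roughly, it looks like this (reproduced from memory of the surrounding
file, not taken from this diff):

	static struct notifier_block kernel_offset_notifier = {
		.notifier_call = dump_kernel_offset
	};

	static int __init register_kernel_offset_dumper(void)
	{
		atomic_notifier_chain_register(&panic_notifier_list,
						&kernel_offset_notifier);
		return 0;
	}
	__initcall(register_kernel_offset_dumper);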