author    Thomas Gleixner <tglx@linutronix.de>    2018-01-04 06:32:03 -0500
committer Thomas Gleixner <tglx@linutronix.de>    2018-01-04 18:39:57 -0500
commit    1dddd25125112ba49706518ac9077a1026a18f37
tree      1a7dbf5aabe4314da221e2323ce66c9d2c4e87c1
parent    f2078904810373211fb15f91888fba14c01a4acc
x86/kaslr: Fix the vaddr_end mess
vaddr_end for KASLR is only documented in the KASLR code itself and is
adjusted depending on config options. So it's not surprising that a change
of the memory layout causes KASLR to have the wrong vaddr_end. This can
map arbitrary stuff into other areas, causing hard-to-understand problems.

Remove the whole ifdef magic and define the start of the cpu_entry_area to
be the end of the KASLR vaddr range.

Add documentation to that effect.

Fixes: 92a0f81d8957 ("x86/cpu_entry_area: Move it out of the fixmap")
Reported-by: Benjamin Gilbert <benjamin.gilbert@coreos.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Benjamin Gilbert <benjamin.gilbert@coreos.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: stable <stable@vger.kernel.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Garnier <thgarnie@google.com>
Cc: Alexander Kuleshov <kuleshovmail@gmail.com>
Link: https://lkml.kernel.org/r/alpine.DEB.2.20.1801041320360.1771@nanos
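To see the effect of the change in isolation, here is a minimal standalone
C11 sketch (not part of the patch). The constants are placeholders standing
in for the real layout macros (__PAGE_OFFSET_BASE, CPU_ENTRY_AREA_BASE,
__START_KERNEL_map); the values match the 4-level-paging layout documented
in mm.txt, but the sketch is illustrative only, not kernel code:

#include <stdint.h>
#include <stdio.h>

/* Placeholder values matching the 4-level layout in mm.txt. */
#define PAGE_OFFSET_BASE    0xffff880000000000ULL /* stand-in for __PAGE_OFFSET_BASE */
#define CPU_ENTRY_AREA_BASE 0xfffffe0000000000ULL /* start of the cpu_entry_area mapping */
#define START_KERNEL_MAP    0xffffffff80000000ULL /* stand-in for __START_KERNEL_map */

/* After the patch there is exactly one definition of the range end,
 * valid for every configuration: */
static const uint64_t vaddr_start = PAGE_OFFSET_BASE;
static const uint64_t vaddr_end   = CPU_ENTRY_AREA_BASE;

/* Compile-time stand-ins for the BUILD_BUG_ON() checks in kaslr.c.
 * With the old ifdef chain, vaddr_end differed between configs, so a
 * single assertion like this could not be written at all. */
_Static_assert(PAGE_OFFSET_BASE < CPU_ENTRY_AREA_BASE,
               "KASLR randomization range must not be empty");
_Static_assert(CPU_ENTRY_AREA_BASE <= START_KERNEL_MAP,
               "KASLR range must end below the kernel text mapping");

int main(void)
{
        printf("KASLR range: %#llx - %#llx\n",
               (unsigned long long)vaddr_start,
               (unsigned long long)vaddr_end);
        return 0;
}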
 Documentation/x86/x86_64/mm.txt         |  6
 arch/x86/include/asm/pgtable_64_types.h |  8
 arch/x86/mm/kaslr.c                     | 32
 3 files changed, 22 insertions(+), 24 deletions(-)
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index f7dabe1f01e9..ea91cb61a602 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -12,6 +12,7 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
 ... unused hole ...
 ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB)
 ... unused hole ...
+                                    vaddr_end for KASLR
 fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
 fffffe8000000000 - fffffeffffffffff (=39 bits) LDT remap for PTI
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
@@ -37,6 +38,7 @@ ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
 ... unused hole ...
 ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
 ... unused hole ...
+                                    vaddr_end for KASLR
 fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
 ... unused hole ...
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
@@ -71,3 +73,7 @@ during EFI runtime calls.
 Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
 physical memory, vmalloc/ioremap space and virtual memory map are randomized.
 Their order is preserved but their base will be offset early at boot time.
+
+Be very careful vs. KASLR when changing anything here. The KASLR address
+range must not overlap with anything except the KASAN shadow area, which is
+correct as KASAN disables KASLR.
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 61b4b60bdc13..6b8f73dcbc2c 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -75,7 +75,13 @@ typedef struct { pteval_t pte; } pte_t;
 #define PGDIR_SIZE      (_AC(1, UL) << PGDIR_SHIFT)
 #define PGDIR_MASK      (~(PGDIR_SIZE - 1))
 
-/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
+/*
+ * See Documentation/x86/x86_64/mm.txt for a description of the memory map.
+ *
+ * Be very careful vs. KASLR when changing anything here. The KASLR address
+ * range must not overlap with anything except the KASAN shadow area, which
+ * is correct as KASAN disables KASLR.
+ */
 #define MAXMEM          _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
 
 #ifdef CONFIG_X86_5LEVEL
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 879ef930e2c2..aedebd2ebf1e 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -34,25 +34,14 @@
 #define TB_SHIFT 40
 
 /*
- * Virtual address start and end range for randomization. The end changes base
- * on configuration to have the highest amount of space for randomization.
- * It increases the possible random position for each randomized region.
+ * Virtual address start and end range for randomization.
  *
- * You need to add an if/def entry if you introduce a new memory region
- * compatible with KASLR. Your entry must be in logical order with memory
- * layout. For example, ESPFIX is before EFI because its virtual address is
- * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory() to
- * ensure that this order is correct and won't be changed.
+ * The end address could depend on more configuration options to make the
+ * highest amount of space for randomization available, but that's too hard
+ * to keep straight and caused issues already.
  */
 static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
-
-#if defined(CONFIG_X86_ESPFIX64)
-static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
-#elif defined(CONFIG_EFI)
-static const unsigned long vaddr_end = EFI_VA_END;
-#else
-static const unsigned long vaddr_end = __START_KERNEL_map;
-#endif
+static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;
 
 /* Default values */
 unsigned long page_offset_base = __PAGE_OFFSET_BASE;
@@ -101,15 +90,12 @@ void __init kernel_randomize_memory(void)
        unsigned long remain_entropy;
 
        /*
-        * All these BUILD_BUG_ON checks ensures the memory layout is
-        * consistent with the vaddr_start/vaddr_end variables.
+        * These BUILD_BUG_ON checks ensure the memory layout is consistent
+        * with the vaddr_start/vaddr_end variables. These checks are very
+        * limited....
         */
        BUILD_BUG_ON(vaddr_start >= vaddr_end);
-       BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
-                    vaddr_end >= EFI_VA_END);
-       BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
-                     IS_ENABLED(CONFIG_EFI)) &&
-                    vaddr_end >= __START_KERNEL_map);
+       BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
        BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
 
        if (!kaslr_memory_enabled())
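
For readers unfamiliar with BUILD_BUG_ON(): it is a compile-time assertion,
so the layout checks above cost nothing at runtime; a configuration that
violates them will not even build. A minimal sketch of the classic trick
behind it (the kernel's real definition is more elaborate and lives in
include/linux/build_bug.h; this is an illustration only):

/* Negative-array-size trick: if cond is true (and a compile-time
 * constant), char[1 - 2] has negative size and the compiler rejects
 * the translation unit; if cond is false, sizeof(char[1]) is fine. */
#define BUILD_BUG_ON_SKETCH(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

int main(void)
{
        BUILD_BUG_ON_SKETCH(1 > 2);     /* false: compiles fine */
        /* BUILD_BUG_ON_SKETCH(2 > 1);     true: build error here */
        return 0;
}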