author     Thomas Garnier <thgarnie@google.com>    2016-06-21 20:47:02 -0400
committer  Ingo Molnar <mingo@kernel.org>          2016-07-08 11:33:46 -0400
commit     0483e1fa6e09d4948272680f691dccb1edb9677f (patch)
tree       53bdd068412075b976d0dff8e665e02e16844b56
parent     b234e8a09003af108d3573f0369e25c080676b14 (diff)
x86/mm: Implement ASLR for kernel memory regions
Randomizes the virtual address space of kernel memory regions for x86_64.
This first patch adds the infrastructure and does not randomize any region.
The following patches will randomize the physical memory mapping, vmalloc
and vmemmap regions.

This security feature mitigates exploits relying on predictable kernel
addresses. These addresses can be used to disclose the kernel modules base
addresses or corrupt specific structures to elevate privileges bypassing
the current implementation of KASLR. This feature can be enabled with the
CONFIG_RANDOMIZE_MEMORY option.

The order of each memory region is not changed. The feature looks at the
available space for the regions based on different configuration options
and randomizes the base and space between each. The size of the physical
memory mapping is the available physical memory. No performance impact was
detected while testing the feature.

Entropy is generated using the KASLR early boot functions now shared in the
lib directory (originally written by Kees Cook). Randomization is done on
PGD & PUD page table levels to increase possible addresses. The physical
memory mapping code was adapted to support PUD level virtual addresses. In
the best configuration, this implementation provides on average 30,000
possible virtual addresses for each memory region. An additional low memory
page is used to ensure each CPU can start with a PGD aligned virtual
address (for realmode).

x86/dump_pagetables was updated to correctly display each region.

Updated documentation on the x86_64 memory layout accordingly.

Performance data, after all patches in the series:

Kernbench shows almost no difference (+/- less than 1%):

Before:
Average Optimal load -j 12 Run (std deviation):
Elapsed Time 102.63 (1.2695)
User Time 1034.89 (1.18115)
System Time 87.056 (0.456416)
Percent CPU 1092.9 (13.892)
Context Switches 199805 (3455.33)
Sleeps 97907.8 (900.636)

After:
Average Optimal load -j 12 Run (std deviation):
Elapsed Time 102.489 (1.10636)
User Time 1034.86 (1.36053)
System Time 87.764 (0.49345)
Percent CPU 1095 (12.7715)
Context Switches 199036 (4298.1)
Sleeps 97681.6 (1031.11)

Hackbench shows 0% difference on average (hackbench 90 repeated 10 times):

attempt,before,after
1,0.076,0.069
2,0.072,0.069
3,0.066,0.066
4,0.066,0.068
5,0.066,0.067
6,0.066,0.069
7,0.067,0.066
8,0.063,0.067
9,0.067,0.065
10,0.068,0.071
average,0.0677,0.0677

Signed-off-by: Thomas Garnier <thgarnie@google.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Kuleshov <kuleshovmail@gmail.com>
Cc: Alexander Popov <alpopov@ptsecurity.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bp@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Lv Zheng <lv.zheng@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephen Smalley <sds@tycho.nsa.gov>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: kernel-hardening@lists.openwall.com
Cc: linux-doc@vger.kernel.org
Link: http://lkml.kernel.org/r/1466556426-32664-6-git-send-email-keescook@chromium.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
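As a rough illustration of where the "30,000 possible virtual addresses" figure comes from: randomization happens in PUD-sized (1 GB) steps, so the number of candidate bases per region is roughly the leftover virtual space, shared between the regions, counted in 1 GB units. The userspace sketch below is not part of the patch; the 120 TB span and the region sizes are illustrative assumptions, not the exact values used later in the series (64-bit host assumed).

#include <stdio.h>

#define PUD_SHIFT	30	/* 1GB randomization steps (4-level paging) */
#define TB_SHIFT	40

int main(void)
{
	/* Illustrative values: ~120TB randomizable span, three regions. */
	unsigned long span_tb = 120;
	unsigned long region_tb[] = { 1, 32, 1 };	/* phys map (RAM), vmalloc, vmemmap */
	unsigned long nr = sizeof(region_tb) / sizeof(region_tb[0]);
	unsigned long remain = span_tb << TB_SHIFT;
	unsigned long i;

	/* Slack is whatever the regions themselves do not consume. */
	for (i = 0; i < nr; i++)
		remain -= region_tb[i] << TB_SHIFT;

	/* Each region gets an equal share of the slack, in PUD-sized slots. */
	printf("~%lu PUD-aligned base addresses per region\n",
	       (remain / nr) >> PUD_SHIFT);
	return 0;
}

With a physical memory mapping sized to a small amount of installed RAM, this lands near the 30,000 figure quoted above; more RAM shrinks the slack and therefore the number of candidates.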
-rw-r--r--  Documentation/x86/x86_64/mm.txt    4
-rw-r--r--  arch/x86/Kconfig                  17
-rw-r--r--  arch/x86/include/asm/kaslr.h       6
-rw-r--r--  arch/x86/include/asm/pgtable.h     7
-rw-r--r--  arch/x86/kernel/setup.c            3
-rw-r--r--  arch/x86/mm/Makefile               1
-rw-r--r--  arch/x86/mm/dump_pagetables.c     16
-rw-r--r--  arch/x86/mm/init.c                 1
-rw-r--r--  arch/x86/mm/kaslr.c              152
9 files changed, 202 insertions, 5 deletions
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index 5aa738346062..8c7dd5957ae1 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -39,4 +39,8 @@ memory window (this size is arbitrary, it can be raised later if needed).
 The mappings are not part of any other kernel PGD and are only available
 during EFI runtime calls.
 
+Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
+physical memory, vmalloc/ioremap space and virtual memory map are randomized.
+Their order is preserved but their base will be offset early at boot time.
+
 -Andi Kleen, Jul 2004
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 930fe88095d3..9719b8eb38d3 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1993,6 +1993,23 @@ config PHYSICAL_ALIGN
 
 	  Don't change this unless you know what you are doing.
 
+config RANDOMIZE_MEMORY
+	bool "Randomize the kernel memory sections"
+	depends on X86_64
+	depends on RANDOMIZE_BASE
+	default RANDOMIZE_BASE
+	---help---
+	   Randomizes the base virtual address of kernel memory sections
+	   (physical memory mapping, vmalloc & vmemmap). This security feature
+	   makes exploits relying on predictable memory locations less reliable.
+
+	   The order of allocations remains unchanged. Entropy is generated in
+	   the same way as RANDOMIZE_BASE. The current implementation in the
+	   optimal configuration has on average 30,000 different possible
+	   virtual addresses for each memory section.
+
+	   If unsure, say N.
+
 config HOTPLUG_CPU
 	bool "Support for hot-pluggable CPUs"
 	depends on SMP
diff --git a/arch/x86/include/asm/kaslr.h b/arch/x86/include/asm/kaslr.h
index 5547438db5ea..683c9d736314 100644
--- a/arch/x86/include/asm/kaslr.h
+++ b/arch/x86/include/asm/kaslr.h
@@ -3,4 +3,10 @@
 
 unsigned long kaslr_get_random_long(const char *purpose);
 
+#ifdef CONFIG_RANDOMIZE_MEMORY
+void kernel_randomize_memory(void);
+#else
+static inline void kernel_randomize_memory(void) { }
+#endif /* CONFIG_RANDOMIZE_MEMORY */
+
 #endif
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index d455bef39e9c..5472682a307f 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -732,11 +732,16 @@ void early_alloc_pgt_buf(void);
 #ifdef CONFIG_X86_64
 /* Realmode trampoline initialization. */
 extern pgd_t trampoline_pgd_entry;
-static inline void __meminit init_trampoline(void)
+static inline void __meminit init_trampoline_default(void)
 {
 	/* Default trampoline pgd value */
 	trampoline_pgd_entry = init_level4_pgt[pgd_index(__PAGE_OFFSET)];
 }
+# ifdef CONFIG_RANDOMIZE_MEMORY
+void __meminit init_trampoline(void);
+# else
+# define init_trampoline init_trampoline_default
+# endif
 #else
 static inline void init_trampoline(void) { }
 #endif
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c4e7b3991b60..a2616584b6e9 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -113,6 +113,7 @@
 #include <asm/prom.h>
 #include <asm/microcode.h>
 #include <asm/mmu_context.h>
+#include <asm/kaslr.h>
 
 /*
  * max_low_pfn_mapped: highest direct mapped pfn under 4GB
@@ -942,6 +943,8 @@ void __init setup_arch(char **cmdline_p)
 
 	x86_init.oem.arch_setup();
 
+	kernel_randomize_memory();
+
 	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
 	setup_memory_map();
 	parse_setup_data();
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 62c0043a5fd5..96d2b847e09e 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -37,4 +37,5 @@ obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
 
 obj-$(CONFIG_X86_INTEL_MPX)	+= mpx.o
 obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)	+= pkeys.o
+obj-$(CONFIG_RANDOMIZE_MEMORY)	+= kaslr.o
 
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 99bfb192803f..9a17250bcbe0 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -72,9 +72,9 @@ static struct addr_marker address_markers[] = {
72 { 0, "User Space" }, 72 { 0, "User Space" },
73#ifdef CONFIG_X86_64 73#ifdef CONFIG_X86_64
74 { 0x8000000000000000UL, "Kernel Space" }, 74 { 0x8000000000000000UL, "Kernel Space" },
75 { PAGE_OFFSET, "Low Kernel Mapping" }, 75 { 0/* PAGE_OFFSET */, "Low Kernel Mapping" },
76 { VMALLOC_START, "vmalloc() Area" }, 76 { 0/* VMALLOC_START */, "vmalloc() Area" },
77 { VMEMMAP_START, "Vmemmap" }, 77 { 0/* VMEMMAP_START */, "Vmemmap" },
78# ifdef CONFIG_X86_ESPFIX64 78# ifdef CONFIG_X86_ESPFIX64
79 { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, 79 { ESPFIX_BASE_ADDR, "ESPfix Area", 16 },
80# endif 80# endif
@@ -434,8 +434,16 @@ void ptdump_walk_pgd_level_checkwx(void)
 
 static int __init pt_dump_init(void)
 {
+	/*
+	 * Various markers are not compile-time constants, so assign them
+	 * here.
+	 */
+#ifdef CONFIG_X86_64
+	address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
+	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
+	address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
+#endif
 #ifdef CONFIG_X86_32
-	/* Not a compile-time constant on x86-32 */
 	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
 	address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
 # ifdef CONFIG_HIGHMEM
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 4252acdfcbbd..cc82830bc8c4 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -17,6 +17,7 @@
 #include <asm/proto.h>
 #include <asm/dma.h>		/* for MAX_DMA_PFN */
 #include <asm/microcode.h>
+#include <asm/kaslr.h>
 
 /*
  * We need to define the tracepoints somewhere, and tlb.c
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
new file mode 100644
index 000000000000..d5380a48e8fb
--- /dev/null
+++ b/arch/x86/mm/kaslr.c
@@ -0,0 +1,152 @@
+/*
+ * This file implements KASLR memory randomization for x86_64. It randomizes
+ * the virtual address space of kernel memory regions (physical memory
+ * mapping, vmalloc & vmemmap). This security feature mitigates
+ * exploits relying on predictable kernel addresses.
+ *
+ * Entropy is generated using the KASLR early boot functions now shared in
+ * the lib directory (originally written by Kees Cook). Randomization is
+ * done on PGD & PUD page table levels to increase possible addresses. The
+ * physical memory mapping code was adapted to support PUD level virtual
+ * addresses. In the best configuration, this implementation provides on
+ * average 30,000 possible virtual addresses for each memory region. An
+ * additional low memory page is used to ensure each CPU can start with a
+ * PGD aligned virtual address (for realmode).
+ *
+ * The order of each memory region is not changed. The feature looks at
+ * the available space for the regions based on different configuration
+ * options and randomizes the base and space between each. The size of the
+ * physical memory mapping is the available physical memory.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/random.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/setup.h>
+#include <asm/kaslr.h>
+
+#include "mm_internal.h"
+
+#define TB_SHIFT 40
+
+/*
+ * Virtual address start and end range for randomization. The end changes based
+ * on the configuration to have the highest amount of space for randomization.
+ * It increases the possible random position for each randomized region.
+ *
+ * You need to add an #ifdef entry if you introduce a new memory region
+ * compatible with KASLR. Your entry must be in logical order with the memory
+ * layout. For example, ESPFIX is before EFI because its virtual address is
+ * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory()
+ * to ensure that this order is correct and won't be changed.
+ */
+static const unsigned long vaddr_start;
+static const unsigned long vaddr_end;
+
+/*
+ * Memory regions randomized by KASLR (except modules that use a separate logic
+ * earlier during boot). The list is ordered based on virtual addresses. This
+ * order is kept after randomization.
+ */
+static __initdata struct kaslr_memory_region {
+	unsigned long *base;
+	unsigned long size_tb;
+} kaslr_regions[] = {
+};
+
+/* Get size in bytes used by the memory region */
+static inline unsigned long get_padding(struct kaslr_memory_region *region)
+{
+	return (region->size_tb << TB_SHIFT);
+}
+
+/*
+ * Apply no randomization if KASLR was disabled at boot or if KASAN
+ * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
+ */
+static inline bool kaslr_memory_enabled(void)
+{
+	return kaslr_enabled() && !config_enabled(CONFIG_KASAN);
+}
+
+/* Initialize base and padding for each memory region randomized with KASLR */
+void __init kernel_randomize_memory(void)
+{
+	size_t i;
+	unsigned long vaddr = vaddr_start;
+	unsigned long rand;
+	struct rnd_state rand_state;
+	unsigned long remain_entropy;
+
+	if (!kaslr_memory_enabled())
+		return;
+
+	/* Calculate entropy available between regions */
+	remain_entropy = vaddr_end - vaddr_start;
+	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
+		remain_entropy -= get_padding(&kaslr_regions[i]);
+
+	prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));
+
+	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
+		unsigned long entropy;
+
+		/*
+		 * Select a random virtual address using the extra entropy
+		 * available.
+		 */
+		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
+		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
+		entropy = (rand % (entropy + 1)) & PUD_MASK;
+		vaddr += entropy;
+		*kaslr_regions[i].base = vaddr;
+
+		/*
+		 * Jump the region and add a minimum padding based on
+		 * randomization alignment.
+		 */
+		vaddr += get_padding(&kaslr_regions[i]);
+		vaddr = round_up(vaddr + 1, PUD_SIZE);
+		remain_entropy -= entropy;
+	}
+}
+
+/*
+ * Create PGD aligned trampoline table to allow real mode initialization
+ * of additional CPUs. Consume only 1 low memory page.
+ */
+void __meminit init_trampoline(void)
+{
+	unsigned long paddr, paddr_next;
+	pgd_t *pgd;
+	pud_t *pud_page, *pud_page_tramp;
+	int i;
+
+	if (!kaslr_memory_enabled()) {
+		init_trampoline_default();
+		return;
+	}
+
+	pud_page_tramp = alloc_low_page();
+
+	paddr = 0;
+	pgd = pgd_offset_k((unsigned long)__va(paddr));
+	pud_page = (pud_t *) pgd_page_vaddr(*pgd);
+
+	for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
+		pud_t *pud, *pud_tramp;
+		unsigned long vaddr = (unsigned long)__va(paddr);
+
+		pud_tramp = pud_page_tramp + pud_index(paddr);
+		pud = pud_page + pud_index(vaddr);
+		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
+
+		*pud_tramp = *pud;
+	}
+
+	set_pgd(&trampoline_pgd_entry,
+		__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
+}
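For readers who want to experiment with the placement logic without booting a kernel, here is a minimal userspace sketch of the loop in kernel_randomize_memory() above. The region sizes, the start/end addresses, and the use of rand() (standing in for the prandom state seeded from kaslr_get_random_long()) are illustrative assumptions on a 64-bit host; kaslr_regions[] is still empty in this patch and is only populated by the follow-up patches.

#include <stdio.h>
#include <stdlib.h>

#define PUD_SHIFT	30
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE - 1))
#define TB_SHIFT	40

struct region {
	const char *name;
	unsigned long size_tb;
};

int main(void)
{
	/* Illustrative regions; the real list lives in kaslr_regions[]. */
	struct region regions[] = {
		{ "physical mapping", 4 },
		{ "vmalloc",          32 },
		{ "vmemmap",          1 },
	};
	unsigned long nr = sizeof(regions) / sizeof(regions[0]);
	unsigned long vaddr = 0xffff880000000000UL;	/* assumed vaddr_start */
	unsigned long end = 0xffffffef00000000UL;	/* assumed vaddr_end */
	unsigned long remain = end - vaddr;
	unsigned long i, entropy, rnd;

	/* Entropy is whatever virtual space the regions do not consume. */
	for (i = 0; i < nr; i++)
		remain -= regions[i].size_tb << TB_SHIFT;

	srand(1);	/* stand-in for prandom_seed_state()/kaslr_get_random_long() */

	for (i = 0; i < nr; i++) {
		/* Give each remaining region an equal share of the slack. */
		entropy = remain / (nr - i);
		rnd = ((unsigned long)rand() << 32) | (unsigned long)rand();
		entropy = (rnd % (entropy + 1)) & PUD_MASK;
		vaddr += entropy;
		printf("%-18s base 0x%016lx\n", regions[i].name, vaddr);

		/* Skip the region plus at least one PUD of padding. */
		vaddr += regions[i].size_tb << TB_SHIFT;
		vaddr = (vaddr + PUD_SIZE) & PUD_MASK;	/* round_up(vaddr + 1, PUD_SIZE) */
		remain -= entropy;
	}
	return 0;
}

Changing the seed and rerunning shows the property the patch relies on: the regions keep their order, only the gaps between them move, and every base stays PUD aligned.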