aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm64/kernel/kaslr.c
diff options
context:
space:
mode:
authorArd Biesheuvel <ard.biesheuvel@linaro.org>2016-01-26 08:12:01 -0500
committerCatalin Marinas <catalin.marinas@arm.com>2016-02-24 09:57:27 -0500
commitf80fb3a3d50843a401dac4b566b3b131da8077a2 (patch)
tree1861584ef7bbae384b12bfc70dc5974328995506 /arch/arm64/kernel/kaslr.c
parent1e48ef7fcc374051730381a2a05da77eb4eafdb0 (diff)
arm64: add support for kernel ASLR
This adds support for KASLR, implemented based on entropy provided by the bootloader in the /chosen/kaslr-seed DT property. Depending on the size of the address space (VA_BITS) and the page size, the entropy in the virtual displacement is up to 13 bits (16k/2 levels) and up to 25 bits (all 4 levels), with the sidenote that displacements that result in the kernel image straddling a 1GB/32MB/512MB alignment boundary (for 4KB/16KB/64KB granule kernels, respectively) are not allowed, and will be rounded up to an acceptable value. If CONFIG_RANDOMIZE_MODULE_REGION_FULL is enabled, the module region is randomized independently from the core kernel. This makes it less likely that the location of core kernel data structures can be determined by an adversary, but causes all function calls from modules into the core kernel to be resolved via entries in the module PLTs. If CONFIG_RANDOMIZE_MODULE_REGION_FULL is not enabled, the module region is randomized by choosing a page aligned 128 MB region inside the interval [_etext - 128 MB, _stext + 128 MB). This gives between 10 and 14 bits of entropy (depending on page size), independently of the kernel randomization, but still guarantees that modules are within the range of relative branch and jump instructions (with the caveat that, since the module region is shared with other uses of the vmalloc area, modules may need to be loaded further away if the module region is exhausted). Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm64/kernel/kaslr.c')
-rw-r--r--arch/arm64/kernel/kaslr.c173
1 files changed, 173 insertions, 0 deletions
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
new file mode 100644
index 000000000000..8b32a1f8f09f
--- /dev/null
+++ b/arch/arm64/kernel/kaslr.c
@@ -0,0 +1,173 @@
1/*
2 * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/crc32.h>
10#include <linux/init.h>
11#include <linux/libfdt.h>
12#include <linux/mm_types.h>
13#include <linux/sched.h>
14#include <linux/types.h>
15
16#include <asm/fixmap.h>
17#include <asm/kernel-pgtable.h>
18#include <asm/memory.h>
19#include <asm/mmu.h>
20#include <asm/pgtable.h>
21#include <asm/sections.h>
22
/*
 * Base address for module allocations; initialized (and, when KASLR is in
 * effect, randomized) by kaslr_early_init() below.
 */
u64 __read_mostly module_alloc_base;
24
25static __init u64 get_kaslr_seed(void *fdt)
26{
27 int node, len;
28 u64 *prop;
29 u64 ret;
30
31 node = fdt_path_offset(fdt, "/chosen");
32 if (node < 0)
33 return 0;
34
35 prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
36 if (!prop || len != sizeof(u64))
37 return 0;
38
39 ret = fdt64_to_cpu(*prop);
40 *prop = 0;
41 return ret;
42}
43
44static __init const u8 *get_cmdline(void *fdt)
45{
46 static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
47
48 if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
49 int node;
50 const u8 *prop;
51
52 node = fdt_path_offset(fdt, "/chosen");
53 if (node < 0)
54 goto out;
55
56 prop = fdt_getprop(fdt, node, "bootargs", NULL);
57 if (!prop)
58 goto out;
59 return prop;
60 }
61out:
62 return default_cmdline;
63}
64
65extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
66 pgprot_t prot);
67
/*
 * This routine will be executed with the kernel mapped at its default virtual
 * address, and if it returns successfully, the kernel will be remapped, and
 * start_kernel() will be executed from a randomized virtual offset. The
 * relocation will result in all absolute references (e.g., static variables
 * containing function pointers) to be reinitialized, and zero-initialized
 * .bss variables will be reset to 0.
 *
 * Returns the randomized virtual offset to apply to the kernel image, or 0
 * to leave KASLR disabled (FDT unmappable, no/zero seed, or 'nokaslr' on
 * the command line). Also picks module_alloc_base as a side effect.
 */
u64 __init kaslr_early_init(u64 dt_phys)
{
	void *fdt;
	u64 seed, offset, mask, module_range;
	const u8 *cmdline, *str;
	int size;

	/*
	 * Set a reasonable default for module_alloc_base in case
	 * we end up running with module randomization disabled.
	 */
	module_alloc_base = (u64)_etext - MODULES_VSIZE;

	/*
	 * Try to map the FDT early. If this fails, we simply bail,
	 * and proceed with KASLR disabled. We will make another
	 * attempt at mapping the FDT in setup_machine()
	 */
	early_fixmap_init();
	fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
	if (!fdt)
		return 0;

	/*
	 * Retrieve (and wipe) the seed from the FDT
	 */
	seed = get_kaslr_seed(fdt);
	if (!seed)
		return 0;

	/*
	 * Check if 'nokaslr' appears on the command line, and
	 * return 0 if that is the case. The match must be either at
	 * the start of the command line or preceded by a space, so a
	 * substring of another option does not count.
	 */
	cmdline = get_cmdline(fdt);
	str = strstr(cmdline, "nokaslr");
	if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
		return 0;

	/*
	 * OK, so we are proceeding with KASLR enabled. Calculate a suitable
	 * kernel image offset from the seed. Let's place the kernel in the
	 * lower half of the VMALLOC area (VA_BITS - 2).
	 * Even if we could randomize at page granularity for 16k and 64k pages,
	 * let's always round to 2 MB so we don't interfere with the ability to
	 * map using contiguous PTEs
	 */
	mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
	offset = seed & mask;

	/*
	 * The kernel Image should not extend across a 1GB/32MB/512MB alignment
	 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
	 * happens, increase the KASLR offset by the size of the kernel image.
	 */
	if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
	    (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
		offset = (offset + (u64)(_end - _text)) & mask;

	if (IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN does not expect the module region to intersect the
		 * vmalloc region, since shadow memory is allocated for each
		 * module at load time, whereas the vmalloc region is shadowed
		 * by KASAN zero pages. So keep modules out of the vmalloc
		 * region if KASAN is enabled.
		 */
		return offset;

	if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
		/*
		 * Randomize the module region independently from the core
		 * kernel. This prevents modules from leaking any information
		 * about the address of the kernel itself, but results in
		 * branches between modules and the core kernel that are
		 * resolved via PLTs. (Branches between modules will be
		 * resolved normally.)
		 */
		module_range = VMALLOC_END - VMALLOC_START - MODULES_VSIZE;
		module_alloc_base = VMALLOC_START;
	} else {
		/*
		 * Randomize the module region by setting module_alloc_base to
		 * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
		 * _stext) . This guarantees that the resulting region still
		 * covers [_stext, _etext], and that all relative branches can
		 * be resolved without veneers.
		 */
		module_range = MODULES_VSIZE - (u64)(_etext - _stext);
		module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
	}

	/*
	 * Use the lower 21 bits of the seed to randomize the base of the
	 * module region: scale module_range by a 21-bit fraction, so the
	 * result stays in [module_alloc_base, module_alloc_base + module_range).
	 */
	module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
	module_alloc_base &= PAGE_MASK;

	return offset;
}