author	Laura Abbott <lauraa@codeaurora.org>	2015-01-21 20:36:06 -0500
committer	Catalin Marinas <catalin.marinas@arm.com>	2015-01-22 09:54:29 -0500
commit	da141706aea52c1a9fbd28cb8d289b78819f5436 (patch)
tree	6fb0fb5a11c98030393c5915802c9ec891b6df51
parent	2f896d5866107e2926dcdec34a7d40bc56dd2951 (diff)
arm64: add better page protections to arm64
Add page protections for arm64 similar to those in arm. This is for
security reasons to prevent certain classes of exploits. The current
method:

- Map all memory as either RWX or RW. We round to the nearest
  section to avoid creating page tables before everything is mapped
- Once everything is mapped, if either end of the RWX section should
  not be X, we split the PMD and remap as necessary
- When initmem is to be freed, we change the permissions back to
  RW (using stop machine if necessary to flush the TLB)
- If CONFIG_DEBUG_RODATA is set, the read only sections are set
  read only.

Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Tested-by: Kees Cook <keescook@chromium.org>
Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
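Aside: the standalone userspace sketch below (not part of the patch) only illustrates the first step of the method above, rounding the executable window out to section boundaries. SECTION_SHIFT and the two physical addresses are hypothetical example values; in the patch itself the equivalent logic lives in __map_memblock() and fixup_executable() further down in this diff.

/*
 * Minimal sketch, assuming 4K pages (2 MiB sections) and made-up
 * addresses for __pa(_stext) and __pa(__init_end).
 */
#include <stdio.h>
#include <stdint.h>

#define SECTION_SHIFT	21				/* 2 MiB sections with 4K pages */
#define SECTION_SIZE	((uint64_t)1 << SECTION_SHIFT)

static uint64_t round_down_section(uint64_t x)
{
	return x & ~(SECTION_SIZE - 1);
}

static uint64_t round_up_section(uint64_t x)
{
	return (x + SECTION_SIZE - 1) & ~(SECTION_SIZE - 1);
}

int main(void)
{
	uint64_t pa_stext = 0x40081000;		/* hypothetical __pa(_stext) */
	uint64_t pa_init_end = 0x40a2c000;	/* hypothetical __pa(__init_end) */

	/* Coarse RWX window used while memory is still being mapped. */
	uint64_t x_start = round_down_section(pa_stext);
	uint64_t x_end = round_up_section(pa_init_end);

	printf("RWX window: [0x%llx, 0x%llx)\n",
	       (unsigned long long)x_start, (unsigned long long)x_end);

	/*
	 * The slack at either end of the window is what later gets remapped
	 * back to RW, once page tables exist and the PMDs can be split.
	 */
	printf("leading slack: 0x%llx bytes, trailing slack: 0x%llx bytes\n",
	       (unsigned long long)(pa_stext - x_start),
	       (unsigned long long)(x_end - pa_init_end));

	return 0;
}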
-rw-r--r--	arch/arm64/Kconfig.debug	23
-rw-r--r--	arch/arm64/include/asm/cacheflush.h	5
-rw-r--r--	arch/arm64/kernel/vmlinux.lds.S	17
-rw-r--r--	arch/arm64/mm/init.c	1
-rw-r--r--	arch/arm64/mm/mm.h	2
-rw-r--r--	arch/arm64/mm/mmu.c	211
6 files changed, 233 insertions(+), 26 deletions(-)
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 5fdd6dce8061..4a8741073c90 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -66,4 +66,27 @@ config DEBUG_SET_MODULE_RONX
 	  against certain classes of kernel exploits.
 	  If in doubt, say "N".
 
+config DEBUG_RODATA
+	bool "Make kernel text and rodata read-only"
+	help
+	  If this is set, kernel text and rodata will be made read-only. This
+	  is to help catch accidental or malicious attempts to change the
+	  kernel's executable code. Additionally splits rodata from kernel
+	  text so it can be made explicitly non-executable.
+
+	  If in doubt, say Y
+
+config DEBUG_ALIGN_RODATA
+	depends on DEBUG_RODATA && !ARM64_64K_PAGES
+	bool "Align linker sections up to SECTION_SIZE"
+	help
+	  If this option is enabled, sections that may potentially be marked as
+	  read only or non-executable will be aligned up to the section size of
+	  the kernel. This prevents sections from being split into pages and
+	  avoids a potential TLB penalty. The downside is an increase in
+	  alignment and potentially wasted space. Turn on this option if
+	  performance is more important than memory pressure.
+
+	  If in doubt, say N
+
 endmenu
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 7ae31a2cc6c0..67d309cc3b6b 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -152,4 +152,9 @@ int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
 #endif
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 9965ec87cbec..5d9d2dca530d 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -8,6 +8,7 @@
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/page.h>
+#include <asm/pgtable.h>
 
 #include "image.h"
 
@@ -49,6 +50,14 @@ PECOFF_FILE_ALIGNMENT = 0x200;
 #define PECOFF_EDATA_PADDING
 #endif
 
+#ifdef CONFIG_DEBUG_ALIGN_RODATA
+#define ALIGN_DEBUG_RO			. = ALIGN(1<<SECTION_SHIFT);
+#define ALIGN_DEBUG_RO_MIN(min)		ALIGN_DEBUG_RO
+#else
+#define ALIGN_DEBUG_RO
+#define ALIGN_DEBUG_RO_MIN(min)		. = ALIGN(min);
+#endif
+
 SECTIONS
 {
 	/*
@@ -71,6 +80,7 @@ SECTIONS
 		_text = .;
 		HEAD_TEXT
 	}
+	ALIGN_DEBUG_RO
 	.text : {			/* Real text segment		*/
 		_stext = .;		/* Text and read-only data	*/
 			__exception_text_start = .;
@@ -87,19 +97,22 @@ SECTIONS
 		*(.got)			/* Global offset table		*/
 	}
 
+	ALIGN_DEBUG_RO
 	RO_DATA(PAGE_SIZE)
 	EXCEPTION_TABLE(8)
 	NOTES
+	ALIGN_DEBUG_RO
 	_etext = .;			/* End of text and rodata section */
 
-	. = ALIGN(PAGE_SIZE);
+	ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
 	__init_begin = .;
 
 	INIT_TEXT_SECTION(8)
 	.exit.text : {
 		ARM_EXIT_KEEP(EXIT_TEXT)
 	}
-	. = ALIGN(16);
+
+	ALIGN_DEBUG_RO_MIN(16)
 	.init.data : {
 		INIT_DATA
 		INIT_SETUP(16)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 11c7b701b681..43cccb5101c0 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -344,6 +344,7 @@ void __init mem_init(void)
 
 void free_initmem(void)
 {
+	fixup_init();
 	free_initmem_default(0);
 	free_alternatives_memory();
 }
diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h
index 50c3351df9c7..ef47d99b5cbc 100644
--- a/arch/arm64/mm/mm.h
+++ b/arch/arm64/mm/mm.h
@@ -1 +1,3 @@
 extern void __init bootmem_init(void);
+
+void fixup_init(void);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e57c170a91f3..91d55b6efd8a 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -26,6 +26,7 @@
 #include <linux/memblock.h>
 #include <linux/fs.h>
 #include <linux/io.h>
+#include <linux/stop_machine.h>
 
 #include <asm/cputype.h>
 #include <asm/fixmap.h>
@@ -59,21 +60,43 @@ EXPORT_SYMBOL(phys_mem_access_prot);
 static void __init *early_alloc(unsigned long sz)
 {
 	void *ptr = __va(memblock_alloc(sz, sz));
+	BUG_ON(!ptr);
 	memset(ptr, 0, sz);
 	return ptr;
 }
 
-static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
+/*
+ * remap a PMD into pages
+ */
+static void split_pmd(pmd_t *pmd, pte_t *pte)
+{
+	unsigned long pfn = pmd_pfn(*pmd);
+	int i = 0;
+
+	do {
+		/*
+		 * Need to have the least restrictive permissions available
+		 * permissions will be fixed up later
+		 */
+		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+		pfn++;
+	} while (pte++, i++, i < PTRS_PER_PTE);
+}
+
+static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 				  unsigned long end, unsigned long pfn,
-				  pgprot_t prot)
+				  pgprot_t prot,
+				  void *(*alloc)(unsigned long size))
 {
 	pte_t *pte;
 
-	if (pmd_none(*pmd)) {
-		pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
+	if (pmd_none(*pmd) || pmd_bad(*pmd)) {
+		pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
+		if (pmd_sect(*pmd))
+			split_pmd(pmd, pte);
 		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
+		flush_tlb_all();
 	}
-	BUG_ON(pmd_bad(*pmd));
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
@@ -82,9 +105,22 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
+void split_pud(pud_t *old_pud, pmd_t *pmd)
+{
+	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
+	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
+	int i = 0;
+
+	do {
+		set_pmd(pmd, __pmd(addr | prot));
+		addr += PMD_SIZE;
+	} while (pmd++, i++, i < PTRS_PER_PMD);
+}
+
+static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 				  unsigned long addr, unsigned long end,
-				  phys_addr_t phys, pgprot_t prot)
+				  phys_addr_t phys, pgprot_t prot,
+				  void *(*alloc)(unsigned long size))
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -93,8 +129,16 @@ static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 	 * Check for initial section mappings in the pgd/pud and remove them.
 	 */
 	if (pud_none(*pud) || pud_bad(*pud)) {
-		pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
+		pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
+		if (pud_sect(*pud)) {
+			/*
+			 * need to have the 1G of mappings continue to be
+			 * present
+			 */
+			split_pud(pud, pmd);
+		}
 		pud_populate(mm, pud, pmd);
+		flush_tlb_all();
 	}
 
 	pmd = pmd_offset(pud, addr);
@@ -113,21 +157,34 @@ static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 			flush_tlb_all();
 		} else {
 			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
-				       prot);
+				       prot, alloc);
 		}
 		phys += next - addr;
 	} while (pmd++, addr = next, addr != end);
 }
 
-static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
+static inline bool use_1G_block(unsigned long addr, unsigned long next,
+			unsigned long phys)
+{
+	if (PAGE_SHIFT != 12)
+		return false;
+
+	if (((addr | next | phys) & ~PUD_MASK) != 0)
+		return false;
+
+	return true;
+}
+
+static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
 				  unsigned long addr, unsigned long end,
-				  phys_addr_t phys, pgprot_t prot)
+				  phys_addr_t phys, pgprot_t prot,
+				  void *(*alloc)(unsigned long size))
 {
 	pud_t *pud;
 	unsigned long next;
 
 	if (pgd_none(*pgd)) {
-		pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t));
+		pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
 		pgd_populate(mm, pgd, pud);
 	}
 	BUG_ON(pgd_bad(*pgd));
@@ -139,8 +196,7 @@ static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
 		/*
 		 * For 4K granule only, attempt to put down a 1GB block
 		 */
-		if ((PAGE_SHIFT == 12) &&
-			((addr | next | phys) & ~PUD_MASK) == 0) {
+		if (use_1G_block(addr, next, phys)) {
 			pud_t old_pud = *pud;
 			set_pud(pud, __pud(phys |
 					pgprot_val(mk_sect_prot(prot))));
@@ -158,7 +214,7 @@ static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
 				flush_tlb_all();
 			}
 		} else {
-			alloc_init_pmd(mm, pud, addr, next, phys, prot);
+			alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
 		}
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
@@ -168,9 +224,10 @@ static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
  * Create the page directory entries and any necessary page tables for the
  * mapping specified by 'md'.
  */
-static void __init __create_mapping(struct mm_struct *mm, pgd_t *pgd,
+static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
 				    phys_addr_t phys, unsigned long virt,
-				    phys_addr_t size, pgprot_t prot)
+				    phys_addr_t size, pgprot_t prot,
+				    void *(*alloc)(unsigned long size))
 {
 	unsigned long addr, length, end, next;
 
@@ -180,13 +237,23 @@ static void __init __create_mapping(struct mm_struct *mm, pgd_t *pgd,
 	end = addr + length;
 	do {
 		next = pgd_addr_end(addr, end);
-		alloc_init_pud(mm, pgd, addr, next, phys, prot);
+		alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
 		phys += next - addr;
 	} while (pgd++, addr = next, addr != end);
 }
 
-static void __init create_mapping(phys_addr_t phys, unsigned long virt,
-				  phys_addr_t size)
+static void *late_alloc(unsigned long size)
+{
+	void *ptr;
+
+	BUG_ON(size > PAGE_SIZE);
+	ptr = (void *)__get_free_page(PGALLOC_GFP);
+	BUG_ON(!ptr);
+	return ptr;
+}
+
+static void __ref create_mapping(phys_addr_t phys, unsigned long virt,
+				  phys_addr_t size, pgprot_t prot)
 {
 	if (virt < VMALLOC_START) {
 		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
@@ -194,15 +261,71 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 		return;
 	}
 	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
-			 size, PAGE_KERNEL_EXEC);
+			 size, prot, early_alloc);
 }
 
 void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       unsigned long virt, phys_addr_t size,
 			       pgprot_t prot)
 {
-	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot);
+	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
+				early_alloc);
+}
+
+static void create_mapping_late(phys_addr_t phys, unsigned long virt,
+				  phys_addr_t size, pgprot_t prot)
+{
+	if (virt < VMALLOC_START) {
+		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
+			&phys, virt);
+		return;
+	}
+
+	return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
+				phys, virt, size, prot, late_alloc);
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
+{
+	/*
+	 * Set up the executable regions using the existing section mappings
+	 * for now. This will get more fine grained later once all memory
+	 * is mapped
+	 */
+	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+
+	if (end < kernel_x_start) {
+		create_mapping(start, __phys_to_virt(start),
+			end - start, PAGE_KERNEL);
+	} else if (start >= kernel_x_end) {
+		create_mapping(start, __phys_to_virt(start),
+			end - start, PAGE_KERNEL);
+	} else {
+		if (start < kernel_x_start)
+			create_mapping(start, __phys_to_virt(start),
+				kernel_x_start - start,
+				PAGE_KERNEL);
+		create_mapping(kernel_x_start,
+				__phys_to_virt(kernel_x_start),
+				kernel_x_end - kernel_x_start,
+				PAGE_KERNEL_EXEC);
+		if (kernel_x_end < end)
+			create_mapping(kernel_x_end,
+				__phys_to_virt(kernel_x_end),
+				end - kernel_x_end,
+				PAGE_KERNEL);
+	}
+
+}
+#else
+static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
+{
+	create_mapping(start, __phys_to_virt(start), end - start,
+			PAGE_KERNEL_EXEC);
 }
+#endif
 
 static void __init map_mem(void)
 {
@@ -248,14 +371,53 @@ static void __init map_mem(void)
 			memblock_set_current_limit(limit);
 		}
 #endif
-
-		create_mapping(start, __phys_to_virt(start), end - start);
+		__map_memblock(start, end);
 	}
 
 	/* Limit no longer required. */
 	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 }
 
+void __init fixup_executable(void)
+{
+#ifdef CONFIG_DEBUG_RODATA
+	/* now that we are actually fully mapped, make the start/end more fine grained */
+	if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
+		unsigned long aligned_start = round_down(__pa(_stext),
+							SECTION_SIZE);
+
+		create_mapping(aligned_start, __phys_to_virt(aligned_start),
+				__pa(_stext) - aligned_start,
+				PAGE_KERNEL);
+	}
+
+	if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
+		unsigned long aligned_end = round_up(__pa(__init_end),
+							SECTION_SIZE);
+		create_mapping(__pa(__init_end), (unsigned long)__init_end,
+				aligned_end - __pa(__init_end),
+				PAGE_KERNEL);
+	}
+#endif
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void)
+{
+	create_mapping_late(__pa(_stext), (unsigned long)_stext,
+				(unsigned long)_etext - (unsigned long)_stext,
+				PAGE_KERNEL_EXEC | PTE_RDONLY);
+
+}
+#endif
+
+void fixup_init(void)
+{
+	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
+			(unsigned long)__init_end - (unsigned long)__init_begin,
+			PAGE_KERNEL);
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps and sets up the zero page.
@@ -265,6 +427,7 @@ void __init paging_init(void)
 	void *zero_page;
 
 	map_mem();
+	fixup_executable();
 
 	/*
 	 * Finally flush the caches and tlb to ensure that we're in a