about summary refs log tree commit diff stats
path: root/arch/arm64/kernel
diff options
context:
space:
mode:
author Laura Abbott <lauraa@codeaurora.org> 2015-01-21 20:36:06 -0500
committer Catalin Marinas <catalin.marinas@arm.com> 2015-01-22 09:54:29 -0500
commit da141706aea52c1a9fbd28cb8d289b78819f5436 (patch)
tree 6fb0fb5a11c98030393c5915802c9ec891b6df51 /arch/arm64/kernel
parent 2f896d5866107e2926dcdec34a7d40bc56dd2951 (diff)
arm64: add better page protections to arm64
Add page protections for arm64 similar to those in arm. This is for security reasons to prevent certain classes of exploits. The current method:

- Map all memory as either RWX or RW. We round to the nearest section to avoid creating page tables before everything is mapped
- Once everything is mapped, if either end of the RWX section should not be X, we split the PMD and remap as necessary
- When initmem is to be freed, we change the permissions back to RW (using stop machine if necessary to flush the TLB)
- If CONFIG_DEBUG_RODATA is set, the read only sections are set read only.

Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Tested-by: Kees Cook <keescook@chromium.org>
Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S17
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 9965ec87cbec..5d9d2dca530d 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -8,6 +8,7 @@
8#include <asm/thread_info.h> 8#include <asm/thread_info.h>
9#include <asm/memory.h> 9#include <asm/memory.h>
10#include <asm/page.h> 10#include <asm/page.h>
11#include <asm/pgtable.h>
11 12
12#include "image.h" 13#include "image.h"
13 14
@@ -49,6 +50,14 @@ PECOFF_FILE_ALIGNMENT = 0x200;
49#define PECOFF_EDATA_PADDING 50#define PECOFF_EDATA_PADDING
50#endif 51#endif
51 52
53#ifdef CONFIG_DEBUG_ALIGN_RODATA
54#define ALIGN_DEBUG_RO . = ALIGN(1<<SECTION_SHIFT);
55#define ALIGN_DEBUG_RO_MIN(min) ALIGN_DEBUG_RO
56#else
57#define ALIGN_DEBUG_RO
58#define ALIGN_DEBUG_RO_MIN(min) . = ALIGN(min);
59#endif
60
52SECTIONS 61SECTIONS
53{ 62{
54 /* 63 /*
@@ -71,6 +80,7 @@ SECTIONS
71 _text = .; 80 _text = .;
72 HEAD_TEXT 81 HEAD_TEXT
73 } 82 }
83 ALIGN_DEBUG_RO
74 .text : { /* Real text segment */ 84 .text : { /* Real text segment */
75 _stext = .; /* Text and read-only data */ 85 _stext = .; /* Text and read-only data */
76 __exception_text_start = .; 86 __exception_text_start = .;
@@ -87,19 +97,22 @@ SECTIONS
87 *(.got) /* Global offset table */ 97 *(.got) /* Global offset table */
88 } 98 }
89 99
100 ALIGN_DEBUG_RO
90 RO_DATA(PAGE_SIZE) 101 RO_DATA(PAGE_SIZE)
91 EXCEPTION_TABLE(8) 102 EXCEPTION_TABLE(8)
92 NOTES 103 NOTES
104 ALIGN_DEBUG_RO
93 _etext = .; /* End of text and rodata section */ 105 _etext = .; /* End of text and rodata section */
94 106
95 . = ALIGN(PAGE_SIZE); 107 ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
96 __init_begin = .; 108 __init_begin = .;
97 109
98 INIT_TEXT_SECTION(8) 110 INIT_TEXT_SECTION(8)
99 .exit.text : { 111 .exit.text : {
100 ARM_EXIT_KEEP(EXIT_TEXT) 112 ARM_EXIT_KEEP(EXIT_TEXT)
101 } 113 }
102 . = ALIGN(16); 114
115 ALIGN_DEBUG_RO_MIN(16)
103 .init.data : { 116 .init.data : {
104 INIT_DATA 117 INIT_DATA
105 INIT_SETUP(16) 118 INIT_SETUP(16)