Diffstat (limited to 'arch/x86/mm/kasan_init_64.c')

 arch/x86/mm/kasan_init_64.c | 47 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 42 insertions(+), 5 deletions(-)
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 4860906c6b9f..e1840f3db5b5 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -1,3 +1,4 @@
+#define pr_fmt(fmt) "kasan: " fmt
 #include <linux/bootmem.h>
 #include <linux/kasan.h>
 #include <linux/kdebug.h>
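
The pr_fmt define added at the top is what gives the pr_info() call at the end of this patch its "kasan: " prefix: the kernel's pr_* macros expand pr_fmt() around the format string at each call site. A minimal userspace sketch of the same macro trick (printf stands in for the printk-based pr_info):

#include <stdio.h>

/* As in the kernel, pr_fmt() must be defined before the first pr_* use;
 * convention puts it on the first line of the file. */
#define pr_fmt(fmt) "kasan: " fmt
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
        pr_info("Kernel address sanitizer initialized\n");
        /* prints: kasan: Kernel address sanitizer initialized */
        return 0;
}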
@@ -11,7 +12,19 @@
 extern pgd_t early_level4_pgt[PTRS_PER_PGD];
 extern struct range pfn_mapped[E820_X_MAX];
 
-extern unsigned char kasan_zero_page[PAGE_SIZE];
+static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
+static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
+
+/*
+ * This page is used as the early shadow. We don't use empty_zero_page
+ * at early stages: stack instrumentation could write some garbage
+ * to this page.
+ * Later we reuse it as the zero shadow for large ranges of memory
+ * that are allowed to be accessed but are not instrumented by kasan
+ * (vmalloc/vmemmap ...).
+ */
+static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
 
 static int __init map_range(struct range *range)
 {
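
Some context on why one zeroed page can describe the shadow of huge regions: KASAN dedicates one shadow byte to each aligned 8-byte granule of address space, and a shadow byte of 0 means "all 8 bytes accessible". Map many granules' shadow onto the same all-zero page and arbitrarily large ranges become accessible without per-range shadow memory. A toy of the address translation (the 8-to-1 scaling matches x86-64 KASAN; treat the offset constant as illustrative):

#include <stdint.h>
#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3                 /* 8 bytes per shadow byte */
#define KASAN_SHADOW_OFFSET 0xdffffc0000000000UL   /* illustrative value */

/* One shadow byte describes a naturally aligned 8-byte granule;
 * 0 in the shadow means the whole granule may be accessed. */
static uintptr_t kasan_mem_to_shadow(uintptr_t addr)
{
        return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
        uintptr_t addr = 0xffff880000000000UL;     /* example kernel address */

        printf("shadow of %#lx is %#lx\n",
               (unsigned long)addr,
               (unsigned long)kasan_mem_to_shadow(addr));
        return 0;
}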
@@ -36,7 +49,7 @@ static void __init clear_pgds(unsigned long start,
 	pgd_clear(pgd_offset_k(start));
 }
 
-void __init kasan_map_early_shadow(pgd_t *pgd)
+static void __init kasan_map_early_shadow(pgd_t *pgd)
 {
 	int i;
 	unsigned long start = KASAN_SHADOW_START;
@@ -73,7 +86,7 @@ static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
 	while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
 		WARN_ON(!pmd_none(*pmd));
 		set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
-					| __PAGE_KERNEL_RO));
+					| _KERNPG_TABLE));
 		addr += PMD_SIZE;
 		pmd = pmd_offset(pud, addr);
 	}
@@ -99,7 +112,7 @@ static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
 	while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
 		WARN_ON(!pud_none(*pud));
 		set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
-					| __PAGE_KERNEL_RO));
+					| _KERNPG_TABLE));
 		addr += PUD_SIZE;
 		pud = pud_offset(pgd, addr);
 	}
@@ -124,7 +137,7 @@ static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
 	while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
 		WARN_ON(!pgd_none(*pgd));
 		set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
-					| __PAGE_KERNEL_RO));
+					| _KERNPG_TABLE));
 		addr += PGDIR_SIZE;
 		pgd = pgd_offset_k(addr);
 	}
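
The three hunks above make the same correction at three paging levels: entries that point at a lower-level table now carry _KERNPG_TABLE instead of __PAGE_KERNEL_RO. On x86 a cleared _PAGE_RW bit in an upper-level entry write-protects everything mapped beneath it regardless of the leaf PTE flags, so read-only table entries left the early shadow unwritable. A runnable sketch of the difference (bit values written from memory of arch/x86/include/asm/pgtable_types.h; treat them as approximate):

#include <stdint.h>
#include <stdio.h>

#define _PAGE_PRESENT  (1UL << 0)
#define _PAGE_RW       (1UL << 1)
#define _PAGE_ACCESSED (1UL << 5)
#define _PAGE_DIRTY    (1UL << 6)
#define _PAGE_GLOBAL   (1UL << 8)
#define _PAGE_NX       (1UL << 63)

#define _KERNPG_TABLE    (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_RO (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY | \
                          _PAGE_GLOBAL | _PAGE_NX)

int main(void)
{
        /* The decisive bit is _PAGE_RW: without it in a PMD/PUD/PGD entry,
         * the CPU faults on any write through that entry, even when the
         * leaf PTE below is writable. */
        printf("_PAGE_RW in __PAGE_KERNEL_RO: %s\n",
               (__PAGE_KERNEL_RO & _PAGE_RW) ? "yes" : "no");
        printf("_PAGE_RW in _KERNPG_TABLE:    %s\n",
               (_KERNPG_TABLE & _PAGE_RW) ? "yes" : "no");
        return 0;
}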
@@ -166,6 +179,26 @@ static struct notifier_block kasan_die_notifier = {
 };
 #endif
 
+void __init kasan_early_init(void)
+{
+	int i;
+	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
+	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
+	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
+
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		kasan_zero_pte[i] = __pte(pte_val);
+
+	for (i = 0; i < PTRS_PER_PMD; i++)
+		kasan_zero_pmd[i] = __pmd(pmd_val);
+
+	for (i = 0; i < PTRS_PER_PUD; i++)
+		kasan_zero_pud[i] = __pud(pud_val);
+
+	kasan_map_early_shadow(early_level4_pgt);
+	kasan_map_early_shadow(init_level4_pgt);
+}
+
 void __init kasan_init(void)
 {
 	int i;
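
kasan_early_init() builds a maximally shared hierarchy: every PTE slot points at the one kasan_zero_page, every PMD slot at the one PTE table, every PUD slot at the one PMD table, so a handful of pages gives the entire shadow range a valid, all-zero mapping. A userspace toy of that sharing (arrays stand in for page tables, pointers for physical addresses):

#include <stdio.h>

#define PTRS_PER_TABLE 512

/* One object per level; every slot of each table points at the same
 * next-level object, mirroring kasan_zero_{pte,pmd,pud}. */
static unsigned char zero_page[4096];
static unsigned char *pte[PTRS_PER_TABLE];
static unsigned char **pmd[PTRS_PER_TABLE];
static unsigned char ***pud[PTRS_PER_TABLE];

int main(void)
{
        int i;

        for (i = 0; i < PTRS_PER_TABLE; i++)
                pte[i] = zero_page;
        for (i = 0; i < PTRS_PER_TABLE; i++)
                pmd[i] = pte;
        for (i = 0; i < PTRS_PER_TABLE; i++)
                pud[i] = pmd;

        /* Any index path, i.e. any "virtual address", lands on zero_page. */
        printf("pud[7][123][400] == zero_page? %s\n",
               pud[7][123][400] == zero_page ? "yes" : "no");
        return 0;
}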
@@ -176,6 +209,7 @@ void __init kasan_init(void)
 
 	memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
 	load_cr3(early_level4_pgt);
+	__flush_tlb_all();
 
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
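
The added __flush_tlb_all() matters because a plain CR3 write does not evict TLB entries marked global, and kernel mappings normally are global; stale translations of the old shadow could otherwise survive the page-table switch. A compile-and-run sketch of the classic "toggle CR4.PGE" full flush (the CR4 accessors here are stubs over a plain variable; real code uses privileged register accesses):

#include <stdio.h>

#define X86_CR4_PGE (1UL << 7)                 /* Page Global Enable */

static unsigned long fake_cr4 = X86_CR4_PGE;   /* stand-in for the real CR4 */

static unsigned long read_cr4(void) { return fake_cr4; }

static void write_cr4(unsigned long val)
{
        fake_cr4 = val;
        printf("CR4 = %#lx\n", val);
}

/* Clearing and restoring CR4.PGE invalidates all TLB entries,
 * including the global ones a CR3 reload leaves in place. */
static void flush_tlb_all_sketch(void)
{
        unsigned long cr4 = read_cr4();

        write_cr4(cr4 & ~X86_CR4_PGE);         /* drops everything, global too */
        write_cr4(cr4);                        /* re-enable global pages */
}

int main(void)
{
        flush_tlb_all_sketch();
        return 0;
}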
@@ -202,5 +236,8 @@ void __init kasan_init(void)
 	memset(kasan_zero_page, 0, PAGE_SIZE);
 
 	load_cr3(init_level4_pgt);
+	__flush_tlb_all();
 	init_task.kasan_depth = 0;
+
+	pr_info("Kernel address sanitizer initialized\n");
 }
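
The init_task.kasan_depth = 0 line is the switch that arms reporting: early boot runs with a nonzero depth so accesses through the not-yet-populated shadow stay silent. A toy of that gating (the field name is the kernel's; the surrounding code is invented for illustration):

#include <stdio.h>

struct task { int kasan_depth; };

static struct task init_task = { .kasan_depth = 1 }; /* nonzero during early boot */

/* Reports are emitted only once the depth has dropped to zero,
 * i.e. after kasan_init() has installed the real shadow. */
static void kasan_report_sketch(struct task *t, const char *what)
{
        if (t->kasan_depth)
                return;                 /* early boot: suppress */
        printf("BUG: KASAN: %s\n", what);
}

int main(void)
{
        kasan_report_sketch(&init_task, "example report"); /* suppressed */
        init_task.kasan_depth = 0;       /* what kasan_init() does last */
        kasan_report_sketch(&init_task, "example report"); /* now printed */
        return 0;
}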