aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAlexander Popov <alpopov@ptsecurity.com>2015-07-02 05:09:34 -0400
committerIngo Molnar <mingo@kernel.org>2015-07-06 08:53:13 -0400
commit5d5aa3cfca5cf74cd928daf3674642e6004328d1 (patch)
tree42d53dc7979c3c9141cb7aac20107b9c626ee665
parentd0f77d4d04b222a817925d33ba3589b190bfa863 (diff)
x86/kasan: Fix KASAN shadow region page tables
Currently, KASAN shadow region page tables are created without taking the physical offset (phys_base) into account. This causes a kernel halt when phys_base is not zero. So let's initialize the KASAN shadow region page tables in kasan_early_init() using __pa_nodebug(), which accounts for phys_base. This patch also separates x86_64_start_kernel() from KASAN low-level details by moving kasan_map_early_shadow(init_level4_pgt) into kasan_early_init(). Remove the comment before clear_bss(), which no longer adds much to the code's readability; otherwise, describing all the new ordering dependencies would be too verbose. Signed-off-by: Alexander Popov <alpopov@ptsecurity.com> Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com> Cc: <stable@vger.kernel.org> # 4.0+ Cc: Alexander Potapenko <glider@google.com> Cc: Andrey Konovalov <adech.fo@gmail.com> Cc: Borislav Petkov <bp@alien8.de> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/1435828178-10975-3-git-send-email-a.ryabinin@samsung.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/x86/include/asm/kasan.h8
-rw-r--r--arch/x86/kernel/head64.c7
-rw-r--r--arch/x86/kernel/head_64.S29
-rw-r--r--arch/x86/mm/kasan_init_64.c36
4 files changed, 38 insertions, 42 deletions
diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
index 8b22422fbad8..74a2a8dc9908 100644
--- a/arch/x86/include/asm/kasan.h
+++ b/arch/x86/include/asm/kasan.h
@@ -14,15 +14,11 @@
14 14
15#ifndef __ASSEMBLY__ 15#ifndef __ASSEMBLY__
16 16
17extern pte_t kasan_zero_pte[];
18extern pte_t kasan_zero_pmd[];
19extern pte_t kasan_zero_pud[];
20
21#ifdef CONFIG_KASAN 17#ifdef CONFIG_KASAN
22void __init kasan_map_early_shadow(pgd_t *pgd); 18void __init kasan_early_init(void);
23void __init kasan_init(void); 19void __init kasan_init(void);
24#else 20#else
25static inline void kasan_map_early_shadow(pgd_t *pgd) { } 21static inline void kasan_early_init(void) { }
26static inline void kasan_init(void) { } 22static inline void kasan_init(void) { }
27#endif 23#endif
28 24
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 65c8985e3eb2..f129a9af6357 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -161,13 +161,12 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
161 /* Kill off the identity-map trampoline */ 161 /* Kill off the identity-map trampoline */
162 reset_early_page_tables(); 162 reset_early_page_tables();
163 163
164 kasan_map_early_shadow(early_level4_pgt);
165
166 /* clear bss before set_intr_gate with early_idt_handler */
167 clear_bss(); 164 clear_bss();
168 165
169 clear_page(init_level4_pgt); 166 clear_page(init_level4_pgt);
170 167
168 kasan_early_init();
169
171 for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) 170 for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
172 set_intr_gate(i, early_idt_handler_array[i]); 171 set_intr_gate(i, early_idt_handler_array[i]);
173 load_idt((const struct desc_ptr *)&idt_descr); 172 load_idt((const struct desc_ptr *)&idt_descr);
@@ -182,8 +181,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
182 /* set init_level4_pgt kernel high mapping*/ 181 /* set init_level4_pgt kernel high mapping*/
183 init_level4_pgt[511] = early_level4_pgt[511]; 182 init_level4_pgt[511] = early_level4_pgt[511];
184 183
185 kasan_map_early_shadow(init_level4_pgt);
186
187 x86_64_start_reservations(real_mode_data); 184 x86_64_start_reservations(real_mode_data);
188} 185}
189 186
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index e5c27f729a38..1d40ca8a73f2 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -516,38 +516,9 @@ ENTRY(phys_base)
516 /* This must match the first entry in level2_kernel_pgt */ 516 /* This must match the first entry in level2_kernel_pgt */
517 .quad 0x0000000000000000 517 .quad 0x0000000000000000
518 518
519#ifdef CONFIG_KASAN
520#define FILL(VAL, COUNT) \
521 .rept (COUNT) ; \
522 .quad (VAL) ; \
523 .endr
524
525NEXT_PAGE(kasan_zero_pte)
526 FILL(kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE, 512)
527NEXT_PAGE(kasan_zero_pmd)
528 FILL(kasan_zero_pte - __START_KERNEL_map + _KERNPG_TABLE, 512)
529NEXT_PAGE(kasan_zero_pud)
530 FILL(kasan_zero_pmd - __START_KERNEL_map + _KERNPG_TABLE, 512)
531
532#undef FILL
533#endif
534
535
536#include "../../x86/xen/xen-head.S" 519#include "../../x86/xen/xen-head.S"
537 520
538 __PAGE_ALIGNED_BSS 521 __PAGE_ALIGNED_BSS
539NEXT_PAGE(empty_zero_page) 522NEXT_PAGE(empty_zero_page)
540 .skip PAGE_SIZE 523 .skip PAGE_SIZE
541 524
542#ifdef CONFIG_KASAN
543/*
544 * This page used as early shadow. We don't use empty_zero_page
545 * at early stages, stack instrumentation could write some garbage
546 * to this page.
547 * Latter we reuse it as zero shadow for large ranges of memory
548 * that allowed to access, but not instrumented by kasan
549 * (vmalloc/vmemmap ...).
550 */
551NEXT_PAGE(kasan_zero_page)
552 .skip PAGE_SIZE
553#endif
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 4860906c6b9f..0e4a05fa34d7 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -11,7 +11,19 @@
11extern pgd_t early_level4_pgt[PTRS_PER_PGD]; 11extern pgd_t early_level4_pgt[PTRS_PER_PGD];
12extern struct range pfn_mapped[E820_X_MAX]; 12extern struct range pfn_mapped[E820_X_MAX];
13 13
14extern unsigned char kasan_zero_page[PAGE_SIZE]; 14static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
15static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
16static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
17
18/*
19 * This page used as early shadow. We don't use empty_zero_page
20 * at early stages, stack instrumentation could write some garbage
21 * to this page.
22 * Latter we reuse it as zero shadow for large ranges of memory
23 * that allowed to access, but not instrumented by kasan
24 * (vmalloc/vmemmap ...).
25 */
26static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
15 27
16static int __init map_range(struct range *range) 28static int __init map_range(struct range *range)
17{ 29{
@@ -36,7 +48,7 @@ static void __init clear_pgds(unsigned long start,
36 pgd_clear(pgd_offset_k(start)); 48 pgd_clear(pgd_offset_k(start));
37} 49}
38 50
39void __init kasan_map_early_shadow(pgd_t *pgd) 51static void __init kasan_map_early_shadow(pgd_t *pgd)
40{ 52{
41 int i; 53 int i;
42 unsigned long start = KASAN_SHADOW_START; 54 unsigned long start = KASAN_SHADOW_START;
@@ -166,6 +178,26 @@ static struct notifier_block kasan_die_notifier = {
166}; 178};
167#endif 179#endif
168 180
181void __init kasan_early_init(void)
182{
183 int i;
184 pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
185 pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
186 pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
187
188 for (i = 0; i < PTRS_PER_PTE; i++)
189 kasan_zero_pte[i] = __pte(pte_val);
190
191 for (i = 0; i < PTRS_PER_PMD; i++)
192 kasan_zero_pmd[i] = __pmd(pmd_val);
193
194 for (i = 0; i < PTRS_PER_PUD; i++)
195 kasan_zero_pud[i] = __pud(pud_val);
196
197 kasan_map_early_shadow(early_level4_pgt);
198 kasan_map_early_shadow(init_level4_pgt);
199}
200
169void __init kasan_init(void) 201void __init kasan_init(void)
170{ 202{
171 int i; 203 int i;