author		Andrey Ryabinin <ryabinin.a.a@gmail.com>	2015-08-13 01:37:24 -0400
committer	Ingo Molnar <mingo@kernel.org>			2015-08-22 08:54:55 -0400
commit		69786cdb379bbc6eab14cf2393c1abd879316e85 (patch)
tree		ddfaf2f85b64057553a745c9080c2e6714880613
parent		920e277e17f12870188f4564887a95ae9ac03e31 (diff)
x86/kasan, mm: Introduce generic kasan_populate_zero_shadow()
Introduce a generic kasan_populate_zero_shadow(shadow_start,
shadow_end). This function maps kasan_zero_page to the
[shadow_start, shadow_end] addresses.

This replaces the x86_64-specific populate_zero_shadow() and will
be used for ARM64 in follow-on patches.

The main changes from the original version are:

 * Use p?d_populate*() instead of set_p?d()
 * Use the memblock allocator directly instead of vmemmap_alloc_block()
 * Use __pa() instead of __pa_nodebug(). __pa() causes trouble
   if we use it before kasan_early_init(); kasan_populate_zero_shadow()
   will be used later, so we are fine with __pa() here.
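
As an illustration, here is a minimal sketch of how an architecture is
expected to call the new helper, modeled on the x86_64 kasan_init()
hunks below (kasan_init_sketch is a placeholder name; KASAN_SHADOW_START
and the range endpoints come from the architecture's <asm/kasan.h>):

	#include <linux/kasan.h>

	static void __init kasan_init_sketch(void)
	{
		/*
		 * Map the shadow of ranges that are valid to access but
		 * never instrumented (in this sketch, everything below
		 * the linear mapping's shadow) to the shared,
		 * write-protected kasan_zero_page.  Real, writable
		 * shadow is allocated only where instrumented memory
		 * lives.
		 */
		kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
				kasan_mem_to_shadow((void *)PAGE_OFFSET));
	}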
Signed-off-by: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Alexey Klimov <klimov.linux@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: David Keitel <dkeitel@codeaurora.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Linus Walleij <linus.walleij@linaro.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yury <yury.norov@gmail.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1439444244-26057-3-git-send-email-ryabinin.a.a@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/mm/kasan_init_64.c	| 123
-rw-r--r--	include/linux/kasan.h		|   9
-rw-r--r--	mm/kasan/Makefile		|   2
-rw-r--r--	mm/kasan/kasan_init.c		| 152
4 files changed, 167 insertions, 119 deletions
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index e1840f3db5b5..9ce5da27b136 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -12,20 +12,6 @@
 extern pgd_t early_level4_pgt[PTRS_PER_PGD];
 extern struct range pfn_mapped[E820_X_MAX];
 
-static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
-static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
-static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
-
-/*
- * This page used as early shadow. We don't use empty_zero_page
- * at early stages, stack instrumentation could write some garbage
- * to this page.
- * Latter we reuse it as zero shadow for large ranges of memory
- * that allowed to access, but not instrumented by kasan
- * (vmalloc/vmemmap ...).
- */
-static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
-
 static int __init map_range(struct range *range)
 {
 	unsigned long start;
@@ -62,106 +48,6 @@ static void __init kasan_map_early_shadow(pgd_t *pgd)
 	}
 }
 
-static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
-				unsigned long end)
-{
-	pte_t *pte = pte_offset_kernel(pmd, addr);
-
-	while (addr + PAGE_SIZE <= end) {
-		WARN_ON(!pte_none(*pte));
-		set_pte(pte, __pte(__pa_nodebug(kasan_zero_page)
-					| __PAGE_KERNEL_RO));
-		addr += PAGE_SIZE;
-		pte = pte_offset_kernel(pmd, addr);
-	}
-	return 0;
-}
-
-static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
-				unsigned long end)
-{
-	int ret = 0;
-	pmd_t *pmd = pmd_offset(pud, addr);
-
-	while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
-		WARN_ON(!pmd_none(*pmd));
-		set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
-					| _KERNPG_TABLE));
-		addr += PMD_SIZE;
-		pmd = pmd_offset(pud, addr);
-	}
-	if (addr < end) {
-		if (pmd_none(*pmd)) {
-			void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
-			if (!p)
-				return -ENOMEM;
-			set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE));
-		}
-		ret = zero_pte_populate(pmd, addr, end);
-	}
-	return ret;
-}
-
-
-static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
-				unsigned long end)
-{
-	int ret = 0;
-	pud_t *pud = pud_offset(pgd, addr);
-
-	while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
-		WARN_ON(!pud_none(*pud));
-		set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
-					| _KERNPG_TABLE));
-		addr += PUD_SIZE;
-		pud = pud_offset(pgd, addr);
-	}
-
-	if (addr < end) {
-		if (pud_none(*pud)) {
-			void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
-			if (!p)
-				return -ENOMEM;
-			set_pud(pud, __pud(__pa_nodebug(p) | _KERNPG_TABLE));
-		}
-		ret = zero_pmd_populate(pud, addr, end);
-	}
-	return ret;
-}
-
-static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
-{
-	int ret = 0;
-	pgd_t *pgd = pgd_offset_k(addr);
-
-	while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
-		WARN_ON(!pgd_none(*pgd));
-		set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
-					| _KERNPG_TABLE));
-		addr += PGDIR_SIZE;
-		pgd = pgd_offset_k(addr);
-	}
-
-	if (addr < end) {
-		if (pgd_none(*pgd)) {
-			void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
-			if (!p)
-				return -ENOMEM;
-			set_pgd(pgd, __pgd(__pa_nodebug(p) | _KERNPG_TABLE));
-		}
-		ret = zero_pud_populate(pgd, addr, end);
-	}
-	return ret;
-}
-
-
-static void __init populate_zero_shadow(const void *start, const void *end)
-{
-	if (zero_pgd_populate((unsigned long)start, (unsigned long)end))
-		panic("kasan: unable to map zero shadow!");
-}
-
-
 #ifdef CONFIG_KASAN_INLINE
 static int kasan_die_handler(struct notifier_block *self,
 			     unsigned long val,
@@ -213,7 +99,7 @@ void __init kasan_init(void)
 
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
-	populate_zero_shadow((void *)KASAN_SHADOW_START,
+	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
 			kasan_mem_to_shadow((void *)PAGE_OFFSET));
 
 	for (i = 0; i < E820_X_MAX; i++) {
@@ -223,14 +109,15 @@ void __init kasan_init(void)
 		if (map_range(&pfn_mapped[i]))
 			panic("kasan: unable to allocate shadow!");
 	}
-	populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
-			kasan_mem_to_shadow((void *)__START_KERNEL_map));
+	kasan_populate_zero_shadow(
+		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
+		kasan_mem_to_shadow((void *)__START_KERNEL_map));
 
 	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
 			(unsigned long)kasan_mem_to_shadow(_end),
 			NUMA_NO_NODE);
 
-	populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
+	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
 			(void *)KASAN_SHADOW_END);
 
 	memset(kasan_zero_page, 0, PAGE_SIZE);
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 6fb1c7d4292c..4b9f85c963d0 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -12,8 +12,17 @@ struct vm_struct;
 #define KASAN_SHADOW_SCALE_SHIFT 3
 
 #include <asm/kasan.h>
+#include <asm/pgtable.h>
 #include <linux/sched.h>
 
+extern unsigned char kasan_zero_page[PAGE_SIZE];
+extern pte_t kasan_zero_pte[PTRS_PER_PTE];
+extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
+extern pud_t kasan_zero_pud[PTRS_PER_PUD];
+
+void kasan_populate_zero_shadow(const void *shadow_start,
+				const void *shadow_end);
+
 static inline void *kasan_mem_to_shadow(const void *addr)
 {
 	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
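
Since KASAN_SHADOW_SCALE_SHIFT is 3, one shadow byte describes an
8-byte granule, and kasan_mem_to_shadow() compresses an address range
by a factor of 8. A worked example with assumed x86_64 values (the
shadow offset added after the shift is arch-specific):

	/*
	 * Assuming x86_64's KASAN_SHADOW_OFFSET of 0xdffffc0000000000:
	 *
	 *   kasan_mem_to_shadow((void *)0xffff800000000000)
	 *     = (0xffff800000000000 >> 3) + 0xdffffc0000000000
	 *     = 0x1ffff00000000000 + 0xdffffc0000000000
	 *     = 0xffffec0000000000
	 *
	 * i.e. the shadow for the whole kernel half of the address
	 * space fits in one eighth of its size.
	 */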
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index bd837b8c2f41..64710148941e 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -5,4 +5,4 @@ CFLAGS_REMOVE_kasan.o = -pg
 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
 CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 
-obj-y := kasan.o report.o
+obj-y := kasan.o report.o kasan_init.o
diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c
new file mode 100644
index 000000000000..3f9a41cf0ac6
--- /dev/null
+++ b/mm/kasan/kasan_init.c
@@ -0,0 +1,152 @@
+/*
+ * This file contains some kasan initialization code.
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bootmem.h>
+#include <linux/init.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/pfn.h>
+
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+
+/*
+ * This page serves two purposes:
+ *   - It used as early shadow memory. The entire shadow region populated
+ *     with this page, before we will be able to setup normal shadow memory.
+ *   - Latter it reused it as zero shadow to cover large ranges of memory
+ *     that allowed to access, but not handled by kasan (vmalloc/vmemmap ...).
+ */
+unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
+
+#if CONFIG_PGTABLE_LEVELS > 3
+pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
+#endif
+pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
+
+static __init void *early_alloc(size_t size, int node)
+{
+	return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
+					BOOTMEM_ALLOC_ACCESSIBLE, node);
+}
+
+static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
+				unsigned long end)
+{
+	pte_t *pte = pte_offset_kernel(pmd, addr);
+	pte_t zero_pte;
+
+	zero_pte = pfn_pte(PFN_DOWN(__pa(kasan_zero_page)), PAGE_KERNEL);
+	zero_pte = pte_wrprotect(zero_pte);
+
+	while (addr + PAGE_SIZE <= end) {
+		set_pte_at(&init_mm, addr, pte, zero_pte);
+		addr += PAGE_SIZE;
+		pte = pte_offset_kernel(pmd, addr);
+	}
+}
+
+static void __init zero_pmd_populate(pud_t *pud, unsigned long addr,
+				unsigned long end)
+{
+	pmd_t *pmd = pmd_offset(pud, addr);
+	unsigned long next;
+
+	do {
+		next = pmd_addr_end(addr, end);
+
+		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
+			pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+			continue;
+		}
+
+		if (pmd_none(*pmd)) {
+			pmd_populate_kernel(&init_mm, pmd,
+					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+		}
+		zero_pte_populate(pmd, addr, next);
+	} while (pmd++, addr = next, addr != end);
+}
+
+static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
+				unsigned long end)
+{
+	pud_t *pud = pud_offset(pgd, addr);
+	unsigned long next;
+
+	do {
+		next = pud_addr_end(addr, end);
+		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
+			pmd_t *pmd;
+
+			pud_populate(&init_mm, pud, kasan_zero_pmd);
+			pmd = pmd_offset(pud, addr);
+			pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+			continue;
+		}
+
+		if (pud_none(*pud)) {
+			pud_populate(&init_mm, pud,
+				early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+		}
+		zero_pmd_populate(pud, addr, next);
+	} while (pud++, addr = next, addr != end);
+}
+
+/**
+ * kasan_populate_zero_shadow - populate shadow memory region with
+ *                              kasan_zero_page
+ * @shadow_start - start of the memory range to populate
+ * @shadow_end   - end of the memory range to populate
+ */
+void __init kasan_populate_zero_shadow(const void *shadow_start,
+				const void *shadow_end)
+{
+	unsigned long addr = (unsigned long)shadow_start;
+	unsigned long end = (unsigned long)shadow_end;
+	pgd_t *pgd = pgd_offset_k(addr);
+	unsigned long next;
+
+	do {
+		next = pgd_addr_end(addr, end);
+
+		if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
+			pud_t *pud;
+			pmd_t *pmd;
+
+			/*
+			 * kasan_zero_pud should be populated with pmds
+			 * at this moment.
+			 * [pud,pmd]_populate*() below needed only for
+			 * 3,2 - level page tables where we don't have
+			 * puds,pmds, so pgd_populate(), pud_populate()
+			 * is noops.
+			 */
+			pgd_populate(&init_mm, pgd, kasan_zero_pud);
+			pud = pud_offset(pgd, addr);
+			pud_populate(&init_mm, pud, kasan_zero_pmd);
+			pmd = pmd_offset(pud, addr);
+			pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+			continue;
+		}
+
+		if (pgd_none(*pgd)) {
+			pgd_populate(&init_mm, pgd,
+				early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+		}
+		zero_pud_populate(pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
+}
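
The net effect is straightforward to sanity-check: every page-table
walk in a populated range ends at the same write-protected
kasan_zero_page, so each shadow byte reads as 0 ("fully accessible")
while a stray shadow write faults. A hedged sketch of such a check
(check_zero_shadow() is illustrative only, not part of this patch):

	#include <linux/bug.h>		/* WARN_ON() */
	#include <asm/page.h>		/* PAGE_SIZE */

	static void __init check_zero_shadow(const void *shadow_start,
					     const void *shadow_end)
	{
		const unsigned char *s = shadow_start;

		/* One probe per page is enough: every page in the range
		 * aliases kasan_zero_page, which lives in .bss and is
		 * therefore zero. */
		for (; s < (const unsigned char *)shadow_end; s += PAGE_SIZE)
			WARN_ON(*s != 0);
	}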