author		Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 19:55:46 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 19:55:46 -0500
commit		f346b0becb1bc62e45495f9cdbae3eef35d0b635 (patch)
tree		ae79f3dfb8e031da51d38f0f095f89d7d23f3643 /mm/kasan/init.c
parent		00d59fde8532b2d42e80909d2e58678755e04da9 (diff)
parent		0f4991e8fd48987ae476a92cdee6bfec4aff31b8 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
- large KASAN update to use arm's "software tag-based mode"
- a few misc things
- sh updates
- ocfs2 updates
- just about all of MM
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (167 commits)
kernel/fork.c: mark 'stack_vm_area' with __maybe_unused
memcg, oom: notify on oom killer invocation from the charge path
mm, swap: fix swapoff with KSM pages
include/linux/gfp.h: fix typo
mm/hmm: fix memremap.h, move dev_page_fault_t callback to hmm
hugetlbfs: Use i_mmap_rwsem to fix page fault/truncate race
hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization
memory_hotplug: add missing newlines to debugging output
mm: remove __hugepage_set_anon_rmap()
include/linux/vmstat.h: remove unused page state adjustment macro
mm/page_alloc.c: allow error injection
mm: migrate: drop unused argument of migrate_page_move_mapping()
blkdev: avoid migration stalls for blkdev pages
mm: migrate: provide buffer_migrate_page_norefs()
mm: migrate: move migrate_page_lock_buffers()
mm: migrate: lock buffers before migrate_page_move_mapping()
mm: migration: factor out code to compute expected number of page references
mm, page_alloc: enable pcpu_drain with zone capability
kmemleak: add config to select auto scan
mm/page_alloc.c: don't call kasan_free_pages() at deferred mem init
...
Diffstat (limited to 'mm/kasan/init.c')
-rw-r--r--	mm/kasan/init.c	499
1 file changed, 499 insertions(+), 0 deletions(-)
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
new file mode 100644
index 000000000000..34afad56497b
--- /dev/null
+++ b/mm/kasan/init.c
@@ -0,0 +1,499 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains some kasan initialization code.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/pgalloc.h>

#include "kasan.h"

/*
 * This page serves two purposes:
 *   - It is used as early shadow memory. The entire shadow region is
 *     populated with this page before we are able to set up the normal
 *     shadow memory.
 *   - Later it is reused as the zero shadow to cover large ranges of
 *     memory that are allowed to be accessed but are not handled by
 *     kasan (vmalloc/vmemmap ...).
 */
unsigned char kasan_early_shadow_page[PAGE_SIZE] __page_aligned_bss;

#if CONFIG_PGTABLE_LEVELS > 4
p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));
}
#else
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
pud_t kasan_early_shadow_pud[PTRS_PER_PUD] __page_aligned_bss;
static inline bool kasan_pud_table(p4d_t p4d)
{
	return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
}
#else
static inline bool kasan_pud_table(p4d_t p4d)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD] __page_aligned_bss;
static inline bool kasan_pmd_table(pud_t pud)
{
	return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
}
#else
static inline bool kasan_pmd_table(pud_t pud)
{
	return false;
}
#endif
pte_t kasan_early_shadow_pte[PTRS_PER_PTE] __page_aligned_bss;

static inline bool kasan_pte_table(pmd_t pmd)
{
	return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));
}

static inline bool kasan_early_shadow_page_entry(pte_t pte)
{
	return pte_page(pte) == virt_to_page(lm_alias(kasan_early_shadow_page));
}

static __init void *early_alloc(size_t size, int node)
{
	return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
				      MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
				    unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	pte_t zero_pte;

	zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_early_shadow_page)),
			   PAGE_KERNEL);
	zero_pte = pte_wrprotect(zero_pte);

	while (addr + PAGE_SIZE <= end) {
		set_pte_at(&init_mm, addr, pte, zero_pte);
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}
}

static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
				   unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
			pmd_populate_kernel(&init_mm, pmd,
					    lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pmd_none(*pmd)) {
			pte_t *p;

			if (slab_is_available())
				p = pte_alloc_one_kernel(&init_mm, addr);
			else
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
			if (!p)
				return -ENOMEM;

			pmd_populate_kernel(&init_mm, pmd, p);
		}
		zero_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
				   unsigned long end)
{
	pud_t *pud = pud_offset(p4d, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
			pmd_t *pmd;

			pud_populate(&init_mm, pud,
				     lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					    lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pud_none(*pud)) {
			pmd_t *p;

			if (slab_is_available()) {
				p = pmd_alloc(&init_mm, pud, addr);
				if (!p)
					return -ENOMEM;
			} else {
				pud_populate(&init_mm, pud,
					     early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end);

	return 0;
}

static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
				   unsigned long end)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long next;

	do {
		next = p4d_addr_end(addr, end);
		if (IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
			pud_t *pud;
			pmd_t *pmd;

			p4d_populate(&init_mm, p4d,
				     lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
				     lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					    lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (p4d_none(*p4d)) {
			pud_t *p;

			if (slab_is_available()) {
				p = pud_alloc(&init_mm, p4d, addr);
				if (!p)
					return -ENOMEM;
			} else {
				p4d_populate(&init_mm, p4d,
					     early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_pud_populate(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);

	return 0;
}

/**
 * kasan_populate_early_shadow - populate shadow memory region with
 *                               kasan_early_shadow_page
 * @shadow_start: start of the memory range to populate
 * @shadow_end: end of the memory range to populate
 */
int __ref kasan_populate_early_shadow(const void *shadow_start,
				      const void *shadow_end)
{
	unsigned long addr = (unsigned long)shadow_start;
	unsigned long end = (unsigned long)shadow_end;
	pgd_t *pgd = pgd_offset_k(addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			/*
			 * kasan_early_shadow_pud should be populated with pmds
			 * at this point.
			 * [pud,pmd]_populate*() below are needed only for
			 * 3- and 2-level page tables, where we don't have
			 * puds/pmds, so pgd_populate() and pud_populate()
			 * are nops.
			 *
			 * The ifndef is required to avoid build breakage.
			 *
			 * With 5level-fixup.h, pgd_populate() is not a nop and
			 * we reference kasan_early_shadow_p4d. It's not defined
			 * unless 5-level paging is enabled.
			 *
			 * The ifndef can be dropped once all KASAN-enabled
			 * architectures switch to pgtable-nop4d.h.
			 */
#ifndef __ARCH_HAS_5LEVEL_HACK
			pgd_populate(&init_mm, pgd,
				     lm_alias(kasan_early_shadow_p4d));
#endif
			p4d = p4d_offset(pgd, addr);
			p4d_populate(&init_mm, p4d,
				     lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
				     lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					    lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pgd_none(*pgd)) {
			p4d_t *p;

			if (slab_is_available()) {
				p = p4d_alloc(&init_mm, pgd, addr);
				if (!p)
					return -ENOMEM;
			} else {
				pgd_populate(&init_mm, pgd,
					     early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);

	return 0;
}

static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, (pte_t *)page_to_virt(pmd_page(*pmd)));
	pmd_clear(pmd);
}

static void kasan_free_pmd(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, (pmd_t *)page_to_virt(pud_page(*pud)));
	pud_clear(pud);
}

static void kasan_free_pud(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	pud_free(&init_mm, (pud_t *)page_to_virt(p4d_page(*p4d)));
	p4d_clear(p4d);
}

static void kasan_free_p4d(p4d_t *p4d_start, pgd_t *pgd)
{
	p4d_t *p4d;
	int i;

	for (i = 0; i < PTRS_PER_P4D; i++) {
		p4d = p4d_start + i;
		if (!p4d_none(*p4d))
			return;
	}

	p4d_free(&init_mm, (p4d_t *)page_to_virt(pgd_page(*pgd)));
	pgd_clear(pgd);
}

static void kasan_remove_pte_table(pte_t *pte, unsigned long addr,
				   unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (WARN_ON(!kasan_early_shadow_page_entry(*pte)))
			continue;
		pte_clear(&init_mm, addr, pte);
	}
}

static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
				   unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pmd++) {
		pte_t *pte;

		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (kasan_pte_table(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE))
				pmd_clear(pmd);
			continue;
		}
		pte = pte_offset_kernel(pmd, addr);
		kasan_remove_pte_table(pte, addr, next);
		kasan_free_pte(pte_offset_kernel(pmd, 0), pmd);
	}
}

static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
				   unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pud++) {
		pmd_t *pmd, *pmd_base;

		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (kasan_pmd_table(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE))
				pud_clear(pud);
			continue;
		}
		pmd = pmd_offset(pud, addr);
		pmd_base = pmd_offset(pud, 0);
		kasan_remove_pmd_table(pmd, addr, next);
		kasan_free_pmd(pmd_base, pud);
	}
}

static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
				   unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, p4d++) {
		pud_t *pud;

		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		if (kasan_pud_table(*p4d)) {
			if (IS_ALIGNED(addr, P4D_SIZE) &&
			    IS_ALIGNED(next, P4D_SIZE))
				p4d_clear(p4d);
			continue;
		}
		pud = pud_offset(p4d, addr);
		kasan_remove_pud_table(pud, addr, next);
		kasan_free_pud(pud_offset(p4d, 0), p4d);
	}
}

void kasan_remove_zero_shadow(void *start, unsigned long size)
{
	unsigned long addr, end, next;
	pgd_t *pgd;

	addr = (unsigned long)kasan_mem_to_shadow(start);
	end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start %
			(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
	    WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
		return;

	for (; addr < end; addr = next) {
		p4d_t *p4d;

		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (kasan_p4d_table(*pgd)) {
			if (IS_ALIGNED(addr, PGDIR_SIZE) &&
			    IS_ALIGNED(next, PGDIR_SIZE))
				pgd_clear(pgd);
			continue;
		}

		p4d = p4d_offset(pgd, addr);
		kasan_remove_p4d_table(p4d, addr, next);
		kasan_free_p4d(p4d_offset(pgd, 0), pgd);
	}
}

int kasan_add_zero_shadow(void *start, unsigned long size)
{
	int ret;
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(start);
	shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start %
			(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
	    WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
		return -EINVAL;

	ret = kasan_populate_early_shadow(shadow_start, shadow_end);
	if (ret)
		/*
		 * kasan_remove_zero_shadow() expects the original memory
		 * range, not its shadow: it does the shadow translation
		 * itself.
		 */
		kasan_remove_zero_shadow(start, size);
	return ret;
}