author     Andrey Ryabinin <aryabinin@virtuozzo.com>       2018-08-17 18:47:04 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-08-17 19:20:30 -0400
commit     0207df4fa1a869281ddbf72db6203dbf036b3e1a
tree       ff843a903d9149fd06632658d4984dc724e5e7fa /mm/kasan
parent     50f8b92f21d23789bd4ada593e8ddc56cc4f79fe
kernel/memremap, kasan: make ZONE_DEVICE work with KASAN
KASAN learns about hot-added memory via the memory hotplug notifier.
devm_memremap_pages() intentionally skips calling the memory hotplug
notifiers, so KASAN doesn't know anything about new memory added by
devm_memremap_pages(). This causes a crash when KASAN tries to access
non-existent shadow memory (the address-to-shadow mapping is sketched
after the trace below):
BUG: unable to handle kernel paging request at ffffed0078000000
RIP: 0010:check_memory_region+0x82/0x1e0
Call Trace:
memcpy+0x1f/0x50
pmem_do_bvec+0x163/0x720
pmem_make_request+0x305/0xac0
generic_make_request+0x54f/0xcf0
submit_bio+0x9c/0x370
submit_bh_wbc+0x4c7/0x700
block_read_full_page+0x5ef/0x870
do_read_cache_page+0x2b8/0xb30
read_dev_sector+0xbd/0x3f0
read_lba.isra.0+0x277/0x670
efi_partition+0x41a/0x18f0
check_partition+0x30d/0x5e9
rescan_partitions+0x18c/0x840
__blkdev_get+0x859/0x1060
blkdev_get+0x23f/0x810
__device_add_disk+0x9c8/0xde0
pmem_attach_disk+0x9a8/0xf50
nvdimm_bus_probe+0xf3/0x3c0
driver_probe_device+0x493/0xbd0
bus_for_each_drv+0x118/0x1b0
__device_attach+0x1cd/0x2b0
bus_probe_device+0x1ac/0x260
device_add+0x90d/0x1380
nd_async_device_register+0xe/0x50
async_run_entry_fn+0xc3/0x5d0
process_one_work+0xa0a/0x1810
worker_thread+0x87/0xe80
kthread+0x2d7/0x390
ret_from_fork+0x3a/0x50
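
For reference, every KASAN check first translates the accessed address
into its shadow byte; the helper below mirrors kasan_mem_to_shadow()
from include/linux/kasan.h. When nothing is mapped at the resulting
shadow address, the check itself takes the page fault seen in
check_memory_region() above:

/* Mirrors kasan_mem_to_shadow() from include/linux/kasan.h: each shadow
 * byte tracks KASAN_SHADOW_SCALE_SIZE (8) bytes of ordinary memory. */
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}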
Add kasan_add_zero_shadow()/kasan_remove_zero_shadow() - a post-mm_init()
interface to map/unmap kasan_zero_page at requested virtual addresses -
and use it to add/remove the shadow memory for hotplugged/unplugged
device memory.
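
The caller side lands in kernel/memremap.c and is outside the
diffstat-limited view below; roughly, as a sketch with error handling
abbreviated (names as in devm_memremap_pages()):

	/* Back the remapped range with zero shadow before first use ... */
	ret = kasan_add_zero_shadow(__va(align_start), align_size);
	if (ret)
		goto err_kasan;

	/* ... and tear the shadow down again when the pages are released: */
	kasan_remove_zero_shadow(__va(align_start), align_size);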
Link: http://lkml.kernel.org/r/20180629164932.740-1-aryabinin@virtuozzo.com
Fixes: 41e94a851304 ("add devm_memremap_pages")
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reported-by: Dave Chinner <david@fromorbit.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: Dan Williams <dan.j.williams@intel.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Alexander Potapenko <glider@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/kasan')
-rw-r--r--  mm/kasan/kasan_init.c | 316 ++++++++++++++++++++++++++++++++++---
1 file changed, 303 insertions(+), 13 deletions(-)
diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c
index f436246ccc79..7a2a2f13f86f 100644
--- a/mm/kasan/kasan_init.c
+++ b/mm/kasan/kasan_init.c
@@ -17,10 +17,13 @@
 #include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/pfn.h>
+#include <linux/slab.h>
 
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 
+#include "kasan.h"
+
 /*
  * This page serves two purposes:
  *   - It used as early shadow memory. The entire shadow region populated
@@ -32,22 +35,59 @@ unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
 
 #if CONFIG_PGTABLE_LEVELS > 4
 p4d_t kasan_zero_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
+static inline bool kasan_p4d_table(pgd_t pgd)
+{
+	return pgd_page(pgd) == virt_to_page(lm_alias(kasan_zero_p4d));
+}
+#else
+static inline bool kasan_p4d_table(pgd_t pgd)
+{
+	return 0;
+}
 #endif
 #if CONFIG_PGTABLE_LEVELS > 3
 pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
+static inline bool kasan_pud_table(p4d_t p4d)
+{
+	return p4d_page(p4d) == virt_to_page(lm_alias(kasan_zero_pud));
+}
+#else
+static inline bool kasan_pud_table(p4d_t p4d)
+{
+	return 0;
+}
 #endif
 #if CONFIG_PGTABLE_LEVELS > 2
 pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static inline bool kasan_pmd_table(pud_t pud)
+{
+	return pud_page(pud) == virt_to_page(lm_alias(kasan_zero_pmd));
+}
+#else
+static inline bool kasan_pmd_table(pud_t pud)
+{
+	return 0;
+}
 #endif
 pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
 
+static inline bool kasan_pte_table(pmd_t pmd)
+{
+	return pmd_page(pmd) == virt_to_page(lm_alias(kasan_zero_pte));
+}
+
+static inline bool kasan_zero_page_entry(pte_t pte)
+{
+	return pte_page(pte) == virt_to_page(lm_alias(kasan_zero_page));
+}
+
 static __init void *early_alloc(size_t size, int node)
 {
 	return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
 					BOOTMEM_ALLOC_ACCESSIBLE, node);
 }
 
-static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
+static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
 				unsigned long end)
 {
 	pte_t *pte = pte_offset_kernel(pmd, addr);
@@ -63,7 +103,7 @@ static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
 	}
 }
 
-static void __init zero_pmd_populate(pud_t *pud, unsigned long addr,
+static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
 				unsigned long end)
 {
 	pmd_t *pmd = pmd_offset(pud, addr);
@@ -78,14 +118,24 @@ static void __init zero_pmd_populate(pud_t *pud, unsigned long addr,
 		}
 
 		if (pmd_none(*pmd)) {
-			pmd_populate_kernel(&init_mm, pmd,
-					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+			pte_t *p;
+
+			if (slab_is_available())
+				p = pte_alloc_one_kernel(&init_mm, addr);
+			else
+				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+			if (!p)
+				return -ENOMEM;
+
+			pmd_populate_kernel(&init_mm, pmd, p);
 		}
 		zero_pte_populate(pmd, addr, next);
 	} while (pmd++, addr = next, addr != end);
+
+	return 0;
 }
 
-static void __init zero_pud_populate(p4d_t *p4d, unsigned long addr,
+static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
 				unsigned long end)
 {
 	pud_t *pud = pud_offset(p4d, addr);
@@ -103,14 +153,24 @@ static void __init zero_pud_populate(p4d_t *p4d, unsigned long addr,
 		}
 
 		if (pud_none(*pud)) {
-			pud_populate(&init_mm, pud,
-				early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+			pmd_t *p;
+
+			if (slab_is_available()) {
+				p = pmd_alloc(&init_mm, pud, addr);
+				if (!p)
+					return -ENOMEM;
+			} else {
+				pud_populate(&init_mm, pud,
+					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+			}
 		}
 		zero_pmd_populate(pud, addr, next);
 	} while (pud++, addr = next, addr != end);
+
+	return 0;
 }
 
-static void __init zero_p4d_populate(pgd_t *pgd, unsigned long addr,
+static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
 				unsigned long end)
 {
 	p4d_t *p4d = p4d_offset(pgd, addr);
@@ -132,11 +192,21 @@ static void __init zero_p4d_populate(pgd_t *pgd, unsigned long addr,
 		}
 
 		if (p4d_none(*p4d)) {
-			p4d_populate(&init_mm, p4d,
-				early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+			pud_t *p;
+
+			if (slab_is_available()) {
+				p = pud_alloc(&init_mm, p4d, addr);
+				if (!p)
+					return -ENOMEM;
+			} else {
+				p4d_populate(&init_mm, p4d,
+					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+			}
 		}
 		zero_pud_populate(p4d, addr, next);
 	} while (p4d++, addr = next, addr != end);
+
+	return 0;
 }
 
 /**
@@ -145,7 +215,7 @@ static void __init zero_p4d_populate(pgd_t *pgd, unsigned long addr,
  * @shadow_start - start of the memory range to populate
  * @shadow_end   - end of the memory range to populate
  */
-void __init kasan_populate_zero_shadow(const void *shadow_start,
+int __ref kasan_populate_zero_shadow(const void *shadow_start,
 				const void *shadow_end)
 {
 	unsigned long addr = (unsigned long)shadow_start;
@@ -191,9 +261,229 @@ void __init kasan_populate_zero_shadow(const void *shadow_start,
 		}
 
 		if (pgd_none(*pgd)) {
-			pgd_populate(&init_mm, pgd,
-				early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+			p4d_t *p;
+
+			if (slab_is_available()) {
+				p = p4d_alloc(&init_mm, pgd, addr);
+				if (!p)
+					return -ENOMEM;
+			} else {
+				pgd_populate(&init_mm, pgd,
+					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+			}
 		}
 		zero_p4d_populate(pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
+
+	return 0;
+}
+
+static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
+{
+	pte_t *pte;
+	int i;
+
+	for (i = 0; i < PTRS_PER_PTE; i++) {
+		pte = pte_start + i;
+		if (!pte_none(*pte))
+			return;
+	}
+
+	pte_free_kernel(&init_mm, (pte_t *)page_to_virt(pmd_page(*pmd)));
+	pmd_clear(pmd);
+}
+
+static void kasan_free_pmd(pmd_t *pmd_start, pud_t *pud)
+{
+	pmd_t *pmd;
+	int i;
+
+	for (i = 0; i < PTRS_PER_PMD; i++) {
+		pmd = pmd_start + i;
+		if (!pmd_none(*pmd))
+			return;
+	}
+
+	pmd_free(&init_mm, (pmd_t *)page_to_virt(pud_page(*pud)));
+	pud_clear(pud);
+}
+
+static void kasan_free_pud(pud_t *pud_start, p4d_t *p4d)
+{
+	pud_t *pud;
+	int i;
+
+	for (i = 0; i < PTRS_PER_PUD; i++) {
+		pud = pud_start + i;
+		if (!pud_none(*pud))
+			return;
+	}
+
+	pud_free(&init_mm, (pud_t *)page_to_virt(p4d_page(*p4d)));
+	p4d_clear(p4d);
+}
+
+static void kasan_free_p4d(p4d_t *p4d_start, pgd_t *pgd)
+{
+	p4d_t *p4d;
+	int i;
+
+	for (i = 0; i < PTRS_PER_P4D; i++) {
+		p4d = p4d_start + i;
+		if (!p4d_none(*p4d))
+			return;
+	}
+
+	p4d_free(&init_mm, (p4d_t *)page_to_virt(pgd_page(*pgd)));
+	pgd_clear(pgd);
+}
+
+static void kasan_remove_pte_table(pte_t *pte, unsigned long addr,
+				unsigned long end)
+{
+	unsigned long next;
+
+	for (; addr < end; addr = next, pte++) {
+		next = (addr + PAGE_SIZE) & PAGE_MASK;
+		if (next > end)
+			next = end;
+
+		if (!pte_present(*pte))
+			continue;
+
+		if (WARN_ON(!kasan_zero_page_entry(*pte)))
+			continue;
+		pte_clear(&init_mm, addr, pte);
+	}
+}
+
+static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
+				unsigned long end)
+{
+	unsigned long next;
+
+	for (; addr < end; addr = next, pmd++) {
+		pte_t *pte;
+
+		next = pmd_addr_end(addr, end);
+
+		if (!pmd_present(*pmd))
+			continue;
+
+		if (kasan_pte_table(*pmd)) {
+			if (IS_ALIGNED(addr, PMD_SIZE) &&
+			    IS_ALIGNED(next, PMD_SIZE))
+				pmd_clear(pmd);
+			continue;
+		}
+		pte = pte_offset_kernel(pmd, addr);
+		kasan_remove_pte_table(pte, addr, next);
+		kasan_free_pte(pte_offset_kernel(pmd, 0), pmd);
+	}
+}
+
+static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
+				unsigned long end)
+{
+	unsigned long next;
+
+	for (; addr < end; addr = next, pud++) {
+		pmd_t *pmd, *pmd_base;
+
+		next = pud_addr_end(addr, end);
+
+		if (!pud_present(*pud))
+			continue;
+
+		if (kasan_pmd_table(*pud)) {
+			if (IS_ALIGNED(addr, PUD_SIZE) &&
+			    IS_ALIGNED(next, PUD_SIZE))
+				pud_clear(pud);
+			continue;
+		}
+		pmd = pmd_offset(pud, addr);
+		pmd_base = pmd_offset(pud, 0);
+		kasan_remove_pmd_table(pmd, addr, next);
+		kasan_free_pmd(pmd_base, pud);
+	}
+}
+
+static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
+				unsigned long end)
+{
+	unsigned long next;
+
+	for (; addr < end; addr = next, p4d++) {
+		pud_t *pud;
+
+		next = p4d_addr_end(addr, end);
+
+		if (!p4d_present(*p4d))
+			continue;
+
+		if (kasan_pud_table(*p4d)) {
+			if (IS_ALIGNED(addr, P4D_SIZE) &&
+			    IS_ALIGNED(next, P4D_SIZE))
+				p4d_clear(p4d);
+			continue;
+		}
+		pud = pud_offset(p4d, addr);
+		kasan_remove_pud_table(pud, addr, next);
+		kasan_free_pud(pud_offset(p4d, 0), p4d);
+	}
+}
+
+void kasan_remove_zero_shadow(void *start, unsigned long size)
+{
+	unsigned long addr, end, next;
+	pgd_t *pgd;
+
+	addr = (unsigned long)kasan_mem_to_shadow(start);
+	end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);
+
+	if (WARN_ON((unsigned long)start %
+			(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
+	    WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
+		return;
+
+	for (; addr < end; addr = next) {
+		p4d_t *p4d;
+
+		next = pgd_addr_end(addr, end);
+
+		pgd = pgd_offset_k(addr);
+		if (!pgd_present(*pgd))
+			continue;
+
+		if (kasan_p4d_table(*pgd)) {
+			if (IS_ALIGNED(addr, PGDIR_SIZE) &&
+			    IS_ALIGNED(next, PGDIR_SIZE))
+				pgd_clear(pgd);
+			continue;
+		}
+
+		p4d = p4d_offset(pgd, addr);
+		kasan_remove_p4d_table(p4d, addr, next);
+		kasan_free_p4d(p4d_offset(pgd, 0), pgd);
+	}
+}
+
+int kasan_add_zero_shadow(void *start, unsigned long size)
+{
+	int ret;
+	void *shadow_start, *shadow_end;
+
+	shadow_start = kasan_mem_to_shadow(start);
+	shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);
+
+	if (WARN_ON((unsigned long)start %
+			(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
+	    WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
+		return -EINVAL;
+
+	ret = kasan_populate_zero_shadow(shadow_start, shadow_end);
+	if (ret)
+		kasan_remove_zero_shadow(shadow_start,
+					size >> KASAN_SHADOW_SCALE_SHIFT);
+	return ret;
 }
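
A closing note on the WARN_ON() alignment checks in
kasan_add_zero_shadow()/kasan_remove_zero_shadow(): one shadow byte
tracks KASAN_SHADOW_SCALE_SIZE bytes (the constant comes from
mm/kasan/kasan.h, hence the new #include "kasan.h"), so one shadow page
covers KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE of memory, and only ranges
aligned to that granule map cleanly onto whole kasan_zero_page entries.
A minimal userspace sketch of the arithmetic, assuming x86-64 constants
(KASAN_SHADOW_SCALE_SHIFT == 3, PAGE_SIZE == 4096); the 512M range is
illustrative only:

#include <stdio.h>

#define PAGE_SIZE			4096UL
#define KASAN_SHADOW_SCALE_SHIFT	3
#define KASAN_SHADOW_SCALE_SIZE		(1UL << KASAN_SHADOW_SCALE_SHIFT)

int main(void)
{
	unsigned long size = 512UL << 20;	/* hypothetical 512M device-memory range */
	unsigned long shadow_size = size >> KASAN_SHADOW_SCALE_SHIFT;
	unsigned long granule = KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE;

	/* 512M of memory needs 64M of shadow; start and size must be
	 * 32K-aligned, matching the WARN_ON() checks in the patch. */
	printf("shadow: %luM, alignment granule: %luK, aligned: %d\n",
	       shadow_size >> 20, granule >> 10, size % granule == 0);
	return 0;
}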