Diffstat (limited to 'mm/kasan/common.c')
-rw-r--r--  mm/kasan/common.c  697
1 file changed, 697 insertions, 0 deletions
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
new file mode 100644
index 000000000000..03d5d1374ca7
--- /dev/null
+++ b/mm/kasan/common.c
@@ -0,0 +1,697 @@
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * This file contains common generic and tag-based KASAN code. | ||
4 | * | ||
5 | * Copyright (c) 2014 Samsung Electronics Co., Ltd. | ||
6 | * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> | ||
7 | * | ||
8 | * Some code borrowed from https://github.com/xairy/kasan-prototype by | ||
9 | * Andrey Konovalov <andreyknvl@gmail.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/export.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/kasan.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/kmemleak.h> | ||
23 | #include <linux/linkage.h> | ||
24 | #include <linux/memblock.h> | ||
25 | #include <linux/memory.h> | ||
26 | #include <linux/mm.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/printk.h> | ||
29 | #include <linux/sched.h> | ||
30 | #include <linux/sched/task_stack.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/stacktrace.h> | ||
33 | #include <linux/string.h> | ||
34 | #include <linux/types.h> | ||
35 | #include <linux/vmalloc.h> | ||
36 | #include <linux/bug.h> | ||
37 | |||
38 | #include "kasan.h" | ||
39 | #include "../slab.h" | ||
40 | |||
41 | static inline int in_irqentry_text(unsigned long ptr) | ||
42 | { | ||
43 | return (ptr >= (unsigned long)&__irqentry_text_start && | ||
44 | ptr < (unsigned long)&__irqentry_text_end) || | ||
45 | (ptr >= (unsigned long)&__softirqentry_text_start && | ||
46 | ptr < (unsigned long)&__softirqentry_text_end); | ||
47 | } | ||
48 | |||
49 | static inline void filter_irq_stacks(struct stack_trace *trace) | ||
50 | { | ||
51 | int i; | ||
52 | |||
53 | if (!trace->nr_entries) | ||
54 | return; | ||
55 | for (i = 0; i < trace->nr_entries; i++) | ||
56 | if (in_irqentry_text(trace->entries[i])) { | ||
57 | /* Include the irqentry function into the stack. */ | ||
58 | trace->nr_entries = i + 1; | ||
59 | break; | ||
60 | } | ||
61 | } | ||
62 | |||
63 | static inline depot_stack_handle_t save_stack(gfp_t flags) | ||
64 | { | ||
65 | unsigned long entries[KASAN_STACK_DEPTH]; | ||
66 | struct stack_trace trace = { | ||
67 | .nr_entries = 0, | ||
68 | .entries = entries, | ||
69 | .max_entries = KASAN_STACK_DEPTH, | ||
70 | .skip = 0 | ||
71 | }; | ||
72 | |||
73 | save_stack_trace(&trace); | ||
74 | filter_irq_stacks(&trace); | ||
75 | if (trace.nr_entries != 0 && | ||
76 | trace.entries[trace.nr_entries-1] == ULONG_MAX) | ||
77 | trace.nr_entries--; | ||
78 | |||
79 | return depot_save_stack(&trace, flags); | ||
80 | } | ||
81 | |||
82 | static inline void set_track(struct kasan_track *track, gfp_t flags) | ||
83 | { | ||
84 | track->pid = current->pid; | ||
85 | track->stack = save_stack(flags); | ||
86 | } | ||
87 | |||
88 | void kasan_enable_current(void) | ||
89 | { | ||
90 | current->kasan_depth++; | ||
91 | } | ||
92 | |||
93 | void kasan_disable_current(void) | ||
94 | { | ||
95 | current->kasan_depth--; | ||
96 | } | ||
97 | |||
98 | void kasan_check_read(const volatile void *p, unsigned int size) | ||
99 | { | ||
100 | check_memory_region((unsigned long)p, size, false, _RET_IP_); | ||
101 | } | ||
102 | EXPORT_SYMBOL(kasan_check_read); | ||
103 | |||
104 | void kasan_check_write(const volatile void *p, unsigned int size) | ||
105 | { | ||
106 | check_memory_region((unsigned long)p, size, true, _RET_IP_); | ||
107 | } | ||
108 | EXPORT_SYMBOL(kasan_check_write); | ||
109 | |||
110 | #undef memset | ||
111 | void *memset(void *addr, int c, size_t len) | ||
112 | { | ||
113 | check_memory_region((unsigned long)addr, len, true, _RET_IP_); | ||
114 | |||
115 | return __memset(addr, c, len); | ||
116 | } | ||
117 | |||
118 | #undef memmove | ||
119 | void *memmove(void *dest, const void *src, size_t len) | ||
120 | { | ||
121 | check_memory_region((unsigned long)src, len, false, _RET_IP_); | ||
122 | check_memory_region((unsigned long)dest, len, true, _RET_IP_); | ||
123 | |||
124 | return __memmove(dest, src, len); | ||
125 | } | ||
126 | |||
127 | #undef memcpy | ||
128 | void *memcpy(void *dest, const void *src, size_t len) | ||
129 | { | ||
130 | check_memory_region((unsigned long)src, len, false, _RET_IP_); | ||
131 | check_memory_region((unsigned long)dest, len, true, _RET_IP_); | ||
132 | |||
133 | return __memcpy(dest, src, len); | ||
134 | } | ||
135 | |||
136 | /* | ||
137 | * Poisons the shadow memory for 'size' bytes starting from 'address'. | ||
138 | * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE. | ||
139 | */ | ||
140 | void kasan_poison_shadow(const void *address, size_t size, u8 value) | ||
141 | { | ||
142 | void *shadow_start, *shadow_end; | ||
143 | |||
144 | /* | ||
145 | * Perform shadow offset calculation based on untagged address, as | ||
146 | * some of the callers (e.g. kasan_poison_object_data) pass tagged | ||
147 | * addresses to this function. | ||
148 | */ | ||
149 | address = reset_tag(address); | ||
150 | |||
151 | shadow_start = kasan_mem_to_shadow(address); | ||
152 | shadow_end = kasan_mem_to_shadow(address + size); | ||
153 | |||
154 | __memset(shadow_start, value, shadow_end - shadow_start); | ||
155 | } | ||
156 | |||
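The shadow_start/shadow_end computation above goes through kasan_mem_to_shadow(), defined in kasan.h. A minimal userspace sketch of that linear mapping, assuming the usual scale shift of 3 (8 bytes of memory per shadow byte) and a placeholder shadow offset:

    /* Illustrative sketch, not from the patch: the linear mapping behind
     * kasan_mem_to_shadow(). SHADOW_OFFSET here is a placeholder value.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define SHADOW_SHIFT  3                        /* 8 bytes of memory per shadow byte */
    #define SHADOW_OFFSET 0xdffffc0000000000ULL    /* placeholder KASAN_SHADOW_OFFSET */

    static uint64_t mem_to_shadow(uint64_t addr)
    {
            return (addr >> SHADOW_SHIFT) + SHADOW_OFFSET;
    }

    int main(void)
    {
            uint64_t addr = 0xffff888000001000ULL; /* made-up kernel address */

            /* A page of memory is covered by 4096 / 8 = 512 shadow bytes. */
            printf("shadow of [%#llx, +4096) starts at %#llx and spans %llu bytes\n",
                   (unsigned long long)addr,
                   (unsigned long long)mem_to_shadow(addr),
                   (unsigned long long)(mem_to_shadow(addr + 4096) - mem_to_shadow(addr)));
            return 0;
    }
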
157 | void kasan_unpoison_shadow(const void *address, size_t size) | ||
158 | { | ||
159 | u8 tag = get_tag(address); | ||
160 | |||
161 | /* | ||
162 | * Perform shadow offset calculation based on untagged address, as | ||
163 | * some of the callers (e.g. kasan_unpoison_object_data) pass tagged | ||
164 | * addresses to this function. | ||
165 | */ | ||
166 | address = reset_tag(address); | ||
167 | |||
168 | kasan_poison_shadow(address, size, tag); | ||
169 | |||
170 | if (size & KASAN_SHADOW_MASK) { | ||
171 | u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size); | ||
172 | |||
173 | if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) | ||
174 | *shadow = tag; | ||
175 | else | ||
176 | *shadow = size & KASAN_SHADOW_MASK; | ||
177 | } | ||
178 | } | ||
179 | |||
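The partial-granule branch of kasan_unpoison_shadow() is easiest to see with concrete numbers. A small standalone sketch, assuming the usual 8-byte granule, of what the generic mode ends up writing to shadow:

    /* Illustrative sketch, not from the patch: the generic-mode shadow values
     * written when unpoisoning 'size' bytes, for the usual 8-byte granule.
     */
    #include <stdio.h>

    #define SCALE_SIZE 8                   /* bytes covered by one shadow byte */
    #define SCALE_MASK (SCALE_SIZE - 1)

    static void show_shadow(size_t size)
    {
            size_t full = size / SCALE_SIZE;        /* fully addressable granules -> shadow 0 */
            unsigned int partial = size & SCALE_MASK;

            printf("size %zu: %zu shadow byte(s) = 0", size, full);
            if (partial)
                    printf(", then one shadow byte = %u (first %u bytes valid)",
                           partial, partial);
            printf("\n");
    }

    int main(void)
    {
            show_shadow(24);        /* exact multiple of the granule */
            show_shadow(13);        /* trailing shadow byte becomes 13 & 7 = 5 */
            return 0;
    }
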
180 | static void __kasan_unpoison_stack(struct task_struct *task, const void *sp) | ||
181 | { | ||
182 | void *base = task_stack_page(task); | ||
183 | size_t size = sp - base; | ||
184 | |||
185 | kasan_unpoison_shadow(base, size); | ||
186 | } | ||
187 | |||
188 | /* Unpoison the entire stack for a task. */ | ||
189 | void kasan_unpoison_task_stack(struct task_struct *task) | ||
190 | { | ||
191 | __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE); | ||
192 | } | ||
193 | |||
194 | /* Unpoison the stack for the current task beyond a watermark sp value. */ | ||
195 | asmlinkage void kasan_unpoison_task_stack_below(const void *watermark) | ||
196 | { | ||
197 | /* | ||
198 | * Calculate the task stack base address. Avoid using 'current' | ||
199 | * because this function is called by early resume code which hasn't | ||
200 | * yet set up the percpu register (%gs). | ||
201 | */ | ||
202 | void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1)); | ||
203 | |||
204 | kasan_unpoison_shadow(base, watermark - base); | ||
205 | } | ||
206 | |||
207 | /* | ||
208 | * Clear all poison for the region between the current SP and a provided | ||
209 | * watermark value, as is sometimes required prior to hand-crafted asm function | ||
210 | * returns in the middle of functions. | ||
211 | */ | ||
212 | void kasan_unpoison_stack_above_sp_to(const void *watermark) | ||
213 | { | ||
214 | const void *sp = __builtin_frame_address(0); | ||
215 | size_t size = watermark - sp; | ||
216 | |||
217 | if (WARN_ON(sp > watermark)) | ||
218 | return; | ||
219 | kasan_unpoison_shadow(sp, size); | ||
220 | } | ||
221 | |||
222 | void kasan_alloc_pages(struct page *page, unsigned int order) | ||
223 | { | ||
224 | u8 tag; | ||
225 | unsigned long i; | ||
226 | |||
227 | if (unlikely(PageHighMem(page))) | ||
228 | return; | ||
229 | |||
230 | tag = random_tag(); | ||
231 | for (i = 0; i < (1 << order); i++) | ||
232 | page_kasan_tag_set(page + i, tag); | ||
233 | kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order); | ||
234 | } | ||
235 | |||
236 | void kasan_free_pages(struct page *page, unsigned int order) | ||
237 | { | ||
238 | if (likely(!PageHighMem(page))) | ||
239 | kasan_poison_shadow(page_address(page), | ||
240 | PAGE_SIZE << order, | ||
241 | KASAN_FREE_PAGE); | ||
242 | } | ||
243 | |||
244 | /* | ||
245 | * Adaptive redzone policy taken from the userspace AddressSanitizer runtime. | ||
246 | * For larger allocations, larger redzones are used. | ||
247 | */ | ||
248 | static inline unsigned int optimal_redzone(unsigned int object_size) | ||
249 | { | ||
250 | if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) | ||
251 | return 0; | ||
252 | |||
253 | return | ||
254 | object_size <= 64 - 16 ? 16 : | ||
255 | object_size <= 128 - 32 ? 32 : | ||
256 | object_size <= 512 - 64 ? 64 : | ||
257 | object_size <= 4096 - 128 ? 128 : | ||
258 | object_size <= (1 << 14) - 256 ? 256 : | ||
259 | object_size <= (1 << 15) - 512 ? 512 : | ||
260 | object_size <= (1 << 16) - 1024 ? 1024 : 2048; | ||
261 | } | ||
262 | |||
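As a quick illustration of the policy, a userspace copy of optimal_redzone() printing the redzone picked for a few arbitrary object sizes in the generic mode:

    /* Illustrative only: a userspace copy of optimal_redzone() to print the
     * redzone chosen for a few arbitrary object sizes.
     */
    #include <stdio.h>

    static unsigned int optimal_redzone(unsigned int object_size)
    {
            return
                    object_size <= 64 - 16   ? 16 :
                    object_size <= 128 - 32  ? 32 :
                    object_size <= 512 - 64  ? 64 :
                    object_size <= 4096 - 128 ? 128 :
                    object_size <= (1 << 14) - 256  ? 256 :
                    object_size <= (1 << 15) - 512  ? 512 :
                    object_size <= (1 << 16) - 1024 ? 1024 : 2048;
    }

    int main(void)
    {
            unsigned int sizes[] = { 16, 100, 1024, 8192 };

            for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                    printf("object_size %u -> redzone %u\n",
                           sizes[i], optimal_redzone(sizes[i]));
            return 0;
    }
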
263 | void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, | ||
264 | slab_flags_t *flags) | ||
265 | { | ||
266 | unsigned int orig_size = *size; | ||
267 | unsigned int redzone_size; | ||
268 | int redzone_adjust; | ||
269 | |||
270 | /* Add alloc meta. */ | ||
271 | cache->kasan_info.alloc_meta_offset = *size; | ||
272 | *size += sizeof(struct kasan_alloc_meta); | ||
273 | |||
274 | /* Add free meta. */ | ||
275 | if (IS_ENABLED(CONFIG_KASAN_GENERIC) && | ||
276 | (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor || | ||
277 | cache->object_size < sizeof(struct kasan_free_meta))) { | ||
278 | cache->kasan_info.free_meta_offset = *size; | ||
279 | *size += sizeof(struct kasan_free_meta); | ||
280 | } | ||
281 | |||
282 | redzone_size = optimal_redzone(cache->object_size); | ||
283 | redzone_adjust = redzone_size - (*size - cache->object_size); | ||
284 | if (redzone_adjust > 0) | ||
285 | *size += redzone_adjust; | ||
286 | |||
287 | *size = min_t(unsigned int, KMALLOC_MAX_SIZE, | ||
288 | max(*size, cache->object_size + redzone_size)); | ||
289 | |||
290 | /* | ||
291 | * If the metadata doesn't fit, don't enable KASAN at all. | ||
292 | */ | ||
293 | if (*size <= cache->kasan_info.alloc_meta_offset || | ||
294 | *size <= cache->kasan_info.free_meta_offset) { | ||
295 | cache->kasan_info.alloc_meta_offset = 0; | ||
296 | cache->kasan_info.free_meta_offset = 0; | ||
297 | *size = orig_size; | ||
298 | return; | ||
299 | } | ||
300 | |||
301 | cache->align = round_up(cache->align, KASAN_SHADOW_SCALE_SIZE); | ||
302 | |||
303 | *flags |= SLAB_KASAN; | ||
304 | } | ||
305 | |||
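A rough sketch of the *size bookkeeping above for a hypothetical generic-mode cache with 100-byte objects, no constructor and no SLAB_TYPESAFE_BY_RCU; the 16-byte alloc-meta size is an assumption made only for this illustration:

    /* Illustrative sketch, not from the patch: the *size bookkeeping for a
     * hypothetical 100-byte object; the 16-byte alloc meta is an assumption.
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned int object_size = 100;
            unsigned int size = object_size;        /* incoming *size */
            unsigned int alloc_meta = 16;           /* assumed sizeof(struct kasan_alloc_meta) */
            unsigned int redzone = 64;              /* optimal_redzone(100) */
            int adjust;

            size += alloc_meta;                     /* alloc meta lives after the object */
            adjust = (int)redzone - (int)(size - object_size);
            if (adjust > 0)
                    size += adjust;                 /* grow until the redzone target is met */

            printf("final size: %u (object %u + %u bytes of redzone/metadata)\n",
                   size, object_size, size - object_size);
            return 0;
    }
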
306 | size_t kasan_metadata_size(struct kmem_cache *cache) | ||
307 | { | ||
308 | return (cache->kasan_info.alloc_meta_offset ? | ||
309 | sizeof(struct kasan_alloc_meta) : 0) + | ||
310 | (cache->kasan_info.free_meta_offset ? | ||
311 | sizeof(struct kasan_free_meta) : 0); | ||
312 | } | ||
313 | |||
314 | struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache, | ||
315 | const void *object) | ||
316 | { | ||
317 | BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32); | ||
318 | return (void *)object + cache->kasan_info.alloc_meta_offset; | ||
319 | } | ||
320 | |||
321 | struct kasan_free_meta *get_free_info(struct kmem_cache *cache, | ||
322 | const void *object) | ||
323 | { | ||
324 | BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32); | ||
325 | return (void *)object + cache->kasan_info.free_meta_offset; | ||
326 | } | ||
327 | |||
328 | void kasan_poison_slab(struct page *page) | ||
329 | { | ||
330 | unsigned long i; | ||
331 | |||
332 | for (i = 0; i < (1 << compound_order(page)); i++) | ||
333 | page_kasan_tag_reset(page + i); | ||
334 | kasan_poison_shadow(page_address(page), | ||
335 | PAGE_SIZE << compound_order(page), | ||
336 | KASAN_KMALLOC_REDZONE); | ||
337 | } | ||
338 | |||
339 | void kasan_unpoison_object_data(struct kmem_cache *cache, void *object) | ||
340 | { | ||
341 | kasan_unpoison_shadow(object, cache->object_size); | ||
342 | } | ||
343 | |||
344 | void kasan_poison_object_data(struct kmem_cache *cache, void *object) | ||
345 | { | ||
346 | kasan_poison_shadow(object, | ||
347 | round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE), | ||
348 | KASAN_KMALLOC_REDZONE); | ||
349 | } | ||
350 | |||
351 | /* | ||
352 | * Since it's desirable to only call object constructors once during slab | ||
353 | * allocation, we preassign tags to all such objects. Also preassign tags for | ||
354 | * SLAB_TYPESAFE_BY_RCU slabs to avoid use-after-free reports. | ||
355 | * For the SLAB allocator we can't preassign tags randomly since the freelist | ||
356 | * is stored as an array of indexes instead of a linked list. Assign tags | ||
357 | * based on object indexes, so that objects that are next to each other get | ||
358 | * different tags. | ||
359 | * After a tag is assigned, the object always gets allocated with the same tag. | ||
360 | * The reason is that we can't change tags for objects with constructors on | ||
361 | * reallocation (even for non-SLAB_TYPESAFE_BY_RCU), because the constructor | ||
362 | * code can save the pointer to the object somewhere (e.g. in the object | ||
363 | * itself). Then if we retag it, the old saved pointer will become invalid. | ||
364 | */ | ||
365 | static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new) | ||
366 | { | ||
367 | if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU)) | ||
368 | return new ? KASAN_TAG_KERNEL : random_tag(); | ||
369 | |||
370 | #ifdef CONFIG_SLAB | ||
371 | return (u8)obj_to_index(cache, virt_to_page(object), (void *)object); | ||
372 | #else | ||
373 | return new ? random_tag() : get_tag(object); | ||
374 | #endif | ||
375 | } | ||
376 | |||
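With CONFIG_KASAN_SW_TAGS the tag assigned here ends up in the otherwise-unused top byte of the pointer (arm64's top-byte-ignore lets the hardware dereference it unchanged), and get_tag()/set_tag()/reset_tag() are essentially shifts and masks on that byte. A standalone sketch of the idea, with the bit position and the 0xff native tag treated as assumptions:

    /* Illustrative sketch, not from the patch: a software tag stored in the
     * top byte of a 64-bit pointer; bit position and the 0xff native tag are
     * assumptions for the illustration.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define TAG_SHIFT 56

    static uint64_t set_tag(uint64_t addr, uint8_t tag)
    {
            return (addr & ~(0xffULL << TAG_SHIFT)) | ((uint64_t)tag << TAG_SHIFT);
    }

    static uint8_t get_tag(uint64_t addr)
    {
            return addr >> TAG_SHIFT;
    }

    int main(void)
    {
            uint64_t ptr = 0xffff000012345678ULL;   /* made-up kernel address */
            uint64_t tagged = set_tag(ptr, 0xab);

            printf("tagged:   %#llx (tag %#x)\n",
                   (unsigned long long)tagged, get_tag(tagged));
            printf("untagged: %#llx\n",
                   (unsigned long long)set_tag(tagged, 0xff)); /* 0xff as the native tag */
            return 0;
    }
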
377 | void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, | ||
378 | const void *object) | ||
379 | { | ||
380 | struct kasan_alloc_meta *alloc_info; | ||
381 | |||
382 | if (!(cache->flags & SLAB_KASAN)) | ||
383 | return (void *)object; | ||
384 | |||
385 | alloc_info = get_alloc_info(cache, object); | ||
386 | __memset(alloc_info, 0, sizeof(*alloc_info)); | ||
387 | |||
388 | if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) | ||
389 | object = set_tag(object, assign_tag(cache, object, true)); | ||
390 | |||
391 | return (void *)object; | ||
392 | } | ||
393 | |||
394 | void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object, | ||
395 | gfp_t flags) | ||
396 | { | ||
397 | return kasan_kmalloc(cache, object, cache->object_size, flags); | ||
398 | } | ||
399 | |||
400 | static inline bool shadow_invalid(u8 tag, s8 shadow_byte) | ||
401 | { | ||
402 | if (IS_ENABLED(CONFIG_KASAN_GENERIC)) | ||
403 | return shadow_byte < 0 || | ||
404 | shadow_byte >= KASAN_SHADOW_SCALE_SIZE; | ||
405 | else | ||
406 | return tag != (u8)shadow_byte; | ||
407 | } | ||
408 | |||
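shadow_invalid() folds two different validity rules into one helper. A standalone sketch spelling them out, with the 8-byte granule assumed for the generic case:

    /* Illustrative sketch, not from the patch: the two validity rules that
     * shadow_invalid() combines, with the 8-byte granule assumed.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define SCALE_SIZE 8

    /* Generic mode: 0..7 means (partially) addressable, anything else is poison. */
    static bool generic_invalid(signed char shadow)
    {
            return shadow < 0 || shadow >= SCALE_SIZE;
    }

    /* Tag mode: the pointer tag must match the tag stored in shadow. */
    static bool tags_invalid(unsigned char ptr_tag, signed char shadow)
    {
            return ptr_tag != (unsigned char)shadow;
    }

    int main(void)
    {
            printf("generic, shadow 0x00: %s\n",
                   generic_invalid(0x00) ? "invalid" : "ok");
            printf("generic, shadow 0xfb: %s\n",
                   generic_invalid((signed char)0xfb) ? "invalid" : "ok");
            printf("tags, pointer tag 0xab vs shadow 0xab: %s\n",
                   tags_invalid(0xab, (signed char)0xab) ? "invalid" : "ok");
            return 0;
    }
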
409 | static bool __kasan_slab_free(struct kmem_cache *cache, void *object, | ||
410 | unsigned long ip, bool quarantine) | ||
411 | { | ||
412 | s8 shadow_byte; | ||
413 | u8 tag; | ||
414 | void *tagged_object; | ||
415 | unsigned long rounded_up_size; | ||
416 | |||
417 | tag = get_tag(object); | ||
418 | tagged_object = object; | ||
419 | object = reset_tag(object); | ||
420 | |||
421 | if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) != | ||
422 | object)) { | ||
423 | kasan_report_invalid_free(tagged_object, ip); | ||
424 | return true; | ||
425 | } | ||
426 | |||
427 | /* RCU slabs could be legally used after free within the RCU period */ | ||
428 | if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU)) | ||
429 | return false; | ||
430 | |||
431 | shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object)); | ||
432 | if (shadow_invalid(tag, shadow_byte)) { | ||
433 | kasan_report_invalid_free(tagged_object, ip); | ||
434 | return true; | ||
435 | } | ||
436 | |||
437 | rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE); | ||
438 | kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE); | ||
439 | |||
440 | if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) || | ||
441 | unlikely(!(cache->flags & SLAB_KASAN))) | ||
442 | return false; | ||
443 | |||
444 | set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT); | ||
445 | quarantine_put(get_free_info(cache, object), cache); | ||
446 | |||
447 | return IS_ENABLED(CONFIG_KASAN_GENERIC); | ||
448 | } | ||
449 | |||
450 | bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) | ||
451 | { | ||
452 | return __kasan_slab_free(cache, object, ip, true); | ||
453 | } | ||
454 | |||
455 | void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object, | ||
456 | size_t size, gfp_t flags) | ||
457 | { | ||
458 | unsigned long redzone_start; | ||
459 | unsigned long redzone_end; | ||
460 | u8 tag; | ||
461 | |||
462 | if (gfpflags_allow_blocking(flags)) | ||
463 | quarantine_reduce(); | ||
464 | |||
465 | if (unlikely(object == NULL)) | ||
466 | return NULL; | ||
467 | |||
468 | redzone_start = round_up((unsigned long)(object + size), | ||
469 | KASAN_SHADOW_SCALE_SIZE); | ||
470 | redzone_end = round_up((unsigned long)object + cache->object_size, | ||
471 | KASAN_SHADOW_SCALE_SIZE); | ||
472 | |||
473 | if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) | ||
474 | tag = assign_tag(cache, object, false); | ||
475 | |||
476 | /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */ | ||
477 | kasan_unpoison_shadow(set_tag(object, tag), size); | ||
478 | kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, | ||
479 | KASAN_KMALLOC_REDZONE); | ||
480 | |||
481 | if (cache->flags & SLAB_KASAN) | ||
482 | set_track(&get_alloc_info(cache, object)->alloc_track, flags); | ||
483 | |||
484 | return set_tag(object, tag); | ||
485 | } | ||
486 | EXPORT_SYMBOL(kasan_kmalloc); | ||
487 | |||
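The redzone arithmetic in kasan_kmalloc() for one made-up case, a 40-byte request served from a cache whose object_size is 100, again assuming the 8-byte shadow granule:

    /* Illustrative sketch, not from the patch: redzone placement for a made-up
     * kmalloc(40) served from a cache whose object_size is 100.
     */
    #include <stdio.h>

    #define GRANULE 8
    #define ROUND_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
            unsigned long object = 0x1000;          /* made-up object address */
            unsigned long size = 40;                /* bytes the caller asked for */
            unsigned long object_size = 100;        /* cache->object_size */

            unsigned long redzone_start = ROUND_UP(object + size, GRANULE);
            unsigned long redzone_end = ROUND_UP(object + object_size, GRANULE);

            printf("unpoison [%#lx, %#lx)\n", object, object + size);
            printf("redzone  [%#lx, %#lx)\n", redzone_start, redzone_end);
            return 0;
    }
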
488 | void * __must_check kasan_kmalloc_large(const void *ptr, size_t size, | ||
489 | gfp_t flags) | ||
490 | { | ||
491 | struct page *page; | ||
492 | unsigned long redzone_start; | ||
493 | unsigned long redzone_end; | ||
494 | |||
495 | if (gfpflags_allow_blocking(flags)) | ||
496 | quarantine_reduce(); | ||
497 | |||
498 | if (unlikely(ptr == NULL)) | ||
499 | return NULL; | ||
500 | |||
501 | page = virt_to_page(ptr); | ||
502 | redzone_start = round_up((unsigned long)(ptr + size), | ||
503 | KASAN_SHADOW_SCALE_SIZE); | ||
504 | redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page)); | ||
505 | |||
506 | kasan_unpoison_shadow(ptr, size); | ||
507 | kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, | ||
508 | KASAN_PAGE_REDZONE); | ||
509 | |||
510 | return (void *)ptr; | ||
511 | } | ||
512 | |||
513 | void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags) | ||
514 | { | ||
515 | struct page *page; | ||
516 | |||
517 | if (unlikely(object == ZERO_SIZE_PTR)) | ||
518 | return (void *)object; | ||
519 | |||
520 | page = virt_to_head_page(object); | ||
521 | |||
522 | if (unlikely(!PageSlab(page))) | ||
523 | return kasan_kmalloc_large(object, size, flags); | ||
524 | else | ||
525 | return kasan_kmalloc(page->slab_cache, object, size, flags); | ||
526 | } | ||
527 | |||
528 | void kasan_poison_kfree(void *ptr, unsigned long ip) | ||
529 | { | ||
530 | struct page *page; | ||
531 | |||
532 | page = virt_to_head_page(ptr); | ||
533 | |||
534 | if (unlikely(!PageSlab(page))) { | ||
535 | if (ptr != page_address(page)) { | ||
536 | kasan_report_invalid_free(ptr, ip); | ||
537 | return; | ||
538 | } | ||
539 | kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page), | ||
540 | KASAN_FREE_PAGE); | ||
541 | } else { | ||
542 | __kasan_slab_free(page->slab_cache, ptr, ip, false); | ||
543 | } | ||
544 | } | ||
545 | |||
546 | void kasan_kfree_large(void *ptr, unsigned long ip) | ||
547 | { | ||
548 | if (ptr != page_address(virt_to_head_page(ptr))) | ||
549 | kasan_report_invalid_free(ptr, ip); | ||
550 | /* The object will be poisoned by page_alloc. */ | ||
551 | } | ||
552 | |||
553 | int kasan_module_alloc(void *addr, size_t size) | ||
554 | { | ||
555 | void *ret; | ||
556 | size_t scaled_size; | ||
557 | size_t shadow_size; | ||
558 | unsigned long shadow_start; | ||
559 | |||
560 | shadow_start = (unsigned long)kasan_mem_to_shadow(addr); | ||
561 | scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT; | ||
562 | shadow_size = round_up(scaled_size, PAGE_SIZE); | ||
563 | |||
564 | if (WARN_ON(!PAGE_ALIGNED(shadow_start))) | ||
565 | return -EINVAL; | ||
566 | |||
567 | ret = __vmalloc_node_range(shadow_size, 1, shadow_start, | ||
568 | shadow_start + shadow_size, | ||
569 | GFP_KERNEL, | ||
570 | PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, | ||
571 | __builtin_return_address(0)); | ||
572 | |||
573 | if (ret) { | ||
574 | __memset(ret, KASAN_SHADOW_INIT, shadow_size); | ||
575 | find_vm_area(addr)->flags |= VM_KASAN; | ||
576 | kmemleak_ignore(ret); | ||
577 | return 0; | ||
578 | } | ||
579 | |||
580 | return -ENOMEM; | ||
581 | } | ||
582 | |||
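To put numbers on the shadow sizing in kasan_module_alloc(), a sketch for a hypothetical 3 MiB module area, assuming a scale shift of 3 and 4 KiB pages:

    /* Illustrative sketch, not from the patch: shadow reserved for a
     * hypothetical 3 MiB module area, assuming scale shift 3 and 4 KiB pages.
     */
    #include <stdio.h>

    #define SHADOW_SHIFT 3
    #define SHADOW_MASK  ((1UL << SHADOW_SHIFT) - 1)
    #define PAGE_SZ      4096UL
    #define ROUND_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
            unsigned long size = 3UL << 20;         /* 3 MiB of module space */
            unsigned long scaled = (size + SHADOW_MASK) >> SHADOW_SHIFT;
            unsigned long shadow = ROUND_UP(scaled, PAGE_SZ);

            printf("module %lu KiB -> shadow %lu KiB\n", size >> 10, shadow >> 10);
            return 0;
    }
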
583 | void kasan_free_shadow(const struct vm_struct *vm) | ||
584 | { | ||
585 | if (vm->flags & VM_KASAN) | ||
586 | vfree(kasan_mem_to_shadow(vm->addr)); | ||
587 | } | ||
588 | |||
589 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
590 | static bool shadow_mapped(unsigned long addr) | ||
591 | { | ||
592 | pgd_t *pgd = pgd_offset_k(addr); | ||
593 | p4d_t *p4d; | ||
594 | pud_t *pud; | ||
595 | pmd_t *pmd; | ||
596 | pte_t *pte; | ||
597 | |||
598 | if (pgd_none(*pgd)) | ||
599 | return false; | ||
600 | p4d = p4d_offset(pgd, addr); | ||
601 | if (p4d_none(*p4d)) | ||
602 | return false; | ||
603 | pud = pud_offset(p4d, addr); | ||
604 | if (pud_none(*pud)) | ||
605 | return false; | ||
606 | |||
607 | /* | ||
608 | * We can't use pud_large() or pud_huge(): the former is | ||
609 | * arch-specific and the latter depends on HUGETLB_PAGE. So let's abuse | ||
610 | * pud_bad(): if the pud is bad, then it's bad because it's huge. | ||
611 | */ | ||
612 | if (pud_bad(*pud)) | ||
613 | return true; | ||
614 | pmd = pmd_offset(pud, addr); | ||
615 | if (pmd_none(*pmd)) | ||
616 | return false; | ||
617 | |||
618 | if (pmd_bad(*pmd)) | ||
619 | return true; | ||
620 | pte = pte_offset_kernel(pmd, addr); | ||
621 | return !pte_none(*pte); | ||
622 | } | ||
623 | |||
624 | static int __meminit kasan_mem_notifier(struct notifier_block *nb, | ||
625 | unsigned long action, void *data) | ||
626 | { | ||
627 | struct memory_notify *mem_data = data; | ||
628 | unsigned long nr_shadow_pages, start_kaddr, shadow_start; | ||
629 | unsigned long shadow_end, shadow_size; | ||
630 | |||
631 | nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT; | ||
632 | start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn); | ||
633 | shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr); | ||
634 | shadow_size = nr_shadow_pages << PAGE_SHIFT; | ||
635 | shadow_end = shadow_start + shadow_size; | ||
636 | |||
637 | if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) || | ||
638 | WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT))) | ||
639 | return NOTIFY_BAD; | ||
640 | |||
641 | switch (action) { | ||
642 | case MEM_GOING_ONLINE: { | ||
643 | void *ret; | ||
644 | |||
645 | /* | ||
646 | * If the shadow is already mapped, then it must have been mapped | ||
647 | * during boot. This can happen if we are onlining previously | ||
648 | * offlined memory. | ||
649 | */ | ||
650 | if (shadow_mapped(shadow_start)) | ||
651 | return NOTIFY_OK; | ||
652 | |||
653 | ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start, | ||
654 | shadow_end, GFP_KERNEL, | ||
655 | PAGE_KERNEL, VM_NO_GUARD, | ||
656 | pfn_to_nid(mem_data->start_pfn), | ||
657 | __builtin_return_address(0)); | ||
658 | if (!ret) | ||
659 | return NOTIFY_BAD; | ||
660 | |||
661 | kmemleak_ignore(ret); | ||
662 | return NOTIFY_OK; | ||
663 | } | ||
664 | case MEM_CANCEL_ONLINE: | ||
665 | case MEM_OFFLINE: { | ||
666 | struct vm_struct *vm; | ||
667 | |||
668 | /* | ||
669 | * shadow_start was either mapped during boot by kasan_init() | ||
670 | * or during memory online by __vmalloc_node_range(). | ||
671 | * In the latter case we can use vfree() to free the shadow. | ||
672 | * A non-NULL result from find_vm_area() tells us that it was | ||
673 | * indeed mapped during memory online. | ||
674 | * | ||
675 | * Currently it's not possible to free shadow mapped | ||
676 | * during boot by kasan_init(). It's because the code | ||
677 | * to do that hasn't been written yet. So we'll just | ||
678 | * leak the memory. | ||
679 | */ | ||
680 | vm = find_vm_area((void *)shadow_start); | ||
681 | if (vm) | ||
682 | vfree((void *)shadow_start); | ||
683 | } | ||
684 | } | ||
685 | |||
686 | return NOTIFY_OK; | ||
687 | } | ||
688 | |||
689 | static int __init kasan_memhotplug_init(void) | ||
690 | { | ||
691 | hotplug_memory_notifier(kasan_mem_notifier, 0); | ||
692 | |||
693 | return 0; | ||
694 | } | ||
695 | |||
696 | core_initcall(kasan_memhotplug_init); | ||
697 | #endif | ||