path: root/mm/kasan
author		Andrey Konovalov <andreyknvl@google.com>	2018-12-28 03:30:50 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 15:11:44 -0500
commit		7f94ffbc4c6a1bdb51d39965e4f2acaa19bd798f (patch)
tree		5b6b2519c9979a3b557a812843d054c91886e441 /mm/kasan
parent		5b7c4148222d7acaa1612e5eec84fc66c88d54f3 (diff)
kasan: add hooks implementation for tag-based mode
This commit adds the tag-based KASAN specific hooks implementation and
adjusts the common generic and tag-based KASAN ones.

1. When a new slab cache is created, tag-based KASAN rounds up the size of
   the objects in this cache to KASAN_SHADOW_SCALE_SIZE (== 16).

2. On each kmalloc, tag-based KASAN generates a random tag, sets the shadow
   memory that corresponds to this object to this tag, and embeds this tag
   value into the top byte of the returned pointer.

3. On each kfree, tag-based KASAN poisons the shadow memory with a random
   tag to allow detection of use-after-free bugs.

The rest of the hook implementation logic is very similar to that of
generic KASAN. Tag-based KASAN saves allocation and free stack metadata
to the slab object the same way generic KASAN does.

Link: http://lkml.kernel.org/r/bda78069e3b8422039794050ddcb2d53d053ed41.1544099024.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
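[Editor's note, not part of the patch] For orientation: software tag-based KASAN relies on arm64's Top Byte Ignore, so a tag can live in bits 63:56 of a kernel pointer while the hardware ignores it on dereference; the same tag is also written into each shadow byte covering the object, so a later check compares "pointer tag == shadow byte". A rough, self-contained sketch of that pointer-tag manipulation, in plain user-space C with hypothetical names (the kernel's get_tag()/set_tag()/reset_tag() helpers live elsewhere in mm/kasan and are arch-specific):

#include <stdint.h>

#define KASAN_TAG_SHIFT   56     /* tag occupies the top byte of a 64-bit pointer */
#define KASAN_TAG_KERNEL  0xFFu  /* native kernel pointer tag, matches everything */

/* Drop the tag so the address can be used for shadow-memory arithmetic. */
static inline void *example_reset_tag(const void *addr)
{
	return (void *)((uint64_t)addr & ~((uint64_t)0xFF << KASAN_TAG_SHIFT));
}

/* Extract the tag from the top byte of a pointer. */
static inline uint8_t example_get_tag(const void *addr)
{
	return (uint8_t)((uint64_t)addr >> KASAN_TAG_SHIFT);
}

/* Embed a tag into the top byte of the pointer returned to the caller. */
static inline void *example_set_tag(const void *addr, uint8_t tag)
{
	return (void *)((uint64_t)example_reset_tag(addr) |
			((uint64_t)tag << KASAN_TAG_SHIFT));
}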
Diffstat (limited to 'mm/kasan')
-rw-r--r--	mm/kasan/common.c	116
-rw-r--r--	mm/kasan/kasan.h	8
-rw-r--r--	mm/kasan/tags.c	48
3 files changed, 153 insertions(+), 19 deletions(-)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 7134e75447ff..27f0cae336c9 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -140,6 +140,13 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value)
 {
 	void *shadow_start, *shadow_end;
 
+	/*
+	 * Perform shadow offset calculation based on untagged address, as
+	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
+	 * addresses to this function.
+	 */
+	address = reset_tag(address);
+
 	shadow_start = kasan_mem_to_shadow(address);
 	shadow_end = kasan_mem_to_shadow(address + size);
 
@@ -148,11 +155,24 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value)
 
 void kasan_unpoison_shadow(const void *address, size_t size)
 {
-	kasan_poison_shadow(address, size, 0);
+	u8 tag = get_tag(address);
+
+	/*
+	 * Perform shadow offset calculation based on untagged address, as
+	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
+	 * addresses to this function.
+	 */
+	address = reset_tag(address);
+
+	kasan_poison_shadow(address, size, tag);
 
 	if (size & KASAN_SHADOW_MASK) {
 		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
-		*shadow = size & KASAN_SHADOW_MASK;
+
+		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+			*shadow = tag;
+		else
+			*shadow = size & KASAN_SHADOW_MASK;
 	}
 }
 
@@ -200,8 +220,9 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark)
 
 void kasan_alloc_pages(struct page *page, unsigned int order)
 {
-	if (likely(!PageHighMem(page)))
-		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
+	if (unlikely(PageHighMem(page)))
+		return;
+	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
 }
 
 void kasan_free_pages(struct page *page, unsigned int order)
@@ -218,6 +239,9 @@ void kasan_free_pages(struct page *page, unsigned int order)
  */
 static inline unsigned int optimal_redzone(unsigned int object_size)
 {
+	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+		return 0;
+
 	return
 		object_size <= 64 - 16 ? 16 :
 		object_size <= 128 - 32 ? 32 :
@@ -232,6 +256,7 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			slab_flags_t *flags)
 {
 	unsigned int orig_size = *size;
+	unsigned int redzone_size;
 	int redzone_adjust;
 
 	/* Add alloc meta. */
@@ -239,20 +264,20 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 	*size += sizeof(struct kasan_alloc_meta);
 
 	/* Add free meta. */
-	if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
-	    cache->object_size < sizeof(struct kasan_free_meta)) {
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
+	     cache->object_size < sizeof(struct kasan_free_meta))) {
 		cache->kasan_info.free_meta_offset = *size;
 		*size += sizeof(struct kasan_free_meta);
 	}
-	redzone_adjust = optimal_redzone(cache->object_size) -
-		(*size - cache->object_size);
 
+	redzone_size = optimal_redzone(cache->object_size);
+	redzone_adjust = redzone_size - (*size - cache->object_size);
 	if (redzone_adjust > 0)
 		*size += redzone_adjust;
 
 	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
-			max(*size, cache->object_size +
-					optimal_redzone(cache->object_size)));
+			max(*size, cache->object_size + redzone_size));
 
 	/*
 	 * If the metadata doesn't fit, don't enable KASAN at all.
@@ -265,6 +290,8 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 		return;
 	}
 
+	cache->align = round_up(cache->align, KASAN_SHADOW_SCALE_SIZE);
+
 	*flags |= SLAB_KASAN;
 }
 
@@ -309,6 +336,32 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 			KASAN_KMALLOC_REDZONE);
 }
 
+/*
+ * Since it's desirable to only call object constructors once during slab
+ * allocation, we preassign tags to all such objects. Also preassign tags for
+ * SLAB_TYPESAFE_BY_RCU slabs to avoid use-after-free reports.
+ * For the SLAB allocator we can't preassign tags randomly since the freelist
+ * is stored as an array of indexes instead of a linked list. Assign tags
+ * based on object indexes, so that objects that are next to each other get
+ * different tags.
+ * After a tag is assigned, the object always gets allocated with the same tag.
+ * The reason is that we can't change tags for objects with constructors on
+ * reallocation (even for non-SLAB_TYPESAFE_BY_RCU), because the constructor
+ * code can save the pointer to the object somewhere (e.g. in the object
+ * itself). Then if we retag it, the old saved pointer will become invalid.
+ */
+static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new)
+{
+	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
+		return new ? KASAN_TAG_KERNEL : random_tag();
+
+#ifdef CONFIG_SLAB
+	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
+#else
+	return new ? random_tag() : get_tag(object);
+#endif
+}
+
 void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
 {
 	struct kasan_alloc_meta *alloc_info;
@@ -319,6 +372,9 @@ void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
 	alloc_info = get_alloc_info(cache, object);
 	__memset(alloc_info, 0, sizeof(*alloc_info));
 
+	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+		object = set_tag(object, assign_tag(cache, object, true));
+
 	return (void *)object;
 }
 
@@ -327,15 +383,30 @@ void *kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 	return kasan_kmalloc(cache, object, cache->object_size, flags);
 }
 
+static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
+{
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+		return shadow_byte < 0 ||
+			shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
+	else
+		return tag != (u8)shadow_byte;
+}
+
 static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 			      unsigned long ip, bool quarantine)
 {
 	s8 shadow_byte;
+	u8 tag;
+	void *tagged_object;
 	unsigned long rounded_up_size;
 
+	tag = get_tag(object);
+	tagged_object = object;
+	object = reset_tag(object);
+
 	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
 	    object)) {
-		kasan_report_invalid_free(object, ip);
+		kasan_report_invalid_free(tagged_object, ip);
 		return true;
 	}
 
@@ -344,20 +415,22 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 		return false;
 
 	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
-	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
-		kasan_report_invalid_free(object, ip);
+	if (shadow_invalid(tag, shadow_byte)) {
+		kasan_report_invalid_free(tagged_object, ip);
 		return true;
 	}
 
 	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
 	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
 
-	if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN)))
+	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
+			unlikely(!(cache->flags & SLAB_KASAN)))
 		return false;
 
 	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
 	quarantine_put(get_free_info(cache, object), cache);
-	return true;
+
+	return IS_ENABLED(CONFIG_KASAN_GENERIC);
 }
 
 bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
@@ -370,6 +443,7 @@ void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
+	u8 tag;
 
 	if (gfpflags_allow_blocking(flags))
 		quarantine_reduce();
@@ -382,14 +456,18 @@ void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 	redzone_end = round_up((unsigned long)object + cache->object_size,
 				KASAN_SHADOW_SCALE_SIZE);
 
-	kasan_unpoison_shadow(object, size);
+	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+		tag = assign_tag(cache, object, false);
+
+	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
+	kasan_unpoison_shadow(set_tag(object, tag), size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
 		KASAN_KMALLOC_REDZONE);
 
 	if (cache->flags & SLAB_KASAN)
 		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
 
-	return (void *)object;
+	return set_tag(object, tag);
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
@@ -439,7 +517,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
 	page = virt_to_head_page(ptr);
 
 	if (unlikely(!PageSlab(page))) {
-		if (ptr != page_address(page)) {
+		if (reset_tag(ptr) != page_address(page)) {
 			kasan_report_invalid_free(ptr, ip);
 			return;
 		}
@@ -452,7 +530,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
 
 void kasan_kfree_large(void *ptr, unsigned long ip)
 {
-	if (ptr != page_address(virt_to_head_page(ptr)))
+	if (reset_tag(ptr) != page_address(virt_to_head_page(ptr)))
 		kasan_report_invalid_free(ptr, ip);
 	/* The object will be poisoned by page_alloc. */
 }
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 82a23b23ff93..ea51b2d898ec 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -12,10 +12,18 @@
 #define KASAN_TAG_INVALID	0xFE /* inaccessible memory tag */
 #define KASAN_TAG_MAX		0xFD /* maximum value for random tags */
 
+#ifdef CONFIG_KASAN_GENERIC
 #define KASAN_FREE_PAGE		0xFF  /* page was freed */
 #define KASAN_PAGE_REDZONE	0xFE  /* redzone for kmalloc_large allocations */
 #define KASAN_KMALLOC_REDZONE	0xFC  /* redzone inside slub object */
 #define KASAN_KMALLOC_FREE	0xFB  /* object was freed (kmem_cache_free/kfree) */
+#else
+#define KASAN_FREE_PAGE		KASAN_TAG_INVALID
+#define KASAN_PAGE_REDZONE	KASAN_TAG_INVALID
+#define KASAN_KMALLOC_REDZONE	KASAN_TAG_INVALID
+#define KASAN_KMALLOC_FREE	KASAN_TAG_INVALID
+#endif
+
 #define KASAN_GLOBAL_REDZONE	0xFA  /* redzone for global variable */
 
 /*
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
index 1c4e7ce2e6fe..1d1b79350e28 100644
--- a/mm/kasan/tags.c
+++ b/mm/kasan/tags.c
@@ -78,15 +78,60 @@ void *kasan_reset_tag(const void *addr)
 void check_memory_region(unsigned long addr, size_t size, bool write,
 				unsigned long ret_ip)
 {
+	u8 tag;
+	u8 *shadow_first, *shadow_last, *shadow;
+	void *untagged_addr;
+
+	if (unlikely(size == 0))
+		return;
+
+	tag = get_tag((const void *)addr);
+
+	/*
+	 * Ignore accesses for pointers tagged with 0xff (native kernel
+	 * pointer tag) to suppress false positives caused by kmap.
+	 *
+	 * Some kernel code was written to account for archs that don't keep
+	 * high memory mapped all the time, but rather map and unmap particular
+	 * pages when needed. Instead of storing a pointer to the kernel memory,
+	 * this code saves the address of the page structure and offset within
+	 * that page for later use. Those pages are then mapped and unmapped
+	 * with kmap/kunmap when necessary and virt_to_page is used to get the
+	 * virtual address of the page. For arm64 (that keeps the high memory
+	 * mapped all the time), kmap is turned into a page_address call.
+	 *
+	 * The issue is that with use of the page_address + virt_to_page
+	 * sequence the top byte value of the original pointer gets lost (gets
+	 * set to KASAN_TAG_KERNEL (0xFF)).
+	 */
+	if (tag == KASAN_TAG_KERNEL)
+		return;
+
+	untagged_addr = reset_tag((const void *)addr);
+	if (unlikely(untagged_addr <
+			kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
+		kasan_report(addr, size, write, ret_ip);
+		return;
+	}
+	shadow_first = kasan_mem_to_shadow(untagged_addr);
+	shadow_last = kasan_mem_to_shadow(untagged_addr + size - 1);
+	for (shadow = shadow_first; shadow <= shadow_last; shadow++) {
+		if (*shadow != tag) {
+			kasan_report(addr, size, write, ret_ip);
+			return;
+		}
+	}
 }
 
 #define DEFINE_HWASAN_LOAD_STORE(size)					\
 	void __hwasan_load##size##_noabort(unsigned long addr)		\
 	{								\
+		check_memory_region(addr, size, false, _RET_IP_);	\
 	}								\
 	EXPORT_SYMBOL(__hwasan_load##size##_noabort);			\
 	void __hwasan_store##size##_noabort(unsigned long addr)	\
 	{								\
+		check_memory_region(addr, size, true, _RET_IP_);	\
 	}								\
 	EXPORT_SYMBOL(__hwasan_store##size##_noabort)
 
@@ -98,15 +143,18 @@ DEFINE_HWASAN_LOAD_STORE(16);
 
 void __hwasan_loadN_noabort(unsigned long addr, unsigned long size)
 {
+	check_memory_region(addr, size, false, _RET_IP_);
 }
 EXPORT_SYMBOL(__hwasan_loadN_noabort);
 
 void __hwasan_storeN_noabort(unsigned long addr, unsigned long size)
 {
+	check_memory_region(addr, size, true, _RET_IP_);
 }
 EXPORT_SYMBOL(__hwasan_storeN_noabort);
 
 void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size)
 {
+	kasan_poison_shadow((void *)addr, size, tag);
 }
 EXPORT_SYMBOL(__hwasan_tag_memory);
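[Editor's note, not part of the patch] A compact way to read the check_memory_region() logic added above: strip the tag, map the first and last byte of the access to shadow memory, and require every shadow byte in that range to equal the pointer's tag; the KASAN_TAG_KERNEL (0xff) early return is what suppresses the kmap-related false positives described in the comment. A stand-alone sketch under simplified assumptions (a flat toy shadow array, 16-byte granules, no KASAN_SHADOW_OFFSET; all names are hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define GRANULE_SIZE 16		/* one shadow byte describes 16 bytes of memory */

static uint8_t shadow_mem[1u << 20];	/* toy shadow covering a 16 MiB region */

static uint8_t *example_mem_to_shadow(uintptr_t untagged_addr)
{
	/* assumes untagged_addr falls inside the toy region */
	return &shadow_mem[untagged_addr / GRANULE_SIZE];
}

/* Returns true if every granule touched by the access carries the expected tag. */
static bool example_access_ok(uintptr_t untagged_addr, size_t size, uint8_t tag)
{
	uint8_t *first = example_mem_to_shadow(untagged_addr);
	uint8_t *last = example_mem_to_shadow(untagged_addr + size - 1);

	for (uint8_t *s = first; s <= last; s++) {
		if (*s != tag)
			return false;	/* the kernel would call kasan_report() here */
	}
	return true;
}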