Diffstat (limited to 'mm/kasan/common.c')
 mm/kasan/common.c | 82
 1 file changed, 54 insertions(+), 28 deletions(-)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 03d5d1374ca7..09b534fbba17 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -298,8 +298,6 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 		return;
 	}
 
-	cache->align = round_up(cache->align, KASAN_SHADOW_SCALE_SIZE);
-
 	*flags |= SLAB_KASAN;
 }
 
@@ -349,28 +347,48 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 }
 
-/*
- * Since it's desirable to only call object contructors once during slab
- * allocation, we preassign tags to all such objects. Also preassign tags for
- * SLAB_TYPESAFE_BY_RCU slabs to avoid use-after-free reports.
- * For SLAB allocator we can't preassign tags randomly since the freelist is
- * stored as an array of indexes instead of a linked list. Assign tags based
- * on objects indexes, so that objects that are next to each other get
- * different tags.
- * After a tag is assigned, the object always gets allocated with the same tag.
- * The reason is that we can't change tags for objects with constructors on
- * reallocation (even for non-SLAB_TYPESAFE_BY_RCU), because the constructor
- * code can save the pointer to the object somewhere (e.g. in the object
- * itself). Then if we retag it, the old saved pointer will become invalid.
- */
-static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new)
+/*
+ * This function assigns a tag to an object considering the following:
+ * 1. A cache might have a constructor, which might save a pointer to a slab
+ *    object somewhere (e.g. in the object itself). We preassign a tag for
+ *    each object in caches with constructors during slab creation and reuse
+ *    the same tag each time a particular object is allocated.
+ * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
+ *    accessed after being freed. We preassign tags for objects in these
+ *    caches as well.
+ * 3. For SLAB allocator we can't preassign tags randomly since the freelist
+ *    is stored as an array of indexes instead of a linked list. Assign tags
+ *    based on objects indexes, so that objects that are next to each other
+ *    get different tags.
+ */
+static u8 assign_tag(struct kmem_cache *cache, const void *object,
+			bool init, bool keep_tag)
 {
+	/*
+	 * 1. When an object is kmalloc()'ed, two hooks are called:
+	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
+	 *    tag only in the first one.
+	 * 2. We reuse the same tag for krealloc'ed objects.
+	 */
+	if (keep_tag)
+		return get_tag(object);
+
+	/*
+	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
+	 * set, assign a tag when the object is being allocated (init == false).
+	 */
 	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
-		return new ? KASAN_TAG_KERNEL : random_tag();
+		return init ? KASAN_TAG_KERNEL : random_tag();
 
+	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
 #ifdef CONFIG_SLAB
+	/* For SLAB assign tags based on the object index in the freelist. */
 	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
 #else
-	return new ? random_tag() : get_tag(object);
+	/*
+	 * For SLUB assign a random tag during slab creation, otherwise reuse
+	 * the already assigned tag.
+	 */
+	return init ? random_tag() : get_tag(object);
 #endif
 }
 
@@ -386,17 +404,12 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
 	__memset(alloc_info, 0, sizeof(*alloc_info));
 
 	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-		object = set_tag(object, assign_tag(cache, object, true));
+		object = set_tag(object,
+				assign_tag(cache, object, true, false));
 
 	return (void *)object;
 }
 
-void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
-					gfp_t flags)
-{
-	return kasan_kmalloc(cache, object, cache->object_size, flags);
-}
-
 static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
 {
 	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
@@ -452,8 +465,8 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
 	return __kasan_slab_free(cache, object, ip, true);
 }
 
-void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
-					size_t size, gfp_t flags)
+static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
+				size_t size, gfp_t flags, bool keep_tag)
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
@@ -471,7 +484,7 @@ void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
 				KASAN_SHADOW_SCALE_SIZE);
 
 	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-		tag = assign_tag(cache, object, false);
+		tag = assign_tag(cache, object, false, keep_tag);
 
 	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
 	kasan_unpoison_shadow(set_tag(object, tag), size);
@@ -483,6 +496,18 @@ void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
 
 	return set_tag(object, tag);
 }
+
+void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
+					gfp_t flags)
+{
+	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
+}
+
+void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
+				size_t size, gfp_t flags)
+{
+	return __kasan_kmalloc(cache, object, size, flags, true);
+}
 EXPORT_SYMBOL(kasan_kmalloc);
 
 void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
@@ -522,7 +547,8 @@ void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
 	if (unlikely(!PageSlab(page)))
 		return kasan_kmalloc_large(object, size, flags);
 	else
-		return __kasan_kmalloc(page->slab_cache, object, size,
-					flags, true);
+		return __kasan_kmalloc(page->slab_cache, object, size,
+					flags, true);
 }
 
 void kasan_poison_kfree(void *ptr, unsigned long ip)
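
A quick userspace sketch of the keep_tag behaviour introduced by this patch may help when reading the hunks above: a kmalloc() allocation reaches KASAN through two hooks, first kasan_slab_alloc() (keep_tag == false, so a tag is chosen) and then kasan_kmalloc() (keep_tag == true, so that tag is reused), and kasan_krealloc() likewise passes keep_tag == true. The program below only models that control flow; get_tag()/set_tag()/random_tag() are simplified stand-ins for the kernel helpers, 0xff stands in for KASAN_TAG_KERNEL, and nothing here is taken from allocator internals beyond what the patch itself shows.

/*
 * Standalone userspace sketch (64-bit build assumed), NOT kernel code.
 * It only models the keep_tag control flow added by this patch.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TAG_SHIFT	56	/* the top pointer byte carries the tag */
#define TAG_MASK	((uintptr_t)0xff << TAG_SHIFT)

static uint8_t get_tag(const void *ptr)
{
	return (uint8_t)((uintptr_t)ptr >> TAG_SHIFT);
}

static void *set_tag(const void *ptr, uint8_t tag)
{
	return (void *)(((uintptr_t)ptr & ~TAG_MASK) |
			((uintptr_t)tag << TAG_SHIFT));
}

static uint8_t random_tag(void)
{
	return (uint8_t)(rand() & 0xff);
}

/*
 * Same decision order as assign_tag() in the patch, modelled for a SLUB
 * cache without a constructor and without SLAB_TYPESAFE_BY_RCU.
 */
static uint8_t assign_tag(const void *object, int init, int keep_tag)
{
	if (keep_tag)
		return get_tag(object);	/* second hook / krealloc: reuse */
	return init ? 0xff : random_tag();
}

int main(void)
{
	void *obj = malloc(32);

	/* kmalloc() path, hook 1: kasan_slab_alloc(), keep_tag == false. */
	obj = set_tag(obj, assign_tag(obj, 0, 0));
	uint8_t first = get_tag(obj);

	/* kmalloc() path, hook 2: kasan_kmalloc(), keep_tag == true. */
	obj = set_tag(obj, assign_tag(obj, 0, 1));
	printf("tag after both hooks: %#x (assigned by hook 1: %#x)\n",
	       get_tag(obj), first);

	/* kasan_krealloc() also passes keep_tag == true: tag is kept. */
	obj = set_tag(obj, assign_tag(obj, 0, 1));
	printf("tag after krealloc:   %#x\n", get_tag(obj));

	/* Strip the tag before handing the pointer back to libc. */
	free((void *)((uintptr_t)obj & ~TAG_MASK));
	return 0;
}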