Diffstat (limited to 'mm/kasan/common.c'):
 mm/kasan/common.c | 65 +++++++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 43 insertions(+), 22 deletions(-)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 03d5d1374ca7..73c9cbfdedf4 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -298,8 +298,6 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                 return;
         }
 
-        cache->align = round_up(cache->align, KASAN_SHADOW_SCALE_SIZE);
-
         *flags |= SLAB_KASAN;
 }
 
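[Note] The first hunk stops kasan_cache_create() from rounding cache->align up to KASAN_SHADOW_SCALE_SIZE. For reference, a minimal userspace sketch of what the dropped line computed; the round_up() macro below mimics the power-of-two version from include/linux/kernel.h, and KASAN_SHADOW_SCALE_SIZE == 8 matches the generic KASAN shadow granule (this sketch is illustrative, not kernel code):

#include <stdio.h>

#define KASAN_SHADOW_SCALE_SIZE 8
/* Power-of-two rounding, mimicking include/linux/kernel.h. */
#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
        unsigned int aligns[] = { 4, 8, 12, 16 };

        for (int i = 0; i < 4; i++)
                printf("align %2u -> %2u\n", aligns[i],
                       (unsigned int)round_up(aligns[i], KASAN_SHADOW_SCALE_SIZE));
        return 0; /* prints 8, 8, 16, 16 */
}

Note that this form of round_up() is only valid for power-of-two alignments, which KASAN_SHADOW_SCALE_SIZE satisfies.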
@@ -349,28 +347,43 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 }
 
 /*
- * Since it's desirable to only call object contructors once during slab
- * allocation, we preassign tags to all such objects. Also preassign tags for
- * SLAB_TYPESAFE_BY_RCU slabs to avoid use-after-free reports.
- * For SLAB allocator we can't preassign tags randomly since the freelist is
- * stored as an array of indexes instead of a linked list. Assign tags based
- * on objects indexes, so that objects that are next to each other get
- * different tags.
- * After a tag is assigned, the object always gets allocated with the same tag.
- * The reason is that we can't change tags for objects with constructors on
- * reallocation (even for non-SLAB_TYPESAFE_BY_RCU), because the constructor
- * code can save the pointer to the object somewhere (e.g. in the object
- * itself). Then if we retag it, the old saved pointer will become invalid.
+ * This function assigns a tag to an object considering the following:
+ * 1. A cache might have a constructor, which might save a pointer to a slab
+ *    object somewhere (e.g. in the object itself). We preassign a tag for
+ *    each object in caches with constructors during slab creation and reuse
+ *    the same tag each time a particular object is allocated.
+ * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
+ *    accessed after being freed. We preassign tags for objects in these
+ *    caches as well.
+ * 3. For SLAB allocator we can't preassign tags randomly since the freelist
+ *    is stored as an array of indexes instead of a linked list. Assign tags
+ *    based on objects indexes, so that objects that are next to each other
+ *    get different tags.
  */
-static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new)
+static u8 assign_tag(struct kmem_cache *cache, const void *object,
+                        bool init, bool krealloc)
 {
+        /* Reuse the same tag for krealloc'ed objects. */
+        if (krealloc)
+                return get_tag(object);
+
+        /*
+         * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
+         * set, assign a tag when the object is being allocated (init == false).
+         */
         if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
-                return new ? KASAN_TAG_KERNEL : random_tag();
+                return init ? KASAN_TAG_KERNEL : random_tag();
 
+        /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
 #ifdef CONFIG_SLAB
+        /* For SLAB assign tags based on the object index in the freelist. */
         return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
 #else
-        return new ? random_tag() : get_tag(object);
+        /*
+         * For SLUB assign a random tag during slab creation, otherwise reuse
+         * the already assigned tag.
+         */
+        return init ? random_tag() : get_tag(object);
 #endif
 }
 
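[Note] The reworked assign_tag() distinguishes three situations via its two flags: init (tag requested at slab creation vs. at allocation time) and krealloc (reallocation of an already tagged object). The sketch below replays that decision flow in userspace. struct cache, the SLAB_TYPESAFE_BY_RCU bit, random_tag() and get_tag() are simplified stand-ins for the kernel's versions, only the SLUB (#else) branch is modeled, and the tag placement in pointer bits 56..63 assumes 64-bit arm64-style tagged pointers; tag values are purely illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define KASAN_TAG_KERNEL 0xFF          /* tag of "native", untagged pointers */
#define SLAB_TYPESAFE_BY_RCU (1u << 0) /* stand-in for the real slab flag */

struct cache {
        void (*ctor)(void *);  /* constructor, NULL if none */
        unsigned int flags;
};

/* rand() is never seeded here, so output is deterministic per run. */
static uint8_t random_tag(void)
{
        return (uint8_t)(rand() & 0xFF);
}

/* With software tags the tag lives in bits 56..63 of the pointer. */
static uint8_t get_tag(const void *object)
{
        return (uint8_t)((uintptr_t)object >> 56);
}

static uint8_t assign_tag(struct cache *cache, const void *object,
                          bool init, bool krealloc)
{
        if (krealloc)           /* krealloc: keep whatever tag the object has */
                return get_tag(object);

        if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return init ? KASAN_TAG_KERNEL : random_tag();

        /* ctor/RCU cache, SLUB branch: preassign at creation, reuse later */
        return init ? random_tag() : get_tag(object);
}

int main(void)
{
        struct cache plain = { NULL, 0 };
        void *obj = (void *)((0xABUL << 56) | 0x1000); /* pretend-tagged ptr */

        printf("init:     0x%02x\n", assign_tag(&plain, obj, true, false));  /* 0xff */
        printf("alloc:    0x%02x\n", assign_tag(&plain, obj, false, false)); /* random */
        printf("krealloc: 0x%02x\n", assign_tag(&plain, obj, false, true));  /* 0xab */
        return 0;
}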
@@ -386,7 +399,8 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
         __memset(alloc_info, 0, sizeof(*alloc_info));
 
         if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-                object = set_tag(object, assign_tag(cache, object, true));
+                object = set_tag(object,
+                                assign_tag(cache, object, true, false));
 
         return (void *)object;
 }
@@ -452,8 +466,8 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
         return __kasan_slab_free(cache, object, ip, true);
 }
 
-void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
-                                        size_t size, gfp_t flags)
+static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
+                                size_t size, gfp_t flags, bool krealloc)
 {
         unsigned long redzone_start;
         unsigned long redzone_end;
@@ -471,7 +485,7 @@ void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                 KASAN_SHADOW_SCALE_SIZE);
 
         if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-                tag = assign_tag(cache, object, false);
+                tag = assign_tag(cache, object, false, krealloc);
 
         /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
         kasan_unpoison_shadow(set_tag(object, tag), size);
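[Note] set_tag() is what folds the chosen tag back into the returned pointer, and, as the in-hunk comment says, it is a no-op without CONFIG_KASAN_SW_TAGS. A sketch of the tagged-pointer helpers, assuming the arm64 top-byte layout where the tag occupies pointer bits 56..63; these mirror, but are not, the kernel's set_tag()/get_tag() from mm/kasan/kasan.h:

#include <stdint.h>
#include <stdio.h>

#define KASAN_TAG_SHIFT 56
#define KASAN_TAG_MASK  (0xFFUL << KASAN_TAG_SHIFT)

/* Fold a tag into the unused top byte of a (64-bit) pointer. */
static void *set_tag(const void *addr, uint8_t tag)
{
        return (void *)(((uintptr_t)addr & ~(uintptr_t)KASAN_TAG_MASK) |
                        ((uintptr_t)tag << KASAN_TAG_SHIFT));
}

static uint8_t get_tag(const void *addr)
{
        return (uint8_t)((uintptr_t)addr >> KASAN_TAG_SHIFT);
}

int main(void)
{
        void *p = (void *)0x0000ffff12345678UL;
        void *tagged = set_tag(p, 0xAB);

        printf("tag:  0x%02x\n", get_tag(tagged)); /* prints 0xab */
        printf("addr: %p\n", (void *)((uintptr_t)tagged &
                                      ~(uintptr_t)KASAN_TAG_MASK));
        return 0;
}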
@@ -483,6 +497,12 @@ void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
 
         return set_tag(object, tag);
 }
+
+void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
+                                        size_t size, gfp_t flags)
+{
+        return __kasan_kmalloc(cache, object, size, flags, false);
+}
 EXPORT_SYMBOL(kasan_kmalloc);
 
 void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
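[Note] This hunk is the usual split of an exported function into a static worker plus a thin exported wrapper: external callers keep the old kasan_kmalloc() signature and implicitly get krealloc=false, while kasan_krealloc() (final hunk) reaches the worker with krealloc=true. A compressed sketch of the shape, using hypothetical names (worker/entry) rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

/* Static worker: carries the extra, internal-only flag. */
static int worker(int arg, bool krealloc)
{
        printf("arg=%d krealloc=%d\n", arg, krealloc);
        return arg;
}

/* Exported wrapper: public signature unchanged, flag pinned to false. */
int entry(int arg)
{
        return worker(arg, false);
}

int main(void)
{
        entry(1);         /* the kasan_kmalloc() path:  krealloc=0 */
        worker(2, true);  /* the kasan_krealloc() path: krealloc=1 */
        return 0;
}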
@@ -522,7 +542,8 @@ void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
         if (unlikely(!PageSlab(page)))
                 return kasan_kmalloc_large(object, size, flags);
         else
-                return kasan_kmalloc(page->slab_cache, object, size, flags);
+                return __kasan_kmalloc(page->slab_cache, object, size,
+                                        flags, true);
 }
 
 void kasan_poison_kfree(void *ptr, unsigned long ip)