author     Andrey Ryabinin <aryabinin@virtuozzo.com>       2016-06-24 17:49:34 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-06-24 20:23:52 -0400
commit     9b75a867cc9ddbafcaf35029358ac500f2635ff3 (patch)
tree       cf3cb90c4ee9e12a42d89881c3aa12d8bb0ba33b /mm/kasan
parent     a6921c2974a09bfe8d039980c0b14a305644930b (diff)
mm: mempool: kasan: don't put mempool objects in quarantine
Currently we may put elements reserved by a mempool into quarantine via
kasan_kfree().  This is totally wrong, since the quarantine may really free
these objects.  So when the mempool later tries to reuse such an element, a
use-after-free will happen.  Or the mempool may decide that it no longer
needs that element and double-free it.

So don't put the object into quarantine in kasan_kfree(); just poison it.
Rename kasan_kfree() to kasan_poison_kfree() to reflect that.

Also, we shouldn't use kasan_slab_alloc()/kasan_krealloc() in
kasan_unpoison_element(), because those functions may update the allocation
stacktrace.  This would be wrong for most of the remove_element() call
sites.

(The only call site where we may want to update the alloc stacktrace is in
mempool_alloc().  Kmemleak solves this by calling kmemleak_update_trace(),
so we could do something similar.  But that is out of scope for this
patch.)

Fixes: 55834c59098d ("mm: kasan: initial memory quarantine implementation")
Link: http://lkml.kernel.org/r/575977C3.1010905@virtuozzo.com
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reported-by: Kuthonuzo Luruo <kuthonuzo.luruo@hpe.com>
Acked-by: Alexander Potapenko <glider@google.com>
Cc: Dmitriy Vyukov <dvyukov@google.com>
Cc: Kostya Serebryany <kcc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
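To make the failure mode concrete, here is a minimal userspace sketch of
the bug described above; the quarantine here is a hypothetical stand-in for
KASAN's quarantine (deferred, batched freeing), not kernel code, and the
reserved element stands in for a mempool's reserve.  Built with
-fsanitize=address, the final write is reported as a heap-use-after-free:

    #include <stdlib.h>

    static void *quarantine[16];          /* stand-in for KASAN's quarantine */
    static int q_len;

    static void quarantine_put(void *p)   /* defer the free, don't skip it */
    {
            quarantine[q_len++] = p;
    }

    static void quarantine_drain(void)    /* the quarantine really frees */
    {
            while (q_len)
                    free(quarantine[--q_len]);
    }

    int main(void)
    {
            char *reserved = malloc(128); /* a mempool's reserved element */

            /* Bug: the element is only returned to the pool's reserve,
             * but it is pushed into the quarantine as if truly freed. */
            quarantine_put(reserved);

            /* Under pressure the quarantine frees its entries for real. */
            quarantine_drain();

            /* The pool still considers the element available, so reusing
             * it is a use-after-free; poisoning without quarantining
             * avoids exactly this. */
            reserved[0] = 'x';
            return 0;
    }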
Diffstat (limited to 'mm/kasan')
-rw-r--r--  mm/kasan/kasan.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 28439acda6ec..6845f9294696 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -508,7 +508,7 @@ void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 	kasan_kmalloc(cache, object, cache->object_size, flags);
 }
 
-void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
+static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
 {
 	unsigned long size = cache->object_size;
 	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
@@ -626,7 +626,7 @@ void kasan_krealloc(const void *object, size_t size, gfp_t flags)
 	kasan_kmalloc(page->slab_cache, object, size, flags);
 }
 
-void kasan_kfree(void *ptr)
+void kasan_poison_kfree(void *ptr)
 {
 	struct page *page;
 
@@ -636,7 +636,7 @@ void kasan_kfree(void *ptr)
 		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
 				KASAN_FREE_PAGE);
 	else
-		kasan_slab_free(page->slab_cache, ptr);
+		kasan_poison_slab_free(page->slab_cache, ptr);
 }
 
 void kasan_kfree_large(const void *ptr)
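The hunks above cover only mm/kasan; the mempool side of the same fix has
to pair the renamed helper with a stacktrace-neutral unpoison.  The sketch
below is reconstructed from the commit message, not the verbatim
mm/mempool.c hunk, and kasan_unpoison_slab() is assumed here to be a helper
that unpoisons an object without updating its alloc stacktrace:

    /* Returning an element to the pool's reserve: poison only, never
     * quarantine, since the pool still owns the memory. */
    static void kasan_poison_element(mempool_t *pool, void *element)
    {
    	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
    		kasan_poison_kfree(element);
    	if (pool->alloc == mempool_alloc_pages)
    		kasan_free_pages(element, (unsigned long)pool->pool_data);
    }

    /* Taking an element back out: unpoison without touching the alloc
     * stacktrace, hence no kasan_slab_alloc()/kasan_krealloc() here. */
    static void kasan_unpoison_element(mempool_t *pool, void *element)
    {
    	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
    		kasan_unpoison_slab(element);
    	if (pool->alloc == mempool_alloc_pages)
    		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
    }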