about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorAlexander Potapenko <glider@google.com>2016-03-25 17:21:56 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-03-25 19:37:42 -0400
commite6e8379c876de16c6b78f83b15d5ac32c79cb440 (patch)
tree9ee9ebd98fbd1c61acc8b863b5bf1d4b36c5f51e
parentaaf4fb712b8311d8b950e89937479d61e9c25ba8 (diff)
kasan: modify kmalloc_large_oob_right(), add kmalloc_pagealloc_oob_right()
This patchset implements SLAB support for KASAN. Unlike SLUB, SLAB doesn't store allocation/deallocation stacks for heap objects, therefore we reimplement this feature in mm/kasan/stackdepot.c. The intention is to ultimately switch SLUB to use this implementation as well, which will save a lot of memory (right now SLUB bloats each object by 256 bytes to store the allocation/deallocation stacks). Also neither SLUB nor SLAB delay the reuse of freed memory chunks, which is necessary for better detection of use-after-free errors. We introduce memory quarantine (mm/kasan/quarantine.c), which allows delayed reuse of deallocated memory. This patch (of 7): Rename kmalloc_large_oob_right() to kmalloc_pagealloc_oob_right(), as the test only checks the page allocator functionality. Also reimplement kmalloc_large_oob_right() so that the test allocates a large enough chunk of memory that still does not trigger the page allocator fallback. Signed-off-by: Alexander Potapenko <glider@google.com> Cc: Christoph Lameter <cl@linux.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: David Rientjes <rientjes@google.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Andrey Konovalov <adech.fo@gmail.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Konstantin Serebryany <kcc@google.com> Cc: Dmitry Chernenkov <dmitryc@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--lib/test_kasan.c28
1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index c32f3b0048dc..90ad74f71535 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -65,11 +65,34 @@ static noinline void __init kmalloc_node_oob_right(void)
65 kfree(ptr); 65 kfree(ptr);
66} 66}
67 67
68static noinline void __init kmalloc_large_oob_right(void) 68#ifdef CONFIG_SLUB
69static noinline void __init kmalloc_pagealloc_oob_right(void)
69{ 70{
70 char *ptr; 71 char *ptr;
71 size_t size = KMALLOC_MAX_CACHE_SIZE + 10; 72 size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
72 73
74 /* Allocate a chunk that does not fit into a SLUB cache to trigger
75 * the page allocator fallback.
76 */
77 pr_info("kmalloc pagealloc allocation: out-of-bounds to right\n");
78 ptr = kmalloc(size, GFP_KERNEL);
79 if (!ptr) {
80 pr_err("Allocation failed\n");
81 return;
82 }
83
84 ptr[size] = 0;
85 kfree(ptr);
86}
87#endif
88
89static noinline void __init kmalloc_large_oob_right(void)
90{
91 char *ptr;
92 size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
93 /* Allocate a chunk that is large enough, but still fits into a slab
94 * and does not trigger the page allocator fallback in SLUB.
95 */
73 pr_info("kmalloc large allocation: out-of-bounds to right\n"); 96 pr_info("kmalloc large allocation: out-of-bounds to right\n");
74 ptr = kmalloc(size, GFP_KERNEL); 97 ptr = kmalloc(size, GFP_KERNEL);
75 if (!ptr) { 98 if (!ptr) {
@@ -324,6 +347,9 @@ static int __init kmalloc_tests_init(void)
324 kmalloc_oob_right(); 347 kmalloc_oob_right();
325 kmalloc_oob_left(); 348 kmalloc_oob_left();
326 kmalloc_node_oob_right(); 349 kmalloc_node_oob_right();
350#ifdef CONFIG_SLUB
351 kmalloc_pagealloc_oob_right();
352#endif
327 kmalloc_large_oob_right(); 353 kmalloc_large_oob_right();
328 kmalloc_oob_krealloc_more(); 354 kmalloc_oob_krealloc_more();
329 kmalloc_oob_krealloc_less(); 355 kmalloc_oob_krealloc_less();