author     Andrey Ryabinin <a.ryabinin@samsung.com>	2015-02-13 17:39:42 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>	2015-02-14 00:21:41 -0500
commit     0316bec22ec95ea2faca6406437b0b5950553b7c (patch)
tree       6a278e1515188e738df2b04e9ada375215b3df22 /mm
parent     a79316c6178ca419e35feef47d47f50b4e0ee9f2 (diff)
mm: slub: add kernel address sanitizer support for slub allocator
With this patch KASAN is able to catch bugs in memory allocated by SLUB. Initially, all objects in a newly allocated slab page are marked as redzone. Later, when a SLUB object is allocated, the number of bytes requested by the caller is marked as accessible, and the rest of the object (including SLUB's metadata) is marked as redzone (inaccessible).

We also mark an object as accessible when ksize() is called for it. There are some places in the kernel where ksize() is called to find out the size of the actually allocated area. Such callers may validly access the whole allocated memory, so it has to be marked as accessible.

Code in slub.c and slab_common.c may validly access an object's metadata, so instrumentation is disabled for these files.

Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Signed-off-by: Dmitry Chernenkov <dmitryc@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Konstantin Serebryany <kcc@google.com>
Signed-off-by: Andrey Konovalov <adech.fo@gmail.com>
Cc: Yuri Gribov <tetra2005@gmail.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
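As an illustration of the class of bug this lets KASAN catch, below is a minimal, hypothetical test module (the module scaffolding and names are invented for demonstration; kmalloc(), kfree() and pr_info() are the real APIs). Assuming the 120-byte request is served from the 128-byte kmalloc cache, this patch leaves bytes 120..127 of the slab object poisoned as KASAN_KMALLOC_REDZONE, so the one-byte read past the requested size should be reported by a CONFIG_KASAN kernel as an out-of-bounds access:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

static int __init kasan_oob_example_init(void)
{
	char *p = kmalloc(120, GFP_KERNEL);

	if (!p)
		return -ENOMEM;

	/* Reads one byte past the 120 bytes requested from the slab.
	 * Assuming the allocation came from the 128-byte kmalloc cache,
	 * p[120] lies in the redzone poisoned by kasan_kmalloc(), so
	 * KASAN prints an "out of bounds access" report here.
	 */
	pr_info("byte past the end: %x\n", (unsigned char)p[120]);

	kfree(p);
	return 0;
}

static void __exit kasan_oob_example_exit(void)
{
}

module_init(kasan_oob_example_init);
module_exit(kasan_oob_example_exit);
MODULE_LICENSE("GPL");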
Diffstat (limited to 'mm')
-rw-r--r--  mm/Makefile       |  3
-rw-r--r--  mm/kasan/kasan.c  | 98
-rw-r--r--  mm/kasan/kasan.h  |  5
-rw-r--r--  mm/kasan/report.c | 21
-rw-r--r--  mm/slab_common.c  |  5
-rw-r--r--  mm/slub.c         | 31
6 files changed, 160 insertions, 3 deletions
diff --git a/mm/Makefile b/mm/Makefile
index 930b52df4aca..088c68e9ec35 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -2,6 +2,9 @@
 # Makefile for the linux memory manager.
 #
 
+KASAN_SANITIZE_slab_common.o := n
+KASAN_SANITIZE_slub.o := n
+
 mmu-y			:= nommu.o
 mmu-$(CONFIG_MMU)	:= gup.o highmem.o memory.o mincore.o \
 			   mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index b516eb8632b9..dc83f070edb6 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -31,6 +31,7 @@
 #include <linux/kasan.h>
 
 #include "kasan.h"
+#include "../slab.h"
 
 /*
  * Poisons the shadow memory for 'size' bytes starting from 'addr'.
@@ -268,6 +269,103 @@ void kasan_free_pages(struct page *page, unsigned int order)
 				KASAN_FREE_PAGE);
 }
 
+void kasan_poison_slab(struct page *page)
+{
+	kasan_poison_shadow(page_address(page),
+			PAGE_SIZE << compound_order(page),
+			KASAN_KMALLOC_REDZONE);
+}
+
+void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
+{
+	kasan_unpoison_shadow(object, cache->object_size);
+}
+
+void kasan_poison_object_data(struct kmem_cache *cache, void *object)
+{
+	kasan_poison_shadow(object,
+			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
+			KASAN_KMALLOC_REDZONE);
+}
+
+void kasan_slab_alloc(struct kmem_cache *cache, void *object)
+{
+	kasan_kmalloc(cache, object, cache->object_size);
+}
+
+void kasan_slab_free(struct kmem_cache *cache, void *object)
+{
+	unsigned long size = cache->object_size;
+	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+
+	/* RCU slabs could be legally used after free within the RCU period */
+	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
+		return;
+
+	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
+}
+
+void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
+{
+	unsigned long redzone_start;
+	unsigned long redzone_end;
+
+	if (unlikely(object == NULL))
+		return;
+
+	redzone_start = round_up((unsigned long)(object + size),
+				KASAN_SHADOW_SCALE_SIZE);
+	redzone_end = round_up((unsigned long)object + cache->object_size,
+				KASAN_SHADOW_SCALE_SIZE);
+
+	kasan_unpoison_shadow(object, size);
+	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
+		KASAN_KMALLOC_REDZONE);
+}
+EXPORT_SYMBOL(kasan_kmalloc);
+
+void kasan_kmalloc_large(const void *ptr, size_t size)
+{
+	struct page *page;
+	unsigned long redzone_start;
+	unsigned long redzone_end;
+
+	if (unlikely(ptr == NULL))
+		return;
+
+	page = virt_to_page(ptr);
+	redzone_start = round_up((unsigned long)(ptr + size),
+				KASAN_SHADOW_SCALE_SIZE);
+	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+
+	kasan_unpoison_shadow(ptr, size);
+	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
+		KASAN_PAGE_REDZONE);
+}
+
+void kasan_krealloc(const void *object, size_t size)
+{
+	struct page *page;
+
+	if (unlikely(object == ZERO_SIZE_PTR))
+		return;
+
+	page = virt_to_head_page(object);
+
+	if (unlikely(!PageSlab(page)))
+		kasan_kmalloc_large(object, size);
+	else
+		kasan_kmalloc(page->slab_cache, object, size);
+}
+
+void kasan_kfree_large(const void *ptr)
+{
+	struct page *page = virt_to_page(ptr);
+
+	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
+			KASAN_FREE_PAGE);
+}
+
 #define DEFINE_ASAN_LOAD_STORE(size)				\
 	void __asan_load##size(unsigned long addr)		\
 	{							\
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index d3c90d5dd97a..5b052ab40cf9 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -7,6 +7,11 @@
 #define KASAN_SHADOW_MASK       (KASAN_SHADOW_SCALE_SIZE - 1)
 
 #define KASAN_FREE_PAGE         0xFF  /* page was freed */
+#define KASAN_FREE_PAGE         0xFF  /* page was freed */
+#define KASAN_PAGE_REDZONE      0xFE  /* redzone for kmalloc_large allocations */
+#define KASAN_KMALLOC_REDZONE   0xFC  /* redzone inside slub object */
+#define KASAN_KMALLOC_FREE      0xFB  /* object was freed (kmem_cache_free/kfree) */
+
 
 struct kasan_access_info {
 	const void *access_addr;
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index fab8e7882ff1..2760edb4d0a8 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -24,6 +24,7 @@
 #include <linux/kasan.h>
 
 #include "kasan.h"
+#include "../slab.h"
 
 /* Shadow layout customization. */
 #define SHADOW_BYTES_PER_BLOCK 1
@@ -55,8 +56,11 @@ static void print_error_description(struct kasan_access_info *info)
 
 	switch (shadow_val) {
 	case KASAN_FREE_PAGE:
+	case KASAN_KMALLOC_FREE:
 		bug_type = "use after free";
 		break;
+	case KASAN_PAGE_REDZONE:
+	case KASAN_KMALLOC_REDZONE:
 	case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
 		bug_type = "out of bounds access";
 		break;
@@ -77,6 +81,23 @@ static void print_address_description(struct kasan_access_info *info)
 	if ((addr >= (void *)PAGE_OFFSET) &&
 	    (addr < high_memory)) {
 		struct page *page = virt_to_head_page(addr);
+
+		if (PageSlab(page)) {
+			void *object;
+			struct kmem_cache *cache = page->slab_cache;
+			void *last_object;
+
+			object = virt_to_obj(cache, page_address(page), addr);
+			last_object = page_address(page) +
+				page->objects * cache->size;
+
+			if (unlikely(object > last_object))
+				object = last_object; /* we hit into padding */
+
+			object_err(cache, page, object,
+				"kasan: bad access detected");
+			return;
+		}
 		dump_page(page, "kasan: bad access detected");
 	}
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 429a4506b382..999bb3424d44 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -898,6 +898,7 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	page = alloc_kmem_pages(flags, order);
 	ret = page ? page_address(page) : NULL;
 	kmemleak_alloc(ret, size, 1, flags);
+	kasan_kmalloc_large(ret, size);
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_order);
@@ -1077,8 +1078,10 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
 	if (p)
 		ks = ksize(p);
 
-	if (ks >= new_size)
+	if (ks >= new_size) {
+		kasan_krealloc((void *)p, new_size);
 		return (void *)p;
+	}
 
 	ret = kmalloc_track_caller(new_size, flags);
 	if (ret && p)
diff --git a/mm/slub.c b/mm/slub.c
index 37555ad8894d..6832c4eab104 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1251,11 +1251,13 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
 static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 {
 	kmemleak_alloc(ptr, size, 1, flags);
+	kasan_kmalloc_large(ptr, size);
 }
 
 static inline void kfree_hook(const void *x)
 {
 	kmemleak_free(x);
+	kasan_kfree_large(x);
 }
 
 static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
@@ -1278,6 +1280,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
 	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
 	memcg_kmem_put_cache(s);
+	kasan_slab_alloc(s, object);
 }
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -1301,6 +1304,8 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 #endif
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(x, s->object_size);
+
+	kasan_slab_free(s, x);
 }
 
 /*
@@ -1395,8 +1400,11 @@ static void setup_object(struct kmem_cache *s, struct page *page,
 				void *object)
 {
 	setup_object_debug(s, page, object);
-	if (unlikely(s->ctor))
+	if (unlikely(s->ctor)) {
+		kasan_unpoison_object_data(s, object);
 		s->ctor(object);
+		kasan_poison_object_data(s, object);
+	}
 }
 
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -1429,6 +1437,8 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (unlikely(s->flags & SLAB_POISON))
 		memset(start, POISON_INUSE, PAGE_SIZE << order);
 
+	kasan_poison_slab(page);
+
 	for_each_object_idx(p, idx, s, start, page->objects) {
 		setup_object(s, page, p);
 		if (likely(idx < page->objects))
@@ -2522,6 +2532,7 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+	kasan_kmalloc(s, ret, size);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -2548,6 +2559,8 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, s->size, gfpflags, node);
+
+	kasan_kmalloc(s, ret, size);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
@@ -2933,6 +2946,7 @@ static void early_kmem_cache_node_alloc(int node)
 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
 	init_tracking(kmem_cache_node, n);
 #endif
+	kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node));
 	init_kmem_cache_node(n);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
@@ -3305,6 +3319,8 @@ void *__kmalloc(size_t size, gfp_t flags)
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
+	kasan_kmalloc(s, ret, size);
+
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
@@ -3348,12 +3364,14 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 
 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
+	kasan_kmalloc(s, ret, size);
+
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
-size_t ksize(const void *object)
+static size_t __ksize(const void *object)
 {
 	struct page *page;
 
@@ -3369,6 +3387,15 @@ size_t ksize(const void *object)
 
 	return slab_ksize(page->slab_cache);
 }
+
+size_t ksize(const void *object)
+{
+	size_t size = __ksize(object);
+	/* We assume that ksize callers could use whole allocated area,
+	   so we need unpoison this area. */
+	kasan_krealloc(object, size);
+	return size;
+}
 EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)