author		Andrey Ryabinin <a.ryabinin@samsung.com>	2015-02-13 17:39:42 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-14 00:21:41 -0500
commit		0316bec22ec95ea2faca6406437b0b5950553b7c (patch)
tree		6a278e1515188e738df2b04e9ada375215b3df22 /mm/slub.c
parent		a79316c6178ca419e35feef47d47f50b4e0ee9f2 (diff)
mm: slub: add kernel address sanitizer support for slub allocator
With this patch KASAN is able to catch bugs in memory allocated by the slub allocator. Initially, all objects in a newly allocated slab page are marked as redzone. Later, when a slub object is allocated, the number of bytes requested by the caller is marked as accessible, and the rest of the object (including slub's metadata) is marked as redzone (inaccessible).

We also mark an object as accessible when ksize() is called for it. There are places in the kernel where ksize() is called to find out the size of the actually allocated area; such callers may validly access the whole allocated memory, so it has to be marked as accessible.

Code in slub.c and slab_common.c may validly access objects' metadata, so instrumentation for these files is disabled.

Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Signed-off-by: Dmitry Chernenkov <dmitryc@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Konstantin Serebryany <kcc@google.com>
Signed-off-by: Andrey Konovalov <adech.fo@gmail.com>
Cc: Yuri Gribov <tetra2005@gmail.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
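[Editor's illustration] To make the poisoning scheme described above concrete, here is a minimal user-space sketch of the idea, not the kernel implementation: the toy_* names, the fixed 32-byte object, and the one-shadow-byte-per-object-byte layout are illustrative assumptions (real KASAN uses compiler instrumentation and a compressed shadow encoding). It only models how an object's shadow state changes across slab setup, allocation, ksize() and free, and how checking an access against that state catches out-of-bounds and use-after-free accesses, mirroring the hooks added in the diff below.

#include <stdio.h>
#include <string.h>

#define OBJECT_SIZE 32

/* One shadow byte per object byte: 0 = accessible, 1 = redzone (poisoned). */
static unsigned char shadow[OBJECT_SIZE];

static void mark_range(size_t start, size_t len, unsigned char state)
{
	memset(shadow + start, state, len);
}

/* New slab page: every object starts out fully poisoned (cf. kasan_poison_slab). */
static void toy_poison_slab(void)
{
	mark_range(0, OBJECT_SIZE, 1);
}

/* Allocation of 'requested' bytes: only those become accessible; the rest of
 * the object, including slub metadata, stays a redzone (cf. kasan_kmalloc). */
static void toy_kmalloc_hook(size_t requested)
{
	mark_range(0, requested, 0);
	mark_range(requested, OBJECT_SIZE - requested, 1);
}

/* ksize() callers may touch the whole allocated area, so unpoison all of it
 * (cf. the kasan_krealloc() call in the new ksize() wrapper below). */
static void toy_ksize_hook(void)
{
	mark_range(0, OBJECT_SIZE, 0);
}

/* Free: the whole object becomes inaccessible again (cf. kasan_slab_free). */
static void toy_free_hook(void)
{
	mark_range(0, OBJECT_SIZE, 1);
}

/* Every access is checked against the shadow state. */
static void toy_check_access(size_t offset)
{
	if (shadow[offset])
		printf("KASAN-like report: invalid access at offset %zu\n", offset);
	else
		printf("access at offset %zu is fine\n", offset);
}

int main(void)
{
	toy_poison_slab();
	toy_kmalloc_hook(24);	/* caller asked for 24 of the 32 object bytes */

	toy_check_access(10);	/* fine: inside the requested area */
	toy_check_access(28);	/* caught: redzone past byte 24 */

	toy_ksize_hook();
	toy_check_access(28);	/* fine again: ksize() unpoisons the whole object */

	toy_free_hook();
	toy_check_access(10);	/* caught: use-after-free */
	return 0;
}

Built with an ordinary C compiler, the sketch reports the access past the 24 requested bytes and the access after the free, which is the behaviour the real hooks in the patch enable for slub objects.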
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	31
1 file changed, 29 insertions(+), 2 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 37555ad8894d..6832c4eab104 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1251,11 +1251,13 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
 static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 {
 	kmemleak_alloc(ptr, size, 1, flags);
+	kasan_kmalloc_large(ptr, size);
 }
 
 static inline void kfree_hook(const void *x)
 {
 	kmemleak_free(x);
+	kasan_kfree_large(x);
 }
 
 static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
@@ -1278,6 +1280,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
 	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
 	memcg_kmem_put_cache(s);
+	kasan_slab_alloc(s, object);
 }
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -1301,6 +1304,8 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 #endif
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(x, s->object_size);
+
+	kasan_slab_free(s, x);
 }
 
 /*
@@ -1395,8 +1400,11 @@ static void setup_object(struct kmem_cache *s, struct page *page,
 				void *object)
 {
 	setup_object_debug(s, page, object);
-	if (unlikely(s->ctor))
+	if (unlikely(s->ctor)) {
+		kasan_unpoison_object_data(s, object);
 		s->ctor(object);
+		kasan_poison_object_data(s, object);
+	}
 }
 
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -1429,6 +1437,8 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (unlikely(s->flags & SLAB_POISON))
 		memset(start, POISON_INUSE, PAGE_SIZE << order);
 
+	kasan_poison_slab(page);
+
 	for_each_object_idx(p, idx, s, start, page->objects) {
 		setup_object(s, page, p);
 		if (likely(idx < page->objects))
@@ -2522,6 +2532,7 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+	kasan_kmalloc(s, ret, size);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -2548,6 +2559,8 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, s->size, gfpflags, node);
+
+	kasan_kmalloc(s, ret, size);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
@@ -2933,6 +2946,7 @@ static void early_kmem_cache_node_alloc(int node)
 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
 	init_tracking(kmem_cache_node, n);
 #endif
+	kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node));
 	init_kmem_cache_node(n);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
@@ -3305,6 +3319,8 @@ void *__kmalloc(size_t size, gfp_t flags)
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
+	kasan_kmalloc(s, ret, size);
+
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
@@ -3348,12 +3364,14 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 
 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
+	kasan_kmalloc(s, ret, size);
+
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
-size_t ksize(const void *object)
+static size_t __ksize(const void *object)
 {
 	struct page *page;
 
@@ -3369,6 +3387,15 @@ size_t ksize(const void *object)
 
 	return slab_ksize(page->slab_cache);
 }
+
+size_t ksize(const void *object)
+{
+	size_t size = __ksize(object);
+	/* We assume that ksize callers could use whole allocated area,
+	   so we need unpoison this area. */
+	kasan_krealloc(object, size);
+	return size;
+}
 EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)