author		Catalin Marinas <catalin.marinas@arm.com>	2009-06-11 08:22:40 -0400
committer	Catalin Marinas <catalin.marinas@arm.com>	2009-06-11 12:03:29 -0400
commit		d5cff635290aec9ad7e6ee546aa4fae895361cbb (patch)
tree		172d99e0775a2e858d6cfa3ba0a197ecfdd03fce /mm
parent		04f70336c80c43a15e617b36c2043dfa0ad6ed0f (diff)
kmemleak: Add the slab memory allocation/freeing hooks
This patch adds calls to the kmemleak_(alloc|free) callbacks from the slab
allocator. It also adds the SLAB_NOLEAKTRACE flag so that recursive calls
into kmemleak are avoided when kmemleak allocates its own data structures
from the slab allocator.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
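The recursion break happens in the hooks themselves: each wrapper receives the cache's flags and skips the call into kmemleak proper when SLAB_NOLEAKTRACE is set, which is how the caches kmemleak creates for its own metadata opt out of tracing. A minimal sketch of the allocation-side wrapper, assuming the inline form this series adds to include/linux/kmemleak.h:

static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
					    int min_count, unsigned long flags,
					    gfp_t gfp)
{
	/* Caches created with SLAB_NOLEAKTRACE hold kmemleak's own data
	 * structures; tracing them would recurse back into the slab
	 * allocator and then into kmemleak again. */
	if (!(flags & SLAB_NOLEAKTRACE))
		kmemleak_alloc(ptr, size, min_count, gfp);
}

Here a min_count of 1 tells kmemleak the object should have at least one reference before it is considered leaked, and gfp lets kmemleak allocate its tracking metadata under the caller's allocation constraints.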
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	32
1 file changed, 30 insertions(+), 2 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index f85831da9080..859067f8e4fd 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -107,6 +107,7 @@
 #include	<linux/string.h>
 #include	<linux/uaccess.h>
 #include	<linux/nodemask.h>
+#include	<linux/kmemleak.h>
 #include	<linux/mempolicy.h>
 #include	<linux/mutex.h>
 #include	<linux/fault-inject.h>
@@ -178,13 +179,13 @@
 			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
 #endif
 
 /*
@@ -964,6 +965,14 @@ static struct array_cache *alloc_arraycache(int node, int entries,
 	struct array_cache *nc = NULL;
 
 	nc = kmalloc_node(memsize, GFP_KERNEL, node);
+	/*
+	 * The array_cache structures contain pointers to free objects.
+	 * However, when such objects are allocated or transferred to
+	 * another cache the pointers are not cleared and they could be
+	 * counted as valid references during a kmemleak scan. Therefore,
+	 * kmemleak must not scan such objects.
+	 */
+	kmemleak_no_scan(nc);
 	if (nc) {
 		nc->avail = 0;
 		nc->limit = entries;
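Note that kmemleak_no_scan(nc) is called before the if (nc) check, so the hook has to tolerate a NULL pointer. The same pattern applies to any structure that retains stale pointers to objects it no longer owns; a hypothetical caller-side sketch (obj_stack and its members are illustrative only, not kernel API):

/* Hypothetical lookaside structure whose slots may keep stale pointers
 * to objects already handed out to users. */
struct obj_stack {
	int avail;
	void *slot[16];
};

static struct obj_stack *obj_stack_create(gfp_t gfp)
{
	struct obj_stack *s = kmalloc(sizeof(*s), gfp);

	/* Exclude from scanning: a stale slot[] entry must not count as
	 * a reference. NULL-safe, as the hunk above relies on. */
	kmemleak_no_scan(s);
	if (s)
		s->avail = 0;
	return s;
}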
@@ -2621,6 +2630,14 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 		/* Slab management obj is off-slab. */
 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
 					      local_flags, nodeid);
+		/*
+		 * If the first object in the slab is leaked (it's allocated
+		 * but no one has a reference to it), we want to make sure
+		 * kmemleak does not treat the ->s_mem pointer as a reference
+		 * to the object. Otherwise we will not report the leak.
+		 */
+		kmemleak_scan_area(slabp, offsetof(struct slab, list),
+				   sizeof(struct list_head), local_flags);
 		if (!slabp)
 			return NULL;
 	} else {
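Restricting the scan area to offsetof(struct slab, list) and sizeof(struct list_head) means only the slab's list linkage is treated as potential pointers; everything after it, in particular s_mem, stays invisible to the scanner. For orientation, the management structure in mm/slab.c of this era looks roughly as follows (reproduced here as an assumption, for illustration only):

struct slab {
	struct list_head list;	/* the only range kmemleak may scan */
	unsigned long colouroff;
	void *s_mem;		/* points at the first object in the slab */
	unsigned int inuse;	/* number of objects active in the slab */
	kmem_bufctl_t free;
	unsigned short nodeid;
};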
@@ -3141,6 +3158,12 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 		STATS_INC_ALLOCMISS(cachep);
 		objp = cache_alloc_refill(cachep, flags);
 	}
+	/*
+	 * To avoid a false negative, if an object that is in one of the
+	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
+	 * treat the array pointer as a reference to the object.
+	 */
+	kmemleak_erase(&ac->entry[ac->avail]);
 	return objp;
 }
 
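The fast path hands out ac->entry[--ac->avail], so after a successful allocation ac->entry[ac->avail] still holds a stale copy of the pointer that was just returned; kmemleak_erase() clears exactly that slot. A minimal sketch, assuming the trivial inline form in include/linux/kmemleak.h:

/*
 * Sketch: clear the stale per-CPU array slot so the scanner cannot
 * mistake it for a live reference to the freshly allocated object.
 */
static inline void kmemleak_erase(void **ptr)
{
	*ptr = NULL;
}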
@@ -3360,6 +3383,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
   out:
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
+	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
+				 flags);
 
 	if (unlikely((flags & __GFP_ZERO) && ptr))
 		memset(ptr, 0, obj_size(cachep));
@@ -3415,6 +3440,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	objp = __do_cache_alloc(cachep, flags);
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+	kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
+				 flags);
 	prefetchw(objp);
 
 	if (unlikely((flags & __GFP_ZERO) && objp))
@@ -3530,6 +3557,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 	struct array_cache *ac = cpu_cache_get(cachep);
 
 	check_irq_off();
+	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
 	/*
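The free-side hook mirrors the allocation side and runs before cache_free_debugcheck(), while objp is still the address the caller passed to kmem_cache_free(). A sketch of the wrapper, under the same SLAB_NOLEAKTRACE assumption as the allocation hook above:

static inline void kmemleak_free_recursive(const void *ptr,
					   unsigned long flags)
{
	/* Skip caches created with SLAB_NOLEAKTRACE, i.e. kmemleak's
	 * own data structures. */
	if (!(flags & SLAB_NOLEAKTRACE))
		kmemleak_free(ptr);
}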