Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  32
1 file changed, 30 insertions(+), 2 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 2bd611fa87bf..f46b65d124e5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -107,6 +107,7 @@
 #include <linux/string.h>
 #include <linux/uaccess.h>
 #include <linux/nodemask.h>
+#include <linux/kmemleak.h>
 #include <linux/mempolicy.h>
 #include <linux/mutex.h>
 #include <linux/fault-inject.h>
@@ -178,13 +179,13 @@
 			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
 #endif
 
 /*
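The only functional change in the two CREATE_MASK branches is that SLAB_NOLEAKTRACE becomes a legal cache-creation flag; caches created with it are skipped by the recursive kmemleak hooks added further down. As a rough illustration only, with an invented cache name and structure that are not part of this patch, a cache that must stay invisible to kmemleak could be set up like this:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct my_meta {			/* hypothetical bookkeeping object */
	unsigned long start;
	size_t len;
};

static struct kmem_cache *my_meta_cache;

static int __init my_meta_cache_init(void)
{
	/* SLAB_NOLEAKTRACE: kmemleak neither tracks nor scans these objects. */
	my_meta_cache = kmem_cache_create("my_meta_cache",
					  sizeof(struct my_meta), 0,
					  SLAB_NOLEAKTRACE, NULL);
	return my_meta_cache ? 0 : -ENOMEM;
}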
@@ -964,6 +965,14 @@ static struct array_cache *alloc_arraycache(int node, int entries,
 	struct array_cache *nc = NULL;
 
 	nc = kmalloc_node(memsize, gfp, node);
+	/*
+	 * The array_cache structures contain pointers to free objects.
+	 * However, when such objects are allocated or transferred to another
+	 * cache the pointers are not cleared and they could be counted as
+	 * valid references during a kmemleak scan. Therefore, kmemleak must
+	 * not scan such objects.
+	 */
+	kmemleak_no_scan(nc);
 	if (nc) {
 		nc->avail = 0;
 		nc->limit = entries;
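For reference, kmemleak_no_scan() leaves the object itself tracked (a lost array_cache is still a reportable leak) but excludes its contents from the pointer scan. A minimal sketch of the same pattern outside the slab internals, with invented names, assuming a buffer that caches recycled pointer values which must not count as references:

#include <linux/kmemleak.h>
#include <linux/slab.h>

/* Hypothetical ring of recycled pointers; its contents may be stale. */
static void **make_ptr_ring(size_t entries, gfp_t gfp)
{
	void **ring = kmalloc(entries * sizeof(void *), gfp);

	/*
	 * The ring itself is still tracked by kmemleak (losing it is a real
	 * leak), but the stale pointers stored in it must not keep the
	 * objects they once referred to out of the leak report.
	 */
	kmemleak_no_scan(ring);
	return ring;
}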
@@ -2625,6 +2634,14 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 		/* Slab management obj is off-slab. */
 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
 					      local_flags, nodeid);
+		/*
+		 * If the first object in the slab is leaked (it's allocated
+		 * but no one has a reference to it), we want to make sure
+		 * kmemleak does not treat the ->s_mem pointer as a reference
+		 * to the object. Otherwise we will not report the leak.
+		 */
+		kmemleak_scan_area(slabp, offsetof(struct slab, list),
+				   sizeof(struct list_head), local_flags);
 		if (!slabp)
 			return NULL;
 	} else {
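kmemleak_scan_area() narrows which part of a tracked object is scanned; with the calling convention this patch relies on (pointer, offset, length, gfp; later kernels reduced it to pointer, size, gfp), only the embedded list_head of the slab descriptor is scanned, so ->s_mem never acts as a reference. A rough sketch of the idea with an invented structure:

#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/stddef.h>

/* Hypothetical descriptor: only 'node' should be scanned for pointers. */
struct my_desc {
	struct list_head node;		/* links the descriptor into a list */
	void *payload;			/* must not be treated as a reference */
};

static struct my_desc *make_desc(gfp_t gfp)
{
	struct my_desc *d = kmalloc(sizeof(*d), gfp);

	/* Same calling convention as the hunk above: ptr, offset, length, gfp. */
	kmemleak_scan_area(d, offsetof(struct my_desc, node),
			   sizeof(struct list_head), gfp);
	return d;
}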
@@ -3145,6 +3162,12 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 		STATS_INC_ALLOCMISS(cachep);
 		objp = cache_alloc_refill(cachep, flags);
 	}
+	/*
+	 * To avoid a false negative, if an object that is in one of the
+	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
+	 * treat the array pointers as a reference to the object.
+	 */
+	kmemleak_erase(&ac->entry[ac->avail]);
 	return objp;
 }
 
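kmemleak_erase() is a small inline from <linux/kmemleak.h> that just clears a pointer slot, so the now-vacant entry in the per-CPU array stops referencing the object that was handed out. A contrived pop-from-stack sketch of the same idea, with invented names:

#include <linux/kmemleak.h>

/* Hypothetical fixed-size stack of cached object pointers. */
struct obj_stack {
	unsigned int avail;
	void *entry[16];
};

static void *obj_stack_pop(struct obj_stack *s)
{
	void *obj = NULL;

	if (s->avail) {
		obj = s->entry[--s->avail];
		/*
		 * The vacated slot still holds the old value; clear it so a
		 * scan of the stack does not see a false reference to 'obj'.
		 */
		kmemleak_erase(&s->entry[s->avail]);
	}
	return obj;
}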
@@ -3364,6 +3387,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
   out:
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
+	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
+				 flags);
 
 	if (unlikely((flags & __GFP_ZERO) && ptr))
 		memset(ptr, 0, obj_size(cachep));
@@ -3419,6 +3444,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	objp = __do_cache_alloc(cachep, flags);
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+	kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
+				 flags);
 	prefetchw(objp);
 
 	if (unlikely((flags & __GFP_ZERO) && objp))
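Both allocation paths register the object they are about to return. The *_recursive variants are thin wrappers that skip caches created with SLAB_NOLEAKTRACE, so kmemleak's own allocations are not tracked in turn; a min_count of 1 means the object is reported once no references to it can be found. An allocator that bypasses these slab hooks would use the plain kmemleak_alloc()/kmemleak_free() pair itself; a hedged sketch with an invented wrapper around the page allocator, which kmemleak does not track by default:

#include <linux/gfp.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>

/* Hypothetical page-based allocation that kmemleak would otherwise miss. */
static void *my_alloc_tracked(size_t size, gfp_t gfp)
{
	void *p = (void *)__get_free_pages(gfp, get_order(size));

	if (p)
		/* min_count == 1: report a leak once no references remain. */
		kmemleak_alloc(p, size, 1, gfp);
	return p;
}

static void my_free_tracked(void *p, size_t size)
{
	kmemleak_free(p);
	free_pages((unsigned long)p, get_order(size));
}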
@@ -3534,6 +3561,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 	struct array_cache *ac = cpu_cache_get(cachep);
 
 	check_irq_off();
+	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
 	/*
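The free path is symmetrical: kmemleak_free_recursive() drops the tracking entry before the object goes back into the cache, again skipping SLAB_NOLEAKTRACE caches. On the consumer side, an allocation that is intentionally left without any in-kernel reference can be annotated so it never shows up as a false positive; a sketch using the real kmemleak_not_leak() annotation on an invented example:

#include <linux/errno.h>
#include <linux/kmemleak.h>
#include <linux/slab.h>

/* Hypothetical one-shot buffer whose only 'reference' lives in hardware. */
static int publish_to_hw(size_t size, gfp_t gfp)
{
	void *buf = kmalloc(size, gfp);

	if (!buf)
		return -ENOMEM;

	/* ... program the buffer's bus address into a device here ... */

	/*
	 * No kernel pointer to 'buf' is kept, so kmemleak would flag it.
	 * kmemleak_not_leak() marks it as a known, intentional exception.
	 */
	kmemleak_not_leak(buf);
	return 0;
}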