author		Catalin Marinas <catalin.marinas@arm.com>	2009-06-11 08:23:18 -0400
committer	Catalin Marinas <catalin.marinas@arm.com>	2009-06-11 12:03:30 -0400
commit		06f22f13f3cc2eff00db09f053218e5d4b757bc8 (patch)
tree		ec913d87a0ed78912e22c78ce03d65f9ebd7061d
parent		4374e616d28e65265a5b433ceece275449f3d2e3 (diff)
kmemleak: Add the slub memory allocation/freeing hooks
This patch adds the callbacks to the kmemleak_(alloc|free) functions from the
slub allocator.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
-rw-r--r--	mm/slub.c	5
1 file changed, 4 insertions(+), 1 deletion(-)
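For context, the kmemleak_*_recursive() calls added below only forward to the
real kmemleak hooks when the cache does not carry SLAB_NOLEAKTRACE; this is
what stops kmemleak from tracing the allocations it makes for its own
metadata. A minimal sketch of the wrappers, assuming the <linux/kmemleak.h>
helpers from this series (they are not part of this diff):

/* Sketch: wrappers called from slab_alloc()/slab_free() below.
 * Caches flagged SLAB_NOLEAKTRACE bypass the hooks so that kmemleak
 * does not recursively trace its own metadata allocations.
 * kmemleak_alloc()/kmemleak_free() are declared in <linux/kmemleak.h>. */
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
					    int min_count, unsigned long flags,
					    gfp_t gfp)
{
	if (!(flags & SLAB_NOLEAKTRACE))
		kmemleak_alloc(ptr, size, min_count, gfp);
}

static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
{
	if (!(flags & SLAB_NOLEAKTRACE))
		kmemleak_free(ptr);
}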
diff --git a/mm/slub.c b/mm/slub.c
index 5e805a6fe36c..6674a7907b0c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -20,6 +20,7 @@
 #include <linux/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
+#include <linux/kmemleak.h>
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
 #include <linux/debugobjects.h>
@@ -143,7 +144,7 @@
  * Set of flags that will prevent slab merging
  */
 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-		SLAB_TRACE | SLAB_DESTROY_BY_RCU)
+		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
 
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
 		SLAB_CACHE_DMA)
@@ -1617,6 +1618,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
 		memset(object, 0, objsize);
 
+	kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
 	return object;
 }
 
@@ -1746,6 +1748,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	struct kmem_cache_cpu *c;
 	unsigned long flags;
 
+	kmemleak_free_recursive(x, s->flags);
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
 	debug_check_no_locks_freed(object, c->objsize);
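A hedged usage sketch (the struct, cache name, and init function below are
hypothetical, not part of this patch): a cache can opt out of the new hooks by
passing SLAB_NOLEAKTRACE at creation time, and the SLUB_NEVER_MERGE change
above keeps such a cache from being merged with traced ones.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

/* Hypothetical metadata object and cache, for illustration only. */
struct my_meta {
	unsigned long cookie;
};

static struct kmem_cache *my_meta_cache;

static int __init my_meta_init(void)
{
	/* SLAB_NOLEAKTRACE makes the slab_alloc()/slab_free() hooks added
	 * by this patch skip kmemleak for objects from this cache. */
	my_meta_cache = kmem_cache_create("my_meta", sizeof(struct my_meta),
					  0, SLAB_NOLEAKTRACE, NULL);
	return my_meta_cache ? 0 : -ENOMEM;
}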