author		Pekka Enberg <penberg@cs.helsinki.fi>		2008-05-09 14:35:53 -0400
committer	Vegard Nossum <vegard.nossum@gmail.com>		2009-06-15 06:40:08 -0400
commit		c175eea466e760de4b69b9aad90157e7aa9ff54f (patch)
tree		60bc1c115d77bba6fd6f99818eeeef2165d8e30c /mm/slab.c
parent		5a896d9e7c921742d0437a452f991288f4dc2c42 (diff)
slab: add hooks for kmemcheck
We now have SLAB support for kmemcheck! This means that it doesn't matter
whether one chooses SLAB or SLUB, or indeed whether Linus chooses to chuck
SLAB or SLUB.. ;-)

Cc: Ingo Molnar <mingo@elte.hu>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	20
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index bf0c3af143fb..95b6c5eb40b3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -114,6 +114,7 @@
 #include <linux/rtmutex.h>
 #include <linux/reciprocal_div.h>
 #include <linux/debugobjects.h>
+#include <linux/kmemcheck.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -179,13 +180,13 @@
 			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #endif
 
 /*
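[Annotation, not part of the patch] With SLAB_NOTRACK added to CREATE_MASK, SLAB now accepts the flag at cache-creation time, which is how a cache opts out of kmemcheck tracking. A minimal, hypothetical sketch (the cache name, object size, and rationale are made up; kmem_cache_create() is shown with its five-argument form of this kernel era):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

/* Hypothetical cache whose objects are filled by hardware, so kmemcheck
 * tracking of them would mostly produce false positives. */
static struct kmem_cache *example_cache;

static int __init example_init(void)
{
	/* SLAB_NOTRACK: ask the allocator not to set up kmemcheck shadow
	 * pages for this cache's slabs (see the kmem_getpages() hunk below). */
	example_cache = kmem_cache_create("example_cache", 128, 0,
					  SLAB_NOTRACK, NULL);
	return example_cache ? 0 : -ENOMEM;
}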
@@ -1624,6 +1625,10 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 			NR_SLAB_UNRECLAIMABLE, nr_pages);
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);
+
+	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK))
+		kmemcheck_alloc_shadow(cachep, flags, nodeid, page, cachep->gfporder);
+
 	return page_address(page);
 }
 
@@ -1636,6 +1641,9 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
+	if (kmemcheck_page_is_tracked(page))
+		kmemcheck_free_shadow(cachep, page, cachep->gfporder);
+
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		sub_zone_page_state(page_zone(page),
 				NR_SLAB_RECLAIMABLE, nr_freed);
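[Annotation, not part of the patch] The two page-level hooks above pair up: kmem_getpages() allocates kmemcheck shadow memory right after the slab's backing pages are obtained (only when kmemcheck is enabled and the cache is not SLAB_NOTRACK), and kmem_freepages() releases that shadow before the pages go back to the page allocator, guarded by kmemcheck_page_is_tracked(). A hedged sketch of the same placement in isolation, using the call signatures exactly as they appear in this patch and written as it would sit inside mm/slab.c (where struct kmem_cache's flags and gfporder fields are visible); my_getpages()/my_freepages() are hypothetical stand-ins:

static void *my_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
	struct page *page = alloc_pages_node(nodeid, flags, cachep->gfporder);

	if (!page)
		return NULL;

	/* Shadow memory lets kmemcheck track the initialization state of
	 * these pages, unless the cache opted out with SLAB_NOTRACK. */
	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK))
		kmemcheck_alloc_shadow(cachep, flags, nodeid, page,
				       cachep->gfporder);

	return page_address(page);
}

static void my_freepages(struct kmem_cache *cachep, void *addr)
{
	struct page *page = virt_to_page(addr);

	/* Release the shadow before freeing the backing pages themselves. */
	if (kmemcheck_page_is_tracked(page))
		kmemcheck_free_shadow(cachep, page, cachep->gfporder);

	__free_pages(page, cachep->gfporder);
}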
@@ -3309,6 +3317,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
 				 flags);
 
+	if (likely(ptr))
+		kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && ptr))
 		memset(ptr, 0, obj_size(cachep));
 
@@ -3367,6 +3378,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 				 flags);
 	prefetchw(objp);
 
+	if (likely(objp))
+		kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && objp))
 		memset(objp, 0, obj_size(cachep));
 
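[Annotation, not part of the patch] Both object-allocation paths (__cache_alloc_node() above and __cache_alloc() here) get the same hook: on a successful allocation, kmemcheck_slab_alloc() updates kmemcheck's shadow state for the object, and it is placed before the optional __GFP_ZERO memset. A hedged sketch of that ordering as it would sit inside mm/slab.c, where obj_size() is available; my_finish_alloc() is a hypothetical helper, not part of the patch:

static void *my_finish_alloc(struct kmem_cache *cachep, gfp_t flags,
			     void *objp)
{
	/* Let kmemcheck record the freshly allocated object; skipped for
	 * failed allocations. */
	if (likely(objp))
		kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));

	/* The optional zeroing comes after the hook, matching the
	 * placement this patch uses in both allocation paths. */
	if (unlikely((flags & __GFP_ZERO) && objp))
		memset(objp, 0, obj_size(cachep));

	return objp;
}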
@@ -3483,6 +3497,8 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
+	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
+
 	/*
 	 * Skip calling cache_free_alien() when the platform is not numa.
 	 * This will avoid cache misses that happen while accessing slabp (which
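[Annotation, not part of the patch] The free path is the mirror image: kmemcheck_slab_free() updates the object's shadow as it is released, before the object is recycled, so that later accesses through a stale pointer can be reported. A hedged sketch of the placement, again as it would sit inside mm/slab.c; my_finish_free() is a hypothetical helper:

static void my_finish_free(struct kmem_cache *cachep, void *objp)
{
	/* Tell kmemcheck the object has been freed before it goes back
	 * onto the per-CPU array for reuse. */
	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
}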