aboutsummaryrefslogtreecommitdiffstats
path: root/mm/slub.c
diff options
context:
space:
mode:
authorRoman Bobniev <Roman.Bobniev@sonymobile.com>2013-10-08 18:58:57 -0400
committerPekka Enberg <penberg@iki.fi>2013-10-24 13:25:10 -0400
commitd56791b38e34e480d869d1b88735df16c81aa684 (patch)
treeb1333007313610a469f6ab6fa23c4f65e51a370e /mm/slub.c
parent6e4664525b1db28f8c4e1130957f70a94c19213e (diff)
slub: proper kmemleak tracking if CONFIG_SLUB_DEBUG disabled
Move all kmemleak calls into hook functions, and make it so that all hooks (both inside and outside of #ifdef CONFIG_SLUB_DEBUG) call the appropriate kmemleak routines. This allows for kmemleak to be configured independently of slub debug features. It also fixes a bug where kmemleak was only partially enabled in some configurations.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Roman Bobniev <Roman.Bobniev@sonymobile.com>
Signed-off-by: Tim Bird <tim.bird@sonymobile.com>
Signed-off-by: Pekka Enberg <penberg@iki.fi>
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- mm/slub.c | 35
1 file changed, 31 insertions(+), 4 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index e3ba1f2cf60c..250062c66ec5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -928,6 +928,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
928 * Hooks for other subsystems that check memory allocations. In a typical 928 * Hooks for other subsystems that check memory allocations. In a typical
929 * production configuration these hooks all should produce no code at all. 929 * production configuration these hooks all should produce no code at all.
930 */ 930 */
931static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
932{
933 kmemleak_alloc(ptr, size, 1, flags);
934}
935
936static inline void kfree_hook(const void *x)
937{
938 kmemleak_free(x);
939}
940
931static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 941static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
932{ 942{
933 flags &= gfp_allowed_mask; 943 flags &= gfp_allowed_mask;
@@ -1253,13 +1263,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
1253static inline void dec_slabs_node(struct kmem_cache *s, int node, 1263static inline void dec_slabs_node(struct kmem_cache *s, int node,
1254 int objects) {} 1264 int objects) {}
1255 1265
1266static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1267{
1268 kmemleak_alloc(ptr, size, 1, flags);
1269}
1270
1271static inline void kfree_hook(const void *x)
1272{
1273 kmemleak_free(x);
1274}
1275
1256static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 1276static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
1257 { return 0; } 1277 { return 0; }
1258 1278
1259static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, 1279static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
1260 void *object) {} 1280 void *object)
1281{
1282 kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
1283 flags & gfp_allowed_mask);
1284}
1261 1285
1262static inline void slab_free_hook(struct kmem_cache *s, void *x) {} 1286static inline void slab_free_hook(struct kmem_cache *s, void *x)
1287{
1288 kmemleak_free_recursive(x, s->flags);
1289}
1263 1290
1264#endif /* CONFIG_SLUB_DEBUG */ 1291#endif /* CONFIG_SLUB_DEBUG */
1265 1292
@@ -3265,7 +3292,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3265 if (page) 3292 if (page)
3266 ptr = page_address(page); 3293 ptr = page_address(page);
3267 3294
3268 kmemleak_alloc(ptr, size, 1, flags); 3295 kmalloc_large_node_hook(ptr, size, flags);
3269 return ptr; 3296 return ptr;
3270} 3297}
3271 3298
@@ -3365,7 +3392,7 @@ void kfree(const void *x)
3365 page = virt_to_head_page(x); 3392 page = virt_to_head_page(x);
3366 if (unlikely(!PageSlab(page))) { 3393 if (unlikely(!PageSlab(page))) {
3367 BUG_ON(!PageCompound(page)); 3394 BUG_ON(!PageCompound(page));
3368 kmemleak_free(x); 3395 kfree_hook(x);
3369 __free_memcg_kmem_pages(page, compound_order(page)); 3396 __free_memcg_kmem_pages(page, compound_order(page));
3370 return; 3397 return;
3371 } 3398 }