| author | Roman Bobniev <Roman.Bobniev@sonymobile.com> | 2013-10-08 18:58:57 -0400 |
| --- | --- | --- |
| committer | Pekka Enberg <penberg@iki.fi> | 2013-10-24 13:25:10 -0400 |
| commit | d56791b38e34e480d869d1b88735df16c81aa684 (patch) | |
| tree | b1333007313610a469f6ab6fa23c4f65e51a370e /mm/slub.c | |
| parent | 6e4664525b1db28f8c4e1130957f70a94c19213e (diff) | |
slub: proper kmemleak tracking if CONFIG_SLUB_DEBUG disabled
Move all kmemleak calls into hook functions, and make sure that all
hooks (both inside and outside of #ifdef CONFIG_SLUB_DEBUG) call the
appropriate kmemleak routines. This allows kmemleak to be configured
independently of the slub debug features.
It also fixes a bug where kmemleak was only partially enabled in some
configurations.
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Roman Bobniev <Roman.Bobniev@sonymobile.com>
Signed-off-by: Tim Bird <tim.bird@sonymobile.com>
Signed-off-by: Pekka Enberg <penberg@iki.fi>
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 35 |
1 file changed, 31 insertions, 4 deletions
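The hunks below are easier to follow with the pattern from the commit message in mind. Here is a minimal, standalone C sketch of that pattern (illustrative only, not part of mm/slub.c; track_free() is a hypothetical stand-in for kmemleak_free()): the leak-tracking call is wrapped in a hook that is defined in both branches of the config #ifdef, so the caller stays unconditional and tracking no longer disappears when the debug option is off.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for kmemleak_free(): just report the pointer. */
static void track_free(const void *x)
{
	printf("leak tracker: freeing %p\n", (void *)x);
}

#ifdef CONFIG_SLUB_DEBUG
/* Debug build: the hook forwards to the tracker (debug-only checks could live here too). */
static inline void kfree_hook(const void *x)
{
	track_free(x);
}
#else
/*
 * Non-debug build: before the patch this branch held an empty stub, which
 * silently disabled leak tracking; now it forwards to the tracker as well.
 */
static inline void kfree_hook(const void *x)
{
	track_free(x);
}
#endif

int main(void)
{
	void *p = malloc(32);

	kfree_hook(p);	/* the caller is identical in both configurations */
	free(p);
	return 0;
}

In the actual patch, kmalloc_large_node_hook() and kfree_hook() are added to both branches of the #ifdef, and the previously empty !CONFIG_SLUB_DEBUG stubs of slab_post_alloc_hook() and slab_free_hook() are filled in with kmemleak_alloc_recursive() and kmemleak_free_recursive(), so kmalloc_large_node() and kfree() in the later hunks can call the hooks without checking CONFIG_SLUB_DEBUG.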
@@ -928,6 +928,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
  */
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 {
 	flags &= gfp_allowed_mask;
@@ -1253,13 +1263,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}
 
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 							{ return 0; }
 
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
-		void *object) {}
+		void *object)
+{
+	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
+		flags & gfp_allowed_mask);
+}
 
-static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+	kmemleak_free_recursive(x, s->flags);
+}
 
 #endif /* CONFIG_SLUB_DEBUG */
 
@@ -3265,7 +3292,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	if (page)
 		ptr = page_address(page);
 
-	kmemleak_alloc(ptr, size, 1, flags);
+	kmalloc_large_node_hook(ptr, size, flags);
 	return ptr;
 }
 
@@ -3365,7 +3392,7 @@ void kfree(const void *x)
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
-		kmemleak_free(x);
+		kfree_hook(x);
 		__free_memcg_kmem_pages(page, compound_order(page));
 		return;
 	}