path: root/mm/slab.c
author    Joonsoo Kim <iamjoonsoo.kim@lge.com>    2014-10-09 18:26:02 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-10-09 22:25:50 -0400
commit    61f47105a2c9c60e950ca808b7560f776f9bfa31 (patch)
tree      10bbc7fb213285e1e673073eedf21a1991a22ea1 /mm/slab.c
parent    07f361b2bee38896df8be17d8c3f8af3f3610606 (diff)
mm/sl[ao]b: always track caller in kmalloc_(node_)track_caller()
Currently, we track the caller only if tracing or slab debugging is enabled. When both are disabled, we could save the overhead of passing one extra argument by calling __kmalloc(_node)() instead, but that saving would be marginal. Furthermore, the default slab allocator, SLUB, doesn't use this technique, so it is okay to drop it here as well. After this change, we can turn CONFIG_DEBUG_SLAB on and off without a full kernel rebuild, and we can remove some complicated '#if' definitions. That looks more beneficial to me.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
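In code terms, the change collapses the config-dependent dual definitions into a single always-tracking entry point. A minimal sketch, condensed from the mm/slab.c hunks below (EXPORT_SYMBOL lines omitted; __kmalloc_node is simplified the same way):

/* Before: two build-time variants, so toggling CONFIG_DEBUG_SLAB
 * changed which definition was compiled in and forced a rebuild. */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
void *__kmalloc(size_t size, gfp_t flags)
{
	return __do_kmalloc(size, flags, _RET_IP_);	/* caller tracked */
}
#else
void *__kmalloc(size_t size, gfp_t flags)
{
	return __do_kmalloc(size, flags, 0);		/* caller dropped */
}
#endif

/* After: one definition that always passes the caller's return
 * address; the extra argument costs at most marginal overhead. */
void *__kmalloc(size_t size, gfp_t flags)
{
	return __do_kmalloc(size, flags, _RET_IP_);
}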
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 18 ------------------
1 file changed, 0 insertions(+), 18 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 7c52b3890d25..c52bc5aa6ba0 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3496,7 +3496,6 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
 }
 
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node, _RET_IP_);
@@ -3509,13 +3508,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
 	return __do_kmalloc_node(size, flags, node, caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
-#else
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return __do_kmalloc_node(size, flags, node, 0);
-}
-EXPORT_SYMBOL(__kmalloc_node);
-#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
 #endif /* CONFIG_NUMA */
 
 /**
@@ -3541,8 +3533,6 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	return ret;
 }
 
-
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc(size, flags, _RET_IP_);
@@ -3555,14 +3545,6 @@ void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
-#else
-void *__kmalloc(size_t size, gfp_t flags)
-{
-	return __do_kmalloc(size, flags, 0);
-}
-EXPORT_SYMBOL(__kmalloc);
-#endif
-
 /**
  * kmem_cache_free - Deallocate an object
  * @cachep: The cache the allocation was from.
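As a usage note, the point of keeping the _track_caller entry points unconditional is that thin wrapper helpers can attribute allocations to their own callers in debugging and tracing output. A minimal sketch, assuming a hypothetical my_dup_buffer() helper (only __kmalloc_track_caller() and _RET_IP_ are real kernel interfaces here):

#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical helper: passing _RET_IP_ (this function's return
 * address) to __kmalloc_track_caller() makes slab debugging and
 * tracing attribute the allocation to whoever called my_dup_buffer(),
 * not to my_dup_buffer() itself. In-tree helpers such as kstrdup()
 * rely on the same mechanism via kmalloc_track_caller(). */
static void *my_dup_buffer(const void *src, size_t len, gfp_t gfp)
{
	void *p = __kmalloc_track_caller(len, gfp, _RET_IP_);

	if (p)
		memcpy(p, src, len);
	return p;
}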