path: root/mm/slab.c
author		Steven Rostedt <srostedt@redhat.com>	2010-11-24 16:23:34 -0500
committer	Pekka Enberg <penberg@kernel.org>	2010-11-28 14:16:28 -0500
commit		85beb5869a4f6abb52a7cf8e01de6fa57e9ee47d (patch)
tree		8efa5a7ef1d97d91c56367f2882c34fb82ab606b /mm/slab.c
parent		98072e4d977aabe6a39abb95951cd8bf2c2202d5 (diff)
tracing/slab: Move kmalloc tracepoint out of inline code
The tracepoint for kmalloc is in the slab inlined code which causes
every instance of kmalloc to have the tracepoint.

This patch moves the tracepoint out of the inline code to the
slab C file, which removes a large number of inlined trace points.

  objdump -dr vmlinux.slab | grep 'jmpq.*<trace_kmalloc' | wc -l
  213
  objdump -dr vmlinux.slab.patched | grep 'jmpq.*<trace_kmalloc' | wc -l
  1

This also has a nice impact on size.

     text     data      bss       dec    hex  filename
  7023060  2121564  2482432  11627056  b16a30 vmlinux.slab
  6970579  2109772  2482432  11562783  b06f1f vmlinux.slab.patched

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
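For context, a rough sketch of the caller side, which sits outside this mm/slab.c-only diffstat (the companion change is to the inline kmalloc() helpers in the slab header): before the patch the inline code emitted trace_kmalloc() itself, once per inline expansion; afterwards it only calls the out-of-line kmem_cache_alloc_trace() added below, which carries the single tracepoint. This is an illustrative sketch, not the exact header hunk; kmalloc_cache_for_size() is a hypothetical stand-in for the constant-size cache selection, which is elided here.

/*
 * Caller-side sketch only -- not the exact header hunk from this
 * series.  kmalloc_cache_for_size() is a hypothetical helper standing
 * in for the real constant-size cache lookup, elided for brevity.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		struct kmem_cache *cachep = kmalloc_cache_for_size(size, flags);

		/*
		 * Before this patch, trace_kmalloc() was open-coded here,
		 * duplicating the tracepoint at every inlined kmalloc()
		 * call site.  Now the tracepoint fires inside this single
		 * out-of-line function in mm/slab.c.
		 */
		return kmem_cache_alloc_trace(size, cachep, flags);
	}
	return __kmalloc(size, flags);
}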
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	38
1 file changed, 23 insertions, 15 deletions
diff --git a/mm/slab.c b/mm/slab.c
index b1e40dafbab3..dfcc8885d7d5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3653,11 +3653,18 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+void *
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret;
+
+	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	trace_kmalloc(_RET_IP_, ret,
+		      size, slab_buffer_size(cachep), flags);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
 /**
@@ -3705,31 +3712,32 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-				    gfp_t flags,
-				    int nodeid)
+void *kmem_cache_alloc_node_trace(size_t size,
+				  struct kmem_cache *cachep,
+				  gfp_t flags,
+				  int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
+	void *ret;
+
+	ret = __cache_alloc_node(cachep, flags, nodeid,
 				  __builtin_return_address(0));
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, slab_buffer_size(cachep),
+			   flags, nodeid);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
-	void *ret;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-	trace_kmalloc_node((unsigned long) caller, ret,
-			   size, cachep->buffer_size, flags, node);
-
-	return ret;
+	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 }
 
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)