author     Steven Rostedt <srostedt@redhat.com>	2010-11-24 16:23:34 -0500
committer  Pekka Enberg <penberg@kernel.org>	2010-11-28 14:16:28 -0500
commit     85beb5869a4f6abb52a7cf8e01de6fa57e9ee47d
tree       8efa5a7ef1d97d91c56367f2882c34fb82ab606b
parent     98072e4d977aabe6a39abb95951cd8bf2c2202d5
tracing/slab: Move kmalloc tracepoint out of inline code
The tracepoint for kmalloc lives in the inlined slab code, so every
kmalloc call site carries its own copy of the tracepoint. This patch
moves the tracepoint out of the inline code and into the slab C file,
removing a large number of inlined tracepoints:

  objdump -dr vmlinux.slab | grep 'jmpq.*<trace_kmalloc' | wc -l
  213
  objdump -dr vmlinux.slab.patched | grep 'jmpq.*<trace_kmalloc' | wc -l
  1

This also has a nice impact on size:

     text	   data	    bss	     dec	   hex	filename
  7023060	2121564	2482432	11627056	b16a30	vmlinux.slab
  6970579	2109772	2482432	11562783	b06f1f	vmlinux.slab.patched

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
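Why inlining the tracepoint multiplies it: kmalloc() is a static inline
in slab_def.h, so the compiler emits a fresh copy of its body, trace
call included, at every call site. The patch hoists the allocation plus
the trace into a single out-of-line function that the inline wrapper
calls instead. A minimal userspace sketch of that pattern, assuming
nothing beyond libc; trace_alloc(), do_alloc_inline(), traced_alloc()
and do_alloc_outofline() are made-up names, not kernel APIs:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a tracepoint. */
static void trace_alloc(size_t size, void *ret)
{
	printf("alloc: %zu bytes -> %p\n", size, ret);
}

/* Before: the trace call sits in the inline wrapper, so every call
 * site of do_alloc_inline() gets its own inlined tracepoint. */
static inline void *do_alloc_inline(size_t size)
{
	void *ret = malloc(size);

	trace_alloc(size, ret);
	return ret;
}

/* After: allocation and trace live in one out-of-line function... */
void *traced_alloc(size_t size)
{
	void *ret = malloc(size);

	trace_alloc(size, ret);
	return ret;
}

/* ...and the inline wrapper just forwards to it, so the tracepoint
 * exists exactly once in the final binary. */
static inline void *do_alloc_outofline(size_t size)
{
	return traced_alloc(size);
}

int main(void)
{
	free(do_alloc_inline(16));	/* tracepoint inlined here ... */
	free(do_alloc_inline(32));	/* ... and duplicated here     */
	free(do_alloc_outofline(64));	/* single shared tracepoint    */
	return 0;
}

Compiled with gcc -O2, the trace_alloc() call is duplicated per
do_alloc_inline() call site but appears once in total for the
out-of-line variant; the objdump counts above (213 -> 1) show the same
effect in the kernel image.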
-rw-r--r--  include/linux/slab_def.h  33
-rw-r--r--  mm/slab.c                 38
2 files changed, 36 insertions(+), 35 deletions(-)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 791a502f6906..83203ae9390b 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -138,11 +138,12 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern void *kmem_cache_alloc_trace(size_t size,
+				    struct kmem_cache *cachep, gfp_t flags);
 extern size_t slab_buffer_size(struct kmem_cache *cachep);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
 	return kmem_cache_alloc(cachep, flags);
 }
@@ -179,10 +180,7 @@ found:
 #endif
 	cachep = malloc_sizes[i].cs_cachep;
 
-	ret = kmem_cache_alloc_notrace(cachep, flags);
-
-	trace_kmalloc(_THIS_IP_, ret,
-		      size, slab_buffer_size(cachep), flags);
+	ret = kmem_cache_alloc_trace(size, cachep, flags);
 
 	return ret;
 	}
@@ -194,14 +192,16 @@ extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-					   gfp_t flags,
-					   int nodeid);
+extern void *kmem_cache_alloc_node_trace(size_t size,
+					 struct kmem_cache *cachep,
+					 gfp_t flags,
+					 int nodeid);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-			      gfp_t flags,
-			      int nodeid)
+kmem_cache_alloc_node_trace(size_t size,
+			    struct kmem_cache *cachep,
+			    gfp_t flags,
+			    int nodeid)
 {
 	return kmem_cache_alloc_node(cachep, flags, nodeid);
 }
@@ -210,7 +210,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *cachep;
-	void *ret;
 
 	if (__builtin_constant_p(size)) {
 		int i = 0;
@@ -234,13 +233,7 @@ found:
 #endif
 	cachep = malloc_sizes[i].cs_cachep;
 
-	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-	trace_kmalloc_node(_THIS_IP_, ret,
-			   size, slab_buffer_size(cachep),
-			   flags, node);
-
-	return ret;
+	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 	}
 	return __kmalloc_node(size, flags, node);
 }
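One detail worth noting in the hunks above: when CONFIG_TRACING is off,
the #else branches turn the _trace variants into static inline
pass-throughs, so call sites compile identically under either
configuration and the extra size argument costs nothing. A small
self-contained sketch of that config-stub idiom, with a made-up
MYLIB_TRACING switch and alloc_trace() function standing in for the
kernel's Kconfig machinery:

#include <stdio.h>
#include <stdlib.h>

/* Build with -DMYLIB_TRACING to enable the traced variant. */
#ifdef MYLIB_TRACING
/* Tracing on: a real out-of-line function that logs the request. */
void *alloc_trace(size_t size)
{
	void *ret = malloc(size);

	printf("alloc %zu -> %p\n", size, ret);
	return ret;
}
#else
/* Tracing off: same signature, but a zero-cost inline forwarder. */
static inline void *alloc_trace(size_t size)
{
	return malloc(size);
}
#endif

int main(void)
{
	free(alloc_trace(128));	/* identical call site either way */
	return 0;
}

In the kernel the traced definition lives in mm/slab.c behind an
extern declaration, which is exactly what the slab_def.h and mm/slab.c
halves of this diff set up.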
diff --git a/mm/slab.c b/mm/slab.c
index b1e40dafbab3..dfcc8885d7d5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3653,11 +3653,18 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+void *
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret;
+
+	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	trace_kmalloc(_RET_IP_, ret,
+		      size, slab_buffer_size(cachep), flags);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
 /**
@@ -3705,31 +3712,32 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-				    gfp_t flags,
-				    int nodeid)
+void *kmem_cache_alloc_node_trace(size_t size,
+				  struct kmem_cache *cachep,
+				  gfp_t flags,
+				  int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
+	void *ret;
+
+	ret = __cache_alloc_node(cachep, flags, nodeid,
 				  __builtin_return_address(0));
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, slab_buffer_size(cachep),
+			   flags, nodeid);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
-	void *ret;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-	trace_kmalloc_node((unsigned long) caller, ret,
-			   size, cachep->buffer_size, flags, node);
-
-	return ret;
+	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 }
 
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
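The out-of-line versions report _RET_IP_ (the kernel's shorthand for
(unsigned long)__builtin_return_address(0)) rather than the old
_THIS_IP_: now that the function body is not inlined, the return
address is what identifies the original kmalloc() call site, so the
trace still points at the caller. A standalone sketch of that
caller-attribution trick, assuming GCC or Clang; trace_hit() and
traced_alloc() are illustrative names only:

#include <stdio.h>
#include <stdlib.h>

static void trace_hit(unsigned long ip, size_t size, void *ret)
{
	/* The kernel would resolve ip to a symbol; just print it. */
	printf("alloc at %#lx: %zu bytes -> %p\n", ip, size, ret);
}

/* Kept out of line on purpose: __builtin_return_address(0) then
 * yields the address of the call site, which the trace reports. */
__attribute__((noinline)) void *traced_alloc(size_t size)
{
	void *ret = malloc(size);

	trace_hit((unsigned long)__builtin_return_address(0), size, ret);
	return ret;
}

int main(void)
{
	free(traced_alloc(64));		/* trace shows this call site */
	free(traced_alloc(128));	/* and this one               */
	return 0;
}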