author    Linus Torvalds <torvalds@linux-foundation.org>  2011-01-10 11:38:01 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-01-10 11:38:01 -0500
commit    a1e8fad5900fa94adb500c6e0dfd60a307f7a3c9 (patch)
tree      c619277e587a99c90e76a1e7b63746af4d959d72 /include
parent    e3166331a3288dd7184548896a1c7ab682f0dbe8 (diff)
parent    a45b0616e7ee9db4c1b2b9a4a79a974325fa9bf3 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slub: Fix a crash during slabinfo -v
  tracing/slab: Move kmalloc tracepoint out of inline code
  slub: Fix slub_lock down/up imbalance
  slub: Fix build breakage in Documentation/vm
  slub tracing: move trace calls out of always inlined functions to reduce kernel code size
  slub: move slabinfo.c to tools/slub/slabinfo.c
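
The thread running through this series is the tracing change visible in the diff below: kmalloc() and kmalloc_node() are __always_inline, so the old code expanded a trace_kmalloc()/trace_kmalloc_node() call at every call site in the kernel; the new code instead calls a single out-of-line *_trace() helper that performs the allocation and fires the tracepoint. A minimal userspace sketch of the pattern (the my_* names and the fprintf() tracepoint stand-in are invented for illustration, not kernel APIs):

/* Illustrative sketch only; compile with: cc -DCONFIG_TRACING sketch.c */
#include <stdio.h>
#include <stdlib.h>

/* Out-of-line helper: the binary carries one copy of the trace logic,
 * rather than one copy per inlined call site. */
void *my_alloc_trace(size_t size)
{
	void *ret = malloc(size);
	fprintf(stderr, "trace: ptr=%p size=%zu\n", ret, size); /* tracepoint stand-in */
	return ret;
}

/* Inline fast path: with tracing built in it compiles to a single call;
 * without it, the allocation is inlined directly. */
static inline void *my_alloc(size_t size)
{
#ifdef CONFIG_TRACING
	return my_alloc_trace(size);
#else
	return malloc(size);
#endif
}

int main(void)
{
	char *p = my_alloc(32);
	free(p);
	return 0;
}

With kmalloc() call sites all over the kernel, shrinking each inlined expansion by the size of a tracepoint is what the "reduce kernel code size" commit above refers to.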
Diffstat (limited to 'include')
-rw-r--r--  include/linux/slab_def.h  33
-rw-r--r--  include/linux/slub_def.h  55
2 files changed, 39 insertions(+), 49 deletions(-)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 791a502f6906..83203ae9390b 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -138,11 +138,12 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern void *kmem_cache_alloc_trace(size_t size,
+				    struct kmem_cache *cachep, gfp_t flags);
 extern size_t slab_buffer_size(struct kmem_cache *cachep);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
 	return kmem_cache_alloc(cachep, flags);
 }
@@ -179,10 +180,7 @@ found:
 #endif
 	cachep = malloc_sizes[i].cs_cachep;
 
-	ret = kmem_cache_alloc_notrace(cachep, flags);
-
-	trace_kmalloc(_THIS_IP_, ret,
-		      size, slab_buffer_size(cachep), flags);
+	ret = kmem_cache_alloc_trace(size, cachep, flags);
 
 	return ret;
 }
@@ -194,14 +192,16 @@ extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-					   gfp_t flags,
-					   int nodeid);
+extern void *kmem_cache_alloc_node_trace(size_t size,
+					 struct kmem_cache *cachep,
+					 gfp_t flags,
+					 int nodeid);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-			      gfp_t flags,
-			      int nodeid)
+kmem_cache_alloc_node_trace(size_t size,
+			    struct kmem_cache *cachep,
+			    gfp_t flags,
+			    int nodeid)
 {
 	return kmem_cache_alloc_node(cachep, flags, nodeid);
 }
@@ -210,7 +210,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *cachep;
-	void *ret;
 
 	if (__builtin_constant_p(size)) {
 		int i = 0;
@@ -234,13 +233,7 @@ found:
 #endif
 	cachep = malloc_sizes[i].cs_cachep;
 
-	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-	trace_kmalloc_node(_THIS_IP_, ret,
-			   size, slab_buffer_size(cachep),
-			   flags, node);
-
-	return ret;
+	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 }
 	return __kmalloc_node(size, flags, node);
 }
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index e4f5ed180b9b..8b6e8ae5d5ca 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,9 +10,8 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
-#include <linux/kmemleak.h>
 
-#include <trace/events/kmem.h>
+#include <linux/kmemleak.h>
 
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -216,31 +215,40 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+	kmemleak_alloc(ret, size, 1, flags);
+	return ret;
+}
+
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+extern void *
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	return kmem_cache_alloc(s, gfpflags);
 }
+
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	return kmalloc_order(size, flags, order);
+}
 #endif
 
 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 {
 	unsigned int order = get_order(size);
-	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
-
-	kmemleak_alloc(ret, size, 1, flags);
-	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
-
-	return ret;
+	return kmalloc_order_trace(size, flags, order);
 }
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-	void *ret;
-
 	if (__builtin_constant_p(size)) {
 		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);
@@ -251,11 +259,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 		if (!s)
 			return ZERO_SIZE_PTR;
 
-		ret = kmem_cache_alloc_notrace(s, flags);
-
-		trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
-
-		return ret;
+		return kmem_cache_alloc_trace(s, flags, size);
 	}
 	}
 	return __kmalloc(size, flags);
@@ -266,14 +270,14 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
-					   gfp_t gfpflags,
-					   int node);
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+					 gfp_t gfpflags,
+					 int node, size_t size);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *s,
-			      gfp_t gfpflags,
-			      int node)
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+			    gfp_t gfpflags,
+			    int node, size_t size)
 {
 	return kmem_cache_alloc_node(s, gfpflags, node);
 }
@@ -281,8 +285,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *s,
 
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	void *ret;
-
 	if (__builtin_constant_p(size) &&
 		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
@@ -290,12 +292,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 		if (!s)
 			return ZERO_SIZE_PTR;
 
-		ret = kmem_cache_alloc_node_notrace(s, flags, node);
-
-		trace_kmalloc_node(_THIS_IP_, ret,
-				   size, s->size, flags, node);
-
-		return ret;
+		return kmem_cache_alloc_node_trace(s, flags, node, size);
 	}
 	return __kmalloc_node(size, flags, node);
 }
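
For reference, the *_trace() externs introduced above bind to out-of-line definitions that this series adds in mm/slab.c and mm/slub.c (not shown in this diff, which is limited to 'include'). A sketch of the SLUB-side shape, reconstructed from the inline code removed above; the real definition uses SLUB's internal fast-path helper rather than kmem_cache_alloc(), so treat the body as an approximation:

#ifdef CONFIG_TRACING
void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
	/* allocate exactly as the !CONFIG_TRACING inline version does ... */
	void *ret = kmem_cache_alloc(s, gfpflags);
	/* ... then fire the tracepoint once, out of line */
	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
#endif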