diff options
author | Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro> | 2008-08-19 13:43:26 -0400 |
---|---|---|
committer | Pekka Enberg <penberg@cs.helsinki.fi> | 2008-12-29 08:34:07 -0500 |
commit | 5b882be4e00e53a44f47ad7eb997cac2938848bf (patch) | |
tree | a3f21e64af805bf4c13ac98dda50b0b06678d039 /include/linux/slub_def.h | |
parent | 3eae2cb24a96509e0a38cc48dc1538a2826f4e33 (diff) |
kmemtrace: SLUB hooks.
This adds hooks for the SLUB allocator, to allow tracing with kmemtrace.
Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r-- | include/linux/slub_def.h | 53 |
1 file changed, 50 insertions(+), 3 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 2f5c16b1aacd..dc28432b5b9a 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/gfp.h> | 10 | #include <linux/gfp.h> |
11 | #include <linux/workqueue.h> | 11 | #include <linux/workqueue.h> |
12 | #include <linux/kobject.h> | 12 | #include <linux/kobject.h> |
13 | #include <linux/kmemtrace.h> | ||
13 | 14 | ||
14 | enum stat_item { | 15 | enum stat_item { |
15 | ALLOC_FASTPATH, /* Allocation from cpu slab */ | 16 | ALLOC_FASTPATH, /* Allocation from cpu slab */ |
@@ -204,13 +205,31 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size) | |||
204 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); | 205 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); |
205 | void *__kmalloc(size_t size, gfp_t flags); | 206 | void *__kmalloc(size_t size, gfp_t flags); |
206 | 207 | ||
208 | #ifdef CONFIG_KMEMTRACE | ||
209 | extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags); | ||
210 | #else | ||
211 | static __always_inline void * | ||
212 | kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags) | ||
213 | { | ||
214 | return kmem_cache_alloc(s, gfpflags); | ||
215 | } | ||
216 | #endif | ||
217 | |||
207 | static __always_inline void *kmalloc_large(size_t size, gfp_t flags) | 218 | static __always_inline void *kmalloc_large(size_t size, gfp_t flags) |
208 | { | 219 | { |
209 | return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size)); | 220 | unsigned int order = get_order(size); |
221 | void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order); | ||
222 | |||
223 | kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret, | ||
224 | size, PAGE_SIZE << order, flags); | ||
225 | |||
226 | return ret; | ||
210 | } | 227 | } |
211 | 228 | ||
212 | static __always_inline void *kmalloc(size_t size, gfp_t flags) | 229 | static __always_inline void *kmalloc(size_t size, gfp_t flags) |
213 | { | 230 | { |
231 | void *ret; | ||
232 | |||
214 | if (__builtin_constant_p(size)) { | 233 | if (__builtin_constant_p(size)) { |
215 | if (size > PAGE_SIZE) | 234 | if (size > PAGE_SIZE) |
216 | return kmalloc_large(size, flags); | 235 | return kmalloc_large(size, flags); |
@@ -221,7 +240,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) | |||
221 | if (!s) | 240 | if (!s) |
222 | return ZERO_SIZE_PTR; | 241 | return ZERO_SIZE_PTR; |
223 | 242 | ||
224 | return kmem_cache_alloc(s, flags); | 243 | ret = kmem_cache_alloc_notrace(s, flags); |
244 | |||
245 | kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, | ||
246 | _THIS_IP_, ret, | ||
247 | size, s->size, flags); | ||
248 | |||
249 | return ret; | ||
225 | } | 250 | } |
226 | } | 251 | } |
227 | return __kmalloc(size, flags); | 252 | return __kmalloc(size, flags); |
@@ -231,8 +256,24 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) | |||
231 | void *__kmalloc_node(size_t size, gfp_t flags, int node); | 256 | void *__kmalloc_node(size_t size, gfp_t flags, int node); |
232 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | 257 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); |
233 | 258 | ||
259 | #ifdef CONFIG_KMEMTRACE | ||
260 | extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, | ||
261 | gfp_t gfpflags, | ||
262 | int node); | ||
263 | #else | ||
264 | static __always_inline void * | ||
265 | kmem_cache_alloc_node_notrace(struct kmem_cache *s, | ||
266 | gfp_t gfpflags, | ||
267 | int node) | ||
268 | { | ||
269 | return kmem_cache_alloc_node(s, gfpflags, node); | ||
270 | } | ||
271 | #endif | ||
272 | |||
234 | static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | 273 | static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) |
235 | { | 274 | { |
275 | void *ret; | ||
276 | |||
236 | if (__builtin_constant_p(size) && | 277 | if (__builtin_constant_p(size) && |
237 | size <= PAGE_SIZE && !(flags & SLUB_DMA)) { | 278 | size <= PAGE_SIZE && !(flags & SLUB_DMA)) { |
238 | struct kmem_cache *s = kmalloc_slab(size); | 279 | struct kmem_cache *s = kmalloc_slab(size); |
@@ -240,7 +281,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
240 | if (!s) | 281 | if (!s) |
241 | return ZERO_SIZE_PTR; | 282 | return ZERO_SIZE_PTR; |
242 | 283 | ||
243 | return kmem_cache_alloc_node(s, flags, node); | 284 | ret = kmem_cache_alloc_node_notrace(s, flags, node); |
285 | |||
286 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, | ||
287 | _THIS_IP_, ret, | ||
288 | size, s->size, flags, node); | ||
289 | |||
290 | return ret; | ||
244 | } | 291 | } |
245 | return __kmalloc_node(size, flags, node); | 292 | return __kmalloc_node(size, flags, node); |
246 | } | 293 | } |