author		Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>	2008-08-10 13:14:05 -0400
committer	Pekka Enberg <penberg@cs.helsinki.fi>				2008-12-29 08:34:04 -0500
commit		36555751c6751a5bdfd6d7bdf0648343bb1ef0de
tree		47ed7ab2c8971e4c5d2f5a902860b1cf9facbc42	/include/linux/slab_def.h
parent		aa46a7e0228c0477708ce44a0c5621902b3c157c
kmemtrace: SLAB hooks.
This adds hooks for the SLAB allocator to allow tracing with kmemtrace.
We also convert some inline functions to __always_inline to make sure
_RET_IP_, which expands to __builtin_return_address(0), always works
as expected.
Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
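
The __always_inline point deserves a concrete illustration. The hooks record the allocation call site via the kernel's _THIS_IP_ macro (a local-label address; _RET_IP_ is its sibling built on __builtin_return_address(0)). Either macro only identifies the real caller if the wrapper is guaranteed to be inlined into it; plain "inline" is merely a hint the compiler may ignore. Below is a minimal standalone sketch, using hypothetical names (THIS_IP, my_kmalloc, and mark_alloc are stand-ins, not kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Mirrors the kernel's _THIS_IP_: the address of a local label,
 * i.e. the instruction pointer at the point of expansion. */
#define THIS_IP ({ __label__ __here; __here: (unsigned long)&&__here; })

/* Stand-in for kmemtrace_mark_alloc(): just log the call site. */
static void mark_alloc(unsigned long call_site, size_t size)
{
	printf("alloc of %zu bytes, call site 0x%lx\n", size, call_site);
}

/* Forced inlining guarantees THIS_IP expands inside the *caller*,
 * so every call site reports its own distinct address.  With a
 * plain "inline" the compiler may emit one out-of-line copy, and
 * all callers would then report the same address inside it. */
static inline __attribute__((always_inline)) void *my_kmalloc(size_t size)
{
	void *ret = malloc(size);

	mark_alloc(THIS_IP, size);
	return ret;
}

int main(void)
{
	free(my_kmalloc(32));	/* one call site ... */
	free(my_kmalloc(64));	/* ... and a distinct second one */
	return 0;
}

Built with GCC, the two calls print two distinct addresses inside main(). Drop the always_inline attribute and compile at -O0, and both calls report the same address inside the single out-of-line my_kmalloc() copy, which is exactly the ambiguity the patch removes.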
Diffstat (limited to 'include/linux/slab_def.h')
-rw-r--r--	include/linux/slab_def.h	68 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 59 insertions(+), 9 deletions(-)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 39c3a5eb8ebe..7555ce99f6d2 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,6 +14,7 @@
 #include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
 #include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
+#include <linux/kmemtrace.h>
 
 /* Size description struct for general caches. */
 struct cache_sizes {
@@ -28,8 +29,26 @@ extern struct cache_sizes malloc_sizes[];
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
-static inline void *kmalloc(size_t size, gfp_t flags)
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern size_t slab_buffer_size(struct kmem_cache *cachep);
+#else
+static __always_inline void *
+kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
 {
+	return kmem_cache_alloc(cachep, flags);
+}
+static inline size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+	return 0;
+}
+#endif
+
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+	struct kmem_cache *cachep;
+	void *ret;
+
 	if (__builtin_constant_p(size)) {
 		int i = 0;
 
@@ -50,10 +69,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 found:
 #ifdef CONFIG_ZONE_DMA
 		if (flags & GFP_DMA)
-			return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
-						flags);
+			cachep = malloc_sizes[i].cs_dmacachep;
+		else
 #endif
-		return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
+			cachep = malloc_sizes[i].cs_cachep;
+
+		ret = kmem_cache_alloc_notrace(cachep, flags);
+
+		kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
+				     size, slab_buffer_size(cachep), flags);
+
+		return ret;
 	}
 	return __kmalloc(size, flags);
 }
@@ -62,8 +88,25 @@ found:
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+					   gfp_t flags,
+					   int nodeid);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+			      gfp_t flags,
+			      int nodeid)
+{
+	return kmem_cache_alloc_node(cachep, flags, nodeid);
+}
+#endif
+
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
+	struct kmem_cache *cachep;
+	void *ret;
+
 	if (__builtin_constant_p(size)) {
 		int i = 0;
 
@@ -84,11 +127,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 found:
 #ifdef CONFIG_ZONE_DMA
 		if (flags & GFP_DMA)
-			return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
-						     flags, node);
+			cachep = malloc_sizes[i].cs_dmacachep;
+		else
 #endif
-		return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
-					     flags, node);
+			cachep = malloc_sizes[i].cs_cachep;
+
+		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_,
+					  ret, size, slab_buffer_size(cachep),
+					  flags, node);
+
+		return ret;
 	}
 	return __kmalloc_node(size, flags, node);
 }
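
A note on the shape of the change: the inline kmalloc() now allocates through kmem_cache_alloc_notrace() rather than the plain entry point, because kmem_cache_alloc() itself gains a tracing hook elsewhere in this patch (in mm/slab.c, outside this diffstat-limited view). Going through the traced entry point would log every kmalloc() twice and would lose the requested size; the _notrace variant allocates silently so the wrapper can emit a single KMEMTRACE_TYPE_KMALLOC event carrying both the requested size and the actual buffer size from slab_buffer_size(). A standalone sketch of that structure, with hypothetical names (cache_alloc, my_kmalloc; not kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Allocate without logging -- the analogue of *_notrace. */
static void *cache_alloc_notrace(size_t bufsz)
{
	return malloc(bufsz);
}

/* Traced entry point: logs a CACHE event for direct users. */
static void *cache_alloc(size_t bufsz)
{
	void *ret = cache_alloc_notrace(bufsz);

	printf("CACHE   event: %zu-byte buffer at %p\n", bufsz, ret);
	return ret;
}

/* The kmalloc-style wrapper allocates via the silent variant and
 * emits exactly one event, keeping the requested size alongside
 * the (rounded-up) buffer size.  Calling cache_alloc() here would
 * log the allocation twice. */
static inline void *my_kmalloc(size_t size)
{
	size_t bufsz = size < 32 ? 32 : size;	/* toy size-class rounding */
	void *ret = cache_alloc_notrace(bufsz);

	printf("KMALLOC event: %zu requested, %zu-byte buffer at %p\n",
	       size, bufsz, ret);
	return ret;
}

int main(void)
{
	free(cache_alloc(64));	/* direct cache user: one CACHE event */
	free(my_kmalloc(5));	/* kmalloc user: one KMALLOC event */
	return 0;
}

When CONFIG_KMEMTRACE is off, the stubs added above the kmalloc() body collapse kmem_cache_alloc_notrace() back to kmem_cache_alloc() and slab_buffer_size() to 0, and the kmemtrace_mark_alloc() hooks compile away, so an untraced build pays essentially nothing for these changes.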