Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--	include/linux/slab.h	338
1 file changed, 131 insertions(+), 207 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index c4947b8a2c03..1ef822e31c77 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -1,82 +1,106 @@
 /*
- * linux/include/linux/slab.h
- * Written by Mark Hemment, 1996.
- * (markhe@nextd.demon.co.uk)
+ * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
+ *
+ * (C) SGI 2006, Christoph Lameter <clameter@sgi.com>
+ * Cleaned up and restructured to ease the addition of alternative
+ *  implementations of SLAB allocators.
  */
 
 #ifndef _LINUX_SLAB_H
 #define _LINUX_SLAB_H
 
-#if defined(__KERNEL__)
-
-typedef struct kmem_cache kmem_cache_t;
+#ifdef __KERNEL__
 
 #include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
-#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
+#include <linux/types.h>
 
-/* flags for kmem_cache_alloc() */
-#define	SLAB_NOFS		GFP_NOFS
-#define	SLAB_NOIO		GFP_NOIO
-#define	SLAB_ATOMIC		GFP_ATOMIC
-#define	SLAB_USER		GFP_USER
-#define	SLAB_KERNEL		GFP_KERNEL
-#define	SLAB_DMA		GFP_DMA
+typedef struct kmem_cache kmem_cache_t __deprecated;
 
-#define SLAB_LEVEL_MASK		GFP_LEVEL_MASK
+/*
+ * Flags to pass to kmem_cache_create().
+ * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
+ */
+#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
+#define SLAB_DEBUG_INITIAL	0x00000200UL	/* DEBUG: Call constructor (as verifier) */
+#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
+#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
+#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
+#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
+#define SLAB_MUST_HWCACHE_ALIGN	0x00008000UL	/* Force alignment even if debugging is active */
+#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
+#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
+#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
+#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
+#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
 
-#define SLAB_NO_GROW		__GFP_NO_GROW	/* don't grow a cache */
+/* Flags passed to a constructor function */
+#define SLAB_CTOR_CONSTRUCTOR	0x001UL		/* If not set, then destructor */
+#define SLAB_CTOR_ATOMIC	0x002UL		/* Tell constructor it can't sleep */
+#define SLAB_CTOR_VERIFY	0x004UL		/* Tell constructor it's a verify call */
 
-/* flags to pass to kmem_cache_create().
- * The first 3 are only valid when the allocator as been build
- * SLAB_DEBUG_SUPPORT.
+/*
+ * struct kmem_cache related prototypes
  */
-#define	SLAB_DEBUG_FREE		0x00000100UL	/* Peform (expensive) checks on free */
-#define	SLAB_DEBUG_INITIAL	0x00000200UL	/* Call constructor (as verifier) */
-#define	SLAB_RED_ZONE		0x00000400UL	/* Red zone objs in a cache */
-#define	SLAB_POISON		0x00000800UL	/* Poison objects */
-#define	SLAB_HWCACHE_ALIGN	0x00002000UL	/* align objs on a h/w cache lines */
-#define SLAB_CACHE_DMA		0x00004000UL	/* use GFP_DMA memory */
-#define SLAB_MUST_HWCACHE_ALIGN	0x00008000UL	/* force alignment */
-#define SLAB_STORE_USER		0x00010000UL	/* store the last owner for bug hunting */
-#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* track pages allocated to indicate
-						   what is reclaimable later*/
-#define SLAB_PANIC		0x00040000UL	/* panic if kmem_cache_create() fails */
-#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* defer freeing pages to RCU */
-#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
+void __init kmem_cache_init(void);
+extern int slab_is_available(void);
 
-/* flags passed to a constructor func */
-#define	SLAB_CTOR_CONSTRUCTOR	0x001UL		/* if not set, then deconstructor */
-#define SLAB_CTOR_ATOMIC	0x002UL		/* tell constructor it can't sleep */
-#define	SLAB_CTOR_VERIFY	0x004UL		/* tell constructor it's a verify call */
+struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+			unsigned long,
+			void (*)(void *, struct kmem_cache *, unsigned long),
+			void (*)(void *, struct kmem_cache *, unsigned long));
+void kmem_cache_destroy(struct kmem_cache *);
+int kmem_cache_shrink(struct kmem_cache *);
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
+void kmem_cache_free(struct kmem_cache *, void *);
+unsigned int kmem_cache_size(struct kmem_cache *);
+const char *kmem_cache_name(struct kmem_cache *);
+int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
 
-#ifndef CONFIG_SLOB
+#ifdef CONFIG_NUMA
+extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+#else
+static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
+					gfp_t flags, int node)
+{
+	return kmem_cache_alloc(cachep, flags);
+}
+#endif
 
-/* prototypes */
-extern void __init kmem_cache_init(void);
+/*
+ * Common kmalloc functions provided by all allocators
+ */
+void *__kmalloc(size_t, gfp_t);
+void *__kzalloc(size_t, gfp_t);
+void kfree(const void *);
+unsigned int ksize(const void *);
 
-extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
-				       void (*)(void *, kmem_cache_t *, unsigned long),
-				       void (*)(void *, kmem_cache_t *, unsigned long));
-extern void kmem_cache_destroy(kmem_cache_t *);
-extern int kmem_cache_shrink(kmem_cache_t *);
-extern void *kmem_cache_alloc(kmem_cache_t *, gfp_t);
-extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
-extern void kmem_cache_free(kmem_cache_t *, void *);
-extern unsigned int kmem_cache_size(kmem_cache_t *);
-extern const char *kmem_cache_name(kmem_cache_t *);
+/**
+ * kcalloc - allocate memory for an array. The memory is set to zero.
+ * @n: number of elements.
+ * @size: element size.
+ * @flags: the type of memory to allocate.
+ */
+static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+{
+	if (n != 0 && size > ULONG_MAX / n)
+		return NULL;
+	return __kzalloc(n * size, flags);
+}
 
-/* Size description struct for general caches. */
-struct cache_sizes {
-	size_t		 cs_size;
-	kmem_cache_t	*cs_cachep;
-	kmem_cache_t	*cs_dmacachep;
-};
-extern struct cache_sizes malloc_sizes[];
+/*
+ * Allocator specific definitions. These are mainly used to establish optimized
+ * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by selecting
+ * the appropriate general cache at compile time.
+ */
 
-extern void *__kmalloc(size_t, gfp_t);
+#ifdef CONFIG_SLAB
+#include <linux/slab_def.h>
+#else
+/*
+ * Fallback definitions for an allocator not wanting to provide
+ * its own optimized kmalloc definitions (like SLOB).
+ */
 
 /**
  * kmalloc - allocate memory
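
The hunk above replaces the kmem_cache_t-based prototypes with ones that take
and return struct kmem_cache * directly; the typedef survives only as a
__deprecated alias. It also hoists kcalloc() here with its overflow guard:
since n is non-zero, n * size wraps around ULONG_MAX exactly when
size > ULONG_MAX / n, so e.g. a 32-bit kcalloc(65536, 65537, GFP_KERNEL)
returns NULL instead of allocating a wrapped-around 64KiB buffer. A minimal
usage sketch of the consolidated cache API follows; it is not part of the
patch, and struct foo, foo_cachep, foo_init() and foo_alloc() are
hypothetical names for illustration only.

	#include <linux/init.h>
	#include <linux/slab.h>

	struct foo {
		int a;
		long b;
	};

	static struct kmem_cache *foo_cachep;

	static int __init foo_init(void)
	{
		/* SLAB_PANIC: no NULL check needed, failure panics */
		foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
					SLAB_HWCACHE_ALIGN | SLAB_PANIC,
					NULL, NULL);	/* no ctor/dtor */
		return 0;
	}

	static struct foo *foo_alloc(gfp_t flags)
	{
		/* zeroed object; release with kmem_cache_free() */
		return kmem_cache_zalloc(foo_cachep, flags);
	}
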
@@ -125,46 +149,9 @@ extern void *__kmalloc(size_t, gfp_t);
  */
 static inline void *kmalloc(size_t size, gfp_t flags)
 {
-	if (__builtin_constant_p(size)) {
-		int i = 0;
-#define CACHE(x) \
-		if (size <= x) \
-			goto found; \
-		else \
-			i++;
-#include "kmalloc_sizes.h"
-#undef CACHE
-		{
-			extern void __you_cannot_kmalloc_that_much(void);
-			__you_cannot_kmalloc_that_much();
-		}
-found:
-		return kmem_cache_alloc((flags & GFP_DMA) ?
-			malloc_sizes[i].cs_dmacachep :
-			malloc_sizes[i].cs_cachep, flags);
-	}
 	return __kmalloc(size, flags);
 }
 
-/*
- * kmalloc_track_caller is a special version of kmalloc that records the
- * calling function of the routine calling it for slab leak tracking instead
- * of just the calling function (confusing, eh?).
- * It's useful when the call to kmalloc comes from a widely-used standard
- * allocator where we care about the real place the memory allocation
- * request comes from.
- */
-#ifndef CONFIG_DEBUG_SLAB
-#define kmalloc_track_caller(size, flags) \
-	__kmalloc(size, flags)
-#else
-extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
-#define kmalloc_track_caller(size, flags) \
-	__kmalloc_track_caller(size, flags, __builtin_return_address(0))
-#endif
-
-extern void *__kzalloc(size_t, gfp_t);
-
 /**
  * kzalloc - allocate memory. The memory is set to zero.
  * @size: how many bytes of memory are required.
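
The block removed above was the old compile-time size-to-cache lookup:
kmalloc_sizes.h is just a list of CACHE(x) entries, so the
#define CACHE(x) / #include pair expanded into a chain of size tests that the
compiler folds away whenever size is a compile-time constant, and the
deliberately undefined __you_cannot_kmalloc_that_much() survives to link time
only if no entry matched. That machinery now lives behind CONFIG_SLAB in
<linux/slab_def.h>. A rough standalone sketch of the folding idea, with
abbreviated sizes and a hypothetical function name:

	/* When "size" is constant, gcc reduces this chain to a constant. */
	static inline int kmalloc_index_sketch(size_t size)
	{
		int i = 0;

		if (size <= 32)			/* CACHE(32) */
			return i;
		i++;
		if (size <= 64)			/* CACHE(64) */
			return i;
		i++;
		if (size <= 128)		/* CACHE(128) */
			return i;
		i++;
		/* ... one test per CACHE(x) entry in kmalloc_sizes.h ... */
		return -1;	/* "you cannot kmalloc that much" */
	}
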
@@ -172,128 +159,65 @@ extern void *__kzalloc(size_t, gfp_t);
  */
 static inline void *kzalloc(size_t size, gfp_t flags)
 {
-	if (__builtin_constant_p(size)) {
-		int i = 0;
-#define CACHE(x) \
-		if (size <= x) \
-			goto found; \
-		else \
-			i++;
-#include "kmalloc_sizes.h"
-#undef CACHE
-		{
-			extern void __you_cannot_kzalloc_that_much(void);
-			__you_cannot_kzalloc_that_much();
-		}
-found:
-		return kmem_cache_zalloc((flags & GFP_DMA) ?
-			malloc_sizes[i].cs_dmacachep :
-			malloc_sizes[i].cs_cachep, flags);
-	}
 	return __kzalloc(size, flags);
 }
+#endif
 
-/**
- * kcalloc - allocate memory for an array. The memory is set to zero.
- * @n: number of elements.
- * @size: element size.
- * @flags: the type of memory to allocate.
- */
-static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
-{
-	if (n != 0 && size > ULONG_MAX / n)
-		return NULL;
-	return kzalloc(n * size, flags);
-}
-
-extern void kfree(const void *);
-extern unsigned int ksize(const void *);
-extern int slab_is_available(void);
-
-#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
-
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	if (__builtin_constant_p(size)) {
-		int i = 0;
-#define CACHE(x) \
-		if (size <= x) \
-			goto found; \
-		else \
-			i++;
-#include "kmalloc_sizes.h"
-#undef CACHE
-		{
-			extern void __you_cannot_kmalloc_that_much(void);
-			__you_cannot_kmalloc_that_much();
-		}
-found:
-		return kmem_cache_alloc_node((flags & GFP_DMA) ?
-			malloc_sizes[i].cs_dmacachep :
-			malloc_sizes[i].cs_cachep, flags, node);
-	}
-	return __kmalloc_node(size, flags, node);
-}
-#else
-static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node)
-{
-	return kmem_cache_alloc(cachep, flags);
-}
+#ifndef CONFIG_NUMA
 static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return kmalloc(size, flags);
 }
-#endif
-
-extern int FASTCALL(kmem_cache_reap(int));
-extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
-
-#else /* CONFIG_SLOB */
-
-/* SLOB allocator routines */
-
-void kmem_cache_init(void);
-struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t,
-	unsigned long,
-	void (*)(void *, struct kmem_cache *, unsigned long),
-	void (*)(void *, struct kmem_cache *, unsigned long));
-void kmem_cache_destroy(struct kmem_cache *c);
-void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags);
-void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
-void kmem_cache_free(struct kmem_cache *c, void *b);
-const char *kmem_cache_name(struct kmem_cache *);
-void *kmalloc(size_t size, gfp_t flags);
-void *__kzalloc(size_t size, gfp_t flags);
-void kfree(const void *m);
-unsigned int ksize(const void *m);
-unsigned int kmem_cache_size(struct kmem_cache *c);
 
-static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	return __kzalloc(n * size, flags);
+	return __kmalloc(size, flags);
 }
+#endif /* !CONFIG_NUMA */
 
-#define kmem_cache_shrink(d) (0)
-#define kmem_cache_reap(a)
-#define kmem_ptr_validate(a, b) (0)
-#define kmem_cache_alloc_node(c, f, n) kmem_cache_alloc(c, f)
-#define kmalloc_node(s, f, n) kmalloc(s, f)
-#define kzalloc(s, f) __kzalloc(s, f)
-#define kmalloc_track_caller kmalloc
+/*
+ * kmalloc_track_caller is a special version of kmalloc that records the
+ * calling function of the routine calling it for slab leak tracking instead
+ * of just the calling function (confusing, eh?).
+ * It's useful when the call to kmalloc comes from a widely-used standard
+ * allocator where we care about the real place the memory allocation
+ * request comes from.
+ */
+#ifdef CONFIG_DEBUG_SLAB
+extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
+#define kmalloc_track_caller(size, flags) \
+	__kmalloc_track_caller(size, flags, __builtin_return_address(0))
+#else
+#define kmalloc_track_caller(size, flags) \
+	__kmalloc(size, flags)
+#endif /* DEBUG_SLAB */
 
-#endif /* CONFIG_SLOB */
+#ifdef CONFIG_NUMA
+/*
+ * kmalloc_node_track_caller is a special version of kmalloc_node that
+ * records the calling function of the routine calling it for slab leak
+ * tracking instead of just the calling function (confusing, eh?).
+ * It's useful when the call to kmalloc_node comes from a widely-used
+ * standard allocator where we care about the real place the memory
+ * allocation request comes from.
+ */
+#ifdef CONFIG_DEBUG_SLAB
+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
+#define kmalloc_node_track_caller(size, flags, node) \
+	__kmalloc_node_track_caller(size, flags, node, \
+			__builtin_return_address(0))
+#else
+#define kmalloc_node_track_caller(size, flags, node) \
+	__kmalloc_node(size, flags, node)
+#endif
 
-/* System wide caches */
-extern kmem_cache_t	*vm_area_cachep;
-extern kmem_cache_t	*names_cachep;
-extern kmem_cache_t	*files_cachep;
-extern kmem_cache_t	*filp_cachep;
-extern kmem_cache_t	*fs_cachep;
-extern kmem_cache_t	*sighand_cachep;
-extern kmem_cache_t	*bio_cachep;
+#else /* CONFIG_NUMA */
 
-#endif /* __KERNEL__ */
+#define kmalloc_node_track_caller(size, flags, node) \
+	kmalloc_track_caller(size, flags)
 
+#endif /* CONFIG_NUMA */
+
+#endif /* __KERNEL__ */
 #endif /* _LINUX_SLAB_H */
+
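
After the reshuffle, the track-caller helpers sit outside the
allocator-specific section: kmalloc_track_caller() records the address of the
routine that called the wrapper rather than the wrapper itself, and on
!CONFIG_NUMA kmalloc_node_track_caller() simply falls back to it. A sketch of
the intended use, not taken from the patch; my_strdup() is a hypothetical
wrapper:

	#include <linux/slab.h>
	#include <linux/string.h>

	static char *my_strdup(const char *s, gfp_t flags)
	{
		size_t len = strlen(s) + 1;
		/*
		 * With plain kmalloc(), CONFIG_DEBUG_SLAB leak tracking
		 * would blame my_strdup() for every allocation; this
		 * records my_strdup()'s caller instead.
		 */
		char *buf = kmalloc_track_caller(len, flags);

		if (buf)
			memcpy(buf, s, len);
		return buf;
	}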