path: root/include/linux/slab.h
Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--   include/linux/slab.h   |  306
1 file changed, 112 insertions(+), 194 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2271886744f8..1ef822e31c77 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -1,7 +1,9 @@
 /*
- * linux/include/linux/slab.h
- * Written by Mark Hemment, 1996.
- * (markhe@nextd.demon.co.uk)
+ * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
+ *
+ * (C) SGI 2006, Christoph Lameter <clameter@sgi.com>
+ *      Cleaned up and restructured to ease the addition of alternative
+ *      implementations of SLAB allocators.
  */
 
 #ifndef _LINUX_SLAB_H
@@ -10,64 +12,95 @@
 #ifdef __KERNEL__
 
 #include <linux/gfp.h>
-#include <linux/init.h>
 #include <linux/types.h>
-#include <asm/page.h>           /* kmalloc_sizes.h needs PAGE_SIZE */
-#include <asm/cache.h>          /* kmalloc_sizes.h needs L1_CACHE_BYTES */
-#include <linux/compiler.h>
 
-/* kmem_cache_t exists for legacy reasons and is not used by code in mm */
 typedef struct kmem_cache kmem_cache_t __deprecated;
 
-/* flags to pass to kmem_cache_create().
- * The first 3 are only valid when the allocator as been build
- * SLAB_DEBUG_SUPPORT.
+/*
+ * Flags to pass to kmem_cache_create().
+ * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
  */
-#define SLAB_DEBUG_FREE         0x00000100UL    /* Peform (expensive) checks on free */
-#define SLAB_DEBUG_INITIAL      0x00000200UL    /* Call constructor (as verifier) */
-#define SLAB_RED_ZONE           0x00000400UL    /* Red zone objs in a cache */
-#define SLAB_POISON             0x00000800UL    /* Poison objects */
-#define SLAB_HWCACHE_ALIGN      0x00002000UL    /* align objs on a h/w cache lines */
-#define SLAB_CACHE_DMA          0x00004000UL    /* use GFP_DMA memory */
-#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL    /* force alignment */
-#define SLAB_STORE_USER         0x00010000UL    /* store the last owner for bug hunting */
-#define SLAB_RECLAIM_ACCOUNT    0x00020000UL    /* track pages allocated to indicate
-                                                   what is reclaimable later*/
-#define SLAB_PANIC              0x00040000UL    /* panic if kmem_cache_create() fails */
-#define SLAB_DESTROY_BY_RCU     0x00080000UL    /* defer freeing pages to RCU */
+#define SLAB_DEBUG_FREE         0x00000100UL    /* DEBUG: Perform (expensive) checks on free */
+#define SLAB_DEBUG_INITIAL      0x00000200UL    /* DEBUG: Call constructor (as verifier) */
+#define SLAB_RED_ZONE           0x00000400UL    /* DEBUG: Red zone objs in a cache */
+#define SLAB_POISON             0x00000800UL    /* DEBUG: Poison objects */
+#define SLAB_HWCACHE_ALIGN      0x00002000UL    /* Align objs on cache lines */
+#define SLAB_CACHE_DMA          0x00004000UL    /* Use GFP_DMA memory */
+#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL    /* Force alignment even if debugging is active */
+#define SLAB_STORE_USER         0x00010000UL    /* DEBUG: Store the last owner for bug hunting */
+#define SLAB_RECLAIM_ACCOUNT    0x00020000UL    /* Objects are reclaimable */
+#define SLAB_PANIC              0x00040000UL    /* Panic if kmem_cache_create() fails */
+#define SLAB_DESTROY_BY_RCU     0x00080000UL    /* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD         0x00100000UL    /* Spread some memory over cpuset */
 
-/* flags passed to a constructor func */
-#define SLAB_CTOR_CONSTRUCTOR   0x001UL         /* if not set, then deconstructor */
-#define SLAB_CTOR_ATOMIC        0x002UL         /* tell constructor it can't sleep */
-#define SLAB_CTOR_VERIFY        0x004UL         /* tell constructor it's a verify call */
+/* Flags passed to a constructor function */
+#define SLAB_CTOR_CONSTRUCTOR   0x001UL         /* If not set, then destructor */
+#define SLAB_CTOR_ATOMIC        0x002UL         /* Tell constructor it can't sleep */
+#define SLAB_CTOR_VERIFY        0x004UL         /* Tell constructor it's a verify call */
 
-#ifndef CONFIG_SLOB
-
-/* prototypes */
-extern void __init kmem_cache_init(void);
+/*
+ * struct kmem_cache related prototypes
+ */
+void __init kmem_cache_init(void);
+extern int slab_is_available(void);
 
-extern struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
                         unsigned long,
                         void (*)(void *, struct kmem_cache *, unsigned long),
                         void (*)(void *, struct kmem_cache *, unsigned long));
-extern void kmem_cache_destroy(struct kmem_cache *);
-extern int kmem_cache_shrink(struct kmem_cache *);
-extern void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
-extern void kmem_cache_free(struct kmem_cache *, void *);
-extern unsigned int kmem_cache_size(struct kmem_cache *);
-extern const char *kmem_cache_name(struct kmem_cache *);
+void kmem_cache_destroy(struct kmem_cache *);
+int kmem_cache_shrink(struct kmem_cache *);
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
+void kmem_cache_free(struct kmem_cache *, void *);
+unsigned int kmem_cache_size(struct kmem_cache *);
+const char *kmem_cache_name(struct kmem_cache *);
+int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
+
+#ifdef CONFIG_NUMA
+extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+#else
+static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
+                                        gfp_t flags, int node)
+{
+        return kmem_cache_alloc(cachep, flags);
+}
+#endif
+
+/*
+ * Common kmalloc functions provided by all allocators
+ */
+void *__kmalloc(size_t, gfp_t);
+void *__kzalloc(size_t, gfp_t);
+void kfree(const void *);
+unsigned int ksize(const void *);
+
+/**
+ * kcalloc - allocate memory for an array. The memory is set to zero.
+ * @n: number of elements.
+ * @size: element size.
+ * @flags: the type of memory to allocate.
+ */
+static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+{
+        if (n != 0 && size > ULONG_MAX / n)
+                return NULL;
+        return __kzalloc(n * size, flags);
+}
 
-/* Size description struct for general caches. */
-struct cache_sizes {
-        size_t                  cs_size;
-        struct kmem_cache       *cs_cachep;
-        struct kmem_cache       *cs_dmacachep;
-};
-extern struct cache_sizes malloc_sizes[];
+/*
+ * Allocator specific definitions. These are mainly used to establish optimized
+ * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by selecting
+ * the appropriate general cache at compile time.
+ */
 
-extern void *__kmalloc(size_t, gfp_t);
+#ifdef CONFIG_SLAB
+#include <linux/slab_def.h>
+#else
+/*
+ * Fallback definitions for an allocator not wanting to provide
+ * its own optimized kmalloc definitions (like SLOB).
+ */
 
 /**
  * kmalloc - allocate memory
@@ -116,46 +149,9 @@ extern void *__kmalloc(size_t, gfp_t);
  */
 static inline void *kmalloc(size_t size, gfp_t flags)
 {
-        if (__builtin_constant_p(size)) {
-                int i = 0;
-#define CACHE(x) \
-                if (size <= x) \
-                        goto found; \
-                else \
-                        i++;
-#include "kmalloc_sizes.h"
-#undef CACHE
-                {
-                        extern void __you_cannot_kmalloc_that_much(void);
-                        __you_cannot_kmalloc_that_much();
-                }
-found:
-                return kmem_cache_alloc((flags & GFP_DMA) ?
-                        malloc_sizes[i].cs_dmacachep :
-                        malloc_sizes[i].cs_cachep, flags);
-        }
         return __kmalloc(size, flags);
 }
 
-/*
- * kmalloc_track_caller is a special version of kmalloc that records the
- * calling function of the routine calling it for slab leak tracking instead
- * of just the calling function (confusing, eh?).
- * It's useful when the call to kmalloc comes from a widely-used standard
- * allocator where we care about the real place the memory allocation
- * request comes from.
- */
-#ifndef CONFIG_DEBUG_SLAB
-#define kmalloc_track_caller(size, flags) \
-        __kmalloc(size, flags)
-#else
-extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
-#define kmalloc_track_caller(size, flags) \
-        __kmalloc_track_caller(size, flags, __builtin_return_address(0))
-#endif
-
-extern void *__kzalloc(size_t, gfp_t);
-
 /**
  * kzalloc - allocate memory. The memory is set to zero.
  * @size: how many bytes of memory are required.
@@ -163,72 +159,41 @@ extern void *__kzalloc(size_t, gfp_t);
  */
 static inline void *kzalloc(size_t size, gfp_t flags)
 {
-        if (__builtin_constant_p(size)) {
-                int i = 0;
-#define CACHE(x) \
-                if (size <= x) \
-                        goto found; \
-                else \
-                        i++;
-#include "kmalloc_sizes.h"
-#undef CACHE
-                {
-                        extern void __you_cannot_kzalloc_that_much(void);
-                        __you_cannot_kzalloc_that_much();
-                }
-found:
-                return kmem_cache_zalloc((flags & GFP_DMA) ?
-                        malloc_sizes[i].cs_dmacachep :
-                        malloc_sizes[i].cs_cachep, flags);
-        }
         return __kzalloc(size, flags);
 }
+#endif
 
-/**
- * kcalloc - allocate memory for an array. The memory is set to zero.
- * @n: number of elements.
- * @size: element size.
- * @flags: the type of memory to allocate.
- */
-static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+#ifndef CONFIG_NUMA
+static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-        if (n != 0 && size > ULONG_MAX / n)
-                return NULL;
-        return kzalloc(n * size, flags);
+        return kmalloc(size, flags);
 }
 
-extern void kfree(const void *);
-extern unsigned int ksize(const void *);
-extern int slab_is_available(void);
-
-#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
-
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-        if (__builtin_constant_p(size)) {
-                int i = 0;
-#define CACHE(x) \
-                if (size <= x) \
-                        goto found; \
-                else \
-                        i++;
-#include "kmalloc_sizes.h"
-#undef CACHE
-                {
-                        extern void __you_cannot_kmalloc_that_much(void);
-                        __you_cannot_kmalloc_that_much();
-                }
-found:
-                return kmem_cache_alloc_node((flags & GFP_DMA) ?
-                        malloc_sizes[i].cs_dmacachep :
-                        malloc_sizes[i].cs_cachep, flags, node);
-        }
-        return __kmalloc_node(size, flags, node);
+        return __kmalloc(size, flags);
 }
+#endif /* !CONFIG_NUMA */
 
 /*
+ * kmalloc_track_caller is a special version of kmalloc that records the
+ * calling function of the routine calling it for slab leak tracking instead
+ * of just the calling function (confusing, eh?).
+ * It's useful when the call to kmalloc comes from a widely-used standard
+ * allocator where we care about the real place the memory allocation
+ * request comes from.
+ */
+#ifdef CONFIG_DEBUG_SLAB
+extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
+#define kmalloc_track_caller(size, flags) \
+        __kmalloc_track_caller(size, flags, __builtin_return_address(0))
+#else
+#define kmalloc_track_caller(size, flags) \
+        __kmalloc(size, flags)
+#endif /* DEBUG_SLAB */
+
+#ifdef CONFIG_NUMA
+/*
  * kmalloc_node_track_caller is a special version of kmalloc_node that
  * records the calling function of the routine calling it for slab leak
  * tracking instead of just the calling function (confusing, eh?).
@@ -236,70 +201,23 @@ found:
  * standard allocator where we care about the real place the memory
  * allocation request comes from.
  */
-#ifndef CONFIG_DEBUG_SLAB
-#define kmalloc_node_track_caller(size, flags, node) \
-        __kmalloc_node(size, flags, node)
-#else
+#ifdef CONFIG_DEBUG_SLAB
 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
 #define kmalloc_node_track_caller(size, flags, node) \
         __kmalloc_node_track_caller(size, flags, node, \
                         __builtin_return_address(0))
+#else
+#define kmalloc_node_track_caller(size, flags, node) \
+        __kmalloc_node(size, flags, node)
 #endif
+
 #else /* CONFIG_NUMA */
-static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
-                                        gfp_t flags, int node)
-{
-        return kmem_cache_alloc(cachep, flags);
-}
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-        return kmalloc(size, flags);
-}
 
 #define kmalloc_node_track_caller(size, flags, node) \
         kmalloc_track_caller(size, flags)
-#endif
 
-extern int FASTCALL(kmem_cache_reap(int));
-extern int FASTCALL(kmem_ptr_validate(struct kmem_cache *cachep, void *ptr));
-
-#else /* CONFIG_SLOB */
-
-/* SLOB allocator routines */
-
-void kmem_cache_init(void);
-struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t,
-        unsigned long,
-        void (*)(void *, struct kmem_cache *, unsigned long),
-        void (*)(void *, struct kmem_cache *, unsigned long));
-void kmem_cache_destroy(struct kmem_cache *c);
-void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags);
-void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
-void kmem_cache_free(struct kmem_cache *c, void *b);
-const char *kmem_cache_name(struct kmem_cache *);
-void *kmalloc(size_t size, gfp_t flags);
-void *__kzalloc(size_t size, gfp_t flags);
-void kfree(const void *m);
-unsigned int ksize(const void *m);
-unsigned int kmem_cache_size(struct kmem_cache *c);
-
-static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
-{
-        return __kzalloc(n * size, flags);
-}
-
-#define kmem_cache_shrink(d) (0)
-#define kmem_cache_reap(a)
-#define kmem_ptr_validate(a, b) (0)
-#define kmem_cache_alloc_node(c, f, n) kmem_cache_alloc(c, f)
-#define kmalloc_node(s, f, n) kmalloc(s, f)
-#define kzalloc(s, f) __kzalloc(s, f)
-#define kmalloc_track_caller kmalloc
-
-#define kmalloc_node_track_caller kmalloc_node
-
-#endif /* CONFIG_SLOB */
+#endif /* DEBUG_SLAB */
 
 #endif /* __KERNEL__ */
-
 #endif /* _LINUX_SLAB_H */
+
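
For orientation, a minimal usage sketch of the consolidated kmem_cache API declared above. This is not part of the patch: the object type, cache name, and helper functions (struct foo, foo_cache_setup(), and so on) are hypothetical, and only prototypes and flags that actually appear in this header are used.

/* Hypothetical example -- illustrates the prototypes declared in slab.h. */
#include <linux/slab.h>
#include <linux/errno.h>

struct foo {
        int id;
        char name[32];
};

static struct kmem_cache *foo_cachep;

static int foo_cache_setup(void)
{
        /* Six-argument create: name, object size, align, flags, ctor, dtor.
         * SLAB_HWCACHE_ALIGN is one of the flags defined above; the
         * constructor/destructor pointers may simply be NULL. */
        foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
                                       SLAB_HWCACHE_ALIGN, NULL, NULL);
        return foo_cachep ? 0 : -ENOMEM;
}

static void foo_cache_use(void)
{
        /* Allocate and free one object from the cache. */
        struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

        if (!f)
                return;
        f->id = 1;
        kmem_cache_free(foo_cachep, f);
}

static void foo_cache_teardown(void)
{
        kmem_cache_destroy(foo_cachep);
}

The same calls work whichever allocator backs them, which is the point of moving the common prototypes into this header and leaving only the kmalloc() fast-path selection to the allocator-specific slab_def.h.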
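
The kcalloc() helper introduced above rejects array sizes whose product would overflow before calling __kzalloc(). A standalone, userspace-only sketch (illustrative, not kernel code) of why the check is written as size > ULONG_MAX / n:

/* Why kcalloc() divides instead of multiplying: where unsigned long is
 * 32 bits, 0x10000 * 0x10001 wraps to 0x10000, so an unchecked
 * n * size would silently allocate far too little memory. */
#include <limits.h>
#include <stdio.h>

int main(void)
{
        unsigned long n = 0x10000UL, size = 0x10001UL;

        if (n != 0 && size > ULONG_MAX / n)
                printf("rejected: %lu * %lu would overflow\n", n, size);
        else
                printf("product fits: %lu bytes\n", n * size);
        return 0;
}

On a 64-bit unsigned long the example product fits and the allocation would proceed; the guard only triggers when the multiplication would actually wrap.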