Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--	include/linux/slab.h	231
1 file changed, 163 insertions(+), 68 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 5d168d7e0a28..0c621752caa6 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -94,29 +94,6 @@
 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
 				(unsigned long)ZERO_SIZE_PTR)
 
-/*
- * Common fields provided in kmem_cache by all slab allocators
- * This struct is either used directly by the allocator (SLOB)
- * or the allocator must include definitions for all fields
- * provided in kmem_cache_common in their definition of kmem_cache.
- *
- * Once we can do anonymous structs (C11 standard) we could put a
- * anonymous struct definition in these allocators so that the
- * separate allocations in the kmem_cache structure of SLAB and
- * SLUB is no longer needed.
- */
-#ifdef CONFIG_SLOB
-struct kmem_cache {
-	unsigned int object_size;/* The original size of the object */
-	unsigned int size;	/* The aligned/padded/added on size */
-	unsigned int align;	/* Alignment as calculated */
-	unsigned long flags;	/* Active flags on the slab */
-	const char *name;	/* Slab name for sysfs */
-	int refcount;		/* Use counter */
-	void (*ctor)(void *);	/* Called on object slot creation */
-	struct list_head list;	/* List of all slab caches on the system */
-};
-#endif
 
 struct mem_cgroup;
 /*
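The comment in the block removed above (re-added under CONFIG_SLOB in the next hunk) points at C11 anonymous structs as the eventual cleanup. A minimal sketch of that idea, assuming a hypothetical embedding of the common fields; this is not how the kernel defines kmem_cache here:

/* Sketch of the C11 anonymous-struct cleanup the comment describes.
 * Hypothetical layout, not the kernel's actual definition. */
struct kmem_cache {
	struct {			/* C11 anonymous struct */
		unsigned int object_size;
		unsigned int size;
		const char *name;	/* common fields, as above */
	};				/* accessed directly: s->size */
	/* ... allocator-specific fields (SLAB/SLUB) would follow ... */
};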
@@ -148,7 +125,63 @@ void kmem_cache_free(struct kmem_cache *, void *);
 					(__flags), NULL)
 
 /*
- * The largest kmalloc size supported by the slab allocators is
+ * Common kmalloc functions provided by all allocators
+ */
+void * __must_check __krealloc(const void *, size_t, gfp_t);
+void * __must_check krealloc(const void *, size_t, gfp_t);
+void kfree(const void *);
+void kzfree(const void *);
+size_t ksize(const void *);
+
+/*
+ * Some archs want to perform DMA into kmalloc caches and need a guaranteed
+ * alignment larger than the alignment of a 64-bit integer.
+ * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
+ */
+#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
+#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
+#else
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifdef CONFIG_SLOB
+/*
+ * Common fields provided in kmem_cache by all slab allocators
+ * This struct is either used directly by the allocator (SLOB)
+ * or the allocator must include definitions for all fields
+ * provided in kmem_cache_common in their definition of kmem_cache.
+ *
+ * Once we can do anonymous structs (C11 standard) we could put an
+ * anonymous struct definition in these allocators so that the
+ * separate allocations in the kmem_cache structure of SLAB and
+ * SLUB are no longer needed.
+ */
+struct kmem_cache {
+	unsigned int object_size;/* The original size of the object */
+	unsigned int size;	/* The aligned/padded/added on size */
+	unsigned int align;	/* Alignment as calculated */
+	unsigned long flags;	/* Active flags on the slab */
+	const char *name;	/* Slab name for sysfs */
+	int refcount;		/* Use counter */
+	void (*ctor)(void *);	/* Called on object slot creation */
+	struct list_head list;	/* List of all slab caches on the system */
+};
+
+#define KMALLOC_MAX_SIZE (1UL << 30)
+
+#include <linux/slob_def.h>
+
+#else /* CONFIG_SLOB */
+
+/*
+ * Kmalloc array related definitions
+ */
+
+#ifdef CONFIG_SLAB
+/*
+ * The largest kmalloc size supported by the SLAB allocators is
  * 32 megabyte (2^25) or the maximum allocatable page order if that is
  * less than 32 MB.
  *
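The block above consolidates the allocator-independent kmalloc entry points. A hedged usage sketch of the krealloc()/ksize() pattern; the helper and its growth policy are illustrative, not kernel code:

/* Illustrative caller of the common API declared above. ksize() can
 * report more usable space than was originally requested, which lets
 * a caller skip a reallocation; krealloc() leaves the old buffer
 * untouched on failure. */
static void *ensure_capacity(void *buf, size_t needed)
{
	if (buf && ksize(buf) >= needed)
		return buf;

	return krealloc(buf, needed, GFP_KERNEL);	/* NULL on failure */
}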
@@ -158,22 +191,120 @@ void kmem_cache_free(struct kmem_cache *, void *);
  */
 #define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
 				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
+#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
+#ifndef KMALLOC_SHIFT_LOW
+#define KMALLOC_SHIFT_LOW	5
+#endif
+#else
+/*
+ * SLUB allocates up to order 2 pages directly and otherwise
+ * passes the request to the page allocator.
+ */
+#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
+#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
+#ifndef KMALLOC_SHIFT_LOW
+#define KMALLOC_SHIFT_LOW	3
+#endif
+#endif
 
-#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_HIGH)
-#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
+/* Maximum allocatable size */
+#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
+/* Maximum size for which we actually use a slab cache */
+#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
+/* Maximum order allocatable via the slab allocator */
+#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
 
 /*
- * Some archs want to perform DMA into kmalloc caches and need a guaranteed
- * alignment larger than the alignment of a 64-bit integer.
- * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
+ * Kmalloc subsystem.
  */
-#ifdef ARCH_DMA_MINALIGN
-#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#ifndef KMALLOC_MIN_SIZE
+#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
+#endif
+
+extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+#ifdef CONFIG_ZONE_DMA
+extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+#endif
+
+/*
+ * Figure out which kmalloc slab an allocation of a certain size
+ * belongs to.
+ * 0 = zero alloc
+ * 1 =  65 .. 96 bytes
+ * 2 = 129 .. 192 bytes
+ * n = 2^(n-1) .. 2^n - 1
+ */
+static __always_inline int kmalloc_index(size_t size)
+{
+	if (!size)
+		return 0;
+
+	if (size <= KMALLOC_MIN_SIZE)
+		return KMALLOC_SHIFT_LOW;
+
+	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
+		return 1;
+	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
+		return 2;
+	if (size <=          8) return 3;
+	if (size <=         16) return 4;
+	if (size <=         32) return 5;
+	if (size <=         64) return 6;
+	if (size <=        128) return 7;
+	if (size <=        256) return 8;
+	if (size <=        512) return 9;
+	if (size <=       1024) return 10;
+	if (size <=   2 * 1024) return 11;
+	if (size <=   4 * 1024) return 12;
+	if (size <=   8 * 1024) return 13;
+	if (size <=  16 * 1024) return 14;
+	if (size <=  32 * 1024) return 15;
+	if (size <=  64 * 1024) return 16;
+	if (size <= 128 * 1024) return 17;
+	if (size <= 256 * 1024) return 18;
+	if (size <= 512 * 1024) return 19;
+	if (size <= 1024 * 1024) return 20;
+	if (size <=  2 * 1024 * 1024) return 21;
+	if (size <=  4 * 1024 * 1024) return 22;
+	if (size <=  8 * 1024 * 1024) return 23;
+	if (size <= 16 * 1024 * 1024) return 24;
+	if (size <= 32 * 1024 * 1024) return 25;
+	if (size <= 64 * 1024 * 1024) return 26;
+	BUG();
+
+	/* Will never be reached. Needed because the compiler may complain */
+	return -1;
+}
+
+#ifdef CONFIG_SLAB
+#include <linux/slab_def.h>
+#elif defined(CONFIG_SLUB)
+#include <linux/slub_def.h>
 #else
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#error "Unknown slab allocator"
 #endif
 
 /*
+ * Determine size used for the nth kmalloc cache.
+ * Return size or 0 if a kmalloc cache for that
+ * size does not exist
+ */
+static __always_inline int kmalloc_size(int n)
+{
+	if (n > 2)
+		return 1 << n;
+
+	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
+		return 96;
+
+	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
+		return 192;
+
+	return 0;
+}
+#endif /* !CONFIG_SLOB */
+
+/*
  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
  * Intended for arches that get misalignment faults even for 64 bit integer
  * aligned buffers.
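kmalloc_index() and kmalloc_size() above give the forward and reverse mapping between a request size and its kmalloc cache. For a compile-time-constant size the comparison chain folds to a constant index; a condensed sketch of how an allocator's kmalloc() fast path can use this (the real slab_def.h/slub_def.h versions also handle sizes above KMALLOC_MAX_CACHE_SIZE, DMA caches and tracing):

/* Condensed, assumed sketch of a constant-folded kmalloc() fast path;
 * not the actual allocator header code. */
static __always_inline void *kmalloc_sketch(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && size <= KMALLOC_MAX_CACHE_SIZE) {
		int index = kmalloc_index(size);  /* folds to a constant */

		if (!index)
			return ZERO_SIZE_PTR;     /* zero-sized request */
		return kmem_cache_alloc(kmalloc_caches[index], flags);
	}
	return __kmalloc(size, flags);            /* runtime-sized path */
}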
@@ -224,42 +355,6 @@ struct seq_file;
 int cache_show(struct kmem_cache *s, struct seq_file *m);
 void print_slabinfo_header(struct seq_file *m);
 
-/*
- * Common kmalloc functions provided by all allocators
- */
-void * __must_check __krealloc(const void *, size_t, gfp_t);
-void * __must_check krealloc(const void *, size_t, gfp_t);
-void kfree(const void *);
-void kzfree(const void *);
-size_t ksize(const void *);
-
-/*
- * Allocator specific definitions. These are mainly used to establish optimized
- * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
- * selecting the appropriate general cache at compile time.
- *
- * Allocators must define at least:
- *
- *	kmem_cache_alloc()
- *	__kmalloc()
- *	kmalloc()
- *
- * Those wishing to support NUMA must also define:
- *
- *	kmem_cache_alloc_node()
- *	kmalloc_node()
- *
- * See each allocator definition file for additional comments and
- * implementation notes.
- */
-#ifdef CONFIG_SLUB
-#include <linux/slub_def.h>
-#elif defined(CONFIG_SLOB)
-#include <linux/slob_def.h>
-#else
-#include <linux/slab_def.h>
-#endif
-
 /**
  * kmalloc_array - allocate memory for an array.
  * @n: number of elements.
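kmalloc_array(), whose kerneldoc begins above, wraps kmalloc() with an overflow check on the n * size multiplication. A short usage sketch, with 'struct entry' as a placeholder type:

/* Illustrative caller: if n * sizeof(*entries) would overflow,
 * kmalloc_array() returns NULL rather than a short buffer. */
static int make_table(size_t n)
{
	struct entry *entries;

	entries = kmalloc_array(n, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;
	/* ... use entries[0..n-1], later kfree(entries) ... */
	return 0;
}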