diff options
author | Christoph Lameter <cl@linux.com> | 2013-01-10 14:14:19 -0500 |
---|---|---|
committer | Pekka Enberg <penberg@kernel.org> | 2013-02-01 05:32:05 -0500 |
commit | ce6a50263d4ddeba1f0d08f16716a82770c03690 (patch) | |
tree | 099024fa474177d2e26709de76a211050ee9a4a1 /include/linux/slab.h | |
parent | 345046673449b5c35840e5cc34a60059cbec9305 (diff) |
slab: Common kmalloc slab index determination
Extract the function to determine the index of the slab within
the array of kmalloc caches as well as a function to determine
maximum object size from the number of the kmalloc slab.
This is used here only to simplify slub bootstrap but will
be used later also for SLAB.
Acked-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'include/linux/slab.h')
-rw-r--r-- | include/linux/slab.h | 172 |
1 file changed, 122 insertions, 50 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h index ccbb37685c6c..c97fe92532d1 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -94,29 +94,6 @@ | |||
94 | #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ | 94 | #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ |
95 | (unsigned long)ZERO_SIZE_PTR) | 95 | (unsigned long)ZERO_SIZE_PTR) |
96 | 96 | ||
97 | /* | ||
98 | * Common fields provided in kmem_cache by all slab allocators | ||
99 | * This struct is either used directly by the allocator (SLOB) | ||
100 | * or the allocator must include definitions for all fields | ||
101 | * provided in kmem_cache_common in their definition of kmem_cache. | ||
102 | * | ||
103 | * Once we can do anonymous structs (C11 standard) we could put a | ||
104 | * anonymous struct definition in these allocators so that the | ||
105 | * separate allocations in the kmem_cache structure of SLAB and | ||
106 | * SLUB is no longer needed. | ||
107 | */ | ||
108 | #ifdef CONFIG_SLOB | ||
109 | struct kmem_cache { | ||
110 | unsigned int object_size;/* The original size of the object */ | ||
111 | unsigned int size; /* The aligned/padded/added on size */ | ||
112 | unsigned int align; /* Alignment as calculated */ | ||
113 | unsigned long flags; /* Active flags on the slab */ | ||
114 | const char *name; /* Slab name for sysfs */ | ||
115 | int refcount; /* Use counter */ | ||
116 | void (*ctor)(void *); /* Called on object slot creation */ | ||
117 | struct list_head list; /* List of all slab caches on the system */ | ||
118 | }; | ||
119 | #endif | ||
120 | 97 | ||
121 | struct mem_cgroup; | 98 | struct mem_cgroup; |
122 | /* | 99 | /* |
@@ -156,6 +133,35 @@ void kfree(const void *); | |||
156 | void kzfree(const void *); | 133 | void kzfree(const void *); |
157 | size_t ksize(const void *); | 134 | size_t ksize(const void *); |
158 | 135 | ||
136 | #ifdef CONFIG_SLOB | ||
137 | /* | ||
138 | * Common fields provided in kmem_cache by all slab allocators | ||
139 | * This struct is either used directly by the allocator (SLOB) | ||
140 | * or the allocator must include definitions for all fields | ||
141 | * provided in kmem_cache_common in their definition of kmem_cache. | ||
142 | * | ||
143 | * Once we can do anonymous structs (C11 standard) we could put a | ||
144 | * anonymous struct definition in these allocators so that the | ||
145 | * separate allocations in the kmem_cache structure of SLAB and | ||
146 | * SLUB is no longer needed. | ||
147 | */ | ||
148 | struct kmem_cache { | ||
149 | unsigned int object_size;/* The original size of the object */ | ||
150 | unsigned int size; /* The aligned/padded/added on size */ | ||
151 | unsigned int align; /* Alignment as calculated */ | ||
152 | unsigned long flags; /* Active flags on the slab */ | ||
153 | const char *name; /* Slab name for sysfs */ | ||
154 | int refcount; /* Use counter */ | ||
155 | void (*ctor)(void *); /* Called on object slot creation */ | ||
156 | struct list_head list; /* List of all slab caches on the system */ | ||
157 | }; | ||
158 | |||
159 | #define KMALLOC_MAX_SIZE (1UL << 30) | ||
160 | |||
161 | #include <linux/slob_def.h> | ||
162 | |||
163 | #else /* CONFIG_SLOB */ | ||
164 | |||
159 | /* | 165 | /* |
160 | * The largest kmalloc size supported by the slab allocators is | 166 | * The largest kmalloc size supported by the slab allocators is |
161 | * 32 megabyte (2^25) or the maximum allocatable page order if that is | 167 | * 32 megabyte (2^25) or the maximum allocatable page order if that is |
@@ -172,6 +178,99 @@ size_t ksize(const void *); | |||
172 | #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT) | 178 | #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT) |
173 | 179 | ||
174 | /* | 180 | /* |
181 | * Kmalloc subsystem. | ||
182 | */ | ||
183 | #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8 | ||
184 | #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN | ||
185 | #else | ||
186 | #ifdef CONFIG_SLAB | ||
187 | #define KMALLOC_MIN_SIZE 32 | ||
188 | #else | ||
189 | #define KMALLOC_MIN_SIZE 8 | ||
190 | #endif | ||
191 | #endif | ||
192 | |||
193 | #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE) | ||
194 | |||
195 | /* | ||
196 | * Figure out which kmalloc slab an allocation of a certain size | ||
197 | * belongs to. | ||
198 | * 0 = zero alloc | ||
199 | * 1 = 65 .. 96 bytes | ||
200 | * 2 = 129 .. 192 bytes | ||
201 | * n = 2^(n-1)+1 .. 2^n | ||
202 | */ | ||
203 | static __always_inline int kmalloc_index(size_t size) | ||
204 | { | ||
205 | if (!size) | ||
206 | return 0; | ||
207 | |||
208 | if (size <= KMALLOC_MIN_SIZE) | ||
209 | return KMALLOC_SHIFT_LOW; | ||
210 | |||
211 | if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96) | ||
212 | return 1; | ||
213 | if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192) | ||
214 | return 2; | ||
215 | if (size <= 8) return 3; | ||
216 | if (size <= 16) return 4; | ||
217 | if (size <= 32) return 5; | ||
218 | if (size <= 64) return 6; | ||
219 | if (size <= 128) return 7; | ||
220 | if (size <= 256) return 8; | ||
221 | if (size <= 512) return 9; | ||
222 | if (size <= 1024) return 10; | ||
223 | if (size <= 2 * 1024) return 11; | ||
224 | if (size <= 4 * 1024) return 12; | ||
225 | if (size <= 8 * 1024) return 13; | ||
226 | if (size <= 16 * 1024) return 14; | ||
227 | if (size <= 32 * 1024) return 15; | ||
228 | if (size <= 64 * 1024) return 16; | ||
229 | if (size <= 128 * 1024) return 17; | ||
230 | if (size <= 256 * 1024) return 18; | ||
231 | if (size <= 512 * 1024) return 19; | ||
232 | if (size <= 1024 * 1024) return 20; | ||
233 | if (size <= 2 * 1024 * 1024) return 21; | ||
234 | if (size <= 4 * 1024 * 1024) return 22; | ||
235 | if (size <= 8 * 1024 * 1024) return 23; | ||
236 | if (size <= 16 * 1024 * 1024) return 24; | ||
237 | if (size <= 32 * 1024 * 1024) return 25; | ||
238 | if (size <= 64 * 1024 * 1024) return 26; | ||
239 | BUG(); | ||
240 | |||
241 | /* Will never be reached. Needed because the compiler may complain */ | ||
242 | return -1; | ||
243 | } | ||
244 | |||
245 | #ifdef CONFIG_SLAB | ||
246 | #include <linux/slab_def.h> | ||
247 | #elif defined(CONFIG_SLUB) | ||
248 | #include <linux/slub_def.h> | ||
249 | #else | ||
250 | #error "Unknown slab allocator" | ||
251 | #endif | ||
252 | |||
253 | /* | ||
254 | * Determine size used for the nth kmalloc cache. | ||
255 | * return size or 0 if a kmalloc cache for that | ||
256 | * size does not exist | ||
257 | */ | ||
258 | static __always_inline int kmalloc_size(int n) | ||
259 | { | ||
260 | if (n > 2) | ||
261 | return 1 << n; | ||
262 | |||
263 | if (n == 1 && KMALLOC_MIN_SIZE <= 32) | ||
264 | return 96; | ||
265 | |||
266 | if (n == 2 && KMALLOC_MIN_SIZE <= 64) | ||
267 | return 192; | ||
268 | |||
269 | return 0; | ||
270 | } | ||
271 | #endif /* !CONFIG_SLOB */ | ||
272 | |||
273 | /* | ||
175 | * Some archs want to perform DMA into kmalloc caches and need a guaranteed | 274 | * Some archs want to perform DMA into kmalloc caches and need a guaranteed |
176 | * alignment larger than the alignment of a 64-bit integer. | 275 | * alignment larger than the alignment of a 64-bit integer. |
177 | * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that. | 276 | * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that. |
@@ -233,33 +332,6 @@ struct seq_file; | |||
233 | int cache_show(struct kmem_cache *s, struct seq_file *m); | 332 | int cache_show(struct kmem_cache *s, struct seq_file *m); |
234 | void print_slabinfo_header(struct seq_file *m); | 333 | void print_slabinfo_header(struct seq_file *m); |
235 | 334 | ||
236 | /* | ||
237 | * Allocator specific definitions. These are mainly used to establish optimized | ||
238 | * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by | ||
239 | * selecting the appropriate general cache at compile time. | ||
240 | * | ||
241 | * Allocators must define at least: | ||
242 | * | ||
243 | * kmem_cache_alloc() | ||
244 | * __kmalloc() | ||
245 | * kmalloc() | ||
246 | * | ||
247 | * Those wishing to support NUMA must also define: | ||
248 | * | ||
249 | * kmem_cache_alloc_node() | ||
250 | * kmalloc_node() | ||
251 | * | ||
252 | * See each allocator definition file for additional comments and | ||
253 | * implementation notes. | ||
254 | */ | ||
255 | #ifdef CONFIG_SLUB | ||
256 | #include <linux/slub_def.h> | ||
257 | #elif defined(CONFIG_SLOB) | ||
258 | #include <linux/slob_def.h> | ||
259 | #else | ||
260 | #include <linux/slab_def.h> | ||
261 | #endif | ||
262 | |||
263 | /** | 335 | /** |
264 | * kmalloc_array - allocate memory for an array. | 336 | * kmalloc_array - allocate memory for an array. |
265 | * @n: number of elements. | 337 | * @n: number of elements. |