about summary refs log tree commit diff stats
path: root/include
diff options
context:
space:
mode:
author    Christoph Lameter <cl@linux.com>  2013-06-14 15:55:13 -0400
committer Pekka Enberg <penberg@kernel.org>  2013-06-18 11:34:43 -0400
commit    069e2b351de67e7a837b15b3d26c65c19b790cc3 (patch)
tree      136808eeef17f3c81f634f485ea1fb79b85763a8 /include
parent    d0d04b78f403b0bcfe03315e16b50d196610720d (diff)
slob: Rework #ifdeffery in slab.h
Make the SLOB specific stuff harmonize more with the way the other
allocators do it. Create the typical kmalloc constants for that purpose.
SLOB does not support it but the constants help us avoid #ifdefs.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'include')
-rw-r--r--  include/linux/slab.h  39
1 file changed, 28 insertions(+), 11 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 0c621752caa6..9690c14eb7fb 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -169,11 +169,7 @@ struct kmem_cache {
169 struct list_head list; /* List of all slab caches on the system */ 169 struct list_head list; /* List of all slab caches on the system */
170}; 170};
171 171
172#define KMALLOC_MAX_SIZE (1UL << 30) 172#endif /* CONFIG_SLOB */
173
174#include <linux/slob_def.h>
175
176#else /* CONFIG_SLOB */
177 173
178/* 174/*
179 * Kmalloc array related definitions 175 * Kmalloc array related definitions
@@ -195,7 +191,9 @@ struct kmem_cache {
195#ifndef KMALLOC_SHIFT_LOW 191#ifndef KMALLOC_SHIFT_LOW
196#define KMALLOC_SHIFT_LOW 5 192#define KMALLOC_SHIFT_LOW 5
197#endif 193#endif
198#else 194#endif
195
196#ifdef CONFIG_SLUB
199/* 197/*
200 * SLUB allocates up to order 2 pages directly and otherwise 198 * SLUB allocates up to order 2 pages directly and otherwise
201 * passes the request to the page allocator. 199 * passes the request to the page allocator.
@@ -207,6 +205,19 @@ struct kmem_cache {
207#endif 205#endif
208#endif 206#endif
209 207
208#ifdef CONFIG_SLOB
209/*
210 * SLOB passes all page size and larger requests to the page allocator.
211 * No kmalloc array is necessary since objects of different sizes can
212 * be allocated from the same page.
213 */
214#define KMALLOC_SHIFT_MAX 30
215#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
216#ifndef KMALLOC_SHIFT_LOW
217#define KMALLOC_SHIFT_LOW 3
218#endif
219#endif
220
210/* Maximum allocatable size */ 221/* Maximum allocatable size */
211#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX) 222#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
212/* Maximum size for which we actually use a slab cache */ 223/* Maximum size for which we actually use a slab cache */
@@ -221,6 +232,7 @@ struct kmem_cache {
221#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW) 232#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
222#endif 233#endif
223 234
235#ifndef CONFIG_SLOB
224extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1]; 236extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
225#ifdef CONFIG_ZONE_DMA 237#ifdef CONFIG_ZONE_DMA
226extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1]; 238extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
@@ -275,13 +287,18 @@ static __always_inline int kmalloc_index(size_t size)
275 /* Will never be reached. Needed because the compiler may complain */ 287 /* Will never be reached. Needed because the compiler may complain */
276 return -1; 288 return -1;
277} 289}
290#endif /* !CONFIG_SLOB */
278 291
279#ifdef CONFIG_SLAB 292#ifdef CONFIG_SLAB
280#include <linux/slab_def.h> 293#include <linux/slab_def.h>
281#elif defined(CONFIG_SLUB) 294#endif
295
296#ifdef CONFIG_SLUB
282#include <linux/slub_def.h> 297#include <linux/slub_def.h>
283#else 298#endif
284#error "Unknown slab allocator" 299
300#ifdef CONFIG_SLOB
301#include <linux/slob_def.h>
285#endif 302#endif
286 303
287/* 304/*
@@ -291,6 +308,7 @@ static __always_inline int kmalloc_index(size_t size)
291 */ 308 */
292static __always_inline int kmalloc_size(int n) 309static __always_inline int kmalloc_size(int n)
293{ 310{
311#ifndef CONFIG_SLOB
294 if (n > 2) 312 if (n > 2)
295 return 1 << n; 313 return 1 << n;
296 314
@@ -299,10 +317,9 @@ static __always_inline int kmalloc_size(int n)
299 317
300 if (n == 2 && KMALLOC_MIN_SIZE <= 64) 318 if (n == 2 && KMALLOC_MIN_SIZE <= 64)
301 return 192; 319 return 192;
302 320#endif
303 return 0; 321 return 0;
304} 322}
305#endif /* !CONFIG_SLOB */
306 323
307/* 324/*
308 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment. 325 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.