author      Christoph Lameter <cl@linux.com>        2010-08-20 13:37:15 -0400
committer   Pekka Enberg <penberg@kernel.org>       2010-10-02 03:24:27 -0400
commit      51df1142816e469173889fb6d6dc810be9b9e022 (patch)
tree        e2827e87486675b514c68f06d67ac5980cd6ceb1 /include/linux/slub_def.h
parent      6c182dc0de26ef97efb6a97a8deab074833764e7 (diff)
slub: Dynamically size kmalloc cache allocations
kmalloc caches are statically defined and may take up a lot of space just because the node array has to be dimensioned for the largest node count supported.

This patch makes the size of the kmem_cache structure dynamic throughout by creating a kmem_cache slab cache for the kmem_cache objects. The bootstrap occurs by allocating the initial one or two kmem_cache objects from the page allocator.

C2->C3:
- Fix various issues indicated by David
- Make create_kmalloc_cache return a kmem_cache * pointer.

Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
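To make the space argument concrete, here is a minimal, hypothetical userspace sketch (not code from this patch; MAX_SUPPORTED_NODES, struct fake_kmem_cache, and cache_size_for() are made-up stand-ins for the kernel's node-array sizing) showing why dimensioning the per-node array for the largest supported node count wastes memory, and how sizing each object for the actual node count avoids it:

	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-in for the largest node count a kernel build might support. */
	#define MAX_SUPPORTED_NODES 1024

	/* Simplified stand-in for struct kmem_cache: fixed metadata followed
	 * by a per-node pointer array dimensioned for the worst case. */
	struct fake_kmem_cache {
		unsigned long flags;
		int object_size;
		const char *name;
		void *node[MAX_SUPPORTED_NODES];	/* one slot per possible node */
	};

	/* Size actually needed when only 'nodes' NUMA nodes are present:
	 * everything up to node[] plus 'nodes' pointer slots. */
	static size_t cache_size_for(int nodes)
	{
		return offsetof(struct fake_kmem_cache, node) + nodes * sizeof(void *);
	}

	int main(void)
	{
		int nodes = 2;	/* e.g. a small two-node machine */

		/* Static definition: every cache carries the worst-case array. */
		printf("static  kmem_cache size: %zu bytes\n",
		       sizeof(struct fake_kmem_cache));

		/* Dynamic allocation: size the object for the nodes we have. */
		struct fake_kmem_cache *c = calloc(1, cache_size_for(nodes));
		if (!c)
			return 1;
		c->name = "kmalloc-64";
		printf("dynamic kmem_cache size: %zu bytes\n", cache_size_for(nodes));
		free(c);
		return 0;
	}

With the statically dimensioned array every kmalloc cache pays for the worst-case slots; allocating the objects from a slab cache (after the page-allocator bootstrap) lets each cache occupy only the bytes its real node count needs.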
Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r--  include/linux/slub_def.h | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 9f63538928c0..a6c43ec6a4a5 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -139,19 +139,16 @@ struct kmem_cache {
 
 #ifdef CONFIG_ZONE_DMA
 #define SLUB_DMA __GFP_DMA
-/* Reserve extra caches for potential DMA use */
-#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT)
 #else
 /* Disable DMA functionality */
 #define SLUB_DMA (__force gfp_t)0
-#define KMALLOC_CACHES SLUB_PAGE_SHIFT
 #endif
 
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
+extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -216,7 +213,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 	if (index == 0)
 		return NULL;
 
-	return &kmalloc_caches[index];
+	return kmalloc_caches[index];
 }
 
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
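The comment kept in the first hunk explains the layout: slot x of the array serves allocations of up to 2^x bytes, and since the caches are now created dynamically the array holds pointers, so kmalloc_slab() returns kmalloc_caches[index] rather than &kmalloc_caches[index]. Below is a small, hypothetical userspace model of that lookup (struct demo_cache, size_to_index(), and the demo caches are fabricated; size_to_index() is only a simplification of the kernel's kmalloc_index(), which also has special cases for 96 and 192 bytes):

	#include <stddef.h>
	#include <stdio.h>

	#define SLUB_PAGE_SHIFT_DEMO 13	/* stand-in for SLUB_PAGE_SHIFT */

	struct demo_cache {
		const char *name;
		unsigned int object_size;
	};

	/* Array of pointers to caches, mirroring the new declaration:
	 * extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT]; */
	static struct demo_cache *demo_kmalloc_caches[SLUB_PAGE_SHIFT_DEMO];

	/* Map a size to the index of the smallest power-of-two cache that
	 * fits it (simplified stand-in for kmalloc_index()). */
	static int size_to_index(size_t size)
	{
		int index = 3;	/* smallest demo cache: 2^3 = 8 bytes */

		while ((1UL << index) < size)
			index++;
		return index;
	}

	int main(void)
	{
		static struct demo_cache c64 = { "demo-kmalloc-64", 64 };
		static struct demo_cache c128 = { "demo-kmalloc-128", 128 };

		demo_kmalloc_caches[6] = &c64;
		demo_kmalloc_caches[7] = &c128;

		/* A 100-byte request rounds up to the 128-byte cache (index 7). */
		struct demo_cache *c = demo_kmalloc_caches[size_to_index(100)];
		printf("%s serves 100-byte allocations\n", c->name);
		return 0;
	}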