author		Christoph Lameter <cl@linux.com>	2013-01-10 14:14:18 -0500
committer	Pekka Enberg <penberg@kernel.org>	2013-02-01 05:32:06 -0500
commit		e33660165c901d18e7d3df2290db070d3e4b46df (patch)
tree		fac7e77d6a57d244262e73213976ae11519c3d5a /include/linux/slab_def.h
parent		ce6a50263d4ddeba1f0d08f16716a82770c03690 (diff)
slab: Use common kmalloc_index/kmalloc_size functions
Make slab use the common functions. We can get rid of a lot of old ugly stuff as a result, among it the sizes array, the weird include/linux/kmalloc_sizes.h file, and some pretty bad #include statements in slab_def.h.

The one thing that is different in slab is that the 32 byte cache will also be created for arches that have page sizes larger than 4K. There are numerous smaller allocations that SLOB and SLUB can handle better because of their support for smaller allocation sizes, so let's keep the 32 byte slab also for arches with > 4K pages.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'include/linux/slab_def.h')
-rw-r--r--	include/linux/slab_def.h	47
1 file changed, 12 insertions(+), 35 deletions(-)
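Before the diff, a minimal userspace sketch of the size-to-index mapping that the common kmalloc_index() helper performs. This is illustrative only: the real helper in include/linux/slab.h is evaluated at compile time for constant sizes and also handles the odd 96- and 192-byte caches, which this sketch skips.

/*
 * Illustrative sketch (not kernel code): map an allocation size to the
 * index of the smallest power-of-two general cache that can hold it.
 */
#include <stddef.h>
#include <stdio.h>

static int kmalloc_index_sketch(size_t size)
{
	int i = 3;			/* index 3 == 8-byte cache (2^3) */

	if (!size)
		return 0;		/* real callers return ZERO_SIZE_PTR instead */

	while (((size_t)1 << i) < size)	/* smallest power of two >= size */
		i++;
	return i;
}

int main(void)
{
	/* A 100-byte request lands in the 128-byte cache: index 7 (2^7). */
	printf("index for 100 bytes: %d\n", kmalloc_index_sketch(100));
	return 0;
}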
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 8bb6e0eaf3c6..e0f30ef9525d 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -11,8 +11,6 @@
  */
 
 #include <linux/init.h>
-#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
-#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
 
 /*
@@ -104,15 +102,8 @@ struct kmem_cache {
  */
 };
 
-/* Size description struct for general caches. */
-struct cache_sizes {
-	size_t cs_size;
-	struct kmem_cache *cs_cachep;
-#ifdef CONFIG_ZONE_DMA
-	struct kmem_cache *cs_dmacachep;
-#endif
-};
-extern struct cache_sizes malloc_sizes[];
+extern struct kmem_cache *kmalloc_caches[PAGE_SHIFT + MAX_ORDER];
+extern struct kmem_cache *kmalloc_dma_caches[PAGE_SHIFT + MAX_ORDER];
 
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
@@ -133,26 +124,19 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 	void *ret;
 
 	if (__builtin_constant_p(size)) {
-		int i = 0;
+		int i;
 
 		if (!size)
 			return ZERO_SIZE_PTR;
 
-#define CACHE(x) \
-		if (size <= x) \
-			goto found; \
-		else \
-			i++;
-#include <linux/kmalloc_sizes.h>
-#undef CACHE
-		return NULL;
-found:
+		i = kmalloc_index(size);
+
 #ifdef CONFIG_ZONE_DMA
 		if (flags & GFP_DMA)
-			cachep = malloc_sizes[i].cs_dmacachep;
+			cachep = kmalloc_dma_caches[i];
 		else
 #endif
-			cachep = malloc_sizes[i].cs_cachep;
+			cachep = kmalloc_caches[i];
 
 		ret = kmem_cache_alloc_trace(cachep, flags, size);
 
@@ -186,26 +170,19 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *cachep;
 
 	if (__builtin_constant_p(size)) {
-		int i = 0;
+		int i;
 
 		if (!size)
 			return ZERO_SIZE_PTR;
 
-#define CACHE(x) \
-		if (size <= x) \
-			goto found; \
-		else \
-			i++;
-#include <linux/kmalloc_sizes.h>
-#undef CACHE
-		return NULL;
-found:
+		i = kmalloc_index(size);
+
 #ifdef CONFIG_ZONE_DMA
 		if (flags & GFP_DMA)
-			cachep = malloc_sizes[i].cs_dmacachep;
+			cachep = kmalloc_dma_caches[i];
 		else
 #endif
-			cachep = malloc_sizes[i].cs_cachep;
+			cachep = kmalloc_caches[i];
 
 		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
 	}
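A side note on the new array bound: kmalloc_caches[] and kmalloc_dma_caches[] are sized PAGE_SHIFT + MAX_ORDER because the largest general cache has index PAGE_SHIFT + MAX_ORDER - 1. The small sketch below just prints the resulting numbers; the configuration values are assumed (a typical x86 setup with 4K pages and MAX_ORDER of 11) and are not part of the patch.

#include <stdio.h>

int main(void)
{
	const int page_shift = 12;	/* assumed: 4K pages */
	const int max_order  = 11;	/* assumed: default x86 MAX_ORDER at the time */
	const int entries    = page_shift + max_order;

	printf("array entries: %d (indices 0..%d)\n", entries, entries - 1);
	/* index 22 -> 2^22 bytes == 4 MiB, the largest indexed cache */
	printf("largest indexed cache: %lu bytes\n", 1UL << (entries - 1));
	return 0;
}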