author     Christoph Lameter <cl@linux.com>          2013-01-10 14:14:19 -0500
committer  Pekka Enberg <penberg@kernel.org>         2013-02-01 05:32:08 -0500
commit     2c59dd6544212faa5ce761920d2251f4152f408d (patch)
tree       c2547eb50205b72368e0b4758fc7c9a0111238a5 /include/linux/slub_def.h
parent     9e5e8deca74603357626471a9b44f05dea9e32b1 (diff)
slab: Common Kmalloc cache determination
Extract the optimized lookup functions from slub and put them into
slab_common.c. Then make slab use these functions as well.

Joonsoo notes that this fixes some issues with constant folding which
also reduces the code size for slub.

https://lkml.org/lkml/2012/10/20/82

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
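Why constant folding matters here: when the requested size is a compile-time constant, kmalloc_index() collapses to a fixed index, so the inline kmalloc() changed in the hunks below reduces to a kmem_cache_alloc_trace() call on a single kmalloc_caches[] slot. A minimal caller-side sketch of that effect (struct foo and alloc_foo() are hypothetical, not part of this patch):

#include <linux/slab.h>

/* Hypothetical structure, used only to give kmalloc() a constant size. */
struct foo {
	int id;
	char name[32];
};

static struct foo *alloc_foo(gfp_t flags)
{
	/*
	 * sizeof(struct foo) is a compile-time constant, so
	 * __builtin_constant_p(size) is true and kmalloc_index() folds to
	 * a constant.  For the common !GFP_DMA case the call then becomes
	 * a direct kmem_cache_alloc_trace(kmalloc_caches[<const>], ...)
	 * with no runtime size-to-cache lookup.
	 */
	return kmalloc(sizeof(struct foo), flags);
}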
Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r--  include/linux/slub_def.h  41
1 file changed, 10 insertions, 31 deletions
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 3701896f7f8a..16341e5316de 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -115,29 +115,6 @@ struct kmem_cache {
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-#ifdef CONFIG_ZONE_DMA
-#define SLUB_DMA __GFP_DMA
-#else
-/* Disable DMA functionality */
-#define SLUB_DMA (__force gfp_t)0
-#endif
-
-/*
- * Find the slab cache for a given combination of allocation flags and size.
- *
- * This ought to end up with a global pointer to the right cache
- * in kmalloc_caches.
- */
-static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
-{
-	int index = kmalloc_index(size);
-
-	if (index == 0)
-		return NULL;
-
-	return kmalloc_caches[index];
-}
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
@@ -195,13 +172,14 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 		if (size > KMALLOC_MAX_CACHE_SIZE)
 			return kmalloc_large(size, flags);
 
-		if (!(flags & SLUB_DMA)) {
-			struct kmem_cache *s = kmalloc_slab(size);
+		if (!(flags & GFP_DMA)) {
+			int index = kmalloc_index(size);
 
-			if (!s)
+			if (!index)
 				return ZERO_SIZE_PTR;
 
-			return kmem_cache_alloc_trace(s, flags, size);
+			return kmem_cache_alloc_trace(kmalloc_caches[index],
+					flags, size);
 		}
 	}
 	return __kmalloc(size, flags);
@@ -228,13 +206,14 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & SLUB_DMA)) {
-		struct kmem_cache *s = kmalloc_slab(size);
+		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
+		int index = kmalloc_index(size);
 
-		if (!s)
+		if (!index)
 			return ZERO_SIZE_PTR;
 
-		return kmem_cache_alloc_node_trace(s, flags, node, size);
+		return kmem_cache_alloc_node_trace(kmalloc_caches[index],
+					flags, node, size);
 	}
 	return __kmalloc_node(size, flags, node);
 }
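For sizes that are not compile-time constants, the cache lookup now lives in common code rather than in this header. A rough sketch of its shape only, modeled on the kmalloc_slab() removed in the first hunk above (the actual helper added to mm/slab_common.c is not part of this diff and also has to pick a DMA cache when GFP_DMA is set):

#include <linux/slab.h>

/*
 * Illustration modeled on the removed slub helper, not the exact code
 * added by this patch; callers are expected to have already checked
 * size <= KMALLOC_MAX_CACHE_SIZE.
 */
static struct kmem_cache *example_kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;	/* size == 0: callers return ZERO_SIZE_PTR */

	return kmalloc_caches[index];
}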