Diffstat (limited to 'include/linux/slub_def.h')
 include/linux/slub_def.h | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 2f5c16b1aacd..986e09dcfd8f 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -121,10 +121,23 @@ struct kmem_cache {
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
 /*
+ * Maximum kmalloc object size handled by SLUB. Larger object allocations
+ * are passed through to the page allocator. The page allocator "fastpath"
+ * is relatively slow so we need this value sufficiently high so that
+ * performance critical objects are allocated through the SLUB fastpath.
+ *
+ * This should be dropped to PAGE_SIZE / 2 once the page allocator
+ * "fastpath" becomes competitive with the slab allocator fastpaths.
+ */
+#define SLUB_MAX_SIZE (PAGE_SIZE)
+
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)
+
+/*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
+extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -212,7 +225,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE)
+		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
@@ -234,7 +247,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
+		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
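
Illustration (not part of the patch): below is a minimal userspace sketch of the dispatch rule that the new constants name. PAGE_SHIFT is pinned to 12 (a 4 KiB page) for the demo, and slab_index() is a hypothetical stand-in for the kernel's kmalloc_index() machinery; both are assumptions for illustration, not kernel API.

#include <stdio.h>

#define PAGE_SHIFT      12                    /* assumption: 4 KiB pages for the demo */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define SLUB_MAX_SIZE   (PAGE_SIZE)           /* largest kmalloc served from slab caches */
#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)      /* number of entries in kmalloc_caches[] */

/*
 * Hypothetical stand-in for the kernel's kmalloc_index(): index of the
 * smallest power-of-two general cache that can hold 'size' bytes.
 */
static int slab_index(unsigned long size)
{
	int i = 0;

	while ((1UL << i) < size)
		i++;
	return i;
}

int main(void)
{
	unsigned long sizes[] = { 8, 100, 4096, 4097, 8192 };
	int n = sizeof(sizes) / sizeof(sizes[0]);

	for (int i = 0; i < n; i++) {
		if (sizes[i] > SLUB_MAX_SIZE)
			/* too big for the slab fastpath: handed to the page allocator */
			printf("%5lu bytes -> kmalloc_large() / page allocator\n",
			       sizes[i]);
		else
			printf("%5lu bytes -> kmalloc_caches[%d]\n",
			       sizes[i], slab_index(sizes[i]));
	}
	return 0;
}

In this model, every size up to SLUB_MAX_SIZE maps to an index below SLUB_PAGE_SHIFT (4096 bytes lands at index 12 with SLUB_PAGE_SHIFT = 13), which is why the kmalloc_caches[] array has exactly SLUB_PAGE_SHIFT entries. Naming the cutoff also means a later change, such as the PAGE_SIZE / 2 drop the comment anticipates, only has to touch the defines rather than every size comparison.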