path: root/include/linux/slub_def.h
author     Christoph Lameter <cl@linux.com>        2013-01-10 14:14:19 -0500
committer  Pekka Enberg <penberg@kernel.org>       2013-02-01 05:32:07 -0500
commit     95a05b428cc675694321c8f762591984f3fd2b1e (patch)
tree       3a74205955201dd5e1abb0a85104d95cafa49df6 /include/linux/slub_def.h
parent     6a67368c36e2c0c2578ba62f6264ab739af08cce (diff)
slab: Common constants for kmalloc boundaries
Standardize the constants that describe the smallest and largest object kept
in the kmalloc arrays for SLAB and SLUB.

Differentiate between the maximum size for which a slab cache is used
(KMALLOC_MAX_CACHE_SIZE) and the maximum allocatable size
(KMALLOC_MAX_SIZE, KMALLOC_MAX_ORDER).

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
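For context, a minimal sketch of how the shared names relate to the SLUB-private
constants removed below. It is illustrative only, derived from this diff
(SLUB_MAX_SIZE was 2 * PAGE_SIZE and the cache array had SLUB_PAGE_SHIFT =
PAGE_SHIFT + 2 slots); the authoritative definitions live in <linux/slab.h>
and differ between SLAB and SLUB.

/* Illustrative sketch only -- not the real <linux/slab.h> definitions. */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)		/* old SLUB_PAGE_SHIFT - 1 */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)	/* 2 * PAGE_SIZE, the old SLUB_MAX_SIZE */

/*
 * Sizes up to KMALLOC_MAX_CACHE_SIZE are served from the kmalloc_caches[]
 * array (KMALLOC_SHIFT_HIGH + 1 entries); larger requests bypass the slab
 * caches via kmalloc_large() and are bounded by KMALLOC_MAX_SIZE and
 * KMALLOC_MAX_ORDER, the limits of the page allocator.
 */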
Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r--  include/linux/slub_def.h  19
1 file changed, 3 insertions(+), 16 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 99c3e05ff1f0..032028ef9a34 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -115,19 +115,6 @@ struct kmem_cache {
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-/*
- * Maximum kmalloc object size handled by SLUB. Larger object allocations
- * are passed through to the page allocator. The page allocator "fastpath"
- * is relatively slow so we need this value sufficiently high so that
- * performance critical objects are allocated through the SLUB fastpath.
- *
- * This should be dropped to PAGE_SIZE / 2 once the page allocator
- * "fastpath" becomes competitive with the slab allocator fastpaths.
- */
-#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
-
-#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
-
 #ifdef CONFIG_ZONE_DMA
 #define SLUB_DMA __GFP_DMA
 #else
@@ -139,7 +126,7 @@ struct kmem_cache {
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
+extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 
 /*
  * Find the slab cache for a given combination of allocation flags and size.
@@ -211,7 +198,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
-		if (size > SLUB_MAX_SIZE)
+		if (size > KMALLOC_MAX_CACHE_SIZE)
 			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
@@ -247,7 +234,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
+		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
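For illustration, a hedged caller-side sketch of what the changed fast paths
mean under SLUB, assuming KMALLOC_MAX_CACHE_SIZE keeps the old SLUB_MAX_SIZE
value of 2 * PAGE_SIZE. The example function is hypothetical and not part of
the patch; only the constant that picks between the two paths changes, the
kmalloc() API itself is untouched.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/slab.h>

/* Hypothetical helper, for illustration only. */
static int kmalloc_boundary_example(void)
{
	/* Constant size <= KMALLOC_MAX_CACHE_SIZE: served from a kmalloc cache. */
	void *small = kmalloc(128, GFP_KERNEL);

	/* Constant size > KMALLOC_MAX_CACHE_SIZE: routed to kmalloc_large(). */
	void *large = kmalloc(4 * PAGE_SIZE, GFP_KERNEL);

	if (!small || !large) {
		kfree(small);	/* kfree(NULL) is a no-op */
		kfree(large);
		return -ENOMEM;
	}

	kfree(large);
	kfree(small);
	return 0;
}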