aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorChristoph Lameter <cl@linux.com>2013-01-10 14:14:19 -0500
committerPekka Enberg <penberg@kernel.org>2013-02-01 05:32:07 -0500
commit95a05b428cc675694321c8f762591984f3fd2b1e (patch)
tree3a74205955201dd5e1abb0a85104d95cafa49df6 /include
parent6a67368c36e2c0c2578ba62f6264ab739af08cce (diff)
slab: Common constants for kmalloc boundaries
Standardize the constants that describe the smallest and largest object kept in the kmalloc arrays for SLAB and SLUB. Differentiate between the maximum size for which a slab cache is used (KMALLOC_MAX_CACHE_SIZE) and the maximum allocatable size (KMALLOC_MAX_SIZE, KMALLOC_MAX_ORDER). Signed-off-by: Christoph Lameter <cl@linux.com> Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'include')
-rw-r--r--include/linux/slab.h34
-rw-r--r--include/linux/slub_def.h19
2 files changed, 27 insertions, 26 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index c97fe92532d1..c01780540054 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -163,7 +163,12 @@ struct kmem_cache {
163#else /* CONFIG_SLOB */ 163#else /* CONFIG_SLOB */
164 164
165/* 165/*
166 * The largest kmalloc size supported by the slab allocators is 166 * Kmalloc array related definitions
167 */
168
169#ifdef CONFIG_SLAB
170/*
171 * The largest kmalloc size supported by the SLAB allocators is
167 * 32 megabyte (2^25) or the maximum allocatable page order if that is 172 * 32 megabyte (2^25) or the maximum allocatable page order if that is
168 * less than 32 MB. 173 * less than 32 MB.
169 * 174 *
@@ -173,9 +178,24 @@ struct kmem_cache {
173 */ 178 */
174#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \ 179#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
175 (MAX_ORDER + PAGE_SHIFT - 1) : 25) 180 (MAX_ORDER + PAGE_SHIFT - 1) : 25)
181#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
182#define KMALLOC_SHIFT_LOW 5
183#else
184/*
185 * SLUB allocates up to order 2 pages directly and otherwise
186 * passes the request to the page allocator.
187 */
188#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
189#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
190#define KMALLOC_SHIFT_LOW 3
191#endif
176 192
177#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_HIGH) 193/* Maximum allocatable size */
178#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT) 194#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
195/* Maximum size for which we actually use a slab cache */
196#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
197/* Maximum order allocatable via the slab allocator */
198#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
179 199
180/* 200/*
181 * Kmalloc subsystem. 201 * Kmalloc subsystem.
@@ -183,15 +203,9 @@ struct kmem_cache {
183#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8 203#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
184#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN 204#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
185#else 205#else
186#ifdef CONFIG_SLAB 206#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
187#define KMALLOC_MIN_SIZE 32
188#else
189#define KMALLOC_MIN_SIZE 8
190#endif
191#endif 207#endif
192 208
193#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
194
195/* 209/*
196 * Figure out which kmalloc slab an allocation of a certain size 210 * Figure out which kmalloc slab an allocation of a certain size
197 * belongs to. 211 * belongs to.
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 99c3e05ff1f0..032028ef9a34 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -115,19 +115,6 @@ struct kmem_cache {
115 struct kmem_cache_node *node[MAX_NUMNODES]; 115 struct kmem_cache_node *node[MAX_NUMNODES];
116}; 116};
117 117
118/*
119 * Maximum kmalloc object size handled by SLUB. Larger object allocations
120 * are passed through to the page allocator. The page allocator "fastpath"
121 * is relatively slow so we need this value sufficiently high so that
122 * performance critical objects are allocated through the SLUB fastpath.
123 *
124 * This should be dropped to PAGE_SIZE / 2 once the page allocator
125 * "fastpath" becomes competitive with the slab allocator fastpaths.
126 */
127#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
128
129#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
130
131#ifdef CONFIG_ZONE_DMA 118#ifdef CONFIG_ZONE_DMA
132#define SLUB_DMA __GFP_DMA 119#define SLUB_DMA __GFP_DMA
133#else 120#else
@@ -139,7 +126,7 @@ struct kmem_cache {
139 * We keep the general caches in an array of slab caches that are used for 126 * We keep the general caches in an array of slab caches that are used for
140 * 2^x bytes of allocations. 127 * 2^x bytes of allocations.
141 */ 128 */
142extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT]; 129extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
143 130
144/* 131/*
145 * Find the slab cache for a given combination of allocation flags and size. 132 * Find the slab cache for a given combination of allocation flags and size.
@@ -211,7 +198,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
211static __always_inline void *kmalloc(size_t size, gfp_t flags) 198static __always_inline void *kmalloc(size_t size, gfp_t flags)
212{ 199{
213 if (__builtin_constant_p(size)) { 200 if (__builtin_constant_p(size)) {
214 if (size > SLUB_MAX_SIZE) 201 if (size > KMALLOC_MAX_CACHE_SIZE)
215 return kmalloc_large(size, flags); 202 return kmalloc_large(size, flags);
216 203
217 if (!(flags & SLUB_DMA)) { 204 if (!(flags & SLUB_DMA)) {
@@ -247,7 +234,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
247static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) 234static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
248{ 235{
249 if (__builtin_constant_p(size) && 236 if (__builtin_constant_p(size) &&
250 size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) { 237 size <= KMALLOC_MAX_CACHE_SIZE && !(flags & SLUB_DMA)) {
251 struct kmem_cache *s = kmalloc_slab(size); 238 struct kmem_cache *s = kmalloc_slab(size);
252 239
253 if (!s) 240 if (!s)