-rw-r--r--  include/linux/slab.h      34
-rw-r--r--  include/linux/slub_def.h  19
-rw-r--r--  mm/slub.c                 22
3 files changed, 38 insertions, 37 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index c97fe92532d1..c01780540054 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -163,7 +163,12 @@ struct kmem_cache {
 #else /* CONFIG_SLOB */
 
 /*
- * The largest kmalloc size supported by the slab allocators is
+ * Kmalloc array related definitions
+ */
+
+#ifdef CONFIG_SLAB
+/*
+ * The largest kmalloc size supported by the SLAB allocators is
  * 32 megabyte (2^25) or the maximum allocatable page order if that is
  * less than 32 MB.
  *
@@ -173,9 +178,24 @@ struct kmem_cache {
  */
 #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
 				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
+#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
+#define KMALLOC_SHIFT_LOW 5
+#else
+/*
+ * SLUB allocates up to order 2 pages directly and otherwise
+ * passes the request to the page allocator.
+ */
+#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
+#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_LOW 3
+#endif
 
-#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_HIGH)
-#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
+/* Maximum allocatable size */
+#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
+/* Maximum size for which we actually use a slab cache */
+#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
+/* Maximum order allocatable via the slab allocator */
+#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
 
 /*
  * Kmalloc subsystem.
@@ -183,15 +203,9 @@ struct kmem_cache {
 #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
 #else
-#ifdef CONFIG_SLAB
-#define KMALLOC_MIN_SIZE 32
-#else
-#define KMALLOC_MIN_SIZE 8
-#endif
+#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
 #endif
 
-#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
-
 /*
  * Figure out which kmalloc slab an allocation of a certain size
  * belongs to.
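
Illustration (not part of the patch): with the common constants above, a SLUB build resolves the kmalloc limits purely from PAGE_SHIFT and MAX_ORDER. The sketch below assumes example values of PAGE_SHIFT = 12 and MAX_ORDER = 11, which are typical but not asserted anywhere in this patch.

#include <stdio.h>

/* Assumed example configuration values, not taken from any particular kernel. */
#define PAGE_SHIFT 12
#define MAX_ORDER  11

/* SLUB variants of the new common definitions from include/linux/slab.h. */
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)         /* 13 -> 8 KiB slab-cache limit */
#define KMALLOC_SHIFT_MAX  (MAX_ORDER + PAGE_SHIFT) /* 23 -> 8 MiB overall limit */
#define KMALLOC_SHIFT_LOW  3                        /* 8-byte smallest cache */

#define KMALLOC_MAX_SIZE       (1UL << KMALLOC_SHIFT_MAX)
#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
#define KMALLOC_MAX_ORDER      (KMALLOC_SHIFT_MAX - PAGE_SHIFT)

int main(void)
{
	printf("largest slab-cache kmalloc: %lu bytes\n", KMALLOC_MAX_CACHE_SIZE);
	printf("largest kmalloc overall:    %lu bytes (order %d)\n",
	       KMALLOC_MAX_SIZE, KMALLOC_MAX_ORDER);
	printf("smallest kmalloc cache:     %d bytes\n", 1 << KMALLOC_SHIFT_LOW);
	return 0;
}
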
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 99c3e05ff1f0..032028ef9a34 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -115,19 +115,6 @@ struct kmem_cache {
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-/*
- * Maximum kmalloc object size handled by SLUB. Larger object allocations
- * are passed through to the page allocator. The page allocator "fastpath"
- * is relatively slow so we need this value sufficiently high so that
- * performance critical objects are allocated through the SLUB fastpath.
- *
- * This should be dropped to PAGE_SIZE / 2 once the page allocator
- * "fastpath" becomes competitive with the slab allocator fastpaths.
- */
-#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
-
-#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
-
 #ifdef CONFIG_ZONE_DMA
 #define SLUB_DMA __GFP_DMA
 #else
@@ -139,7 +126,7 @@ struct kmem_cache {
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
+extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 
 /*
  * Find the slab cache for a given combination of allocation flags and size.
@@ -211,7 +198,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
-		if (size > SLUB_MAX_SIZE)
+		if (size > KMALLOC_MAX_CACHE_SIZE)
 			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
@@ -247,7 +234,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
+		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
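
Illustration (not part of the patch): the inline kmalloc() fast path above keeps compile-time-constant sizes up to KMALLOC_MAX_CACHE_SIZE in the kmalloc_caches[] slab caches and hands anything larger to kmalloc_large(), i.e. the page allocator. A minimal userspace sketch of that size check, assuming a SLUB configuration with PAGE_SHIFT = 12; pick_route() is an illustrative helper, not a kernel function.

#include <stdio.h>
#include <stddef.h>

#define KMALLOC_SHIFT_HIGH     13   /* SLUB: PAGE_SHIFT + 1, assumed PAGE_SHIFT = 12 */
#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)

/* Mirrors the threshold check in the inline kmalloc() fast path. */
static const char *pick_route(size_t size)
{
	if (size > KMALLOC_MAX_CACHE_SIZE)
		return "kmalloc_large() -> page allocator";
	return "kmalloc_caches[] slab cache";
}

int main(void)
{
	size_t sizes[] = { 8, 192, 4096, 8192, 16384 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%6zu bytes -> %s\n", sizes[i], pick_route(sizes[i]));
	return 0;
}
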
diff --git a/mm/slub.c b/mm/slub.c
index ba2ca53f6c3a..d0f72ee06310 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2775,7 +2775,7 @@ init_kmem_cache_node(struct kmem_cache_node *n)
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 {
 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
-			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
+			KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
 
 	/*
 	 * Must align to double word boundary for the double cmpxchg
@@ -3174,11 +3174,11 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
  * Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
+struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
+static struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
 #endif
 
 static int __init setup_slub_min_order(char *str)
@@ -3280,7 +3280,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -3316,7 +3316,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE)) {
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = kmalloc_large_node(size, flags, node);
 
 		trace_kmalloc_node(_RET_IP_, ret,
@@ -3721,7 +3721,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
 		kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
 		caches++;
 	}
@@ -3739,7 +3739,7 @@ void __init kmem_cache_init(void)
 		BUG_ON(!kmalloc_caches[2]->name);
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
 		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
 
 		BUG_ON(!s);
@@ -3751,7 +3751,7 @@ void __init kmem_cache_init(void)
 #endif
 
 #ifdef CONFIG_ZONE_DMA
-	for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
+	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
 		struct kmem_cache *s = kmalloc_caches[i];
 
 		if (s && s->size) {
@@ -3930,7 +3930,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3953,7 +3953,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE)) {
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = kmalloc_large_node(size, gfpflags, node);
 
 		trace_kmalloc_node(caller, ret,
@@ -4312,7 +4312,7 @@ static void resiliency_test(void)
 {
 	u8 *p;
 
-	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
+	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
 
 	printk(KERN_ERR "SLUB resiliency testing\n");
 	printk(KERN_ERR "-----------------------\n");
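
Illustration (not part of the patch): the kmem_cache_init() loops above now run inclusively up to KMALLOC_SHIFT_HIGH because kmalloc_caches[] is indexed by the power-of-two shift of the object size, which is why the array is declared with KMALLOC_SHIFT_HIGH + 1 slots. A rough userspace sketch of that indexing, assuming the SLUB values KMALLOC_SHIFT_LOW = 3 and KMALLOC_SHIFT_HIGH = 13; size_to_index() is an illustrative stand-in for the kernel's size-to-cache lookup, not its actual implementation.

#include <stdio.h>

#define KMALLOC_SHIFT_LOW  3
#define KMALLOC_SHIFT_HIGH 13

/* One slot per shift, inclusive of KMALLOC_SHIFT_HIGH, as in the patch. */
static const char *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];

/* Illustrative only: round a request up to a power of two and return its shift. */
static int size_to_index(unsigned long size)
{
	int i = KMALLOC_SHIFT_LOW;

	while (i <= KMALLOC_SHIFT_HIGH && (1UL << i) < size)
		i++;
	return i; /* caller must check i <= KMALLOC_SHIFT_HIGH */
}

int main(void)
{
	static char names[KMALLOC_SHIFT_HIGH + 1][32];

	/* Mirrors the loop bounds used in kmem_cache_init() above. */
	for (int i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		snprintf(names[i], sizeof(names[i]), "kmalloc-%d", 1 << i);
		kmalloc_caches[i] = names[i];
	}

	printf("100 bytes  -> %s\n", kmalloc_caches[size_to_index(100)]);
	printf("8192 bytes -> %s\n", kmalloc_caches[size_to_index(8192)]);
	return 0;
}
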