 include/linux/slub_def.h |  6 +++---
 mm/slub.c                | 18 +++++++++---------
 2 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 98be113cf935..57deecc79d52 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -111,7 +111,7 @@ struct kmem_cache {
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -197,7 +197,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE / 2)
+		if (size > PAGE_SIZE)
 			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
@@ -219,7 +219,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
+		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
diff --git a/mm/slub.c b/mm/slub.c
index 644fd0aaeaf1..4b3895cb90ee 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2517,11 +2517,11 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
+static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
 #endif
 
 static int __init setup_slub_min_order(char *str)
@@ -2703,7 +2703,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
+	if (unlikely(size > PAGE_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2720,7 +2720,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
+	if (unlikely(size > PAGE_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -3032,7 +3032,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3059,7 +3059,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
 		kmalloc_caches[i].name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3088,7 +3088,7 @@ static int slab_unmergeable(struct kmem_cache *s)
 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
 		return 1;
 
-	if ((s->flags & __PAGE_ALLOC_FALLBACK)
+	if ((s->flags & __PAGE_ALLOC_FALLBACK))
 		return 1;
 
 	if (s->ctor)
@@ -3252,7 +3252,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
+	if (unlikely(size > PAGE_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3268,7 +3268,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
+	if (unlikely(size > PAGE_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
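
Taken together, the hunks above widen the kmalloc slab path: the cutoff for falling through to kmalloc_large() moves from PAGE_SIZE / 2 to PAGE_SIZE, and kmalloc_caches[] gains one slot (index PAGE_SHIFT) so a full-page-sized cache exists to back sizes in that last power-of-two bucket. Below is a minimal userspace sketch of the power-of-two index math this array layout relies on. PAGE_SHIFT = 12 and KMALLOC_SHIFT_LOW = 3 are assumed values for illustration, cache_index() is a hypothetical stand-in for the kernel's unrolled, constant-folded kmalloc_index() helper, and none of this is the actual kernel code.

#include <stdio.h>

/* Assumed for illustration (typical x86 configuration); the kernel
 * derives both values from the architecture. */
#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define KMALLOC_SHIFT_LOW	3

/*
 * Map an allocation size to the index of the smallest power-of-two
 * cache that fits it, mirroring how kmalloc_caches[] is indexed.
 * A loop stands in for the kernel's unrolled test chain; -1 marks
 * sizes that go to the page allocator via kmalloc_large() instead.
 */
static int cache_index(size_t size)
{
	int i;

	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
		if (size <= (1UL << i))
			return i;
	return -1;	/* > PAGE_SIZE: served by kmalloc_large() */
}

int main(void)
{
	/* Before this patch the loop bound was i < PAGE_SHIFT, so any
	 * size above PAGE_SIZE / 2 (2048 here) missed the slab caches;
	 * the extra slot lets 4096-byte requests hit kmalloc-4096. */
	printf("2048 -> %d\n", cache_index(2048));	/* 11 (kmalloc-2048) */
	printf("2049 -> %d\n", cache_index(2049));	/* 12 (kmalloc-4096) */
	printf("4096 -> %d\n", cache_index(4096));	/* 12 (kmalloc-4096) */
	printf("4097 -> %d\n", cache_index(4097));	/* -1 (kmalloc_large) */
	return 0;
}

The effect is visible directly in the diff: sizes in (PAGE_SIZE / 2, PAGE_SIZE] that previously fell through to the page allocator are now served from a dedicated slab cache, which is why both loops in kmem_cache_init() change from i < PAGE_SHIFT to i <= PAGE_SHIFT.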