 include/linux/slub_def.h | 19 ++++++++++++++++---
 mm/slub.c                | 16 ++++++++--------
 2 files changed, 24 insertions(+), 11 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 6b657f7dcb2b..9e3a575b2c30 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -122,10 +122,23 @@ struct kmem_cache {
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
 /*
+ * Maximum kmalloc object size handled by SLUB. Larger object allocations
+ * are passed through to the page allocator. The page allocator "fastpath"
+ * is relatively slow so we need this value sufficiently high so that
+ * performance critical objects are allocated through the SLUB fastpath.
+ *
+ * This should be dropped to PAGE_SIZE / 2 once the page allocator
+ * "fastpath" becomes competitive with the slab allocator fastpaths.
+ */
+#define SLUB_MAX_SIZE (PAGE_SIZE)
+
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)
+
+/*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
+extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -231,7 +244,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 	void *ret;
 
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE)
+		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
@@ -275,7 +288,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 	void *ret;
 
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
+		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
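The two inline-kmalloc hunks above gate the compile-time fast path on SLUB_MAX_SIZE: constant-sized requests at or below the cutoff resolve to one of the power-of-two kmalloc_caches[] entries, while anything larger falls through to kmalloc_large() and thus the page allocator. The following stand-alone C sketch (not part of the patch) illustrates that dispatch rule and why the cache array needs SLUB_PAGE_SHIFT, i.e. PAGE_SHIFT + 1, slots; it assumes 4 KiB pages (PAGE_SHIFT == 12) and deliberately ignores the special 96- and 192-byte caches that the kernel's real kmalloc_index() also handles.

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumption: 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SLUB_MAX_SIZE	(PAGE_SIZE)		/* as defined by this patch */
#define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 1)

/*
 * Index of the smallest power-of-two cache that can hold `size`.
 * Simplified stand-in for the kernel's kmalloc_index().
 */
static int cache_index(unsigned long size)
{
	int idx = 0;

	while ((1UL << idx) < size)
		idx++;
	return idx;
}

int main(void)
{
	unsigned long sizes[] = { 8, 100, 2048, PAGE_SIZE, PAGE_SIZE + 1 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		if (sizes[i] > SLUB_MAX_SIZE)
			/* too big for the slab caches: page allocator path */
			printf("%5lu bytes -> kmalloc_large()\n", sizes[i]);
		else
			/* slab path: highest possible index is PAGE_SHIFT */
			printf("%5lu bytes -> kmalloc_caches[%d]\n",
			       sizes[i], cache_index(sizes[i]));
	}
	return 0;
}

A PAGE_SIZE request maps to index PAGE_SHIFT, which is exactly why the array dimension is PAGE_SHIFT + 1 and why the kmem_cache_init() loops in the mm/slub.c hunks below can equivalently iterate with i < SLUB_PAGE_SHIFT instead of i <= PAGE_SHIFT.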
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2506,7 +2506,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2568,7 +2568,7 @@ panic:
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2690,7 +2690,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2724,7 +2724,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE)) {
+	if (unlikely(size > SLUB_MAX_SIZE)) {
 		ret = kmalloc_large_node(size, flags, node);
 
 		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
@@ -3039,7 +3039,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3076,7 +3076,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3277,7 +3277,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3300,7 +3300,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
 
 	s = get_slab(size, gfpflags);
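As far as these hunks go, the patch is behavior-neutral: with SLUB_MAX_SIZE defined as PAGE_SIZE and SLUB_PAGE_SHIFT as PAGE_SHIFT + 1, every size check and array bound compiles to exactly what the open-coded PAGE_SIZE and PAGE_SHIFT + 1 expressions produced before. The point of the renaming is to concentrate the SLUB/page-allocator cutoff in one place, so that raising it later, or dropping it to PAGE_SIZE / 2 as the new comment anticipates, becomes a one-line change instead of an audit of every size check in slub_def.h and mm/slub.c.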