author     Christoph Lameter <clameter@sgi.com>  2008-02-14 17:28:09 -0500
committer  Christoph Lameter <christoph@stapp.engr.sgi.com>  2008-02-14 18:30:02 -0500
commit     331dc558fa020451ff773973cee855fd721aa88e
tree       12adf10a7e53c8d07835487694d8180b991304cd /mm/slub.c
parent     71c7a06ff0a2ba0434ace4d7aa679537c4211d9d
slub: Support 4k kmallocs again to compensate for page allocator slowness
Currently we hand off PAGE_SIZEd kmallocs to the page allocator in the mistaken belief that the page allocator can handle these allocations effectively. However, measurements indicate a minimum slowdown by a factor of 8 (and that is only SMP, NUMA is much worse) vs. the slub fastpath, which causes regressions in tbench.

Increase the number of kmalloc caches by one so that we again handle 4k kmallocs directly from slub. 4k page buffering for the page allocator will be performed by slub like done by slab.

At some point the page allocator fastpath should be fixed. A lot of the kernel would benefit from a faster ability to allocate a single page. If that is done then the 4k allocs may again be forwarded to the page allocator and this patch could be reverted.

Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
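[Editor's illustration] To make the size-to-cache routing concrete, below is a minimal standalone sketch (plain userspace C, not kernel code) of the mapping this patch extends. PAGE_SHIFT, KMALLOC_SHIFT_LOW and cache_index() are simplified stand-ins for the kernel's own definitions, assuming 4k pages and ignoring the odd-sized 96/192-byte caches; the point is only that sizing the array PAGE_SHIFT + 1 adds an index for a kmalloc-4096 cache, so requests up to PAGE_SIZE stay on the slub fastpath instead of going to kmalloc_large().

    #include <stdio.h>

    #define PAGE_SHIFT        12    /* assumes 4k pages */
    #define PAGE_SIZE         (1UL << PAGE_SHIFT)
    #define KMALLOC_SHIFT_LOW 3     /* smallest kmalloc cache: 8 bytes */

    /*
     * Index of the smallest power-of-two kmalloc cache that fits 'size'
     * bytes, or -1 if the request would be handed to the page allocator.
     * Simplified, hypothetical stand-in for the kernel's size-to-cache logic.
     */
    static int cache_index(unsigned long size)
    {
            int i;

            if (size > PAGE_SIZE)
                    return -1;              /* kmalloc_large() path */
            for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
                    if (size <= (1UL << i))
                            return i;
            return -1;
    }

    int main(void)
    {
            /*
             * With the array sized PAGE_SHIFT + 1, index PAGE_SHIFT (a
             * kmalloc-4096 cache) exists, so a 4096-byte request stays in
             * slub; 8192 bytes still falls through to the page allocator.
             */
            printf("kmalloc(4096) -> kmalloc_caches[%d]\n", cache_index(4096));
            printf("kmalloc(8192) -> index %d (page allocator)\n", cache_index(8192));
            return 0;
    }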
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  18
1 file changed, 9 insertions, 9 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 644fd0aaeaf1..4b3895cb90ee 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2517,11 +2517,11 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
+static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
 #endif
 
 static int __init setup_slub_min_order(char *str)
@@ -2703,7 +2703,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
+	if (unlikely(size > PAGE_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2720,7 +2720,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
+	if (unlikely(size > PAGE_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -3032,7 +3032,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3059,7 +3059,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
 		kmalloc_caches[i].name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3088,7 +3088,7 @@ static int slab_unmergeable(struct kmem_cache *s)
 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
 		return 1;
 
-	if ((s->flags & __PAGE_ALLOC_FALLBACK)
+	if ((s->flags & __PAGE_ALLOC_FALLBACK))
 		return 1;
 
 	if (s->ctor)
@@ -3252,7 +3252,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
+	if (unlikely(size > PAGE_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3268,7 +3268,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
+	if (unlikely(size > PAGE_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);