path: root/include/linux/slub_def.h
author	Christoph Lameter <clameter@sgi.com>	2008-02-14 17:28:09 -0500
committer	Christoph Lameter <christoph@stapp.engr.sgi.com>	2008-02-14 18:30:02 -0500
commit	331dc558fa020451ff773973cee855fd721aa88e (patch)
tree	12adf10a7e53c8d07835487694d8180b991304cd	/include/linux/slub_def.h
parent	71c7a06ff0a2ba0434ace4d7aa679537c4211d9d (diff)
slub: Support 4k kmallocs again to compensate for page allocator slowness
Currently we hand off PAGE_SIZEd kmallocs to the page allocator in the
mistaken belief that the page allocator can handle these allocations
effectively. However, measurements indicate a minimum slowdown by a
factor of 8 (and that is only SMP; NUMA is much worse) versus the slub
fastpath, which causes regressions in tbench.

Increase the number of kmalloc caches by one so that we again handle 4k
kmallocs directly from slub. 4k page buffering for the page allocator
will be performed by slub, as is done by slab.

At some point the page allocator fastpath should be fixed. A lot of the
kernel would benefit from a faster ability to allocate a single page.
If that is done then the 4k allocs may again be forwarded to the page
allocator and this patch could be reverted.

Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
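Why the array grows by exactly one slot: SLUB's general caches are sized
in powers of two, and a request is served by the cache at the smallest
index i with 2^i >= size, so a 4096-byte request needs the cache at
index PAGE_SHIFT (2^12 = 4096). A minimal user-space sketch of that
indexing (cache_index is a hypothetical stand-in for SLUB's compile-time
index computation, not kernel code):

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	/* Next-power-of-two cache selection: a request of `size` bytes
	 * is served by the cache at the smallest index i such that
	 * (1UL << i) >= size. */
	static int cache_index(unsigned long size)
	{
		int i = 0;

		while ((1UL << i) < size)
			i++;
		return i;
	}

	int main(void)
	{
		/* 4096 bytes maps to index 12 == PAGE_SHIFT, so the array
		 * must hold PAGE_SHIFT + 1 entries (indices 0..PAGE_SHIFT)
		 * for slub to serve a 4k kmalloc from a slab cache. */
		printf("kmalloc(%lu) -> cache index %d\n",
		       PAGE_SIZE, cache_index(PAGE_SIZE));
		return 0;
	}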
Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r--	include/linux/slub_def.h	| 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 98be113cf935..57deecc79d52 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -111,7 +111,7 @@ struct kmem_cache {
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -197,7 +197,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE / 2)
+		if (size > PAGE_SIZE)
 			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
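For context, sizes above the raised threshold still take the
kmalloc_large() path into the page allocator. A sketch of that helper as
it appears in slub_def.h of this era (reproduced from memory, so treat
the exact body as an assumption rather than part of this diff):

	static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
	{
		/* Bypass the slab caches entirely: hand the request to the
		 * page allocator, rounded up to whole pages. */
		return (void *)__get_free_pages(flags | __GFP_COMP,
						get_order(size));
	}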
@@ -219,7 +219,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
+		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
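The net effect at a call site (a hypothetical caller, not part of this
patch): a compile-time-constant request of exactly PAGE_SIZE now
resolves to a slab cache on the SLUB fastpath, while anything larger
still falls through to the page allocator.

	/* Hypothetical callers illustrating the new threshold. */
	void *small = kmalloc(PAGE_SIZE, GFP_KERNEL);     /* slab cache, index PAGE_SHIFT */
	void *large = kmalloc(2 * PAGE_SIZE, GFP_KERNEL); /* kmalloc_large() -> page allocator */

	kfree(small);
	kfree(large);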