author		Christoph Lameter <clameter@sgi.com>	2007-12-21 17:37:37 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-12-21 18:51:07 -0500
commit		76be895001f2b0bee42a7685e942d3e08d5dd46c (patch)
tree		7444607c21c11ad363eee300f286ad8e1b71b65f /mm/slub.c
parent		ea67db4cdbbf7f4e74150e71da0984e25121f500 (diff)
SLUB: Improve hackbench speed
Increase the minimum number of partial slabs to keep around and put
partial slabs to the end of the partial queue so that they can add
more objects.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	4
1 file changed, 2 insertions, 2 deletions
diff --git a/mm/slub.c b/mm/slub.c
index b9f37cb0f2e6..3655ad359f03 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -172,7 +172,7 @@ static inline void ClearSlabDebug(struct page *page)
  * Mininum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
  */
-#define MIN_PARTIAL 2
+#define MIN_PARTIAL 5
 
 /*
  * Maximum number of desirable partial slabs.
@@ -1613,7 +1613,7 @@ checks_ok:
 	 * then add it.
 	 */
 	if (unlikely(!prior))
-		add_partial(get_node(s, page_to_nid(page)), page);
+		add_partial_tail(get_node(s, page_to_nid(page)), page);
 
 out_unlock:
 	slab_unlock(page);
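
For reference, the new call site queues the freed slab at the tail of the node's
partial list rather than at the head. Below is a minimal sketch of how the two
helpers plausibly differ, assuming they wrap list_add()/list_add_tail() under the
per-node list_lock as mm/slub.c did around this time; the helper bodies are not
part of this diff, so treat the details as illustrative.

/*
 * Sketch only: head vs. tail insertion into the per-node partial list.
 * Assumes struct kmem_cache_node carries list_lock, nr_partial and a
 * partial list head, as in mm/slub.c of this era.
 */
static void add_partial(struct kmem_cache_node *n, struct page *page)
{
	spin_lock(&n->list_lock);
	n->nr_partial++;
	list_add(&page->lru, &n->partial);	/* head: picked for allocation again soon */
	spin_unlock(&n->list_lock);
}

static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
{
	spin_lock(&n->list_lock);
	n->nr_partial++;
	list_add_tail(&page->lru, &n->partial);	/* tail: time to gather more frees */
	spin_unlock(&n->list_lock);
}

Tail insertion gives a slab that just became partial time to absorb further frees
before it is selected for allocation again, and the larger MIN_PARTIAL keeps more
such slabs on the list instead of returning them to the page allocator.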