author	Matt Mackall <mpm@selenic.com>	2007-07-21 07:37:40 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-21 20:49:16 -0400
commit	d6269543ef24aa012aa228c27af3adb074f7b36b (patch)
tree	5e0c24ce1dd5ed3947ea00b7863782e24848b8d3 /mm
parent	41f9dc5c871600f53c8912b2975971d2a11c1c25 (diff)
slob: reduce list scanning
The version of SLOB in -mm always scans its free list from the beginning, which results in small allocations and free segments clustering at the beginning of the list over time. This causes the average search to scan over a large stretch at the beginning on each allocation.

By starting each page search where the last one left off, we evenly distribute the allocations and greatly shorten the average search.

Without this patch, kernel compiles on a 1.5G machine take a large amount of system time for list scanning. With this patch, compiles are within a few seconds of the performance of a SLAB kernel, with no notable change in system time.

Signed-off-by: Matt Mackall <mpm@selenic.com>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
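For illustration, here is a minimal, self-contained C sketch of the idea the commit message describes: a first-fit scan over a circular free list that resumes from wherever the previous allocation succeeded (next-fit, per the Knuth reference in the patch). The names here (struct page_node, next_fit_alloc, the demo page sizes) are invented for the example; the actual patch gets the same effect by rotating the free_slob_pages list head with list_move_tail() rather than keeping an explicit cursor.

#include <stdio.h>

struct page_node {
	int free_units;          /* free space left on this "page" */
	struct page_node *next;  /* circular free list */
};

/*
 * First-fit scan that starts at *cursor instead of a fixed list head.
 * On success the cursor is left pointing at the page we allocated from,
 * so the next call resumes there rather than rescanning the run of
 * nearly-full pages that accumulates at the front of the list.
 */
static struct page_node *next_fit_alloc(struct page_node **cursor, int units)
{
	struct page_node *start = *cursor, *p = start;

	do {
		if (p->free_units >= units) {
			p->free_units -= units;
			*cursor = p;            /* resume here next time */
			return p;
		}
		p = p->next;
	} while (p != start);

	return NULL;                            /* no page has enough room */
}

int main(void)
{
	/* Build a small circular list of three pages. */
	struct page_node pages[3] = {
		{ .free_units = 4 }, { .free_units = 16 }, { .free_units = 16 }
	};
	pages[0].next = &pages[1];
	pages[1].next = &pages[2];
	pages[2].next = &pages[0];

	struct page_node *cursor = &pages[0];

	/* Repeated 8-unit allocations skip past the nearly-full first page
	 * after the first miss instead of rescanning it every time. */
	for (int i = 0; i < 4; i++) {
		struct page_node *p = next_fit_alloc(&cursor, 8);
		printf("alloc %d: page %td, %d units left\n",
		       i, p ? p - pages : -1, p ? p->free_units : 0);
	}
	return 0;
}

The point of advancing the start position is the same as the list rotation in the patch: pages near the front of the list that are too fragmented to satisfy typical requests stop being rescanned on every allocation.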
Diffstat (limited to 'mm')
-rw-r--r--	mm/slob.c	21
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index d50920ecc02b..ec33fcdc852e 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -293,6 +293,7 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 {
 	struct slob_page *sp;
+	struct list_head *prev;
 	slob_t *b = NULL;
 	unsigned long flags;
 
@@ -307,12 +308,22 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		if (node != -1 && page_to_nid(&sp->page) != node)
 			continue;
 #endif
+		/* Enough room on this page? */
+		if (sp->units < SLOB_UNITS(size))
+			continue;
 
-		if (sp->units >= SLOB_UNITS(size)) {
-			b = slob_page_alloc(sp, size, align);
-			if (b)
-				break;
-		}
+		/* Attempt to alloc */
+		prev = sp->list.prev;
+		b = slob_page_alloc(sp, size, align);
+		if (!b)
+			continue;
+
+		/* Improve fragment distribution and reduce our average
+		 * search time by starting our next search here. (see
+		 * Knuth vol 1, sec 2.5, pg 449) */
+		if (free_slob_pages.next != prev->next)
+			list_move_tail(&free_slob_pages, prev->next);
+		break;
 	}
 	spin_unlock_irqrestore(&slob_lock, flags);
 