Diffstat (limited to 'mm')
-rw-r--r--	mm/slob.c	21
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index d50920ecc02b..ec33fcdc852e 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -293,6 +293,7 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 {
 	struct slob_page *sp;
+	struct list_head *prev;
 	slob_t *b = NULL;
 	unsigned long flags;
 
@@ -307,12 +308,22 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		if (node != -1 && page_to_nid(&sp->page) != node)
 			continue;
 #endif
+		/* Enough room on this page? */
+		if (sp->units < SLOB_UNITS(size))
+			continue;
 
-		if (sp->units >= SLOB_UNITS(size)) {
-			b = slob_page_alloc(sp, size, align);
-			if (b)
-				break;
-		}
+		/* Attempt to alloc */
+		prev = sp->list.prev;
+		b = slob_page_alloc(sp, size, align);
+		if (!b)
+			continue;
+
+		/* Improve fragment distribution and reduce our average
+		 * search time by starting our next search here. (see
+		 * Knuth vol 1, sec 2.5, pg 449) */
+		if (free_slob_pages.next != prev->next)
+			list_move_tail(&free_slob_pages, prev->next);
+		break;
 	}
 	spin_unlock_irqrestore(&slob_lock, flags);
 
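The rotation in the second hunk turns the scan into a next-fit search: instead of restarting every walk of free_slob_pages from the same first page, the list head itself is parked just before the page that satisfied the request, so the next walk begins there. That spreads fragments across pages and shortens the average search, per the Knuth citation in the comment. Below is a minimal standalone sketch of that rotation; the list_head and list_move_tail() here are simplified userspace stand-ins for the kernel's <linux/list.h> helpers, and the four-page setup is invented for illustration.

/*
 * Standalone sketch, not kernel code: list_head/list_move_tail are
 * simplified userspace stand-ins for the <linux/list.h> versions.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

static void list_init(struct list_head *h)
{
	h->next = h->prev = h;
}

/* Unlink entry, then re-insert it just before head (i.e. at the tail). */
static void list_move_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

struct page {
	int id;
	struct list_head list;
};

int main(void)
{
	struct list_head free_pages;	/* plays the role of free_slob_pages */
	struct page pages[4];
	struct list_head *cur, *prev;
	int i;

	list_init(&free_pages);
	for (i = 0; i < 4; i++) {
		pages[i].id = i;
		list_init(&pages[i].list);
		list_move_tail(&pages[i].list, &free_pages);
	}

	/* Pretend the scan just found room on page 2. */
	prev = pages[2].list.prev;

	/* The rotation from the hunk above: park the list head just
	 * before the page that satisfied the request, so the next
	 * scan starts there instead of back at page 0. */
	if (free_pages.next != prev->next)
		list_move_tail(&free_pages, prev->next);

	/* Prints "2 3 0 1": the next search begins at page 2. */
	for (cur = free_pages.next; cur != &free_pages; cur = cur->next) {
		struct page *p = (struct page *)
			((char *)cur - offsetof(struct page, list));
		printf("%d ", p->id);
	}
	printf("\n");
	return 0;
}

The free_pages.next != prev->next guard mirrors the hunk's check, skipping the rotation when the list head already sits immediately before the page the next scan should start from.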