author		Matt Mackall <mpm@selenic.com>	2008-02-05 01:29:37 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-05 12:44:19 -0500
commit		20cecbae44528d347c46e71f40650b75e0dcbc8e (patch)
tree		fae7206c9aff698b76c5c6aab796539d047947bc /mm/slob.c
parent		679299b32dbf9bac4bdaedc850fb95d0f81b4963 (diff)
slob: reduce external fragmentation by using three free lists
By putting smaller objects on their own list, we greatly reduce overall
external fragmentation and increase repeatability.  This reduces total
SLOB overhead from > 50% to ~6% on a simple boot test.

Signed-off-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
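The heart of the change is a size-class lookup in front of what used to be a
single page list. A minimal self-contained sketch of that lookup (the
slob_list_for() helper name is invented here for illustration; the patch
open-codes the same choice inside slob_alloc()):

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

/* Size-class break points introduced by the patch. */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024

/* Three segregated lists of partially free pages, one per class. */
static struct list_head free_slob_small  = { &free_slob_small,  &free_slob_small  };
static struct list_head free_slob_medium = { &free_slob_medium, &free_slob_medium };
static struct list_head free_slob_large  = { &free_slob_large,  &free_slob_large  };

/* Map a request size to the list that serves it:
 * [0, 256) -> small, [256, 1024) -> medium, [1024, ..) -> large. */
static struct list_head *slob_list_for(size_t size)
{
	if (size < SLOB_BREAK1)
		return &free_slob_small;
	if (size < SLOB_BREAK2)
		return &free_slob_medium;
	return &free_slob_large;
}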
Diffstat (limited to 'mm/slob.c')
-rw-r--r--	mm/slob.c	47
1 file changed, 33 insertions(+), 14 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index c56c5e57c192..e2c3c0ec5463 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -12,10 +12,17 @@
  * allocator is as little as 2 bytes, however typically most architectures
  * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
  *
- * The slob heap is a linked list of pages from alloc_pages(), and
- * within each page, there is a singly-linked list of free blocks (slob_t).
- * The heap is grown on demand and allocation from the heap is currently
- * first-fit.
+ * The slob heap is a set of linked lists of pages from alloc_pages(),
+ * and within each page, there is a singly-linked list of free blocks
+ * (slob_t). The heap is grown on demand. To reduce fragmentation,
+ * heap pages are segregated into three lists, with objects less than
+ * 256 bytes, objects less than 1024 bytes, and all other objects.
+ *
+ * Allocation from heap involves first searching for a page with
+ * sufficient free blocks (using a next-fit-like approach) followed by
+ * a first-fit scan of the page. Deallocation inserts objects back
+ * into the free list in address order, so this is effectively an
+ * address-ordered first fit.
  *
  * Above this is an implementation of kmalloc/kfree. Blocks returned
  * from kmalloc are prepended with a 4-byte header with the kmalloc size.
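The new header comment describes the policy in prose. As a toy model of the
in-page half, under the simplifying assumption of a plain pointer-based free
list (invented structures; the real slob_t instead packs sizes and offsets
into 2-byte units):

#include <stddef.h>

/* Toy model of one page's free list: blocks kept in address order,
 * allocation takes the first block big enough (first fit). */
struct blk {
	unsigned off;     /* offset of this free block within the page */
	unsigned units;   /* its size */
	struct blk *next; /* next free block, by ascending address */
};

/* First-fit scan of the page. */
static struct blk *first_fit(struct blk *head, unsigned want)
{
	for (struct blk *b = head; b; b = b->next)
		if (b->units >= want)
			return b;
	return NULL; /* this page cannot satisfy the request */
}

/* Address-ordered insertion on free: keeping the list sorted puts
 * neighbouring free blocks next to each other, so they can be merged. */
static void free_insert(struct blk **head, struct blk *b)
{
	struct blk **p = head;
	while (*p && (*p)->off < b->off)
		p = &(*p)->next;
	b->next = *p;
	*p = b;
}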
@@ -110,9 +117,13 @@ static inline void free_slob_page(struct slob_page *sp)
 }
 
 /*
- * All (partially) free slob pages go on this list.
+ * All partially free slob pages go on these lists.
  */
-static LIST_HEAD(free_slob_pages);
+#define SLOB_BREAK1 256
+#define SLOB_BREAK2 1024
+static LIST_HEAD(free_slob_small);
+static LIST_HEAD(free_slob_medium);
+static LIST_HEAD(free_slob_large);
 
 /*
  * slob_page: True for all slob pages (false for bigblock pages)
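Continuing the sketch above, a few hypothetical boundary checks pin down the
classes; note that both break points are exclusive upper bounds:

#include <assert.h>

int main(void)
{
	assert(slob_list_for(255)  == &free_slob_small);  /* largest small */
	assert(slob_list_for(256)  == &free_slob_medium); /* smallest medium */
	assert(slob_list_for(1023) == &free_slob_medium); /* largest medium */
	assert(slob_list_for(1024) == &free_slob_large);  /* smallest large */
	return 0;
}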
@@ -140,9 +151,9 @@ static inline int slob_page_free(struct slob_page *sp)
 	return test_bit(PG_private, &sp->flags);
 }
 
-static inline void set_slob_page_free(struct slob_page *sp)
+static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
 {
-	list_add(&sp->list, &free_slob_pages);
+	list_add(&sp->list, list);
 	__set_bit(PG_private, &sp->flags);
 }
 
@@ -294,12 +305,20 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 {
 	struct slob_page *sp;
 	struct list_head *prev;
+	struct list_head *slob_list;
 	slob_t *b = NULL;
 	unsigned long flags;
 
+	if (size < SLOB_BREAK1)
+		slob_list = &free_slob_small;
+	else if (size < SLOB_BREAK2)
+		slob_list = &free_slob_medium;
+	else
+		slob_list = &free_slob_large;
+
 	spin_lock_irqsave(&slob_lock, flags);
 	/* Iterate through each partially free page, try to find room */
-	list_for_each_entry(sp, &free_slob_pages, list) {
+	list_for_each_entry(sp, slob_list, list) {
 #ifdef CONFIG_NUMA
 		/*
 		 * If there's a node specification, search for a partial
@@ -321,9 +340,9 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		/* Improve fragment distribution and reduce our average
 		 * search time by starting our next search here. (see
 		 * Knuth vol 1, sec 2.5, pg 449) */
-		if (prev != free_slob_pages.prev &&
-				free_slob_pages.next != prev->next)
-			list_move_tail(&free_slob_pages, prev->next);
+		if (prev != slob_list->prev &&
+				slob_list->next != prev->next)
+			list_move_tail(slob_list, prev->next);
 		break;
 	}
 	spin_unlock_irqrestore(&slob_lock, flags);
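The list_move_tail() in this hunk is subtle: it moves the list head itself,
so the next scan of this size class starts at prev->next, just past the page
that satisfied the current request. That is the next-fit behaviour the Knuth
citation refers to. A minimal userspace rendering of the primitive (same
semantics as the kernel's list_move_tail(), simplified):

struct list_head { struct list_head *next, *prev; };

/* Unlink entry from wherever it sits in a circular list. */
static void list_del_entry(struct list_head *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

/* list_move_tail(entry, head): re-insert entry just before head.
 * Applied to the list head itself, as in the hunk above, this leaves
 * head->next == prev->next, i.e. the next scan resumes there. */
static void list_move_tail(struct list_head *entry, struct list_head *head)
{
	list_del_entry(entry);
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}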
@@ -341,7 +360,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 	sp->free = b;
 	INIT_LIST_HEAD(&sp->list);
 	set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
-	set_slob_page_free(sp);
+	set_slob_page_free(sp, slob_list);
 	b = slob_page_alloc(sp, size, align);
 	BUG_ON(!b);
 	spin_unlock_irqrestore(&slob_lock, flags);
@@ -387,7 +406,7 @@ static void slob_free(void *block, int size)
 		set_slob(b, units,
 			 (void *)((unsigned long)(b +
 					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
-		set_slob_page_free(sp);
+		set_slob_page_free(sp, &free_slob_small);
 		goto out;
 	}
 
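One asymmetry is worth noting, as an observation from the code rather than
the changelog: slob_free() always returns a page that regains free space to
free_slob_small, regardless of which class it was allocated from. Nothing in
struct slob_page records the class (which is why set_slob_page_free() now
takes the target list as an argument), and a page with any free room can at
least serve small requests.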