aboutsummaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorAndy Whitcroft <apw@shadowen.org>2007-07-17 07:03:16 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-07-17 13:22:59 -0400
commit5ad333eb66ff1e52a87639822ae088577669dcf9 (patch)
treeaddae6bbd19585f19328f309924d06d647e8f2b7 /mm/page_alloc.c
parent7e63efef857575320fb413fbc3d0ee704b72845f (diff)
Lumpy Reclaim V4
When we are out of memory of a suitable size we enter reclaim. The current reclaim algorithm targets pages in LRU order, which is great for fairness at order-0 but highly unsuitable if you desire pages at higher orders. To get pages of higher order we must shoot down a very high proportion of memory; >95% in a lot of cases. This patch set adds a lumpy reclaim algorithm to the allocator. It targets groups of pages at the specified order anchored at the end of the active and inactive lists. This encourages groups of pages at the requested orders to move from active to inactive, and active to free lists. This behaviour is only triggered out of direct reclaim when higher order pages have been requested. This patch set is particularly effective when utilised with an anti-fragmentation scheme which groups pages of similar reclaimability together. This patch set is based on Peter Zijlstra's lumpy reclaim V2 patch which forms the foundation. Credit to Mel Gorman for sanity checking. Mel said: The patches have an application with hugepage pool resizing. When lumpy-reclaim is used with ZONE_MOVABLE, the hugepages pool can be resized with greater reliability. Testing on a desktop machine with 2GB of RAM showed that growing the hugepage pool with ZONE_MOVABLE on its own was very slow as the success rate was quite low. Without lumpy-reclaim, each attempt to grow the pool by 100 pages would yield 1 or 2 hugepages. With lumpy-reclaim, getting 40 to 70 hugepages on each attempt was typical. 
[akpm@osdl.org: ia64 pfn_to_nid fixes and loop cleanup] [bunk@stusta.de: static declarations for internal functions] [a.p.zijlstra@chello.nl: initial lumpy V2 implementation] Signed-off-by: Andy Whitcroft <apw@shadowen.org> Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Acked-by: Mel Gorman <mel@csn.ul.ie> Cc: Bob Picco <bob.picco@hp.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ac4f8c6b5c10..1a889c3fec59 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1333,7 +1333,7 @@ nofail_alloc:
1333 reclaim_state.reclaimed_slab = 0; 1333 reclaim_state.reclaimed_slab = 0;
1334 p->reclaim_state = &reclaim_state; 1334 p->reclaim_state = &reclaim_state;
1335 1335
1336 did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask); 1336 did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
1337 1337
1338 p->reclaim_state = NULL; 1338 p->reclaim_state = NULL;
1339 p->flags &= ~PF_MEMALLOC; 1339 p->flags &= ~PF_MEMALLOC;
@@ -1370,7 +1370,8 @@ nofail_alloc:
1370 */ 1370 */
1371 do_retry = 0; 1371 do_retry = 0;
1372 if (!(gfp_mask & __GFP_NORETRY)) { 1372 if (!(gfp_mask & __GFP_NORETRY)) {
1373 if ((order <= 3) || (gfp_mask & __GFP_REPEAT)) 1373 if ((order <= PAGE_ALLOC_COSTLY_ORDER) ||
1374 (gfp_mask & __GFP_REPEAT))
1374 do_retry = 1; 1375 do_retry = 1;
1375 if (gfp_mask & __GFP_NOFAIL) 1376 if (gfp_mask & __GFP_NOFAIL)
1376 do_retry = 1; 1377 do_retry = 1;