diff options
author | Mel Gorman <mel@csn.ul.ie> | 2011-01-13 18:45:56 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-13 20:32:33 -0500 |
commit | 3e7d344970673c5334cf7b5bb27c8c0942b06126 (patch) | |
tree | 832ecb4da5fd27efa5a503df5b96bfdee2a52ffd /mm/page_alloc.c | |
parent | ee64fc9354e515a79c7232cfde65c88ec627308b (diff) |
mm: vmscan: reclaim order-0 and use compaction instead of lumpy reclaim
Lumpy reclaim is disruptive. It reclaims a large number of pages and
ignores the age of the pages it reclaims. This can incur significant
stalls and potentially increase the number of major faults.
Compaction has reached the point where it is considered reasonably stable
(meaning it has passed a lot of testing) and is a potential candidate for
displacing lumpy reclaim. This patch introduces an alternative to lumpy
reclaim when compaction is available, called reclaim/compaction. The basic
operation is very simple - instead of selecting a contiguous range of
pages to reclaim, a number of order-0 pages are reclaimed and then
compaction is later triggered by either kswapd (compact_zone_order()) or direct
compaction (__alloc_pages_direct_compact()).
[akpm@linux-foundation.org: fix build]
[akpm@linux-foundation.org: use conventional task_struct naming]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 16 |
1 file changed, 16 insertions, 0 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 22a1bb7723e4..03a66a31bfcd 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -1815,12 +1815,15 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, | |||
1815 | int migratetype, unsigned long *did_some_progress) | 1815 | int migratetype, unsigned long *did_some_progress) |
1816 | { | 1816 | { |
1817 | struct page *page; | 1817 | struct page *page; |
1818 | struct task_struct *tsk = current; | ||
1818 | 1819 | ||
1819 | if (!order || compaction_deferred(preferred_zone)) | 1820 | if (!order || compaction_deferred(preferred_zone)) |
1820 | return NULL; | 1821 | return NULL; |
1821 | 1822 | ||
1823 | tsk->flags |= PF_MEMALLOC; | ||
1822 | *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask, | 1824 | *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask, |
1823 | nodemask); | 1825 | nodemask); |
1826 | tsk->flags &= ~PF_MEMALLOC; | ||
1824 | if (*did_some_progress != COMPACT_SKIPPED) { | 1827 | if (*did_some_progress != COMPACT_SKIPPED) { |
1825 | 1828 | ||
1826 | /* Page migration frees to the PCP lists but we want merging */ | 1829 | /* Page migration frees to the PCP lists but we want merging */ |
@@ -2121,6 +2124,19 @@ rebalance: | |||
2121 | /* Wait for some write requests to complete then retry */ | 2124 | /* Wait for some write requests to complete then retry */ |
2122 | wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); | 2125 | wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); |
2123 | goto rebalance; | 2126 | goto rebalance; |
2127 | } else { | ||
2128 | /* | ||
2129 | * High-order allocations do not necessarily loop after | ||
2130 | * direct reclaim and reclaim/compaction depends on compaction | ||
2131 | * being called after reclaim so call directly if necessary | ||
2132 | */ | ||
2133 | page = __alloc_pages_direct_compact(gfp_mask, order, | ||
2134 | zonelist, high_zoneidx, | ||
2135 | nodemask, | ||
2136 | alloc_flags, preferred_zone, | ||
2137 | migratetype, &did_some_progress); | ||
2138 | if (page) | ||
2139 | goto got_pg; | ||
2124 | } | 2140 | } |
2125 | 2141 | ||
2126 | nopage: | 2142 | nopage: |