author	Minchan Kim <minchan.kim@gmail.com>	2009-09-21 20:01:43 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-22 10:17:30 -0400
commit	de2e7567c7ddf24f0ca80010163ed10da66a14e2 (patch)
tree	203eeb5deda2ef11e46ee449134ba9afa0c8579b /mm/vmscan.c
parent	adea02a1bea71a508da32c04d715485a1fe62029 (diff)
vmscan: don't attempt to reclaim anon page in lumpy reclaim when no swap space is available
The VM already avoids attempting to reclaim anon pages in various places, but it does not do so for lumpy reclaim. Without free swap space, isolating such pages only shuffles the LRU lists to no effect, so skip them there as well.

[akpm@linux-foundation.org: cleanup]
Signed-off-by: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
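For readers skimming the hunk below, here is a minimal sketch of the condition this patch introduces, restated as a standalone C predicate. It is not kernel code: in the kernel, nr_swap_pages is a global counter and the anon/swap-cache state comes from the PageAnon()/PageSwapCache() page-flag helpers; both are modeled here as plain parameters purely for illustration.

#include <stdbool.h>

/*
 * Illustration only: the skip test added to isolate_lru_pages(),
 * expressed as a standalone predicate with the kernel state passed
 * in as parameters.
 */
static bool skip_for_lumpy_reclaim(long nr_swap_pages,
				   bool page_is_anon,
				   bool page_in_swap_cache)
{
	/*
	 * With no free swap slots, an anonymous page that is not already
	 * in the swap cache cannot be paged out; isolating it would only
	 * shuffle the LRU lists, so the caller skips it and moves on to
	 * the next pfn.
	 */
	return nr_swap_pages <= 0 && page_is_anon && !page_in_swap_cache;
}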
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 208071c48bf2..ece2ecb08102 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -935,6 +935,16 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 			/* Check that we have not crossed a zone boundary. */
 			if (unlikely(page_zone_id(cursor_page) != zone_id))
 				continue;
+
+			/*
+			 * If we don't have enough swap space, reclaiming of
+			 * anon page which don't already have a swap slot is
+			 * pointless.
+			 */
+			if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
+			    !PageSwapCache(cursor_page))
+				continue;
+
 			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
 				list_move(&cursor_page->lru, dst);
 				mem_cgroup_del_lru(cursor_page);