author		Mel Gorman <mgorman@suse.de>	2011-10-31 20:07:42 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-31 20:30:46 -0400
commit		a18bba061c789f5815c3efc3c80e6ac269911964 (patch)
tree		bec0234fb338f8e06b6e39df2cfa09acf2a968a3 /mm/vmscan.c
parent		ee72886d8ed5d9de3fa0ed3b99a7ca7702576a96 (diff)
mm: vmscan: remove dead code related to lumpy reclaim waiting on pages under writeback
Lumpy reclaim worked with two passes - the first which queued pages for IO
and the second which waited on writeback.  As direct reclaim can no longer
write pages there is some dead code.  This patch removes it but direct
reclaim will continue to wait on pages under writeback while in synchronous
reclaim mode.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Alex Elder <aelder@sgi.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
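As a rough, standalone illustration of the behaviour described above, the
following userspace C model sketches the decision the retained
shrink_page_list() code makes after this patch: reclaim never starts
writeback itself, but a synchronous reclaim pass that is allowed to enter
the filesystem will wait for in-flight writeback to complete.  The struct,
predicate names and should_wait_on_writeback() helper are illustrative
stand-ins, not kernel APIs; only the RECLAIM_MODE_SYNC flag name comes from
the patch itself.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's reclaim state; not kernel code. */
#define RECLAIM_MODE_SYNC 0x1

struct scan_control {
	unsigned int reclaim_mode;
};

/*
 * Model of the check retained in shrink_page_list(): wait on a page under
 * writeback only when reclaiming synchronously and filesystem access is
 * allowed; otherwise the page is skipped rather than written or waited on.
 */
static bool should_wait_on_writeback(const struct scan_control *sc,
				     bool page_under_writeback,
				     bool may_enter_fs)
{
	if (!page_under_writeback)
		return false;
	return (sc->reclaim_mode & RECLAIM_MODE_SYNC) && may_enter_fs;
}

int main(void)
{
	struct scan_control sync_sc  = { .reclaim_mode = RECLAIM_MODE_SYNC };
	struct scan_control async_sc = { .reclaim_mode = 0 };

	/* Synchronous reclaim, page under writeback, FS access allowed: wait. */
	printf("sync, may_enter_fs:  %d\n",
	       should_wait_on_writeback(&sync_sc, true, true));
	/* Asynchronous reclaim never waits (the old lumpy first pass is gone). */
	printf("async:               %d\n",
	       should_wait_on_writeback(&async_sc, true, true));
	/* No writeback in flight: nothing to wait for. */
	printf("not under writeback: %d\n",
	       should_wait_on_writeback(&sync_sc, false, true));
	return 0;
}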
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	21
1 file changed, 5 insertions(+), 16 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 10f9c59aed5..5c596654bd3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -495,15 +495,6 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
 			return PAGE_ACTIVATE;
 		}
 
-		/*
-		 * Wait on writeback if requested to. This happens when
-		 * direct reclaiming a large contiguous area and the
-		 * first attempt to free a range of pages fails.
-		 */
-		if (PageWriteback(page) &&
-		    (sc->reclaim_mode & RECLAIM_MODE_SYNC))
-			wait_on_page_writeback(page);
-
 		if (!PageWriteback(page)) {
 			/* synchronous write or broken a_ops? */
 			ClearPageReclaim(page);
@@ -804,12 +795,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
 		if (PageWriteback(page)) {
 			/*
-			 * Synchronous reclaim is performed in two passes,
-			 * first an asynchronous pass over the list to
-			 * start parallel writeback, and a second synchronous
-			 * pass to wait for the IO to complete. Wait here
-			 * for any page for which writeback has already
-			 * started.
+			 * Synchronous reclaim cannot queue pages for
+			 * writeback due to the possibility of stack overflow
+			 * but if it encounters a page under writeback, wait
+			 * for the IO to complete.
 			 */
 			if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
 			    may_enter_fs)
@@ -1414,7 +1403,7 @@ static noinline_for_stack void update_isolated_counts(struct zone *zone,
 }
 
 /*
- * Returns true if the caller should wait to clean dirty/writeback pages.
+ * Returns true if a direct reclaim should wait on pages under writeback.
  *
  * If we are direct reclaiming for contiguous pages and we do not reclaim
  * everything in the list, try again and wait for writeback IO to complete.