aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorMel Gorman <mgorman@suse.de>2011-10-31 20:07:59 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2011-10-31 20:30:47 -0400
commit49ea7eb65e7c5060807fb9312b1ad4c3eab82e2c (patch)
tree88eaa206cdcac1190817820a0eb56bca2585f9ea /mm
parent92df3a723f84cdf8133560bbff950a7a99e92bc9 (diff)
mm: vmscan: immediately reclaim end-of-LRU dirty pages when writeback completes
When direct reclaim encounters a dirty page, it gets recycled around the LRU for another cycle. This patch marks the page PageReclaim similar to deactivate_page() so that the page gets reclaimed almost immediately after the page gets cleaned. This is to avoid reclaiming clean pages that are younger than a dirty page encountered at the end of the LRU that might have been something like a use-once page. Signed-off-by: Mel Gorman <mgorman@suse.de> Acked-by: Johannes Weiner <jweiner@redhat.com> Cc: Dave Chinner <david@fromorbit.com> Cc: Christoph Hellwig <hch@infradead.org> Cc: Wu Fengguang <fengguang.wu@intel.com> Cc: Jan Kara <jack@suse.cz> Cc: Minchan Kim <minchan.kim@gmail.com> Cc: Rik van Riel <riel@redhat.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Alex Elder <aelder@sgi.com> Cc: Theodore Ts'o <tytso@mit.edu> Cc: Chris Mason <chris.mason@oracle.com> Cc: Dave Hansen <dave@linux.vnet.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/vmscan.c10
-rw-r--r--mm/vmstat.c2
2 files changed, 10 insertions, 2 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7b0573f33a2..a297603d35b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -866,7 +866,15 @@ static unsigned long shrink_page_list(struct list_head *page_list,
866 */ 866 */
867 if (page_is_file_cache(page) && 867 if (page_is_file_cache(page) &&
868 (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) { 868 (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
869 inc_zone_page_state(page, NR_VMSCAN_WRITE_SKIP); 869 /*
870 * Immediately reclaim when written back.
871 * Similar in principle to deactivate_page()
872 * except we already have the page isolated
873 * and know it's dirty
874 */
875 inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
876 SetPageReclaim(page);
877
870 goto keep_locked; 878 goto keep_locked;
871 } 879 }
872 880
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 210bd8ff3a6..56e529a4051 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -702,7 +702,7 @@ const char * const vmstat_text[] = {
702 "nr_unstable", 702 "nr_unstable",
703 "nr_bounce", 703 "nr_bounce",
704 "nr_vmscan_write", 704 "nr_vmscan_write",
705 "nr_vmscan_write_skip", 705 "nr_vmscan_immediate_reclaim",
706 "nr_writeback_temp", 706 "nr_writeback_temp",
707 "nr_isolated_anon", 707 "nr_isolated_anon",
708 "nr_isolated_file", 708 "nr_isolated_file",