author		Mel Gorman <mgorman@suse.de>	2011-10-31 20:07:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-31 20:30:46 -0400
commit		f84f6e2b0868f198f97a32ba503d6f9f319a249a (patch)
tree		2103afe0304afd0045dca4b92dfd35922cfc289b /mm/vmscan.c
parent		966dbde2c208e07bab7a45a7855e1e693eabe661 (diff)
mm: vmscan: do not writeback filesystem pages in kswapd except in high priority
It is preferable that no dirty pages are dispatched for cleaning from the page reclaim path. At normal priorities, this patch prevents kswapd writing pages.

However, page reclaim does have a requirement that pages be freed in a particular zone. If it is failing to make sufficient progress (reclaiming < SWAP_CLUSTER_MAX at any priority), the priority is raised to scan more pages. A priority of DEF_PRIORITY - 3 is considered to be the point where kswapd is getting into trouble reclaiming pages. If this priority is reached, kswapd will dispatch pages for writing.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Alex Elder <aelder@sgi.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
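For illustration only (not part of the patch): a minimal sketch of the gate the patch adds, using a hypothetical helper name may_write_page() and assuming DEF_PRIORITY is 12 as in include/linux/mmzone.h of this kernel. The priority value counts down from DEF_PRIORITY towards 0 as reclaim struggles, so kswapd only starts writing file pages once priority has dropped to DEF_PRIORITY - 3 or below.

#include <stdbool.h>

#define DEF_PRIORITY 12	/* assumed value, matching this kernel's mmzone.h */

/* Hypothetical helper, not in the patch: mirrors the new writeback condition. */
static bool may_write_page(bool file_page, bool is_kswapd, int priority)
{
	if (!file_page)
		return true;	/* anonymous pages may still be written to swap */
	if (!is_kswapd)
		return false;	/* direct reclaim never writes back file pages */
	/* kswapd writes file pages only once priority reaches DEF_PRIORITY - 3 */
	return priority < DEF_PRIORITY - 2;
}

Under this sketch, kswapd scanning at priorities 12 through 10 skips dirty file pages, and begins dispatching them for writing at priority 9 and below.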
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	13
1 file changed, 8 insertions, 5 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5c596654bd37..15e3a29fdb23 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -750,7 +750,8 @@ static noinline_for_stack void free_page_list(struct list_head *free_pages)
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
 				      struct zone *zone,
-				      struct scan_control *sc)
+				      struct scan_control *sc,
+				      int priority)
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
@@ -856,9 +857,11 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
 		/*
 		 * Only kswapd can writeback filesystem pages to
-		 * avoid risk of stack overflow
+		 * avoid risk of stack overflow but do not writeback
+		 * unless under significant pressure.
 		 */
-		if (page_is_file_cache(page) && !current_is_kswapd()) {
+		if (page_is_file_cache(page) &&
+				(!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
 			inc_zone_page_state(page, NR_VMSCAN_WRITE_SKIP);
 			goto keep_locked;
 		}
@@ -1509,12 +1512,12 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 
 	spin_unlock_irq(&zone->lru_lock);
 
-	nr_reclaimed = shrink_page_list(&page_list, zone, sc);
+	nr_reclaimed = shrink_page_list(&page_list, zone, sc, priority);
 
 	/* Check if we should syncronously wait for writeback */
 	if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
 		set_reclaim_mode(priority, sc, true);
-		nr_reclaimed += shrink_page_list(&page_list, zone, sc);
+		nr_reclaimed += shrink_page_list(&page_list, zone, sc, priority);
 	}
 
 	local_irq_disable();