author		Mel Gorman <mgorman@suse.de>	2013-07-03 18:01:50 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-03 19:07:28 -0400
commit		d43006d503ac921c7df4f94d13c17db6f13c9d26 (patch)
tree		3adf95869d1821cd5360c81dca790b3203608555 /mm
parent		9aa41348a8d11427feec350b21dcdd4330fd20c4 (diff)
mm: vmscan: have kswapd writeback pages based on dirty pages encountered, not priority
Currently kswapd queues dirty pages for writeback if scanning at an elevated priority, but the priority kswapd scans at is not related to the number of unqueued dirty pages encountered. Since commit "mm: vmscan: Flatten kswapd priority loop", the priority is related to the size of the LRU and the zone watermark, which is no indication as to whether kswapd should write pages or not.

This patch tracks whether an excessive number of unqueued dirty pages are being encountered at the end of the LRU. If so, it indicates that dirty pages are being recycled before flusher threads can clean them, and it flags the zone so that kswapd will start writing pages until the zone is balanced.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Tested-by: Zlatko Calusic <zcalusic@bitsync.net>
Cc: dormando <dormando@rydia.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
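For reference, the heuristic the patch introduces in shrink_inactive_list() can be read in isolation as follows. This is a minimal standalone sketch, not kernel code: the helper name tail_lru_is_dirty() is invented for illustration, and it only mirrors the threshold check visible in the diff below. In the real code the count is the unqueued-dirty total returned from shrink_page_list() (the local variable in shrink_inactive_list() is still named nr_dirty), and when the test succeeds the zone is flagged with zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY).

#include <stdbool.h>

#define DEF_PRIORITY	12	/* default reclaim priority in the kernel */

/*
 * Sketch of the "tail of the LRU is dirty" test: report true once the
 * number of dirty-but-not-yet-queued pages found among the isolated
 * pages crosses a threshold that shrinks as reclaim priority rises
 * (at DEF_PRIORITY every isolated page must be unqueued dirty; at
 * lower priority values a smaller fraction is enough).
 */
static bool tail_lru_is_dirty(unsigned long nr_unqueued_dirty,
			      unsigned long nr_taken, int priority)
{
	return nr_unqueued_dirty &&
	       nr_unqueued_dirty >= (nr_taken >> (DEF_PRIORITY - priority));
}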
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c	31
1 files changed, 25 insertions, 6 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1505c573719d..d6c916d808ba 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -676,13 +676,14 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				      struct zone *zone,
 				      struct scan_control *sc,
 				      enum ttu_flags ttu_flags,
-				      unsigned long *ret_nr_dirty,
+				      unsigned long *ret_nr_unqueued_dirty,
 				      unsigned long *ret_nr_writeback,
 				      bool force_reclaim)
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
 	int pgactivate = 0;
+	unsigned long nr_unqueued_dirty = 0;
 	unsigned long nr_dirty = 0;
 	unsigned long nr_congested = 0;
 	unsigned long nr_reclaimed = 0;
@@ -808,14 +809,17 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (PageDirty(page)) {
 			nr_dirty++;
 
+			if (!PageWriteback(page))
+				nr_unqueued_dirty++;
+
 			/*
 			 * Only kswapd can writeback filesystem pages to
-			 * avoid risk of stack overflow but do not writeback
-			 * unless under significant pressure.
+			 * avoid risk of stack overflow but only writeback
+			 * if many dirty pages have been encountered.
 			 */
 			if (page_is_file_cache(page) &&
 					(!current_is_kswapd() ||
-					 sc->priority >= DEF_PRIORITY - 2)) {
+					 !zone_is_reclaim_dirty(zone))) {
 				/*
 				 * Immediately reclaim when written back.
 				 * Similar in principal to deactivate_page()
@@ -960,7 +964,7 @@ keep:
 	list_splice(&ret_pages, page_list);
 	count_vm_events(PGACTIVATE, pgactivate);
 	mem_cgroup_uncharge_end();
-	*ret_nr_dirty += nr_dirty;
+	*ret_nr_unqueued_dirty += nr_unqueued_dirty;
 	*ret_nr_writeback += nr_writeback;
 	return nr_reclaimed;
 }
@@ -1373,6 +1377,15 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	    (nr_taken >> (DEF_PRIORITY - sc->priority)))
 		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
 
+	/*
+	 * Similarly, if many dirty pages are encountered that are not
+	 * currently being written then flag that kswapd should start
+	 * writing back pages.
+	 */
+	if (global_reclaim(sc) && nr_dirty &&
+	    nr_dirty >= (nr_taken >> (DEF_PRIORITY - sc->priority)))
+		zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
+
 	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
 		zone_idx(zone),
 		nr_scanned, nr_reclaimed,
@@ -2769,8 +2782,12 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				end_zone = i;
 				break;
 			} else {
-				/* If balanced, clear the congested flag */
+				/*
+				 * If balanced, clear the dirty and congested
+				 * flags
+				 */
 				zone_clear_flag(zone, ZONE_CONGESTED);
+				zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
 			}
 		}
 
@@ -2888,8 +2905,10 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				 * possible there are dirty pages backed by
 				 * congested BDIs but as pressure is relieved,
 				 * speculatively avoid congestion waits
+				 * or writing pages from kswapd context.
 				 */
 				zone_clear_flag(zone, ZONE_CONGESTED);
+				zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
 			}
 
 			/*