 mm/vmscan.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 48 insertions(+), 13 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4a43c289b23a..999ef0b9399a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -669,6 +669,25 @@ static enum page_references page_check_references(struct page *page,
 	return PAGEREF_RECLAIM;
 }
 
+/* Check if a page is dirty or under writeback */
+static void page_check_dirty_writeback(struct page *page,
+				       bool *dirty, bool *writeback)
+{
+	/*
+	 * Anonymous pages are not handled by flushers and must be written
+	 * from reclaim context. Do not stall reclaim based on them
+	 */
+	if (!page_is_file_cache(page)) {
+		*dirty = false;
+		*writeback = false;
+		return;
+	}
+
+	/* By default assume that the page flags are accurate */
+	*dirty = PageDirty(page);
+	*writeback = PageWriteback(page);
+}
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
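
For reference, the new helper reports a page's dirty and writeback state only for file-backed pages; anonymous pages always come back clean so that reclaim never stalls waiting for flusher threads that will never touch them. The standalone sketch below models that behaviour in userspace; the struct, flag bits and function names are stand-ins invented for illustration, not the kernel's definitions.

/* Userspace model of the classification above; the "page" struct, flag
 * bits and helpers are made up, not the kernel's definitions. */
#include <stdbool.h>
#include <stdio.h>

#define PG_dirty      (1u << 0)
#define PG_writeback  (1u << 1)
#define PG_file_cache (1u << 2)

struct fake_page { unsigned int flags; };

static void fake_check_dirty_writeback(const struct fake_page *page,
					bool *dirty, bool *writeback)
{
	/* Anonymous pages are never reported: flushers cannot clean them,
	 * so reclaim must not stall on them. */
	if (!(page->flags & PG_file_cache)) {
		*dirty = false;
		*writeback = false;
		return;
	}
	*dirty = page->flags & PG_dirty;
	*writeback = page->flags & PG_writeback;
}

int main(void)
{
	struct fake_page anon = { .flags = PG_dirty };
	struct fake_page file = { .flags = PG_file_cache | PG_dirty };
	bool d, wb;

	fake_check_dirty_writeback(&anon, &d, &wb);
	printf("anon: dirty=%d writeback=%d\n", d, wb); /* 0 0 */
	fake_check_dirty_writeback(&file, &d, &wb);
	printf("file: dirty=%d writeback=%d\n", d, wb); /* 1 0 */
	return 0;
}
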
@@ -697,6 +716,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		struct page *page;
 		int may_enter_fs;
 		enum page_references references = PAGEREF_RECLAIM_CLEAN;
+		bool dirty, writeback;
 
 		cond_resched();
 
@@ -725,6 +745,24 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
 
 		/*
+		 * The number of dirty pages determines if a zone is marked
+		 * reclaim_congested which affects wait_iff_congested. kswapd
+		 * will stall and start writing pages if the tail of the LRU
+		 * is all dirty unqueued pages.
+		 */
+		page_check_dirty_writeback(page, &dirty, &writeback);
+		if (dirty || writeback)
+			nr_dirty++;
+
+		if (dirty && !writeback)
+			nr_unqueued_dirty++;
+
+		/* Treat this page as congested if underlying BDI is */
+		mapping = page_mapping(page);
+		if (mapping && bdi_write_congested(mapping->backing_dev_info))
+			nr_congested++;
+
+		/*
 		 * If a page at the tail of the LRU is under writeback, there
 		 * are three cases to consider.
 		 *
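
The accounting added above runs once for every page shrink_page_list() looks at, so the counters now describe the whole batch taken from the tail of the LRU rather than only the pages that later reach the PageDirty branch (the old counting is removed in the hunks below). A rough userspace model of how the three counters relate, using made-up names:

/* Userspace model of the per-page accounting above; names are illustrative,
 * not the kernel's. */
#include <stdbool.h>
#include <stdio.h>

struct reclaim_stats {
	unsigned long nr_dirty;          /* dirty or already under writeback */
	unsigned long nr_unqueued_dirty; /* dirty but not yet queued for I/O */
	unsigned long nr_congested;      /* backed by a congested device */
};

static void account_page(struct reclaim_stats *stats, bool dirty,
			 bool writeback, bool bdi_congested)
{
	/* Dirty and writeback pages both count towards the "tail of the LRU
	 * is dirty" signal, whether or not I/O has started on them. */
	if (dirty || writeback)
		stats->nr_dirty++;

	/* Only pages nobody has queued for I/O yet show that the flusher
	 * threads are falling behind reclaim. */
	if (dirty && !writeback)
		stats->nr_unqueued_dirty++;

	if (bdi_congested)
		stats->nr_congested++;
}

int main(void)
{
	struct reclaim_stats stats = { 0 };

	account_page(&stats, true,  false, false); /* dirty, unqueued */
	account_page(&stats, true,  true,  true);  /* writeback, congested BDI */
	account_page(&stats, false, false, false); /* clean */

	printf("dirty=%lu unqueued=%lu congested=%lu\n",
	       stats.nr_dirty, stats.nr_unqueued_dirty, stats.nr_congested);
	/* prints: dirty=2 unqueued=1 congested=1 */
	return 0;
}
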
@@ -819,9 +857,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			if (!add_to_swap(page, page_list))
 				goto activate_locked;
 			may_enter_fs = 1;
-		}
 
-		mapping = page_mapping(page);
+			/* Adding to swap updated mapping */
+			mapping = page_mapping(page);
+		}
 
 		/*
 		 * The page is mapped into the page tables of one or more
@@ -841,11 +880,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		}
 
 		if (PageDirty(page)) {
-			nr_dirty++;
-
-			if (!PageWriteback(page))
-				nr_unqueued_dirty++;
-
 			/*
 			 * Only kswapd can writeback filesystem pages to
 			 * avoid risk of stack overflow but only writeback
@@ -876,7 +910,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			/* Page is dirty, try to write it out here */
 			switch (pageout(page, mapping, sc)) {
 			case PAGE_KEEP:
-				nr_congested++;
 				goto keep_locked;
 			case PAGE_ACTIVATE:
 				goto activate_locked;
@@ -1318,7 +1351,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	unsigned long nr_scanned;
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_taken;
-	unsigned long nr_dirty = 0;
+	unsigned long nr_unqueued_dirty = 0;
 	unsigned long nr_writeback = 0;
 	isolate_mode_t isolate_mode = 0;
 	int file = is_file_lru(lru);
@@ -1361,7 +1394,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		return 0;
 
 	nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
-					&nr_dirty, &nr_writeback, false);
+					&nr_unqueued_dirty, &nr_writeback, false);
 
 	spin_lock_irq(&zone->lru_lock);
 
@@ -1416,11 +1449,13 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	/*
 	 * Similarly, if many dirty pages are encountered that are not
 	 * currently being written then flag that kswapd should start
-	 * writing back pages.
+	 * writing back pages and stall to give a chance for flushers
+	 * to catch up.
 	 */
-	if (global_reclaim(sc) && nr_dirty &&
-			nr_dirty >= (nr_taken >> (DEF_PRIORITY - sc->priority)))
+	if (global_reclaim(sc) && nr_unqueued_dirty == nr_taken) {
+		congestion_wait(BLK_RW_ASYNC, HZ/10);
 		zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
+	}
 
 	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
 					zone_idx(zone),
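
With the counters moved to the top of the per-page loop, the stall condition becomes exact: only when every page taken off the inactive list was dirty and not yet queued for I/O does reclaim wait briefly for the flushers (congestion_wait(BLK_RW_ASYNC, HZ/10) sleeps for up to a tenth of a second) and flag the zone so kswapd will start writeback itself, replacing the old priority-scaled threshold. The sketch below is an illustrative userspace model of that decision; every identifier is a stand-in, and the nr_taken check exists only because this standalone version has no earlier bail-out for an empty batch.

/* Userspace model of the stall decision above; names are stand-ins, not
 * the kernel's API. */
#include <stdbool.h>
#include <stdio.h>

#define FAKE_ZONE_TAIL_LRU_DIRTY (1u << 0)

struct fake_zone { unsigned int flags; };

static void fake_congestion_wait(void)
{
	/* Stands in for congestion_wait(BLK_RW_ASYNC, HZ/10): sleep up to
	 * ~100ms, waking early if the backing device stops being congested. */
}

static void maybe_stall(struct fake_zone *zone, bool global_reclaim,
			unsigned long nr_unqueued_dirty, unsigned long nr_taken)
{
	/* The nr_taken check is only needed in this standalone sketch; the
	 * kernel path has already returned when nothing was isolated. */
	if (global_reclaim && nr_taken && nr_unqueued_dirty == nr_taken) {
		fake_congestion_wait();
		zone->flags |= FAKE_ZONE_TAIL_LRU_DIRTY;
	}
}

int main(void)
{
	struct fake_zone zone = { 0 };

	maybe_stall(&zone, true, 30, 32); /* mostly dirty: no stall, no flag */
	maybe_stall(&zone, true, 32, 32); /* whole batch unqueued dirty: stall */
	printf("tail-lru-dirty flag: %d\n",
	       !!(zone.flags & FAKE_ZONE_TAIL_LRU_DIRTY)); /* prints 1 */
	return 0;
}
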