author    Mel Gorman <mgorman@suse.de>                      2013-07-03 18:02:02 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-07-03 19:07:29 -0400
commit    8e950282804558e4605401b9c79c1d34f0d73507
tree      c4afdf31677851afcb83355b2395838571fc6d79
parent    f7ab8db791a8692f5ed4201dbae25722c1732a8d
mm: vmscan: move direct reclaim wait_iff_congested into shrink_list
shrink_inactive_list makes decisions on whether to stall based on the number of dirty pages encountered. The wait_iff_congested() call made on behalf of direct reclaim in do_try_to_free_pages() does no such thing and is arbitrary.

This patch moves the decision on whether to set ZONE_CONGESTED and the wait_iff_congested() call into shrink_inactive_list. This keeps all the decisions on whether to stall or not in the one place.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Cc: Zlatko Calusic <zcalusic@bitsync.net>
Cc: dormando <dormando@rydia.net>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
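To make the commit message concrete, the stand-alone C sketch below models the stall logic that shrink_inactive_list() owns after this patch: tag the zone congested when every dirty page sat on a congested BDI, flag it for writeback when enough isolated pages are under writeback, and only let direct reclaim (not kswapd, not hibernation) sleep in wait_iff_congested(). The struct reclaim_counters type, the decide_stall() helper and the main() driver are illustrative inventions for this page, not kernel code; only the three conditions mirror the diff below.

/* Illustrative model of the post-patch stall decisions in shrink_inactive_list().
 * Names here (reclaim_counters, decide_stall, ...) are hypothetical; only the
 * conditions are taken from the diff. */
#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY 12

struct reclaim_counters {
	unsigned long nr_taken;      /* pages isolated from the LRU */
	unsigned long nr_dirty;      /* dirty pages seen by shrink_page_list() */
	unsigned long nr_congested;  /* dirty pages backed by a congested BDI */
	unsigned long nr_writeback;  /* pages currently under writeback */
};

/* Returns true when the caller should stall in wait_iff_congested(). */
static bool decide_stall(const struct reclaim_counters *c, int priority,
			 bool global_reclaim, bool is_kswapd, bool hibernation,
			 bool *zone_congested, bool *zone_writeback)
{
	/* Tag the zone congested if every dirty page was backed by a
	 * congested BDI (the check moved here from shrink_page_list()). */
	if (global_reclaim && c->nr_dirty && c->nr_dirty == c->nr_congested)
		*zone_congested = true;

	/* Flag the zone for writeback once enough isolated pages are under
	 * writeback; the threshold halves for each priority level below
	 * DEF_PRIORITY. */
	if (c->nr_writeback &&
	    c->nr_writeback >= (c->nr_taken >> (DEF_PRIORITY - priority)))
		*zone_writeback = true;

	/* Only direct reclaim (not kswapd, not hibernation) sleeps here. */
	return !hibernation && !is_kswapd;
}

int main(void)
{
	struct reclaim_counters c = { .nr_taken = 32, .nr_dirty = 8,
				      .nr_congested = 8, .nr_writeback = 20 };
	bool congested = false, writeback = false;
	bool stall = decide_stall(&c, DEF_PRIORITY - 2, true, false, false,
				  &congested, &writeback);

	printf("congested=%d writeback=%d stall=%d\n", congested, writeback, stall);
	return 0;
}

With the sample counters this prints congested=1 writeback=1 stall=1, i.e. a direct reclaimer at priority DEF_PRIORITY-2 would both tag the zone and stall, while kswapd would tag it and keep going.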
Diffstat (limited to 'mm/vmscan.c')
 mm/vmscan.c | 62 +++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 33 insertions(+), 29 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5f80d018bffa..4898daf074cf 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -695,7 +695,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				      struct zone *zone,
 				      struct scan_control *sc,
 				      enum ttu_flags ttu_flags,
+				      unsigned long *ret_nr_dirty,
 				      unsigned long *ret_nr_unqueued_dirty,
+				      unsigned long *ret_nr_congested,
 				      unsigned long *ret_nr_writeback,
 				      unsigned long *ret_nr_immediate,
 				      bool force_reclaim)
@@ -1017,20 +1019,13 @@ keep:
 		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
 	}
 
-	/*
-	 * Tag a zone as congested if all the dirty pages encountered were
-	 * backed by a congested BDI. In this case, reclaimers should just
-	 * back off and wait for congestion to clear because further reclaim
-	 * will encounter the same problem
-	 */
-	if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
-		zone_set_flag(zone, ZONE_CONGESTED);
-
 	free_hot_cold_page_list(&free_pages, 1);
 
 	list_splice(&ret_pages, page_list);
 	count_vm_events(PGACTIVATE, pgactivate);
 	mem_cgroup_uncharge_end();
+	*ret_nr_dirty += nr_dirty;
+	*ret_nr_congested += nr_congested;
 	*ret_nr_unqueued_dirty += nr_unqueued_dirty;
 	*ret_nr_writeback += nr_writeback;
 	*ret_nr_immediate += nr_immediate;
@@ -1045,7 +1040,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 		.priority = DEF_PRIORITY,
 		.may_unmap = 1,
 	};
-	unsigned long ret, dummy1, dummy2, dummy3;
+	unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
 	struct page *page, *next;
 	LIST_HEAD(clean_pages);
 
@@ -1057,8 +1052,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 	}
 
 	ret = shrink_page_list(&clean_pages, zone, &sc,
-				TTU_UNMAP|TTU_IGNORE_ACCESS,
-				&dummy1, &dummy2, &dummy3, true);
+			TTU_UNMAP|TTU_IGNORE_ACCESS,
+			&dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
 	list_splice(&clean_pages, page_list);
 	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
 	return ret;
@@ -1352,6 +1347,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	unsigned long nr_scanned;
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_taken;
+	unsigned long nr_dirty = 0;
+	unsigned long nr_congested = 0;
 	unsigned long nr_unqueued_dirty = 0;
 	unsigned long nr_writeback = 0;
 	unsigned long nr_immediate = 0;
@@ -1396,8 +1393,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		return 0;
 
 	nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
-				&nr_unqueued_dirty, &nr_writeback, &nr_immediate,
-				false);
+				&nr_dirty, &nr_unqueued_dirty, &nr_congested,
+				&nr_writeback, &nr_immediate,
+				false);
 
 	spin_lock_irq(&zone->lru_lock);
 
@@ -1431,7 +1429,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * same way balance_dirty_pages() manages.
 	 *
 	 * This scales the number of dirty pages that must be under writeback
-	 * before throttling depending on priority. It is a simple backoff
+	 * before a zone gets flagged ZONE_WRITEBACK. It is a simple backoff
 	 * function that has the most effect in the range DEF_PRIORITY to
 	 * DEF_PRIORITY-2 which is the priority reclaim is considered to be
 	 * in trouble and reclaim is considered to be in trouble.
@@ -1442,12 +1440,14 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * ...
 	 * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
 	 *                isolated page is PageWriteback
+	 *
+	 * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number
+	 * of pages under pages flagged for immediate reclaim and stall if any
+	 * are encountered in the nr_immediate check below.
 	 */
 	if (nr_writeback && nr_writeback >=
-			(nr_taken >> (DEF_PRIORITY - sc->priority))) {
+			(nr_taken >> (DEF_PRIORITY - sc->priority)))
 		zone_set_flag(zone, ZONE_WRITEBACK);
-		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
-	}
 
 	/*
 	 * memcg will stall in page writeback so only consider forcibly
@@ -1455,6 +1455,13 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 */
 	if (global_reclaim(sc)) {
 		/*
+		 * Tag a zone as congested if all the dirty pages scanned were
+		 * backed by a congested BDI and wait_iff_congested will stall.
+		 */
+		if (nr_dirty && nr_dirty == nr_congested)
+			zone_set_flag(zone, ZONE_CONGESTED);
+
+		/*
 		 * If dirty pages are scanned that are not queued for IO, it
 		 * implies that flushers are not keeping up. In this case, flag
 		 * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
@@ -1474,6 +1481,14 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 			congestion_wait(BLK_RW_ASYNC, HZ/10);
 	}
 
+	/*
+	 * Stall direct reclaim for IO completions if underlying BDIs or zone
+	 * is congested. Allow kswapd to continue until it starts encountering
+	 * unqueued dirty pages or cycling through the LRU too quickly.
+	 */
+	if (!sc->hibernation_mode && !current_is_kswapd())
+		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
+
 	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
 		zone_idx(zone),
 		nr_scanned, nr_reclaimed,
@@ -2374,17 +2389,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 						WB_REASON_TRY_TO_FREE_PAGES);
 			sc->may_writepage = 1;
 		}
-
-		/* Take a nap, wait for some writeback to complete */
-		if (!sc->hibernation_mode && sc->nr_scanned &&
-		    sc->priority < DEF_PRIORITY - 2) {
-			struct zone *preferred_zone;
-
-			first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
-						&cpuset_current_mems_allowed,
-						&preferred_zone);
-			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
-		}
 	} while (--sc->priority >= 0);
 
 out:
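For reference, reassembling the first hunk gives the shrink_page_list() prototype as it stands after this patch; this is a reconstruction from the diff above, not a separate quotation of the resulting tree:

static unsigned long shrink_page_list(struct list_head *page_list,
				      struct zone *zone,
				      struct scan_control *sc,
				      enum ttu_flags ttu_flags,
				      unsigned long *ret_nr_dirty,
				      unsigned long *ret_nr_unqueued_dirty,
				      unsigned long *ret_nr_congested,
				      unsigned long *ret_nr_writeback,
				      unsigned long *ret_nr_immediate,
				      bool force_reclaim);

Both updated call sites pass the five counters in this order: shrink_inactive_list() feeds real nr_* variables so it can make the stall decisions itself, while reclaim_clean_pages_from_list() simply discards them through dummy1..dummy5.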