Diffstat (limited to 'mm/vmscan.c')
 mm/vmscan.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index af72fe8e8d74..06123f20a326 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -920,7 +920,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			/* Case 1 above */
 			if (current_is_kswapd() &&
 			    PageReclaim(page) &&
-			    zone_is_reclaim_writeback(zone)) {
+			    test_bit(ZONE_WRITEBACK, &zone->flags)) {
 				nr_immediate++;
 				goto keep_locked;
 
@@ -1002,7 +1002,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			 */
 			if (page_is_file_cache(page) &&
 			    (!current_is_kswapd() ||
-			     !zone_is_reclaim_dirty(zone))) {
+			     !test_bit(ZONE_DIRTY, &zone->flags))) {
 				/*
 				 * Immediately reclaim when written back.
 				 * Similar in principal to deactivate_page()
@@ -1563,7 +1563,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * are encountered in the nr_immediate check below.
 	 */
 	if (nr_writeback && nr_writeback == nr_taken)
-		zone_set_flag(zone, ZONE_WRITEBACK);
+		set_bit(ZONE_WRITEBACK, &zone->flags);
 
 	/*
 	 * memcg will stall in page writeback so only consider forcibly
@@ -1575,16 +1575,16 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * backed by a congested BDI and wait_iff_congested will stall.
 	 */
 	if (nr_dirty && nr_dirty == nr_congested)
-		zone_set_flag(zone, ZONE_CONGESTED);
+		set_bit(ZONE_CONGESTED, &zone->flags);
 
 	/*
 	 * If dirty pages are scanned that are not queued for IO, it
 	 * implies that flushers are not keeping up. In this case, flag
-	 * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
-	 * pages from reclaim context.
+	 * the zone ZONE_DIRTY and kswapd will start writing pages from
+	 * reclaim context.
 	 */
 	if (nr_unqueued_dirty == nr_taken)
-		zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
+		set_bit(ZONE_DIRTY, &zone->flags);
 
 	/*
 	 * If kswapd scans pages marked marked for immediate
@@ -2984,7 +2984,7 @@ static bool kswapd_shrink_zone(struct zone *zone,
 	/* Account for the number of pages attempted to reclaim */
 	*nr_attempted += sc->nr_to_reclaim;
 
-	zone_clear_flag(zone, ZONE_WRITEBACK);
+	clear_bit(ZONE_WRITEBACK, &zone->flags);
 
 	/*
 	 * If a zone reaches its high watermark, consider it to be no longer
@@ -2994,8 +2994,8 @@ static bool kswapd_shrink_zone(struct zone *zone,
 	 */
 	if (zone_reclaimable(zone) &&
 	    zone_balanced(zone, testorder, 0, classzone_idx)) {
-		zone_clear_flag(zone, ZONE_CONGESTED);
-		zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
+		clear_bit(ZONE_CONGESTED, &zone->flags);
+		clear_bit(ZONE_DIRTY, &zone->flags);
 	}
 
 	return sc->nr_scanned >= sc->nr_to_reclaim;
@@ -3086,8 +3086,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				 * If balanced, clear the dirty and congested
 				 * flags
 				 */
-				zone_clear_flag(zone, ZONE_CONGESTED);
-				zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
+				clear_bit(ZONE_CONGESTED, &zone->flags);
+				clear_bit(ZONE_DIRTY, &zone->flags);
 			}
 		}
 
@@ -3714,11 +3714,11 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
 		return ZONE_RECLAIM_NOSCAN;
 
-	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
+	if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags))
 		return ZONE_RECLAIM_NOSCAN;
 
 	ret = __zone_reclaim(zone, gfp_mask, order);
-	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
+	clear_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
 
 	if (!ret)
 		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
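
The pattern throughout the diff is mechanical: the zone flag wrapper helpers are dropped and every call site operates on zone->flags directly with the generic atomic bitops, with ZONE_TAIL_LRU_DIRTY renamed to ZONE_DIRTY along the way. As a rough sketch (the exact definitions and the zone_flags_t type name are assumptions, not shown in this diff), the removed helpers were presumably thin inline wrappers of this shape:

/*
 * Sketch of the removed helpers (approximate, for illustration only):
 * each one merely forwarded to the corresponding generic bitop on
 * zone->flags, which is why the call sites can use the bitops directly.
 */
static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
	set_bit(flag, &zone->flags);
}

static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
{
	clear_bit(flag, &zone->flags);
}

static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
{
	return test_and_set_bit(flag, &zone->flags);
}

static inline int zone_is_reclaim_writeback(const struct zone *zone)
{
	return test_bit(ZONE_WRITEBACK, &zone->flags);
}

So, for example, zone_is_reclaim_writeback(zone) in shrink_page_list() becomes test_bit(ZONE_WRITEBACK, &zone->flags), and zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED) in zone_reclaim() becomes test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags).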