author    Johannes Weiner <hannes@cmpxchg.org>  2014-10-09 18:28:17 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-10-09 22:25:57 -0400
commit    5705465174686d007473e017b76c4b64b44aa690 (patch)
tree      09d197b925802f7b2b7d96129046eb0d15da60dc /mm
parent    7c809968ffa92d41baaa9054e897436480179b20 (diff)
mm: clean up zone flags
Page reclaim tests zone_is_reclaim_dirty(), but the site that actually sets this state does zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY), sending the reader through layers of indirection just to track down a simple bit.

Remove all zone flag wrappers and just use bitops against zone->flags directly.  It's just as readable and the lines are barely any longer.

Also rename ZONE_TAIL_LRU_DIRTY to ZONE_DIRTY to match ZONE_WRITEBACK, and remove the zone_flags_t typedef.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
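For readers looking only at this mm/ diff: the wrappers being deleted live in include/linux/mmzone.h, which is outside the diffstat shown below. A minimal before/after sketch of the pattern (paraphrased for illustration, not a verbatim copy of the removed header code):

	/* Before: each flag hides behind a per-flag helper in mmzone.h, e.g. */
	static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
	{
		set_bit(flag, &zone->flags);
	}

	static inline int zone_is_reclaim_dirty(struct zone *zone)
	{
		return test_bit(ZONE_TAIL_LRU_DIRTY, &zone->flags);
	}

	/* After: callers name the bit at the call site, using the generic bitops directly. */
	if (!test_bit(ZONE_DIRTY, &zone->flags))
		set_bit(ZONE_DIRTY, &zone->flags);

With the wrappers gone, a grep for ZONE_DIRTY lands directly on the code that sets and tests the bit, rather than on a helper one layer removed.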
Diffstat (limited to 'mm')
-rw-r--r--   mm/backing-dev.c    2
-rw-r--r--   mm/oom_kill.c       6
-rw-r--r--   mm/page_alloc.c     8
-rw-r--r--   mm/vmscan.c        28
4 files changed, 22 insertions, 22 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 1706cbbdf5f0..b27714f1b40f 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -631,7 +631,7 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout)
 	 * of sleeping on the congestion queue
 	 */
 	if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
-	    !zone_is_reclaim_congested(zone)) {
+	    !test_bit(ZONE_CONGESTED, &zone->flags)) {
 		cond_resched();
 
 		/* In case we scheduled, work out time remaining */
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 1e11df8fa7ec..bbf405a3a18f 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -565,7 +565,7 @@ bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_mask)
 
 	spin_lock(&zone_scan_lock);
 	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
-		if (zone_is_oom_locked(zone)) {
+		if (test_bit(ZONE_OOM_LOCKED, &zone->flags)) {
 			ret = false;
 			goto out;
 		}
@@ -575,7 +575,7 @@ bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_mask)
 	 * call to oom_zonelist_trylock() doesn't succeed when it shouldn't.
 	 */
 	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
-		zone_set_flag(zone, ZONE_OOM_LOCKED);
+		set_bit(ZONE_OOM_LOCKED, &zone->flags);
 
 out:
 	spin_unlock(&zone_scan_lock);
@@ -594,7 +594,7 @@ void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_mask)
 
 	spin_lock(&zone_scan_lock);
 	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
-		zone_clear_flag(zone, ZONE_OOM_LOCKED);
+		clear_bit(ZONE_OOM_LOCKED, &zone->flags);
 	spin_unlock(&zone_scan_lock);
 }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ae2f8474273c..f3769f0fce3c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1614,8 +1614,8 @@ again:
 
 	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
 	if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
-	    !zone_is_fair_depleted(zone))
-		zone_set_flag(zone, ZONE_FAIR_DEPLETED);
+	    !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
+		set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
 
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 	zone_statistics(preferred_zone, zone, gfp_flags);
@@ -1935,7 +1935,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
 		mod_zone_page_state(zone, NR_ALLOC_BATCH,
 			high_wmark_pages(zone) - low_wmark_pages(zone) -
 			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
-		zone_clear_flag(zone, ZONE_FAIR_DEPLETED);
+		clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
 	} while (zone++ != preferred_zone);
 }
 
@@ -1986,7 +1986,7 @@ zonelist_scan:
 		if (alloc_flags & ALLOC_FAIR) {
 			if (!zone_local(preferred_zone, zone))
 				break;
-			if (zone_is_fair_depleted(zone)) {
+			if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
 				nr_fair_skipped++;
 				continue;
 			}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index af72fe8e8d74..06123f20a326 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -920,7 +920,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			/* Case 1 above */
 			if (current_is_kswapd() &&
 			    PageReclaim(page) &&
-			    zone_is_reclaim_writeback(zone)) {
+			    test_bit(ZONE_WRITEBACK, &zone->flags)) {
 				nr_immediate++;
 				goto keep_locked;
 
@@ -1002,7 +1002,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			 */
 			if (page_is_file_cache(page) &&
 			    (!current_is_kswapd() ||
-			     !zone_is_reclaim_dirty(zone))) {
+			     !test_bit(ZONE_DIRTY, &zone->flags))) {
 				/*
 				 * Immediately reclaim when written back.
 				 * Similar in principal to deactivate_page()
@@ -1563,7 +1563,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * are encountered in the nr_immediate check below.
 	 */
 	if (nr_writeback && nr_writeback == nr_taken)
-		zone_set_flag(zone, ZONE_WRITEBACK);
+		set_bit(ZONE_WRITEBACK, &zone->flags);
 
 	/*
 	 * memcg will stall in page writeback so only consider forcibly
@@ -1575,16 +1575,16 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		 * backed by a congested BDI and wait_iff_congested will stall.
 		 */
 		if (nr_dirty && nr_dirty == nr_congested)
-			zone_set_flag(zone, ZONE_CONGESTED);
+			set_bit(ZONE_CONGESTED, &zone->flags);
 
 		/*
 		 * If dirty pages are scanned that are not queued for IO, it
 		 * implies that flushers are not keeping up. In this case, flag
-		 * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
-		 * pages from reclaim context.
+		 * the zone ZONE_DIRTY and kswapd will start writing pages from
+		 * reclaim context.
 		 */
 		if (nr_unqueued_dirty == nr_taken)
-			zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
+			set_bit(ZONE_DIRTY, &zone->flags);
 
 		/*
 		 * If kswapd scans pages marked marked for immediate
@@ -2984,7 +2984,7 @@ static bool kswapd_shrink_zone(struct zone *zone,
 	/* Account for the number of pages attempted to reclaim */
 	*nr_attempted += sc->nr_to_reclaim;
 
-	zone_clear_flag(zone, ZONE_WRITEBACK);
+	clear_bit(ZONE_WRITEBACK, &zone->flags);
 
 	/*
 	 * If a zone reaches its high watermark, consider it to be no longer
@@ -2994,8 +2994,8 @@ static bool kswapd_shrink_zone(struct zone *zone,
 	 */
 	if (zone_reclaimable(zone) &&
 	    zone_balanced(zone, testorder, 0, classzone_idx)) {
-		zone_clear_flag(zone, ZONE_CONGESTED);
-		zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
+		clear_bit(ZONE_CONGESTED, &zone->flags);
+		clear_bit(ZONE_DIRTY, &zone->flags);
 	}
 
 	return sc->nr_scanned >= sc->nr_to_reclaim;
@@ -3086,8 +3086,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 			 * If balanced, clear the dirty and congested
 			 * flags
 			 */
-			zone_clear_flag(zone, ZONE_CONGESTED);
-			zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
+			clear_bit(ZONE_CONGESTED, &zone->flags);
+			clear_bit(ZONE_DIRTY, &zone->flags);
 		}
 	}
 
@@ -3714,11 +3714,11 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
 		return ZONE_RECLAIM_NOSCAN;
 
-	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
+	if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags))
 		return ZONE_RECLAIM_NOSCAN;
 
 	ret = __zone_reclaim(zone, gfp_mask, order);
-	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
+	clear_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
 
 	if (!ret)
 		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);