author	Mel Gorman <mgorman@suse.de>	2013-07-03 18:01:54 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-03 19:07:28 -0400
commit	7c954f6de6b630de30f265a079aad359f159ebe9 (patch)
tree	cbcbacbfe79d8a53822ef85bae6904fa9d6fb45f
parent	b7ea3c417b6c2e74ca1cb051568f60377908928d (diff)
mm: vmscan: move logic from balance_pgdat() to kswapd_shrink_zone()
balance_pgdat() is very long and some of the logic can and should be
internal to kswapd_shrink_zone(). Move it so the flow of balance_pgdat()
is marginally easier to follow.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Tested-by: Zlatko Calusic <zcalusic@bitsync.net>
Cc: dormando <dormando@rydia.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
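For orientation, below is a condensed, hypothetical sketch of what the per-zone loop in balance_pgdat() looks like once the patch is applied. It is not the literal kernel code: the setup of sc, lru_pages, nr_attempted, raise_priority and end_zone is omitted and assumed to happen as in the unmodified function, as are soft-limit reclaim and tracing. The point is that the watermark, balance-gap and compaction checks now live inside kswapd_shrink_zone(), so the caller only decides whether to raise the scanning priority.

	/*
	 * Condensed sketch of balance_pgdat()'s per-zone loop after this
	 * patch (not the literal patched code).
	 */
	for (i = 0; i <= end_zone; i++) {
		struct zone *zone = pgdat->node_zones + i;

		if (!populated_zone(zone))
			continue;

		/*
		 * kswapd_shrink_zone() now bails out on its own when the
		 * zone is already balanced, and returns true when enough
		 * pages were scanned to meet the high watermark at 100%
		 * efficiency, in which case there is no need to raise the
		 * scanning priority.
		 */
		if (kswapd_shrink_zone(zone, end_zone, &sc,
				       lru_pages, &nr_attempted))
			raise_priority = false;
	}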
-rw-r--r--	mm/vmscan.c	110
1 file changed, 54 insertions(+), 56 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a2d0c6842616..4a43c289b23a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2709,18 +2709,53 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
  * This is used to determine if the scanning priority needs to be raised.
  */
 static bool kswapd_shrink_zone(struct zone *zone,
+			       int classzone_idx,
 			       struct scan_control *sc,
 			       unsigned long lru_pages,
 			       unsigned long *nr_attempted)
 {
 	unsigned long nr_slab;
+	int testorder = sc->order;
+	unsigned long balance_gap;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	struct shrink_control shrink = {
 		.gfp_mask = sc->gfp_mask,
 	};
+	bool lowmem_pressure;
 
 	/* Reclaim above the high watermark. */
 	sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));
+
+	/*
+	 * Kswapd reclaims only single pages with compaction enabled. Trying
+	 * too hard to reclaim until contiguous free pages have become
+	 * available can hurt performance by evicting too much useful data
+	 * from memory. Do not reclaim more than needed for compaction.
+	 */
+	if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
+			compaction_suitable(zone, sc->order) !=
+				COMPACT_SKIPPED)
+		testorder = 0;
+
+	/*
+	 * We put equal pressure on every zone, unless one zone has way too
+	 * many pages free already. The "too many pages" is defined as the
+	 * high wmark plus a "gap" where the gap is either the low
+	 * watermark or 1% of the zone, whichever is smaller.
+	 */
+	balance_gap = min(low_wmark_pages(zone),
+		(zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+		KSWAPD_ZONE_BALANCE_GAP_RATIO);
+
+	/*
+	 * If there is no low memory pressure or the zone is balanced then no
+	 * reclaim is necessary
+	 */
+	lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone));
+	if (!lowmem_pressure && zone_balanced(zone, testorder,
+						balance_gap, classzone_idx))
+		return true;
+
 	shrink_zone(zone, sc);
 
 	reclaim_state->reclaimed_slab = 0;
@@ -2735,6 +2770,18 @@ static bool kswapd_shrink_zone(struct zone *zone,
 
 	zone_clear_flag(zone, ZONE_WRITEBACK);
 
+	/*
+	 * If a zone reaches its high watermark, consider it to be no longer
+	 * congested. It's possible there are dirty pages backed by congested
+	 * BDIs but as pressure is relieved, speculatively avoid congestion
+	 * waits.
+	 */
+	if (!zone->all_unreclaimable &&
+	    zone_balanced(zone, testorder, 0, classzone_idx)) {
+		zone_clear_flag(zone, ZONE_CONGESTED);
+		zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
+	}
+
 	return sc->nr_scanned >= sc->nr_to_reclaim;
 }
 
@@ -2870,8 +2917,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 		 */
 		for (i = 0; i <= end_zone; i++) {
 			struct zone *zone = pgdat->node_zones + i;
-			int testorder;
-			unsigned long balance_gap;
 
 			if (!populated_zone(zone))
 				continue;
@@ -2892,61 +2937,14 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 			sc.nr_reclaimed += nr_soft_reclaimed;
 
 			/*
-			 * We put equal pressure on every zone, unless
-			 * one zone has way too many pages free
-			 * already. The "too many pages" is defined
-			 * as the high wmark plus a "gap" where the
-			 * gap is either the low watermark or 1%
-			 * of the zone, whichever is smaller.
-			 */
-			balance_gap = min(low_wmark_pages(zone),
-				(zone->managed_pages +
-					KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
-				KSWAPD_ZONE_BALANCE_GAP_RATIO);
-			/*
-			 * Kswapd reclaims only single pages with compaction
-			 * enabled. Trying too hard to reclaim until contiguous
-			 * free pages have become available can hurt performance
-			 * by evicting too much useful data from memory.
-			 * Do not reclaim more than needed for compaction.
+			 * There should be no need to raise the scanning
+			 * priority if enough pages are already being scanned
+			 * that that high watermark would be met at 100%
+			 * efficiency.
 			 */
-			testorder = order;
-			if (IS_ENABLED(CONFIG_COMPACTION) && order &&
-					compaction_suitable(zone, order) !=
-						COMPACT_SKIPPED)
-				testorder = 0;
-
-			if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
-			    !zone_balanced(zone, testorder,
-					   balance_gap, end_zone)) {
-				/*
-				 * There should be no need to raise the
-				 * scanning priority if enough pages are
-				 * already being scanned that high
-				 * watermark would be met at 100% efficiency.
-				 */
-				if (kswapd_shrink_zone(zone, &sc, lru_pages,
-						       &nr_attempted))
-					raise_priority = false;
-			}
-
-			if (zone->all_unreclaimable) {
-				if (end_zone && end_zone == i)
-					end_zone--;
-				continue;
-			}
-
-			if (zone_balanced(zone, testorder, 0, end_zone))
-				/*
-				 * If a zone reaches its high watermark,
-				 * consider it to be no longer congested. It's
-				 * possible there are dirty pages backed by
-				 * congested BDIs but as pressure is relieved,
-				 * speculatively avoid congestion waits
-				 * or writing pages from kswapd context.
-				 */
-				zone_clear_flag(zone, ZONE_CONGESTED);
-				zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
 		}
 
 		/*
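Read as a whole, the patched kswapd_shrink_zone() roughly follows the shape below. This is a hypothetical condensation of the first two hunks above, not the literal resulting function: the slab shrinking, nr_attempted accounting and ZONE_WRITEBACK handling between those hunks are elided behind a placeholder comment, and the lowmem_pressure temporary is folded into the early-return test.

	static bool kswapd_shrink_zone(struct zone *zone, int classzone_idx,
				       struct scan_control *sc,
				       unsigned long lru_pages,
				       unsigned long *nr_attempted)
	{
		int testorder = sc->order;
		unsigned long balance_gap;

		/* Reclaim above the high watermark. */
		sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));

		/* With compaction, reclaim only enough order-0 pages for compaction. */
		if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
		    compaction_suitable(zone, sc->order) != COMPACT_SKIPPED)
			testorder = 0;

		/* Gap above the high wmark: min(low wmark, ~1% of the zone). */
		balance_gap = min(low_wmark_pages(zone),
			(zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
			KSWAPD_ZONE_BALANCE_GAP_RATIO);

		/* Already balanced and no lowmem pressure: no reclaim needed here. */
		if (!(buffer_heads_over_limit && is_highmem(zone)) &&
		    zone_balanced(zone, testorder, balance_gap, classzone_idx))
			return true;

		shrink_zone(zone, sc);
		/* ... slab shrinking and writeback-flag handling elided ... */

		/* Balanced now: speculatively clear the congestion/dirty-tail flags. */
		if (!zone->all_unreclaimable &&
		    zone_balanced(zone, testorder, 0, classzone_idx)) {
			zone_clear_flag(zone, ZONE_CONGESTED);
			zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
		}

		/* True: scanned enough that the caller need not raise the priority. */
		return sc->nr_scanned >= sc->nr_to_reclaim;
	}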