diff options
author    Mel Gorman <mgorman@techsingularity.net>  2016-07-28 18:45:46 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-07-28 19:07:41 -0400
commit    31483b6ad205784d3a82240865135bef5c97c105 (patch)
tree      5fddc5be01647a316fdcab6f511f62677edcc684 /mm/vmscan.c
parent    1d82de618ddde0f1164e640f79af152f01994c18 (diff)
mm, vmscan: remove balance gap
The balance gap was introduced to apply equal pressure to all zones when
reclaiming for a higher zone. With node-based LRU, the need for the
balance gap is removed and the code is dead so remove it.
[vbabka@suse.cz: Also remove KSWAPD_ZONE_BALANCE_GAP_RATIO]
Link: http://lkml.kernel.org/r/1467970510-21195-9-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
 mm/vmscan.c | 19 ++++++-------------
 1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7b382b90b145..a52167eabc96 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2518,7 +2518,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc,
 	 */
 static inline bool compaction_ready(struct zone *zone, int order, int classzone_idx)
 {
-	unsigned long balance_gap, watermark;
+	unsigned long watermark;
 	bool watermark_ok;
 
 	/*
@@ -2527,9 +2527,7 @@ static inline bool compaction_ready(struct zone *zone, int order, int classzone_
 	 * there is a buffer of free pages available to give compaction
 	 * a reasonable chance of completing and allocating the page
 	 */
-	balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP(
-			zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO));
-	watermark = high_wmark_pages(zone) + balance_gap + (2UL << order);
+	watermark = high_wmark_pages(zone) + (2UL << order);
 	watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, classzone_idx);
 
 	/*
@@ -3000,10 +2998,9 @@ static void age_active_anon(struct pglist_data *pgdat,
 	} while (memcg);
 }
 
-static bool zone_balanced(struct zone *zone, int order,
-			unsigned long balance_gap, int classzone_idx)
+static bool zone_balanced(struct zone *zone, int order, int classzone_idx)
 {
-	unsigned long mark = high_wmark_pages(zone) + balance_gap;
+	unsigned long mark = high_wmark_pages(zone);
 
 	return zone_watermark_ok_safe(zone, order, mark, classzone_idx);
 }
@@ -3045,7 +3042,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
 		if (!populated_zone(zone))
 			continue;
 
-		if (zone_balanced(zone, order, 0, classzone_idx))
+		if (zone_balanced(zone, order, classzone_idx))
 			return true;
 	}
 
@@ -3148,7 +3145,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 				break;
 			}
 
-			if (!zone_balanced(zone, order, 0, 0)) {
+			if (!zone_balanced(zone, order, 0)) {
 				classzone_idx = i;
 				break;
 			} else {
@@ -3216,7 +3213,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		if (!populated_zone(zone))
 			continue;
 
-		if (zone_balanced(zone, sc.order, 0, classzone_idx)) {
+		if (zone_balanced(zone, sc.order, classzone_idx)) {
 			clear_bit(PGDAT_CONGESTED, &pgdat->flags);
 			clear_bit(PGDAT_DIRTY, &pgdat->flags);
 			goto out;
@@ -3427,7 +3424,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
 	}
 	if (!waitqueue_active(&pgdat->kswapd_wait))
 		return;
-	if (zone_balanced(zone, order, 0, 0))
+	if (zone_balanced(zone, order, 0))
 		return;
 
 	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);