author    Mel Gorman <mgorman@techsingularity.net>    2016-07-28 18:45:59 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-07-28 19:07:41 -0400
commit    86c79f6b5426ce118d32c73fa9e328f0a86ab590
tree      8cc32bcea6199c34326976a043b49c4df82a5851
parent    6256c6b499a1689d62ddfcb38d8048f9cd177070
mm: vmscan: do not reclaim from kswapd if there is any eligible zone
kswapd scans from highest to lowest for a zone that requires balancing.
This was necessary when reclaim was per-zone to fairly age pages on
lower zones.  Now that we are reclaiming on a per-node basis, any
eligible zone can be used and pages will still be aged fairly.

This patch avoids reclaiming excessively unless buffer_heads are over
the limit and it's necessary to reclaim from a higher zone than
requested by the waker of kswapd to relieve low memory pressure.

[hillf.zj@alibaba-inc.com: Force kswapd reclaim no more than needed]
Link: http://lkml.kernel.org/r/1466518566-30034-12-git-send-email-mgorman@techsingularity.net
Link: http://lkml.kernel.org/r/1467970510-21195-13-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
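[Editor's illustration] The new behaviour described above can be modelled outside the kernel. The following is a minimal userspace sketch, not kernel code: populated[] and balanced[] are hypothetical stand-ins for populated_zone() and zone_balanced(), and the loop mirrors the new high-to-low check that lets kswapd skip reclaim as soon as any eligible zone is balanced.

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_NR_ZONES 4	/* e.g. ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_MOVABLE */

	/* Hypothetical stand-ins for populated_zone() and zone_balanced(). */
	static bool populated[MAX_NR_ZONES] = { true, true, true, false };
	static bool balanced[MAX_NR_ZONES]  = { false, false, true, false };

	/*
	 * Mirror of the new check: scan from the highest requested zone down
	 * to DMA and report whether any eligible zone is already balanced,
	 * in which case kswapd can skip reclaim entirely.
	 */
	static bool any_eligible_zone_balanced(int classzone_idx)
	{
		for (int i = classzone_idx; i >= 0; i--) {
			if (!populated[i])
				continue;
			if (balanced[i])
				return true;
		}
		return false;
	}

	int main(void)
	{
		if (any_eligible_zone_balanced(MAX_NR_ZONES - 1))
			printf("eligible zone balanced: skip reclaim\n");
		else
			printf("no eligible zone balanced: reclaim\n");
		return 0;
	}

With the example data above, ZONE_NORMAL (index 2) is populated and balanced, so the scan stops there and reclaim is skipped; scanning high-to-low matches the order in which allocations prefer zones.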
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 59
1 file changed, 27 insertions(+), 32 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8b39b903bd14..b7a276f4b1b0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3144,31 +3144,39 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 
 		sc.nr_reclaimed = 0;
 
-		/* Scan from the highest requested zone to dma */
-		for (i = classzone_idx; i >= 0; i--) {
-			zone = pgdat->node_zones + i;
-			if (!populated_zone(zone))
-				continue;
-
-			/*
-			 * If the number of buffer_heads in the machine
-			 * exceeds the maximum allowed level and this node
-			 * has a highmem zone, force kswapd to reclaim from
-			 * it to relieve lowmem pressure.
-			 */
-			if (buffer_heads_over_limit && is_highmem_idx(i)) {
-				classzone_idx = i;
-				break;
-			}
-
-			if (!zone_balanced(zone, order, 0)) {
-				classzone_idx = i;
-				break;
-			}
-		}
-
-		if (i < 0)
-			goto out;
+		/*
+		 * If the number of buffer_heads in the machine exceeds the
+		 * maximum allowed level then reclaim from all zones. This is
+		 * not specific to highmem as highmem may not exist but it is
+		 * expected that buffer_heads are stripped in writeback.
+		 */
+		if (buffer_heads_over_limit) {
+			for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
+				zone = pgdat->node_zones + i;
+				if (!populated_zone(zone))
+					continue;
+
+				classzone_idx = i;
+				break;
+			}
+		}
+
+		/*
+		 * Only reclaim if there are no eligible zones. Check from
+		 * high to low zone as allocations prefer higher zones.
+		 * Scanning from low to high zone would allow congestion to be
+		 * cleared during a very small window when a small low
+		 * zone was balanced even under extreme pressure when the
+		 * overall node may be congested.
+		 */
+		for (i = classzone_idx; i >= 0; i--) {
+			zone = pgdat->node_zones + i;
+			if (!populated_zone(zone))
+				continue;
+
+			if (zone_balanced(zone, sc.order, classzone_idx))
+				goto out;
+		}
 
 		/*
 		 * Do some background aging of the anon list, to give
@@ -3214,19 +3222,6 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 			break;
 
 		/*
-		 * Stop reclaiming if any eligible zone is balanced and clear
-		 * node writeback or congested.
-		 */
-		for (i = 0; i <= classzone_idx; i++) {
-			zone = pgdat->node_zones + i;
-			if (!populated_zone(zone))
-				continue;
-
-			if (zone_balanced(zone, sc.order, classzone_idx))
-				goto out;
-		}
-
-		/*
 		 * Raise priority if scanning rate is too low or there was no
 		 * progress in reclaiming pages
 		 */
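[Editor's illustration] The second hunk removes the mid-loop balance check because the equivalent check now runs once before reclaim starts, as added in the first hunk. The buffer_heads_over_limit branch from that hunk can also be modelled as a minimal userspace sketch under the same assumptions as above (populated[] is a hypothetical stand-in for populated_zone()); it only illustrates how the target index is raised to the highest populated zone, not the actual kernel data structures.

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_NR_ZONES 4

	/* Hypothetical stand-in for populated_zone(); example data only. */
	static bool populated[MAX_NR_ZONES] = { true, true, true, false };

	/*
	 * Model of the buffer_heads_over_limit branch: raise classzone_idx
	 * to the highest populated zone so that reclaim covers all zones,
	 * stripping buffer_heads everywhere rather than only in the zones
	 * requested by the waker of kswapd.
	 */
	static int widen_classzone_idx(int classzone_idx)
	{
		for (int i = MAX_NR_ZONES - 1; i >= 0; i--) {
			if (populated[i])
				return i;
		}
		return classzone_idx;
	}

	int main(void)
	{
		printf("classzone_idx widened from 0 to %d\n", widen_classzone_idx(0));
		return 0;
	}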