about summary refs log tree commit diff stats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
authorMel Gorman <mgorman@techsingularity.net>2016-07-28 18:45:53 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-07-28 19:07:41 -0400
commit79dafcdca31386cfe0fe95b1c7f30a85209af166 (patch)
tree9954c30bb6048650106c03bbd5e9aaa8e79b9bfd /mm/vmscan.c
parent38087d9b0360987a6db46c2c2c4ece37cd048abe (diff)
mm, vmscan: by default have direct reclaim only shrink once per node
Direct reclaim iterates over all zones in the zonelist, shrinking them, but this is in conflict with node-based reclaim.  In the default case, only shrink once per node.

Link: http://lkml.kernel.org/r/1467970510-21195-11-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 905c60473126..01fe4708e404 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2552,14 +2552,6 @@ static inline bool compaction_ready(struct zone *zone, int order, int classzone_
  * try to reclaim pages from zones which will satisfy the caller's allocation
  * request.
  *
- * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
- * Because:
- * a) The caller may be trying to free *extra* pages to satisfy a higher-order
- *    allocation or
- * b) The target zone may be at high_wmark_pages(zone) but the lower zones
- *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
- *    zone defense algorithm.
- *
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
@@ -2571,6 +2563,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 	unsigned long nr_soft_scanned;
 	gfp_t orig_mask;
 	enum zone_type classzone_idx;
+	pg_data_t *last_pgdat = NULL;
 
 	/*
 	 * If the number of buffer_heads in the machine exceeds the maximum
@@ -2630,6 +2623,15 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 		}
 
 		/*
+		 * Shrink each node in the zonelist once. If the
+		 * zonelist is ordered by zone (not the default) then a
+		 * node may be shrunk multiple times but in that case
+		 * the user prefers lower zones being preserved.
+		 */
+		if (zone->zone_pgdat == last_pgdat)
+			continue;
+
+		/*
 		 * This steals pages from memory cgroups over softlimit
 		 * and returns the number of reclaimed pages and
 		 * scanned pages. This works for global memory pressure
@@ -2644,6 +2646,10 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 			/* need some check for avoid more shrink_zone() */
 		}
 
+		/* See comment about same check for global reclaim above */
+		if (zone->zone_pgdat == last_pgdat)
+			continue;
+		last_pgdat = zone->zone_pgdat;
 		shrink_node(zone->zone_pgdat, sc, classzone_idx);
 	}
 