path: root/mm/vmscan.c
author	Mel Gorman <mgorman@techsingularity.net>	2016-07-28 18:46:05 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-28 19:07:41 -0400
commit	ef8f2327996b5c20f11420f64e439e87c7a01604 (patch)
tree	0ea9bf78d88e1207005fc5310fe812d1edb0efc2 /mm/vmscan.c
parent	a9dd0a83104c01269ea36a9b4ec42b51edf85427 (diff)
mm, memcg: move memcg limit enforcement from zones to nodes
Memcg needs adjustment after moving LRUs to the node. Limits are tracked per memcg but the soft-limit excess is tracked per zone. As global page reclaim is based on the node, it is easy to imagine a situation where a zone soft limit is exceeded even though the memcg limit is fine.

This patch moves the soft limit tree to the node. Technically, all the variable names should also change, but people are already familiar with the meaning of "mz" even if "mn" would be a more appropriate name now.

Link: http://lkml.kernel.org/r/1467970510-21195-15-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
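The hunks below only switch the mm/vmscan.c call sites from zones to nodes; the soft limit tree itself is relocated by the companion mm/memcontrol.c change in the same series. For orientation, here is a condensed sketch of the per-node layout the series converges on (based on mm/memcontrol.c around v4.8; the comments and simplifications are mine, so treat this as an illustration rather than verbatim kernel source):

/*
 * Sketch: soft-limit excess is now tracked per memcg *per node*,
 * in one RB tree per node ordered by how far usage exceeds the
 * soft limit, instead of one tree per zone.
 */
struct mem_cgroup_per_node {
	struct lruvec		lruvec;		/* per-node, per-memcg LRU lists */
	struct rb_node		tree_node;	/* link in the node's soft-limit tree */
	unsigned long		usage_in_excess;/* pages over the soft limit */
	bool			on_tree;	/* queued in the tree? */
	struct mem_cgroup	*memcg;		/* back pointer to the owning memcg */
};

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;			/* memcgs sorted by usage_in_excess */
	spinlock_t lock;
};

/* One tree per node rather than one per zone: */
struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};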
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c | 19 +++++++++----------
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 46f7a71ed13b..9f6e673efba7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2229,8 +2229,7 @@ static inline void init_tlb_ubc(void)
 static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
 			      struct scan_control *sc, unsigned long *lru_pages)
 {
-	struct zone *zone = &pgdat->node_zones[sc->reclaim_idx];
-	struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, zone, memcg);
+	struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 	unsigned long nr[NR_LRU_LISTS];
 	unsigned long targets[NR_LRU_LISTS];
 	unsigned long nr_to_scan;
@@ -2439,7 +2438,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc,
 	do {
 		struct mem_cgroup *root = sc->target_mem_cgroup;
 		struct mem_cgroup_reclaim_cookie reclaim = {
-			.zone = &pgdat->node_zones[classzone_idx],
+			.pgdat = pgdat,
 			.priority = sc->priority,
 		};
 		unsigned long node_lru_pages = 0;
@@ -2647,7 +2646,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 		 * and balancing, not for a memcg's limit.
 		 */
 		nr_soft_scanned = 0;
-		nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+		nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
 						sc->order, sc->gfp_mask,
 						&nr_soft_scanned);
 		sc->nr_reclaimed += nr_soft_reclaimed;
@@ -2917,7 +2916,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 
 unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
 						gfp_t gfp_mask, bool noswap,
-						struct zone *zone,
+						pg_data_t *pgdat,
 						unsigned long *nr_scanned)
 {
 	struct scan_control sc = {
@@ -2944,7 +2943,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
 	 * will pick up pages from other mem cgroup's as well. We hack
 	 * the priority and make it zero.
 	 */
-	shrink_node_memcg(zone->zone_pgdat, memcg, &sc, &lru_pages);
+	shrink_node_memcg(pgdat, memcg, &sc, &lru_pages);
 
 	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
 
@@ -2994,7 +2993,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 #endif
 
 static void age_active_anon(struct pglist_data *pgdat,
-			struct zone *zone, struct scan_control *sc)
+			struct scan_control *sc)
 {
 	struct mem_cgroup *memcg;
 
@@ -3003,7 +3002,7 @@ static void age_active_anon(struct pglist_data *pgdat,
 
 	memcg = mem_cgroup_iter(NULL, NULL, NULL);
 	do {
-		struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, zone, memcg);
+		struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 
 		if (inactive_list_is_low(lruvec, false))
 			shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
@@ -3193,7 +3192,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		 * pages are rotated regardless of classzone as this is
 		 * about consistent aging.
 		 */
-		age_active_anon(pgdat, &pgdat->node_zones[MAX_NR_ZONES - 1], &sc);
+		age_active_anon(pgdat, &sc);
 
 		/*
 		 * If we're getting trouble reclaiming, start doing writepage
@@ -3205,7 +3204,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		/* Call soft limit reclaim before calling shrink_node. */
 		sc.nr_scanned = 0;
 		nr_soft_scanned = 0;
-		nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, sc.order,
+		nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
 						sc.gfp_mask, &nr_soft_scanned);
 		sc.nr_reclaimed += nr_soft_reclaimed;
 
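For reference, the call sites above line up with these post-series prototypes. This is a condensed sketch of include/linux/memcontrol.h after the series; since this page's diffstat covers mm/vmscan.c only, treat the exact header location and formatting as assumptions on my part:

/* Soft limit reclaim now targets a node, not a zone. */
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

/* Per-memcg shrink of a single node, used by soft limit reclaim. */
unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
				     gfp_t gfp_mask, bool noswap,
				     pg_data_t *pgdat,
				     unsigned long *nr_scanned);

/* The lruvec lookup loses its zone argument entirely. */
struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
				 struct mem_cgroup *memcg);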