author     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>   2011-06-27 19:18:12 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>       2011-06-27 21:00:13 -0400
commit     ac34a1a3c39da0a1b9188d12a9ce85506364ed2a
tree       f74f34047c6bc516e29196685cc8671aff4a02d2 /mm
parent     26c4caea9d697043cc5a458b96411b86d7f6babd
memcg: fix direct softlimit reclaim to be called in limit path
Commit d149e3b25d7c ("memcg: add the soft_limit reclaim in global direct
reclaim") added a softlimit hook to shrink_zones(). With it, soft limit
reclaim is called as

  try_to_free_pages()
    do_try_to_free_pages()
      shrink_zones()
        mem_cgroup_soft_limit_reclaim()

so direct reclaim is now aware of memcg softlimit hints.
However, the memory cgroup's "limit" reclaim path can also reach the
softlimit shrinker:

  try_to_free_mem_cgroup_pages()
    do_try_to_free_pages()
      shrink_zones()
        mem_cgroup_soft_limit_reclaim()
This causes global reclaim whenever a memcg hits its own limit, which is
a bug: mem_cgroup_soft_limit_reclaim() should only be called when
scanning_global_lru(sc) == true.
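For illustration, the per-zone loop of shrink_zones() after the fix looks
roughly like this (condensed from the diff below; the populated_zone(),
cpuset and all_unreclaimable checks are elided):

  for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                  gfp_zone(sc->gfp_mask), sc->nodemask) {
          if (scanning_global_lru(sc)) {
                  /* Steal pages from memcgs over their soft limit only
                   * under global memory pressure, never when a memcg is
                   * merely hitting its own hard limit. */
                  nr_soft_scanned = 0;
                  nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
                                          sc->order, sc->gfp_mask,
                                          &nr_soft_scanned);
                  sc->nr_reclaimed += nr_soft_reclaimed;
                  sc->nr_scanned += nr_soft_scanned;
          }
          shrink_zone(priority, zone, sc);
  }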
That commit also added a variable "total_scanned" for counting
softlimit-scanned pages, but it is not really a "total". This patch
removes the variable and updates sc->nr_scanned instead. This will
affect shrink_slab()'s scan condition, but the pages scanned by the
softlimit path are on the global LRU, so I think this change makes
sense.
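As a rough sketch of where that counter ends up (paraphrasing the
priority loop of do_try_to_free_pages() in this tree, not a verbatim
quote; "shrink" and "lru_pages" come from the surrounding function), the
softlimit-scanned pages now feed the same sc->nr_scanned that drives
slab shrinking under global reclaim:

  sc->nr_scanned = 0;
  shrink_zones(priority, zonelist, sc);  /* softlimit scans add to sc->nr_scanned */

  if (scanning_global_lru(sc)) {
          /* slab pressure scales with pages scanned on the global LRU */
          shrink_slab(shrink, sc->nr_scanned, lru_pages);
          /* reclaim_state accounting follows in the real code */
  }
  total_scanned += sc->nr_scanned;       /* do_try_to_free_pages()'s own local counter */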
TODO: avoid too much scanning of a zone when softlimit did enough work.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Ying Han <yinghan@google.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--   mm/vmscan.c   27
1 file changed, 15 insertions, 12 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8ff834e19c24..4f49535d4cd3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1995,14 +1995,13 @@ restart:
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
+static void shrink_zones(int priority, struct zonelist *zonelist,
                                         struct scan_control *sc)
 {
         struct zoneref *z;
         struct zone *zone;
         unsigned long nr_soft_reclaimed;
         unsigned long nr_soft_scanned;
-        unsigned long total_scanned = 0;
 
         for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                         gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2017,19 +2016,23 @@ static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
                                 continue;
                         if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                 continue;       /* Let kswapd poll it */
+                        /*
+                         * This steals pages from memory cgroups over softlimit
+                         * and returns the number of reclaimed pages and
+                         * scanned pages. This works for global memory pressure
+                         * and balancing, not for a memcg's limit.
+                         */
+                        nr_soft_scanned = 0;
+                        nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+                                                sc->order, sc->gfp_mask,
+                                                &nr_soft_scanned);
+                        sc->nr_reclaimed += nr_soft_reclaimed;
+                        sc->nr_scanned += nr_soft_scanned;
+                        /* need some check for avoid more shrink_zone() */
                 }
 
-                nr_soft_scanned = 0;
-                nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
-                                        sc->order, sc->gfp_mask,
-                                        &nr_soft_scanned);
-                sc->nr_reclaimed += nr_soft_reclaimed;
-                total_scanned += nr_soft_scanned;
-
                 shrink_zone(priority, zone, sc);
         }
-
-        return total_scanned;
 }
 
 static bool zone_reclaimable(struct zone *zone)
@@ -2094,7 +2097,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                 sc->nr_scanned = 0;
                 if (!priority)
                         disable_swap_token(sc->mem_cgroup);
-                total_scanned += shrink_zones(priority, zonelist, sc);
+                shrink_zones(priority, zonelist, sc);
                 /*
                  * Don't shrink slabs when reclaiming memory from
                  * over limit cgroups