path: root/mm/vmscan.c
author		Michal Hocko <mhocko@suse.cz>	2013-09-12 18:13:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-12 18:38:00 -0400
commit		de57780dc659f95b17ccb649f003278dde0b5b86 (patch)
tree		d2493cc412c16946f3ead9158a61b26dd1f0c45a	/mm/vmscan.c
parent		a5b7c87f92076352dbff2fe0423ec255e1c9a71b (diff)
memcg: enhance memcg iterator to support predicates
The caller of the iterator might know that some nodes or even subtrees should be skipped, but there is no way to tell the iterator about that, so the only choice left is to let the iterator visit every node and do the selection outside of the iterating code. This, however, doesn't scale well for hierarchies with many groups where only a few groups are interesting.

This patch adds a mem_cgroup_iter_cond variant of the iterator with a callback which gets called for every visited node. There are three possible ways the callback can influence the walk: the node is visited, it is skipped but the tree walk continues down the tree, or the whole subtree of the current group is skipped.

[hughd@google.com: fix memcg-less page reclaim]
Signed-off-by: Michal Hocko <mhocko@suse.cz>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Glauber Costa <glommer@openvz.org>
Cc: Greg Thelen <gthelen@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Ying Han <yinghan@google.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
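For illustration, here is a minimal, self-contained userspace sketch of the predicate idea the message describes; it is not the kernel API. The node, filter_t and walk_filtered names and the VISIT/SKIP/SKIP_TREE values are assumptions made up for this example, standing in for the mem_cgroup_iter_cond()/mem_cgroup_iter_filter machinery the patch adds.

#include <stdio.h>

/* Tri-state result a filter callback can return for each node (illustrative only). */
enum filter_result {
	VISIT,		/* process this node, then descend into it */
	SKIP,		/* do not process it, but still descend into it */
	SKIP_TREE,	/* do not process it and prune its whole subtree */
};

struct node {
	const char *name;
	int eligible;
	struct node *child;	/* first child */
	struct node *sibling;	/* next sibling */
};

typedef enum filter_result (*filter_t)(struct node *n);

/* Pre-order walk that consults the filter at every node it reaches. */
static void walk_filtered(struct node *n, filter_t filter,
			  void (*visit)(struct node *))
{
	if (!n)
		return;

	enum filter_result res = filter ? filter(n) : VISIT;

	if (res == VISIT)
		visit(n);
	if (res != SKIP_TREE)
		walk_filtered(n->child, filter, visit);

	walk_filtered(n->sibling, filter, visit);
}

/* Example predicate: prune subtrees rooted at ineligible groups. */
static enum filter_result only_eligible(struct node *n)
{
	return n->eligible ? VISIT : SKIP_TREE;
}

static void print_node(struct node *n)
{
	printf("visiting %s\n", n->name);
}

int main(void)
{
	struct node c = { "C", 1, NULL, NULL };
	struct node b = { "B", 0, &c, NULL };	/* ineligible: C is pruned too */
	struct node a = { "A", 1, &b, NULL };

	walk_filtered(&a, only_eligible, print_node);
	return 0;
}

Returning a tri-state rather than a boolean is what lets the caller prune a whole uninteresting subtree instead of merely skipping individual nodes.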
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	16
1 file changed, 5 insertions(+), 11 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1896e7ca494b..f2e35099508b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2151,21 +2151,16 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 			.zone = zone,
 			.priority = sc->priority,
 		};
-		struct mem_cgroup *memcg;
+		struct mem_cgroup *memcg = NULL;
+		mem_cgroup_iter_filter filter = (soft_reclaim) ?
+			mem_cgroup_soft_reclaim_eligible : NULL;
 
 		nr_reclaimed = sc->nr_reclaimed;
 		nr_scanned = sc->nr_scanned;
 
-		memcg = mem_cgroup_iter(root, NULL, &reclaim);
-		do {
+		while ((memcg = mem_cgroup_iter_cond(root, memcg, &reclaim, filter))) {
 			struct lruvec *lruvec;
 
-			if (soft_reclaim &&
-			    !mem_cgroup_soft_reclaim_eligible(memcg, root)) {
-				memcg = mem_cgroup_iter(root, memcg, &reclaim);
-				continue;
-			}
-
 			lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 
 			shrink_lruvec(lruvec, sc);
@@ -2185,8 +2180,7 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 				mem_cgroup_iter_break(root, memcg);
 				break;
 			}
-			memcg = mem_cgroup_iter(root, memcg, &reclaim);
-		} while (memcg);
+		}
 
 		vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
 			   sc->nr_scanned - nr_scanned,