path: root/mm/vmscan.c
author	Andrew Morton <akpm@linux-foundation.org>	2013-09-24 18:27:41 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-24 20:00:26 -0400
commit	0608f43da64a1f1c42507304b5f25bc8b1227aa4 (patch)
tree	3138a251439bee1f12bd176f41f3fc63e194f455	/mm/vmscan.c
parent	bb4cc1a8b5eaf3b9e5707d7c270400b05d11a2b7 (diff)
revert "memcg, vmscan: integrate soft reclaim tighter with zone shrinking code"
Revert commit 3b38722efd9f ("memcg, vmscan: integrate soft reclaim tighter with zone shrinking code").

I merged this prematurely - Michal and Johannes still disagree about the overall design direction, and the future remains unclear.

Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	62
1 file changed, 26 insertions, 36 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0e081cada4ba..beb35778c69f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -139,21 +139,11 @@ static bool global_reclaim(struct scan_control *sc)
 {
 	return !sc->target_mem_cgroup;
 }
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
-	return !mem_cgroup_disabled() && global_reclaim(sc);
-}
 #else
 static bool global_reclaim(struct scan_control *sc)
 {
 	return true;
 }
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
-	return false;
-}
 #endif
 
 unsigned long zone_reclaimable_pages(struct zone *zone)
@@ -2174,8 +2164,7 @@ static inline bool should_continue_reclaim(struct zone *zone,
 	}
 }
 
-static void
-__shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
+static void shrink_zone(struct zone *zone, struct scan_control *sc)
 {
 	unsigned long nr_reclaimed, nr_scanned;
 
@@ -2194,12 +2183,6 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 	do {
 		struct lruvec *lruvec;
 
-		if (soft_reclaim &&
-		    !mem_cgroup_soft_reclaim_eligible(memcg)) {
-			memcg = mem_cgroup_iter(root, memcg, &reclaim);
-			continue;
-		}
-
 		lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 
 		shrink_lruvec(lruvec, sc);
@@ -2230,24 +2213,6 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 					 sc->nr_scanned - nr_scanned, sc));
 }
 
-
-static void shrink_zone(struct zone *zone, struct scan_control *sc)
-{
-	bool do_soft_reclaim = mem_cgroup_should_soft_reclaim(sc);
-	unsigned long nr_scanned = sc->nr_scanned;
-
-	__shrink_zone(zone, sc, do_soft_reclaim);
-
-	/*
-	 * No group is over the soft limit or those that are do not have
-	 * pages in the zone we are reclaiming so we have to reclaim everybody
-	 */
-	if (do_soft_reclaim && (sc->nr_scanned == nr_scanned)) {
-		__shrink_zone(zone, sc, false);
-		return;
-	}
-}
-
 /* Returns true if compaction should go ahead for a high-order request */
 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 {
@@ -2309,6 +2274,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 {
 	struct zoneref *z;
 	struct zone *zone;
+	unsigned long nr_soft_reclaimed;
+	unsigned long nr_soft_scanned;
 	bool aborted_reclaim = false;
 
 	/*
@@ -2348,6 +2315,18 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 					continue;
 				}
 			}
+			/*
+			 * This steals pages from memory cgroups over softlimit
+			 * and returns the number of reclaimed pages and
+			 * scanned pages. This works for global memory pressure
+			 * and balancing, not for a memcg's limit.
+			 */
+			nr_soft_scanned = 0;
+			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+						sc->order, sc->gfp_mask,
+						&nr_soft_scanned);
+			sc->nr_reclaimed += nr_soft_reclaimed;
+			sc->nr_scanned += nr_soft_scanned;
 			/* need some check for avoid more shrink_zone() */
 		}
 
@@ -2941,6 +2920,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 {
 	int i;
 	int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
+	unsigned long nr_soft_reclaimed;
+	unsigned long nr_soft_scanned;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.priority = DEF_PRIORITY,
@@ -3055,6 +3036,15 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 
 			sc.nr_scanned = 0;
 
+			nr_soft_scanned = 0;
+			/*
+			 * Call soft limit reclaim before calling shrink_zone.
+			 */
+			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+							order, sc.gfp_mask,
+							&nr_soft_scanned);
+			sc.nr_reclaimed += nr_soft_reclaimed;
+
 			/*
 			 * There should be no need to raise the scanning
 			 * priority if enough pages are already being scanned
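
For orientation, the net effect of the revert is that soft-limit reclaim once again runs as its own pass ahead of the regular per-zone shrinking, instead of being folded into the memcg walk inside shrink_zone(). Below is a minimal, self-contained sketch of that restored ordering, written as ordinary userspace C so it compiles on its own; the scan_control struct here and the two model_* helpers are simplified stand-ins for the kernel's scan_control, mem_cgroup_soft_limit_reclaim() and shrink_zone(), not actual kernel code.

/*
 * Simplified model of the control flow restored by this revert:
 * per zone, a soft-limit pass runs first and its reclaimed/scanned
 * counts are folded into scan_control, then the normal zone
 * shrinking pass runs. All types and helpers are illustrative.
 */
#include <stdio.h>

struct scan_control {
	unsigned long nr_reclaimed;
	unsigned long nr_scanned;
	int order;
};

/* Stand-in for mem_cgroup_soft_limit_reclaim(): reclaim from groups
 * over their soft limit, report pages scanned via *nr_scanned. */
static unsigned long model_soft_limit_reclaim(int zone, int order,
					      unsigned long *nr_scanned)
{
	(void)zone;
	(void)order;
	*nr_scanned = 32;	/* pretend we scanned 32 pages */
	return 8;		/* and reclaimed 8 of them */
}

/* Stand-in for shrink_zone(): the regular per-zone reclaim pass. */
static void model_shrink_zone(int zone, struct scan_control *sc)
{
	(void)zone;
	sc->nr_scanned += 128;
	sc->nr_reclaimed += 16;
}

int main(void)
{
	struct scan_control sc = { .order = 0 };
	unsigned long nr_soft_reclaimed, nr_soft_scanned;
	int zone;

	for (zone = 0; zone < 3; zone++) {
		/* Soft-limit pass first, as in the restored
		 * shrink_zones()/balance_pgdat() hunks above. */
		nr_soft_scanned = 0;
		nr_soft_reclaimed = model_soft_limit_reclaim(zone, sc.order,
							     &nr_soft_scanned);
		sc.nr_reclaimed += nr_soft_reclaimed;
		sc.nr_scanned += nr_soft_scanned;

		/* Then the normal zone shrinking. */
		model_shrink_zone(zone, &sc);
	}

	printf("reclaimed=%lu scanned=%lu\n", sc.nr_reclaimed, sc.nr_scanned);
	return 0;
}

The point the sketch illustrates is the accounting: the soft-limit pass reports its own reclaimed and scanned counts, which are added into scan_control before the normal pass runs, which is exactly the pattern reinstated in the shrink_zones() and balance_pgdat() hunks of this diff.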