about summary refs log tree commit diff stats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
authorJohannes Weiner <hannes@cmpxchg.org>2014-08-06 19:06:12 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-08-06 21:01:18 -0400
commit0b06496a338e83627dc5f0d25323e7a1ae9cb87d (patch)
tree87f1f3cbdf0a32dfbf96a177401dcfa7e06b007a /mm/vmscan.c
parent8d07429319b2836604061f48f7e3dfe78acc060c (diff)
mm: vmscan: rework compaction-ready signaling in direct reclaim
Page reclaim for a higher-order page runs until compaction is ready, then aborts and signals this situation through the return value of shrink_zones(). This is an oddly specific signal to encode in the return value of shrink_zones(), though, and can be quite confusing. Introduce sc->compaction_ready and signal the compactability of the zones out-of-band to free up the return value of shrink_zones() for actual zone reclaimability.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Michal Hocko <mhocko@suse.cz>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--mm/vmscan.c70
1 file changed, 32 insertions, 38 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 19b5b8016209..6f43df4a5253 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -65,6 +65,9 @@ struct scan_control {
65 /* Number of pages freed so far during a call to shrink_zones() */ 65 /* Number of pages freed so far during a call to shrink_zones() */
66 unsigned long nr_reclaimed; 66 unsigned long nr_reclaimed;
67 67
68 /* One of the zones is ready for compaction */
69 int compaction_ready;
70
68 /* How many pages shrink_list() should reclaim */ 71 /* How many pages shrink_list() should reclaim */
69 unsigned long nr_to_reclaim; 72 unsigned long nr_to_reclaim;
70 73
@@ -2292,15 +2295,11 @@ static void shrink_zone(struct zone *zone, struct scan_control *sc)
2292} 2295}
2293 2296
2294/* Returns true if compaction should go ahead for a high-order request */ 2297/* Returns true if compaction should go ahead for a high-order request */
2295static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) 2298static inline bool compaction_ready(struct zone *zone, int order)
2296{ 2299{
2297 unsigned long balance_gap, watermark; 2300 unsigned long balance_gap, watermark;
2298 bool watermark_ok; 2301 bool watermark_ok;
2299 2302
2300 /* Do not consider compaction for orders reclaim is meant to satisfy */
2301 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
2302 return false;
2303
2304 /* 2303 /*
2305 * Compaction takes time to run and there are potentially other 2304 * Compaction takes time to run and there are potentially other
2306 * callers using the pages just freed. Continue reclaiming until 2305 * callers using the pages just freed. Continue reclaiming until
@@ -2309,18 +2308,18 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
2309 */ 2308 */
2310 balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP( 2309 balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP(
2311 zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO)); 2310 zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO));
2312 watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order); 2311 watermark = high_wmark_pages(zone) + balance_gap + (2UL << order);
2313 watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0); 2312 watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
2314 2313
2315 /* 2314 /*
2316 * If compaction is deferred, reclaim up to a point where 2315 * If compaction is deferred, reclaim up to a point where
2317 * compaction will have a chance of success when re-enabled 2316 * compaction will have a chance of success when re-enabled
2318 */ 2317 */
2319 if (compaction_deferred(zone, sc->order)) 2318 if (compaction_deferred(zone, order))
2320 return watermark_ok; 2319 return watermark_ok;
2321 2320
2322 /* If compaction is not ready to start, keep reclaiming */ 2321 /* If compaction is not ready to start, keep reclaiming */
2323 if (!compaction_suitable(zone, sc->order)) 2322 if (!compaction_suitable(zone, order))
2324 return false; 2323 return false;
2325 2324
2326 return watermark_ok; 2325 return watermark_ok;
@@ -2341,20 +2340,14 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
2341 * 2340 *
2342 * If a zone is deemed to be full of pinned pages then just give it a light 2341 * If a zone is deemed to be full of pinned pages then just give it a light
2343 * scan then give up on it. 2342 * scan then give up on it.
2344 *
2345 * This function returns true if a zone is being reclaimed for a costly
2346 * high-order allocation and compaction is ready to begin. This indicates to
2347 * the caller that it should consider retrying the allocation instead of
2348 * further reclaim.
2349 */ 2343 */
2350static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc) 2344static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2351{ 2345{
2352 struct zoneref *z; 2346 struct zoneref *z;
2353 struct zone *zone; 2347 struct zone *zone;
2354 unsigned long nr_soft_reclaimed; 2348 unsigned long nr_soft_reclaimed;
2355 unsigned long nr_soft_scanned; 2349 unsigned long nr_soft_scanned;
2356 unsigned long lru_pages = 0; 2350 unsigned long lru_pages = 0;
2357 bool aborted_reclaim = false;
2358 struct reclaim_state *reclaim_state = current->reclaim_state; 2351 struct reclaim_state *reclaim_state = current->reclaim_state;
2359 gfp_t orig_mask; 2352 gfp_t orig_mask;
2360 struct shrink_control shrink = { 2353 struct shrink_control shrink = {
@@ -2391,22 +2384,24 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2391 if (sc->priority != DEF_PRIORITY && 2384 if (sc->priority != DEF_PRIORITY &&
2392 !zone_reclaimable(zone)) 2385 !zone_reclaimable(zone))
2393 continue; /* Let kswapd poll it */ 2386 continue; /* Let kswapd poll it */
2394 if (IS_ENABLED(CONFIG_COMPACTION)) { 2387
2395 /* 2388 /*
2396 * If we already have plenty of memory free for 2389 * If we already have plenty of memory free for
2397 * compaction in this zone, don't free any more. 2390 * compaction in this zone, don't free any more.
2398 * Even though compaction is invoked for any 2391 * Even though compaction is invoked for any
2399 * non-zero order, only frequent costly order 2392 * non-zero order, only frequent costly order
2400 * reclamation is disruptive enough to become a 2393 * reclamation is disruptive enough to become a
2401 * noticeable problem, like transparent huge 2394 * noticeable problem, like transparent huge
2402 * page allocations. 2395 * page allocations.
2403 */ 2396 */
2404 if ((zonelist_zone_idx(z) <= requested_highidx) 2397 if (IS_ENABLED(CONFIG_COMPACTION) &&
2405 && compaction_ready(zone, sc)) { 2398 sc->order > PAGE_ALLOC_COSTLY_ORDER &&
2406 aborted_reclaim = true; 2399 zonelist_zone_idx(z) <= requested_highidx &&
2407 continue; 2400 compaction_ready(zone, sc->order)) {
2408 } 2401 sc->compaction_ready = true;
2402 continue;
2409 } 2403 }
2404
2410 /* 2405 /*
2411 * This steals pages from memory cgroups over softlimit 2406 * This steals pages from memory cgroups over softlimit
2412 * and returns the number of reclaimed pages and 2407 * and returns the number of reclaimed pages and
@@ -2444,8 +2439,6 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2444 * promoted it to __GFP_HIGHMEM. 2439 * promoted it to __GFP_HIGHMEM.
2445 */ 2440 */
2446 sc->gfp_mask = orig_mask; 2441 sc->gfp_mask = orig_mask;
2447
2448 return aborted_reclaim;
2449} 2442}
2450 2443
2451/* All zones in zonelist are unreclaimable? */ 2444/* All zones in zonelist are unreclaimable? */
@@ -2489,7 +2482,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2489{ 2482{
2490 unsigned long total_scanned = 0; 2483 unsigned long total_scanned = 0;
2491 unsigned long writeback_threshold; 2484 unsigned long writeback_threshold;
2492 bool aborted_reclaim;
2493 2485
2494 delayacct_freepages_start(); 2486 delayacct_freepages_start();
2495 2487
@@ -2500,11 +2492,14 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2500 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, 2492 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
2501 sc->priority); 2493 sc->priority);
2502 sc->nr_scanned = 0; 2494 sc->nr_scanned = 0;
2503 aborted_reclaim = shrink_zones(zonelist, sc); 2495 shrink_zones(zonelist, sc);
2504 2496
2505 total_scanned += sc->nr_scanned; 2497 total_scanned += sc->nr_scanned;
2506 if (sc->nr_reclaimed >= sc->nr_to_reclaim) 2498 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
2507 goto out; 2499 break;
2500
2501 if (sc->compaction_ready)
2502 break;
2508 2503
2509 /* 2504 /*
2510 * If we're getting trouble reclaiming, start doing 2505 * If we're getting trouble reclaiming, start doing
@@ -2526,16 +2521,15 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2526 WB_REASON_TRY_TO_FREE_PAGES); 2521 WB_REASON_TRY_TO_FREE_PAGES);
2527 sc->may_writepage = 1; 2522 sc->may_writepage = 1;
2528 } 2523 }
2529 } while (--sc->priority >= 0 && !aborted_reclaim); 2524 } while (--sc->priority >= 0);
2530 2525
2531out:
2532 delayacct_freepages_end(); 2526 delayacct_freepages_end();
2533 2527
2534 if (sc->nr_reclaimed) 2528 if (sc->nr_reclaimed)
2535 return sc->nr_reclaimed; 2529 return sc->nr_reclaimed;
2536 2530
2537 /* Aborted reclaim to try compaction? don't OOM, then */ 2531 /* Aborted reclaim to try compaction? don't OOM, then */
2538 if (aborted_reclaim) 2532 if (sc->compaction_ready)
2539 return 1; 2533 return 1;
2540 2534
2541 /* top priority shrink_zones still had more to do? don't OOM, then */ 2535 /* top priority shrink_zones still had more to do? don't OOM, then */