Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  83
1 file changed, 45 insertions, 38 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 48550c66f1f..b7ed3767564 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2207,9 +2207,12 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
  * Throttle direct reclaimers if backing storage is backed by the network
  * and the PFMEMALLOC reserve for the preferred node is getting dangerously
  * depleted. kswapd will continue to make progress and wake the processes
- * when the low watermark is reached
+ * when the low watermark is reached.
+ *
+ * Returns true if a fatal signal was delivered during throttling. If this
+ * happens, the page allocator should not consider triggering the OOM killer.
  */
-static void throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
+static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
 					nodemask_t *nodemask)
 {
 	struct zone *zone;
@@ -2224,13 +2227,20 @@ static void throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
 	 * processes to block on log_wait_commit().
 	 */
 	if (current->flags & PF_KTHREAD)
-		return;
+		goto out;
+
+	/*
+	 * If a fatal signal is pending, this process should not throttle.
+	 * It should return quickly so it can exit and free its memory.
+	 */
+	if (fatal_signal_pending(current))
+		goto out;
 
 	/* Check if the pfmemalloc reserves are ok */
 	first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
 	pgdat = zone->zone_pgdat;
 	if (pfmemalloc_watermark_ok(pgdat))
-		return;
+		goto out;
 
 	/* Account for the throttling */
 	count_vm_event(PGSCAN_DIRECT_THROTTLE);
@@ -2246,12 +2256,20 @@ static void throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
 	if (!(gfp_mask & __GFP_FS)) {
 		wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
 			pfmemalloc_watermark_ok(pgdat), HZ);
-		return;
+
+		goto check_pending;
 	}
 
 	/* Throttle until kswapd wakes the process */
 	wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
 		pfmemalloc_watermark_ok(pgdat));
+
+check_pending:
+	if (fatal_signal_pending(current))
+		return true;
+
+out:
+	return false;
 }
 
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
@@ -2273,13 +2291,12 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.gfp_mask = sc.gfp_mask,
 	};
 
-	throttle_direct_reclaim(gfp_mask, zonelist, nodemask);
-
 	/*
-	 * Do not enter reclaim if fatal signal is pending. 1 is returned so
-	 * that the page allocator does not consider triggering OOM
+	 * Do not enter reclaim if fatal signal was delivered while throttled.
+	 * 1 is returned so that the page allocator does not OOM kill at this
+	 * point.
 	 */
-	if (fatal_signal_pending(current))
+	if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
 		return 1;
 
 	trace_mm_vmscan_direct_reclaim_begin(order,
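
The four hunks above turn throttle_direct_reclaim() from a void function into one that reports whether a fatal signal was delivered while the task was throttled: kernel threads and already-dying tasks skip the wait via goto out, !__GFP_FS callers only sleep with a bounded timeout (kswapd may be unable to make progress on their behalf), and the woken paths funnel through check_pending so the signal is noticed before returning. A minimal userspace sketch of that control flow, including the caller-side pattern from the try_to_free_pages() hunk, with stub names in place of the kernel APIs (nothing below is kernel code):

/*
 * Userspace sketch of the new control flow. Every name below is an
 * illustrative stand-in, not a kernel API; the stubs fake the watermark
 * and signal state so the program runs anywhere.
 */
#include <stdbool.h>
#include <stdio.h>

static bool fatal_signal_seen;   /* stands in for fatal_signal_pending() */
static bool reserves_ok;         /* stands in for pfmemalloc_watermark_ok() */
static bool caller_may_use_fs;   /* stands in for gfp_mask & __GFP_FS */

/* Wait stubs: pretend the task slept and a fatal signal arrived meanwhile. */
static void wait_with_timeout(void) { fatal_signal_seen = true; }
static void wait_killable(void)     { fatal_signal_seen = true; }

/*
 * Returns true if a "fatal signal" was delivered during throttling,
 * mirroring the bool return the patch gives throttle_direct_reclaim().
 */
static bool throttle(void)
{
	if (fatal_signal_seen)       /* a dying task should not throttle */
		goto out;
	if (reserves_ok)             /* reserves fine: nothing to wait for */
		goto out;

	if (!caller_may_use_fs) {
		wait_with_timeout(); /* bounded wait; kswapd may not help us */
		goto check_pending;
	}
	wait_killable();             /* sleep until woken (or killed) */

check_pending:
	if (fatal_signal_seen)
		return true;
out:
	return false;
}

/*
 * Caller-side pattern from the try_to_free_pages() hunk: returning 1
 * tells the (pretend) page allocator not to consider an OOM kill.
 */
static int try_to_free(void)
{
	if (throttle())
		return 1;
	/* ... real reclaim would run here ... */
	return 0;
}

int main(void)
{
	/* Signal arrives during the throttle wait: expect 1. */
	printf("reclaim result: %d\n", try_to_free());
	return 0;
}

The shape matters more than the stubs: every exit path reports the signal state exactly once, so the throttle/OOM decision is made in a single place by the caller.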
@@ -2397,6 +2414,19 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc)
 	} while (memcg);
 }
 
+static bool zone_balanced(struct zone *zone, int order,
+			  unsigned long balance_gap, int classzone_idx)
+{
+	if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
+				    balance_gap, classzone_idx, 0))
+		return false;
+
+	if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
+		return false;
+
+	return true;
+}
+
 /*
  * pgdat_balanced is used when checking if a node is balanced for high-order
  * allocations. Only zones that meet watermarks and are in a zone allowed
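
zone_balanced() folds two tests that the call sites in the following hunks previously spelled out by hand: the high-watermark check (padded by an optional balance_gap) and, for order > 0 allocations, whether compaction could run. Centralizing them keeps kswapd's several balance checks from drifting apart. A self-contained sketch of the predicate, using invented stub fields rather than the kernel's zone and watermark machinery:

/*
 * Sketch of the consolidated predicate. The struct and watermark math
 * are invented stubs, not the kernel's zone machinery.
 */
#include <stdbool.h>
#include <stdio.h>

struct zone {
	unsigned long free_pages;
	unsigned long high_wmark;
	bool compaction_ok;      /* stands in for compaction_suitable() */
};

/* Stand-in for zone_watermark_ok_safe() with the balance gap folded in. */
static bool watermark_ok(const struct zone *z, unsigned long balance_gap)
{
	return z->free_pages >= z->high_wmark + balance_gap;
}

/* One predicate instead of repeating both checks at every call site. */
static bool zone_balanced(const struct zone *z, int order,
			  unsigned long balance_gap)
{
	if (!watermark_ok(z, balance_gap))
		return false;

	/* order-0 never needs compaction, mirroring the `order &&` guard */
	if (order && !z->compaction_ok)
		return false;

	return true;
}

int main(void)
{
	struct zone z = {
		.free_pages = 100, .high_wmark = 80, .compaction_ok = false,
	};

	/* Meets the watermark, but a high-order request fails compaction. */
	printf("order 0 balanced: %d\n", zone_balanced(&z, 0, 0)); /* 1 */
	printf("order 3 balanced: %d\n", zone_balanced(&z, 3, 0)); /* 0 */
	return 0;
}

The order guard is the subtle part: order-0 pages never need compaction, so for them the watermark alone decides.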
@@ -2475,8 +2505,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
 			continue;
 		}
 
-		if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
-					    i, 0))
+		if (!zone_balanced(zone, order, 0, i))
 			all_zones_ok = false;
 		else
 			balanced += zone->present_pages;
@@ -2585,8 +2614,7 @@ loop_again:
 				break;
 			}
 
-			if (!zone_watermark_ok_safe(zone, order,
-					high_wmark_pages(zone), 0, 0)) {
+			if (!zone_balanced(zone, order, 0, 0)) {
 				end_zone = i;
 				break;
 			} else {
@@ -2662,9 +2690,8 @@ loop_again:
 				testorder = 0;
 
 			if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
-			    !zone_watermark_ok_safe(zone, testorder,
-					high_wmark_pages(zone) + balance_gap,
-					end_zone, 0)) {
+			    !zone_balanced(zone, testorder,
+					   balance_gap, end_zone)) {
 				shrink_zone(zone, &sc);
 
 				reclaim_state->reclaimed_slab = 0;
@@ -2691,8 +2718,7 @@ loop_again:
 				continue;
 			}
 
-			if (!zone_watermark_ok_safe(zone, testorder,
-					high_wmark_pages(zone), end_zone, 0)) {
+			if (!zone_balanced(zone, testorder, 0, end_zone)) {
 				all_zones_ok = 0;
 				/*
 				 * We are still under min water mark. This
@@ -2797,29 +2823,10 @@ out:
 		if (!populated_zone(zone))
 			continue;
 
-		if (zone->all_unreclaimable &&
-		    sc.priority != DEF_PRIORITY)
-			continue;
-
-		/* Would compaction fail due to lack of free memory? */
-		if (COMPACTION_BUILD &&
-		    compaction_suitable(zone, order) == COMPACT_SKIPPED)
-			goto loop_again;
-
-		/* Confirm the zone is balanced for order-0 */
-		if (!zone_watermark_ok(zone, 0,
-				       high_wmark_pages(zone), 0, 0)) {
-			order = sc.order = 0;
-			goto loop_again;
-		}
-
 		/* Check if the memory needs to be defragmented. */
 		if (zone_watermark_ok(zone, order,
 				      low_wmark_pages(zone), *classzone_idx, 0))
 			zones_need_compaction = 0;
-
-		/* If balanced, clear the congested flag */
-		zone_clear_flag(zone, ZONE_CONGESTED);
 	}
 
 	if (zones_need_compaction)
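
The last hunk thins out kswapd's out: path. The all_unreclaimable skip, the compaction_suitable() recheck, the order-0 rebalance via goto loop_again, and the ZONE_CONGESTED clearing are all dropped, presumably redundant now that zone_balanced() answers the balance question during the scan itself; the only per-zone question left is whether memory still needs defragmenting. A stub-typed sketch of the surviving logic (again, not kernel code):

/*
 * Stub-typed sketch of the surviving out: logic; not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct zone {
	bool populated;
	bool low_wmark_ok;   /* low watermark met at the requested order */
};

/* The flag starts true and any qualifying zone clears it. */
static bool zones_need_compaction(const struct zone *zones, int nr)
{
	bool need = true;

	for (int i = 0; i < nr; i++) {
		if (!zones[i].populated)
			continue;
		if (zones[i].low_wmark_ok)
			need = false;
	}
	return need;
}

int main(void)
{
	struct zone node[] = {
		{ .populated = false },
		{ .populated = true, .low_wmark_ok = true },
	};

	/* One zone already satisfies the low watermark: no wakeup. */
	printf("wake compaction: %d\n",
	       zones_need_compaction(node, 2)); /* 0 */
	return 0;
}

This mirrors the kept lines above: zones_need_compaction is cleared as soon as any populated zone already meets the low watermark at the requested order, and only if it stays set does kswapd go on to trigger compaction.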