Diffstat (limited to 'mm')
-rw-r--r--  mm/vmscan.c  16
1 file changed, 13 insertions, 3 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 665b090b6c7..060e4c19140 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2399,6 +2399,7 @@ loop_again:
 		for (i = 0; i <= end_zone; i++) {
 			struct zone *zone = pgdat->node_zones + i;
 			int nr_slab;
+			unsigned long balance_gap;
 
 			if (!populated_zone(zone))
 				continue;
@@ -2415,11 +2416,20 @@ loop_again:
 			mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask);
 
 			/*
-			 * We put equal pressure on every zone, unless one
-			 * zone has way too many pages free already.
+			 * We put equal pressure on every zone, unless
+			 * one zone has way too many pages free
+			 * already. The "too many pages" is defined
+			 * as the high wmark plus a "gap" where the
+			 * gap is either the low watermark or 1%
+			 * of the zone, whichever is smaller.
 			 */
+			balance_gap = min(low_wmark_pages(zone),
+				(zone->present_pages +
+					KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+				KSWAPD_ZONE_BALANCE_GAP_RATIO);
 			if (!zone_watermark_ok_safe(zone, order,
-					8*high_wmark_pages(zone), end_zone, 0))
+					high_wmark_pages(zone) + balance_gap,
+					end_zone, 0))
 				shrink_zone(priority, zone, &sc);
 			reclaim_state->reclaimed_slab = 0;
 			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
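For reference, a minimal userspace sketch of the balance_gap arithmetic added above. This is illustrative only: the value of KSWAPD_ZONE_BALANCE_GAP_RATIO (assumed here to be 100, i.e. the gap is capped at roughly 1% of the zone) and the sample zone sizes are assumptions, not taken from this diff.

/* Sketch of the balance_gap calculation from the patch above.
 * Assumption: KSWAPD_ZONE_BALANCE_GAP_RATIO == 100 (gap <= ~1% of zone).
 * The zone numbers below are made up for illustration.
 */
#include <stdio.h>

#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Illustrative zone figures, in pages (~1GB zone with 4KB pages). */
	unsigned long present_pages = 262144;
	unsigned long low_wmark  = 1024;
	unsigned long high_wmark = 1536;

	/* gap = min(low watermark, 1% of the zone rounded up) */
	unsigned long balance_gap = min_ul(low_wmark,
		(present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO - 1) /
		KSWAPD_ZONE_BALANCE_GAP_RATIO);

	printf("balance_gap = %lu pages\n", balance_gap);
	printf("kswapd keeps reclaiming until free pages reach %lu "
	       "(high wmark + gap)\n", high_wmark + balance_gap);
	return 0;
}

With these sample numbers the gap is the low watermark (1024 pages), since 1% of the zone (2622 pages) is larger; the point of the patch is that this bound replaces the much larger 8*high_wmark_pages() target, so kswapd stops reclaiming sooner on small zones.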