 include/linux/swap.h |  9 +++++++++
 mm/vmscan.c          | 16 +++++++++++++---
 2 files changed, 22 insertions(+), 3 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index c335055c4253..ed6ebe690f4a 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -155,6 +155,15 @@ enum {
 #define SWAP_CLUSTER_MAX 32
 #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
 
+/*
+ * Ratio between the present memory in the zone and the "gap" that
+ * we're allowing kswapd to shrink in addition to the per-zone high
+ * wmark, even for zones that already have the high wmark satisfied,
+ * in order to provide better per-zone lru behavior. We are ok to
+ * spend not more than 1% of the memory for this zone balancing "gap".
+ */
+#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100
+
 #define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */
 #define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */
 #define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
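The added constant caps kswapd's balancing "gap" at 1% of a zone's present pages, and the divide is rounded up so even a tiny zone gets a non-zero gap. Below is a minimal userspace sketch of that arithmetic (illustration only, not kernel code; the zone sizes are hypothetical):

/* Userspace sketch of the 1% gap computation; zone sizes are made up. */
#include <stdio.h>

#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100

int main(void)
{
	/* Hypothetical zone sizes, in 4KiB pages. */
	unsigned long zones[] = { 1048576 /* 4GiB */, 262144 /* 1GiB */, 1 };
	unsigned long i;

	for (i = 0; i < sizeof(zones) / sizeof(zones[0]); i++) {
		/* Round up, mirroring the (x + RATIO - 1) / RATIO idiom. */
		unsigned long gap = (zones[i] +
				     KSWAPD_ZONE_BALANCE_GAP_RATIO - 1) /
				    KSWAPD_ZONE_BALANCE_GAP_RATIO;
		printf("present_pages=%lu -> gap=%lu pages\n", zones[i], gap);
	}
	return 0;
}

For the 4GiB zone this prints a gap of 10486 pages, roughly 41MiB; for the one-page zone the round-up still yields a gap of 1.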
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 665b090b6c72..060e4c191403 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2399,6 +2399,7 @@ loop_again:
 		for (i = 0; i <= end_zone; i++) {
 			struct zone *zone = pgdat->node_zones + i;
 			int nr_slab;
+			unsigned long balance_gap;
 
 			if (!populated_zone(zone))
 				continue;
@@ -2415,11 +2416,20 @@ loop_again:
 			mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask);
 
 			/*
-			 * We put equal pressure on every zone, unless one
-			 * zone has way too many pages free already.
+			 * We put equal pressure on every zone, unless
+			 * one zone has way too many pages free
+			 * already. The "too many pages" is defined
+			 * as the high wmark plus a "gap" where the
+			 * gap is either the low watermark or 1%
+			 * of the zone, whichever is smaller.
 			 */
+			balance_gap = min(low_wmark_pages(zone),
+				(zone->present_pages +
+					KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+				KSWAPD_ZONE_BALANCE_GAP_RATIO);
 			if (!zone_watermark_ok_safe(zone, order,
-					8*high_wmark_pages(zone), end_zone, 0))
+					high_wmark_pages(zone) + balance_gap,
+					end_zone, 0))
 				shrink_zone(priority, zone, &sc);
 			reclaim_state->reclaimed_slab = 0;
 			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
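With both hunks applied, kswapd stops treating a zone as needing pressure once its free pages exceed the high watermark plus min(low watermark, 1% of the zone), instead of the much larger 8*high_wmark_pages() threshold. Here is a userspace sketch comparing the two targets; the watermark values below are assumed for illustration, and min() stands in for the kernel's macro:

/* Userspace comparison of the old and new kswapd balancing targets. */
#include <stdio.h>

#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100
#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* Hypothetical 1GiB zone of 4KiB pages with assumed watermarks. */
	unsigned long present_pages = 262144;
	unsigned long low_wmark = 1200;		/* assumed low watermark */
	unsigned long high_wmark = 1440;	/* assumed high watermark */

	unsigned long balance_gap = min(low_wmark,
			(present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO - 1) /
			KSWAPD_ZONE_BALANCE_GAP_RATIO);

	printf("old target: %lu free pages\n", 8 * high_wmark);
	printf("new target: %lu free pages\n", high_wmark + balance_gap);
	return 0;
}

With these assumed numbers the 1% gap (2622 pages) loses to the low watermark, so balance_gap is 1200 and the target falls from 11520 to 2640 free pages.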