about summary refs log tree commit diff stats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
author		Jeremy Erickson <jerickso@cs.unc.edu>	2014-04-18 17:06:00 -0400
committer	Jeremy Erickson <jerickso@cs.unc.edu>	2014-04-18 17:06:00 -0400
commit		a215aa7b9ab3759c047201199fba64d3042d7f13 (patch)
tree		bca37493d9b2233450e6d3ffced1261d0e4f71fe /mm/vmscan.c
parent		d31199a77ef606f1d06894385f1852181ba6136b (diff)
Update 2.6.36 to 2.6.36.4 (branch: wip-dissipation2-jerickso)
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	23
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c5dfabf25f11..3e71cb1ee28c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2082,7 +2082,7 @@ static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
 		if (zone->all_unreclaimable)
 			continue;
 
-		if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
+		if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
 								0, 0))
 			return 1;
 	}
@@ -2169,7 +2169,7 @@ loop_again:
 				shrink_active_list(SWAP_CLUSTER_MAX, zone,
 							&sc, priority, 0);
 
-			if (!zone_watermark_ok(zone, order,
+			if (!zone_watermark_ok_safe(zone, order,
 					high_wmark_pages(zone), 0, 0)) {
 				end_zone = i;
 				break;
@@ -2215,7 +2215,7 @@ loop_again:
 			 * We put equal pressure on every zone, unless one
 			 * zone has way too many pages free already.
 			 */
-			if (!zone_watermark_ok(zone, order,
+			if (!zone_watermark_ok_safe(zone, order,
 					8*high_wmark_pages(zone), end_zone, 0))
 				shrink_zone(priority, zone, &sc);
 			reclaim_state->reclaimed_slab = 0;
@@ -2236,7 +2236,7 @@ loop_again:
 			    total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
 				sc.may_writepage = 1;
 
-			if (!zone_watermark_ok(zone, order,
+			if (!zone_watermark_ok_safe(zone, order,
 					high_wmark_pages(zone), end_zone, 0)) {
 				all_zones_ok = 0;
 				/*
@@ -2244,7 +2244,7 @@ loop_again:
 				 * means that we have a GFP_ATOMIC allocation
 				 * failure risk. Hurry up!
 				 */
-				if (!zone_watermark_ok(zone, order,
+				if (!zone_watermark_ok_safe(zone, order,
 						min_wmark_pages(zone), end_zone, 0))
 					has_under_min_watermark_zone = 1;
 			}
@@ -2378,7 +2378,9 @@ static int kswapd(void *p)
 			 */
 			if (!sleeping_prematurely(pgdat, order, remaining)) {
 				trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
+				restore_pgdat_percpu_threshold(pgdat);
 				schedule();
+				reduce_pgdat_percpu_threshold(pgdat);
 			} else {
 				if (remaining)
 					count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
@@ -2417,16 +2419,17 @@ void wakeup_kswapd(struct zone *zone, int order)
 	if (!populated_zone(zone))
 		return;
 
-	pgdat = zone->zone_pgdat;
-	if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
+	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 		return;
+	pgdat = zone->zone_pgdat;
 	if (pgdat->kswapd_max_order < order)
 		pgdat->kswapd_max_order = order;
-	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
-	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
-		return;
 	if (!waitqueue_active(&pgdat->kswapd_wait))
 		return;
+	if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
+		return;
+
+	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
 	wake_up_interruptible(&pgdat->kswapd_wait);
 }
 