aboutsummaryrefslogtreecommitdiffstats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
authorPaul Mundt <lethal@linux-sh.org>2011-03-24 02:17:25 -0400
committerPaul Mundt <lethal@linux-sh.org>2011-03-24 02:17:25 -0400
commita3d3362287fbe96fe90abdb5c6d1a35471129a8c (patch)
treead3c85ed1feef470c66599eb514e30f43c2db5dd /mm/vmscan.c
parentfb7f045ace0624f1e59a7db8497e460bd54b1cbc (diff)
parent4bbba111d94781d34081c37856bbc5eb33f6c72a (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into sh-latest
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--mm/vmscan.c36
1 file changed, 15 insertions, 21 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6771ea70bfe7..060e4c191403 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -514,7 +514,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
514 514
515 freepage = mapping->a_ops->freepage; 515 freepage = mapping->a_ops->freepage;
516 516
517 __remove_from_page_cache(page); 517 __delete_from_page_cache(page);
518 spin_unlock_irq(&mapping->tree_lock); 518 spin_unlock_irq(&mapping->tree_lock);
519 mem_cgroup_uncharge_cache_page(page); 519 mem_cgroup_uncharge_cache_page(page);
520 520
@@ -2397,9 +2397,9 @@ loop_again:
2397 * cause too much scanning of the lower zones. 2397 * cause too much scanning of the lower zones.
2398 */ 2398 */
2399 for (i = 0; i <= end_zone; i++) { 2399 for (i = 0; i <= end_zone; i++) {
2400 int compaction;
2401 struct zone *zone = pgdat->node_zones + i; 2400 struct zone *zone = pgdat->node_zones + i;
2402 int nr_slab; 2401 int nr_slab;
2402 unsigned long balance_gap;
2403 2403
2404 if (!populated_zone(zone)) 2404 if (!populated_zone(zone))
2405 continue; 2405 continue;
@@ -2416,11 +2416,20 @@ loop_again:
2416 mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask); 2416 mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask);
2417 2417
2418 /* 2418 /*
2419 * We put equal pressure on every zone, unless one 2419 * We put equal pressure on every zone, unless
2420 * zone has way too many pages free already. 2420 * one zone has way too many pages free
2421 * already. The "too many pages" is defined
2422 * as the high wmark plus a "gap" where the
2423 * gap is either the low watermark or 1%
2424 * of the zone, whichever is smaller.
2421 */ 2425 */
2426 balance_gap = min(low_wmark_pages(zone),
2427 (zone->present_pages +
2428 KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
2429 KSWAPD_ZONE_BALANCE_GAP_RATIO);
2422 if (!zone_watermark_ok_safe(zone, order, 2430 if (!zone_watermark_ok_safe(zone, order,
2423 8*high_wmark_pages(zone), end_zone, 0)) 2431 high_wmark_pages(zone) + balance_gap,
2432 end_zone, 0))
2424 shrink_zone(priority, zone, &sc); 2433 shrink_zone(priority, zone, &sc);
2425 reclaim_state->reclaimed_slab = 0; 2434 reclaim_state->reclaimed_slab = 0;
2426 nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, 2435 nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
@@ -2428,24 +2437,9 @@ loop_again:
2428 sc.nr_reclaimed += reclaim_state->reclaimed_slab; 2437 sc.nr_reclaimed += reclaim_state->reclaimed_slab;
2429 total_scanned += sc.nr_scanned; 2438 total_scanned += sc.nr_scanned;
2430 2439
2431 compaction = 0;
2432 if (order &&
2433 zone_watermark_ok(zone, 0,
2434 high_wmark_pages(zone),
2435 end_zone, 0) &&
2436 !zone_watermark_ok(zone, order,
2437 high_wmark_pages(zone),
2438 end_zone, 0)) {
2439 compact_zone_order(zone,
2440 order,
2441 sc.gfp_mask, false,
2442 COMPACT_MODE_KSWAPD);
2443 compaction = 1;
2444 }
2445
2446 if (zone->all_unreclaimable) 2440 if (zone->all_unreclaimable)
2447 continue; 2441 continue;
2448 if (!compaction && nr_slab == 0 && 2442 if (nr_slab == 0 &&
2449 !zone_reclaimable(zone)) 2443 !zone_reclaimable(zone))
2450 zone->all_unreclaimable = 1; 2444 zone->all_unreclaimable = 1;
2451 /* 2445 /*