author     Patrick McHardy <kaber@trash.net>   2011-04-13 07:32:28 -0400
committer  Patrick McHardy <kaber@trash.net>   2011-04-13 07:32:28 -0400
commit     b32e3dc7860d00124fa432dba09667e647cb9bcc (patch)
tree       2fa6e56f389431dfb84609d3d7572cad76e88e71 /mm/vmscan.c
parent     6604271c5bc658a6067ed0c3deba4d89e0e50382 (diff)
parent     96120d86fe302c006259baee9061eea9e1b9e486 (diff)
Merge branch 'master' of ssh://master.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  38
1 file changed, 16 insertions(+), 22 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6771ea70bfe7..f73b8657c2d0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -358,7 +358,7 @@ static int may_write_to_queue(struct backing_dev_info *bdi,
 static void handle_write_error(struct address_space *mapping,
 				struct page *page, int error)
 {
-	lock_page_nosync(page);
+	lock_page(page);
 	if (page_mapping(page) == mapping)
 		mapping_set_error(mapping, error);
 	unlock_page(page);
@@ -514,7 +514,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
 
 		freepage = mapping->a_ops->freepage;
 
-		__remove_from_page_cache(page);
+		__delete_from_page_cache(page);
 		spin_unlock_irq(&mapping->tree_lock);
 		mem_cgroup_uncharge_cache_page(page);
 
@@ -2397,9 +2397,9 @@ loop_again:
 		 * cause too much scanning of the lower zones.
 		 */
 		for (i = 0; i <= end_zone; i++) {
-			int compaction;
 			struct zone *zone = pgdat->node_zones + i;
 			int nr_slab;
+			unsigned long balance_gap;
 
 			if (!populated_zone(zone))
 				continue;
@@ -2416,11 +2416,20 @@ loop_again:
 			mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask);
 
 			/*
-			 * We put equal pressure on every zone, unless one
-			 * zone has way too many pages free already.
+			 * We put equal pressure on every zone, unless
+			 * one zone has way too many pages free
+			 * already. The "too many pages" is defined
+			 * as the high wmark plus a "gap" where the
+			 * gap is either the low watermark or 1%
+			 * of the zone, whichever is smaller.
 			 */
+			balance_gap = min(low_wmark_pages(zone),
+				(zone->present_pages +
+					KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+				KSWAPD_ZONE_BALANCE_GAP_RATIO);
 			if (!zone_watermark_ok_safe(zone, order,
-					8*high_wmark_pages(zone), end_zone, 0))
+					high_wmark_pages(zone) + balance_gap,
+					end_zone, 0))
 				shrink_zone(priority, zone, &sc);
 			reclaim_state->reclaimed_slab = 0;
 			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
@@ -2428,24 +2437,9 @@ loop_again:
 			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
 			total_scanned += sc.nr_scanned;
 
-			compaction = 0;
-			if (order &&
-			    zone_watermark_ok(zone, 0,
-					       high_wmark_pages(zone),
-					       end_zone, 0) &&
-			    !zone_watermark_ok(zone, order,
-					       high_wmark_pages(zone),
-					       end_zone, 0)) {
-				compact_zone_order(zone,
-						   order,
-						   sc.gfp_mask, false,
-						   COMPACT_MODE_KSWAPD);
-				compaction = 1;
-			}
-
 			if (zone->all_unreclaimable)
 				continue;
-			if (!compaction && nr_slab == 0 &&
+			if (nr_slab == 0 &&
 			    !zone_reclaimable(zone))
 				zone->all_unreclaimable = 1;
 			/*