Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 66
1 files changed, 31 insertions, 35 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6771ea70bfe7..f6b435c80079 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -41,6 +41,7 @@
 #include <linux/memcontrol.h>
 #include <linux/delayacct.h>
 #include <linux/sysctl.h>
+#include <linux/oom.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -358,7 +359,7 @@ static int may_write_to_queue(struct backing_dev_info *bdi,
 static void handle_write_error(struct address_space *mapping,
                                 struct page *page, int error)
 {
-        lock_page_nosync(page);
+        lock_page(page);
         if (page_mapping(page) == mapping)
                 mapping_set_error(mapping, error);
         unlock_page(page);
@@ -514,7 +515,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
 
         freepage = mapping->a_ops->freepage;
 
-        __remove_from_page_cache(page);
+        __delete_from_page_cache(page);
         spin_unlock_irq(&mapping->tree_lock);
         mem_cgroup_uncharge_cache_page(page);
 
@@ -1065,7 +1066,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                 * surrounding the tag page.  Only take those pages of
                 * the same active state as that tag page.  We may safely
                 * round the target page pfn down to the requested order
-                * as the mem_map is guarenteed valid out to MAX_ORDER,
+                * as the mem_map is guaranteed valid out to MAX_ORDER,
                 * where that page is in a different zone we will detect
                 * it from its zone id and abort this block scan.
                 */
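
The comment above describes how lumpy reclaim rounds the tag page's pfn down to the requested allocation order before scanning the surrounding block. A minimal stand-alone sketch of that rounding (the helper name and the numbers are illustrative, not kernel code):

#include <stdio.h>

/* Illustrative only: clear the low `order` bits so the scan starts at
 * the first pfn of the 2^order-aligned block containing `pfn`. */
static unsigned long round_pfn_to_order(unsigned long pfn, unsigned int order)
{
        return pfn & ~((1UL << order) - 1);
}

int main(void)
{
        /* A tag page at pfn 1027 with order 3 belongs to block 1024..1031. */
        unsigned long start = round_pfn_to_order(1027, 3);

        printf("block spans pfn %lu..%lu\n", start, start + (1UL << 3) - 1);
        return 0;
}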
@@ -1988,17 +1989,12 @@ static bool zone_reclaimable(struct zone *zone)
         return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
 }
 
-/*
- * As hibernation is going on, kswapd is freezed so that it can't mark
- * the zone into all_unreclaimable. It can't handle OOM during hibernation.
- * So let's check zone's unreclaimable in direct reclaim as well as kswapd.
- */
+/* All zones in zonelist are unreclaimable? */
 static bool all_unreclaimable(struct zonelist *zonelist,
                 struct scan_control *sc)
 {
         struct zoneref *z;
         struct zone *zone;
-        bool all_unreclaimable = true;
 
         for_each_zone_zonelist_nodemask(zone, z, zonelist,
                         gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2006,13 +2002,11 @@ static bool all_unreclaimable(struct zonelist *zonelist,
                         continue;
                 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                         continue;
-                if (zone_reclaimable(zone)) {
-                        all_unreclaimable = false;
-                        break;
-                }
+                if (!zone->all_unreclaimable)
+                        return false;
         }
 
-        return all_unreclaimable;
+        return true;
 }
 
 /*
@@ -2108,6 +2102,14 @@ out:
         if (sc->nr_reclaimed)
                 return sc->nr_reclaimed;
 
+        /*
+         * As hibernation is going on, kswapd is freezed so that it can't mark
+         * the zone into all_unreclaimable. Thus bypassing all_unreclaimable
+         * check.
+         */
+        if (oom_killer_disabled)
+                return 0;
+
         /* top priority shrink_zones still had more to do? don't OOM, then */
         if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
                 return 1;
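
Together with the reworked all_unreclaimable() above, the tail of do_try_to_free_pages() now decides whether to report progress along these lines. The function below is a stand-alone paraphrase of the control flow visible in this hunk, with booleans standing in for the real scan_control and zonelist state; it is not kernel code:

#include <stdbool.h>

/* Paraphrase of the return logic in the hunk above: report pages that
 * were actually reclaimed, bail out while the OOM killer is disabled
 * (hibernation, where kswapd is frozen and the all_unreclaimable flags
 * may be stale), and otherwise claim progress as long as some allowed
 * zone has not been marked all_unreclaimable. */
static unsigned long reclaim_result(unsigned long nr_reclaimed,
                                    bool oom_killer_disabled,
                                    bool global_reclaim,
                                    bool every_zone_unreclaimable)
{
        if (nr_reclaimed)
                return nr_reclaimed;

        if (oom_killer_disabled)
                return 0;

        if (global_reclaim && !every_zone_unreclaimable)
                return 1;       /* still worth retrying before OOM */

        return 0;               /* caller may fall back to the OOM killer */
}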
@@ -2224,7 +2226,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
  * o a 16M DMA zone that is balanced will not balance a zone on any
  *   reasonable sized machine
  * o On all other machines, the top zone must be at least a reasonable
- *   precentage of the middle zones. For example, on 32-bit x86, highmem
+ *   percentage of the middle zones. For example, on 32-bit x86, highmem
  *   would need to be at least 256M for it to be balance a whole node.
  *   Similarly, on x86-64 the Normal zone would need to be at least 1G
  *   to balance a node on its own. These seemed like reasonable ratios.
@@ -2397,9 +2399,9 @@ loop_again:
                 * cause too much scanning of the lower zones.
                 */
                for (i = 0; i <= end_zone; i++) {
-                        int compaction;
                         struct zone *zone = pgdat->node_zones + i;
                         int nr_slab;
+                        unsigned long balance_gap;
 
                         if (!populated_zone(zone))
                                 continue;
@@ -2416,11 +2418,20 @@ loop_again:
                                 mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask);
 
                         /*
-                         * We put equal pressure on every zone, unless one
-                         * zone has way too many pages free already.
+                         * We put equal pressure on every zone, unless
+                         * one zone has way too many pages free
+                         * already. The "too many pages" is defined
+                         * as the high wmark plus a "gap" where the
+                         * gap is either the low watermark or 1%
+                         * of the zone, whichever is smaller.
                          */
+                        balance_gap = min(low_wmark_pages(zone),
+                                (zone->present_pages +
+                                        KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+                                KSWAPD_ZONE_BALANCE_GAP_RATIO);
                         if (!zone_watermark_ok_safe(zone, order,
-                                        8*high_wmark_pages(zone), end_zone, 0))
+                                        high_wmark_pages(zone) + balance_gap,
+                                        end_zone, 0))
                                 shrink_zone(priority, zone, &sc);
                         reclaim_state->reclaimed_slab = 0;
                         nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
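
The new balance_gap can be checked with a small stand-alone computation. KSWAPD_ZONE_BALANCE_GAP_RATIO is not defined in this hunk; the sketch below assumes a ratio of 100, which is what the "1% of the zone" wording in the comment implies, and the zone sizes are made up for illustration:

#include <stdio.h>

/* Assumed value: the definition is outside this hunk, but the comment's
 * "1% of the zone" implies a ratio of 100. */
#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

int main(void)
{
        /* Illustrative numbers: a zone of 262144 pages (1GB of 4KB pages)
         * with a low watermark of 1536 pages. */
        unsigned long present_pages = 262144;
        unsigned long low_wmark = 1536;

        /* Round the 1% term up, as the new kswapd code does. */
        unsigned long one_percent = (present_pages +
                        KSWAPD_ZONE_BALANCE_GAP_RATIO - 1) /
                        KSWAPD_ZONE_BALANCE_GAP_RATIO;
        unsigned long balance_gap = min_ul(low_wmark, one_percent);

        /* 1% of this zone is 2622 pages, so the smaller low watermark
         * (1536 pages) becomes the gap added to the high watermark. */
        printf("balance_gap = %lu pages\n", balance_gap);
        return 0;
}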
@@ -2428,24 +2439,9 @@ loop_again:
                         sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                         total_scanned += sc.nr_scanned;
 
-                        compaction = 0;
-                        if (order &&
-                            zone_watermark_ok(zone, 0,
-                                               high_wmark_pages(zone),
-                                               end_zone, 0) &&
-                            !zone_watermark_ok(zone, order,
-                                               high_wmark_pages(zone),
-                                               end_zone, 0)) {
-                                compact_zone_order(zone,
-                                                   order,
-                                                   sc.gfp_mask, false,
-                                                   COMPACT_MODE_KSWAPD);
-                                compaction = 1;
-                        }
-
                         if (zone->all_unreclaimable)
                                 continue;
-                        if (!compaction && nr_slab == 0 &&
+                        if (nr_slab == 0 &&
                             !zone_reclaimable(zone))
                                 zone->all_unreclaimable = 1;
                         /*