author		James Morris <jmorris@namei.org>	2011-04-19 07:32:41 -0400
committer	James Morris <jmorris@namei.org>	2011-04-19 07:32:41 -0400
commit		d4ab4e6a23f805abb8fc3cc34525eec3788aeca1 (patch)
tree		eefd82c155bc27469a85667d759cd90facf4a6e3 /mm/vmscan.c
parent		c0fa797ae6cd02ff87c0bfe0d509368a3b45640e (diff)
parent		96fd2d57b8252e16dfacf8941f7a74a6119197f5 (diff)
Merge branch 'master'; commit 'v2.6.39-rc3' into next
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	28
1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f73b8657c2d0..f6b435c80079 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -41,6 +41,7 @@
 #include <linux/memcontrol.h>
 #include <linux/delayacct.h>
 #include <linux/sysctl.h>
+#include <linux/oom.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -1065,7 +1066,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 	 * surrounding the tag page. Only take those pages of
 	 * the same active state as that tag page. We may safely
 	 * round the target page pfn down to the requested order
-	 * as the mem_map is guarenteed valid out to MAX_ORDER,
+	 * as the mem_map is guaranteed valid out to MAX_ORDER,
 	 * where that page is in a different zone we will detect
 	 * it from its zone id and abort this block scan.
 	 */
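
The comment in this hunk describes lumpy reclaim's trick of rounding the tag page's pfn down to the start of its naturally aligned, order-sized block. A minimal stand-alone sketch of that rounding, with invented pfn and order values (the kernel's own code works on struct page and the live mem_map):

#include <stdio.h>

int main(void)
{
	/* Invented example: round a pfn down to its order-4 (16-page) block. */
	unsigned long pfn = 1234567;
	unsigned int order = 4;
	unsigned long base = pfn & ~((1UL << order) - 1);

	/*
	 * Pages [base, base + 2^order) form the naturally aligned block
	 * the scan walks; a page from a different zone inside that block
	 * is detected by its zone id and aborts the walk.
	 */
	printf("pfn %lu rounds down to block base %lu\n", pfn, base);
	return 0;
}
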
@@ -1988,17 +1989,12 @@ static bool zone_reclaimable(struct zone *zone)
 	return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
 }
 
-/*
- * As hibernation is going on, kswapd is freezed so that it can't mark
- * the zone into all_unreclaimable. It can't handle OOM during hibernation.
- * So let's check zone's unreclaimable in direct reclaim as well as kswapd.
- */
+/* All zones in zonelist are unreclaimable? */
 static bool all_unreclaimable(struct zonelist *zonelist,
 		struct scan_control *sc)
 {
 	struct zoneref *z;
 	struct zone *zone;
-	bool all_unreclaimable = true;
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 			gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2006,13 +2002,11 @@ static bool all_unreclaimable(struct zonelist *zonelist,
 			continue;
 		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 			continue;
-		if (zone_reclaimable(zone)) {
-			all_unreclaimable = false;
-			break;
-		}
+		if (!zone->all_unreclaimable)
+			return false;
 	}
 
-	return all_unreclaimable;
+	return true;
 }
 
 /*
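
Taken together, the two hunks above replace a tracked boolean with an early return: the moment one zone is still reclaimable, the scan can answer false without visiting the rest of the zonelist (and the test now reads kswapd's zone->all_unreclaimable flag rather than recomputing zone_reclaimable()). A user-space sketch of the same before/after shape, with a hypothetical zone array standing in for the zonelist iterator:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for struct zone; only the field we need. */
struct zone { bool all_unreclaimable; };

/* Before: a flag variable is carried through the whole loop. */
static bool all_unreclaimable_flag(const struct zone *zones, size_t n)
{
	bool all_unreclaimable = true;
	size_t i;

	for (i = 0; i < n; i++) {
		if (!zones[i].all_unreclaimable) {
			all_unreclaimable = false;
			break;
		}
	}
	return all_unreclaimable;
}

/* After: return false the moment a reclaimable zone is seen. */
static bool all_unreclaimable_early(const struct zone *zones, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (!zones[i].all_unreclaimable)
			return false;
	}
	return true;
}

int main(void)
{
	struct zone zones[] = { { true }, { false }, { true } };
	size_t n = sizeof(zones) / sizeof(zones[0]);

	/* Both forms agree; the second needs no loop-local state. */
	printf("%d %d\n", all_unreclaimable_flag(zones, n),
	       all_unreclaimable_early(zones, n));
	return 0;
}
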
@@ -2108,6 +2102,14 @@ out:
 	if (sc->nr_reclaimed)
 		return sc->nr_reclaimed;
 
+	/*
+	 * As hibernation is going on, kswapd is freezed so that it can't mark
+	 * the zone into all_unreclaimable. Thus bypassing all_unreclaimable
+	 * check.
+	 */
+	if (oom_killer_disabled)
+		return 0;
+
 	/* top priority shrink_zones still had more to do? don't OOM, then */
 	if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
 		return 1;
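
This hunk relocates the hibernation special case from all_unreclaimable() to its caller: while the OOM killer is disabled, kswapd is frozen and cannot update zone->all_unreclaimable, so the stale flags are not consulted at all and direct reclaim simply reports no progress. A rough sketch of the resulting decision order at the tail of do_try_to_free_pages(); the globals here are stand-ins for kernel state, not the kernel's types:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins; names follow the patch, values are invented. */
static bool oom_killer_disabled;	/* true while hibernation freezes kswapd */
static unsigned long nr_reclaimed;	/* pages freed by this reclaim pass */
static bool some_zone_reclaimable;	/* what !all_unreclaimable() would report */

/* Mimics the tail of do_try_to_free_pages() after this patch. */
static unsigned long reclaim_result(void)
{
	if (nr_reclaimed)
		return nr_reclaimed;	/* real progress: report it */

	/* zone->all_unreclaimable may be stale with kswapd frozen: skip it. */
	if (oom_killer_disabled)
		return 0;

	if (some_zone_reclaimable)
		return 1;		/* nominal progress: hold off the OOM path */

	return 0;			/* genuinely stuck: let the OOM path run */
}

int main(void)
{
	oom_killer_disabled = true;
	some_zone_reclaimable = true;
	printf("reclaimed: %lu\n", reclaim_result());	/* 0 during hibernation */
	return 0;
}
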
@@ -2224,7 +2226,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
  * o a 16M DMA zone that is balanced will not balance a zone on any
  *   reasonable sized machine
  * o On all other machines, the top zone must be at least a reasonable
- *   precentage of the middle zones. For example, on 32-bit x86, highmem
+ *   percentage of the middle zones. For example, on 32-bit x86, highmem
  *   would need to be at least 256M for it to be balance a whole node.
  *   Similarly, on x86-64 the Normal zone would need to be at least 1G
  *   to balance a node on its own. These seemed like reasonable ratios.
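
The ratios in this comment line up with a roughly 25% node-balance threshold, i.e. kswapd treats a node as balanced once zones holding about a quarter of it are balanced (a hedged reading of this era's kswapd logic, not a quote of it). A small check of the x86-64 example with invented zone sizes:

#include <stdio.h>

int main(void)
{
	/* Hypothetical x86-64 node, sizes in MB: DMA + DMA32 + Normal. */
	unsigned long dma = 16, dma32 = 3000, normal = 1024;
	unsigned long node = dma + dma32 + normal;

	/* Assumed threshold: a quarter of the node (node >> 2). */
	unsigned long threshold = node >> 2;

	printf("node=%luM, threshold=%luM, Normal=%luM: %s\n",
	       node, threshold, normal,
	       normal >= threshold ? "Normal alone can balance the node"
				   : "Normal alone cannot balance the node");
	return 0;
}

With these numbers the threshold works out to about 1010M, so a 1G Normal zone just clears it, matching the "at least 1G" figure in the comment.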