author     Mel Gorman <mgorman@suse.de>  2011-10-31 20:09:33 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-10-31 20:30:50 -0400
commit     e0c23279c9f800c403f37511484d9014ac83adec (patch)
tree       9dcf058d3d1c691328ea5839dfe9c340e47ee3fa /mm
parent     e0887c19b2daa140f20ca8104bdc5740f39dbb86 (diff)
vmscan: abort reclaim/compaction if compaction can proceed
If compaction can proceed, shrink_zones() stops doing any work, but its
callers still call shrink_slab(), which raises the priority and
potentially sleeps. This is unnecessary and wasteful, so this patch
aborts direct reclaim/compaction entirely if compaction can proceed.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Acked-by: Johannes Weiner <jweiner@redhat.com>
Cc: Josh Boyer <jwboyer@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/vmscan.c | 32 +++++++++++++++++++++-----------
1 file changed, 21 insertions(+), 11 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7e0f05797388..a90c603a8d02 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2103,14 +2103,19 @@ restart:
  *
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
+ *
+ * This function returns true if a zone is being reclaimed for a costly
+ * high-order allocation and compaction is either ready to begin or deferred.
+ * This indicates to the caller that it should retry the allocation or fail.
  */
-static void shrink_zones(int priority, struct zonelist *zonelist,
+static bool shrink_zones(int priority, struct zonelist *zonelist,
 			struct scan_control *sc)
 {
 	struct zoneref *z;
 	struct zone *zone;
 	unsigned long nr_soft_reclaimed;
 	unsigned long nr_soft_scanned;
+	bool should_abort_reclaim = false;
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 					gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2127,19 +2132,20 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
 			continue;	/* Let kswapd poll it */
 		if (COMPACTION_BUILD) {
 			/*
-			 * If we already have plenty of memory
-			 * free for compaction, don't free any
-			 * more. Even though compaction is
-			 * invoked for any non-zero order,
-			 * only frequent costly order
-			 * reclamation is disruptive enough to
-			 * become a noticable problem, like
-			 * transparent huge page allocations.
+			 * If we already have plenty of memory free for
+			 * compaction in this zone, don't free any more.
+			 * Even though compaction is invoked for any
+			 * non-zero order, only frequent costly order
+			 * reclamation is disruptive enough to become a
+			 * noticable problem, like transparent huge page
+			 * allocations.
 			 */
 			if (sc->order > PAGE_ALLOC_COSTLY_ORDER &&
 				(compaction_suitable(zone, sc->order) ||
-				 compaction_deferred(zone)))
+				 compaction_deferred(zone))) {
+				should_abort_reclaim = true;
 				continue;
+			}
 		}
 		/*
 		 * This steals pages from memory cgroups over softlimit
@@ -2158,6 +2164,8 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
 
 		shrink_zone(priority, zone, sc);
 	}
+
+	return should_abort_reclaim;
 }
 
 static bool zone_reclaimable(struct zone *zone)
@@ -2222,7 +2230,9 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		sc->nr_scanned = 0;
 		if (!priority)
 			disable_swap_token(sc->mem_cgroup);
-		shrink_zones(priority, zonelist, sc);
+		if (shrink_zones(priority, zonelist, sc))
+			break;
+
 		/*
 		 * Don't shrink slabs when reclaiming memory from
 		 * over limit cgroups
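For context, the net effect on the direct-reclaim loop can be sketched as
follows. This is a simplified illustration of do_try_to_free_pages() after
the patch, not the actual kernel code: counter accounting, writeback
throttling, and the OOM fallback are elided, and only identifiers that
appear in the patch or commit message are used.

	/*
	 * Simplified sketch of the priority loop after this patch.
	 * shrink_zones() now reports whether reclaim should be aborted
	 * because compaction can proceed for a costly high-order request.
	 */
	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		sc->nr_scanned = 0;
		if (!priority)
			disable_swap_token(sc->mem_cgroup);

		/*
		 * Abort immediately if compaction is ready: previously the
		 * loop fell through to shrink_slab() and congestion_wait(),
		 * raising the reclaim priority and potentially sleeping
		 * even though no further reclaim work was needed.
		 */
		if (shrink_zones(priority, zonelist, sc))
			break;

		/* ... shrink_slab(), writeback throttling, etc. ... */
	}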