aboutsummaryrefslogtreecommitdiffstats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
authorKirill A. Shutemov <kirill.shutemov@linux.intel.com>2012-12-11 19:00:31 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2012-12-11 20:22:22 -0500
commitd84da3f9e4f18809821562bd960e00a10673b341 (patch)
treeb2e6c97c8cd4cb94977ce4fba0770f6cf91eff75 /mm/vmscan.c
parente5adfffc857788c8b7eca0e98cf1e26f1964b292 (diff)
mm: use IS_ENABLED(CONFIG_COMPACTION) instead of COMPACTION_BUILD
We don't need custom COMPACTION_BUILD anymore, since we have handy IS_ENABLED().

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--mm/vmscan.c9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b7ed3767564..a1ce17f44be 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1752,7 +1752,7 @@ out:
 /* Use reclaim/compaction for costly allocs or under memory pressure */
 static bool in_reclaim_compaction(struct scan_control *sc)
 {
-	if (COMPACTION_BUILD && sc->order &&
+	if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
 			(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
 			 sc->priority < DEF_PRIORITY - 2))
 		return true;
@@ -2005,7 +2005,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 			if (zone->all_unreclaimable &&
 					sc->priority != DEF_PRIORITY)
 				continue;	/* Let kswapd poll it */
-			if (COMPACTION_BUILD) {
+			if (IS_ENABLED(CONFIG_COMPACTION)) {
 				/*
 				 * If we already have plenty of memory free for
 				 * compaction in this zone, don't free any more.
@@ -2421,7 +2421,8 @@ static bool zone_balanced(struct zone *zone, int order,
 				balance_gap, classzone_idx, 0))
 		return false;

-	if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
+	if (IS_ENABLED(CONFIG_COMPACTION) && order &&
+	    !compaction_suitable(zone, order))
 		return false;

 	return true;
@@ -2684,7 +2685,7 @@ loop_again:
 			 * Do not reclaim more than needed for compaction.
 			 */
 			testorder = order;
-			if (COMPACTION_BUILD && order &&
+			if (IS_ENABLED(CONFIG_COMPACTION) && order &&
 					compaction_suitable(zone, order) !=
 					COMPACT_SKIPPED)
 				testorder = 0;