aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorVlastimil Babka <vbabka@suse.cz>2014-01-21 18:51:07 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-01-21 19:19:48 -0500
commitde6c60a6c115acaa721cfd499e028a413d1fcbf3 (patch)
tree81277ea0bf611f37544dbb0a22e4eff3fc2a9aa8 /mm
parent0eb927c0ab789d3d7d69f68acb850f69d4e7c36f (diff)
mm: compaction: encapsulate defer reset logic
Currently there are several functions to manipulate the deferred compaction state variables. The remaining case where the variables are touched directly is when a successful allocation occurs in direct compaction, or is expected to be successful in the future by kswapd. Here, the lowest order that is expected to fail is updated, and in the case of successful allocation, the deferred status and counter is reset completely.

Create a new function compaction_defer_reset() to encapsulate this functionality and make it easier to understand the code. No functional change.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/compaction.c9
-rw-r--r--mm/page_alloc.c5
2 files changed, 5 insertions, 9 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index a03995eddedb..927de97cab8d 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1124,12 +1124,11 @@ static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
 		compact_zone(zone, cc);
 
 		if (cc->order > 0) {
-			int ok = zone_watermark_ok(zone, cc->order,
-						low_wmark_pages(zone), 0, 0);
-			if (ok && cc->order >= zone->compact_order_failed)
-				zone->compact_order_failed = cc->order + 1;
+			if (zone_watermark_ok(zone, cc->order,
+						low_wmark_pages(zone), 0, 0))
+				compaction_defer_reset(zone, cc->order, false);
 			/* Currently async compaction is never deferred. */
-			else if (!ok && cc->sync)
+			else if (cc->sync)
 				defer_compaction(zone, cc->order);
 		}
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b230e838883d..84da0e3bc886 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2235,10 +2235,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 				preferred_zone, migratetype);
 		if (page) {
 			preferred_zone->compact_blockskip_flush = false;
-			preferred_zone->compact_considered = 0;
-			preferred_zone->compact_defer_shift = 0;
-			if (order >= preferred_zone->compact_order_failed)
-				preferred_zone->compact_order_failed = order + 1;
+			compaction_defer_reset(preferred_zone, order, true);
 			count_vm_event(COMPACTSUCCESS);
 			return page;
 		}