author		Vlastimil Babka <vbabka@suse.cz>	2014-10-09 18:27:04 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-09 22:25:53 -0400
commit		98dd3b48a7b8e8277f14c2b7d879477efc1ed0d0
tree		3af739a4e8ff892de86c8d55a42c86468f7608be
parent		53853e2d2bfb748a8b5aa2fd1de15699266865e0
mm, compaction: do not count compact_stall if all zones skipped compaction
The compact_stall vmstat counter counts the number of allocations stalled by
direct compaction.  It does not count when all attempted zones had deferred
compaction, but it does count when all zones skipped compaction.  The skipping
is decided by a very early check of compaction_suitable(), based on watermarks
and memory fragmentation.  It therefore makes sense not to count skipped
compactions as stalls.  Moreover, compact_success and compact_fail are already
not counted when compaction was skipped, so this patch changes the
compact_stall counting to match the other two.

Additionally, restructure __alloc_pages_direct_compact() code for better
readability.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
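To make the accounting change concrete, the standalone sketch below contrasts the old check, which bumped the stall counter for COMPACT_SKIPPED as well, with the new switch that bails out for both deferred and skipped results before counting. This is illustrative only: the enum mirrors the kernel's ordering, but the counter and helper names are stubs, not the real mm/page_alloc.c code.

/*
 * Simplified, standalone sketch of the compact_stall accounting change.
 * The enum mirrors the kernel's ordering, but the counter and helpers
 * below are illustrative stubs, not the real mm/page_alloc.c code.
 */
#include <stdbool.h>
#include <stdio.h>

enum compact_result {
	COMPACT_DEFERRED,	/* compaction deferred due to past failures */
	COMPACT_SKIPPED,	/* early check said it is not worth trying */
	COMPACT_CONTINUE,
	COMPACT_PARTIAL,
	COMPACT_COMPLETE,
};

static unsigned long compactstall;	/* stands in for the COMPACTSTALL vmstat counter */

/* Old behaviour: a skipped compaction still counted as a stall. */
static bool stall_accounting_old(enum compact_result result, bool *deferred)
{
	if (result > COMPACT_DEFERRED) {
		compactstall++;		/* counted even for COMPACT_SKIPPED */
		return true;
	}
	*deferred = true;
	return false;
}

/* New behaviour: deferred and skipped both bail out before counting. */
static bool stall_accounting_new(enum compact_result result, bool *deferred)
{
	switch (result) {
	case COMPACT_DEFERRED:
		*deferred = true;
		/* fall-through */
	case COMPACT_SKIPPED:
		return false;
	default:
		break;
	}
	compactstall++;			/* only when compaction actually ran in some zone */
	return true;
}

int main(void)
{
	bool deferred = false;

	stall_accounting_old(COMPACT_SKIPPED, &deferred);	/* increments compactstall */
	stall_accounting_new(COMPACT_SKIPPED, &deferred);	/* leaves it untouched */
	printf("compactstall = %lu\n", compactstall);		/* prints 1 */
	return 0;
}

Compiled and run, this prints compactstall = 1: only the old check treats the skipped case as a stall, which is exactly what the patch below removes.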
-rw-r--r--	mm/page_alloc.c	76
1 file changed, 41 insertions(+), 35 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 514fd8008114..822babd808fe 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2301,7 +2301,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 {
 	struct zone *last_compact_zone = NULL;
 	unsigned long compact_result;
-
+	struct page *page;
 
 	if (!order)
 		return NULL;
@@ -2313,49 +2313,55 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 					&last_compact_zone);
 	current->flags &= ~PF_MEMALLOC;
 
-	if (compact_result > COMPACT_DEFERRED)
-		count_vm_event(COMPACTSTALL);
-	else
+	switch (compact_result) {
+	case COMPACT_DEFERRED:
 		*deferred_compaction = true;
+		/* fall-through */
+	case COMPACT_SKIPPED:
+		return NULL;
+	default:
+		break;
+	}
 
-	if (compact_result > COMPACT_SKIPPED) {
-		struct page *page;
+	/*
+	 * At least in one zone compaction wasn't deferred or skipped, so let's
+	 * count a compaction stall
+	 */
+	count_vm_event(COMPACTSTALL);
 
-		/* Page migration frees to the PCP lists but we want merging */
-		drain_pages(get_cpu());
-		put_cpu();
+	/* Page migration frees to the PCP lists but we want merging */
+	drain_pages(get_cpu());
+	put_cpu();
 
-		page = get_page_from_freelist(gfp_mask, nodemask,
-				order, zonelist, high_zoneidx,
-				alloc_flags & ~ALLOC_NO_WATERMARKS,
-				preferred_zone, classzone_idx, migratetype);
+	page = get_page_from_freelist(gfp_mask, nodemask,
+			order, zonelist, high_zoneidx,
+			alloc_flags & ~ALLOC_NO_WATERMARKS,
+			preferred_zone, classzone_idx, migratetype);
 
-		if (page) {
-			struct zone *zone = page_zone(page);
+	if (page) {
+		struct zone *zone = page_zone(page);
 
-			zone->compact_blockskip_flush = false;
-			compaction_defer_reset(zone, order, true);
-			count_vm_event(COMPACTSUCCESS);
-			return page;
-		}
+		zone->compact_blockskip_flush = false;
+		compaction_defer_reset(zone, order, true);
+		count_vm_event(COMPACTSUCCESS);
+		return page;
+	}
 
-		/*
-		 * last_compact_zone is where try_to_compact_pages thought
-		 * allocation should succeed, so it did not defer compaction.
-		 * But now we know that it didn't succeed, so we do the defer.
-		 */
-		if (last_compact_zone && mode != MIGRATE_ASYNC)
-			defer_compaction(last_compact_zone, order);
+	/*
+	 * last_compact_zone is where try_to_compact_pages thought allocation
+	 * should succeed, so it did not defer compaction. But here we know
+	 * that it didn't succeed, so we do the defer.
+	 */
+	if (last_compact_zone && mode != MIGRATE_ASYNC)
+		defer_compaction(last_compact_zone, order);
 
-		/*
-		 * It's bad if compaction run occurs and fails.
-		 * The most likely reason is that pages exist,
-		 * but not enough to satisfy watermarks.
-		 */
-		count_vm_event(COMPACTFAIL);
+	/*
+	 * It's bad if compaction run occurs and fails. The most likely reason
+	 * is that pages exist, but not enough to satisfy watermarks.
+	 */
+	count_vm_event(COMPACTFAIL);
 
-		cond_resched();
-	}
+	cond_resched();
 
 	return NULL;
 }