aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--mm/compaction.c54
1 files changed, 28 insertions, 26 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index b6984e234fdf..bcce7897e17a 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -384,6 +384,20 @@ static bool suitable_migration_target(struct page *page)
384}
385
386/*
387 * Returns the start pfn of the last page block in a zone. This is the starting
388 * point for full compaction of a zone. Compaction searches for free pages from
389 * the end of each zone, while isolate_freepages_block scans forward inside each
390 * page block.
391 */
392static unsigned long start_free_pfn(struct zone *zone)
393{
394 unsigned long free_pfn;
395 free_pfn = zone->zone_start_pfn + zone->spanned_pages;
396 free_pfn &= ~(pageblock_nr_pages-1);
397 return free_pfn;
398}
399
400/*
401 * Based on information in the current compact_control, find blocks
402 * suitable for isolating free pages from and then isolate them.
403 */
@@ -422,17 +436,6 @@ static void isolate_freepages(struct zone *zone,
436 pfn -= pageblock_nr_pages) {
437 unsigned long isolated;
438
425 /*
426 * Skip ahead if another thread is compacting in the area
427 * simultaneously. If we wrapped around, we can only skip
428 * ahead if zone->compact_cached_free_pfn also wrapped to
429 * above our starting point.
430 */
431 if (cc->order > 0 && (!cc->wrapped ||
432 zone->compact_cached_free_pfn >
433 cc->start_free_pfn))
434 pfn = min(pfn, zone->compact_cached_free_pfn);
435
439 if (!pfn_valid(pfn))
440 continue;
441
@@ -474,7 +477,15 @@ static void isolate_freepages(struct zone *zone,
477 */
478 if (isolated) {
479 high_pfn = max(high_pfn, pfn);
477 if (cc->order > 0)
480
481 /*
482 * If the free scanner has wrapped, update
483 * compact_cached_free_pfn to point to the highest
484 * pageblock with free pages. This reduces excessive
485 * scanning of full pageblocks near the end of the
486 * zone
487 */
488 if (cc->order > 0 && cc->wrapped)
489 zone->compact_cached_free_pfn = high_pfn;
490 }
491 }
@@ -484,6 +495,11 @@ static void isolate_freepages(struct zone *zone,
495
496 cc->free_pfn = high_pfn;
497 cc->nr_freepages = nr_freepages;
498
499 /* If compact_cached_free_pfn is reset then set it now */
500 if (cc->order > 0 && !cc->wrapped &&
501 zone->compact_cached_free_pfn == start_free_pfn(zone))
502 zone->compact_cached_free_pfn = high_pfn;
503}
504
505/*
@@ -570,20 +586,6 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
586 return ISOLATE_SUCCESS;
587}
588
573/*
574 * Returns the start pfn of the last page block in a zone. This is the starting
575 * point for full compaction of a zone. Compaction searches for free pages from
576 * the end of each zone, while isolate_freepages_block scans forward inside each
577 * page block.
578 */
579static unsigned long start_free_pfn(struct zone *zone)
580{
581 unsigned long free_pfn;
582 free_pfn = zone->zone_start_pfn + zone->spanned_pages;
583 free_pfn &= ~(pageblock_nr_pages-1);
584 return free_pfn;
585}
586
589static int compact_finished(struct zone *zone,
590 struct compact_control *cc)
591{