path: root/mm
author	Vlastimil Babka <vbabka@suse.cz>	2014-01-21 18:51:10 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-21 19:19:48 -0500
commit	50b5b094e683f8e51e82c6dfe97b1608cf97e6c0 (patch)
tree	bff4a81f4426cdf17b90dd36eb80aedfbba318de /mm
parent	7ed695e069c3cbea5e1fd08f84a04536da91f584 (diff)
mm: compaction: do not mark unmovable pageblocks as skipped in async compaction
Compaction temporarily marks pageblocks where it fails to isolate pages as to-be-skipped in further compactions, in order to improve efficiency. One of the reasons to fail isolating pages is that isolation is not attempted in pageblocks that are not of MIGRATE_MOVABLE (or CMA) type.

The problem is that blocks skipped due to not being MIGRATE_MOVABLE in async compaction become skipped due to the temporary mark also in future sync compaction. Moreover, this may follow quite soon during __alloc_pages_slowpath, without much time for kswapd to clear the pageblock skip marks. This goes against the idea that sync compaction should try to scan these blocks more thoroughly than the async compaction.

The fix is to ensure in async compaction that these !MIGRATE_MOVABLE blocks are not marked to be skipped. Note this should not affect performance or locking impact of further async compactions, as skipping a block due to being !MIGRATE_MOVABLE is done soon after skipping a block marked to be skipped, both without locking.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
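[Editor's illustration] A minimal standalone C sketch of the control flow this patch changes. It is a simplified model, not kernel code: the pageblock struct, migratetype enum, and scan_pageblock() helper below are hypothetical stand-ins, and the "whole pageblock scanned" condition of the real code is collapsed into a single flag check.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's migratetypes and per-pageblock
 * state; the real code keeps the skip bit in the zone's pageblock flags. */
enum migratetype { MIGRATE_MOVABLE, MIGRATE_UNMOVABLE };

struct pageblock {
	enum migratetype mt;
	bool skip;	/* the temporary to-be-skipped mark */
};

static void scan_pageblock(struct pageblock *pb, bool sync)
{
	bool skipped_async_unsuitable = false;

	if (pb->skip) {
		printf("%s: block marked skip, not scanned\n",
		       sync ? "sync" : "async");
		return;
	}

	if (!sync && pb->mt != MIGRATE_MOVABLE) {
		/* The fix: remember that we gave up only because the
		 * block is unsuitable for async compaction... */
		skipped_async_unsuitable = true;
		printf("async: block unsuitable, giving up\n");
	} else {
		printf("%s: block scanned\n", sync ? "sync" : "async");
	}

	/* ...and set the skip mark only when the block was genuinely
	 * scanned, so a later sync pass still gets to try it. */
	if (!skipped_async_unsuitable)
		pb->skip = true;
}

int main(void)
{
	struct pageblock pb = { .mt = MIGRATE_UNMOVABLE, .skip = false };

	scan_pageblock(&pb, false);	/* async: gives up, leaves no mark */
	scan_pageblock(&pb, true);	/* sync: can still scan the block */
	return 0;
}

Running the sketch shows the async pass leaving no skip mark on the unsuitable block, so the subsequent sync pass still scans it; without the flag, the async pass would set the mark and the sync pass would bail out immediately.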
Diffstat (limited to 'mm')
-rw-r--r--	mm/compaction.c	11
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index cc46db36e708..32a033cb5c65 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -459,6 +459,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 	unsigned long flags;
 	bool locked = false;
 	struct page *page = NULL, *valid_page = NULL;
+	bool skipped_async_unsuitable = false;
 
 	/*
 	 * Ensure that there are not too many pages isolated from the LRU
@@ -534,6 +535,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
 		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
 			cc->finished_update_migrate = true;
+			skipped_async_unsuitable = true;
 			goto next_pageblock;
 		}
 
@@ -627,8 +629,13 @@ next_pageblock:
 	if (locked)
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 
-	/* Update the pageblock-skip if the whole pageblock was scanned */
-	if (low_pfn == end_pfn)
+	/*
+	 * Update the pageblock-skip information and cached scanner pfn,
+	 * if the whole pageblock was scanned without isolating any page.
+	 * This is not done when pageblock was skipped due to being unsuitable
+	 * for async compaction, so that eventual sync compaction can try.
+	 */
+	if (low_pfn == end_pfn && !skipped_async_unsuitable)
 		update_pageblock_skip(cc, valid_page, nr_isolated, true);
 
 	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);