aboutsummaryrefslogtreecommitdiffstats
path: root/mm/compaction.c
diff options
context:
space:
mode:
authorVlastimil Babka <vbabka@suse.cz>2014-06-04 19:08:34 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-06-04 19:54:07 -0400
commite9ade569910a82614ff5f2c2cea2b65a8d785da4 (patch)
tree0540262e4eb7d5316e219bd4ef6426649b30b4c4 /mm/compaction.c
parentf8c9301fa5a2a8b873c67f2a3d8230d5c13f61b7 (diff)
mm/compaction: avoid rescanning pageblocks in isolate_freepages
The compaction free scanner in isolate_freepages() currently remembers PFN of the highest pageblock where it successfully isolates, to be used as the starting pageblock for the next invocation. The rationale behind this is that page migration might return free pages to the allocator when migration fails and we don't want to skip them if the compaction continues. Since migration now returns free pages back to compaction code where they can be reused, this is no longer a concern. This patch changes isolate_freepages() so that the PFN for restarting is updated with each pageblock where isolation is attempted. Using stress-highalloc from mmtests, this resulted in 10% reduction of the pages scanned by the free scanner. Note that the somewhat similar functionality that records highest successful pageblock in zone->compact_cached_free_pfn, remains unchanged. This cache is used when the whole compaction is restarted, not for multiple invocations of the free scanner during single compaction. Signed-off-by: Vlastimil Babka <vbabka@suse.cz> Cc: Minchan Kim <minchan@kernel.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> Acked-by: Michal Nazarewicz <mina86@mina86.com> Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Christoph Lameter <cl@linux.com> Cc: Rik van Riel <riel@redhat.com> Acked-by: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--mm/compaction.c22
1 files changed, 7 insertions, 15 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 3c60e3d5237e..58441220b953 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -688,7 +688,6 @@ static void isolate_freepages(struct zone *zone,
688 unsigned long block_start_pfn; /* start of current pageblock */ 688 unsigned long block_start_pfn; /* start of current pageblock */
689 unsigned long block_end_pfn; /* end of current pageblock */ 689 unsigned long block_end_pfn; /* end of current pageblock */
690 unsigned long low_pfn; /* lowest pfn scanner is able to scan */ 690 unsigned long low_pfn; /* lowest pfn scanner is able to scan */
691 unsigned long next_free_pfn; /* start pfn for scanning at next round */
692 int nr_freepages = cc->nr_freepages; 691 int nr_freepages = cc->nr_freepages;
693 struct list_head *freelist = &cc->freepages; 692 struct list_head *freelist = &cc->freepages;
694 693
@@ -709,12 +708,6 @@ static void isolate_freepages(struct zone *zone,
709 low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages); 708 low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
710 709
711 /* 710 /*
712 * If no pages are isolated, the block_start_pfn < low_pfn check
713 * will kick in.
714 */
715 next_free_pfn = 0;
716
717 /*
718 * Isolate free pages until enough are available to migrate the 711 * Isolate free pages until enough are available to migrate the
719 * pages on cc->migratepages. We stop searching if the migrate 712 * pages on cc->migratepages. We stop searching if the migrate
720 * and free page scanners meet or enough free pages are isolated. 713 * and free page scanners meet or enough free pages are isolated.
@@ -754,19 +747,19 @@ static void isolate_freepages(struct zone *zone,
754 continue; 747 continue;
755 748
756 /* Found a block suitable for isolating free pages from */ 749 /* Found a block suitable for isolating free pages from */
750 cc->free_pfn = block_start_pfn;
757 isolated = isolate_freepages_block(cc, block_start_pfn, 751 isolated = isolate_freepages_block(cc, block_start_pfn,
758 block_end_pfn, freelist, false); 752 block_end_pfn, freelist, false);
759 nr_freepages += isolated; 753 nr_freepages += isolated;
760 754
761 /* 755 /*
762 * Record the highest PFN we isolated pages from. When next 756 * Set a flag that we successfully isolated in this pageblock.
763 * looking for free pages, the search will restart here as 757 * In the next loop iteration, zone->compact_cached_free_pfn
764 * page migration may have returned some pages to the allocator 758 * will not be updated and thus it will effectively contain the
759 * highest pageblock we isolated pages from.
765 */ 760 */
766 if (isolated && next_free_pfn == 0) { 761 if (isolated)
767 cc->finished_update_free = true; 762 cc->finished_update_free = true;
768 next_free_pfn = block_start_pfn;
769 }
770 } 763 }
771 764
772 /* split_free_page does not map the pages */ 765 /* split_free_page does not map the pages */
@@ -777,9 +770,8 @@ static void isolate_freepages(struct zone *zone,
777 * so that compact_finished() may detect this 770 * so that compact_finished() may detect this
778 */ 771 */
779 if (block_start_pfn < low_pfn) 772 if (block_start_pfn < low_pfn)
780 next_free_pfn = cc->migrate_pfn; 773 cc->free_pfn = cc->migrate_pfn;
781 774
782 cc->free_pfn = next_free_pfn;
783 cc->nr_freepages = nr_freepages; 775 cc->nr_freepages = nr_freepages;
784} 776}
785 777