aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVlastimil Babka <vbabka@suse.cz>2014-06-04 19:07:26 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-06-04 19:54:00 -0400
commitc96b9e508f3d06ddb601dcc9792d62c044ab359e (patch)
treeb4acb0fb4634a8fbe98968b02532b6f1b05ddaaf
parent13fb44e4b0414d7e718433a49e6430d5b76bd46e (diff)
mm/compaction: cleanup isolate_freepages()
isolate_freepages() is currently somewhat hard to follow thanks to many different pfn variables. The 'high_pfn' variable's name is especially misleading, because it looks like it is related to the 'low_pfn' variable, but in fact it is not. This patch renames the 'high_pfn' variable to a hopefully less confusing name, and slightly changes its handling without a functional change. A comment made obsolete by recent changes is also updated. [akpm@linux-foundation.org: comment fixes, per Minchan] [iamjoonsoo.kim@lge.com: cleanups] Signed-off-by: Vlastimil Babka <vbabka@suse.cz> Cc: Minchan Kim <minchan@kernel.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> Cc: Michal Nazarewicz <mina86@mina86.com> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Christoph Lameter <cl@linux.com> Cc: Rik van Riel <riel@redhat.com> Cc: Dongjun Shin <d.j.shin@samsung.com> Cc: Sunghwan Yun <sunghwan.yun@samsung.com> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  mm/compaction.c | 56
1 file changed, 27 insertions(+), 29 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 95f7531458f7..6010aabde28c 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -665,7 +665,10 @@ static void isolate_freepages(struct zone *zone,
665 struct compact_control *cc) 665 struct compact_control *cc)
666{ 666{
667 struct page *page; 667 struct page *page;
668 unsigned long high_pfn, low_pfn, pfn, z_end_pfn; 668 unsigned long block_start_pfn; /* start of current pageblock */
669 unsigned long block_end_pfn; /* end of current pageblock */
670 unsigned long low_pfn; /* lowest pfn scanner is able to scan */
671 unsigned long next_free_pfn; /* start pfn for scaning at next round */
669 int nr_freepages = cc->nr_freepages; 672 int nr_freepages = cc->nr_freepages;
670 struct list_head *freelist = &cc->freepages; 673 struct list_head *freelist = &cc->freepages;
671 674
@@ -673,32 +676,33 @@ static void isolate_freepages(struct zone *zone,
673 * Initialise the free scanner. The starting point is where we last 676 * Initialise the free scanner. The starting point is where we last
674 * successfully isolated from, zone-cached value, or the end of the 677 * successfully isolated from, zone-cached value, or the end of the
675 * zone when isolating for the first time. We need this aligned to 678 * zone when isolating for the first time. We need this aligned to
676 * the pageblock boundary, because we do pfn -= pageblock_nr_pages 679 * the pageblock boundary, because we do
677 * in the for loop. 680 * block_start_pfn -= pageblock_nr_pages in the for loop.
681 * For ending point, take care when isolating in last pageblock of a
682 * a zone which ends in the middle of a pageblock.
678 * The low boundary is the end of the pageblock the migration scanner 683 * The low boundary is the end of the pageblock the migration scanner
679 * is using. 684 * is using.
680 */ 685 */
681 pfn = cc->free_pfn & ~(pageblock_nr_pages-1); 686 block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
687 block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
688 zone_end_pfn(zone));
682 low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages); 689 low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
683 690
684 /* 691 /*
685 * Take care that if the migration scanner is at the end of the zone 692 * If no pages are isolated, the block_start_pfn < low_pfn check
686 * that the free scanner does not accidentally move to the next zone 693 * will kick in.
687 * in the next isolation cycle.
688 */ 694 */
689 high_pfn = min(low_pfn, pfn); 695 next_free_pfn = 0;
690
691 z_end_pfn = zone_end_pfn(zone);
692 696
693 /* 697 /*
694 * Isolate free pages until enough are available to migrate the 698 * Isolate free pages until enough are available to migrate the
695 * pages on cc->migratepages. We stop searching if the migrate 699 * pages on cc->migratepages. We stop searching if the migrate
696 * and free page scanners meet or enough free pages are isolated. 700 * and free page scanners meet or enough free pages are isolated.
697 */ 701 */
698 for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages; 702 for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
699 pfn -= pageblock_nr_pages) { 703 block_end_pfn = block_start_pfn,
704 block_start_pfn -= pageblock_nr_pages) {
700 unsigned long isolated; 705 unsigned long isolated;
701 unsigned long end_pfn;
702 706
703 /* 707 /*
704 * This can iterate a massively long zone without finding any 708 * This can iterate a massively long zone without finding any
@@ -707,7 +711,7 @@ static void isolate_freepages(struct zone *zone,
707 */ 711 */
708 cond_resched(); 712 cond_resched();
709 713
710 if (!pfn_valid(pfn)) 714 if (!pfn_valid(block_start_pfn))
711 continue; 715 continue;
712 716
713 /* 717 /*
@@ -717,7 +721,7 @@ static void isolate_freepages(struct zone *zone,
717 * i.e. it's possible that all pages within a zones range of 721 * i.e. it's possible that all pages within a zones range of
718 * pages do not belong to a single zone. 722 * pages do not belong to a single zone.
719 */ 723 */
720 page = pfn_to_page(pfn); 724 page = pfn_to_page(block_start_pfn);
721 if (page_zone(page) != zone) 725 if (page_zone(page) != zone)
722 continue; 726 continue;
723 727
@@ -730,14 +734,8 @@ static void isolate_freepages(struct zone *zone,
730 continue; 734 continue;
731 735
732 /* Found a block suitable for isolating free pages from */ 736 /* Found a block suitable for isolating free pages from */
733 737 isolated = isolate_freepages_block(cc, block_start_pfn,
734 /* 738 block_end_pfn, freelist, false);
735 * Take care when isolating in last pageblock of a zone which
736 * ends in the middle of a pageblock.
737 */
738 end_pfn = min(pfn + pageblock_nr_pages, z_end_pfn);
739 isolated = isolate_freepages_block(cc, pfn, end_pfn,
740 freelist, false);
741 nr_freepages += isolated; 739 nr_freepages += isolated;
742 740
743 /* 741 /*
@@ -745,9 +743,9 @@ static void isolate_freepages(struct zone *zone,
745 * looking for free pages, the search will restart here as 743 * looking for free pages, the search will restart here as
746 * page migration may have returned some pages to the allocator 744 * page migration may have returned some pages to the allocator
747 */ 745 */
748 if (isolated) { 746 if (isolated && next_free_pfn == 0) {
749 cc->finished_update_free = true; 747 cc->finished_update_free = true;
750 high_pfn = max(high_pfn, pfn); 748 next_free_pfn = block_start_pfn;
751 } 749 }
752 } 750 }
753 751
@@ -758,10 +756,10 @@ static void isolate_freepages(struct zone *zone,
758 * If we crossed the migrate scanner, we want to keep it that way 756 * If we crossed the migrate scanner, we want to keep it that way
759 * so that compact_finished() may detect this 757 * so that compact_finished() may detect this
760 */ 758 */
761 if (pfn < low_pfn) 759 if (block_start_pfn < low_pfn)
762 cc->free_pfn = max(pfn, zone->zone_start_pfn); 760 next_free_pfn = cc->migrate_pfn;
763 else 761
764 cc->free_pfn = high_pfn; 762 cc->free_pfn = next_free_pfn;
765 cc->nr_freepages = nr_freepages; 763 cc->nr_freepages = nr_freepages;
766} 764}
767 765