diff options
Diffstat (limited to 'mm/compaction.c')
 mm/compaction.c | 65 +++------------------------------------------------------
 1 file changed, 5 insertions(+), 60 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index bdf6e13045ea..db76361a3117 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -538,20 +538,6 @@ next_pageblock:
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 #ifdef CONFIG_COMPACTION
 /*
- * Returns the start pfn of the last page block in a zone. This is the starting
- * point for full compaction of a zone. Compaction searches for free pages from
- * the end of each zone, while isolate_freepages_block scans forward inside each
- * page block.
- */
-static unsigned long start_free_pfn(struct zone *zone)
-{
-	unsigned long free_pfn;
-	free_pfn = zone->zone_start_pfn + zone->spanned_pages;
-	free_pfn &= ~(pageblock_nr_pages-1);
-	return free_pfn;
-}
-
-/*
  * Based on information in the current compact_control, find blocks
  * suitable for isolating free pages from and then isolate them.
  */
@@ -619,19 +605,8 @@ static void isolate_freepages(struct zone *zone,
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
-		if (isolated) {
+		if (isolated)
			high_pfn = max(high_pfn, pfn);
-
-			/*
-			 * If the free scanner has wrapped, update
-			 * compact_cached_free_pfn to point to the highest
-			 * pageblock with free pages. This reduces excessive
-			 * scanning of full pageblocks near the end of the
-			 * zone
-			 */
-			if (cc->order > 0 && cc->wrapped)
-				zone->compact_cached_free_pfn = high_pfn;
-		}
	}
 
	/* split_free_page does not map the pages */
@@ -639,11 +614,6 @@ static void isolate_freepages(struct zone *zone,
 
	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
-
-	/* If compact_cached_free_pfn is reset then set it now */
-	if (cc->order > 0 && !cc->wrapped &&
-	    zone->compact_cached_free_pfn == start_free_pfn(zone))
-		zone->compact_cached_free_pfn = high_pfn;
 }
 
 /*
@@ -738,26 +708,8 @@ static int compact_finished(struct zone *zone,
	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;
 
-	/*
-	 * A full (order == -1) compaction run starts at the beginning and
-	 * end of a zone; it completes when the migrate and free scanner meet.
-	 * A partial (order > 0) compaction can start with the free scanner
-	 * at a random point in the zone, and may have to restart.
-	 */
-	if (cc->free_pfn <= cc->migrate_pfn) {
-		if (cc->order > 0 && !cc->wrapped) {
-			/* We started partway through; restart at the end. */
-			unsigned long free_pfn = start_free_pfn(zone);
-			zone->compact_cached_free_pfn = free_pfn;
-			cc->free_pfn = free_pfn;
-			cc->wrapped = 1;
-			return COMPACT_CONTINUE;
-		}
-		return COMPACT_COMPLETE;
-	}
-
-	/* We wrapped around and ended up where we started. */
-	if (cc->wrapped && cc->free_pfn <= cc->start_free_pfn)
+	/* Compaction run completes if the migrate and free scanner meet */
+	if (cc->free_pfn <= cc->migrate_pfn)
		return COMPACT_COMPLETE;
 
	/*
@@ -863,15 +815,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
	/* Setup to move all movable pages to the end of the zone */
	cc->migrate_pfn = zone->zone_start_pfn;
-
-	if (cc->order > 0) {
-		/* Incremental compaction. Start where the last one stopped. */
-		cc->free_pfn = zone->compact_cached_free_pfn;
-		cc->start_free_pfn = cc->free_pfn;
-	} else {
-		/* Order == -1 starts at the end of the zone. */
-		cc->free_pfn = start_free_pfn(zone);
-	}
+	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
+	cc->free_pfn &= ~(pageblock_nr_pages-1);
 
	migrate_prep_local();