 mm/compaction.c | 89 ++++++++++-
 1 file changed, 80 insertions(+), 9 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 329973a1ae45..7487067b4613 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -638,12 +638,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 {
 	struct zone *zone = cc->zone;
 	unsigned long nr_scanned = 0, nr_isolated = 0;
-	struct list_head *migratelist = &cc->migratepages;
 	struct lruvec *lruvec;
 	unsigned long flags = 0;
 	bool locked = false;
 	struct page *page = NULL, *valid_page = NULL;
 	unsigned long start_pfn = low_pfn;
+	bool skip_on_failure = false;
+	unsigned long next_skip_pfn = 0;
 
 	/*
 	 * Ensure that there are not too many pages isolated from the LRU
@@ -664,10 +665,37 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 	if (compact_should_abort(cc))
 		return 0;
 
+	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
+		skip_on_failure = true;
+		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
+	}
+
 	/* Time to isolate some pages for migration */
 	for (; low_pfn < end_pfn; low_pfn++) {
 		bool is_lru;
 
+		if (skip_on_failure && low_pfn >= next_skip_pfn) {
+			/*
+			 * We have isolated all migration candidates in the
+			 * previous order-aligned block, and did not skip it due
+			 * to failure. We should migrate the pages now and
+			 * hopefully succeed compaction.
+			 */
+			if (nr_isolated)
+				break;
+
+			/*
+			 * We failed to isolate in the previous order-aligned
+			 * block. Set the new boundary to the end of the
+			 * current block. Note we can't simply increase
+			 * next_skip_pfn by 1 << order, as low_pfn might have
+			 * been incremented by a higher number due to skipping
+			 * a compound or a high-order buddy page in the
+			 * previous loop iteration.
+			 */
+			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
+		}
+
 		/*
 		 * Periodically drop the lock (if held) regardless of its
 		 * contention, to give chance to IRQs. Abort async compaction
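
The new skip boundary comes from block_end_pfn(), which at this point in mm/compaction.c is expected to return the first pfn past the order-aligned block containing pfn, effectively ALIGN(pfn + 1, 1UL << order). A minimal userspace sketch of that arithmetic (ALIGN redefined locally, arbitrary example pfn; not kernel code) shows where next_skip_pfn lands:

#include <stdio.h>

/* Userspace stand-in for the kernel's ALIGN(): round x up to a multiple of
 * the power-of-two a. */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Assumed to match the mm/compaction.c helper: first pfn past the
 * order-aligned block that contains pfn. */
static unsigned long block_end_pfn(unsigned long pfn, int order)
{
	return ALIGN(pfn + 1, 1UL << order);
}

int main(void)
{
	int order = 9;				/* e.g. a THP-sized request */
	unsigned long low_pfn = 0x12345;	/* arbitrary example pfn */
	unsigned long next_skip_pfn = block_end_pfn(low_pfn, order);

	/* 0x12345 lies in the order-9 block [0x12200, 0x12400), so the scanner
	 * re-evaluates its progress once low_pfn reaches 0x12400. */
	printf("low_pfn=%#lx next_skip_pfn=%#lx\n", low_pfn, next_skip_pfn);
	return 0;
}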
@@ -679,7 +707,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			break;
 
 		if (!pfn_valid_within(low_pfn))
-			continue;
+			goto isolate_fail;
 		nr_scanned++;
 
 		page = pfn_to_page(low_pfn);
@@ -734,11 +762,11 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			if (likely(comp_order < MAX_ORDER))
 				low_pfn += (1UL << comp_order) - 1;
 
-			continue;
+			goto isolate_fail;
 		}
 
 		if (!is_lru)
-			continue;
+			goto isolate_fail;
 
 		/*
 		 * Migration will fail if an anonymous page is pinned in memory,
@@ -747,7 +775,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		 */
 		if (!page_mapping(page) &&
 		    page_count(page) > page_mapcount(page))
-			continue;
+			goto isolate_fail;
 
 		/* If we already hold the lock, we can skip some rechecking */
 		if (!locked) {
@@ -758,7 +786,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 			/* Recheck PageLRU and PageCompound under lock */
 			if (!PageLRU(page))
-				continue;
+				goto isolate_fail;
 
 			/*
 			 * Page become compound since the non-locked check,
@@ -767,7 +795,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			 */
 			if (unlikely(PageCompound(page))) {
 				low_pfn += (1UL << compound_order(page)) - 1;
-				continue;
+				goto isolate_fail;
 			}
 		}
 
@@ -775,7 +803,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 		/* Try isolate the page */
 		if (__isolate_lru_page(page, isolate_mode) != 0)
-			continue;
+			goto isolate_fail;
 
 		VM_BUG_ON_PAGE(PageCompound(page), page);
 
@@ -783,7 +811,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		del_page_from_lru_list(page, lruvec, page_lru(page));
 
 isolate_success:
-		list_add(&page->lru, migratelist);
+		list_add(&page->lru, &cc->migratepages);
 		cc->nr_migratepages++;
 		nr_isolated++;
 
@@ -801,6 +829,37 @@ isolate_success:
 			++low_pfn;
 			break;
 		}
+
+		continue;
+isolate_fail:
+		if (!skip_on_failure)
+			continue;
+
+		/*
+		 * We have isolated some pages, but then failed. Release them
+		 * instead of migrating, as we cannot form the cc->order buddy
+		 * page anyway.
+		 */
+		if (nr_isolated) {
+			if (locked) {
+				spin_unlock_irqrestore(&zone->lru_lock, flags);
+				locked = false;
+			}
+			acct_isolated(zone, cc);
+			putback_movable_pages(&cc->migratepages);
+			cc->nr_migratepages = 0;
+			cc->last_migrated_pfn = 0;
+			nr_isolated = 0;
+		}
+
+		if (low_pfn < next_skip_pfn) {
+			low_pfn = next_skip_pfn - 1;
+			/*
+			 * The check near the loop beginning would have updated
+			 * next_skip_pfn too, but this is a bit simpler.
+			 */
+			next_skip_pfn += 1UL << cc->order;
+		}
 	}
 
 	/*
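
Taken together, the loop now either isolates an entire cc->order-aligned block or abandons it: on the first failed pfn it puts back anything already isolated and jumps low_pfn to the block boundary. The sketch below is a simplified userspace model of that control flow, with a made-up try_isolate() stub standing in for __isolate_lru_page() and the locking, accounting and putback details stripped out:

#include <stdbool.h>
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned long block_end_pfn(unsigned long pfn, int order)
{
	return ALIGN(pfn + 1, 1UL << order);
}

/* Hypothetical stand-in for the real isolation attempt: pretend that every
 * pfn divisible by 7 is pinned and cannot be isolated. */
static bool try_isolate(unsigned long pfn)
{
	return pfn % 7 != 0;
}

int main(void)
{
	const int order = 2;		/* tiny blocks so the trace stays short */
	unsigned long low_pfn = 0, end_pfn = 16, nr_isolated = 0;
	unsigned long next_skip_pfn = block_end_pfn(low_pfn, order);

	for (; low_pfn < end_pfn; low_pfn++) {
		if (low_pfn >= next_skip_pfn) {
			/* A whole block was isolated: stop and "migrate" it. */
			if (nr_isolated)
				break;
			next_skip_pfn = block_end_pfn(low_pfn, order);
		}

		if (try_isolate(low_pfn)) {
			nr_isolated++;
			continue;
		}

		/* isolate_fail: drop what was gathered, skip to the block end */
		printf("failed at pfn %lu, skipping block up to pfn %lu\n",
		       low_pfn, next_skip_pfn);
		nr_isolated = 0;
		if (low_pfn < next_skip_pfn) {
			low_pfn = next_skip_pfn - 1;
			next_skip_pfn += 1UL << order;
		}
	}

	printf("stopped at pfn %lu with %lu pages isolated\n",
	       low_pfn, nr_isolated);
	return 0;
}

Running this, the scanner gives up on the order-2 blocks containing pfns 0 and 7, then breaks out at pfn 12 with the four pages of block [8, 12) isolated and ready to "migrate".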
@@ -1401,6 +1460,18 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 				ret = COMPACT_CONTENDED;
 				goto out;
 			}
+			/*
+			 * We failed to migrate at least one page in the current
+			 * order-aligned block, so skip the rest of it.
+			 */
+			if (cc->direct_compaction &&
+						(cc->mode == MIGRATE_ASYNC)) {
+				cc->migrate_pfn = block_end_pfn(
+						cc->migrate_pfn - 1, cc->order);
+				/* Draining pcplists is useless in this case */
+				cc->last_migrated_pfn = 0;
+
+			}
 		}
 
 check_drain:
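
The compact_zone() hunk applies the same idea one level up: if migrate_pages() fails during async direct compaction, the migration scanner skips the rest of the current order-aligned block. The cc->migrate_pfn - 1 in the call appears to matter because migrate_pfn points one past the last scanned pfn; a small sketch of the arithmetic (same assumed ALIGN-based helper as above, made-up pfn values) illustrates the two cases:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Assumed equivalent of the mm/compaction.c helper. */
static unsigned long block_end_pfn(unsigned long pfn, int order)
{
	return ALIGN(pfn + 1, 1UL << order);
}

int main(void)
{
	const int order = 9;			/* 512-page blocks */
	/* migrate_pfn points one past the last pfn the scanner handled. */
	unsigned long mid_block   = 0x12345;	/* stopped inside a block */
	unsigned long at_boundary = 0x12400;	/* stopped exactly on a boundary */

	/* Mid-block: jump to the end of the block that was being scanned. */
	printf("%#lx -> %#lx\n", mid_block,
	       block_end_pfn(mid_block - 1, order));	/* prints 0x12400 */

	/* Already aligned: the "- 1" keeps the scanner at this boundary
	 * instead of throwing away the entire next block. */
	printf("%#lx -> %#lx\n", at_boundary,
	       block_end_pfn(at_boundary - 1, order));	/* still 0x12400 */
	return 0;
}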