-rw-r--r--  mm/compaction.c  254
-rw-r--r--  mm/internal.h      4
-rw-r--r--  mm/page_alloc.c    3
3 files changed, 149 insertions, 112 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 7bf150d4e1c8..8058e3f98f08 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -132,7 +132,7 @@ void reset_isolation_suitable(pg_data_t *pgdat)
  */
 static void update_pageblock_skip(struct compact_control *cc,
 			struct page *page, unsigned long nr_isolated,
-			bool set_unsuitable, bool migrate_scanner)
+			bool migrate_scanner)
 {
 	struct zone *zone = cc->zone;
 	unsigned long pfn;
@@ -146,12 +146,7 @@ static void update_pageblock_skip(struct compact_control *cc,
 	if (nr_isolated)
 		return;
 
-	/*
-	 * Only skip pageblocks when all forms of compaction will be known to
-	 * fail in the near future.
-	 */
-	if (set_unsuitable)
-		set_pageblock_skip(page);
+	set_pageblock_skip(page);
 
 	pfn = page_to_pfn(page);
 
@@ -180,7 +175,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
 
 static void update_pageblock_skip(struct compact_control *cc,
 			struct page *page, unsigned long nr_isolated,
-			bool set_unsuitable, bool migrate_scanner)
+			bool migrate_scanner)
 {
 }
 #endif /* CONFIG_COMPACTION */
@@ -348,8 +343,7 @@ isolate_fail:
 
 	/* Update the pageblock-skip if the whole pageblock was scanned */
 	if (blockpfn == end_pfn)
-		update_pageblock_skip(cc, valid_page, total_isolated, true,
-				      false);
+		update_pageblock_skip(cc, valid_page, total_isolated, false);
 
 	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
 	if (total_isolated)
@@ -420,22 +414,19 @@ isolate_freepages_range(struct compact_control *cc,
 }
 
 /* Update the number of anon and file isolated pages in the zone */
-static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
+static void acct_isolated(struct zone *zone, struct compact_control *cc)
 {
 	struct page *page;
 	unsigned int count[2] = { 0, };
 
+	if (list_empty(&cc->migratepages))
+		return;
+
 	list_for_each_entry(page, &cc->migratepages, lru)
 		count[!!page_is_file_cache(page)]++;
 
-	/* If locked we can use the interrupt unsafe versions */
-	if (locked) {
-		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
-		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
-	} else {
-		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
-		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
-	}
+	mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
+	mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
 }
 
 /* Similar to reclaim, but different enough that they don't share logic */
@@ -454,40 +445,34 @@ static bool too_many_isolated(struct zone *zone)
 }
 
 /**
- * isolate_migratepages_range() - isolate all migrate-able pages in range.
- * @zone:	Zone pages are in.
+ * isolate_migratepages_block() - isolate all migrate-able pages within
+ *				  a single pageblock
  * @cc:		Compaction control structure.
- * @low_pfn:	The first PFN of the range.
- * @end_pfn:	The one-past-the-last PFN of the range.
- * @unevictable: true if it allows to isolate unevictable pages
+ * @low_pfn:	The first PFN to isolate
+ * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
+ * @isolate_mode: Isolation mode to be used.
  *
  * Isolate all pages that can be migrated from the range specified by
- * [low_pfn, end_pfn). Returns zero if there is a fatal signal
- * pending), otherwise PFN of the first page that was not scanned
- * (which may be both less, equal to or more then end_pfn).
+ * [low_pfn, end_pfn). The range is expected to be within same pageblock.
+ * Returns zero if there is a fatal signal pending, otherwise PFN of the
+ * first page that was not scanned (which may be both less, equal to or more
+ * than end_pfn).
  *
- * Assumes that cc->migratepages is empty and cc->nr_migratepages is
- * zero.
- *
- * Apart from cc->migratepages and cc->nr_migratetypes this function
- * does not modify any cc's fields, in particular it does not modify
- * (or read for that matter) cc->migrate_pfn.
+ * The pages are isolated on cc->migratepages list (not required to be empty),
+ * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
+ * is neither read nor updated.
  */
-unsigned long
-isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
-			   unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
+static unsigned long
+isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
+			unsigned long end_pfn, isolate_mode_t isolate_mode)
 {
-	unsigned long last_pageblock_nr = 0, pageblock_nr;
+	struct zone *zone = cc->zone;
 	unsigned long nr_scanned = 0, nr_isolated = 0;
 	struct list_head *migratelist = &cc->migratepages;
 	struct lruvec *lruvec;
 	unsigned long flags;
 	bool locked = false;
 	struct page *page = NULL, *valid_page = NULL;
-	bool set_unsuitable = true;
-	const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
-					ISOLATE_ASYNC_MIGRATE : 0) |
-				    (unevictable ? ISOLATE_UNEVICTABLE : 0);
 
 	/*
 	 * Ensure that there are not too many pages isolated from the LRU
@@ -518,19 +503,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		}
 	}
 
-		/*
-		 * migrate_pfn does not necessarily start aligned to a
-		 * pageblock. Ensure that pfn_valid is called when moving
-		 * into a new MAX_ORDER_NR_PAGES range in case of large
-		 * memory holes within the zone
-		 */
-		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
-			if (!pfn_valid(low_pfn)) {
-				low_pfn += MAX_ORDER_NR_PAGES - 1;
-				continue;
-			}
-		}
-
 		if (!pfn_valid_within(low_pfn))
 			continue;
 		nr_scanned++;
@@ -548,28 +520,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		if (!valid_page)
 			valid_page = page;
 
-		/* If isolation recently failed, do not retry */
-		pageblock_nr = low_pfn >> pageblock_order;
-		if (last_pageblock_nr != pageblock_nr) {
-			int mt;
-
-			last_pageblock_nr = pageblock_nr;
-			if (!isolation_suitable(cc, page))
-				goto next_pageblock;
-
-			/*
-			 * For async migration, also only scan in MOVABLE
-			 * blocks. Async migration is optimistic to see if
-			 * the minimum amount of work satisfies the allocation
-			 */
-			mt = get_pageblock_migratetype(page);
-			if (cc->mode == MIGRATE_ASYNC &&
-			    !migrate_async_suitable(mt)) {
-				set_unsuitable = false;
-				goto next_pageblock;
-			}
-		}
-
 		/*
 		 * Skip if free. page_order cannot be used without zone->lock
 		 * as nothing prevents parallel allocations or buddy merging.
@@ -604,8 +554,11 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		 */
 		if (PageTransHuge(page)) {
 			if (!locked)
-				goto next_pageblock;
-			low_pfn += (1 << compound_order(page)) - 1;
+				low_pfn = ALIGN(low_pfn + 1,
+						pageblock_nr_pages) - 1;
+			else
+				low_pfn += (1 << compound_order(page)) - 1;
+
 			continue;
 		}
 
@@ -635,7 +588,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		lruvec = mem_cgroup_page_lruvec(page, zone);
 
 		/* Try isolate the page */
-		if (__isolate_lru_page(page, mode) != 0)
+		if (__isolate_lru_page(page, isolate_mode) != 0)
 			continue;
 
 		VM_BUG_ON_PAGE(PageTransCompound(page), page);
@@ -654,15 +607,8 @@ isolate_success:
 			++low_pfn;
 			break;
 		}
-
-		continue;
-
-next_pageblock:
-		low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1;
 	}
 
-	acct_isolated(zone, locked, cc);
-
 	if (locked)
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 
@@ -671,8 +617,7 @@ next_pageblock:
 	 * if the whole pageblock was scanned without isolating any page.
 	 */
 	if (low_pfn == end_pfn)
-		update_pageblock_skip(cc, valid_page, nr_isolated,
-				      set_unsuitable, true);
+		update_pageblock_skip(cc, valid_page, nr_isolated, true);
 
 	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 
@@ -683,15 +628,63 @@ next_pageblock:
 	return low_pfn;
 }
 
+/**
+ * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
+ * @cc:        Compaction control structure.
+ * @start_pfn: The first PFN to start isolating.
+ * @end_pfn:   The one-past-last PFN.
+ *
+ * Returns zero if isolation fails fatally due to e.g. pending signal.
+ * Otherwise, function returns one-past-the-last PFN of isolated page
+ * (which may be greater than end_pfn if end fell in a middle of a THP page).
+ */
+unsigned long
+isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
+							unsigned long end_pfn)
+{
+	unsigned long pfn, block_end_pfn;
+
+	/* Scan block by block. First and last block may be incomplete */
+	pfn = start_pfn;
+	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+
+	for (; pfn < end_pfn; pfn = block_end_pfn,
+				block_end_pfn += pageblock_nr_pages) {
+
+		block_end_pfn = min(block_end_pfn, end_pfn);
+
+		/* Skip whole pageblock in case of a memory hole */
+		if (!pfn_valid(pfn))
+			continue;
+
+		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
+							ISOLATE_UNEVICTABLE);
+
+		/*
+		 * In case of fatal failure, release everything that might
+		 * have been isolated in the previous iteration, and signal
+		 * the failure back to caller.
+		 */
+		if (!pfn) {
+			putback_movable_pages(&cc->migratepages);
+			cc->nr_migratepages = 0;
+			break;
+		}
+	}
+	acct_isolated(cc->zone, cc);
+
+	return pfn;
+}
+
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 #ifdef CONFIG_COMPACTION
 /*
  * Based on information in the current compact_control, find blocks
  * suitable for isolating free pages from and then isolate them.
  */
-static void isolate_freepages(struct zone *zone,
-			      struct compact_control *cc)
+static void isolate_freepages(struct compact_control *cc)
 {
+	struct zone *zone = cc->zone;
 	struct page *page;
 	unsigned long block_start_pfn;	/* start of current pageblock */
 	unsigned long block_end_pfn;	/* end of current pageblock */
@@ -809,7 +802,7 @@ static struct page *compaction_alloc(struct page *migratepage,
 	 */
 	if (list_empty(&cc->freepages)) {
 		if (!cc->contended)
-			isolate_freepages(cc->zone, cc);
+			isolate_freepages(cc);
 
 		if (list_empty(&cc->freepages))
 			return NULL;
@@ -843,34 +836,82 @@ typedef enum {
 } isolate_migrate_t;
 
 /*
- * Isolate all pages that can be migrated from the block pointed to by
- * the migrate scanner within compact_control.
+ * Isolate all pages that can be migrated from the first suitable block,
+ * starting at the block pointed to by the migrate scanner pfn within
+ * compact_control.
  */
 static isolate_migrate_t isolate_migratepages(struct zone *zone,
 					struct compact_control *cc)
 {
 	unsigned long low_pfn, end_pfn;
+	struct page *page;
+	const isolate_mode_t isolate_mode =
+		(cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);
 
-	/* Do not scan outside zone boundaries */
-	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
+	/*
+	 * Start at where we last stopped, or beginning of the zone as
+	 * initialized by compact_zone()
+	 */
+	low_pfn = cc->migrate_pfn;
 
 	/* Only scan within a pageblock boundary */
 	end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);
 
-	/* Do not cross the free scanner or scan within a memory hole */
-	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
-		cc->migrate_pfn = end_pfn;
-		return ISOLATE_NONE;
-	}
+	/*
+	 * Iterate over whole pageblocks until we find the first suitable.
+	 * Do not cross the free scanner.
+	 */
+	for (; end_pfn <= cc->free_pfn;
+			low_pfn = end_pfn, end_pfn += pageblock_nr_pages) {
 
-	/* Perform the isolation */
-	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
-	if (!low_pfn || cc->contended)
-		return ISOLATE_ABORT;
+		/*
+		 * This can potentially iterate a massively long zone with
+		 * many pageblocks unsuitable, so periodically check if we
+		 * need to schedule, or even abort async compaction.
+		 */
+		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
+						&& compact_should_abort(cc))
+			break;
+
+		/* Skip whole pageblock in case of a memory hole */
+		if (!pfn_valid(low_pfn))
+			continue;
 
+		page = pfn_to_page(low_pfn);
+
+		/* If isolation recently failed, do not retry */
+		if (!isolation_suitable(cc, page))
+			continue;
+
+		/*
+		 * For async compaction, also only scan in MOVABLE blocks.
+		 * Async compaction is optimistic to see if the minimum amount
+		 * of work satisfies the allocation.
+		 */
+		if (cc->mode == MIGRATE_ASYNC &&
+		    !migrate_async_suitable(get_pageblock_migratetype(page)))
+			continue;
+
+		/* Perform the isolation */
+		low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
+								isolate_mode);
+
+		if (!low_pfn || cc->contended)
+			return ISOLATE_ABORT;
+
+		/*
+		 * Either we isolated something and proceed with migration. Or
+		 * we failed and compact_zone should decide if we should
+		 * continue or not.
+		 */
+		break;
+	}
+
+	acct_isolated(zone, cc);
+	/* Record where migration scanner will be restarted */
 	cc->migrate_pfn = low_pfn;
 
-	return ISOLATE_SUCCESS;
+	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
 }
 
 static int compact_finished(struct zone *zone,
@@ -1043,9 +1084,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 			;
 		}
 
-		if (!cc->nr_migratepages)
-			continue;
-
 		err = migrate_pages(&cc->migratepages, compaction_alloc,
 				compaction_free, (unsigned long)cc, cc->mode,
 				MR_COMPACTION);
diff --git a/mm/internal.h b/mm/internal.h
index a1b651b11c5f..5a0738fa649c 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -154,8 +154,8 @@ unsigned long
 isolate_freepages_range(struct compact_control *cc,
 			unsigned long start_pfn, unsigned long end_pfn);
 unsigned long
-isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
-			   unsigned long low_pfn, unsigned long end_pfn, bool unevictable);
+isolate_migratepages_range(struct compact_control *cc,
+			   unsigned long low_pfn, unsigned long end_pfn);
 
 #endif
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 822babd808fe..dfbf54b51649 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6288,8 +6288,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 
 		if (list_empty(&cc->migratepages)) {
 			cc->nr_migratepages = 0;
-			pfn = isolate_migratepages_range(cc->zone, cc,
-							 pfn, end, true);
+			pfn = isolate_migratepages_range(cc, pfn, end);
 			if (!pfn) {
 				ret = -EINTR;
 				break;