Diffstat (limited to 'mm')
-rw-r--r--	mm/compaction.c	91
1 file changed, 57 insertions(+), 34 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 8058e3f98f08..1067c07cb33d 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -67,6 +67,49 @@ static inline bool migrate_async_suitable(int migratetype)
 	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
 }
 
+/*
+ * Check that the whole (or subset of) a pageblock given by the interval of
+ * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
+ * with the migration or free compaction scanner. The scanners then need to
+ * use only pfn_valid_within() check for arches that allow holes within
+ * pageblocks.
+ *
+ * Return struct page pointer of start_pfn, or NULL if checks were not passed.
+ *
+ * It's possible on some configurations to have a setup like node0 node1 node0
+ * i.e. it's possible that all pages within a zone's range of pages do not
+ * belong to a single zone. We assume that a border between node0 and node1
+ * can occur within a single pageblock, but not a node0 node1 node0
+ * interleaving within a single pageblock. It is therefore sufficient to check
+ * the first and last page of a pageblock and avoid checking each individual
+ * page in a pageblock.
+ */
+static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
+				unsigned long end_pfn, struct zone *zone)
+{
+	struct page *start_page;
+	struct page *end_page;
+
+	/* end_pfn is one past the range we are checking */
+	end_pfn--;
+
+	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
+		return NULL;
+
+	start_page = pfn_to_page(start_pfn);
+
+	if (page_zone(start_page) != zone)
+		return NULL;
+
+	end_page = pfn_to_page(end_pfn);
+
+	/* This gives a shorter code than deriving page_zone(end_page) */
+	if (page_zone_id(start_page) != page_zone_id(end_page))
+		return NULL;
+
+	return start_page;
+}
+
 #ifdef CONFIG_COMPACTION
 /* Returns true if the pageblock should be scanned for pages to isolate. */
 static inline bool isolation_suitable(struct compact_control *cc,
@@ -371,17 +414,17 @@ isolate_freepages_range(struct compact_control *cc,
 	unsigned long isolated, pfn, block_end_pfn;
 	LIST_HEAD(freelist);
 
-	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
-		if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
-			break;
+	pfn = start_pfn;
+	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+
+	for (; pfn < end_pfn; pfn += isolated,
+				block_end_pfn += pageblock_nr_pages) {
 
-		/*
-		 * On subsequent iterations ALIGN() is actually not needed,
-		 * but we keep it that we not to complicate the code.
-		 */
-		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
 		block_end_pfn = min(block_end_pfn, end_pfn);
 
+		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
+			break;
+
 		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
 						   &freelist, true);
 
@@ -507,15 +550,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			continue;
 		nr_scanned++;
 
-		/*
-		 * Get the page and ensure the page is within the same zone.
-		 * See the comment in isolate_freepages about overlapping
-		 * nodes. It is deliberate that the new zone lock is not taken
-		 * as memory compaction should not move pages between nodes.
-		 */
 		page = pfn_to_page(low_pfn);
-		if (page_zone(page) != zone)
-			continue;
 
 		if (!valid_page)
 			valid_page = page;
@@ -653,8 +688,7 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
 
 		block_end_pfn = min(block_end_pfn, end_pfn);
 
-		/* Skip whole pageblock in case of a memory hole */
-		if (!pfn_valid(pfn))
+		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
 			continue;
 
 		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
@@ -727,18 +761,9 @@ static void isolate_freepages(struct compact_control *cc)
 						&& compact_should_abort(cc))
 			break;
 
-		if (!pfn_valid(block_start_pfn))
-			continue;
-
-		/*
-		 * Check for overlapping nodes/zones. It's possible on some
-		 * configurations to have a setup like
-		 * node0 node1 node0
-		 * i.e. it's possible that all pages within a zones range of
-		 * pages do not belong to a single zone.
-		 */
-		page = pfn_to_page(block_start_pfn);
-		if (page_zone(page) != zone)
+		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
+									zone);
+		if (!page)
 			continue;
 
 		/* Check the block is suitable for migration */
@@ -873,12 +898,10 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 						&& compact_should_abort(cc))
 			break;
 
-		/* Skip whole pageblock in case of a memory hole */
-		if (!pfn_valid(low_pfn))
+		page = pageblock_pfn_to_page(low_pfn, end_pfn, zone);
+		if (!page)
 			continue;
 
-		page = pfn_to_page(low_pfn);
-
 		/* If isolation recently failed, do not retry */
 		if (!isolation_suitable(cc, page))
 			continue;
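
For illustration only (not part of the commit): a minimal userspace sketch of the first/last-page zone check that the new pageblock_pfn_to_page() helper performs. The stubbed pfn_valid(), pfn_to_page(), page_zone() and page_zone_id(), the fixed 128-pfn range, and the node0/node1 layout are assumptions made for the example, not the kernel implementations.

/* build: cc -o pfn_check pfn_check.c && ./pfn_check */
#include <stdio.h>

struct zone { int id; };
struct page { struct zone *zone; };

/* Hypothetical stand-ins for the kernel helpers the patch relies on. */
#define MAX_PFN 128UL
static struct page pages[MAX_PFN];

static int pfn_valid(unsigned long pfn)           { return pfn < MAX_PFN; }
static struct page *pfn_to_page(unsigned long pfn) { return &pages[pfn]; }
static struct zone *page_zone(struct page *page)   { return page->zone; }
static int page_zone_id(struct page *page)         { return page->zone->id; }

/*
 * Same shape as the helper added by the patch: validate only the first and
 * last pfn of [start_pfn, end_pfn) instead of every page in the block.
 */
static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
					  unsigned long end_pfn,
					  struct zone *zone)
{
	struct page *start_page, *end_page;

	end_pfn--;	/* end_pfn is one past the range we are checking */

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_page(start_pfn);
	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}

int main(void)
{
	struct zone node0 = { 0 }, node1 = { 1 };
	unsigned long pfn;

	/* Lay the pfn range out as node0 [0,64) followed by node1 [64,128). */
	for (pfn = 0; pfn < MAX_PFN; pfn++)
		pages[pfn].zone = pfn < 64 ? &node0 : &node1;

	/* A block inside node0 passes; one straddling the border is skipped. */
	printf("[0,32)  -> %s\n", pageblock_pfn_to_page(0, 32, &node0) ? "scan" : "skip");
	printf("[48,80) -> %s\n", pageblock_pfn_to_page(48, 80, &node0) ? "scan" : "skip");
	return 0;
}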