author     David Rientjes <rientjes@google.com>  2017-11-17 18:26:30 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-11-17 19:10:00 -0500
commit     21dc7e023611fbcf8e38f255731bcf3cc38e7638 (patch)
tree       6487d3f4f8887428a17f189610dda040d8bf7afd /mm/compaction.c
parent     a0647dc9208fae9124ca38d43a5c3c950d955291 (diff)
mm, compaction: persistently skip hugetlbfs pageblocks
It is pointless to migrate hugetlb memory as part of memory compaction if the hugetlb size is equal to the pageblock order.  No defragmentation is occurring in this condition.

It is also pointless for the freeing scanner to scan a pageblock where a hugetlb page is pinned.  Unconditionally skip these pageblocks, and do so persistently so that they are not rescanned until it is observed that these hugepages are no longer pinned.

It would also be possible to do this by involving the hugetlb subsystem in marking pageblocks to no longer be skipped when the hugetlb pages are freed.  This is a simple solution that doesn't involve any additional subsystems in pageblock skip manipulation.

[rientjes@google.com: fix build]
  Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1708201734390.117182@chino.kir.corp.google.com
Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1708151639130.106658@chino.kir.corp.google.com
Signed-off-by: David Rientjes <rientjes@google.com>
Tested-by: Michal Hocko <mhocko@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
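In essence the patch adds a single predicate, pageblock_skip_persistent(), and wires it into the free and migrate scanners: a hugetlb page whose compound order equals the pageblock order can never be defragmented, so its pageblock is marked skip and the skip bit is not cleared again while that page remains.  Below is a minimal userspace sketch of that predicate for illustration only; struct page_model, PAGEBLOCK_ORDER and the main() harness are stand-ins for the kernel's struct page, pageblock_order and PageHuge()/compound_order(), not real kernel API.

/*
 * Userspace model of the check introduced below; the kernel helpers
 * PageHuge(), compound_order() and pageblock_order are replaced by plain
 * fields and a constant, so this is a sketch, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_ORDER	9	/* typical pageblock order with 4K pages / 2MB hugepages */

struct page_model {
	bool is_hugetlb;	/* stands in for PageHuge(page) */
	unsigned int order;	/* stands in for compound_order(page) */
};

/*
 * Mirrors pageblock_skip_persistent(): a hugetlb page that spans a whole
 * pageblock cannot be defragmented, so compaction should skip the pageblock
 * persistently instead of rescanning it.
 */
static bool pageblock_skip_persistent(const struct page_model *page)
{
	if (!page->is_hugetlb)
		return false;
	if (page->order != PAGEBLOCK_ORDER)
		return false;
	return true;
}

int main(void)
{
	struct page_model pinned_hugetlb = { .is_hugetlb = true,  .order = PAGEBLOCK_ORDER };
	struct page_model other_compound = { .is_hugetlb = false, .order = PAGEBLOCK_ORDER };

	/* The scanners would set the pageblock skip bit and advance to the
	 * end of the pageblock whenever this returns true. */
	printf("hugetlb page of pageblock order: skip = %d\n",
	       pageblock_skip_persistent(&pinned_hugetlb));
	printf("non-hugetlb compound page:       skip = %d\n",
	       pageblock_skip_persistent(&other_compound));
	return 0;
}

In the hunks that follow, isolate_freepages_block() and isolate_migratepages_block() call the real predicate and, on a hit, call set_pageblock_skip() and advance to the end of the pageblock, while __reset_isolation_suitable() declines to clear the skip bit for such pageblocks.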
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--  mm/compaction.c  56
 1 file changed, 44 insertions(+), 12 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index ad40d67421f3..94b5c0865dd1 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -219,6 +219,20 @@ static void reset_cached_positions(struct zone *zone)
 }
 
 /*
+ * Hugetlbfs pages should consistently be skipped until updated by the hugetlb
+ * subsystem. It is always pointless to compact pages of pageblock_order and
+ * the free scanner can reconsider when no longer huge.
+ */
+static bool pageblock_skip_persistent(struct page *page, unsigned int order)
+{
+	if (!PageHuge(page))
+		return false;
+	if (order != pageblock_order)
+		return false;
+	return true;
+}
+
+/*
  * This function is called to clear all cached information on pageblocks that
  * should be skipped for page isolation when the migrate and free page scanner
  * meet.
@@ -242,6 +256,8 @@ static void __reset_isolation_suitable(struct zone *zone)
 			continue;
 		if (zone != page_zone(page))
 			continue;
+		if (pageblock_skip_persistent(page, compound_order(page)))
+			continue;
 
 		clear_pageblock_skip(page);
 	}
@@ -307,7 +323,13 @@ static inline bool isolation_suitable(struct compact_control *cc,
 	return true;
 }
 
-static void update_pageblock_skip(struct compact_control *cc,
+static inline bool pageblock_skip_persistent(struct page *page,
+					     unsigned int order)
+{
+	return false;
+}
+
+static inline void update_pageblock_skip(struct compact_control *cc,
 			struct page *page, unsigned long nr_isolated,
 			bool migrate_scanner)
 {
@@ -449,13 +471,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 		 * and the only danger is skipping too much.
 		 */
 		if (PageCompound(page)) {
-			unsigned int comp_order = compound_order(page);
-
-			if (likely(comp_order < MAX_ORDER)) {
-				blockpfn += (1UL << comp_order) - 1;
-				cursor += (1UL << comp_order) - 1;
+			const unsigned int order = compound_order(page);
+
+			if (pageblock_skip_persistent(page, order)) {
+				set_pageblock_skip(page);
+				blockpfn = end_pfn;
+			} else if (likely(order < MAX_ORDER)) {
+				blockpfn += (1UL << order) - 1;
+				cursor += (1UL << order) - 1;
 			}
-
 			goto isolate_fail;
 		}
 
@@ -772,11 +796,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		 * danger is skipping too much.
 		 */
 		if (PageCompound(page)) {
-			unsigned int comp_order = compound_order(page);
-
-			if (likely(comp_order < MAX_ORDER))
-				low_pfn += (1UL << comp_order) - 1;
+			const unsigned int order = compound_order(page);
 
+			if (pageblock_skip_persistent(page, order)) {
+				set_pageblock_skip(page);
+				low_pfn = end_pfn;
+			} else if (likely(order < MAX_ORDER))
+				low_pfn += (1UL << order) - 1;
 			goto isolate_fail;
 		}
 
@@ -838,7 +864,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		 * is safe to read and it's 0 for tail pages.
 		 */
 		if (unlikely(PageCompound(page))) {
-			low_pfn += (1UL << compound_order(page)) - 1;
+			const unsigned int order = compound_order(page);
+
+			if (pageblock_skip_persistent(page, order)) {
+				set_pageblock_skip(page);
+				low_pfn = end_pfn;
+			} else
+				low_pfn += (1UL << order) - 1;
 			goto isolate_fail;
 		}
 	}