summaryrefslogtreecommitdiffstats
path: root/mm/compaction.c
diff options
context:
space:
mode:
authorVlastimil Babka <vbabka@suse.cz>2017-11-17 18:26:41 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-11-17 19:10:00 -0500
commitd3c85bad89b9153df741af14ad859ee49677f00d (patch)
tree7e8744f20b166ab955d03eb4acd3bd4e11ae462a /mm/compaction.c
parent2583d6713267a4c80126e4e50dd45f5cf685ebe8 (diff)
mm, compaction: remove unneeded pageblock_skip_persistent() checks
Commit f3c931633a59 ("mm, compaction: persistently skip hugetlbfs pageblocks") has introduced pageblock_skip_persistent() checks into migration and free scanners, to make sure pageblocks that should be persistently skipped are marked as such, regardless of the ignore_skip_hint flag. Since the previous patch introduced a new no_set_skip_hint flag, the ignore flag no longer prevents marking pageblocks as skipped. Therefore we can remove the special cases. The relevant pageblocks will be marked as skipped by the common logic which marks each pageblock where no page could be isolated. This makes the code simpler. Link: http://lkml.kernel.org/r/20171102121706.21504-3-vbabka@suse.cz Signed-off-by: Vlastimil Babka <vbabka@suse.cz> Cc: Mel Gorman <mgorman@techsingularity.net> Cc: David Rientjes <rientjes@google.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--mm/compaction.c18
1 files changed, 3 insertions, 15 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index bb1188a9d58e..10cd757f1006 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -476,10 +476,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 		if (PageCompound(page)) {
 			const unsigned int order = compound_order(page);
 
-			if (pageblock_skip_persistent(page, order)) {
-				set_pageblock_skip(page);
-				blockpfn = end_pfn;
-			} else if (likely(order < MAX_ORDER)) {
+			if (likely(order < MAX_ORDER)) {
 				blockpfn += (1UL << order) - 1;
 				cursor += (1UL << order) - 1;
 			}
@@ -801,10 +798,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		if (PageCompound(page)) {
 			const unsigned int order = compound_order(page);
 
-			if (pageblock_skip_persistent(page, order)) {
-				set_pageblock_skip(page);
-				low_pfn = end_pfn;
-			} else if (likely(order < MAX_ORDER))
+			if (likely(order < MAX_ORDER))
 				low_pfn += (1UL << order) - 1;
 			goto isolate_fail;
 		}
@@ -867,13 +861,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			 * is safe to read and it's 0 for tail pages.
 			 */
 			if (unlikely(PageCompound(page))) {
-				const unsigned int order = compound_order(page);
-
-				if (pageblock_skip_persistent(page, order)) {
-					set_pageblock_skip(page);
-					low_pfn = end_pfn;
-				} else
-					low_pfn += (1UL << order) - 1;
+				low_pfn += (1UL << compound_order(page)) - 1;
 				goto isolate_fail;
 			}
 		}