author    Vlastimil Babka <vbabka@suse.cz>    2017-11-17 18:26:34 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2017-11-17 19:10:00 -0500
commit    b527cfe5bc23208cf9a346879501333cec638aba (patch)
tree      3302cb8445a9ae8edd1ac187ffef8dc8f84f9781 /mm/compaction.c
parent    21dc7e023611fbcf8e38f255731bcf3cc38e7638 (diff)
mm, compaction: extend pageblock_skip_persistent() to all compound pages
pageblock_skip_persistent() checks for HugeTLB pages of pageblock order. When clearing pageblock skip bits for compaction, the bits are not cleared for such pageblocks, because they cannot contain base pages suitable for migration, nor free pages to use as migration targets.

This optimization can be simply extended to all compound pages of order equal to or larger than pageblock order, because migrating such pages (if they support it) cannot help sub-pageblock fragmentation. This includes THPs and also gigantic HugeTLB pages, which the current implementation doesn't persistently skip due to a strict pageblock_order equality check and not recognizing tail pages.

While THP pages are generally less "persistent" than HugeTLB, we can still expect that if a THP exists at the point of __reset_isolation_suitable(), it will also exist during the subsequent compaction run. The time difference here could actually be smaller than between a compaction run that sets a (non-persistent) skip bit on a THP, and the next compaction run that observes it.

Link: http://lkml.kernel.org/r/20171102121706.21504-1-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
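For illustration only (not part of the patch), a minimal sketch contrasting the old and new tests; the helper names are hypothetical, and the concrete orders assume x86_64 with 4KB base pages, where pageblock_order is 9 (2MB) and a gigantic HugeTLB page has order 18 (1GB):

/* Hypothetical sketch, not from the patch: contrasts the old and new tests. */
static bool skip_check_old(struct page *page, unsigned int order)
{
	/*
	 * Misses THPs entirely (PageHuge() is hugetlb-only), and misses
	 * gigantic HugeTLB pages because their order (18 for 1GB on x86_64)
	 * fails the strict equality with pageblock_order (9). Tail pages are
	 * not recognized either, since compound_order() on a tail page is 0.
	 */
	return PageHuge(page) && order == pageblock_order;
}

static bool skip_check_new(struct page *page)
{
	if (!PageCompound(page))
		return false;
	/* Resolve tail pages to the head, so any sub-page of the block matches. */
	page = compound_head(page);
	/* >= covers THPs at pageblock_order and gigantic pages above it. */
	return compound_order(page) >= pageblock_order;
}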
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--    mm/compaction.c    25
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 94b5c0865dd1..e8f5b4e2cb05 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -219,17 +219,21 @@ static void reset_cached_positions(struct zone *zone)
 }
 
 /*
- * Hugetlbfs pages should consistenly be skipped until updated by the hugetlb
- * subsystem. It is always pointless to compact pages of pageblock_order and
- * the free scanner can reconsider when no longer huge.
+ * Compound pages of >= pageblock_order should consistenly be skipped until
+ * released. It is always pointless to compact pages of such order (if they are
+ * migratable), and the pageblocks they occupy cannot contain any free pages.
  */
-static bool pageblock_skip_persistent(struct page *page, unsigned int order)
+static bool pageblock_skip_persistent(struct page *page)
 {
-	if (!PageHuge(page))
+	if (!PageCompound(page))
 		return false;
-	if (order != pageblock_order)
-		return false;
-	return true;
+
+	page = compound_head(page);
+
+	if (compound_order(page) >= pageblock_order)
+		return true;
+
+	return false;
 }
 
 /*
@@ -256,7 +260,7 @@ static void __reset_isolation_suitable(struct zone *zone)
 			continue;
 		if (zone != page_zone(page))
 			continue;
-		if (pageblock_skip_persistent(page, compound_order(page)))
+		if (pageblock_skip_persistent(page))
 			continue;
 
 		clear_pageblock_skip(page);
@@ -323,8 +327,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
 	return true;
 }
 
-static inline bool pageblock_skip_persistent(struct page *page,
-					unsigned int order)
+static inline bool pageblock_skip_persistent(struct page *page)
 {
 	return false;
 }