path: root/mm/page_alloc.c
author	Vlastimil Babka <vbabka@suse.cz>	2017-07-10 18:47:14 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-10 19:32:30 -0400
commit	7a8f58f3918869dda0d71b2e9245baedbbe7bc5e (patch)
tree	643709493b087024637674845bdf3217cf444e30	/mm/page_alloc.c
parent	23955622ff8d231bcc9650b3d06583f117a6e3ba (diff)
mm, page_alloc: fallback to smallest page when not stealing whole pageblock
Since commit 3bc48f96cf11 ("mm, page_alloc: split smallest stolen page in fallback") we pick the smallest (but sufficient) page of all that have been stolen from a pageblock of different migratetype. However, there are cases when we decide not to steal the whole pageblock.

Practically, in the current implementation it means that we are trying to fall back for a MIGRATE_MOVABLE allocation of order X, go through the freelists from MAX_ORDER-1 down to X, and find a free page of order Y. If Y is less than pageblock_order / 2, we decide not to steal all pages from the pageblock. When Y > X, it means we are potentially splitting a larger page than we need, as there might be other pages of order Z, where X <= Z < Y. Since Y is already too small to steal the whole pageblock, picking the smallest available Z will result in the same decision and we avoid splitting a higher-order page in a MIGRATE_UNMOVABLE or MIGRATE_RECLAIMABLE pageblock.

This patch therefore changes the fallback algorithm so that in the situation described above, we switch the fallback search strategy to go from order X upwards to find the smallest suitable fallback. In theory there shouldn't be a downside of this change with respect to fragmentation.

This has been tested with mmtests' stress-highalloc performing GFP_KERNEL order-4 allocations; here are the relevant extfrag tracepoint statistics:

                                                       4.12.0-rc2    4.12.0-rc2
                                                        1-kernel4     2-kernel4
Page alloc extfrag event                                 25640976      69680977
Extfrag fragmenting                                      25621086      69661364
Extfrag fragmenting for unmovable                           74409         73204
Extfrag fragmenting unmovable placed with movable           69003         67684
Extfrag fragmenting unmovable placed with reclaim.           5406          5520
Extfrag fragmenting for reclaimable                          6398          8467
Extfrag fragmenting reclaimable placed with movable           869           884
Extfrag fragmenting reclaimable placed with unmov.           5529          7583
Extfrag fragmenting for movable                          25540279      69579693

Since we force movable allocations to steal the smallest available page (which we then practically always split), we steal less per fallback, so the number of fallbacks increases and steals potentially happen from different pageblocks. This is, however, not an issue for movable pages that can be compacted.

Importantly, the "unmovable placed with movable" statistic is lower, which is the result of less fragmentation in the unmovable pageblocks. The effect on reclaimable allocations is a bit unclear.

Link: http://lkml.kernel.org/r/20170529093947.22618-1-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
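For illustration only, below is a minimal userspace C sketch of the search-order change described above. The arrays free_count[] and whole_block[] and the helper pick_fallback_order() are hypothetical stand-ins for zone->free_area, find_suitable_fallback() and __rmqueue_fallback(); the sketch models only which order gets split, not the actual stealing or migratetype bookkeeping.

/*
 * Minimal userspace sketch (not kernel code) of the fallback search order:
 * scan downwards for the largest free page first; if the whole pageblock
 * cannot be stolen for a movable request, switch to scanning upwards for
 * the smallest sufficient page. The arrays below are made-up stand-ins
 * for zone->free_area and find_suitable_fallback().
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER 11

/* free pages per order in the fallback migratetype (illustrative numbers) */
static int free_count[MAX_ORDER]   = { 0, 4, 0, 2, 0, 1, 0, 0, 0, 0, 0 };
/* whether stealing at this order may take the whole pageblock (illustrative) */
static bool whole_block[MAX_ORDER] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 };

/* Returns the order whose page a movable request of @order would split. */
static int pick_fallback_order(unsigned int order)
{
	int current_order;

	/* First pass: largest available page, scanning downwards. */
	for (current_order = MAX_ORDER - 1; current_order >= (int)order;
	     current_order--) {
		if (!free_count[current_order])
			continue;
		/*
		 * Whole pageblock cannot be stolen: prefer the smallest
		 * sufficient page instead of splitting this large one.
		 */
		if (!whole_block[current_order] && current_order > (int)order)
			goto find_smallest;
		return current_order;
	}
	return -1;

find_smallest:
	/* Second pass: smallest sufficient page, scanning upwards. */
	for (current_order = order; current_order < MAX_ORDER; current_order++)
		if (free_count[current_order])
			return current_order;
	return -1;
}

int main(void)
{
	/* With the data above, an order-2 request splits the order-3 page. */
	printf("order 2 -> split order %d\n", pick_fallback_order(2));
	return 0;
}

With the sample data above, the old strategy would have split the order-5 page; the new strategy splits the order-3 page instead. The change only affects the movable, cannot-steal-whole-pageblock case.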
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	53
1 file changed, 44 insertions(+), 9 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bd65b60939b6..869035717048 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2216,7 +2216,11 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 	int fallback_mt;
 	bool can_steal;
 
-	/* Find the largest possible block of pages in the other list */
+	/*
+	 * Find the largest available free page in the other list. This roughly
+	 * approximates finding the pageblock with the most free pages, which
+	 * would be too costly to do exactly.
+	 */
 	for (current_order = MAX_ORDER-1;
 				current_order >= order && current_order <= MAX_ORDER-1;
 				--current_order) {
@@ -2226,19 +2230,50 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 		if (fallback_mt == -1)
 			continue;
 
-		page = list_first_entry(&area->free_list[fallback_mt],
-						struct page, lru);
+		/*
+		 * We cannot steal all free pages from the pageblock and the
+		 * requested migratetype is movable. In that case it's better to
+		 * steal and split the smallest available page instead of the
+		 * largest available page, because even if the next movable
+		 * allocation falls back into a different pageblock than this
+		 * one, it won't cause permanent fragmentation.
+		 */
+		if (!can_steal && start_migratetype == MIGRATE_MOVABLE
+					&& current_order > order)
+			goto find_smallest;
 
-		steal_suitable_fallback(zone, page, start_migratetype,
-								can_steal);
+		goto do_steal;
+	}
 
-		trace_mm_page_alloc_extfrag(page, order, current_order,
-			start_migratetype, fallback_mt);
+	return false;
 
-		return true;
+find_smallest:
+	for (current_order = order; current_order < MAX_ORDER;
+							current_order++) {
+		area = &(zone->free_area[current_order]);
+		fallback_mt = find_suitable_fallback(area, current_order,
+				start_migratetype, false, &can_steal);
+		if (fallback_mt != -1)
+			break;
 	}
 
-	return false;
+	/*
+	 * This should not happen - we already found a suitable fallback
+	 * when looking for the largest page.
+	 */
+	VM_BUG_ON(current_order == MAX_ORDER);
+
+do_steal:
+	page = list_first_entry(&area->free_list[fallback_mt],
+							struct page, lru);
+
+	steal_suitable_fallback(zone, page, start_migratetype, can_steal);
+
+	trace_mm_page_alloc_extfrag(page, order, current_order,
+		start_migratetype, fallback_mt);
+
+	return true;
+
 }
 
 /*