aboutsummaryrefslogtreecommitdiffstats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
authorVlastimil Babka <vbabka@suse.cz>2014-12-10 18:43:22 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-12-10 20:41:06 -0500
commitebff398017c69a3810bcbc5200ba224d5ccaa207 (patch)
tree6c63381aa6317eaf8dd4d929195588cf83a9a2c8 /mm/vmscan.c
parent1da58ee2a0279a1b0afd3248396de5659b8cf95b (diff)
mm, compaction: pass classzone_idx and alloc_flags to watermark checking
Compaction relies on zone watermark checks for decisions such as if it's worth to start compacting in compaction_suitable() or whether compaction should stop in compact_finished(). The watermark checks take classzone_idx and alloc_flags parameters, which are related to the memory allocation request. But from the context of compaction they are currently passed as 0, including the direct compaction which is invoked to satisfy the allocation request, and could therefore know the proper values. The lack of proper values can lead to mismatch between decisions taken during compaction and decisions related to the allocation request. Lack of proper classzone_idx value means that lowmem_reserve is not taken into account. This has manifested (during recent changes to deferred compaction) when DMA zone was used as fallback for preferred Normal zone. compaction_suitable() without proper classzone_idx would think that the watermarks are already satisfied, but watermark check in get_page_from_freelist() would fail. Because of this problem, deferring compaction has extra complexity that can be removed in the following patch. The issue (not confirmed in practice) with missing alloc_flags is opposite in nature. For allocations that include ALLOC_HIGH, ALLOC_HARDER or ALLOC_CMA in alloc_flags (the last includes all MOVABLE allocations on CMA-enabled systems) the watermark checking in compaction with 0 passed will be stricter than in get_page_from_freelist(). In these cases compaction might be running for a longer time than is really needed. Another issue with compaction_suitable() is that the check for "does the zone need compaction at all?" comes only after the check "does the zone have enough free pages to succeed compaction". The latter considers extra pages for migration and can therefore in some situations fail and return COMPACT_SKIPPED, although the high-order allocation would succeed and we should return COMPACT_PARTIAL. 
This patch fixes these problems by adding alloc_flags and classzone_idx to struct compact_control and related functions involved in direct compaction and watermark checking. Where possible, all other callers of compaction_suitable() pass proper values where those are known. This is currently limited to classzone_idx, which is sometimes known in kswapd context. However, the direct reclaim callers should_continue_reclaim() and compaction_ready() do not currently know the proper values, so the coordination between reclaim and compaction may still not be as accurate as it could. This can be fixed later, if it's shown to be an issue. Additionally the checks in compaction_suitable() are reordered to address the second issue described above. The effect of this patch should be slightly better high-order allocation success rates and/or less compaction overhead, depending on the type of allocations and presence of CMA. It allows simplifying deferred compaction code in a followup patch. When testing with stress-highalloc, there was some slight improvement (which might be just due to variance) in success rates of non-THP-like allocations. Signed-off-by: Vlastimil Babka <vbabka@suse.cz> Cc: Minchan Kim <minchan@kernel.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Michal Nazarewicz <mina86@mina86.com> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Christoph Lameter <cl@linux.com> Acked-by: Rik van Riel <riel@redhat.com> Cc: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--mm/vmscan.c12
1 files changed, 6 insertions, 6 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 53157e157061..4636d9e822c1 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2249,7 +2249,7 @@ static inline bool should_continue_reclaim(struct zone *zone,
2249 return true; 2249 return true;
2250 2250
2251 /* If compaction would go ahead or the allocation would succeed, stop */ 2251 /* If compaction would go ahead or the allocation would succeed, stop */
2252 switch (compaction_suitable(zone, sc->order)) { 2252 switch (compaction_suitable(zone, sc->order, 0, 0)) {
2253 case COMPACT_PARTIAL: 2253 case COMPACT_PARTIAL:
2254 case COMPACT_CONTINUE: 2254 case COMPACT_CONTINUE:
2255 return false; 2255 return false;
@@ -2346,7 +2346,7 @@ static inline bool compaction_ready(struct zone *zone, int order)
2346 * If compaction is not ready to start and allocation is not likely 2346 * If compaction is not ready to start and allocation is not likely
2347 * to succeed without it, then keep reclaiming. 2347 * to succeed without it, then keep reclaiming.
2348 */ 2348 */
2349 if (compaction_suitable(zone, order) == COMPACT_SKIPPED) 2349 if (compaction_suitable(zone, order, 0, 0) == COMPACT_SKIPPED)
2350 return false; 2350 return false;
2351 2351
2352 return watermark_ok; 2352 return watermark_ok;
@@ -2824,8 +2824,8 @@ static bool zone_balanced(struct zone *zone, int order,
2824 balance_gap, classzone_idx, 0)) 2824 balance_gap, classzone_idx, 0))
2825 return false; 2825 return false;
2826 2826
2827 if (IS_ENABLED(CONFIG_COMPACTION) && order && 2827 if (IS_ENABLED(CONFIG_COMPACTION) && order && compaction_suitable(zone,
2828 compaction_suitable(zone, order) == COMPACT_SKIPPED) 2828 order, 0, classzone_idx) == COMPACT_SKIPPED)
2829 return false; 2829 return false;
2830 2830
2831 return true; 2831 return true;
@@ -2952,8 +2952,8 @@ static bool kswapd_shrink_zone(struct zone *zone,
2952 * from memory. Do not reclaim more than needed for compaction. 2952 * from memory. Do not reclaim more than needed for compaction.
2953 */ 2953 */
2954 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && 2954 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
2955 compaction_suitable(zone, sc->order) != 2955 compaction_suitable(zone, sc->order, 0, classzone_idx)
2956 COMPACT_SKIPPED) 2956 != COMPACT_SKIPPED)
2957 testorder = 0; 2957 testorder = 0;
2958 2958
2959 /* 2959 /*