summaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorMichal Hocko <mhocko@suse.com>2016-05-20 19:56:53 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-05-20 20:58:30 -0400
commitc5d01d0d18e2ab7a21f0371b00e4d1a06f79cdf5 (patch)
tree5ebb7038a7e113dde69e6a471169cd3e4a6afdcd /mm
parent4f9a358c36fcdad3ea1db263ec4d484a70ad543e (diff)
mm, compaction: simplify __alloc_pages_direct_compact feedback interface
__alloc_pages_direct_compact communicates potential back off by two variables: - deferred_compaction tells that the compaction returned COMPACT_DEFERRED - contended_compaction is set when there is a contention on zone->lock resp. zone->lru_lock locks __alloc_pages_slowpath then backs off for THP allocation requests to prevent from long stalls. This is rather messy and it would be much cleaner to return a single compact result value and hide all the nasty details into __alloc_pages_direct_compact. This patch shouldn't introduce any functional changes. Signed-off-by: Michal Hocko <mhocko@suse.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com> Cc: David Rientjes <rientjes@google.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Joonsoo Kim <js1304@gmail.com> Cc: Mel Gorman <mgorman@techsingularity.net> Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp> Cc: Vladimir Davydov <vdavydov@virtuozzo.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- mm/page_alloc.c | 67
1 file changed, 31 insertions(+), 36 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ed62c4b90598..8bcc10616fab 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3185,29 +3185,21 @@ out:
3185static struct page * 3185static struct page *
3186__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3186__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3187 unsigned int alloc_flags, const struct alloc_context *ac, 3187 unsigned int alloc_flags, const struct alloc_context *ac,
3188 enum migrate_mode mode, int *contended_compaction, 3188 enum migrate_mode mode, enum compact_result *compact_result)
3189 bool *deferred_compaction)
3190{ 3189{
3191 enum compact_result compact_result;
3192 struct page *page; 3190 struct page *page;
3191 int contended_compaction;
3193 3192
3194 if (!order) 3193 if (!order)
3195 return NULL; 3194 return NULL;
3196 3195
3197 current->flags |= PF_MEMALLOC; 3196 current->flags |= PF_MEMALLOC;
3198 compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 3197 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3199 mode, contended_compaction); 3198 mode, &contended_compaction);
3200 current->flags &= ~PF_MEMALLOC; 3199 current->flags &= ~PF_MEMALLOC;
3201 3200
3202 switch (compact_result) { 3201 if (*compact_result <= COMPACT_INACTIVE)
3203 case COMPACT_DEFERRED:
3204 *deferred_compaction = true;
3205 /* fall-through */
3206 case COMPACT_SKIPPED:
3207 return NULL; 3202 return NULL;
3208 default:
3209 break;
3210 }
3211 3203
3212 /* 3204 /*
3213 * At least in one zone compaction wasn't deferred or skipped, so let's 3205 * At least in one zone compaction wasn't deferred or skipped, so let's
@@ -3233,6 +3225,24 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3233 */ 3225 */
3234 count_vm_event(COMPACTFAIL); 3226 count_vm_event(COMPACTFAIL);
3235 3227
3228 /*
3229 * In all zones where compaction was attempted (and not
3230 * deferred or skipped), lock contention has been detected.
3231 * For THP allocation we do not want to disrupt the others
3232 * so we fallback to base pages instead.
3233 */
3234 if (contended_compaction == COMPACT_CONTENDED_LOCK)
3235 *compact_result = COMPACT_CONTENDED;
3236
3237 /*
3238 * If compaction was aborted due to need_resched(), we do not
3239 * want to further increase allocation latency, unless it is
3240 * khugepaged trying to collapse.
3241 */
3242 if (contended_compaction == COMPACT_CONTENDED_SCHED
3243 && !(current->flags & PF_KTHREAD))
3244 *compact_result = COMPACT_CONTENDED;
3245
3236 cond_resched(); 3246 cond_resched();
3237 3247
3238 return NULL; 3248 return NULL;
@@ -3241,8 +3251,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3241static inline struct page * 3251static inline struct page *
3242__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3252__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3243 unsigned int alloc_flags, const struct alloc_context *ac, 3253 unsigned int alloc_flags, const struct alloc_context *ac,
3244 enum migrate_mode mode, int *contended_compaction, 3254 enum migrate_mode mode, enum compact_result *compact_result)
3245 bool *deferred_compaction)
3246{ 3255{
3247 return NULL; 3256 return NULL;
3248} 3257}
@@ -3387,8 +3396,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3387 unsigned long pages_reclaimed = 0; 3396 unsigned long pages_reclaimed = 0;
3388 unsigned long did_some_progress; 3397 unsigned long did_some_progress;
3389 enum migrate_mode migration_mode = MIGRATE_ASYNC; 3398 enum migrate_mode migration_mode = MIGRATE_ASYNC;
3390 bool deferred_compaction = false; 3399 enum compact_result compact_result;
3391 int contended_compaction = COMPACT_CONTENDED_NONE;
3392 3400
3393 /* 3401 /*
3394 * In the slowpath, we sanity check order to avoid ever trying to 3402 * In the slowpath, we sanity check order to avoid ever trying to
@@ -3475,8 +3483,7 @@ retry:
3475 */ 3483 */
3476 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 3484 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
3477 migration_mode, 3485 migration_mode,
3478 &contended_compaction, 3486 &compact_result);
3479 &deferred_compaction);
3480 if (page) 3487 if (page)
3481 goto got_pg; 3488 goto got_pg;
3482 3489
@@ -3489,25 +3496,14 @@ retry:
3489 * to heavily disrupt the system, so we fail the allocation 3496 * to heavily disrupt the system, so we fail the allocation
3490 * instead of entering direct reclaim. 3497 * instead of entering direct reclaim.
3491 */ 3498 */
3492 if (deferred_compaction) 3499 if (compact_result == COMPACT_DEFERRED)
3493 goto nopage;
3494
3495 /*
3496 * In all zones where compaction was attempted (and not
3497 * deferred or skipped), lock contention has been detected.
3498 * For THP allocation we do not want to disrupt the others
3499 * so we fallback to base pages instead.
3500 */
3501 if (contended_compaction == COMPACT_CONTENDED_LOCK)
3502 goto nopage; 3500 goto nopage;
3503 3501
3504 /* 3502 /*
3505 * If compaction was aborted due to need_resched(), we do not 3503 * Compaction is contended so rather back off than cause
3506 * want to further increase allocation latency, unless it is 3504 * excessive stalls.
3507 * khugepaged trying to collapse.
3508 */ 3505 */
3509 if (contended_compaction == COMPACT_CONTENDED_SCHED 3506 if(compact_result == COMPACT_CONTENDED)
3510 && !(current->flags & PF_KTHREAD))
3511 goto nopage; 3507 goto nopage;
3512 } 3508 }
3513 3509
@@ -3555,8 +3551,7 @@ noretry:
3555 */ 3551 */
3556 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, 3552 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags,
3557 ac, migration_mode, 3553 ac, migration_mode,
3558 &contended_compaction, 3554 &compact_result);
3559 &deferred_compaction);
3560 if (page) 3555 if (page)
3561 goto got_pg; 3556 goto got_pg;
3562nopage: 3557nopage: