 mm/compaction.c | 61 +++++++++++++++++++++++--------------------------------------
 1 file changed, 23 insertions(+), 38 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 78ae182aaf34..68e3c214bcbd 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -405,6 +405,21 @@ static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
 }
 
 /*
+ * Aside from avoiding lock contention, compaction also periodically checks
+ * need_resched() and records async compaction as contended if necessary.
+ */
+static inline void compact_check_resched(struct compact_control *cc)
+{
+	/* async compaction aborts if contended */
+	if (need_resched()) {
+		if (cc->mode == MIGRATE_ASYNC)
+			cc->contended = true;
+
+		cond_resched();
+	}
+}
+
+/*
  * Compaction requires the taking of some coarse locks that are potentially
  * very heavily contended. The lock should be periodically unlocked to avoid
  * having disabled IRQs for a long time, even when there is nobody waiting on
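
The helper above is deliberately void: unlike its predecessor it cannot ask the caller to abort, it only latches cc->contended for async compaction and yields. A minimal userspace sketch of that semantics follows; mock_need_resched() and mock_cond_resched() are hypothetical stand-ins for the kernel primitives, and compact_control is trimmed to the two fields involved.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's migrate modes and primitives. */
enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };

struct compact_control {		/* trimmed to the two fields used here */
	enum migrate_mode mode;
	bool contended;
};

static bool mock_need_resched(void) { return true; }	/* pretend the scheduler wants in */
static void mock_cond_resched(void) { puts("yielded CPU"); }

/* Mirrors compact_check_resched(): it returns nothing -- the caller keeps
 * scanning either way; only cc->contended records that async compaction
 * was interrupted by a resched request. */
static inline void compact_check_resched(struct compact_control *cc)
{
	if (mock_need_resched()) {
		if (cc->mode == MIGRATE_ASYNC)
			cc->contended = true;

		mock_cond_resched();
	}
}

int main(void)
{
	struct compact_control async = { .mode = MIGRATE_ASYNC };
	struct compact_control sync  = { .mode = MIGRATE_SYNC  };

	compact_check_resched(&async);
	compact_check_resched(&sync);

	printf("async contended=%d, sync contended=%d\n",
	       async.contended, sync.contended);	/* prints 1, 0 */
	return 0;
}
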
@@ -432,33 +447,7 @@ static bool compact_unlock_should_abort(spinlock_t *lock,
 		return true;
 	}
 
-	if (need_resched()) {
-		if (cc->mode == MIGRATE_ASYNC)
-			cc->contended = true;
-		cond_resched();
-	}
-
-	return false;
-}
-
-/*
- * Aside from avoiding lock contention, compaction also periodically checks
- * need_resched() and either schedules in sync compaction or aborts async
- * compaction. This is similar to what compact_unlock_should_abort() does, but
- * is used where no lock is concerned.
- *
- * Returns false when no scheduling was needed, or sync compaction scheduled.
- * Returns true when async compaction should abort.
- */
-static inline bool compact_should_abort(struct compact_control *cc)
-{
-	/* async compaction aborts if contended */
-	if (need_resched()) {
-		if (cc->mode == MIGRATE_ASYNC)
-			cc->contended = true;
-
-		cond_resched();
-	}
+	compact_check_resched(cc);
 
 	return false;
 }
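
Pieced together from the context lines above and mm/compaction.c of this era, the whole function now reads roughly as below. The lock-drop and fatal-signal branches are not part of this hunk, so treat this as a reconstruction rather than verbatim source.

static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	/* Drop the lock first so it is never held across a resched point. */
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	/* A fatal signal is now the only case that still aborts. */
	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	/* Rescheduling is delegated and can no longer trigger an abort. */
	compact_check_resched(cc);

	return false;
}
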
@@ -747,8 +736,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		return 0;
 	}
 
-	if (compact_should_abort(cc))
-		return 0;
+	compact_check_resched(cc);
 
 	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
 		skip_on_failure = true;
@@ -1379,12 +1367,10 @@ static void isolate_freepages(struct compact_control *cc)
 					isolate_start_pfn = block_start_pfn) {
 		/*
 		 * This can iterate a massively long zone without finding any
-		 * suitable migration targets, so periodically check if we need
-		 * to schedule, or even abort async compaction.
+		 * suitable migration targets, so periodically check resched.
 		 */
-		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
-						&& compact_should_abort(cc))
-			break;
+		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
+			compact_check_resched(cc);
 
 		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
 									zone);
@@ -1677,11 +1663,10 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 		/*
 		 * This can potentially iterate a massively long zone with
 		 * many pageblocks unsuitable, so periodically check if we
-		 * need to schedule, or even abort async compaction.
+		 * need to schedule.
 		 */
-		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
-						&& compact_should_abort(cc))
-			break;
+		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
+			compact_check_resched(cc);
 
 		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
 								zone);
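
Both scanners rate-limit the new call with the same modulo test, and, unlike the old `&& compact_should_abort(cc) ... break` pattern, they keep scanning afterwards. A self-contained sketch of that rate limiting is below, with hypothetical example values for the two constants (on x86-64 with 4 KiB pages, SWAP_CLUSTER_MAX is 32 and pageblock_nr_pages is 512).

#include <stdio.h>

/* Hypothetical example values standing in for the kernel constants. */
#define SWAP_CLUSTER_MAX	32UL
#define PAGEBLOCK_NR_PAGES	512UL

int main(void)
{
	/* A 2 GiB span of pfns, assuming 4 KiB pages. */
	unsigned long start = 0x100000, end = 0x180000;
	unsigned long pfn, checks = 0;

	/* Same trick as the scanners above: the pfn advances one pageblock
	 * per iteration, so the modulo test fires only once every
	 * SWAP_CLUSTER_MAX pageblocks, keeping need_resched() off the
	 * hot path. */
	for (pfn = start; pfn < end; pfn += PAGEBLOCK_NR_PAGES) {
		if (!(pfn % (SWAP_CLUSTER_MAX * PAGEBLOCK_NR_PAGES)))
			checks++;	/* compact_check_resched(cc) would run here */
	}

	printf("scanned %lu pageblocks, resched checks: %lu\n",
	       (end - start) / PAGEBLOCK_NR_PAGES, checks);	/* 1024, 32 */
	return 0;
}
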