-rw-r--r--  mm/compaction.c  | 54
-rw-r--r--  mm/internal.h    |  5
2 files changed, 48 insertions(+), 11 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 58441220b953..21bf292b642a 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -222,6 +222,30 @@ static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
 	return true;
 }
 
+/*
+ * Aside from avoiding lock contention, compaction also periodically checks
+ * need_resched() and either schedules in sync compaction or aborts async
+ * compaction. This is similar to what compact_checklock_irqsave() does, but
+ * is used where no lock is concerned.
+ *
+ * Returns false when no scheduling was needed, or sync compaction scheduled.
+ * Returns true when async compaction should abort.
+ */
+static inline bool compact_should_abort(struct compact_control *cc)
+{
+	/* async compaction aborts if contended */
+	if (need_resched()) {
+		if (cc->mode == MIGRATE_ASYNC) {
+			cc->contended = true;
+			return true;
+		}
+
+		cond_resched();
+	}
+
+	return false;
+}
+
 /* Returns true if the page is within a block suitable for migration to */
 static bool suitable_migration_target(struct page *page)
 {
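The helper above centralizes a pattern both scanners previously open-coded. The following is a minimal userspace sketch of its decision logic, not kernel code: need_resched() is replaced by a stub that always reports pressure, and compact_control is reduced to the two fields involved. It shows that async compaction bails out and records the contention in cc->contended, while sync compaction merely yields and keeps going.

#include <stdbool.h>
#include <stdio.h>

enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };

struct compact_control_stub {
	enum migrate_mode mode;
	bool contended;
};

/* Stand-in for the kernel's need_resched(); always "true" for the demo. */
static bool need_resched_stub(void) { return true; }

/* Mirrors compact_should_abort(): async aborts, sync yields the CPU. */
static bool should_abort_stub(struct compact_control_stub *cc)
{
	if (need_resched_stub()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = true;	/* seen later by compact_finished() */
			return true;
		}
		/* sync compaction would cond_resched() here and continue */
	}
	return false;
}

int main(void)
{
	struct compact_control_stub async = { MIGRATE_ASYNC, false };
	struct compact_control_stub sync  = { MIGRATE_SYNC, false };

	printf("async aborts: %d, sync aborts: %d\n",
	       should_abort_stub(&async), should_abort_stub(&sync));
	return 0;
}

Note a subtle behavioral change visible in the next hunk: the old open-coded check aborted async compaction only when cond_resched() had actually rescheduled, whereas compact_should_abort() aborts as soon as need_resched() reports true.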
@@ -494,11 +518,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 			return 0;
 	}
 
-	if (cond_resched()) {
-		/* Async terminates prematurely on need_resched() */
-		if (cc->mode == MIGRATE_ASYNC)
-			return 0;
-	}
+	if (compact_should_abort(cc))
+		return 0;
 
 	/* Time to isolate some pages for migration */
 	for (; low_pfn < end_pfn; low_pfn++) {
@@ -720,9 +741,11 @@ static void isolate_freepages(struct zone *zone,
 		/*
 		 * This can iterate a massively long zone without finding any
 		 * suitable migration targets, so periodically check if we need
-		 * to schedule.
+		 * to schedule, or even abort async compaction.
 		 */
-		cond_resched();
+		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
+						&& compact_should_abort(cc))
+			break;
 
 		if (!pfn_valid(block_start_pfn))
 			continue;
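The modulo test above rate-limits the new check: the free scanner advances one pageblock per loop iteration, so need_resched() is consulted only once every SWAP_CLUSTER_MAX pageblocks rather than on every pass. A minimal sketch of the same throttling pattern, with made-up constants standing in for SWAP_CLUSTER_MAX and pageblock_nr_pages:

#include <stdio.h>

#define CLUSTER_MAX   32	/* stand-in for SWAP_CLUSTER_MAX */
#define BLOCK_PAGES  512	/* stand-in for pageblock_nr_pages */

int main(void)
{
	unsigned long start_pfn = 10 * CLUSTER_MAX * BLOCK_PAGES;
	int checks = 0;

	/* walk backwards one pageblock at a time, like the free scanner */
	for (unsigned long pfn = start_pfn; pfn > 0; pfn -= BLOCK_PAGES) {
		if (!(pfn % (CLUSTER_MAX * BLOCK_PAGES)))
			checks++;	/* would call compact_should_abort() here */
	}

	printf("pageblocks scanned: %lu, abort checks: %d\n",
	       start_pfn / BLOCK_PAGES, checks);
	return 0;
}

This prints 320 pageblocks scanned but only 10 abort checks, so the common case pays almost nothing for the new abort opportunity.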
@@ -760,6 +783,13 @@ static void isolate_freepages(struct zone *zone,
 		 */
 		if (isolated)
 			cc->finished_update_free = true;
+
+		/*
+		 * isolate_freepages_block() might have aborted due to async
+		 * compaction being contended
+		 */
+		if (cc->contended)
+			break;
 	}
 
 	/* split_free_page does not map the pages */
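This break is how the contention signal propagates upward: isolate_freepages_block() (not shown in this diff) can set cc->contended via the locking helpers, and the pageblock loop above must notice the flag and unwind rather than scan further. A toy illustration of that flag-propagation pattern, with stub names invented for the demo:

#include <stdbool.h>
#include <stdio.h>

struct ctl { bool contended; };

/* Stand-in for isolate_freepages_block(): may flag contention and bail. */
static int isolate_block_stub(struct ctl *cc, int block)
{
	if (block == 2) {		/* pretend contention hits on block 2 */
		cc->contended = true;
		return 0;
	}
	return 32;			/* pretend some pages were isolated */
}

int main(void)
{
	struct ctl cc = { false };

	for (int block = 0; block < 8; block++) {
		int isolated = isolate_block_stub(&cc, block);

		printf("block %d: isolated %d\n", block, isolated);
		/* mirror the new check: stop the outer loop promptly */
		if (cc.contended)
			break;
	}
	return 0;
}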
@@ -786,9 +816,13 @@ static struct page *compaction_alloc(struct page *migratepage,
 	struct compact_control *cc = (struct compact_control *)data;
 	struct page *freepage;
 
-	/* Isolate free pages if necessary */
+	/*
+	 * Isolate free pages if necessary, and if we are not aborting due to
+	 * contention.
+	 */
 	if (list_empty(&cc->freepages)) {
-		isolate_freepages(cc->zone, cc);
+		if (!cc->contended)
+			isolate_freepages(cc->zone, cc);
 
 		if (list_empty(&cc->freepages))
 			return NULL;
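compaction_alloc() is the allocation callback handed to migrate_pages(); once the flag is set it must stop refilling cc->freepages, otherwise in-flight migration would keep kicking off new free-page scans on a contended zone. A compressed sketch of the guard, using a counter in place of the kernel's list_head:

#include <stdbool.h>
#include <stdio.h>

struct ctl {
	int nr_freepages;	/* stand-in for the cc->freepages list */
	bool contended;
};

/* Stand-in for isolate_freepages(): refills the free list. */
static void refill_stub(struct ctl *cc) { cc->nr_freepages = 8; }

/* Mirrors the patched compaction_alloc(): no refill once contended. */
static bool alloc_stub(struct ctl *cc)
{
	if (cc->nr_freepages == 0) {
		if (!cc->contended)
			refill_stub(cc);
		if (cc->nr_freepages == 0)
			return false;	/* NULL in the kernel */
	}
	cc->nr_freepages--;
	return true;
}

int main(void)
{
	struct ctl cc = { 0, true };	/* already contended, empty list */

	printf("allocation %s\n", alloc_stub(&cc) ? "succeeded" : "failed");
	return 0;
}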
@@ -858,7 +892,7 @@ static int compact_finished(struct zone *zone,
 	unsigned int order;
 	unsigned long watermark;
 
-	if (fatal_signal_pending(current))
+	if (cc->contended || fatal_signal_pending(current))
 		return COMPACT_PARTIAL;
 
 	/* Compaction run completes if the migrate and free scanner meet */
diff --git a/mm/internal.h b/mm/internal.h
index 802c3a4fc03a..7f22a11fcc66 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -144,7 +144,10 @@ struct compact_control {
 	int order;			/* order a direct compactor needs */
 	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
 	struct zone *zone;
-	bool contended;			/* True if a lock was contended */
+	bool contended;			/* True if a lock was contended, or
+					 * need_resched() true during async
+					 * compaction
+					 */
 };
 
 unsigned long
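With this change cc->contended no longer means only "a zone lock was contended": compact_should_abort() also sets it when async compaction gives up because a reschedule is needed, and the patched compact_finished() converts either cause into COMPACT_PARTIAL so the whole run terminates early. A minimal sketch of that termination logic, with a reduced two-value enum standing in for the kernel's full set of compaction results:

#include <stdbool.h>
#include <stdio.h>

enum result { COMPACT_CONTINUE, COMPACT_PARTIAL };

struct ctl { bool contended; };

/* Mirrors the patched compact_finished() early exit. */
static enum result finished_stub(const struct ctl *cc, bool fatal_signal)
{
	if (cc->contended || fatal_signal)
		return COMPACT_PARTIAL;
	return COMPACT_CONTINUE;	/* scanners keep going */
}

int main(void)
{
	struct ctl calm = { false }, contended = { true };

	printf("calm: %d, contended: %d\n",
	       finished_stub(&calm, false), finished_stub(&contended, false));
	return 0;
}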