author		Vlastimil Babka <vbabka@suse.cz>	2015-09-08 18:02:39 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-08 18:35:28 -0400
commit		f5f61a320bf6275f37fcabf6645b4ac8e683c007 (patch)
tree		76b44df3a3723cdaf065ff1b36d1811c647b02bc /mm/compaction.c
parent		f2849aa09d4fbc4145ebb5dc96187c9ab967f5cf (diff)
mm, compaction: simplify handling restart position in free pages scanner
Handling the position where the compaction free scanner should restart (stored in cc->free_pfn) got more complex with commit e14c720efdd7 ("mm, compaction: remember position within pageblock in free pages scanner"). Currently the position is updated in each loop iteration of isolate_freepages(), although it should be enough to update it only when breaking from the loop. There's also an extra check outside the loop that updates the position in case we have met the migration scanner.

This can be simplified if we move the test for having isolated enough from the for-loop header next to the test for contention, and determine the restart position only in these cases. We can reuse the isolate_start_pfn variable for this instead of setting cc->free_pfn directly. Outside the loop, we can simply set cc->free_pfn to the current value of isolate_start_pfn without any extra check.

Also add a VM_BUG_ON to catch a possible mistake in the future, in case we later add a new condition that terminates isolate_freepages_block() prematurely without also considering the condition in isolate_freepages().

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Acked-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
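For readers who want the shape of the resulting control flow in isolation, below is a minimal user-space sketch of the pattern the commit message describes. It is not kernel code: scan_state, scan_block(), BLOCK_PAGES and the values in main() are hypothetical stand-ins for compact_control, isolate_freepages_block(), pageblock_nr_pages and real scanner state.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define BLOCK_PAGES 512UL	/* stand-in for pageblock_nr_pages */

/* Minimal stand-in for the relevant fields of struct compact_control. */
struct scan_state {
	unsigned long free_pfn;		/* where the free scanner restarts */
	unsigned long nr_freepages;
	unsigned long nr_migratepages;
	bool contended;
};

/*
 * Stand-in for isolate_freepages_block(): scans [*start_pfn, end_pfn),
 * advances *start_pfn to where it stopped and returns the number of
 * pages "isolated".  Here it always covers the whole block.
 */
static unsigned long scan_block(unsigned long *start_pfn, unsigned long end_pfn)
{
	unsigned long found = (end_pfn - *start_pfn) / 4;

	*start_pfn = end_pfn;
	return found;
}

static void isolate_free(struct scan_state *st, unsigned long low_pfn,
			 unsigned long block_start_pfn)
{
	unsigned long block_end_pfn = block_start_pfn + BLOCK_PAGES;
	unsigned long isolate_start_pfn = block_start_pfn;

	/* The loop header no longer tests whether enough pages are isolated. */
	for (; block_start_pfn >= low_pfn;
			block_end_pfn = block_start_pfn,
			block_start_pfn -= BLOCK_PAGES,
			isolate_start_pfn = block_start_pfn) {

		st->nr_freepages += scan_block(&isolate_start_pfn, block_end_pfn);

		if (st->nr_freepages >= st->nr_migratepages || st->contended) {
			/*
			 * Restart where the block scan left off, or at the
			 * previous block if the whole block was covered.
			 */
			if (isolate_start_pfn >= block_end_pfn)
				isolate_start_pfn =
					block_start_pfn - BLOCK_PAGES;
			break;
		}
		/* Otherwise the block scan must have run to completion. */
		assert(isolate_start_pfn >= block_end_pfn);
	}

	/* One unconditional assignment replaces the old special cases. */
	st->free_pfn = isolate_start_pfn;
}

int main(void)
{
	struct scan_state st = { .nr_migratepages = 300 };

	isolate_free(&st, 1024, 4096);
	printf("free scanner restarts at pfn %lu\n", st.free_pfn);
	return 0;
}

The point of the pattern is that isolate_start_pfn is only adjusted when the loop breaks early, and the restart position (cc->free_pfn in the kernel, st->free_pfn here) is assigned exactly once after the loop.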
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--	mm/compaction.c	35
1 file changed, 20 insertions(+), 15 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 7077b81a4893..2c1e1ff321bf 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -943,8 +943,7 @@ static void isolate_freepages(struct compact_control *cc)
 	 * pages on cc->migratepages. We stop searching if the migrate
 	 * and free page scanners meet or enough free pages are isolated.
 	 */
-	for (; block_start_pfn >= low_pfn &&
-			cc->nr_migratepages > cc->nr_freepages;
+	for (; block_start_pfn >= low_pfn;
 				block_end_pfn = block_start_pfn,
 				block_start_pfn -= pageblock_nr_pages,
 				isolate_start_pfn = block_start_pfn) {
@@ -976,6 +975,8 @@ static void isolate_freepages(struct compact_control *cc)
 					block_end_pfn, freelist, false);
 
 		/*
+		 * If we isolated enough freepages, or aborted due to async
+		 * compaction being contended, terminate the loop.
 		 * Remember where the free scanner should restart next time,
 		 * which is where isolate_freepages_block() left off.
 		 * But if it scanned the whole pageblock, isolate_start_pfn
@@ -984,27 +985,31 @@ static void isolate_freepages(struct compact_control *cc)
 		 * In that case we will however want to restart at the start
 		 * of the previous pageblock.
 		 */
-		cc->free_pfn = (isolate_start_pfn < block_end_pfn) ?
-				isolate_start_pfn :
-				block_start_pfn - pageblock_nr_pages;
-
-		/*
-		 * isolate_freepages_block() might have aborted due to async
-		 * compaction being contended
-		 */
-		if (cc->contended)
+		if ((cc->nr_freepages >= cc->nr_migratepages)
+							|| cc->contended) {
+			if (isolate_start_pfn >= block_end_pfn)
+				isolate_start_pfn =
+					block_start_pfn - pageblock_nr_pages;
 			break;
+		} else {
+			/*
+			 * isolate_freepages_block() should not terminate
+			 * prematurely unless contended, or isolated enough
+			 */
+			VM_BUG_ON(isolate_start_pfn < block_end_pfn);
+		}
 	}
 
 	/* split_free_page does not map the pages */
 	map_pages(freelist);
 
 	/*
-	 * If we crossed the migrate scanner, we want to keep it that way
-	 * so that compact_finished() may detect this
+	 * Record where the free scanner will restart next time. Either we
+	 * broke from the loop and set isolate_start_pfn based on the last
+	 * call to isolate_freepages_block(), or we met the migration scanner
+	 * and the loop terminated due to isolate_start_pfn < low_pfn
 	 */
-	if (block_start_pfn < low_pfn)
-		cc->free_pfn = cc->migrate_pfn;
+	cc->free_pfn = isolate_start_pfn;
 }
 
 /*