aboutsummaryrefslogtreecommitdiffstats
path: root/mm/compaction.c
diff options
context:
space:
mode:
authorVlastimil Babka <vbabka@suse.cz>2014-10-09 18:27:20 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-10-09 22:25:54 -0400
commite14c720efdd73c6d69cd8d07fa894bcd11fe1973 (patch)
tree150faa38ba6ec83226d52163c914144501df3d6e /mm/compaction.c
parent69b7189f12e0064237630e8c6bb64cad710bb268 (diff)
mm, compaction: remember position within pageblock in free pages scanner
Unlike the migration scanner, the free scanner remembers the beginning of the last scanned pageblock in cc->free_pfn. It might be therefore rescanning pages uselessly when called several times during single compaction. This might have been useful when pages were returned to the buddy allocator after a failed migration, but this is no longer the case. This patch changes the meaning of cc->free_pfn so that if it points to a middle of a pageblock, that pageblock is scanned only from cc->free_pfn to the end. isolate_freepages_block() will record the pfn of the last page it looked at, which is then used to update cc->free_pfn. In the mmtests stress-highalloc benchmark, this has resulted in lowering the ratio between pages scanned by both scanners, from 2.5 free pages per migrate page, to 2.25 free pages per migrate page, without affecting success rates. With __GFP_NO_KSWAPD allocations, this appears to result in a worse ratio (2.1 instead of 1.8), but page migration successes increased by 10%, so this could mean that more useful work can be done until need_resched() aborts this kind of compaction. Signed-off-by: Vlastimil Babka <vbabka@suse.cz> Reviewed-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com> Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Acked-by: David Rientjes <rientjes@google.com> Acked-by: Minchan Kim <minchan@kernel.org> Acked-by: Mel Gorman <mgorman@suse.de> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Michal Nazarewicz <mina86@mina86.com> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Christoph Lameter <cl@linux.com> Cc: Rik van Riel <riel@redhat.com> Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--mm/compaction.c39
1 file changed, 30 insertions, 9 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 5039c964f5c8..b69b7dac0361 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -330,7 +330,7 @@ static bool suitable_migration_target(struct page *page)
330 * (even though it may still end up isolating some pages). 330 * (even though it may still end up isolating some pages).
331 */ 331 */
332static unsigned long isolate_freepages_block(struct compact_control *cc, 332static unsigned long isolate_freepages_block(struct compact_control *cc,
333 unsigned long blockpfn, 333 unsigned long *start_pfn,
334 unsigned long end_pfn, 334 unsigned long end_pfn,
335 struct list_head *freelist, 335 struct list_head *freelist,
336 bool strict) 336 bool strict)
@@ -339,6 +339,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
339 struct page *cursor, *valid_page = NULL; 339 struct page *cursor, *valid_page = NULL;
340 unsigned long flags; 340 unsigned long flags;
341 bool locked = false; 341 bool locked = false;
342 unsigned long blockpfn = *start_pfn;
342 343
343 cursor = pfn_to_page(blockpfn); 344 cursor = pfn_to_page(blockpfn);
344 345
@@ -415,6 +416,9 @@ isolate_fail:
415 416
416 } 417 }
417 418
419 /* Record how far we have got within the block */
420 *start_pfn = blockpfn;
421
418 trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated); 422 trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
419 423
420 /* 424 /*
@@ -463,14 +467,16 @@ isolate_freepages_range(struct compact_control *cc,
463 467
464 for (; pfn < end_pfn; pfn += isolated, 468 for (; pfn < end_pfn; pfn += isolated,
465 block_end_pfn += pageblock_nr_pages) { 469 block_end_pfn += pageblock_nr_pages) {
470 /* Protect pfn from changing by isolate_freepages_block */
471 unsigned long isolate_start_pfn = pfn;
466 472
467 block_end_pfn = min(block_end_pfn, end_pfn); 473 block_end_pfn = min(block_end_pfn, end_pfn);
468 474
469 if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone)) 475 if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
470 break; 476 break;
471 477
472 isolated = isolate_freepages_block(cc, pfn, block_end_pfn, 478 isolated = isolate_freepages_block(cc, &isolate_start_pfn,
473 &freelist, true); 479 block_end_pfn, &freelist, true);
474 480
475 /* 481 /*
476 * In strict mode, isolate_freepages_block() returns 0 if 482 * In strict mode, isolate_freepages_block() returns 0 if
@@ -769,6 +775,7 @@ static void isolate_freepages(struct compact_control *cc)
769 struct zone *zone = cc->zone; 775 struct zone *zone = cc->zone;
770 struct page *page; 776 struct page *page;
771 unsigned long block_start_pfn; /* start of current pageblock */ 777 unsigned long block_start_pfn; /* start of current pageblock */
778 unsigned long isolate_start_pfn; /* exact pfn we start at */
772 unsigned long block_end_pfn; /* end of current pageblock */ 779 unsigned long block_end_pfn; /* end of current pageblock */
773 unsigned long low_pfn; /* lowest pfn scanner is able to scan */ 780 unsigned long low_pfn; /* lowest pfn scanner is able to scan */
774 int nr_freepages = cc->nr_freepages; 781 int nr_freepages = cc->nr_freepages;
@@ -777,14 +784,15 @@ static void isolate_freepages(struct compact_control *cc)
777 /* 784 /*
778 * Initialise the free scanner. The starting point is where we last 785 * Initialise the free scanner. The starting point is where we last
779 * successfully isolated from, zone-cached value, or the end of the 786 * successfully isolated from, zone-cached value, or the end of the
780 * zone when isolating for the first time. We need this aligned to 787 * zone when isolating for the first time. For looping we also need
781 * the pageblock boundary, because we do 788 * this pfn aligned down to the pageblock boundary, because we do
782 * block_start_pfn -= pageblock_nr_pages in the for loop. 789 * block_start_pfn -= pageblock_nr_pages in the for loop.
783 * For ending point, take care when isolating in last pageblock of a 790 * For ending point, take care when isolating in last pageblock of a
784 * a zone which ends in the middle of a pageblock. 791 * a zone which ends in the middle of a pageblock.
785 * The low boundary is the end of the pageblock the migration scanner 792 * The low boundary is the end of the pageblock the migration scanner
786 * is using. 793 * is using.
787 */ 794 */
795 isolate_start_pfn = cc->free_pfn;
788 block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1); 796 block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
789 block_end_pfn = min(block_start_pfn + pageblock_nr_pages, 797 block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
790 zone_end_pfn(zone)); 798 zone_end_pfn(zone));
@@ -797,7 +805,8 @@ static void isolate_freepages(struct compact_control *cc)
797 */ 805 */
798 for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages; 806 for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
799 block_end_pfn = block_start_pfn, 807 block_end_pfn = block_start_pfn,
800 block_start_pfn -= pageblock_nr_pages) { 808 block_start_pfn -= pageblock_nr_pages,
809 isolate_start_pfn = block_start_pfn) {
801 unsigned long isolated; 810 unsigned long isolated;
802 811
803 /* 812 /*
@@ -822,13 +831,25 @@ static void isolate_freepages(struct compact_control *cc)
822 if (!isolation_suitable(cc, page)) 831 if (!isolation_suitable(cc, page))
823 continue; 832 continue;
824 833
825 /* Found a block suitable for isolating free pages from */ 834 /* Found a block suitable for isolating free pages from. */
826 cc->free_pfn = block_start_pfn; 835 isolated = isolate_freepages_block(cc, &isolate_start_pfn,
827 isolated = isolate_freepages_block(cc, block_start_pfn,
828 block_end_pfn, freelist, false); 836 block_end_pfn, freelist, false);
829 nr_freepages += isolated; 837 nr_freepages += isolated;
830 838
831 /* 839 /*
840 * Remember where the free scanner should restart next time,
841 * which is where isolate_freepages_block() left off.
842 * But if it scanned the whole pageblock, isolate_start_pfn
843 * now points at block_end_pfn, which is the start of the next
844 * pageblock.
845 * In that case we will however want to restart at the start
846 * of the previous pageblock.
847 */
848 cc->free_pfn = (isolate_start_pfn < block_end_pfn) ?
849 isolate_start_pfn :
850 block_start_pfn - pageblock_nr_pages;
851
852 /*
832 * Set a flag that we successfully isolated in this pageblock. 853 * Set a flag that we successfully isolated in this pageblock.
833 * In the next loop iteration, zone->compact_cached_free_pfn 854 * In the next loop iteration, zone->compact_cached_free_pfn
834 * will not be updated and thus it will effectively contain the 855 * will not be updated and thus it will effectively contain the