author     Mel Gorman <mgorman@suse.de>                        2012-10-08 19:32:40 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>      2012-10-09 03:22:50 -0400
commit     753341a4b85ff337487b9959c71c529f522004f4 (patch)
tree       6a705fd73dd599e7eeb58cb06e84c86c07c03a64 /mm
parent     f40d1e42bb988d2a26e8e111ea4c4c7bac819b7e (diff)
revert "mm: have order > 0 compaction start off where it left"
This reverts commit 7db8889ab05b ("mm: have order > 0 compaction start off
where it left") and commit de74f1cc ("mm: have order > 0 compaction start near
a pageblock with free pages").

These patches were a good idea and tests confirmed that they massively reduced
the amount of scanning, but the implementation is complex and tricky to
understand. A later patch will cache which pageblocks should be skipped and
reimplement the concept of compact_cached_free_pfn on top of it for both the
migration and free scanners.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Richard Davies <richard@arachsys.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Avi Kivity <avi@redhat.com>
Acked-by: Rafael Aquini <aquini@redhat.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
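For context on what the revert restores: after this patch, every compaction run
again starts the free scanner at the pageblock-aligned end of the zone (see the
compact_zone() hunk below) instead of at a cached compact_cached_free_pfn. A
minimal user-space sketch of that alignment arithmetic follows; the zone layout
and the pageblock size of 512 pages are made-up assumptions for illustration,
not values taken from the kernel.

/*
 * Stand-alone illustration only, not kernel code. PAGEBLOCK_NR_PAGES and the
 * zone numbers below are assumptions; in the kernel the equivalent
 * computation now lives inline in compact_zone().
 */
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL	/* assumed 1 << pageblock_order */

static unsigned long free_scanner_start(unsigned long zone_start_pfn,
					unsigned long spanned_pages)
{
	unsigned long free_pfn = zone_start_pfn + spanned_pages;

	/* Round down so scanning begins at the start of the last pageblock. */
	return free_pfn & ~(PAGEBLOCK_NR_PAGES - 1);
}

int main(void)
{
	/* Hypothetical zone: starts at pfn 4096 and spans 1000000 pages. */
	printf("free scanner starts at pfn %lu\n",
	       free_scanner_start(4096UL, 1000000UL));
	return 0;
}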
Diffstat (limited to 'mm')
-rw-r--r--   mm/compaction.c   65
-rw-r--r--   mm/internal.h      6
-rw-r--r--   mm/page_alloc.c    5
3 files changed, 5 insertions(+), 71 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index bdf6e13045ea..db76361a3117 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -538,20 +538,6 @@ next_pageblock:
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 #ifdef CONFIG_COMPACTION
 /*
- * Returns the start pfn of the last page block in a zone. This is the starting
- * point for full compaction of a zone. Compaction searches for free pages from
- * the end of each zone, while isolate_freepages_block scans forward inside each
- * page block.
- */
-static unsigned long start_free_pfn(struct zone *zone)
-{
-	unsigned long free_pfn;
-	free_pfn = zone->zone_start_pfn + zone->spanned_pages;
-	free_pfn &= ~(pageblock_nr_pages-1);
-	return free_pfn;
-}
-
-/*
  * Based on information in the current compact_control, find blocks
  * suitable for isolating free pages from and then isolate them.
  */
@@ -619,19 +605,8 @@ static void isolate_freepages(struct zone *zone,
 		 * looking for free pages, the search will restart here as
 		 * page migration may have returned some pages to the allocator
 		 */
-		if (isolated) {
+		if (isolated)
 			high_pfn = max(high_pfn, pfn);
-
-			/*
-			 * If the free scanner has wrapped, update
-			 * compact_cached_free_pfn to point to the highest
-			 * pageblock with free pages. This reduces excessive
-			 * scanning of full pageblocks near the end of the
-			 * zone
-			 */
-			if (cc->order > 0 && cc->wrapped)
-				zone->compact_cached_free_pfn = high_pfn;
-		}
 	}
 
 	/* split_free_page does not map the pages */
@@ -639,11 +614,6 @@ static void isolate_freepages(struct zone *zone,
 
 	cc->free_pfn = high_pfn;
 	cc->nr_freepages = nr_freepages;
-
-	/* If compact_cached_free_pfn is reset then set it now */
-	if (cc->order > 0 && !cc->wrapped &&
-	    zone->compact_cached_free_pfn == start_free_pfn(zone))
-		zone->compact_cached_free_pfn = high_pfn;
 }
 
 /*
@@ -738,26 +708,8 @@ static int compact_finished(struct zone *zone,
 	if (fatal_signal_pending(current))
 		return COMPACT_PARTIAL;
 
-	/*
-	 * A full (order == -1) compaction run starts at the beginning and
-	 * end of a zone; it completes when the migrate and free scanner meet.
-	 * A partial (order > 0) compaction can start with the free scanner
-	 * at a random point in the zone, and may have to restart.
-	 */
-	if (cc->free_pfn <= cc->migrate_pfn) {
-		if (cc->order > 0 && !cc->wrapped) {
-			/* We started partway through; restart at the end. */
-			unsigned long free_pfn = start_free_pfn(zone);
-			zone->compact_cached_free_pfn = free_pfn;
-			cc->free_pfn = free_pfn;
-			cc->wrapped = 1;
-			return COMPACT_CONTINUE;
-		}
-		return COMPACT_COMPLETE;
-	}
-
-	/* We wrapped around and ended up where we started. */
-	if (cc->wrapped && cc->free_pfn <= cc->start_free_pfn)
+	/* Compaction run completes if the migrate and free scanner meet */
+	if (cc->free_pfn <= cc->migrate_pfn)
 		return COMPACT_COMPLETE;
 
 	/*
@@ -863,15 +815,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
 	/* Setup to move all movable pages to the end of the zone */
 	cc->migrate_pfn = zone->zone_start_pfn;
-
-	if (cc->order > 0) {
-		/* Incremental compaction. Start where the last one stopped. */
-		cc->free_pfn = zone->compact_cached_free_pfn;
-		cc->start_free_pfn = cc->free_pfn;
-	} else {
-		/* Order == -1 starts at the end of the zone. */
-		cc->free_pfn = start_free_pfn(zone);
-	}
+	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
+	cc->free_pfn &= ~(pageblock_nr_pages-1);
 
 	migrate_prep_local();
 
diff --git a/mm/internal.h b/mm/internal.h
index 97664be2ca37..6f6bb9ab9386 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -118,14 +118,8 @@ struct compact_control {
 	unsigned long nr_freepages;	/* Number of isolated free pages */
 	unsigned long nr_migratepages;	/* Number of pages to migrate */
 	unsigned long free_pfn;	/* isolate_freepages search base */
-	unsigned long start_free_pfn;	/* where we started the search */
 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
 	bool sync;			/* Synchronous migration */
-	bool wrapped;			/* Order > 0 compactions are
-					   incremental, once free_pfn
-					   and migrate_pfn meet, we restart
-					   from the top of the zone;
-					   remember we wrapped around. */
 
 	int order;			/* order a direct compactor needs */
 	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ca002b39b9b4..628968c1ccf4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4490,11 +4490,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 
 	zone->spanned_pages = size;
 	zone->present_pages = realsize;
-#if defined CONFIG_COMPACTION || defined CONFIG_CMA
-	zone->compact_cached_free_pfn = zone->zone_start_pfn +
-				zone->spanned_pages;
-	zone->compact_cached_free_pfn &= ~(pageblock_nr_pages-1);
-#endif
 #ifdef CONFIG_NUMA
 	zone->node = nid;
 	zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)