author    Linus Torvalds <torvalds@linux-foundation.org>  2012-07-31 22:25:39 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-07-31 22:25:39 -0400
commit    ac694dbdbc403c00e2c14d10bc7b8412cc378259
tree      e37328cfbeaf43716dd5914cad9179e57e84df76
parent    a40a1d3d0a2fd613fdec6d89d3c053268ced76ed
parent    437ea90cc3afdca5229b41c6b1d38c4842756cb9
Merge branch 'akpm' (Andrew's patch-bomb)
Merge Andrew's second set of patches:
 - MM
 - a few random fixes
 - a couple of RTC leftovers

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (120 commits)
  rtc/rtc-88pm80x: remove unneed devm_kfree
  rtc/rtc-88pm80x: assign ret only when rtc_register_driver fails
  mm: hugetlbfs: close race during teardown of hugetlbfs shared page tables
  tmpfs: distribute interleave better across nodes
  mm: remove redundant initialization
  mm: warn if pg_data_t isn't initialized with zero
  mips: zero out pg_data_t when it's allocated
  memcg: fix memory accounting scalability in shrink_page_list
  mm/sparse: remove index_init_lock
  mm/sparse: more checks on mem_section number
  mm/sparse: optimize sparse_index_alloc
  memcg: add mem_cgroup_from_css() helper
  memcg: further prevent OOM with too many dirty pages
  memcg: prevent OOM with too many dirty pages
  mm: mmu_notifier: fix freed page still mapped in secondary MMU
  mm: memcg: only check anon swapin page charges for swap cache
  mm: memcg: only check swap cache pages for repeated charging
  mm: memcg: split swapin charge function into private and public part
  mm: memcg: remove needless !mm fixup to init_mm when charging
  mm: memcg: remove unneeded shmem charge type
  ...
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--  mm/compaction.c | 63
1 file changed, 58 insertions(+), 5 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 2f42d9528539..e78cb9688421 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -422,6 +422,17 @@ static void isolate_freepages(struct zone *zone,
 					pfn -= pageblock_nr_pages) {
 		unsigned long isolated;
 
+		/*
+		 * Skip ahead if another thread is compacting in the area
+		 * simultaneously. If we wrapped around, we can only skip
+		 * ahead if zone->compact_cached_free_pfn also wrapped to
+		 * above our starting point.
+		 */
+		if (cc->order > 0 && (!cc->wrapped ||
+				      zone->compact_cached_free_pfn >
+				      cc->start_free_pfn))
+			pfn = min(pfn, zone->compact_cached_free_pfn);
+
 		if (!pfn_valid(pfn))
 			continue;
 
@@ -461,8 +472,11 @@ static void isolate_freepages(struct zone *zone,
 		 * looking for free pages, the search will restart here as
 		 * page migration may have returned some pages to the allocator
 		 */
-		if (isolated)
+		if (isolated) {
 			high_pfn = max(high_pfn, pfn);
+			if (cc->order > 0)
+				zone->compact_cached_free_pfn = high_pfn;
+		}
 	}
 
 	/* split_free_page does not map the pages */
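
Taken together, the two hunks above are the read and write sides of one cache: the free scanner consults zone->compact_cached_free_pfn to leap over pageblocks an earlier or concurrent compaction pass already covered, and records its own progress back into the cache whenever it isolates pages. A minimal user-space sketch of that protocol follows; the struct and function names are illustrative stand-ins, not the kernel's.

#include <stdbool.h>

struct zone_cache { unsigned long cached_free_pfn; };
struct compact_ctl {
	int order;		/* > 0: partial, == -1: full compaction */
	bool wrapped;
	unsigned long start_free_pfn;
};

/* Read side: jump to the cached position when it is safe to do so. */
static unsigned long skip_ahead(unsigned long pfn, struct compact_ctl *cc,
				struct zone_cache *z)
{
	if (cc->order > 0 && (!cc->wrapped ||
			      z->cached_free_pfn > cc->start_free_pfn))
		return pfn < z->cached_free_pfn ? pfn : z->cached_free_pfn;
	return pfn;
}

/* Write side: after isolating pages, remember how far we got. */
static void record_progress(unsigned long high_pfn, struct compact_ctl *cc,
			    struct zone_cache *z)
{
	if (cc->order > 0)
		z->cached_free_pfn = high_pfn;
}

The wrapped check in skip_ahead() is the subtle part: once a run has wrapped past the zone's end back to the top, it may only skip ahead if the cached position has also wrapped above its own starting point, otherwise it would jump forward over pageblocks it still owes a scan.
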
@@ -556,6 +570,20 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 	return ISOLATE_SUCCESS;
 }
 
+/*
+ * Returns the start pfn of the last page block in a zone. This is the starting
+ * point for full compaction of a zone. Compaction searches for free pages from
+ * the end of each zone, while isolate_freepages_block scans forward inside each
+ * page block.
+ */
+static unsigned long start_free_pfn(struct zone *zone)
+{
+	unsigned long free_pfn;
+	free_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	free_pfn &= ~(pageblock_nr_pages-1);
+	return free_pfn;
+}
+
 static int compact_finished(struct zone *zone,
 			    struct compact_control *cc)
 {
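
start_free_pfn() rounds the zone's end pfn down to a pageblock boundary, which works because pageblock_nr_pages is a power of two. A standalone illustration of the mask arithmetic; the block size and pfn values below are invented for the example (512 pages corresponds to 2MiB blocks of 4KiB pages).

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL	/* assumed block size for the example */

int main(void)
{
	unsigned long zone_start_pfn = 4096;
	unsigned long spanned_pages = 262000;	/* deliberately unaligned */
	unsigned long free_pfn = zone_start_pfn + spanned_pages;

	/* Clearing the low bits rounds down to the last full pageblock. */
	free_pfn &= ~(PAGEBLOCK_NR_PAGES - 1);

	/* Prints: end pfn 266096 -> last pageblock starts at 265728 */
	printf("end pfn %lu -> last pageblock starts at %lu\n",
	       zone_start_pfn + spanned_pages, free_pfn);
	return 0;
}
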
@@ -565,8 +593,26 @@ static int compact_finished(struct zone *zone,
 	if (fatal_signal_pending(current))
 		return COMPACT_PARTIAL;
 
-	/* Compaction run completes if the migrate and free scanner meet */
-	if (cc->free_pfn <= cc->migrate_pfn)
+	/*
+	 * A full (order == -1) compaction run starts at the beginning and
+	 * end of a zone; it completes when the migrate and free scanner meet.
+	 * A partial (order > 0) compaction can start with the free scanner
+	 * at a random point in the zone, and may have to restart.
+	 */
+	if (cc->free_pfn <= cc->migrate_pfn) {
+		if (cc->order > 0 && !cc->wrapped) {
+			/* We started partway through; restart at the end. */
+			unsigned long free_pfn = start_free_pfn(zone);
+			zone->compact_cached_free_pfn = free_pfn;
+			cc->free_pfn = free_pfn;
+			cc->wrapped = 1;
+			return COMPACT_CONTINUE;
+		}
+		return COMPACT_COMPLETE;
+	}
+
+	/* We wrapped around and ended up where we started. */
+	if (cc->wrapped && cc->free_pfn <= cc->start_free_pfn)
 		return COMPACT_COMPLETE;
 
 	/*
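
The new exit logic gives a partial compaction run three possible verdicts: keep going, wrap the free scanner back to the zone's end for one more pass, or stop once a wrapped run returns to its starting point. The same decision expressed as a pure function, with simplified stand-in names for the enum and parameters:

enum verdict { KEEP_GOING, WRAP_TO_END, DONE };

static enum verdict finish_check(unsigned long free_pfn,
				 unsigned long migrate_pfn,
				 unsigned long start_free_pfn,
				 int order, int wrapped)
{
	if (free_pfn <= migrate_pfn) {
		/* Scanners met: a partial run gets exactly one wrap. */
		if (order > 0 && !wrapped)
			return WRAP_TO_END;
		return DONE;
	}
	/* A wrapped run is done once it reaches its original start. */
	if (wrapped && free_pfn <= start_free_pfn)
		return DONE;
	return KEEP_GOING;
}

Because wrapped is set when WRAP_TO_END is taken, a run can wrap at most once, so every partial compaction still covers the whole zone at most one time before reporting COMPACT_COMPLETE.
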
@@ -664,8 +710,15 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
 	/* Setup to move all movable pages to the end of the zone */
 	cc->migrate_pfn = zone->zone_start_pfn;
-	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
-	cc->free_pfn &= ~(pageblock_nr_pages-1);
+
+	if (cc->order > 0) {
+		/* Incremental compaction. Start where the last one stopped. */
+		cc->free_pfn = zone->compact_cached_free_pfn;
+		cc->start_free_pfn = cc->free_pfn;
+	} else {
+		/* Order == -1 starts at the end of the zone. */
+		cc->free_pfn = start_free_pfn(zone);
+	}
 
 	migrate_prep_local();
 
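
With this initialization, successive order > 0 compaction runs resume where the previous one left off instead of rescanning from the zone's end every time; only explicit full compaction (order == -1, e.g. triggered via /proc/sys/vm/compact_memory) still starts from the last pageblock unconditionally. A toy model of the resulting scan pattern across several incremental runs; all numbers are invented for illustration.

#include <stdio.h>

int main(void)
{
	unsigned long zone_end = 1000;		/* start_free_pfn(zone) */
	unsigned long cached = zone_end;	/* compact_cached_free_pfn */
	unsigned long stop_at[] = { 700, 400, 100 };

	for (int run = 0; run < 3; run++) {
		unsigned long start = cached;	/* incremental resume */
		cached = stop_at[run];		/* run records its progress */
		printf("run %d: free scanner pfn %lu down to %lu\n",
		       run + 1, start, cached);
	}
	return 0;
}

Each run picks up at the cached pfn (1000, then 700, then 400), so the three runs together sweep the zone once rather than each walking the same tail of pageblocks.
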