commit 66e61060d7ad9fcf61475fb836fb5987db7a7ee0
Merge: a89c3e956ae78cec8926b92f2d61b7a5b675e787 f242e50eee1ec7692c4854d94e8cd543991cce71
Author:    Mark Brown <broonie@opensource.wolfsonmicro.com>  2012-06-11 23:46:58 -0400
Committer: Mark Brown <broonie@opensource.wolfsonmicro.com>  2012-06-11 23:46:58 -0400
Tree:      a0abe3fb2fa8858261dc41df9444c0e0ca85f1a6

    Merge branch 'asoc-ab8500' into for-3.6
Diffstat (limited to 'mm/compaction.c'):

 mm/compaction.c | 142 +++++-------------------------
 1 file changed, 23 insertions(+), 119 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 4ac338af5120..7ea259d82a99 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -236,7 +236,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
          */
         while (unlikely(too_many_isolated(zone))) {
                 /* async migration should just abort */
-                if (cc->mode != COMPACT_SYNC)
+                if (!cc->sync)
                         return 0;
 
                 congestion_wait(BLK_RW_ASYNC, HZ/10);
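
The test changed here is the file's recurring sync/async contract: asynchronous compaction must never sleep, so it bails out where synchronous compaction throttles and retries. A minimal userspace sketch of that contract; too_many_isolated() and congestion_wait() are kernel internals, so the stubs below are assumptions for demonstration only.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int isolated_pages = 60;

/* stand-in threshold check; the real one compares against per-zone LRU counts */
static bool too_many_isolated(void)
{
        return isolated_pages > 50;
}

static int isolate_range(bool sync)
{
        while (too_many_isolated()) {
                if (!sync)
                        return 0;       /* async caller: abort rather than sleep */
                usleep(100000);         /* sync caller: back off, like congestion_wait(BLK_RW_ASYNC, HZ/10) */
                isolated_pages -= 20;   /* pretend other CPUs made progress meanwhile */
        }
        return 1;
}

int main(void)
{
        printf("async: %d\n", isolate_range(false));    /* 0: aborted immediately */
        printf("sync:  %d\n", isolate_range(true));     /* 1: waited, then proceeded */
        return 0;
}
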
@@ -304,8 +304,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                  * satisfies the allocation
                  */
                 pageblock_nr = low_pfn >> pageblock_order;
-                if (cc->mode != COMPACT_SYNC &&
-                    last_pageblock_nr != pageblock_nr &&
+                if (!cc->sync && last_pageblock_nr != pageblock_nr &&
                     !migrate_async_suitable(get_pageblock_migratetype(page))) {
                         low_pfn += pageblock_nr_pages;
                         low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
@@ -326,7 +325,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                         continue;
                 }
 
-                if (cc->mode != COMPACT_SYNC)
+                if (!cc->sync)
                         mode |= ISOLATE_ASYNC_MIGRATE;
 
                 lruvec = mem_cgroup_page_lruvec(page, zone);
@@ -361,90 +360,27 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 #ifdef CONFIG_COMPACTION
-/*
- * Returns true if MIGRATE_UNMOVABLE pageblock was successfully
- * converted to MIGRATE_MOVABLE type, false otherwise.
- */
-static bool rescue_unmovable_pageblock(struct page *page)
-{
-        unsigned long pfn, start_pfn, end_pfn;
-        struct page *start_page, *end_page;
-
-        pfn = page_to_pfn(page);
-        start_pfn = pfn & ~(pageblock_nr_pages - 1);
-        end_pfn = start_pfn + pageblock_nr_pages;
-
-        start_page = pfn_to_page(start_pfn);
-        end_page = pfn_to_page(end_pfn);
-
-        /* Do not deal with pageblocks that overlap zones */
-        if (page_zone(start_page) != page_zone(end_page))
-                return false;
-
-        for (page = start_page, pfn = start_pfn; page < end_page; pfn++,
-                                                                  page++) {
-                if (!pfn_valid_within(pfn))
-                        continue;
-
-                if (PageBuddy(page)) {
-                        int order = page_order(page);
-
-                        pfn += (1 << order) - 1;
-                        page += (1 << order) - 1;
-
-                        continue;
-                } else if (page_count(page) == 0 || PageLRU(page))
-                        continue;
-
-                return false;
-        }
-
-        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-        move_freepages_block(page_zone(page), page, MIGRATE_MOVABLE);
-        return true;
-}
 
-enum smt_result {
-        GOOD_AS_MIGRATION_TARGET,
-        FAIL_UNMOVABLE_TARGET,
-        FAIL_BAD_TARGET,
-};
-
-/*
- * Returns GOOD_AS_MIGRATION_TARGET if the page is within a block
- * suitable for migration to, FAIL_UNMOVABLE_TARGET if the page
- * is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise.
- */
-static enum smt_result suitable_migration_target(struct page *page,
-                                                 struct compact_control *cc)
+/* Returns true if the page is within a block suitable for migration to */
+static bool suitable_migration_target(struct page *page)
 {
 
         int migratetype = get_pageblock_migratetype(page);
 
         /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
         if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
-                return FAIL_BAD_TARGET;
+                return false;
 
         /* If the page is a large free page, then allow migration */
         if (PageBuddy(page) && page_order(page) >= pageblock_order)
-                return GOOD_AS_MIGRATION_TARGET;
+                return true;
 
         /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
-        if (cc->mode != COMPACT_ASYNC_UNMOVABLE &&
-            migrate_async_suitable(migratetype))
-                return GOOD_AS_MIGRATION_TARGET;
-
-        if (cc->mode == COMPACT_ASYNC_MOVABLE &&
-            migratetype == MIGRATE_UNMOVABLE)
-                return FAIL_UNMOVABLE_TARGET;
-
-        if (cc->mode != COMPACT_ASYNC_MOVABLE &&
-            migratetype == MIGRATE_UNMOVABLE &&
-            rescue_unmovable_pageblock(page))
-                return GOOD_AS_MIGRATION_TARGET;
+        if (migrate_async_suitable(migratetype))
+                return true;
 
         /* Otherwise skip the block */
-        return FAIL_BAD_TARGET;
+        return false;
 }
 
 /*
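
With rescue_unmovable_pageblock() and the three-state enum smt_result gone, suitable_migration_target() is again a plain predicate. A condensed, compilable userspace sketch of the surviving decision table; the PageBuddy()/page_order() test is collapsed into a boolean parameter and the migratetype constants are stand-ins, both assumptions made for brevity.

#include <stdbool.h>
#include <stdio.h>

enum migratetype { MIGRATE_MOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,
                   MIGRATE_RESERVE, MIGRATE_ISOLATE, MIGRATE_CMA };

/* MIGRATE_MOVABLE and MIGRATE_CMA blocks are safe targets for async work */
static bool migrate_async_suitable(int migratetype)
{
        return migratetype == MIGRATE_CMA || migratetype == MIGRATE_MOVABLE;
}

static bool suitable_migration_target(int migratetype, bool is_large_free_page)
{
        /* Don't interfere with memory hot-remove or min_free_kbytes blocks */
        if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
                return false;

        /* A free page of at least pageblock_order can be taken wholesale */
        if (is_large_free_page)
                return true;

        /* Otherwise only movable/CMA blocks qualify; everything else is skipped */
        return migrate_async_suitable(migratetype);
}

int main(void)
{
        printf("%d\n", suitable_migration_target(MIGRATE_MOVABLE, false));   /* 1 */
        printf("%d\n", suitable_migration_target(MIGRATE_UNMOVABLE, false)); /* 0 */
        printf("%d\n", suitable_migration_target(MIGRATE_UNMOVABLE, true));  /* 1 */
        return 0;
}
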
@@ -478,13 +414,6 @@ static void isolate_freepages(struct zone *zone,
         zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
 
         /*
-         * isolate_freepages() may be called more than once during
-         * compact_zone_order() run and we want only the most recent
-         * count.
-         */
-        cc->nr_pageblocks_skipped = 0;
-
-        /*
          * Isolate free pages until enough are available to migrate the
          * pages on cc->migratepages. We stop searching if the migrate
          * and free page scanners meet or enough free pages are isolated.
@@ -492,7 +421,6 @@ static void isolate_freepages(struct zone *zone,
         for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
                                         pfn -= pageblock_nr_pages) {
                 unsigned long isolated;
-                enum smt_result ret;
 
                 if (!pfn_valid(pfn))
                         continue;
@@ -509,12 +437,9 @@ static void isolate_freepages(struct zone *zone,
                         continue;
 
                 /* Check the block is suitable for migration */
-                ret = suitable_migration_target(page, cc);
-                if (ret != GOOD_AS_MIGRATION_TARGET) {
-                        if (ret == FAIL_UNMOVABLE_TARGET)
-                                cc->nr_pageblocks_skipped++;
+                if (!suitable_migration_target(page))
                         continue;
-                }
+
                 /*
                  * Found a block suitable for isolating free pages from. Now
                  * we disabled interrupts, double check things are ok and
@@ -523,14 +448,12 @@ static void isolate_freepages(struct zone *zone,
                  */
                 isolated = 0;
                 spin_lock_irqsave(&zone->lock, flags);
-                ret = suitable_migration_target(page, cc);
-                if (ret == GOOD_AS_MIGRATION_TARGET) {
+                if (suitable_migration_target(page)) {
                         end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
                         isolated = isolate_freepages_block(pfn, end_pfn,
                                                            freelist, false);
                         nr_freepages += isolated;
-                } else if (ret == FAIL_UNMOVABLE_TARGET)
-                        cc->nr_pageblocks_skipped++;
+                }
                 spin_unlock_irqrestore(&zone->lock, flags);
 
                 /*
@@ -762,9 +685,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
                 nr_migrate = cc->nr_migratepages;
                 err = migrate_pages(&cc->migratepages, compaction_alloc,
-                                (unsigned long)&cc->freepages, false,
-                                (cc->mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT
-                                : MIGRATE_ASYNC);
+                                (unsigned long)cc, false,
+                                cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
                 update_nr_listpages(cc);
                 nr_remaining = cc->nr_migratepages;
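
The first changed argument is the 'private' cookie that migrate_pages() forwards verbatim to its page-allocation callback: after this change compaction_alloc() receives the whole compact_control rather than only the embedded freepages list. A small userspace illustration of that opaque-cookie pattern; all names below are hypothetical demo code, not the kernel API.

#include <stdio.h>

struct compact_control_demo {
        int nr_freepages;       /* stand-in for the isolated free page list */
};

/* The callback receives the cookie as an opaque word, like get_new_page */
static int take_freepage(unsigned long data)
{
        struct compact_control_demo *cc = (struct compact_control_demo *)data;

        if (cc->nr_freepages == 0)
                return -1;      /* the kernel would refill via isolate_freepages() */
        return --cc->nr_freepages;
}

/* Stand-in for migrate_pages(): forwards 'private' to the callback untouched */
static int migrate_one(int (*get_page)(unsigned long), unsigned long private)
{
        return get_page(private);
}

int main(void)
{
        struct compact_control_demo cc = { .nr_freepages = 2 };

        /* pass the whole control structure, as the post-change call does */
        printf("%d\n", migrate_one(take_freepage, (unsigned long)&cc)); /* 1 */
        printf("%d\n", migrate_one(take_freepage, (unsigned long)&cc)); /* 0 */
        return 0;
}
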
@@ -793,8 +715,7 @@ out:
 
 static unsigned long compact_zone_order(struct zone *zone,
                                         int order, gfp_t gfp_mask,
-                                        enum compact_mode mode,
-                                        unsigned long *nr_pageblocks_skipped)
+                                        bool sync)
 {
         struct compact_control cc = {
                 .nr_freepages = 0,
@@ -802,17 +723,12 @@ static unsigned long compact_zone_order(struct zone *zone,
                 .order = order,
                 .migratetype = allocflags_to_migratetype(gfp_mask),
                 .zone = zone,
-                .mode = mode,
+                .sync = sync,
         };
-        unsigned long rc;
-
         INIT_LIST_HEAD(&cc.freepages);
         INIT_LIST_HEAD(&cc.migratepages);
 
-        rc = compact_zone(zone, &cc);
-        *nr_pageblocks_skipped = cc.nr_pageblocks_skipped;
-
-        return rc;
+        return compact_zone(zone, &cc);
 }
 
 int sysctl_extfrag_threshold = 500;
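
The .sync initializer restored above belongs to struct compact_control, which is defined in mm/internal.h and so sits outside this diff. A trimmed sketch of the post-change layout, reconstructed only from the initializers and cc-> accesses visible in this file; the stand-in list_head and any members omitted here are assumptions — consult mm/internal.h of the matching tree for the authoritative definition.

#include <stdbool.h>

struct list_head { struct list_head *next, *prev; };   /* stand-in for <linux/list.h> */
struct zone;                                            /* opaque here */

struct compact_control {
        struct list_head freepages;     /* isolated free pages to migrate to */
        struct list_head migratepages;  /* pages being migrated */
        unsigned long nr_freepages;     /* count of isolated free pages */
        unsigned long nr_migratepages;  /* count of pages left to migrate */
        bool sync;                      /* MIGRATE_SYNC_LIGHT vs MIGRATE_ASYNC */
        int order;                      /* order a direct compactor needs; -1 means whole zone */
        int migratetype;                /* from allocflags_to_migratetype(gfp_mask) */
        struct zone *zone;              /* zone being compacted */
};
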
@@ -837,8 +753,6 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
         struct zoneref *z;
         struct zone *zone;
         int rc = COMPACT_SKIPPED;
-        unsigned long nr_pageblocks_skipped;
-        enum compact_mode mode;
 
         /*
          * Check whether it is worth even starting compaction. The order check is
@@ -855,22 +769,12 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
                                                         nodemask) {
                 int status;
 
-                mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE;
-retry:
-                status = compact_zone_order(zone, order, gfp_mask, mode,
-                                            &nr_pageblocks_skipped);
+                status = compact_zone_order(zone, order, gfp_mask, sync);
                 rc = max(status, rc);
 
                 /* If a normal allocation would succeed, stop compacting */
                 if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
                         break;
-
-                if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) {
-                        if (nr_pageblocks_skipped) {
-                                mode = COMPACT_ASYNC_UNMOVABLE;
-                                goto retry;
-                        }
-                }
         }
 
         return rc;
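
The surviving loop body works because rc = max(status, rc) exploits the ordering of the COMPACT_* return codes, which increase with the amount of progress made, so the caller reports the most advanced outcome across all zones tried. A tiny demonstration; the constant names mirror include/linux/compaction.h of this era, though their exact values should be treated as an assumption.

#include <stdio.h>

/* Ordered by progress, as in include/linux/compaction.h (3.5-era layout) */
enum compact_result {
        COMPACT_SKIPPED,        /* compaction was not attempted */
        COMPACT_CONTINUE,       /* scanners should keep going */
        COMPACT_PARTIAL,        /* enough was freed for the allocation */
        COMPACT_COMPLETE,       /* whole zone scanned without success */
};

static int max(int a, int b) { return a > b ? a : b; }

int main(void)
{
        int per_zone[] = { COMPACT_SKIPPED, COMPACT_COMPLETE, COMPACT_PARTIAL };
        int rc = COMPACT_SKIPPED;

        for (unsigned i = 0; i < sizeof(per_zone) / sizeof(per_zone[0]); i++)
                rc = max(per_zone[i], rc);      /* keep the most advanced outcome */

        printf("rc = %d (COMPACT_COMPLETE)\n", rc);     /* prints 3 */
        return 0;
}
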
@@ -904,7 +808,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
                 if (ok && cc->order > zone->compact_order_failed)
                         zone->compact_order_failed = cc->order + 1;
                 /* Currently async compaction is never deferred. */
-                else if (!ok && cc->mode == COMPACT_SYNC)
+                else if (!ok && cc->sync)
                         defer_compaction(zone, cc->order);
         }
 
@@ -919,7 +823,7 @@ int compact_pgdat(pg_data_t *pgdat, int order)
 {
         struct compact_control cc = {
                 .order = order,
-                .mode = COMPACT_ASYNC_MOVABLE,
+                .sync = false,
         };
 
         return __compact_pgdat(pgdat, &cc);
@@ -929,7 +833,7 @@ static int compact_node(int nid)
 {
         struct compact_control cc = {
                 .order = -1,
-                .mode = COMPACT_SYNC,
+                .sync = true,
         };
 
         return __compact_pgdat(NODE_DATA(nid), &cc);