author     Mel Gorman <mgorman@suse.de>                    2012-01-12 20:19:43 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-01-12 23:13:09 -0500
commit     a6bc32b899223a877f595ef9ddc1e89ead5072b8 (patch)
tree       a9529f7af2cf8e77bb6670acea32169c891a2b76 /mm
parent     66199712e9eef5aede09dbcd9dfff87798a66917 (diff)
mm: compaction: introduce sync-light migration for use by compaction
This patch adds a lightweight sync migrate operation MIGRATE_SYNC_LIGHT mode
that avoids writing back pages to backing storage.  Async compaction maps to
MIGRATE_ASYNC while sync compaction maps to MIGRATE_SYNC_LIGHT.  For other
migrate_pages users such as memory hotplug, MIGRATE_SYNC is used.

This avoids sync compaction stalling for an excessive length of time,
particularly when copying files to a USB stick where there might be a large
number of dirty pages backed by a filesystem that does not support
->writepages.

[aarcange@redhat.com: This patch is heavily based on Andrea's work]
[akpm@linux-foundation.org: fix fs/nfs/write.c build]
[akpm@linux-foundation.org: fix fs/btrfs/disk-io.c build]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Andy Isaacson <adi@hexapodia.org>
Cc: Nai Xia <nai.xia@gmail.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
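The enum migrate_mode itself is introduced outside mm/ (in include/linux/migrate_mode.h),
so it does not appear in the diffstat below. For reference, a rough sketch of the three
modes; the exact header comments may differ slightly from what is shown here:

    /*
     * MIGRATE_ASYNC means never block.
     * MIGRATE_SYNC_LIGHT allows blocking on most operations but not on
     *     ->writepage, as the potential stall time is too significant.
     * MIGRATE_SYNC will block when migrating pages (full synchronous migration).
     */
    enum migrate_mode {
            MIGRATE_ASYNC,
            MIGRATE_SYNC_LIGHT,
            MIGRATE_SYNC,
    };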
Diffstat (limited to 'mm')
-rw-r--r--   mm/compaction.c      |  2
-rw-r--r--   mm/memory-failure.c  |  2
-rw-r--r--   mm/memory_hotplug.c  |  2
-rw-r--r--   mm/mempolicy.c       |  2
-rw-r--r--   mm/migrate.c         | 78
5 files changed, 47 insertions, 39 deletions
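To orient the reader before the hunks: migrate_pages() now takes an enum migrate_mode
instead of a bool. The snippet below is a sketch assembled from the changes that follow,
not an extra part of the patch; see the hunks themselves for the authoritative code.

    /* Post-patch prototype (mm/migrate.c) */
    int migrate_pages(struct list_head *from, new_page_t get_new_page,
                      unsigned long private, bool offlining,
                      enum migrate_mode mode);

    /* Compaction chooses the cheaper modes (mm/compaction.c) */
    err = migrate_pages(&cc->migratepages, compaction_alloc, (unsigned long)cc,
                        false, cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);

    /* Memory hotplug, mempolicy and soft-offline keep full synchronous migration */
    ret = migrate_pages(&source, hotremove_migrate_alloc, 0, true, MIGRATE_SYNC);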
diff --git a/mm/compaction.c b/mm/compaction.c
index fb291585e1bf..71a58f67f481 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -557,7 +557,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                nr_migrate = cc->nr_migratepages;
                err = migrate_pages(&cc->migratepages, compaction_alloc,
                                (unsigned long)cc, false,
-                               cc->sync);
+                               cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
                update_nr_listpages(cc);
                nr_remaining = cc->nr_migratepages;
 
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 06d3479513aa..56080ea36140 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1557,7 +1557,7 @@ int soft_offline_page(struct page *page, int flags)
                                        page_is_file_cache(page));
                list_add(&page->lru, &pagelist);
                ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
-                                               0, true);
+                                               0, MIGRATE_SYNC);
                if (ret) {
                        putback_lru_pages(&pagelist);
                        pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 2168489c0bc9..6629fafd6ce4 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -809,7 +809,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                }
                /* this function returns # of failed pages */
                ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
-                                               true, true);
+                                               true, MIGRATE_SYNC);
                if (ret)
                        putback_lru_pages(&source);
        }
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e3d58f088466..06b145fb64ab 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -942,7 +942,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 
        if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, new_node_page, dest,
-                                               false, true);
+                                               false, MIGRATE_SYNC);
                if (err)
                        putback_lru_pages(&pagelist);
        }
diff --git a/mm/migrate.c b/mm/migrate.c
index 4e86f3bacb85..9871a56d82c3 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -218,12 +218,13 @@ out:
 
 #ifdef CONFIG_BLOCK
 /* Returns true if all buffers are successfully locked */
-static bool buffer_migrate_lock_buffers(struct buffer_head *head, bool sync)
+static bool buffer_migrate_lock_buffers(struct buffer_head *head,
+                                                       enum migrate_mode mode)
 {
        struct buffer_head *bh = head;
 
        /* Simple case, sync compaction */
-       if (sync) {
+       if (mode != MIGRATE_ASYNC) {
                do {
                        get_bh(bh);
                        lock_buffer(bh);
@@ -259,7 +260,7 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head, bool sync)
 }
 #else
 static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
-                                                               bool sync)
+                                                       enum migrate_mode mode)
 {
        return true;
 }
@@ -275,7 +276,7 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
  */
 static int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page,
-               struct buffer_head *head, bool sync)
+               struct buffer_head *head, enum migrate_mode mode)
 {
        int expected_count;
        void **pslot;
@@ -311,7 +312,8 @@ static int migrate_page_move_mapping(struct address_space *mapping,
         * the mapping back due to an elevated page count, we would have to
         * block waiting on other references to be dropped.
         */
-       if (!sync && head && !buffer_migrate_lock_buffers(head, sync)) {
+       if (mode == MIGRATE_ASYNC && head &&
+                       !buffer_migrate_lock_buffers(head, mode)) {
                page_unfreeze_refs(page, expected_count);
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
@@ -472,13 +474,14 @@ EXPORT_SYMBOL(fail_migrate_page);
  * Pages are locked upon entry and exit.
  */
 int migrate_page(struct address_space *mapping,
-               struct page *newpage, struct page *page, bool sync)
+               struct page *newpage, struct page *page,
+               enum migrate_mode mode)
 {
        int rc;
 
        BUG_ON(PageWriteback(page));    /* Writeback must be complete */
 
-       rc = migrate_page_move_mapping(mapping, newpage, page, NULL, sync);
+       rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
 
        if (rc)
                return rc;
@@ -495,17 +498,17 @@ EXPORT_SYMBOL(migrate_page);
  * exist.
  */
 int buffer_migrate_page(struct address_space *mapping,
-               struct page *newpage, struct page *page, bool sync)
+               struct page *newpage, struct page *page, enum migrate_mode mode)
 {
        struct buffer_head *bh, *head;
        int rc;
 
        if (!page_has_buffers(page))
-               return migrate_page(mapping, newpage, page, sync);
+               return migrate_page(mapping, newpage, page, mode);
 
        head = page_buffers(page);
 
-       rc = migrate_page_move_mapping(mapping, newpage, page, head, sync);
+       rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
 
        if (rc)
                return rc;
@@ -515,8 +518,8 @@ int buffer_migrate_page(struct address_space *mapping,
         * with an IRQ-safe spinlock held. In the sync case, the buffers
         * need to be locked now
         */
-       if (sync)
-               BUG_ON(!buffer_migrate_lock_buffers(head, sync));
+       if (mode != MIGRATE_ASYNC)
+               BUG_ON(!buffer_migrate_lock_buffers(head, mode));
 
        ClearPagePrivate(page);
        set_page_private(newpage, page_private(page));
@@ -593,10 +596,11 @@ static int writeout(struct address_space *mapping, struct page *page)
  * Default handling if a filesystem does not provide a migration function.
  */
 static int fallback_migrate_page(struct address_space *mapping,
-       struct page *newpage, struct page *page, bool sync)
+       struct page *newpage, struct page *page, enum migrate_mode mode)
 {
        if (PageDirty(page)) {
-               if (!sync)
+               /* Only writeback pages in full synchronous migration */
+               if (mode != MIGRATE_SYNC)
                        return -EBUSY;
                return writeout(mapping, page);
        }
@@ -609,7 +613,7 @@ static int fallback_migrate_page(struct address_space *mapping,
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;
 
-       return migrate_page(mapping, newpage, page, sync);
+       return migrate_page(mapping, newpage, page, mode);
 }
 
 /*
@@ -624,7 +628,7 @@ static int fallback_migrate_page(struct address_space *mapping,
  *  == 0 - success
  */
 static int move_to_new_page(struct page *newpage, struct page *page,
-                                       int remap_swapcache, bool sync)
+                               int remap_swapcache, enum migrate_mode mode)
 {
        struct address_space *mapping;
        int rc;
@@ -645,7 +649,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 
        mapping = page_mapping(page);
        if (!mapping)
-               rc = migrate_page(mapping, newpage, page, sync);
+               rc = migrate_page(mapping, newpage, page, mode);
        else if (mapping->a_ops->migratepage)
                /*
                 * Most pages have a mapping and most filesystems provide a
@@ -654,9 +658,9 @@ static int move_to_new_page(struct page *newpage, struct page *page,
                 * is the most common path for page migration.
                 */
                rc = mapping->a_ops->migratepage(mapping,
-                                               newpage, page, sync);
+                                               newpage, page, mode);
        else
-               rc = fallback_migrate_page(mapping, newpage, page, sync);
+               rc = fallback_migrate_page(mapping, newpage, page, mode);
 
        if (rc) {
                newpage->mapping = NULL;
@@ -671,7 +675,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 }
 
 static int __unmap_and_move(struct page *page, struct page *newpage,
-                       int force, bool offlining, bool sync)
+                       int force, bool offlining, enum migrate_mode mode)
 {
        int rc = -EAGAIN;
        int remap_swapcache = 1;
@@ -680,7 +684,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
        struct anon_vma *anon_vma = NULL;
 
        if (!trylock_page(page)) {
-               if (!force || !sync)
+               if (!force || mode == MIGRATE_ASYNC)
                        goto out;
 
                /*
@@ -726,10 +730,12 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 
        if (PageWriteback(page)) {
                /*
-                * For !sync, there is no point retrying as the retry loop
-                * is expected to be too short for PageWriteback to be cleared
+                * Only in the case of a full syncronous migration is it
+                * necessary to wait for PageWriteback. In the async case,
+                * the retry loop is too short and in the sync-light case,
+                * the overhead of stalling is too much
                 */
-               if (!sync) {
+               if (mode != MIGRATE_SYNC) {
                        rc = -EBUSY;
                        goto uncharge;
                }
@@ -800,7 +806,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 
 skip_unmap:
        if (!page_mapped(page))
-               rc = move_to_new_page(newpage, page, remap_swapcache, sync);
+               rc = move_to_new_page(newpage, page, remap_swapcache, mode);
 
        if (rc && remap_swapcache)
                remove_migration_ptes(page, page);
@@ -823,7 +829,8 @@ out:
  * to the newly allocated page in newpage.
  */
 static int unmap_and_move(new_page_t get_new_page, unsigned long private,
-                       struct page *page, int force, bool offlining, bool sync)
+                       struct page *page, int force, bool offlining,
+                       enum migrate_mode mode)
 {
        int rc = 0;
        int *result = NULL;
@@ -843,7 +850,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
        if (unlikely(split_huge_page(page)))
                goto out;
 
-       rc = __unmap_and_move(page, newpage, force, offlining, sync);
+       rc = __unmap_and_move(page, newpage, force, offlining, mode);
 out:
        if (rc != -EAGAIN) {
                /*
@@ -891,7 +898,8 @@ out:
  */
 static int unmap_and_move_huge_page(new_page_t get_new_page,
                                unsigned long private, struct page *hpage,
-                               int force, bool offlining, bool sync)
+                               int force, bool offlining,
+                               enum migrate_mode mode)
 {
        int rc = 0;
        int *result = NULL;
@@ -904,7 +912,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
        rc = -EAGAIN;
 
        if (!trylock_page(hpage)) {
-               if (!force || !sync)
+               if (!force || mode != MIGRATE_SYNC)
                        goto out;
                lock_page(hpage);
        }
@@ -915,7 +923,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
        try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 
        if (!page_mapped(hpage))
-               rc = move_to_new_page(new_hpage, hpage, 1, sync);
+               rc = move_to_new_page(new_hpage, hpage, 1, mode);
 
        if (rc)
                remove_migration_ptes(hpage, hpage);
@@ -958,7 +966,7 @@ out:
  */
 int migrate_pages(struct list_head *from,
                new_page_t get_new_page, unsigned long private, bool offlining,
-               bool sync)
+               enum migrate_mode mode)
 {
        int retry = 1;
        int nr_failed = 0;
@@ -979,7 +987,7 @@ int migrate_pages(struct list_head *from,
 
                        rc = unmap_and_move(get_new_page, private,
                                        page, pass > 2, offlining,
-                                       sync);
+                                       mode);
 
                        switch(rc) {
                        case -ENOMEM:
@@ -1009,7 +1017,7 @@ out:
 
 int migrate_huge_pages(struct list_head *from,
                new_page_t get_new_page, unsigned long private, bool offlining,
-               bool sync)
+               enum migrate_mode mode)
 {
        int retry = 1;
        int nr_failed = 0;
@@ -1026,7 +1034,7 @@ int migrate_huge_pages(struct list_head *from,
 
                        rc = unmap_and_move_huge_page(get_new_page,
                                        private, page, pass > 2, offlining,
-                                       sync);
+                                       mode);
 
                        switch(rc) {
                        case -ENOMEM:
@@ -1155,7 +1163,7 @@ set_status:
        err = 0;
        if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, new_page_node,
-                               (unsigned long)pm, 0, true);
+                               (unsigned long)pm, 0, MIGRATE_SYNC);
                if (err)
                        putback_lru_pages(&pagelist);
        }