author    Andrea Arcangeli <aarcange@redhat.com>    2011-03-22 19:33:11 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-03-22 20:44:05 -0400
commit    11bc82d67d1150767901bca54a24466621d763d7
tree      119442c599a82b35c553c6f54626c870e885ee50    /mm/migrate.c
parent    b2eef8c0d09101bbbff2531c097543aedde0b525
mm: compaction: Use async migration for __GFP_NO_KSWAPD and enforce no writeback
__GFP_NO_KSWAPD allocations are usually very expensive and not mandatory
to succeed, as they have a graceful fallback.  Waiting for I/O in those
tends to be overkill in terms of latencies, so we can reduce their
latency by disabling sync migration.

Unfortunately, even with async migration it's still possible for the
process to be blocked waiting for a request slot (e.g. get_request_wait
in the block layer) when ->writepage is called.  To prevent
__GFP_NO_KSWAPD allocations from blocking, this patch prevents
->writepage being called on dirty page cache during asynchronous
migration.

Addresses https://bugzilla.kernel.org/show_bug.cgi?id=31142

[mel@csn.ul.ie: avoid writebacks for NFS, retry locked pages, use bool]
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Arthur Marsh <arthur.marsh@internode.on.net>
Cc: Clemens Ladisch <cladisch@googlemail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Reported-by: Alex Villacis Lasso <avillaci@ceibo.fiec.espol.edu.ec>
Tested-by: Alex Villacis Lasso <avillaci@ceibo.fiec.espol.edu.ec>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
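[Editor's note] In outline, the change makes asynchronous migration refuse
to touch dirty page cache rather than risk blocking in writeback.  The
sketch below is a condensed, hypothetical restatement of the logic the
first two hunks add to move_to_new_page(); the helper name is invented,
and fallback_migrate_page() plus the surrounding page locking live in
mm/migrate.c, so this is not standalone code.

    #include <linux/fs.h>        /* struct address_space, a_ops */
    #include <linux/migrate.h>   /* migrate_page() */
    #include <linux/pagemap.h>   /* PageDirty() */

    /*
     * Hypothetical condensation of the policy this patch adds to
     * move_to_new_page() -- not a real kernel symbol.
     */
    static int try_migrate_mapped_page(struct address_space *mapping,
                                       struct page *newpage,
                                       struct page *page, bool sync)
    {
            /*
             * Async (!sync) migration must never reach ->writepage,
             * because writeback can block on a request slot
             * (get_request_wait in the block layer).  A dirty page is
             * therefore only migrated if the mapping's migratepage is
             * the nonblocking migrate_page(), as for swapcache/tmpfs;
             * everything else is skipped with -EBUSY.
             */
            if (PageDirty(page) && !sync &&
                mapping->a_ops->migratepage != migrate_page)
                    return -EBUSY;

            if (mapping->a_ops->migratepage)
                    return mapping->a_ops->migratepage(mapping,
                                                       newpage, page);

            return fallback_migrate_page(mapping, newpage, page);
    }

Callers such as compaction for __GFP_NO_KSWAPD allocations pass
sync=false, so a dirty file-backed page stays put instead of being
written back; the caller retries it later or the allocation falls back.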
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c  48
1 file changed, 33 insertions(+), 15 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 7d2983f3783..89e5c3fe8bb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -564,7 +564,7 @@ static int fallback_migrate_page(struct address_space *mapping,
  *  == 0 - success
  */
 static int move_to_new_page(struct page *newpage, struct page *page,
-					int remap_swapcache)
+					int remap_swapcache, bool sync)
 {
 	struct address_space *mapping;
 	int rc;
@@ -586,18 +586,28 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 	mapping = page_mapping(page);
 	if (!mapping)
 		rc = migrate_page(mapping, newpage, page);
-	else if (mapping->a_ops->migratepage)
+	else {
 		/*
-		 * Most pages have a mapping and most filesystems
-		 * should provide a migration function. Anonymous
-		 * pages are part of swap space which also has its
-		 * own migration function. This is the most common
-		 * path for page migration.
+		 * Do not writeback pages if !sync and migratepage is
+		 * not pointing to migrate_page() which is nonblocking
+		 * (swapcache/tmpfs uses migratepage = migrate_page).
 		 */
-		rc = mapping->a_ops->migratepage(mapping,
-						newpage, page);
-	else
-		rc = fallback_migrate_page(mapping, newpage, page);
+		if (PageDirty(page) && !sync &&
+		    mapping->a_ops->migratepage != migrate_page)
+			rc = -EBUSY;
+		else if (mapping->a_ops->migratepage)
+			/*
+			 * Most pages have a mapping and most filesystems
+			 * should provide a migration function. Anonymous
+			 * pages are part of swap space which also has its
+			 * own migration function. This is the most common
+			 * path for page migration.
+			 */
+			rc = mapping->a_ops->migratepage(mapping,
+							newpage, page);
+		else
+			rc = fallback_migrate_page(mapping, newpage, page);
+	}
 
 	if (rc) {
 		newpage->mapping = NULL;
@@ -641,7 +651,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	rc = -EAGAIN;
 
 	if (!trylock_page(page)) {
-		if (!force)
+		if (!force || !sync)
 			goto move_newpage;
 
 		/*
@@ -686,7 +696,15 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	BUG_ON(charge);
 
 	if (PageWriteback(page)) {
-		if (!force || !sync)
+		/*
+		 * For !sync, there is no point retrying as the retry loop
+		 * is expected to be too short for PageWriteback to be cleared
+		 */
+		if (!sync) {
+			rc = -EBUSY;
+			goto uncharge;
+		}
+		if (!force)
 			goto uncharge;
 		wait_on_page_writeback(page);
 	}
@@ -757,7 +775,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 
 skip_unmap:
 	if (!page_mapped(page))
-		rc = move_to_new_page(newpage, page, remap_swapcache);
+		rc = move_to_new_page(newpage, page, remap_swapcache, sync);
 
 	if (rc && remap_swapcache)
 		remove_migration_ptes(page, page);
@@ -850,7 +868,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 
 	if (!page_mapped(hpage))
-		rc = move_to_new_page(new_hpage, hpage, 1);
+		rc = move_to_new_page(new_hpage, hpage, 1, sync);
 
 	if (rc)
 		remove_migration_ptes(hpage, hpage);