author		Christoph Lameter <clameter@sgi.com>	2006-06-23 05:03:51 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-23 10:42:52 -0400
commit		e24f0b8f76cc3dd96f36f5b6a9f020f6c3fce198
tree		2c6ca6f0385d3d36135855f77a0474188cf33842
parent		8f9de51a4a98ba32f839903b7d009788bc2c295d
[PATCH] page migration: simplify migrate_pages()
Currently migrate_pages() is a mess with lots of gotos. Extract two functions
from migrate_pages() and get rid of the gotos.
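
For orientation, this is the shape of the loop after the refactor, condensed
from the hunks below (identifiers exactly as in the patch; not compilable on
its own since the surrounding function is elided):

	for(pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			if (list_empty(to))
				break;

			cond_resched();

			/* lock, unmap and migrate one page; force after pass 2 */
			rc = unmap_and_move(lru_to_page(to), page, pass > 2);

			switch(rc) {
			case -EAGAIN:
				retry++;			/* retry on a later pass */
				break;
			case 0:
				list_move(&page->lru, moved);	/* migrated */
				break;
			default:
				list_move(&page->lru, failed);	/* permanent failure */
				nr_failed++;
				break;
			}
		}
	}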
Plus we can just unconditionally set the locked bit on the new page since we
are the only one holding a reference. Locking is there to stop others from
accessing the page once we establish references to the new page.
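
Concretely, the new move_to_new_page() in the patch below takes the lock with
a plain test-and-set and treats contention as a bug rather than sleeping on it:

	/*
	 * We hold the only reference to newpage, so its lock must be free;
	 * taking it here just blocks others from touching the new page while
	 * we establish references to it.
	 */
	if (TestSetPageLocked(newpage))
		BUG();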
Remove the list_del from move_to_lru in order to have finer control over list
processing.
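
With the list_del() gone from move_to_lru(), each caller now unlinks the page
itself; putback_lru_pages() in the hunk below becomes:

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);	/* caller unlinks from its private list */
		move_to_lru(page);	/* move_to_lru() only puts it back on the LRU */
		count++;
	}

and unmap_and_move() likewise only unlinks the new page once the result is
final (anything but -EAGAIN).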
[akpm@osdl.org: add debug check]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Jes Sorensen <jes@trained-monkey.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	mm/migrate.c	218
1 file changed, 115 insertions(+), 103 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index b5000d463893..09038163bfec 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -84,7 +84,6 @@ int migrate_prep(void)
 
 static inline void move_to_lru(struct page *page)
 {
-	list_del(&page->lru);
 	if (PageActive(page)) {
 		/*
 		 * lru_cache_add_active checks that
@@ -110,6 +109,7 @@ int putback_lru_pages(struct list_head *l)
 	int count = 0;
 
 	list_for_each_entry_safe(page, page2, l, lru) {
+		list_del(&page->lru);
 		move_to_lru(page);
 		count++;
 	}
@@ -534,11 +534,108 @@ static int fallback_migrate_page(struct address_space *mapping,
 }
 
 /*
+ * Move a page to a newly allocated page
+ * The page is locked and all ptes have been successfully removed.
+ *
+ * The new page will have replaced the old page if this function
+ * is successful.
+ */
+static int move_to_new_page(struct page *newpage, struct page *page)
+{
+	struct address_space *mapping;
+	int rc;
+
+	/*
+	 * Block others from accessing the page when we get around to
+	 * establishing additional references. We are the only one
+	 * holding a reference to the new page at this point.
+	 */
+	if (TestSetPageLocked(newpage))
+		BUG();
+
+	/* Prepare mapping for the new page.*/
+	newpage->index = page->index;
+	newpage->mapping = page->mapping;
+
+	mapping = page_mapping(page);
+	if (!mapping)
+		rc = migrate_page(mapping, newpage, page);
+	else if (mapping->a_ops->migratepage)
+		/*
+		 * Most pages have a mapping and most filesystems
+		 * should provide a migration function. Anonymous
+		 * pages are part of swap space which also has its
+		 * own migration function. This is the most common
+		 * path for page migration.
+		 */
+		rc = mapping->a_ops->migratepage(mapping,
+						newpage, page);
+	else
+		rc = fallback_migrate_page(mapping, newpage, page);
+
+	if (!rc)
+		remove_migration_ptes(page, newpage);
+	else
+		newpage->mapping = NULL;
+
+	unlock_page(newpage);
+
+	return rc;
+}
+
+/*
+ * Obtain the lock on page, remove all ptes and migrate the page
+ * to the newly allocated page in newpage.
+ */
+static int unmap_and_move(struct page *newpage, struct page *page, int force)
+{
+	int rc = 0;
+
+	if (page_count(page) == 1)
+		/* page was freed from under us. So we are done. */
+		goto ret;
+
+	rc = -EAGAIN;
+	if (TestSetPageLocked(page)) {
+		if (!force)
+			goto ret;
+		lock_page(page);
+	}
+
+	if (PageWriteback(page)) {
+		if (!force)
+			goto unlock;
+		wait_on_page_writeback(page);
+	}
+
+	/*
+	 * Establish migration ptes or remove ptes
+	 */
+	if (try_to_unmap(page, 1) != SWAP_FAIL) {
+		if (!page_mapped(page))
+			rc = move_to_new_page(newpage, page);
+	} else
+		/* A vma has VM_LOCKED set -> permanent failure */
+		rc = -EPERM;
+
+	if (rc)
+		remove_migration_ptes(page, page);
+unlock:
+	unlock_page(page);
+ret:
+	if (rc != -EAGAIN) {
+		list_del(&newpage->lru);
+		move_to_lru(newpage);
+	}
+	return rc;
+}
+
+/*
  * migrate_pages
  *
  * Two lists are passed to this function. The first list
  * contains the pages isolated from the LRU to be migrated.
- * The second list contains new pages that the pages isolated
+ * The second list contains new pages that the isolated pages
  * can be moved to.
 *
 * The function returns after 10 attempts or if no pages
@@ -550,7 +647,7 @@ static int fallback_migrate_page(struct address_space *mapping,
 int migrate_pages(struct list_head *from, struct list_head *to,
 		struct list_head *moved, struct list_head *failed)
 {
-	int retry;
+	int retry = 1;
 	int nr_failed = 0;
 	int pass = 0;
 	struct page *page;
@@ -561,118 +658,33 @@ int migrate_pages(struct list_head *from, struct list_head *to,
 	if (!swapwrite)
 		current->flags |= PF_SWAPWRITE;
 
-redo:
-	retry = 0;
-
-	list_for_each_entry_safe(page, page2, from, lru) {
-		struct page *newpage = NULL;
-		struct address_space *mapping;
-
-		cond_resched();
-
-		rc = 0;
-		if (page_count(page) == 1)
-			/* page was freed from under us. So we are done. */
-			goto next;
-
-		if (to && list_empty(to))
-			break;
-
-		/*
-		 * Skip locked pages during the first two passes to give the
-		 * functions holding the lock time to release the page. Later we
-		 * use lock_page() to have a higher chance of acquiring the
-		 * lock.
-		 */
-		rc = -EAGAIN;
-		if (pass > 2)
-			lock_page(page);
-		else
-			if (TestSetPageLocked(page))
-				goto next;
-
-		/*
-		 * Only wait on writeback if we have already done a pass where
-		 * we we may have triggered writeouts for lots of pages.
-		 */
-		if (pass > 0)
-			wait_on_page_writeback(page);
-		else
-			if (PageWriteback(page))
-				goto unlock_page;
-
-		/*
-		 * Establish migration ptes or remove ptes
-		 */
-		rc = -EPERM;
-		if (try_to_unmap(page, 1) == SWAP_FAIL)
-			/* A vma has VM_LOCKED set -> permanent failure */
-			goto unlock_page;
+	for(pass = 0; pass < 10 && retry; pass++) {
+		retry = 0;
 
-		rc = -EAGAIN;
-		if (page_mapped(page))
-			goto unlock_page;
+		list_for_each_entry_safe(page, page2, from, lru) {
 
-		newpage = lru_to_page(to);
-		lock_page(newpage);
-		/* Prepare mapping for the new page.*/
-		newpage->index = page->index;
-		newpage->mapping = page->mapping;
+			if (list_empty(to))
+				break;
 
-		/*
-		 * Pages are properly locked and writeback is complete.
-		 * Try to migrate the page.
-		 */
-		mapping = page_mapping(page);
-		if (!mapping)
-			rc = migrate_page(mapping, newpage, page);
+			cond_resched();
 
-		else if (mapping->a_ops->migratepage)
-			/*
-			 * Most pages have a mapping and most filesystems
-			 * should provide a migration function. Anonymous
-			 * pages are part of swap space which also has its
-			 * own migration function. This is the most common
-			 * path for page migration.
-			 */
-			rc = mapping->a_ops->migratepage(mapping,
-							newpage, page);
-		else
-			rc = fallback_migrate_page(mapping, newpage, page);
-
-		if (!rc)
-			remove_migration_ptes(page, newpage);
-
-		unlock_page(newpage);
-
-unlock_page:
-		if (rc)
-			remove_migration_ptes(page, page);
+			rc = unmap_and_move(lru_to_page(to), page, pass > 2);
 
-		unlock_page(page);
-
-next:
-		if (rc) {
-			if (newpage)
-				newpage->mapping = NULL;
-
-			if (rc == -EAGAIN)
+			switch(rc) {
+			case -EAGAIN:
 				retry++;
-			else {
+				break;
+			case 0:
+				list_move(&page->lru, moved);
+				break;
+			default:
 				/* Permanent failure */
 				list_move(&page->lru, failed);
 				nr_failed++;
+				break;
 			}
-		} else {
-			if (newpage) {
-				/* Successful migration. Return page to LRU */
-				move_to_lru(newpage);
-			}
-			list_move(&page->lru, moved);
 		}
 	}
-	if (retry && pass++ < 10)
-		goto redo;
 
 	if (!swapwrite)
 		current->flags &= ~PF_SWAPWRITE;