Diffstat (limited to 'mm')
-rw-r--r--  mm/mempolicy.c |  23
-rw-r--r--  mm/migrate.c   | 115
2 files changed, 53 insertions(+), 85 deletions(-)
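This patch converts the page migration core from a pair of page lists to a per-page allocator callback: instead of preallocating all target pages up front, the caller passes a function that allocates the destination page for each page as it is migrated. The new_page_t type itself is introduced outside mm/ (the diffstat above is limited to 'mm'), in include/linux/migrate.h; a sketch of the assumed contract:

    /* Assumed declaration from include/linux/migrate.h (not in this diff):
     * 'page' is the page about to be migrated, 'private' is an opaque
     * word supplied by the caller of migrate_pages().
     */
    typedef struct page *new_page_t(struct page *page, unsigned long private);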
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 244f3f130e4a..f432642e9e66 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -87,6 +87,7 @@
 #include <linux/seq_file.h>
 #include <linux/proc_fs.h>
 #include <linux/migrate.h>
+#include <linux/rmap.h>
 
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -587,6 +588,11 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 	isolate_lru_page(page, pagelist);
 }
 
+static struct page *new_node_page(struct page *page, unsigned long node)
+{
+	return alloc_pages_node(node, GFP_HIGHUSER, 0);
+}
+
 /*
  * Migrate pages from one node to a target node.
  * Returns error or the number of pages not migrated.
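new_node_page() is the simplest possible callback: it ignores the source page and allocates an order-0 GFP_HIGHUSER page on the node passed in the private word. Any other policy can be plugged in the same way; for example, a hypothetical callback (not part of this patch) that falls back to the source page's own node when the target node is out of memory:

    static struct page *new_node_page_fallback(struct page *page,
    					unsigned long node)
    {
    	/* Try the requested node first ... */
    	struct page *new = alloc_pages_node(node, GFP_HIGHUSER, 0);
    
    	/* ... and fall back to the page's current node. */
    	if (!new)
    		new = alloc_pages_node(page_to_nid(page),
    					GFP_HIGHUSER, 0);
    	return new;
    }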
@@ -604,7 +610,8 @@ int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags)
 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 
 	if (!list_empty(&pagelist))
-		err = migrate_pages_to(&pagelist, NULL, dest);
+		err = migrate_pages(&pagelist, new_node_page, dest);
+
 	return err;
 }
 
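Note how the destination is now encoded entirely in the callback plus its private word. migrate_to_node() passes the node id as a scalar here, while do_mbind() further down passes a pointer instead, cast to unsigned long:

    err = migrate_pages(&pagelist, new_node_page, dest);
    nr_failed = migrate_pages(&pagelist, new_vma_page, (unsigned long)vma);

Both conventions work because unsigned long is wide enough to hold a pointer on every architecture the kernel supports.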
@@ -691,6 +698,12 @@ int do_migrate_pages(struct mm_struct *mm,
 
 }
 
+static struct page *new_vma_page(struct page *page, unsigned long private)
+{
+	struct vm_area_struct *vma = (struct vm_area_struct *)private;
+
+	return alloc_page_vma(GFP_HIGHUSER, vma, page_address_in_vma(page, vma));
+}
 #else
 
 static void migrate_page_add(struct page *page, struct list_head *pagelist,
@@ -703,6 +716,11 @@ int do_migrate_pages(struct mm_struct *mm,
 {
 	return -ENOSYS;
 }
+
+static struct page *new_vma_page(struct page *page, unsigned long private)
+{
+	return NULL;
+}
 #endif
 
 long do_mbind(unsigned long start, unsigned long len,
@@ -764,7 +782,8 @@ long do_mbind(unsigned long start, unsigned long len,
 		err = mbind_range(vma, start, end, new);
 
 		if (!list_empty(&pagelist))
-			nr_failed = migrate_pages_to(&pagelist, vma, -1);
+			nr_failed = migrate_pages(&pagelist, new_vma_page,
+						(unsigned long)vma);
 
 		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
 			err = -EIO;
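new_vma_page() recovers the page's actual user address via page_address_in_vma(), so alloc_page_vma() can apply the correct per-address policy, interleaving in particular. That is what makes the fake increasing-offset trick in the old migrate_pages_to() (removed below) unnecessary. page_address_in_vma() is implemented in mm/rmap.c, which is why this diff adds the linux/rmap.h include at the top of the file.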
diff --git a/mm/migrate.c b/mm/migrate.c
index d3a1810a4c9f..251a8d158257 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -28,9 +28,6 @@
 
 #include "internal.h"
 
-/* The maximum number of pages to take off the LRU for migration */
-#define MIGRATE_CHUNK_SIZE 256
-
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
 
 /*
@@ -587,18 +584,23 @@ static int move_to_new_page(struct page *newpage, struct page *page)
  * Obtain the lock on page, remove all ptes and migrate the page
  * to the newly allocated page in newpage.
  */
-static int unmap_and_move(struct page *newpage, struct page *page, int force)
+static int unmap_and_move(new_page_t get_new_page, unsigned long private,
+			struct page *page, int force)
 {
 	int rc = 0;
+	struct page *newpage = get_new_page(page, private);
+
+	if (!newpage)
+		return -ENOMEM;
 
 	if (page_count(page) == 1)
 		/* page was freed from under us. So we are done. */
-		goto ret;
+		goto move_newpage;
 
 	rc = -EAGAIN;
 	if (TestSetPageLocked(page)) {
 		if (!force)
-			goto ret;
+			goto move_newpage;
 		lock_page(page);
 	}
 
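unmap_and_move() now owns the allocation: the destination page is requested from the callback before anything else, and a NULL return is reported as -ENOMEM without the source page ever being locked or removed from the caller's list. All later exit paths funnel through the new move_newpage label instead of ret, as the next two hunks show.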
@@ -622,7 +624,7 @@ static int unmap_and_move(struct page *newpage, struct page *page, int force)
 	remove_migration_ptes(page, page);
 unlock:
 	unlock_page(page);
-ret:
+
 	if (rc != -EAGAIN) {
 		/*
 		 * A page that has been migrated has all references
@@ -632,29 +634,33 @@ ret:
 		 */
 		list_del(&page->lru);
 		move_to_lru(page);
-
-		list_del(&newpage->lru);
-		move_to_lru(newpage);
 	}
+
+move_newpage:
+	/*
+	 * Move the new page to the LRU. If migration was not successful
+	 * then this will free the page.
+	 */
+	move_to_lru(newpage);
 	return rc;
 }
 
 /*
  * migrate_pages
  *
- * Two lists are passed to this function. The first list
- * contains the pages isolated from the LRU to be migrated.
- * The second list contains new pages that the isolated pages
- * can be moved to.
+ * The function takes one list of pages to migrate and a function
+ * that determines from the page to be migrated and the private data
+ * the target of the move and allocates the page.
  *
  * The function returns after 10 attempts or if no pages
  * are movable anymore or no retryable pages exist
  * anymore. All pages will be returned to the LRU
  * or freed.
  *
- * Return: Number of pages not migrated.
+ * Return: Number of pages not migrated or error code.
  */
-int migrate_pages(struct list_head *from, struct list_head *to)
+int migrate_pages(struct list_head *from,
+		new_page_t get_new_page, unsigned long private)
 {
 	int retry = 1;
 	int nr_failed = 0;
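Callers must now distinguish a negative error from a count of unmigrated pages. A minimal sketch of the assumed calling convention (target_nid is a hypothetical variable):

    int ret = migrate_pages(&pagelist, new_node_page, target_nid);
    
    if (ret < 0)
    	printk(KERN_WARNING "migration aborted: %d\n", ret);
    else if (ret > 0)
    	printk(KERN_INFO "%d pages not migrated\n", ret);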
@@ -671,15 +677,14 @@ int migrate_pages(struct list_head *from, struct list_head *to)
 		retry = 0;
 
 		list_for_each_entry_safe(page, page2, from, lru) {
-
-			if (list_empty(to))
-				break;
-
 			cond_resched();
 
-			rc = unmap_and_move(lru_to_page(to), page, pass > 2);
+			rc = unmap_and_move(get_new_page, private,
+						page, pass > 2);
 
 			switch(rc) {
+			case -ENOMEM:
+				goto out;
 			case -EAGAIN:
 				retry++;
 				break;
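The new -ENOMEM case aborts the retry loop entirely: if the allocator callback cannot provide a page, further passes are pointless. Control jumps to the out label added in the next hunk, where the failing rc is returned after the remaining pages, including the one that triggered the failure, are handed back to the LRU by putback_lru_pages().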
@@ -692,72 +697,16 @@ int migrate_pages(struct list_head *from, struct list_head *to)
 			}
 		}
 	}
-
+	rc = 0;
+out:
 	if (!swapwrite)
 		current->flags &= ~PF_SWAPWRITE;
 
 	putback_lru_pages(from);
-	return nr_failed + retry;
-}
 
-/*
- * Migrate the list 'pagelist' of pages to a certain destination.
- *
- * Specify destination with either non-NULL vma or dest_node >= 0
- * Return the number of pages not migrated or error code
- */
-int migrate_pages_to(struct list_head *pagelist,
-			struct vm_area_struct *vma, int dest)
-{
-	LIST_HEAD(newlist);
-	int err = 0;
-	unsigned long offset = 0;
-	int nr_pages;
-	int nr_failed = 0;
-	struct page *page;
-	struct list_head *p;
-
-redo:
-	nr_pages = 0;
-	list_for_each(p, pagelist) {
-		if (vma) {
-			/*
-			 * The address passed to alloc_page_vma is used to
-			 * generate the proper interleave behavior. We fake
-			 * the address here by an increasing offset in order
-			 * to get the proper distribution of pages.
-			 *
-			 * No decision has been made as to which page
-			 * a certain old page is moved to so we cannot
-			 * specify the correct address.
-			 */
-			page = alloc_page_vma(GFP_HIGHUSER, vma,
-					offset + vma->vm_start);
-			offset += PAGE_SIZE;
-		}
-		else
-			page = alloc_pages_node(dest, GFP_HIGHUSER, 0);
-
-		if (!page) {
-			err = -ENOMEM;
-			goto out;
-		}
-		list_add_tail(&page->lru, &newlist);
-		nr_pages++;
-		if (nr_pages > MIGRATE_CHUNK_SIZE)
-			break;
-	}
-	err = migrate_pages(pagelist, &newlist);
-
-	if (err >= 0) {
-		nr_failed += err;
-		if (list_empty(&newlist) && !list_empty(pagelist))
-			goto redo;
-	}
-out:
+	if (rc)
+		return rc;
 
-	/* Calculate number of leftover pages */
-	list_for_each(p, pagelist)
-		nr_failed++;
-	return nr_failed;
+	return nr_failed + retry;
 }
+
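The removal of migrate_pages_to() is the payoff: with one allocation per page, the MIGRATE_CHUNK_SIZE batching, the redo loop that kept the preallocated newlist in step with the pagelist, and the final leftover-page recount all become dead weight. Error handling also gets simpler, since an allocation failure is now reported at the exact page where it happened instead of being discovered while building a whole chunk of new pages.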