author    | Christoph Lameter <clameter@sgi.com>  | 2006-06-23 05:03:53 -0400
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-06-23 10:42:53 -0400
commit    | 95a402c3847cc16f4ba03013cd01404fa0f14c2e (patch)
tree      | 0fd9b3379f70cc99b2325bccaa150089abf6c8b3 /mm/mempolicy.c
parent    | aaa994b300a172afafab47938804836b923e5ef7 (diff)
[PATCH] page migration: use allocator function for migrate_pages()
Instead of passing a list of new pages, pass a function to allocate a new
page. This allows the correct placement of MPOL_INTERLEAVE pages during page
migration. It also further simplifies the callers of migrate_pages().
migrate_pages() becomes similar to migrate_pages_to() so drop
migrate_pages_to(). The batching of new page allocations becomes unnecessary.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Jes Sorensen <jes@trained-monkey.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
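
To illustrate the interface change described in the message above: migrate_pages() now takes the list of isolated pages, an allocator callback, and an opaque "private" value that is forwarded to the callback on every allocation (a node id in migrate_to_node(), a vm_area_struct pointer in do_mbind()). The sketch below is illustrative only; the wrapper migrate_list_to_node() and the callback new_target_page() are hypothetical names that merely mirror the patch's own new_node_page()/migrate_to_node() pairing.

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/migrate.h>

/* Hypothetical allocator callback: 'private' carries the target node id,
 * just as in the patch's new_node_page(). */
static struct page *new_target_page(struct page *page, unsigned long private)
{
	return alloc_pages_node((int)private, GFP_HIGHUSER, 0);
}

/* Hypothetical wrapper mirroring migrate_to_node(): hand the isolated pages
 * and the allocator to migrate_pages(); the third argument is the opaque
 * value that migrate_pages() passes through to the callback. */
static int migrate_list_to_node(struct list_head *pagelist, int target_nid)
{
	int err = 0;

	if (!list_empty(pagelist))
		err = migrate_pages(pagelist, new_target_page,
					(unsigned long)target_nid);
	return err;
}

Because the destination page is allocated per source page at migration time, an interleaved policy can be honoured page by page, which is why the old scheme of batching pre-allocated target pages is no longer needed.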
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r-- | mm/mempolicy.c | 23 |
1 file changed, 21 insertions, 2 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 244f3f130e4a..f432642e9e66 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -87,6 +87,7 @@
 #include <linux/seq_file.h>
 #include <linux/proc_fs.h>
 #include <linux/migrate.h>
+#include <linux/rmap.h>
 
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -587,6 +588,11 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 	isolate_lru_page(page, pagelist);
 }
 
+static struct page *new_node_page(struct page *page, unsigned long node)
+{
+	return alloc_pages_node(node, GFP_HIGHUSER, 0);
+}
+
 /*
  * Migrate pages from one node to a target node.
  * Returns error or the number of pages not migrated.
@@ -604,7 +610,8 @@ int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags)
 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 
 	if (!list_empty(&pagelist))
-		err = migrate_pages_to(&pagelist, NULL, dest);
+		err = migrate_pages(&pagelist, new_node_page, dest);
+
 	return err;
 }
 
@@ -691,6 +698,12 @@ int do_migrate_pages(struct mm_struct *mm,
 
 }
 
+static struct page *new_vma_page(struct page *page, unsigned long private)
+{
+	struct vm_area_struct *vma = (struct vm_area_struct *)private;
+
+	return alloc_page_vma(GFP_HIGHUSER, vma, page_address_in_vma(page, vma));
+}
 #else
 
 static void migrate_page_add(struct page *page, struct list_head *pagelist,
@@ -703,6 +716,11 @@ int do_migrate_pages(struct mm_struct *mm,
 {
 	return -ENOSYS;
 }
+
+static struct page *new_vma_page(struct page *page, unsigned long private)
+{
+	return NULL;
+}
 #endif
 
 long do_mbind(unsigned long start, unsigned long len,
@@ -764,7 +782,8 @@ long do_mbind(unsigned long start, unsigned long len,
 		err = mbind_range(vma, start, end, new);
 
 		if (!list_empty(&pagelist))
-			nr_failed = migrate_pages_to(&pagelist, vma, -1);
+			nr_failed = migrate_pages(&pagelist, new_vma_page,
+						(unsigned long)vma);
 
 		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
 			err = -EIO;