Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--   mm/mempolicy.c | 113
1 file changed, 23 insertions(+), 90 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 96195dcb62e1..e93cc740c22b 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -86,6 +86,7 @@
 #include <linux/swap.h>
 #include <linux/seq_file.h>
 #include <linux/proc_fs.h>
+#include <linux/migrate.h>
 
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -95,9 +96,6 @@
 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
 #define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)  /* Gather statistics */
 
-/* The number of pages to migrate per call to migrate_pages() */
-#define MIGRATE_CHUNK_SIZE 256
-
 static struct kmem_cache *policy_cache;
 static struct kmem_cache *sn_cache;
 
@@ -331,17 +329,10 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
        struct vm_area_struct *first, *vma, *prev;
 
        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-               /* Must have swap device for migration */
-               if (nr_swap_pages <= 0)
-                       return ERR_PTR(-ENODEV);
 
-               /*
-                * Clear the LRU lists so pages can be isolated.
-                * Note that pages may be moved off the LRU after we have
-                * drained them. Those pages will fail to migrate like other
-                * pages that may be busy.
-                */
-               lru_add_drain_all();
+               err = migrate_prep();
+               if (err)
+                       return ERR_PTR(err);
        }
 
        first = find_vma(mm, start);
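
The swap-device check and the LRU drain deleted above are not lost: they move behind the new migrate_prep() call that the added <linux/migrate.h> include brings in. The helper itself presumably lives with the rest of the migration code (e.g. mm/migrate.c); a minimal sketch of it, reassembled purely from the lines removed in this hunk:

        int migrate_prep(void)
        {
                /* Must have swap device for migration */
                if (nr_swap_pages <= 0)
                        return -ENODEV;

                /*
                 * Clear the LRU lists so pages can be isolated. Pages may
                 * still be moved off the LRU after the drain; those simply
                 * fail to migrate like any other busy page.
                 */
                lru_add_drain_all();

                return 0;
        }

check_range() now only needs to forward the helper's result through ERR_PTR() on failure.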
@@ -550,92 +541,18 @@ long do_get_mempolicy(int *policy, nodemask_t *nmask,
        return err;
 }
 
+#ifdef CONFIG_MIGRATION
 /*
  * page migration
  */
-
 static void migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags)
 {
        /*
         * Avoid migrating a page that is shared with others.
         */
-       if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
-               if (isolate_lru_page(page))
-                       list_add_tail(&page->lru, pagelist);
-       }
-}
-
-/*
- * Migrate the list 'pagelist' of pages to a certain destination.
- *
- * Specify destination with either non-NULL vma or dest_node >= 0
- * Return the number of pages not migrated or error code
- */
-static int migrate_pages_to(struct list_head *pagelist,
-                       struct vm_area_struct *vma, int dest)
-{
-       LIST_HEAD(newlist);
-       LIST_HEAD(moved);
-       LIST_HEAD(failed);
-       int err = 0;
-       unsigned long offset = 0;
-       int nr_pages;
-       struct page *page;
-       struct list_head *p;
-
-redo:
-       nr_pages = 0;
-       list_for_each(p, pagelist) {
-               if (vma) {
-                       /*
-                        * The address passed to alloc_page_vma is used to
-                        * generate the proper interleave behavior. We fake
-                        * the address here by an increasing offset in order
-                        * to get the proper distribution of pages.
-                        *
-                        * No decision has been made as to which page
-                        * a certain old page is moved to so we cannot
-                        * specify the correct address.
-                        */
-                       page = alloc_page_vma(GFP_HIGHUSER, vma,
-                                       offset + vma->vm_start);
-                       offset += PAGE_SIZE;
-               }
-               else
-                       page = alloc_pages_node(dest, GFP_HIGHUSER, 0);
-
-               if (!page) {
-                       err = -ENOMEM;
-                       goto out;
-               }
-               list_add_tail(&page->lru, &newlist);
-               nr_pages++;
-               if (nr_pages > MIGRATE_CHUNK_SIZE)
-                       break;
-       }
-       err = migrate_pages(pagelist, &newlist, &moved, &failed);
-
-       putback_lru_pages(&moved);      /* Call release pages instead ?? */
-
-       if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist))
-               goto redo;
-out:
-       /* Return leftover allocated pages */
-       while (!list_empty(&newlist)) {
-               page = list_entry(newlist.next, struct page, lru);
-               list_del(&page->lru);
-               __free_page(page);
-       }
-       list_splice(&failed, pagelist);
-       if (err < 0)
-               return err;
-
-       /* Calculate number of leftover pages */
-       nr_pages = 0;
-       list_for_each(p, pagelist)
-               nr_pages++;
-       return nr_pages;
-}
+       if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
+               isolate_lru_page(page, pagelist);
 }
 
 /*
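
migrate_page_add() shrinks because isolate_lru_page() now takes the caller's page list as a second argument. Assuming the helper queues the page on that list itself when isolation succeeds (a reading of this call site, not of the helper's actual definition), the calling pattern changes like this:

        /* before: isolate, then chain the page onto the list by hand */
        if (isolate_lru_page(page))
                list_add_tail(&page->lru, pagelist);

        /* after: the helper moves the page onto pagelist directly */
        isolate_lru_page(page, pagelist);

The removal of migrate_pages_to() and of MIGRATE_CHUNK_SIZE earlier in the patch fits the same picture: allocating target pages and batching the transfer presumably become the migration core's job rather than mempolicy's.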
@@ -742,8 +659,23 @@ int do_migrate_pages(struct mm_struct *mm,
        if (err < 0)
                return err;
        return busy;
+
 }
 
+#else
+
+static void migrate_page_add(struct page *page, struct list_head *pagelist,
+                               unsigned long flags)
+{
+}
+
+int do_migrate_pages(struct mm_struct *mm,
+       const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
+{
+       return -ENOSYS;
+}
+#endif
+
 long do_mbind(unsigned long start, unsigned long len,
                unsigned long mode, nodemask_t *nmask, unsigned long flags)
 {
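
The new #else branch gives !CONFIG_MIGRATION builds compilable fallbacks: migrate_page_add() isolates nothing, so page lists stay empty, and do_migrate_pages() reports -ENOSYS. Callers can then stay free of #ifdefs; an illustrative (hypothetical) call site:

        int err;

        err = do_migrate_pages(mm, &from_nodes, &to_nodes, MPOL_MF_MOVE);
        if (err == -ENOSYS) {
                /* this kernel was built without page migration support */
        }

Here from_nodes and to_nodes stand for whatever nodemasks the real caller computes; only the -ENOSYS convention comes from the patch.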
@@ -808,6 +740,7 @@ long do_mbind(unsigned long start, unsigned long len,
                if (!err && nr_failed && (flags & MPOL_MF_STRICT))
                        err = -EIO;
        }
+
        if (!list_empty(&pagelist))
                putback_lru_pages(&pagelist);
 