| author | Jeff Garzik <jeff@garzik.org> | 2006-02-27 11:49:05 -0500 |
|---|---|---|
| committer | Jeff Garzik <jeff@garzik.org> | 2006-02-27 11:49:05 -0500 |
| commit | cccc65a3b60edaf721cdee5a14f68ba009341822 | |
| tree | a458d28f70cbe0e848596aad6005442a2621d388 /mm/mempolicy.c | |
| parent | ba70d0614728892b86b2be2f7eae0c696b436461 | |
| parent | e95a9ec1bb66e07b138861c743192f06e7b3e4de | |
Merge branch 'master'
Diffstat (limited to 'mm/mempolicy.c')
| -rw-r--r-- | mm/mempolicy.c | 26 |
1 file changed, 20 insertions, 6 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index bedfa4f09c80..67af4cea1e23 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -552,7 +552,7 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
          */
         if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
                 if (isolate_lru_page(page))
-                        list_add(&page->lru, pagelist);
+                        list_add_tail(&page->lru, pagelist);
         }
 }
 
@@ -569,6 +569,7 @@ static int migrate_pages_to(struct list_head *pagelist,
         LIST_HEAD(moved);
         LIST_HEAD(failed);
         int err = 0;
+        unsigned long offset = 0;
         int nr_pages;
         struct page *page;
         struct list_head *p;
@@ -576,8 +577,21 @@ static int migrate_pages_to(struct list_head *pagelist,
 redo:
         nr_pages = 0;
         list_for_each(p, pagelist) {
-                if (vma)
-                        page = alloc_page_vma(GFP_HIGHUSER, vma, vma->vm_start);
+                if (vma) {
+                        /*
+                         * The address passed to alloc_page_vma is used to
+                         * generate the proper interleave behavior. We fake
+                         * the address here by an increasing offset in order
+                         * to get the proper distribution of pages.
+                         *
+                         * No decision has been made as to which page
+                         * a certain old page is moved to so we cannot
+                         * specify the correct address.
+                         */
+                        page = alloc_page_vma(GFP_HIGHUSER, vma,
+                                        offset + vma->vm_start);
+                        offset += PAGE_SIZE;
+                }
                 else
                         page = alloc_pages_node(dest, GFP_HIGHUSER, 0);
 
@@ -585,9 +599,9 @@ redo:
                         err = -ENOMEM;
                         goto out;
                 }
-                list_add(&page->lru, &newlist);
+                list_add_tail(&page->lru, &newlist);
                 nr_pages++;
-                if (nr_pages > MIGRATE_CHUNK_SIZE);
+                if (nr_pages > MIGRATE_CHUNK_SIZE)
                         break;
         }
         err = migrate_pages(pagelist, &newlist, &moved, &failed);
@@ -808,7 +822,7 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
         nodes_clear(*nodes);
         if (maxnode == 0 || !nmask)
                 return 0;
-        if (maxnode > PAGE_SIZE)
+        if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
                 return -EINVAL;
 
         nlongs = BITS_TO_LONGS(maxnode);
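The core of the mempolicy change above is the fake-address trick in migrate_pages_to(): for an interleaved policy, the node picked by alloc_page_vma() depends on the page-aligned offset of the address it is given, so passing vma->vm_start plus an offset that grows by PAGE_SIZE per allocation spreads the replacement pages evenly over the interleave nodes, as the new in-code comment explains. Below is a minimal user-space sketch of that distribution, not kernel code: NR_NODES and pick_interleave_node() are made-up stand-ins for the kernel's interleave selection (roughly interleave_nid()/offset_il_node() in mm/mempolicy.c), reduced to a simple modulo mapping.

```c
#include <stdio.h>

#define PAGE_SIZE       4096UL  /* assumed page size for this sketch */
#define NR_NODES        4       /* assume 4 nodes in the interleave mask */

/*
 * Simplified stand-in for the kernel's interleave node selection:
 * the node is a function of the page index derived from the address.
 */
static int pick_interleave_node(unsigned long vm_start, unsigned long addr)
{
        return (int)(((addr - vm_start) / PAGE_SIZE) % NR_NODES);
}

int main(void)
{
        unsigned long vm_start = 0x10000000UL;
        unsigned long offset = 0;

        /*
         * Allocating replacement pages with an increasing fake offset
         * round-robins them over the interleave nodes; reusing the fixed
         * address vma->vm_start (offset always 0) would map every
         * replacement page to the same node.
         */
        for (int i = 0; i < 8; i++) {
                int nid = pick_interleave_node(vm_start, vm_start + offset);
                printf("replacement page %d -> node %d\n", i, nid);
                offset += PAGE_SIZE;
        }
        return 0;
}
```

The get_nodes() hunk at the end is a separate fix: maxnode counts bits in the user-supplied nodemask, so the sanity limit is now a full page worth of bits (PAGE_SIZE*BITS_PER_BYTE) rather than only PAGE_SIZE bits.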
