-rw-r--r--	mm/mempolicy.c	21
-rw-r--r--	mm/rmap.c	7
2 files changed, 23 insertions, 5 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index c1592a94582f..83c69f8a64c2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -722,12 +722,29 @@ out:
 
 }
 
+/*
+ * Allocate a new page for page migration based on vma policy.
+ * Start assuming that page is mapped by vma pointed to by @private.
+ * Search forward from there, if not.  N.B., this assumes that the
+ * list of pages handed to migrate_pages()--which is how we get here--
+ * is in virtual address order.
+ */
 static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
 {
 	struct vm_area_struct *vma = (struct vm_area_struct *)private;
+	unsigned long uninitialized_var(address);
 
-	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
-					page_address_in_vma(page, vma));
+	while (vma) {
+		address = page_address_in_vma(page, vma);
+		if (address != -EFAULT)
+			break;
+		vma = vma->vm_next;
+	}
+
+	/*
+	 * if !vma, alloc_page_vma() will use task or system default policy
+	 */
+	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 }
 #else
 
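The rewritten new_vma_page() no longer assumes that @private points at the vma that actually maps the page: it walks vma->vm_next until page_address_in_vma() stops returning -EFAULT, which is sufficient only because the page list handed to migrate_pages() is in ascending virtual address order. Below is a minimal userspace sketch of that forward search; the structures, the EFAULT_ADDR sentinel, and the concrete addresses are illustrative stand-ins, not the kernel's types.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the kernel structures; fields are illustrative only. */
struct vma {
	unsigned long vm_start, vm_end;	/* address range mapped by this vma */
	unsigned long vm_pgoff;		/* page offset of vm_start in the mapping */
	struct vma *vm_next;		/* next vma, in ascending address order */
};

#define PAGE_SHIFT 12
#define EFAULT_ADDR ((unsigned long)-14)	/* stand-in for -EFAULT */

/* Analogue of page_address_in_vma(): map a page index to a user address,
 * or return the sentinel when the vma does not cover that index. */
static unsigned long address_in_vma(unsigned long pgoff, const struct vma *vma)
{
	unsigned long addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return EFAULT_ADDR;
	return addr;
}

/* The forward search from the patch: start at the caller-supplied vma and
 * advance until one covers the page.  Scanning forward only is enough
 * because the pages are presented in ascending virtual address order. */
static const struct vma *find_vma_forward(unsigned long pgoff,
					  const struct vma *vma,
					  unsigned long *addr)
{
	*addr = EFAULT_ADDR;
	while (vma) {
		*addr = address_in_vma(pgoff, vma);
		if (*addr != EFAULT_ADDR)
			break;
		vma = vma->vm_next;
	}
	return vma;
}

int main(void)
{
	/* Two mappings: pages 0-3 at 0x1000, pages 16-19 at 0x20000. */
	struct vma high = { 0x20000, 0x24000, 16, NULL };
	struct vma low  = { 0x01000, 0x05000,  0, &high };
	unsigned long pgoffs[] = { 1, 3, 17 };	/* ascending, like the migrate list */

	for (int i = 0; i < 3; i++) {
		unsigned long addr;
		const struct vma *vma = find_vma_forward(pgoffs[i], &low, &addr);

		if (vma)
			printf("page %lu -> vma [%#lx,%#lx) addr %#lx\n",
			       pgoffs[i], vma->vm_start, vma->vm_end, addr);
	}
	return 0;
}

In the kernel, the (vma, address) pair found this way is passed to alloc_page_vma(), so the replacement page is allocated under the policy of the vma that really maps the page; if no vma matches, alloc_page_vma() falls back to the task or system default policy, as the added comment notes.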
diff --git a/mm/rmap.c b/mm/rmap.c
index 8990f909492f..dc3be5f5b0da 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -183,7 +183,9 @@ static void page_unlock_anon_vma(struct anon_vma *anon_vma)
 }
 
 /*
- * At what user virtual address is page expected in vma?
+ * At what user virtual address is page expected in @vma?
+ * Returns virtual address or -EFAULT if page's index/offset is not
+ * within the range mapped by @vma.
  */
 static inline unsigned long
 vma_address(struct page *page, struct vm_area_struct *vma)
@@ -193,8 +195,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 
 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
-		/* page should be within @vma mapping range */
-		BUG_ON(!PageAnon(page));
+		/* page should be within @vma mapping range */
 		return -EFAULT;
 	}
 	return address;
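For reference, vma_address() turns the page's index in its mapping into a user virtual address with vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT), and the updated comment documents the -EFAULT return that new_vma_page() now relies on to step to the next vma. A small worked example of that arithmetic follows; the concrete offsets and addresses are made up for illustration.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages */

int main(void)
{
	/* Hypothetical vma: maps file pages 8..23 at [0x400000, 0x410000). */
	unsigned long vm_start = 0x400000, vm_end = 0x410000, vm_pgoff = 8;

	/* A page with index 10 in that file ... */
	unsigned long pgoff = 10;
	unsigned long address = vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT);

	/* ... lands two pages past vm_start: 0x400000 + 2 * 0x1000 = 0x402000. */
	assert(address >= vm_start && address < vm_end);
	printf("expected at %#lx\n", address);

	/* A page with index 40 falls outside the mapped range, so the kernel
	 * function would return -EFAULT and new_vma_page() would try vm_next. */
	pgoff = 40;
	address = vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT);
	assert(address >= vm_end);	/* out of range: -EFAULT in the kernel */
	return 0;
}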