Diffstat (limited to 'mm/mempolicy.c')

 -rw-r--r--   mm/mempolicy.c   29
 1 files changed, 17 insertions, 12 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3171f884d245..551cde40520b 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -208,6 +208,17 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                 page = vm_normal_page(vma, addr, *pte);
                 if (!page)
                         continue;
+                /*
+                 * The check for PageReserved here is important to avoid
+                 * handling zero pages and other pages that may have been
+                 * marked special by the system.
+                 *
+                 * If the PageReserved would not be checked here then f.e.
+                 * the location of the zero page could have an influence
+                 * on MPOL_MF_STRICT, zero pages would be counted for
+                 * the per node stats, and there would be useless attempts
+                 * to put zero pages on the migration list.
+                 */
                 if (PageReserved(page))
                         continue;
                 nid = page_to_nid(page);
@@ -216,11 +227,8 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
                 if (flags & MPOL_MF_STATS)
                         gather_stats(page, private);
-                else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-                        spin_unlock(ptl);
+                else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
                         migrate_page_add(vma, page, private, flags);
-                        spin_lock(ptl);
-                }
                 else
                         break;
         } while (pte++, addr += PAGE_SIZE, addr != end);
@@ -309,6 +317,10 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
         int err;
         struct vm_area_struct *first, *vma, *prev;
 
+        /* Clear the LRU lists so pages can be isolated */
+        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+                lru_add_drain_all();
+
         first = find_vma(mm, start);
         if (!first)
                 return ERR_PTR(-EFAULT);
@@ -555,15 +567,8 @@ static void migrate_page_add(struct vm_area_struct *vma,
         if ((flags & MPOL_MF_MOVE_ALL) || !page->mapping || PageAnon(page) ||
             mapping_writably_mapped(page->mapping) ||
             single_mm_mapping(vma->vm_mm, page->mapping)) {
-                int rc = isolate_lru_page(page);
-
-                if (rc == 1)
+                if (isolate_lru_page(page))
                         list_add(&page->lru, pagelist);
-                /*
-                 * If the isolate attempt was not successful then we just
-                 * encountered an unswappable page. Something must be wrong.
-                 */
-                WARN_ON(rc == 0);
         }
 }
 
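For reference, the sketch below reconstructs migrate_page_add() as it reads after this patch, assembled from the context and added lines of the final hunk. The parameter list beyond the vma argument visible in the hunk header, and all comments, are assumptions rather than part of the diff; likewise, isolate_lru_page() is assumed to return non-zero once it has taken the page off its LRU list, which is what the new one-line call implies.

/* Sketch only: parameters beyond 'vma' and all comments are assumed. */
static void migrate_page_add(struct vm_area_struct *vma,
                struct page *page, struct list_head *pagelist,
                unsigned long flags)
{
        /* Guard condition copied from the hunk's unchanged context lines. */
        if ((flags & MPOL_MF_MOVE_ALL) || !page->mapping || PageAnon(page) ||
            mapping_writably_mapped(page->mapping) ||
            single_mm_mapping(vma->vm_mm, page->mapping)) {
                /*
                 * A failed isolation attempt is now skipped silently;
                 * the old WARN_ON(rc == 0) is gone.
                 */
                if (isolate_lru_page(page))
                        list_add(&page->lru, pagelist);
        }
}

Read together with the second hunk, which drops the spin_unlock()/spin_lock() pair around the call, this appears to assume that isolate_lru_page() does not sleep, so migrate_page_add() can safely run with the page-table lock still held.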