author		Nick Piggin <npiggin@suse.de>	2006-01-18 20:42:27 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-18 22:20:17 -0500
commit		053837fce7aa79025ed57656855df09f80175527 (patch)
tree		05d7615894131a368fc4943f641b11acdd2ae694 /mm/mempolicy.c
parent		e236a166b2bc437769a9b8b5d19186a3761bde48 (diff)
[PATCH] mm: migration page refcounting fix
Migration code currently does not take a reference to the target page
properly, so between unlocking the pte and trying to take a new
reference to the page with isolate_lru_page, anything could happen to
it.

Fix this by holding the pte lock until we get a chance to elevate the
refcount.

Other small cleanups while we're here.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
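The window being closed is easiest to see as a before/after pattern. The sketch
below is a minimal illustration of the locking order only, not kernel source:
scan_one_page() and its argument list are hypothetical, while spin_lock()/
spin_unlock(), isolate_lru_page() and list_add() are the real helpers the patch
rearranges. Header placement for isolate_lru_page() has varied across kernel
versions, so the includes are indicative only.

/* Hypothetical helper sketching the fixed ordering; not kernel source. */
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/swap.h>	/* isolate_lru_page(); header varies by version */

static void scan_one_page(spinlock_t *ptl, struct page *page,
			  struct list_head *pagelist)
{
	/*
	 * Buggy ordering (before the patch): the pte lock was dropped
	 * around migrate_page_add(), so nothing pinned the page between
	 * spin_unlock(ptl) and isolate_lru_page():
	 *
	 *	spin_unlock(ptl);
	 *	isolate_lru_page(page);	/* page may already be freed/reused */
	 *	spin_lock(ptl);
	 */

	/*
	 * Fixed ordering: hold the pte lock until isolate_lru_page()
	 * has taken the page off the LRU and elevated its refcount;
	 * only then can the page safely outlive the pte mapping.
	 */
	spin_lock(ptl);
	if (isolate_lru_page(page))	/* nonzero on success */
		list_add(&page->lru, pagelist);
	spin_unlock(ptl);
}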
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--	mm/mempolicy.c	29
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3171f884d245..551cde40520b 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -208,6 +208,17 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		page = vm_normal_page(vma, addr, *pte);
 		if (!page)
 			continue;
+		/*
+		 * The check for PageReserved here is important to avoid
+		 * handling zero pages and other pages that may have been
+		 * marked special by the system.
+		 *
+		 * If the PageReserved would not be checked here then f.e.
+		 * the location of the zero page could have an influence
+		 * on MPOL_MF_STRICT, zero pages would be counted for
+		 * the per node stats, and there would be useless attempts
+		 * to put zero pages on the migration list.
+		 */
 		if (PageReserved(page))
 			continue;
 		nid = page_to_nid(page);
@@ -216,11 +227,8 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
 		if (flags & MPOL_MF_STATS)
 			gather_stats(page, private);
-		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-			spin_unlock(ptl);
+		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
 			migrate_page_add(vma, page, private, flags);
-			spin_lock(ptl);
-		}
 		else
 			break;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
@@ -309,6 +317,10 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 	int err;
 	struct vm_area_struct *first, *vma, *prev;
 
+	/* Clear the LRU lists so pages can be isolated */
+	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+		lru_add_drain_all();
+
 	first = find_vma(mm, start);
 	if (!first)
 		return ERR_PTR(-EFAULT);
@@ -555,15 +567,8 @@ static void migrate_page_add(struct vm_area_struct *vma,
 	if ((flags & MPOL_MF_MOVE_ALL) || !page->mapping || PageAnon(page) ||
 	    mapping_writably_mapped(page->mapping) ||
 	    single_mm_mapping(vma->vm_mm, page->mapping)) {
-		int rc = isolate_lru_page(page);
-
-		if (rc == 1)
+		if (isolate_lru_page(page))
 			list_add(&page->lru, pagelist);
-		/*
-		 * If the isolate attempt was not successful then we just
-		 * encountered an unswappable page. Something must be wrong.
-		 */
-		WARN_ON(rc == 0);
 	}
 }
 