author	Christoph Lameter <clameter@sgi.com>	2006-06-25 08:46:49 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-25 13:00:55 -0400
commit	e6a1530d692d6a60cdf15dfbcfea07f5324d7b9f (patch)
tree	bb34a4d745eb7f7e8d3de40b171fac17822ee8ac /mm
parent	7b2259b3e53f128c10a9fded0965e69d4a949847 (diff)
[PATCH] Allow migration of mlocked pages
Hugh clarified the role of VM_LOCKED. So we can now implement page
migration for mlocked pages.

Allow the migration of mlocked pages. This means that try_to_unmap
must unmap mlocked pages in the migration case.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
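For illustration only (not part of this commit): a minimal userspace sketch of what the change enables, assuming a NUMA machine with at least two nodes and libnuma for the mbind() wrapper. It mlock()s an anonymous region and then asks the kernel to move those pages to another node with mbind() and MPOL_MF_MOVE; with this patch, try_to_unmap() unmaps the VM_LOCKED mappings for migration instead of failing. The target node, region size, and build command are assumptions.

/*
 * Hypothetical demo, not from the patch.
 * Build (assumption): gcc -o move_mlocked move_mlocked.c -lnuma
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <numaif.h>			/* mbind(), MPOL_* */

int main(void)
{
	size_t len = 8 * 4096;			/* 8 pages, arbitrary */
	unsigned long nodemask = 1UL << 1;	/* target node 1 (assumed to exist) */
	char *buf;

	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memset(buf, 0xaa, len);		/* fault the pages in */

	if (mlock(buf, len)) {		/* the vma is now VM_LOCKED */
		perror("mlock");
		return 1;
	}

	/*
	 * Bind the range to node 1 and ask for already-allocated pages
	 * to be moved there.  With this patch the mlocked pages can be
	 * migrated like any others.
	 */
	if (mbind(buf, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8,
		  MPOL_MF_MOVE | MPOL_MF_STRICT))
		perror("mbind(MPOL_MF_MOVE)");
	else
		printf("mlocked pages moved to node 1\n");

	munlock(buf, len);
	munmap(buf, len);
	return 0;
}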
Diffstat (limited to 'mm')
-rw-r--r--	mm/migrate.c	10
-rw-r--r--	mm/rmap.c	9
2 files changed, 8 insertions(+), 11 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 0576c0535988..3f1e0c2c942c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -616,15 +616,13 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	/*
 	 * Establish migration ptes or remove ptes
 	 */
-	if (try_to_unmap(page, 1) != SWAP_FAIL) {
-		if (!page_mapped(page))
-			rc = move_to_new_page(newpage, page);
-	} else
-		/* A vma has VM_LOCKED set -> permanent failure */
-		rc = -EPERM;
+	try_to_unmap(page, 1);
+	if (!page_mapped(page))
+		rc = move_to_new_page(newpage, page);
 
 	if (rc)
 		remove_migration_ptes(page, page);
+
 unlock:
 	unlock_page(page);
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 882a85826bb2..e76909e880ca 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -562,9 +562,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * If it's recently referenced (perhaps page_referenced
 	 * skipped over this mm) then we should reactivate it.
 	 */
-	if ((vma->vm_flags & VM_LOCKED) ||
-			(ptep_clear_flush_young(vma, address, pte)
-				&& !migration)) {
+	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
+			(ptep_clear_flush_young(vma, address, pte)))) {
 		ret = SWAP_FAIL;
 		goto out_unmap;
 	}
@@ -771,7 +770,7 @@ static int try_to_unmap_file(struct page *page, int migration)
 
 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-		if (vma->vm_flags & VM_LOCKED)
+		if ((vma->vm_flags & VM_LOCKED) && !migration)
 			continue;
 		cursor = (unsigned long) vma->vm_private_data;
 		if (cursor > max_nl_cursor)
@@ -805,7 +804,7 @@ static int try_to_unmap_file(struct page *page, int migration)
 	do {
 		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-			if (vma->vm_flags & VM_LOCKED)
+			if ((vma->vm_flags & VM_LOCKED) && !migration)
 				continue;
 			cursor = (unsigned long) vma->vm_private_data;
 			while ( cursor < max_nl_cursor &&