path: root/mm/rmap.c
author	Christoph Lameter <clameter@sgi.com>	2006-06-23 05:03:35 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-23 10:42:50 -0400
commit	0697212a411c1dae03c27845f2de2f3adb32c331 (patch)
tree	4bedcdb27522f4a42c422e0a8af155501f43a69c	/mm/rmap.c
parent	8351a6e4785218a2b03c142be92926baff95ba5c (diff)
[PATCH] Swapless page migration: add R/W migration entries
Implement read/write migration ptes.

We take the upper two swap types (swapfile slots) for the two kinds of
migration ptes and define a series of macros in swapops.h.

The VM is modified to handle the migration entries. Migration entries can
only be encountered when the page they point to is locked. This limits the
number of places one has to fix. We also check in copy_pte_range and in
mprotect_pte_range() for migration ptes.

We check for migration ptes in do_swap_page and call a function that will
then wait on the page lock. This allows us to effectively stop all
accesses to a page.

Migration entries are created by try_to_unmap if called for migration and
removed by local functions in migrate.c.

From: Hugh Dickins <hugh@veritas.com>

Several times while testing swapless page migration (I've no NUMA, just
hacking it up to migrate recklessly while running load), I've hit the
BUG_ON(!PageLocked(p)) in migration_entry_to_page. This comes from an
orphaned migration entry, unrelated to the current correctly locked
migration, but hit by remove_anon_migration_ptes as it checks an address
in each vma of the anon_vma list.

Such an orphan may be left behind if an earlier migration raced with fork:
copy_one_pte can duplicate a migration entry from parent to child, after
remove_anon_migration_ptes has checked the child vma, but before it has
removed it from the parent vma. (If the process were later to fault on
this orphaned entry, it would hit the same BUG from migration_entry_wait.)

This could be fixed by locking anon_vma in copy_one_pte, but we'd rather
not. There's no such problem with file pages, because vma_prio_tree_add
adds the child vma after the parent vma, and the page table locking at
each end is enough to serialize. Follow that example with anon_vma: add
new vmas to the tail instead of the head.

(There's no corresponding problem when inserting migration entries,
because a missed pte will leave the page count and mapcount high, which is
allowed for. And there's no corresponding problem when migrating via swap,
because a leftover swap entry will be correctly faulted. But the swapless
method has no refcounting of its entries.)

From: Ingo Molnar <mingo@elte.hu>

pte_unmap_unlock() takes the pte pointer as an argument.

From: Hugh Dickins <hugh@veritas.com>

Several times while testing swapless page migration, gcc has tried to exec
a pointer instead of a string: smells like COW mappings are not being
properly write-protected on fork. The protection in copy_one_pte looks
very convincing, until at last you realize that the second arg to
make_migration_entry is a boolean "write", and SWP_MIGRATION_READ is 30.
Anyway, it's better done like in change_pte_range, using
is_write_migration_entry and make_migration_entry_read.

From: Hugh Dickins <hugh@veritas.com>

Remove unnecessary obfuscation from sys_swapon's range check on swap type,
which blew up causing memory corruption once swapless migration made
MAX_SWAPFILES no longer 2^MAX_SWAPFILES_SHIFT.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Christoph Lameter <clameter@engr.sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
From: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
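For orientation, the swapops.h machinery described above takes roughly the
following shape: a sketch reconstructed from this changelog, not the
authoritative header (see include/linux/swapops.h in the tree). The top
two swap types are reserved for read and write migration entries, which is
why SWP_MIGRATION_READ works out to 30 once two slots are carved out of
2^MAX_SWAPFILES_SHIFT; and the "offset" of a migration entry carries the
page's pfn rather than a swap offset.

	#ifdef CONFIG_MIGRATION
	/* The top two swap types are reserved for migration entries */
	#define SWP_MIGRATION_READ	MAX_SWAPFILES
	#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + 1)

	static inline int is_migration_entry(swp_entry_t entry)
	{
		return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
				swp_type(entry) == SWP_MIGRATION_WRITE);
	}

	static inline int is_write_migration_entry(swp_entry_t entry)
	{
		return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
	}

	static inline swp_entry_t make_migration_entry(struct page *page,
							int write)
	{
		BUG_ON(!PageLocked(page));
		/* The "offset" of a migration entry is the page's pfn */
		return swp_entry(write ? SWP_MIGRATION_WRITE :
					 SWP_MIGRATION_READ,
				 page_to_pfn(page));
	}

	static inline void make_migration_entry_read(swp_entry_t *entry)
	{
		*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
	}

	static inline struct page *migration_entry_to_page(swp_entry_t entry)
	{
		struct page *p = pfn_to_page(swp_offset(entry));
		/*
		 * Migration entries may only be dereferenced while the
		 * page they point to is locked; see the changelog above.
		 */
		BUG_ON(!PageLocked(p));
		return p;
	}
	#endif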
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	38
1 file changed, 24 insertions(+), 14 deletions(-)
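The diffstat above covers only mm/rmap.c, but the waiting half of the
scheme lives elsewhere: do_swap_page() in mm/memory.c recognizes a
migration pte and, rather than faulting in from swap, blocks until the
migration finishes. A sketch of that path, assuming the helper lives in
mm/migrate.c as migration_entry_wait(), per the changelog:

	/* In do_swap_page(), before treating the entry as real swap: */
	entry = pte_to_swp_entry(orig_pte);
	if (is_migration_entry(entry)) {
		migration_entry_wait(mm, pmd, address);
		goto out;
	}

	/*
	 * Take the page table lock, confirm a migration pte is still
	 * installed at this address, then sleep on the lock of the page
	 * it points to.  Note pte_unmap_unlock() is passed the pte
	 * pointer, not the pte value -- the fix Ingo notes above.
	 */
	void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				  unsigned long address)
	{
		pte_t *ptep, pte;
		spinlock_t *ptl;
		swp_entry_t entry;
		struct page *page;

		ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
		pte = *ptep;
		if (pte_present(pte) || pte_none(pte) || pte_file(pte))
			goto out;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto out;
		page = migration_entry_to_page(entry);
		get_page(page);		/* hold the page across the unlock */
		pte_unmap_unlock(ptep, ptl);
		wait_on_page_locked(page);
		put_page(page);
		return;
	out:
		pte_unmap_unlock(ptep, ptl);
	}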
diff --git a/mm/rmap.c b/mm/rmap.c
index 10806b7af40c..3b8ce86daa3a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -103,7 +103,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 		spin_lock(&mm->page_table_lock);
 		if (likely(!vma->anon_vma)) {
 			vma->anon_vma = anon_vma;
-			list_add(&vma->anon_vma_node, &anon_vma->head);
+			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
 			allocated = NULL;
 		}
 		spin_unlock(&mm->page_table_lock);
@@ -127,7 +127,7 @@ void __anon_vma_link(struct vm_area_struct *vma)
 	struct anon_vma *anon_vma = vma->anon_vma;
 
 	if (anon_vma) {
-		list_add(&vma->anon_vma_node, &anon_vma->head);
+		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
 		validate_anon_vma(vma);
 	}
 }
@@ -138,7 +138,7 @@ void anon_vma_link(struct vm_area_struct *vma)
 
 	if (anon_vma) {
 		spin_lock(&anon_vma->lock);
-		list_add(&vma->anon_vma_node, &anon_vma->head);
+		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
 		validate_anon_vma(vma);
 		spin_unlock(&anon_vma->lock);
 	}
@@ -620,17 +620,27 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 
 	if (PageAnon(page)) {
 		swp_entry_t entry = { .val = page_private(page) };
-		/*
-		 * Store the swap location in the pte.
-		 * See handle_pte_fault() ...
-		 */
-		BUG_ON(!PageSwapCache(page));
-		swap_duplicate(entry);
-		if (list_empty(&mm->mmlist)) {
-			spin_lock(&mmlist_lock);
-			if (list_empty(&mm->mmlist))
-				list_add(&mm->mmlist, &init_mm.mmlist);
-			spin_unlock(&mmlist_lock);
+
+		if (PageSwapCache(page)) {
+			/*
+			 * Store the swap location in the pte.
+			 * See handle_pte_fault() ...
+			 */
+			swap_duplicate(entry);
+			if (list_empty(&mm->mmlist)) {
+				spin_lock(&mmlist_lock);
+				if (list_empty(&mm->mmlist))
+					list_add(&mm->mmlist, &init_mm.mmlist);
+				spin_unlock(&mmlist_lock);
+			}
+		} else {
+			/*
+			 * Store the pfn of the page in a special migration
+			 * pte. do_swap_page() will wait until the migration
+			 * pte is removed and then restart fault handling.
+			 */
+			BUG_ON(!migration);
+			entry = make_migration_entry(page, pte_write(pteval));
 		}
 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 		BUG_ON(pte_file(*pte));
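Finally, the fork-time detail from Hugh's third note: when copy_one_pte()
duplicates a writable migration entry into a COW mapping, the entry must
be downgraded to read for both parent and child, using the helpers rather
than passing SWP_MIGRATION_READ (which is 30, hence always true) where a
boolean "write" is expected. A sketch of the corrected fragment in
mm/memory.c's copy_one_pte(), assuming is_cow_mapping() and the migration
helpers sketched earlier are available:

	/* In copy_one_pte(), when the pte is not present and not a
	 * file pte, i.e. it holds a swap or migration entry: */
	swp_entry_t entry = pte_to_swp_entry(pte);

	if (is_write_migration_entry(entry) && is_cow_mapping(vm_flags)) {
		/*
		 * COW mappings require pages in both parent and child
		 * to be set to read: downgrade the entry and write it
		 * back into the parent as well.
		 */
		make_migration_entry_read(&entry);
		pte = swp_entry_to_pte(entry);
		set_pte_at(src_mm, addr, src_pte, pte);
	}

The child then receives the (possibly downgraded) entry through the usual
set_pte_at() at the end of copy_one_pte(), mirroring what
change_pte_range() does in mprotect.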