author     Hugh Dickins <hugh@veritas.com>             2005-06-21 20:15:12 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-06-21 21:46:21 -0400
commit     c475a8ab625d567eacf5e30ec35d6d8704558062
tree       0971bef7b876f1b3eb160621fc2b61cb5313827b /mm/memory.c
parent     d296e9cd02c92e576ecce5344026a4df4353cdb2
[PATCH] can_share_swap_page: use page_mapcount
Remember that ironic get_user_pages race, when the raised page_count on a
swapped-out page led do_wp_page to decide that it had to copy on write, and
so substituted a different page into userspace? 2.6.7 onwards have Andrea's
solution, where try_to_unmap_one backs out if it finds page_count raised.
That works, but is unsatisfying (rmap.c has no other page_count heuristics),
and a few months ago it was found to hang an intensive page migration test.
A year ago I was hesitant to engage page_mapcount; now it seems the right fix.
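For context, the heuristic in question looked roughly like this in
try_to_unmap_one (mm/rmap.c); this is a sketch of the 2.6-era check from
memory, not a hunk of this patch, whose diff below is limited to mm/memory.c:

	/*
	 * Sketch: back out of unmapping if get_user_pages may hold a
	 * reference.  GUP raises page_count to pin the page, so a count
	 * above page_mapcount + 2 (one for the swap cache, one for the
	 * caller's own reference) means someone else has it pinned.
	 */
	if (PageSwapCache(page) &&
	    page_count(page) != page_mapcount(page) + 2) {
		ret = SWAP_FAIL;	/* back out; try again later */
		goto out_unmap;
	}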
So remove the page_count hack from try_to_unmap_one; and use activate_page in
unuse_mm when dropping the page lock, to replace the hack's secondary effect
of helping swapoff to make progress in that case.
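The unuse_mm side of that looks roughly as below (a sketch following the
2.6-era mm/swapfile.c, not part of the mm/memory.c diff shown here): activate
the page before dropping its lock, so the scanner is unlikely to unmap its
ptes in the meantime.

	static int unuse_mm(struct mm_struct *mm,
				swp_entry_t entry, struct page *page)
	{
		struct vm_area_struct *vma;

		if (!down_read_trylock(&mm->mmap_sem)) {
			/*
			 * Activate page so shrink_cache is unlikely to
			 * unmap its ptes while the lock is dropped, so
			 * swapoff can make progress.
			 */
			activate_page(page);
			unlock_page(page);
			down_read(&mm->mmap_sem);
			lock_page(page);
		}
		/* ... walk mm->mmap and unuse each anon vma as before ... */
		up_read(&mm->mmap_sem);
		return 0;
	}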
Simplify can_share_swap_page (now called only on anonymous pages) to check
page_mapcount + page_swapcount == 1: it still needs the page lock to stabilize
their (pessimistic) sum, but it no longer needs swapper_space.tree_lock for that.
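Concretely, the simplified check reads roughly as follows (a sketch in
2.6-era style; page_swapcount is the new helper in mm/swapfile.c, and the
exact code is outside the mm/memory.c diff below):

	/* Sketch: how many references to this page are swapped out? */
	static inline int page_swapcount(struct page *page)
	{
		int count = 0;
		struct swap_info_struct *p;
		swp_entry_t entry;

		entry.val = page->private;
		p = swap_info_get(entry);
		if (p) {
			/* Subtract the 1 for the swap cache itself */
			count = p->swap_map[swp_offset(entry)] - 1;
			swap_info_put(p);
		}
		return count;
	}

	/*
	 * Sketch: an anonymous page is exclusively ours if the sum of
	 * its mappings and its swap references is exactly one.  The
	 * page lock keeps that (pessimistic) sum stable underneath us.
	 */
	int can_share_swap_page(struct page *page)
	{
		int count;

		BUG_ON(!PageLocked(page));
		count = page_mapcount(page);
		if (count <= 1 && PageSwapCache(page))
			count += page_swapcount(page);
		return count == 1;
	}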
In do_swap_page, move swap_free and unlock_page below page_add_anon_rmap, to
keep the sum on the high side, and correct at the point when can_share_swap_page
is called.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 1c0a3db78a05..da91b7bf9986 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1686,10 +1686,6 @@ static int do_swap_page(struct mm_struct * mm,
 	}
 
 	/* The page isn't present yet, go ahead with the fault. */
-
-	swap_free(entry);
-	if (vm_swap_full())
-		remove_exclusive_swap_page(page);
 
 	inc_mm_counter(mm, rss);
 	pte = mk_pte(page, vma->vm_page_prot);
@@ -1697,12 +1693,16 @@ static int do_swap_page(struct mm_struct * mm,
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 		write_access = 0;
 	}
-	unlock_page(page);
 
 	flush_icache_page(vma, page);
 	set_pte_at(mm, address, page_table, pte);
 	page_add_anon_rmap(page, vma, address);
 
+	swap_free(entry);
+	if (vm_swap_full())
+		remove_exclusive_swap_page(page);
+	unlock_page(page);
+
 	if (write_access) {
 		if (do_wp_page(mm, vma, address,
 				page_table, pmd, pte) == VM_FAULT_OOM)