author		Andrea Arcangeli <aarcange@redhat.com>	2010-09-09 19:37:52 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-09-09 21:57:24 -0400
commit		4969c1192d15afa3389e7ae3302096ff684ba655 (patch)
tree		abe560c8f293191be65488c49f4db3f3a626e63c /mm/ksm.c
parent		7c5367f205f7d53659fb19b9fdf65b7bc1a592c6 (diff)
mm: fix swapin race condition
The pte_same check is reliable only if the swap entry remains pinned (by
the page lock on swapcache).  We must also ensure the swapcache isn't
removed before we take the page lock, as try_to_free_swap won't care
about the page pin.

One of the possible impacts of this patch is that a KSM-shared page can
point to the anon_vma of another process, which could exit before the
page is freed.  This can leave a page with a pointer to a recycled
anon_vma object, or worse, a pointer to something that is no longer an
anon_vma.

[riel@redhat.com: changelog help]
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
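To make the required ordering concrete, here is a simplified C sketch of
the swapin path this fix implies, using 2.6.36-era names.  This is not
the actual mm/memory.c change (which is outside this diffstat-limited
view); the error labels are hypothetical and the surrounding fault
handling is elided:

	/*
	 * Sketch only: lock the swapcache page *before* relying on any
	 * pte_same() test, then re-validate the swapcache under the lock.
	 */
	page = lookup_swap_cache(entry);
	if (!page)
		goto no_cache;		/* hypothetical label */

	lock_page(page);

	/*
	 * try_to_free_swap() ignores the page pin, so the swapcache may
	 * have been released under us despite our reference.  Re-check
	 * under the page lock before trusting any later pte_same() test.
	 */
	if (!PageSwapCache(page) || page_private(page) != entry.val)
		goto out_page;		/* hypothetical label */

	/*
	 * Only now, with the swap entry pinned by the page lock, are a
	 * KSM copy and the pte_same() check under the ptl safe.
	 */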
Diffstat (limited to 'mm/ksm.c')
-rw-r--r--	mm/ksm.c | 3 ---
1 file changed, 0 insertions(+), 3 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index e2ae00458320..b1873cf03ed9 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1504,8 +1504,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 {
 	struct page *new_page;
 
-	unlock_page(page);	/* any racers will COW it, not modify it */
-
 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 	if (new_page) {
 		copy_user_highpage(new_page, page, address, vma);
@@ -1521,7 +1519,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 		add_page_to_unevictable_list(new_page);
 	}
 
-	page_cache_release(page);
 	return new_page;
 }
 
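With unlock_page() and page_cache_release() removed here,
ksm_does_need_to_copy() no longer drops the page lock or the page
reference; the caller must hold the lock across the copy and release
both itself.  A minimal, hedged sketch of the resulting caller-side
pattern (the real counterpart lives in mm/memory.c, not shown in this
diffstat-limited view; the `swapcache` local and the error label are
naming choices for this sketch):

	/* The swapcache page stays locked and pinned across the copy. */
	if (ksm_might_need_to_copy(page, vma, address)) {
		swapcache = page;
		page = ksm_does_need_to_copy(page, vma, address);
		if (!page)
			goto out_oom;	/* hypothetical label */
	}

	/* ... pte_same() is checked under the ptl while swapcache stays pinned ... */

	unlock_page(swapcache);
	page_cache_release(swapcache);	/* caller now drops lock and pin */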