author     Hugh Dickins <hughd@google.com>                2010-10-02 20:46:06 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2010-10-04 14:09:53 -0400
commit     4829b906cc063cb7cd1b7f34fa05de6db75ec8bb
tree       6ff55df09bb788ae2c66a5f9570bcacf70d6e3bb
parent     c6ea21e35bf3691cad59647c771e6606067f627d
ksm: fix page_address_in_vma anon_vma oops
2.6.36-rc1 commit 21d0d443cdc1658a8c1484fdcece4803f0f96d0e "rmap: resurrect
page_address_in_vma anon_vma check" was right to resurrect that check; but
now that it's comparing anon_vma->roots instead of just anon_vmas, there's
a danger of oopsing on a NULL anon_vma.

In most cases no NULL anon_vma ever gets here; but it turns out that
occasionally KSM, when enabled on a forked or forking process, will itself
call page_address_in_vma() on a "half-KSM" page left over from an earlier
failed attempt to merge - whose page_anon_vma() is NULL.

It's my bug that those should be getting here at all: I thought they were
already dealt with; this oops proves me wrong; I'll fix it in the next
release - such pages are effectively pinned until their process exits,
since rmap cannot find their ptes (though swapoff can).

For now just work around it by making page_address_in_vma() safe (and add
a comment on why that check is wanted anyway).  A similar check in
__page_check_anon_rmap() is safe because do_page_add_anon_rmap() already
excluded KSM pages.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
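To make the failure mode concrete, here is a minimal standalone C sketch of
the guarded comparison this patch introduces. The struct definitions,
page_anon_vma_stub() and check() are stand-ins invented for illustration
(they are not the kernel's types or API), and -14 merely stands in for
-EFAULT; the point is that short-circuit evaluation tests both pointers for
NULL before either ->root is dereferenced:

#include <stdio.h>

/* Stand-ins for the kernel structures involved; only the fields this
 * example needs are modelled - these are NOT the real kernel types. */
struct anon_vma { struct anon_vma *root; };
struct vm_area_struct { struct anon_vma *anon_vma; };

/* Models page_anon_vma() returning NULL for a "half-KSM" page. */
static struct anon_vma *page_anon_vma_stub(int half_ksm, struct anon_vma *av)
{
	return half_ksm ? NULL : av;
}

static long check(struct vm_area_struct *vma, int half_ksm,
		  struct anon_vma *av)
{
	struct anon_vma *page__anon_vma = page_anon_vma_stub(half_ksm, av);

	/* The NULL tests run before either ->root dereference, so
	 * neither operand can oops even for a half-KSM page. */
	if (!vma->anon_vma || !page__anon_vma ||
	    vma->anon_vma->root != page__anon_vma->root)
		return -14;	/* stands in for -EFAULT */
	return 0;
}

int main(void)
{
	struct anon_vma av = { .root = &av };
	struct vm_area_struct vma = { .anon_vma = &av };

	printf("matching roots: %ld\n", check(&vma, 0, &av));	/* 0 */
	printf("half-KSM page:  %ld\n", check(&vma, 1, &av));	/* -14 */
	return 0;
}

Compiled as plain userspace C, the second call takes the NULL path and
returns the error code instead of crashing, which is exactly the behaviour
the patch restores for the kernel.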
 mm/rmap.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 9d2ba01bd4f9..92e6757f196e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -381,7 +381,13 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
 	if (PageAnon(page)) {
-		if (vma->anon_vma->root != page_anon_vma(page)->root)
+		struct anon_vma *page__anon_vma = page_anon_vma(page);
+		/*
+		 * Note: swapoff's unuse_vma() is more efficient with this
+		 * check, and needs it to match anon_vma when KSM is active.
+		 */
+		if (!vma->anon_vma || !page__anon_vma ||
+		    vma->anon_vma->root != page__anon_vma->root)
 			return -EFAULT;
 	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
 		if (!vma->vm_file ||
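
For context, this is roughly how the PageAnon() branch of
page_address_in_vma() reads once the hunk above is applied. The tail of the
function (the f_mapping comparison, the final else, and the vma_address()
call) falls outside the hunk and is assumed from the surrounding
2.6.36-era mm/rmap.c rather than quoted from this diff:

unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;	/* assumed tail, outside the hunk */
	} else
		return -EFAULT;
	return vma_address(page, vma);	/* assumed tail, outside the hunk */
}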