author      Hugh Dickins <hughd@google.com>                    2011-05-28 16:17:04 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>     2011-05-28 19:09:26 -0400
commit      5dbe0af47f8a8f968bac2991c3ec974c6e3eaabc
tree        e936955d61fc2a69e36b2e9276f5ad7585134255
parent      826267cf1e6c6899eda1325a19f1b1d15c558b20
mm: fix kernel BUG at mm/rmap.c:1017!
I've hit the "address >= vma->vm_end" check in do_page_add_anon_rmap()
just once. The stack showed a khugepaged allocation trying to compact
pages, with the call to page_add_anon_rmap() coming from remove_migration_pte().
That path holds the anon_vma lock, but does not hold mmap_sem: it can
therefore race with a split_vma(), and commit 5f70b962ccc2 "mmap:
avoid unnecessary anon_vma lock" took away the anon_vma lock
protection when adjusting vma->vm_end.
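
To make the race window concrete, here is a purely illustrative userspace
sketch; struct fake_vma, address_outside() and the addresses are invented
for this example and are not kernel code or kernel APIs. It shows how a
concurrent shrink of vm_end leaves a perfectly valid migration address
outside the vma that was looked up, which is the state the old check
treated as a bug.

#include <stdio.h>

/* Stand-in for the few vm_area_struct fields that matter here. */
struct fake_vma {
        unsigned long vm_start;
        unsigned long vm_end;
};

/* Stand-in for the old VM_BUG_ON() range check in do_page_add_anon_rmap(). */
static int address_outside(const struct fake_vma *vma, unsigned long address)
{
        return address < vma->vm_start || address >= vma->vm_end;
}

int main(void)
{
        struct fake_vma vma = { .vm_start = 0x1000, .vm_end = 0x5000 };
        unsigned long address = 0x4000; /* page being migrated back in */

        /* Migration found the vma while the address was still inside it. */
        printf("before vma_adjust: outside=%d\n", address_outside(&vma, address));

        /*
         * A racing split_vma()/vma_adjust() lowers vm_end without the
         * anon_vma lock (since 5f70b962ccc2); the address now belongs
         * to the next vma.
         */
        vma.vm_end = 0x3000;

        /* Here the old BUG_ON would have fired, although nothing is broken. */
        printf("after vma_adjust:  outside=%d\n", address_outside(&vma, address));
        return 0;
}

Run under any C compiler, the second line prints outside=1: the address is
formally out of range for the vma in hand, which is exactly the condition
the patch below downgrades from a BUG_ON to a comment.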
I don't think that particular BUG_ON has ever caught anything interesting,
so it is better to replace it with a comment than to reinstate the anon_vma locking.
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 mm/rmap.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1014,7 +1014,7 @@ void do_page_add_anon_rmap(struct page *page,
                 return;
 
         VM_BUG_ON(!PageLocked(page));
-        VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+        /* address might be in next vma when migration races vma_adjust */
         if (first)
                 __page_set_anon_rmap(page, vma, address, exclusive);
         else
@@ -1709,7 +1709,7 @@ void hugepage_add_anon_rmap(struct page *page,
 
         BUG_ON(!PageLocked(page));
         BUG_ON(!anon_vma);
-        BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+        /* address might be in next vma when migration races vma_adjust */
         first = atomic_inc_and_test(&page->_mapcount);
         if (first)
                 __hugepage_set_anon_rmap(page, vma, address, 0);