diff options
author | Hugh Dickins <hughd@google.com> | 2016-07-14 15:07:38 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-07-15 01:54:27 -0400 |
commit | 5a49973d7143ebbabd76e1dcd69ee42e349bb7b9 (patch) | |
tree | e1dc7cdb669b7fecf3a331b7a64183b1f24c65c5 | |
parent | 55bda43bb26d2c11eeedac742eff87a8ac34c106 (diff) |
mm: thp: refix false positive BUG in page_move_anon_rmap()
The VM_BUG_ON_PAGE in page_move_anon_rmap() is more trouble than it's
worth: the syzkaller fuzzer hit it again. It's still wrong for some THP
cases, because linear_page_index() was never intended to apply to
addresses before the start of a vma.
That's easily fixed with a signed long cast inside linear_page_index();
and Dmitry has tested such a patch, to verify the false positive. But
why extend linear_page_index() just for this case, when the avoidance in
page_move_anon_rmap() has already grown ugly, and there's no reason for
the check at all (nothing else there is using address or index).
Remove address arg from page_move_anon_rmap(), remove VM_BUG_ON_PAGE,
remove CONFIG_DEBUG_VM PageTransHuge adjustment.
And one more thing: should the compound_head(page) be done inside or
outside page_move_anon_rmap()? It's usually pushed down to the lowest
level nowadays (and mm/memory.c shows no other explicit use of it), so I
think it's better done in page_move_anon_rmap() than by caller.
Fixes: 0798d3c022dc ("mm: thp: avoid false positive VM_BUG_ON_PAGE in page_move_anon_rmap()")
Link: http://lkml.kernel.org/r/alpine.LSU.2.11.1607120444540.12528@eggly.anvils
Signed-off-by: Hugh Dickins <hughd@google.com>
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mika Westerberg <mika.westerberg@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: <stable@vger.kernel.org> [4.5+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | include/linux/rmap.h | 2 | ||||
-rw-r--r-- | mm/hugetlb.c | 2 | ||||
-rw-r--r-- | mm/memory.c | 3 | ||||
-rw-r--r-- | mm/rmap.c | 9 |
4 files changed, 6 insertions, 10 deletions
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 49eb4f8ebac9..2b0fad83683f 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h | |||
@@ -158,7 +158,7 @@ struct anon_vma *page_get_anon_vma(struct page *page); | |||
158 | /* | 158 | /* |
159 | * rmap interfaces called when adding or removing pte of page | 159 | * rmap interfaces called when adding or removing pte of page |
160 | */ | 160 | */ |
161 | void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); | 161 | void page_move_anon_rmap(struct page *, struct vm_area_struct *); |
162 | void page_add_anon_rmap(struct page *, struct vm_area_struct *, | 162 | void page_add_anon_rmap(struct page *, struct vm_area_struct *, |
163 | unsigned long, bool); | 163 | unsigned long, bool); |
164 | void do_page_add_anon_rmap(struct page *, struct vm_area_struct *, | 164 | void do_page_add_anon_rmap(struct page *, struct vm_area_struct *, |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index c1f3c0be150a..addfe4accc07 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -3383,7 +3383,7 @@ retry_avoidcopy: | |||
3383 | /* If no-one else is actually using this page, avoid the copy | 3383 | /* If no-one else is actually using this page, avoid the copy |
3384 | * and just make the page writable */ | 3384 | * and just make the page writable */ |
3385 | if (page_mapcount(old_page) == 1 && PageAnon(old_page)) { | 3385 | if (page_mapcount(old_page) == 1 && PageAnon(old_page)) { |
3386 | page_move_anon_rmap(old_page, vma, address); | 3386 | page_move_anon_rmap(old_page, vma); |
3387 | set_huge_ptep_writable(vma, address, ptep); | 3387 | set_huge_ptep_writable(vma, address, ptep); |
3388 | return 0; | 3388 | return 0; |
3389 | } | 3389 | } |
diff --git a/mm/memory.c b/mm/memory.c index cd1f29e4897e..9e046819e619 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2399,8 +2399,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2399 | * Protected against the rmap code by | 2399 | * Protected against the rmap code by |
2400 | * the page lock. | 2400 | * the page lock. |
2401 | */ | 2401 | */ |
2402 | page_move_anon_rmap(compound_head(old_page), | 2402 | page_move_anon_rmap(old_page, vma); |
2403 | vma, address); | ||
2404 | } | 2403 | } |
2405 | unlock_page(old_page); | 2404 | unlock_page(old_page); |
2406 | return wp_page_reuse(mm, vma, address, page_table, ptl, | 2405 | return wp_page_reuse(mm, vma, address, page_table, ptl, |
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
@@ -1084,23 +1084,20 @@ EXPORT_SYMBOL_GPL(page_mkclean); | |||
1084 | * page_move_anon_rmap - move a page to our anon_vma | 1084 | * page_move_anon_rmap - move a page to our anon_vma |
1085 | * @page: the page to move to our anon_vma | 1085 | * @page: the page to move to our anon_vma |
1086 | * @vma: the vma the page belongs to | 1086 | * @vma: the vma the page belongs to |
1087 | * @address: the user virtual address mapped | ||
1088 | * | 1087 | * |
1089 | * When a page belongs exclusively to one process after a COW event, | 1088 | * When a page belongs exclusively to one process after a COW event, |
1090 | * that page can be moved into the anon_vma that belongs to just that | 1089 | * that page can be moved into the anon_vma that belongs to just that |
1091 | * process, so the rmap code will not search the parent or sibling | 1090 | * process, so the rmap code will not search the parent or sibling |
1092 | * processes. | 1091 | * processes. |
1093 | */ | 1092 | */ |
1094 | void page_move_anon_rmap(struct page *page, | 1093 | void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) |
1095 | struct vm_area_struct *vma, unsigned long address) | ||
1096 | { | 1094 | { |
1097 | struct anon_vma *anon_vma = vma->anon_vma; | 1095 | struct anon_vma *anon_vma = vma->anon_vma; |
1098 | 1096 | ||
1097 | page = compound_head(page); | ||
1098 | |||
1099 | VM_BUG_ON_PAGE(!PageLocked(page), page); | 1099 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
1100 | VM_BUG_ON_VMA(!anon_vma, vma); | 1100 | VM_BUG_ON_VMA(!anon_vma, vma); |
1101 | if (IS_ENABLED(CONFIG_DEBUG_VM) && PageTransHuge(page)) | ||
1102 | address &= HPAGE_PMD_MASK; | ||
1103 | VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page); | ||
1104 | 1101 | ||
1105 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; | 1102 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; |
1106 | /* | 1103 | /* |