author	Andrea Arcangeli <aarcange@redhat.com>	2016-10-07 20:01:37 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-07 21:46:29 -0400
commit	8f26e0b176f3484c49d55d88fe6083a9cf9ff443 (patch)
tree	817114a7c54148b01bd4dc4327b1bf84b4f5a158 /mm
parent	86d12e471d9f152217744f2054e63e3742949879 (diff)
mm: vma_merge: correct false positive from __vma_unlink->validate_mm_rb
The old code was always doing:

	vma->vm_end = next->vm_end
	vma_rb_erase(next)		// in __vma_unlink
	vma->vm_next = next->vm_next	// in __vma_unlink
	next = vma->vm_next
	vma_gap_update(next)

The new code still does the above for remove_next == 1 and 2, but for
remove_next == 3 it has been changed and it does:

	next->vm_start = vma->vm_start
	vma_rb_erase(vma)		// in __vma_unlink
	vma_gap_update(next)

In the latter case, while unlinking "vma", validate_mm_rb() is told to
ignore "vma", the entry being removed, but it is next->vm_start that was
reduced instead. So for the new case, to avoid the false positive from
validate_mm_rb, it should be "next" that is ignored when "vma" is being
unlinked.

"vma" and "next" in the comment above refer to the pre-swap() vmas.

Link: http://lkml.kernel.org/r/1474492522-2261-4-git-send-email-aarcange@redhat.com
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Tested-by: Shaun Tancheff <shaun.tancheff@seagate.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Jan Vorlicek <janvorli@microsoft.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
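[Editor's note: for readers without the surrounding source at hand, validate_mm_rb() (compiled in only under CONFIG_DEBUG_VM_RB) walks the whole rbtree and verifies each vma's cached rb_subtree_gap, skipping exactly one vma named by the caller. A minimal sketch of that check, paraphrased rather than copied from the mm/mmap.c of this era:

	/*
	 * Paraphrased sketch, not a verbatim quote: every node's cached
	 * rb_subtree_gap must match a fresh recomputation, except for the
	 * single "ignore" vma the caller is about to erase or has just
	 * resized. Passing the wrong vma as "ignore" is exactly what
	 * tripped the debug check for remove_next == 3.
	 */
	static void validate_mm_rb(struct rb_root *root,
				   struct vm_area_struct *ignore)
	{
		struct rb_node *nd;

		for (nd = rb_first(root); nd; nd = rb_next(nd)) {
			struct vm_area_struct *vma;

			vma = rb_entry(nd, struct vm_area_struct, vm_rb);
			VM_BUG_ON_VMA(vma != ignore &&
				      vma->vm_rb.rb_subtree_gap !=
					      vma_compute_subtree_gap(vma),
				      vma);
		}
	}
]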
Diffstat (limited to 'mm')
-rw-r--r--	mm/mmap.c	59
1 file changed, 41 insertions(+), 18 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 4dc65be4766f..1af87c14183d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -402,15 +402,9 @@ static inline void vma_rb_insert(struct vm_area_struct *vma,
 	rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
 }
 
-static void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
+static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
 {
 	/*
-	 * All rb_subtree_gap values must be consistent prior to erase,
-	 * with the possible exception of the vma being erased.
-	 */
-	validate_mm_rb(root, vma);
-
-	/*
 	 * Note rb_erase_augmented is a fairly large inline function,
 	 * so make sure we instantiate it only once with our desired
 	 * augmented rbtree callbacks.
@@ -418,6 +412,32 @@ static void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
 	rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
 }
 
+static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
+						struct rb_root *root,
+						struct vm_area_struct *ignore)
+{
+	/*
+	 * All rb_subtree_gap values must be consistent prior to erase,
+	 * with the possible exception of the "next" vma being erased if
+	 * next->vm_start was reduced.
+	 */
+	validate_mm_rb(root, ignore);
+
+	__vma_rb_erase(vma, root);
+}
+
+static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
+					 struct rb_root *root)
+{
+	/*
+	 * All rb_subtree_gap values must be consistent prior to erase,
+	 * with the possible exception of the vma being erased.
+	 */
+	validate_mm_rb(root, vma);
+
+	__vma_rb_erase(vma, root);
+}
+
 /*
  * vma has some anon_vma assigned, and is already inserted on that
  * anon_vma's interval trees.
@@ -604,11 +624,12 @@ static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 static __always_inline void __vma_unlink_common(struct mm_struct *mm,
 						struct vm_area_struct *vma,
 						struct vm_area_struct *prev,
-						bool has_prev)
+						bool has_prev,
+						struct vm_area_struct *ignore)
 {
 	struct vm_area_struct *next;
 
-	vma_rb_erase(vma, &mm->mm_rb);
+	vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
 	next = vma->vm_next;
 	if (has_prev)
 		prev->vm_next = next;
@@ -630,13 +651,7 @@ static inline void __vma_unlink_prev(struct mm_struct *mm,
 				     struct vm_area_struct *vma,
 				     struct vm_area_struct *prev)
 {
-	__vma_unlink_common(mm, vma, prev, true);
-}
-
-static inline void __vma_unlink(struct mm_struct *mm,
-				struct vm_area_struct *vma)
-{
-	__vma_unlink_common(mm, vma, NULL, false);
+	__vma_unlink_common(mm, vma, prev, true, vma);
 }
 
 /*
@@ -815,8 +830,16 @@ again:
 		if (remove_next != 3)
 			__vma_unlink_prev(mm, next, vma);
 		else
-			/* vma is not before next if they've been swapped */
-			__vma_unlink(mm, next);
+			/*
+			 * vma is not before next if they've been
+			 * swapped.
+			 *
+			 * pre-swap() next->vm_start was reduced so
+			 * tell validate_mm_rb to ignore pre-swap()
+			 * "next" (which is stored in post-swap()
+			 * "vma").
+			 */
+			__vma_unlink_common(mm, next, NULL, false, vma);
 		if (file)
 			__remove_shared_vm_struct(next, file, mapping);
 	} else if (insert) {
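
[Editor's note: to see concretely why reducing next->vm_start leaves a stale gap until vma_gap_update() runs, here is a tiny standalone model. It is not kernel code: addresses are made up, and a single cached_gap field stands in for the kernel's augmented-rbtree rb_subtree_gap.

	#include <stdio.h>

	/* Simplified stand-in for a vma: the kernel caches the free
	 * space below each vma (and its rbtree subtree); one cached_gap
	 * per vma models that here.
	 */
	struct vma { unsigned long vm_start, vm_end, cached_gap; };

	static unsigned long gap(const struct vma *v, const struct vma *prev)
	{
		return v->vm_start - prev->vm_end;
	}

	int main(void)
	{
		struct vma prev = { 0x1000, 0x2000, 0 };
		struct vma next = { 0x5000, 0x8000, 0 };

		next.cached_gap = gap(&next, &prev);	/* 0x3000 */

		/* remove_next == 3 path: next->vm_start is reduced first... */
		next.vm_start = 0x3000;

		/* ...so until vma_gap_update(next) runs, the cached value
		 * is stale (0x3000 != 0x1000), and a full-tree validation
		 * must be told to skip "next", not the vma being erased.
		 */
		printf("cached gap %#lx, real gap %#lx\n",
		       next.cached_gap, gap(&next, &prev));
		return 0;
	}
]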