author     Hugh Dickins <hughd@google.com>                 2014-06-23 16:22:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-06-23 19:47:44 -0400
commit     d05f0cdcbe6388723f1900c549b4850360545201
tree       7dd172c9cebd9e3ec76d4590a061954ac054adec /mm
parent     b43ae21bd1d8199df10548f3fc0d806052027f29
mm: fix crashes from mbind() merging vmas
In v2.6.34 commit 9d8cebd4bcd7 ("mm: fix mbind vma merge problem")
introduced vma merging to mbind(), but it should have also changed the
convention of passing start vma from queue_pages_range() (formerly
check_range()) to new_vma_page(): vma merging may have already freed
that structure, resulting in BUG at mm/mempolicy.c:1738 and probably
worse crashes.
Fixes: 9d8cebd4bcd7 ("mm: fix mbind vma merge problem")
Reported-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Tested-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: <stable@vger.kernel.org> [2.6.34+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--   mm/mempolicy.c   46
1 file changed, 20 insertions(+), 26 deletions(-)
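For context, the crash path is reached from userspace via mbind() with MPOL_MF_MOVE (or MPOL_MF_MOVE_ALL): queue_pages_range() queues the misplaced pages, mbind_range() may then merge or split vmas, and migrate_pages() finally invokes the page-allocation callback with whatever vma pointer it was handed. Below is a minimal userspace sketch of an mbind() sequence of that shape, using the libnuma numaif.h wrapper; the two-step bind, the mapping size, and the node mask are illustrative choices only, not the original reproducer.

/* Illustrative only: an mbind(MPOL_MF_MOVE) call over a range whose vmas
 * mbind_range() can merge.  Build with: gcc mbind-sketch.c -lnuma
 */
#include <numaif.h>		/* mbind(), MPOL_BIND, MPOL_MF_MOVE */
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t len = 4 * 1024 * 1024;
	unsigned long nodemask = 1;		/* node 0 only */

	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}
	for (size_t i = 0; i < len; i += 4096)
		p[i] = 1;			/* fault the pages in */

	/* Give half of the mapping a policy of its own first: this splits
	 * the vma, so the second call has adjacent vmas to merge back. */
	if (mbind(p, len / 2, MPOL_BIND, &nodemask, 8 * sizeof(nodemask), 0))
		perror("mbind (split)");

	/* Bind the whole range and ask for existing pages to be migrated:
	 * queue_pages_range(), then mbind_range() (where merging happens),
	 * then migrate_pages() with its allocation callback. */
	if (mbind(p, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask),
		  MPOL_MF_MOVE))
		perror("mbind (merge + move)");

	munmap(p, len);
	return 0;
}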
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 284974230459..eb58de19f815 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -656,19 +656,18 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
  * @nodes and @flags,) it's isolated and queued to the pagelist which is
  * passed via @private.)
  */
-static struct vm_area_struct *
+static int
 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags, void *private)
 {
-	int err;
-	struct vm_area_struct *first, *vma, *prev;
-
+	int err = 0;
+	struct vm_area_struct *vma, *prev;
 
-	first = find_vma(mm, start);
-	if (!first)
-		return ERR_PTR(-EFAULT);
+	vma = find_vma(mm, start);
+	if (!vma)
+		return -EFAULT;
 	prev = NULL;
-	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
+	for (; vma && vma->vm_start < end; vma = vma->vm_next) {
 		unsigned long endvma = vma->vm_end;
 
 		if (endvma > end)
@@ -678,9 +677,9 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 
 		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
 			if (!vma->vm_next && vma->vm_end < end)
-				return ERR_PTR(-EFAULT);
+				return -EFAULT;
 			if (prev && prev->vm_end < vma->vm_start)
-				return ERR_PTR(-EFAULT);
+				return -EFAULT;
 		}
 
 		if (flags & MPOL_MF_LAZY) {
@@ -694,15 +693,13 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 
 			err = queue_pages_pgd_range(vma, start, endvma, nodes,
 						flags, private);
-			if (err) {
-				first = ERR_PTR(err);
+			if (err)
 				break;
-			}
 		}
 next:
 		prev = vma;
 	}
-	return first;
+	return err;
 }
 
 /*
@@ -1156,16 +1153,17 @@ out:
 
 /*
  * Allocate a new page for page migration based on vma policy.
- * Start assuming that page is mapped by vma pointed to by @private.
+ * Start by assuming the page is mapped by the same vma as contains @start.
  * Search forward from there, if not.  N.B., this assumes that the
  * list of pages handed to migrate_pages()--which is how we get here--
  * is in virtual address order.
  */
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
-	struct vm_area_struct *vma = (struct vm_area_struct *)private;
+	struct vm_area_struct *vma;
 	unsigned long uninitialized_var(address);
 
+	vma = find_vma(current->mm, start);
 	while (vma) {
 		address = page_address_in_vma(page, vma);
 		if (address != -EFAULT)
@@ -1195,7 +1193,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 	return -ENOSYS;
 }
 
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
 	return NULL;
 }
@@ -1205,7 +1203,6 @@ static long do_mbind(unsigned long start, unsigned long len,
 		     unsigned short mode, unsigned short mode_flags,
 		     nodemask_t *nmask, unsigned long flags)
 {
-	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	struct mempolicy *new;
 	unsigned long end;
@@ -1271,11 +1268,9 @@ static long do_mbind(unsigned long start, unsigned long len,
 	if (err)
 		goto mpol_out;
 
-	vma = queue_pages_range(mm, start, end, nmask,
+	err = queue_pages_range(mm, start, end, nmask,
 			  flags | MPOL_MF_INVERT, &pagelist);
-
-	err = PTR_ERR(vma);	/* maybe ... */
-	if (!IS_ERR(vma))
+	if (!err)
 		err = mbind_range(mm, start, end, new);
 
 	if (!err) {
@@ -1283,9 +1278,8 @@ static long do_mbind(unsigned long start, unsigned long len,
 
 		if (!list_empty(&pagelist)) {
 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
-			nr_failed = migrate_pages(&pagelist, new_vma_page,
-					NULL, (unsigned long)vma,
-					MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
+			nr_failed = migrate_pages(&pagelist, new_page, NULL,
+				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
 			if (nr_failed)
 				putback_movable_pages(&pagelist);
 		}
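The comment on new_page() in the hunk above relies on the pages handed to migrate_pages() being in virtual address order, so the vma lookup only ever has to move forward from where the previous page was found. Below is a standalone userspace sketch of that forward-cursor pattern, purely as an analogy; struct range, the sample addresses, and every other name here are invented for illustration and are not kernel code.

/* Walk ascending addresses against sorted ranges with a cursor that only
 * moves forward, never restarting the search from the beginning. */
#include <stdio.h>
#include <stddef.h>

struct range {				/* stand-in for a vma */
	unsigned long start, end;
};

int main(void)
{
	const struct range ranges[] = {
		{ 0x1000, 0x3000 }, { 0x5000, 0x8000 }, { 0x9000, 0xa000 },
	};
	const size_t nranges = sizeof(ranges) / sizeof(ranges[0]);
	/* ascending, like the page list migrate_pages() is given */
	const unsigned long addrs[] = { 0x1000, 0x2000, 0x5000, 0x7000, 0x9000 };

	const struct range *r = ranges;		/* forward-moving cursor */
	for (size_t i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
		/* skip ranges that end at or before this address */
		while (r < ranges + nranges && addrs[i] >= r->end)
			r++;
		if (r < ranges + nranges && addrs[i] >= r->start)
			printf("0x%lx -> range [0x%lx, 0x%lx)\n",
			       addrs[i], r->start, r->end);
	}
	return 0;
}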