Diffstat (limited to 'mm/mmap.c')
 mm/mmap.c | 60 +++++++++++++++++++++++++++++-------------------------------
 1 file changed, 29 insertions(+), 31 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index eae90af60ea6..3f758c7f4c81 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1603,39 +1603,19 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 
 EXPORT_SYMBOL(find_vma);
 
-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
+/*
+ * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
+ * Note: pprev is set to NULL when return value is NULL.
+ */
 struct vm_area_struct *
 find_vma_prev(struct mm_struct *mm, unsigned long addr,
 			struct vm_area_struct **pprev)
 {
-	struct vm_area_struct *vma = NULL, *prev = NULL;
-	struct rb_node *rb_node;
-	if (!mm)
-		goto out;
-
-	/* Guard against addr being lower than the first VMA */
-	vma = mm->mmap;
-
-	/* Go through the RB tree quickly. */
-	rb_node = mm->mm_rb.rb_node;
-
-	while (rb_node) {
-		struct vm_area_struct *vma_tmp;
-		vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
-
-		if (addr < vma_tmp->vm_end) {
-			rb_node = rb_node->rb_left;
-		} else {
-			prev = vma_tmp;
-			if (!prev->vm_next || (addr < prev->vm_next->vm_end))
-				break;
-			rb_node = rb_node->rb_right;
-		}
-	}
+	struct vm_area_struct *vma;
 
-out:
-	*pprev = prev;
-	return prev ? prev->vm_next : vma;
+	vma = find_vma(mm, addr);
+	*pprev = vma ? vma->vm_prev : NULL;
+	return vma;
 }
 
 /*
@@ -2322,13 +2302,16 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 	struct vm_area_struct *new_vma, *prev;
 	struct rb_node **rb_link, *rb_parent;
 	struct mempolicy *pol;
+	bool faulted_in_anon_vma = true;
 
 	/*
 	 * If anonymous vma has not yet been faulted, update new pgoff
 	 * to match new location, to increase its chance of merging.
 	 */
-	if (!vma->vm_file && !vma->anon_vma)
+	if (unlikely(!vma->vm_file && !vma->anon_vma)) {
 		pgoff = addr >> PAGE_SHIFT;
+		faulted_in_anon_vma = false;
+	}
 
 	find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
 	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
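The new faulted_in_anon_vma flag records whether the pgoff rebase above actually ran. The rebase is only attempted for a vma with neither vm_file nor anon_vma, i.e. anonymous memory that has never been faulted, because only then does vm_pgoff carry no rmap state and it can be re-derived from the destination address, keeping the mapping pgoff-linear so vma_merge() gets a chance. A minimal userspace sketch of that linearity arithmetic; TOY_PAGE_SHIFT and the addresses are invented for the example:

	#include <assert.h>

	#define TOY_PAGE_SHIFT 12	/* invented: assume 4 KiB pages */

	/* "Linear" vm_pgoff for pure anonymous memory: it matches vm_start. */
	static int pgoff_is_linear(unsigned long start, unsigned long pgoff)
	{
		return pgoff == (start >> TOY_PAGE_SHIFT);
	}

	int main(void)
	{
		unsigned long src = 0x400000, dst = 0x800000;	/* invented */
		unsigned long pgoff = src >> TOY_PAGE_SHIFT;

		assert(pgoff_is_linear(src, pgoff));	/* linear at the source */
		assert(!pgoff_is_linear(dst, pgoff));	/* stale after a plain move */

		pgoff = dst >> TOY_PAGE_SHIFT;		/* the rebase in the patch */
		assert(pgoff_is_linear(dst, pgoff));	/* mergeable at the destination */
		return 0;
	}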
@@ -2337,9 +2320,24 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 		/*
 		 * Source vma may have been merged into new_vma
 		 */
-		if (vma_start >= new_vma->vm_start &&
-		    vma_start < new_vma->vm_end)
+		if (unlikely(vma_start >= new_vma->vm_start &&
+			     vma_start < new_vma->vm_end)) {
+			/*
+			 * The only way we can get a vma_merge with
+			 * self during an mremap is if the vma hasn't
+			 * been faulted in yet and we were allowed to
+			 * reset the dst vma->vm_pgoff to the
+			 * destination address of the mremap to allow
+			 * the merge to happen. mremap must change the
+			 * vm_pgoff linearity between src and dst vmas
+			 * (in turn preventing a vma_merge) to be
+			 * safe. It is only safe to keep the vm_pgoff
+			 * linear if there are no pages mapped yet.
+			 */
+			VM_BUG_ON(faulted_in_anon_vma);
 			*vmap = new_vma;
+		} else
+			anon_vma_moveto_tail(new_vma);
 	} else {
 		new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 		if (new_vma) {
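The final hunk splits vma_merge()'s success path in two. Merging back into the source vma is only reachable when the pgoff rebase above was allowed, which VM_BUG_ON(faulted_in_anon_vma) asserts; any other successful merge now goes through anon_vma_moveto_tail(new_vma) to keep the src/dst rmap ordering. For illustration only, a toy model of the merged-with-self predicate, with invented addresses:

	#include <assert.h>
	#include <stdbool.h>

	/* Did vma_merge() hand back a region that swallowed the source start? */
	static bool merged_with_self(unsigned long vma_start,
				     unsigned long new_start, unsigned long new_end)
	{
		return vma_start >= new_start && vma_start < new_end;
	}

	int main(void)
	{
		bool faulted_in_anon_vma = false;	/* the rebase branch ran */

		/* Source start 0x400000 lies inside the merged [0x3ff000, 0x402000). */
		if (merged_with_self(0x400000, 0x3ff000, 0x402000))
			assert(!faulted_in_anon_vma);	/* mirrors the VM_BUG_ON */
		return 0;
	}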