Diffstat (limited to 'mm/mmap.c')
-rw-r--r--  mm/mmap.c  74
1 file changed, 50 insertions, 24 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index 456ec6f27889..6128dc8e5ede 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -388,17 +388,23 @@ static inline void
 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                 struct vm_area_struct *prev, struct rb_node *rb_parent)
 {
+        struct vm_area_struct *next;
+
+        vma->vm_prev = prev;
         if (prev) {
-                vma->vm_next = prev->vm_next;
+                next = prev->vm_next;
                 prev->vm_next = vma;
         } else {
                 mm->mmap = vma;
                 if (rb_parent)
-                        vma->vm_next = rb_entry(rb_parent,
+                        next = rb_entry(rb_parent,
                                         struct vm_area_struct, vm_rb);
                 else
-                        vma->vm_next = NULL;
+                        next = NULL;
         }
+        vma->vm_next = next;
+        if (next)
+                next->vm_prev = vma;
 }
 
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -452,12 +458,10 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
                 spin_lock(&mapping->i_mmap_lock);
                 vma->vm_truncate_count = mapping->truncate_count;
         }
-        anon_vma_lock(vma);
 
         __vma_link(mm, vma, prev, rb_link, rb_parent);
         __vma_link_file(vma);
 
-        anon_vma_unlock(vma);
         if (mapping)
                 spin_unlock(&mapping->i_mmap_lock);
 
@@ -485,7 +489,11 @@ static inline void
 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
                 struct vm_area_struct *prev)
 {
-        prev->vm_next = vma->vm_next;
+        struct vm_area_struct *next = vma->vm_next;
+
+        prev->vm_next = next;
+        if (next)
+                next->vm_prev = prev;
         rb_erase(&vma->vm_rb, &mm->mm_rb);
         if (mm->mmap_cache == vma)
                 mm->mmap_cache = prev;
@@ -506,6 +514,7 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start,
         struct vm_area_struct *importer = NULL;
         struct address_space *mapping = NULL;
         struct prio_tree_root *root = NULL;
+        struct anon_vma *anon_vma = NULL;
         struct file *file = vma->vm_file;
         long adjust_next = 0;
         int remove_next = 0;
@@ -578,6 +587,17 @@ again: remove_next = 1 + (end > next->vm_end);
                 }
         }
 
+        /*
+         * When changing only vma->vm_end, we don't really need anon_vma
+         * lock. This is a fairly rare case by itself, but the anon_vma
+         * lock may be shared between many sibling processes. Skipping
+         * the lock for brk adjustments makes a difference sometimes.
+         */
+        if (vma->anon_vma && (insert || importer || start != vma->vm_start)) {
+                anon_vma = vma->anon_vma;
+                anon_vma_lock(anon_vma);
+        }
+
         if (root) {
                 flush_dcache_mmap_lock(mapping);
                 vma_prio_tree_remove(vma, root);
@@ -617,6 +637,8 @@ again: remove_next = 1 + (end > next->vm_end);
                 __insert_vm_struct(mm, insert);
         }
 
+        if (anon_vma)
+                anon_vma_unlock(anon_vma);
         if (mapping)
                 spin_unlock(&mapping->i_mmap_lock);
 
@@ -1694,9 +1716,6 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
  * vma is the last one with address > vma->vm_end. Have to extend vma.
  */
-#ifndef CONFIG_IA64
-static
-#endif
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
         int error;
@@ -1710,7 +1729,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
          */
         if (unlikely(anon_vma_prepare(vma)))
                 return -ENOMEM;
-        anon_vma_lock(vma);
+        vma_lock_anon_vma(vma);
 
         /*
          * vma->vm_start/vm_end cannot change under us because the caller
@@ -1721,7 +1740,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
         if (address < PAGE_ALIGN(address+4))
                 address = PAGE_ALIGN(address+4);
         else {
-                anon_vma_unlock(vma);
+                vma_unlock_anon_vma(vma);
                 return -ENOMEM;
         }
         error = 0;
@@ -1734,10 +1753,12 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                 grow = (address - vma->vm_end) >> PAGE_SHIFT;
 
                 error = acct_stack_growth(vma, size, grow);
-                if (!error)
+                if (!error) {
                         vma->vm_end = address;
+                        perf_event_mmap(vma);
+                }
         }
-        anon_vma_unlock(vma);
+        vma_unlock_anon_vma(vma);
         return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -1762,7 +1783,7 @@ static int expand_downwards(struct vm_area_struct *vma,
         if (error)
                 return error;
 
-        anon_vma_lock(vma);
+        vma_lock_anon_vma(vma);
 
         /*
          * vma->vm_start/vm_end cannot change under us because the caller
@@ -1781,9 +1802,10 @@ static int expand_downwards(struct vm_area_struct *vma,
                 if (!error) {
                         vma->vm_start = address;
                         vma->vm_pgoff -= grow;
+                        perf_event_mmap(vma);
                 }
         }
-        anon_vma_unlock(vma);
+        vma_unlock_anon_vma(vma);
         return error;
 }
 
@@ -1900,6 +1922,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
         unsigned long addr;
 
         insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+        vma->vm_prev = NULL;
         do {
                 rb_erase(&vma->vm_rb, &mm->mm_rb);
                 mm->map_count--;
@@ -1907,6 +1930,8 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
                 vma = vma->vm_next;
         } while (vma && vma->vm_start < end);
         *insertion_point = vma;
+        if (vma)
+                vma->vm_prev = prev;
         tail_vma->vm_next = NULL;
         if (mm->unmap_area == arch_unmap_area)
                 addr = prev ? prev->vm_end : mm->mmap_base;
@@ -2208,6 +2233,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
         vma->vm_page_prot = vm_get_page_prot(flags);
         vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
+        perf_event_mmap(vma);
         mm->total_vm += len >> PAGE_SHIFT;
         if (flags & VM_LOCKED) {
                 if (!mlock_vma_pages_range(vma, addr, addr + len))
@@ -2466,23 +2492,23 @@ static DEFINE_MUTEX(mm_all_locks_mutex);
 
 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 {
-        if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) {
+        if (!test_bit(0, (unsigned long *) &anon_vma->root->head.next)) {
                 /*
                  * The LSB of head.next can't change from under us
                  * because we hold the mm_all_locks_mutex.
                  */
-                spin_lock_nest_lock(&anon_vma->lock, &mm->mmap_sem);
+                spin_lock_nest_lock(&anon_vma->root->lock, &mm->mmap_sem);
                 /*
                  * We can safely modify head.next after taking the
-                 * anon_vma->lock. If some other vma in this mm shares
+                 * anon_vma->root->lock. If some other vma in this mm shares
                  * the same anon_vma we won't take it again.
                  *
                  * No need of atomic instructions here, head.next
                  * can't change from under us thanks to the
-                 * anon_vma->lock.
+                 * anon_vma->root->lock.
                  */
                 if (__test_and_set_bit(0, (unsigned long *)
-                                       &anon_vma->head.next))
+                                       &anon_vma->root->head.next))
                         BUG();
         }
 }
@@ -2573,7 +2599,7 @@ out_unlock:
 
 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
 {
-        if (test_bit(0, (unsigned long *) &anon_vma->head.next)) {
+        if (test_bit(0, (unsigned long *) &anon_vma->root->head.next)) {
                 /*
                  * The LSB of head.next can't change to 0 from under
                  * us because we hold the mm_all_locks_mutex.
@@ -2584,12 +2610,12 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
                  *
                  * No need of atomic instructions here, head.next
                  * can't change from under us until we release the
-                 * anon_vma->lock.
+                 * anon_vma->root->lock.
                  */
                 if (!__test_and_clear_bit(0, (unsigned long *)
-                                          &anon_vma->head.next))
+                                          &anon_vma->root->head.next))
                         BUG();
-                spin_unlock(&anon_vma->lock);
+                anon_vma_unlock(anon_vma);
         }
 }
 