path: root/mm/mmap.c
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--  mm/mmap.c  151
1 file changed, 93 insertions, 58 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index ee2298936fe6..f1b4448626bf 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -265,7 +265,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	 * segment grow beyond its set limit the in case where the limit is
 	 * not page aligned -Ram Gupta
 	 */
-	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+	rlim = rlimit(RLIMIT_DATA);
 	if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
 			(mm->end_data - mm->start_data) > rlim)
 		goto out;
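
The rlimit() call above replaces an open-coded read of current->signal->rlim[]. For reference, a minimal sketch of the accessor pair this conversion assumes (the helpers were introduced alongside this series; their exact header location is an assumption on our part):

/* Sketch of the helpers behind rlimit(); the ACCESS_ONCE read keeps
 * a concurrently running setrlimit() from being observed twice with
 * two different values during a single check. */
static inline unsigned long task_rlimit(const struct task_struct *tsk,
					unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);	/* current task's soft limit */
}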
@@ -437,7 +437,6 @@ __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	__vma_link_list(mm, vma, prev, rb_parent);
 	__vma_link_rb(mm, vma, rb_link, rb_parent);
-	__anon_vma_link(vma);
 }
 
 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -499,7 +498,7 @@ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * are necessary. The "insert" vma (if any) is to be inserted
 	 * before we drop the necessary locks.
 	 */
-void vma_adjust(struct vm_area_struct *vma, unsigned long start,
+int vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -542,6 +541,26 @@ again: remove_next = 1 + (end > next->vm_end);
 		}
 	}
 
+	/*
+	 * When changing only vma->vm_end, we don't really need anon_vma lock.
+	 */
+	if (vma->anon_vma && (insert || importer || start != vma->vm_start))
+		anon_vma = vma->anon_vma;
+	if (anon_vma) {
+		/*
+		 * Easily overlooked: when mprotect shifts the boundary,
+		 * make sure the expanding vma has anon_vma set if the
+		 * shrinking vma had, to cover any anon pages imported.
+		 */
+		if (importer && !importer->anon_vma) {
+			/* Block reverse map lookups until things are set up. */
+			if (anon_vma_clone(importer, vma)) {
+				return -ENOMEM;
+			}
+			importer->anon_vma = anon_vma;
+		}
+	}
+
 	if (file) {
 		mapping = file->f_mapping;
 		if (!(vma->vm_flags & VM_NONLINEAR))
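
vma_adjust() can now fail because anon_vma_clone() can fail: the importer inherits every anon_vma link of the vma it grows into, not just a single anon_vma pointer. A simplified, illustrative sketch of the contract assumed here (the real function lives in mm/rmap.c in the same series; helper names follow that code):

/* Illustrative only: copy each (vma, anon_vma) link of src onto dst,
 * so pages moved into dst stay reachable from every anon_vma that
 * src participates in.  Returns 0 on success, -ENOMEM on failure. */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;

	list_for_each_entry(pavc, &src->anon_vma_chain, same_vma) {
		avc = anon_vma_chain_alloc();
		if (!avc)
			goto enomem_failure;
		anon_vma_chain_link(dst, avc, pavc->anon_vma);
	}
	return 0;

 enomem_failure:
	unlink_anon_vmas(dst);		/* drop the links added so far */
	return -ENOMEM;
}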
@@ -567,25 +586,6 @@ again: remove_next = 1 + (end > next->vm_end);
 		}
 	}
 
-	/*
-	 * When changing only vma->vm_end, we don't really need
-	 * anon_vma lock.
-	 */
-	if (vma->anon_vma && (insert || importer || start != vma->vm_start))
-		anon_vma = vma->anon_vma;
-	if (anon_vma) {
-		spin_lock(&anon_vma->lock);
-		/*
-		 * Easily overlooked: when mprotect shifts the boundary,
-		 * make sure the expanding vma has anon_vma set if the
-		 * shrinking vma had, to cover any anon pages imported.
-		 */
-		if (importer && !importer->anon_vma) {
-			importer->anon_vma = anon_vma;
-			__anon_vma_link(importer);
-		}
-	}
-
 	if (root) {
 		flush_dcache_mmap_lock(mapping);
 		vma_prio_tree_remove(vma, root);
@@ -616,8 +616,6 @@ again: remove_next = 1 + (end > next->vm_end);
 			__vma_unlink(mm, next, vma);
 		if (file)
 			__remove_shared_vm_struct(next, file, mapping);
-		if (next->anon_vma)
-			__anon_vma_merge(vma, next);
 	} else if (insert) {
 		/*
 		 * split_vma has split insert from vma, and needs
@@ -627,8 +625,6 @@ again: remove_next = 1 + (end > next->vm_end);
 		__insert_vm_struct(mm, insert);
 	}
 
-	if (anon_vma)
-		spin_unlock(&anon_vma->lock);
 	if (mapping)
 		spin_unlock(&mapping->i_mmap_lock);
 
@@ -638,6 +634,8 @@ again: remove_next = 1 + (end > next->vm_end);
 			if (next->vm_flags & VM_EXECUTABLE)
 				removed_exe_file_vma(mm);
 		}
+		if (next->anon_vma)
+			anon_vma_merge(vma, next);
 		mm->map_count--;
 		mpol_put(vma_policy(next));
 		kmem_cache_free(vm_area_cachep, next);
@@ -653,6 +651,8 @@ again: remove_next = 1 + (end > next->vm_end);
 	}
 
 	validate_mm(mm);
+
+	return 0;
 }
 
 /*
@@ -759,6 +759,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 {
 	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
 	struct vm_area_struct *area, *next;
+	int err;
 
 	/*
 	 * We later require that vma->vm_flags == vm_flags,
@@ -792,11 +793,13 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 				is_mergeable_anon_vma(prev->anon_vma,
 						      next->anon_vma)) {
 							/* cases 1, 6 */
-			vma_adjust(prev, prev->vm_start,
-				next->vm_end, prev->vm_pgoff, NULL);
+			err = vma_adjust(prev, prev->vm_start,
+				next->vm_end, prev->vm_pgoff, NULL);
 		} else					/* cases 2, 5, 7 */
-			vma_adjust(prev, prev->vm_start,
-				end, prev->vm_pgoff, NULL);
+			err = vma_adjust(prev, prev->vm_start,
+				end, prev->vm_pgoff, NULL);
+		if (err)
+			return NULL;
 		return prev;
 	}
 
@@ -808,11 +811,13 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 			can_vma_merge_before(next, vm_flags,
 					anon_vma, file, pgoff+pglen)) {
 		if (prev && addr < prev->vm_end)	/* case 4 */
-			vma_adjust(prev, prev->vm_start,
-				addr, prev->vm_pgoff, NULL);
+			err = vma_adjust(prev, prev->vm_start,
+				addr, prev->vm_pgoff, NULL);
 		else					/* cases 3, 8 */
-			vma_adjust(area, addr, next->vm_end,
-				next->vm_pgoff - pglen, NULL);
+			err = vma_adjust(area, addr, next->vm_end,
+				next->vm_pgoff - pglen, NULL);
+		if (err)
+			return NULL;
 		return area;
 	}
 
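
With vma_adjust() now returning int, vma_merge() folds any failure into its existing NULL return, so callers need no new error path: they already fall back to building a fresh vma. A hedged sketch of that caller pattern, modeled on mmap_region() in this file (the fallback body is elided):

	/* A NULL result now also covers -ENOMEM from vma_adjust();
	 * the caller allocates a new vma exactly as before. */
	vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL,
			file, pgoff, NULL);
	if (!vma) {
		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
		if (!vma)
			return -ENOMEM;
		/* ... initialize and link the new vma ... */
	}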
@@ -967,7 +972,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 		unsigned long locked, lock_limit;
 		locked = len >> PAGE_SHIFT;
 		locked += mm->locked_vm;
-		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+		lock_limit = rlimit(RLIMIT_MEMLOCK);
 		lock_limit >>= PAGE_SHIFT;
 		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
 			return -EAGAIN;
@@ -1205,6 +1210,7 @@ munmap_back:
 	vma->vm_flags = vm_flags;
 	vma->vm_page_prot = vm_get_page_prot(vm_flags);
 	vma->vm_pgoff = pgoff;
+	INIT_LIST_HEAD(&vma->anon_vma_chain);
 
 	if (file) {
 		error = -EINVAL;
@@ -1265,13 +1271,8 @@ out:
 	mm->total_vm += len >> PAGE_SHIFT;
 	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
 	if (vm_flags & VM_LOCKED) {
-		/*
-		 * makes pages present; downgrades, drops, reacquires mmap_sem
-		 */
-		long nr_pages = mlock_vma_pages_range(vma, addr, addr + len);
-		if (nr_pages < 0)
-			return nr_pages;	/* vma gone! */
-		mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages;
+		if (!mlock_vma_pages_range(vma, addr, addr + len))
+			mm->locked_vm += (len >> PAGE_SHIFT);
 	} else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
 		make_pages_present(addr, addr + len);
 	return addr;
@@ -1599,7 +1600,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 		return -ENOMEM;
 
 	/* Stack limit test */
-	if (size > rlim[RLIMIT_STACK].rlim_cur)
+	if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
 		return -ENOMEM;
 
 	/* mlock limit tests */
@@ -1607,7 +1608,8 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 		unsigned long locked;
 		unsigned long limit;
 		locked = mm->locked_vm + grow;
-		limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+		limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
+		limit >>= PAGE_SHIFT;
 		if (locked > limit && !capable(CAP_IPC_LOCK))
 			return -ENOMEM;
 	}
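
ACCESS_ONCE() forces a single read of the limit word, since another thread can rewrite it via setrlimit() while this check runs; without it, the compiler may reload the value and compare against two different limits. The macro, as defined in include/linux/compiler.h of this vintage:

/* Read x exactly once through a volatile-qualified cast, preventing
 * the compiler from reloading (and re-evaluating) the value. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))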
@@ -1754,8 +1756,7 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 	if (!prev || expand_stack(prev, addr))
 		return NULL;
 	if (prev->vm_flags & VM_LOCKED) {
-		if (mlock_vma_pages_range(prev, addr, prev->vm_end) < 0)
-			return NULL;	/* vma gone! */
+		mlock_vma_pages_range(prev, addr, prev->vm_end);
 	}
 	return prev;
 }
@@ -1783,8 +1784,7 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
 	if (expand_stack(vma, addr))
 		return NULL;
 	if (vma->vm_flags & VM_LOCKED) {
-		if (mlock_vma_pages_range(vma, addr, start) < 0)
-			return NULL;	/* vma gone! */
+		mlock_vma_pages_range(vma, addr, start);
 	}
 	return vma;
 }
@@ -1871,6 +1871,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 {
 	struct mempolicy *pol;
 	struct vm_area_struct *new;
+	int err = -ENOMEM;
 
 	if (is_vm_hugetlb_page(vma) && (addr &
 					~(huge_page_mask(hstate_vma(vma)))))
@@ -1878,11 +1879,13 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 
 	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!new)
-		return -ENOMEM;
+		goto out_err;
 
 	/* most fields are the same, copy all, and then fixup */
 	*new = *vma;
 
+	INIT_LIST_HEAD(&new->anon_vma_chain);
+
 	if (new_below)
 		new->vm_end = addr;
 	else {
@@ -1892,11 +1895,14 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 
 	pol = mpol_dup(vma_policy(vma));
 	if (IS_ERR(pol)) {
-		kmem_cache_free(vm_area_cachep, new);
-		return PTR_ERR(pol);
+		err = PTR_ERR(pol);
+		goto out_free_vma;
 	}
 	vma_set_policy(new, pol);
 
+	if (anon_vma_clone(new, vma))
+		goto out_free_mpol;
+
 	if (new->vm_file) {
 		get_file(new->vm_file);
 		if (vma->vm_flags & VM_EXECUTABLE)
@@ -1907,12 +1913,28 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 		new->vm_ops->open(new);
 
 	if (new_below)
-		vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
-			((addr - new->vm_start) >> PAGE_SHIFT), new);
+		err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
+			((addr - new->vm_start) >> PAGE_SHIFT), new);
 	else
-		vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
+		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
 
-	return 0;
+	/* Success. */
+	if (!err)
+		return 0;
+
+	/* Clean everything up if vma_adjust failed. */
+	new->vm_ops->close(new);
+	if (new->vm_file) {
+		if (vma->vm_flags & VM_EXECUTABLE)
+			removed_exe_file_vma(mm);
+		fput(new->vm_file);
+	}
+ out_free_mpol:
+	mpol_put(pol);
+ out_free_vma:
+	kmem_cache_free(vm_area_cachep, new);
+ out_err:
+	return err;
 }
 
 /*
@@ -2074,7 +2096,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 		unsigned long locked, lock_limit;
 		locked = len >> PAGE_SHIFT;
 		locked += mm->locked_vm;
-		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+		lock_limit = rlimit(RLIMIT_MEMLOCK);
 		lock_limit >>= PAGE_SHIFT;
 		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
 			return -EAGAIN;
@@ -2122,6 +2144,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 		return -ENOMEM;
 	}
 
+	INIT_LIST_HEAD(&vma->anon_vma_chain);
 	vma->vm_mm = mm;
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
@@ -2258,10 +2281,11 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 	if (new_vma) {
 		*new_vma = *vma;
 		pol = mpol_dup(vma_policy(vma));
-		if (IS_ERR(pol)) {
-			kmem_cache_free(vm_area_cachep, new_vma);
-			return NULL;
-		}
+		if (IS_ERR(pol))
+			goto out_free_vma;
+		INIT_LIST_HEAD(&new_vma->anon_vma_chain);
+		if (anon_vma_clone(new_vma, vma))
+			goto out_free_mempol;
 		vma_set_policy(new_vma, pol);
 		new_vma->vm_start = addr;
 		new_vma->vm_end = addr + len;
@@ -2277,6 +2301,12 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 		}
 	}
 	return new_vma;
+
+ out_free_mempol:
+	mpol_put(pol);
+ out_free_vma:
+	kmem_cache_free(vm_area_cachep, new_vma);
+	return NULL;
 }
 
 /*
@@ -2288,7 +2318,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
 	unsigned long cur = mm->total_vm;	/* pages */
 	unsigned long lim;
 
-	lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+	lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
 
 	if (cur + npages > lim)
 		return 0;
@@ -2354,6 +2384,7 @@ int install_special_mapping(struct mm_struct *mm,
 	if (unlikely(vma == NULL))
 		return -ENOMEM;
 
+	INIT_LIST_HEAD(&vma->anon_vma_chain);
 	vma->vm_mm = mm;
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
@@ -2454,6 +2485,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
 int mm_take_all_locks(struct mm_struct *mm)
 {
 	struct vm_area_struct *vma;
+	struct anon_vma_chain *avc;
 	int ret = -EINTR;
 
 	BUG_ON(down_read_trylock(&mm->mmap_sem));
@@ -2471,7 +2503,8 @@ int mm_take_all_locks(struct mm_struct *mm)
 		if (signal_pending(current))
 			goto out_unlock;
 		if (vma->anon_vma)
-			vm_lock_anon_vma(mm, vma->anon_vma);
+			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+				vm_lock_anon_vma(mm, avc->anon_vma);
 	}
 
 	ret = 0;
@@ -2526,13 +2559,15 @@ static void vm_unlock_mapping(struct address_space *mapping)
 void mm_drop_all_locks(struct mm_struct *mm)
 {
 	struct vm_area_struct *vma;
+	struct anon_vma_chain *avc;
 
 	BUG_ON(down_read_trylock(&mm->mmap_sem));
 	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
 
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		if (vma->anon_vma)
-			vm_unlock_anon_vma(vma->anon_vma);
+			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+				vm_unlock_anon_vma(avc->anon_vma);
 		if (vma->vm_file && vma->vm_file->f_mapping)
 			vm_unlock_mapping(vma->vm_file->f_mapping);
 	}
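
For context, each anon_vma_chain entry walked above links one vma to one anon_vma: a vma keeps its entries on vma->anon_vma_chain (the same_vma list used here), and each anon_vma keeps the mirror list. A sketch of the structure as this series defines it in include/linux/rmap.h (the field comments are ours):

struct anon_vma_chain {
	struct vm_area_struct *vma;	/* the vma this link belongs to */
	struct anon_vma *anon_vma;	/* the anon_vma it points into */
	struct list_head same_vma;	/* entry in vma->anon_vma_chain */
	struct list_head same_anon_vma;	/* entry in the anon_vma's list */
};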