Diffstat (limited to 'mm/mmap.c'):

 mm/mmap.c | 158 +++++++++++++++++++++++++++-----------------------
 1 file changed, 77 insertions(+), 81 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 848ef52d9603..3edfcdfa42d9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -30,6 +30,7 @@
 #include <linux/perf_event.h>
 #include <linux/audit.h>
 #include <linux/khugepaged.h>
+#include <linux/uprobes.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -546,8 +547,15 @@ again: remove_next = 1 + (end > next->vm_end);
 
 	if (file) {
 		mapping = file->f_mapping;
-		if (!(vma->vm_flags & VM_NONLINEAR))
+		if (!(vma->vm_flags & VM_NONLINEAR)) {
 			root = &mapping->i_mmap;
+			uprobe_munmap(vma, vma->vm_start, vma->vm_end);
+
+			if (adjust_next)
+				uprobe_munmap(next, next->vm_start,
+							next->vm_end);
+		}
+
 		mutex_lock(&mapping->i_mmap_mutex);
 		if (insert) {
 			/*
@@ -617,8 +625,16 @@ again: remove_next = 1 + (end > next->vm_end);
 	if (mapping)
 		mutex_unlock(&mapping->i_mmap_mutex);
 
+	if (root) {
+		uprobe_mmap(vma);
+
+		if (adjust_next)
+			uprobe_mmap(next);
+	}
+
 	if (remove_next) {
 		if (file) {
+			uprobe_munmap(next, next->vm_start, next->vm_end);
 			fput(file);
 			if (next->vm_flags & VM_EXECUTABLE)
 				removed_exe_file_vma(mm);
@@ -638,6 +654,8 @@ again: remove_next = 1 + (end > next->vm_end);
 			goto again;
 		}
 	}
+	if (insert && file)
+		uprobe_mmap(insert);
 
 	validate_mm(mm);
 
@@ -953,15 +971,13 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
  * The caller must hold down_write(&current->mm->mmap_sem).
  */
 
-static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 			unsigned long len, unsigned long prot,
 			unsigned long flags, unsigned long pgoff)
 {
 	struct mm_struct * mm = current->mm;
 	struct inode *inode;
 	vm_flags_t vm_flags;
-	int error;
-	unsigned long reqprot = prot;
 
 	/*
 	 * Does the application expect PROT_READ to imply PROT_EXEC?
@@ -1083,39 +1099,9 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 		}
 	}
 
-	error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
-	if (error)
-		return error;
-
 	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
 }
 
-unsigned long do_mmap(struct file *file, unsigned long addr,
-	unsigned long len, unsigned long prot,
-	unsigned long flag, unsigned long offset)
-{
-	if (unlikely(offset + PAGE_ALIGN(len) < offset))
-		return -EINVAL;
-	if (unlikely(offset & ~PAGE_MASK))
-		return -EINVAL;
-	return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
-}
-EXPORT_SYMBOL(do_mmap);
-
-unsigned long vm_mmap(struct file *file, unsigned long addr,
-	unsigned long len, unsigned long prot,
-	unsigned long flag, unsigned long offset)
-{
-	unsigned long ret;
-	struct mm_struct *mm = current->mm;
-
-	down_write(&mm->mmap_sem);
-	ret = do_mmap(file, addr, len, prot, flag, offset);
-	up_write(&mm->mmap_sem);
-	return ret;
-}
-EXPORT_SYMBOL(vm_mmap);
-
 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 		unsigned long, prot, unsigned long, flags,
 		unsigned long, fd, unsigned long, pgoff)
@@ -1147,10 +1133,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 
 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 
-	down_write(&current->mm->mmap_sem);
-	retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
+	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
 	if (file)
 		fput(file);
 out:
@@ -1371,6 +1354,11 @@ out:
 		mm->locked_vm += (len >> PAGE_SHIFT);
 	} else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
 		make_pages_present(addr, addr + len);
+
+	if (file && uprobe_mmap(vma))
+		/* matching probes but cannot insert */
+		goto unmap_and_free_vma;
+
 	return addr;
 
 unmap_and_free_vma:
@@ -1606,7 +1594,9 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 	if (addr & ~PAGE_MASK)
 		return -EINVAL;
 
-	return arch_rebalance_pgtables(addr, len);
+	addr = arch_rebalance_pgtables(addr, len);
+	error = security_mmap_addr(addr);
+	return error ? error : addr;
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
@@ -1616,33 +1606,34 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
 	struct vm_area_struct *vma = NULL;
 
-	if (mm) {
-		/* Check the cache first. */
-		/* (Cache hit rate is typically around 35%.) */
-		vma = mm->mmap_cache;
-		if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
-			struct rb_node * rb_node;
-
-			rb_node = mm->mm_rb.rb_node;
-			vma = NULL;
-
-			while (rb_node) {
-				struct vm_area_struct * vma_tmp;
-
-				vma_tmp = rb_entry(rb_node,
-						struct vm_area_struct, vm_rb);
-
-				if (vma_tmp->vm_end > addr) {
-					vma = vma_tmp;
-					if (vma_tmp->vm_start <= addr)
-						break;
-					rb_node = rb_node->rb_left;
-				} else
-					rb_node = rb_node->rb_right;
-			}
-			if (vma)
-				mm->mmap_cache = vma;
-		}
+	if (WARN_ON_ONCE(!mm))		/* Remove this in linux-3.6 */
+		return NULL;
+
+	/* Check the cache first. */
+	/* (Cache hit rate is typically around 35%.) */
+	vma = mm->mmap_cache;
+	if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
+		struct rb_node *rb_node;
+
+		rb_node = mm->mm_rb.rb_node;
+		vma = NULL;
+
+		while (rb_node) {
+			struct vm_area_struct *vma_tmp;
+
+			vma_tmp = rb_entry(rb_node,
+					   struct vm_area_struct, vm_rb);
+
+			if (vma_tmp->vm_end > addr) {
+				vma = vma_tmp;
+				if (vma_tmp->vm_start <= addr)
+					break;
+				rb_node = rb_node->rb_left;
+			} else
+				rb_node = rb_node->rb_right;
+		}
+		if (vma)
+			mm->mmap_cache = vma;
 	}
 	return vma;
 }
@@ -1795,7 +1786,7 @@ int expand_downwards(struct vm_area_struct *vma,
 		return -ENOMEM;
 
 	address &= PAGE_MASK;
-	error = security_file_mmap(NULL, 0, 0, 0, address, 1);
+	error = security_mmap_addr(address);
 	if (error)
 		return error;
 
@@ -1889,15 +1880,20 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
  */
 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 {
+	unsigned long nr_accounted = 0;
+
 	/* Update high watermark before we lower total_vm */
 	update_hiwater_vm(mm);
 	do {
 		long nrpages = vma_pages(vma);
 
+		if (vma->vm_flags & VM_ACCOUNT)
+			nr_accounted += nrpages;
 		mm->total_vm -= nrpages;
 		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
 		vma = remove_vma(vma);
 	} while (vma);
+	vm_unacct_memory(nr_accounted);
 	validate_mm(mm);
 }
 
@@ -1912,13 +1908,11 @@ static void unmap_region(struct mm_struct *mm,
 {
 	struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
 	struct mmu_gather tlb;
-	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
-	vm_unacct_memory(nr_accounted);
+	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 		      next ? next->vm_start : 0);
 	tlb_finish_mmu(&tlb, start, end);
@@ -2132,7 +2126,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 
 	return 0;
 }
-EXPORT_SYMBOL(do_munmap);
 
 int vm_munmap(unsigned long start, size_t len)
 {
@@ -2180,10 +2173,6 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
 	if (!len)
 		return addr;
 
-	error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
-	if (error)
-		return error;
-
 	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
 	error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
@@ -2305,8 +2294,7 @@ void exit_mmap(struct mm_struct *mm)
 	tlb_gather_mmu(&tlb, mm, 1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
-	vm_unacct_memory(nr_accounted);
+	unmap_vmas(&tlb, vma, 0, -1);
 
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(&tlb, 0, -1);
@@ -2315,8 +2303,12 @@ void exit_mmap(struct mm_struct *mm)
 	 * Walk the list again, actually closing and freeing it,
 	 * with preemption enabled, without holding any MM locks.
 	 */
-	while (vma)
+	while (vma) {
+		if (vma->vm_flags & VM_ACCOUNT)
+			nr_accounted += vma_pages(vma);
 		vma = remove_vma(vma);
+	}
+	vm_unacct_memory(nr_accounted);
 
 	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
@@ -2352,6 +2344,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 	if ((vma->vm_flags & VM_ACCOUNT) &&
 	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
 		return -ENOMEM;
+
+	if (vma->vm_file && uprobe_mmap(vma))
+		return -EINVAL;
+
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 	return 0;
 }
@@ -2421,6 +2417,10 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 	new_vma->vm_pgoff = pgoff;
 	if (new_vma->vm_file) {
 		get_file(new_vma->vm_file);
+
+		if (uprobe_mmap(new_vma))
+			goto out_free_mempol;
+
 		if (vma->vm_flags & VM_EXECUTABLE)
 			added_exe_file_vma(mm);
 	}
@@ -2525,10 +2525,6 @@ int install_special_mapping(struct mm_struct *mm,
 	vma->vm_ops = &special_mapping_vmops;
 	vma->vm_private_data = pages;
 
-	ret = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
-	if (ret)
-		goto out;
-
 	ret = insert_vm_struct(mm, vma);
 	if (ret)
 		goto out;
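
Note on the locking change in sys_mmap_pgoff() above: the syscall no longer open-codes down_write(&mm->mmap_sem) around do_mmap_pgoff() but calls vm_mmap_pgoff(), whose definition is not part of this diff (it presumably lives elsewhere in the same series, e.g. mm/util.c). A minimal sketch of such a wrapper, modeled directly on the removed vm_mmap() body above; the real helper may differ, for instance by performing a security check before taking the lock:

/*
 * Sketch only: vm_mmap_pgoff() is assumed here, reconstructed from the
 * locking pattern that sys_mmap_pgoff() used to open-code.  It serializes
 * the mapping operation under the mm's mmap_sem writer lock so callers
 * no longer need to take the semaphore themselves.
 */
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
			    unsigned long len, unsigned long prot,
			    unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);
	ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff);
	up_write(&mm->mmap_sem);
	return ret;
}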