Diffstat (limited to 'mm/mmap.c')
 mm/mmap.c | 96 ++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 52 insertions(+), 44 deletions(-)
@@ -20,7 +20,6 @@
 #include <linux/fs.h>
 #include <linux/personality.h>
 #include <linux/security.h>
-#include <linux/ima.h>
 #include <linux/hugetlb.h>
 #include <linux/profile.h>
 #include <linux/module.h>
@@ -932,13 +931,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	if (!(flags & MAP_FIXED))
 		addr = round_hint_to_min(addr);
 
-	error = arch_mmap_check(addr, len, flags);
-	if (error)
-		return error;
-
 	/* Careful about overflows.. */
 	len = PAGE_ALIGN(len);
-	if (!len || len > TASK_SIZE)
+	if (!len)
 		return -ENOMEM;
 
 	/* offset overflow? */
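Note: the TASK_SIZE comparison does not disappear; it moves into get_unmapped_area() (see the hunk at line 1448 below). The remaining !len test still rejects overflow on its own, because PAGE_ALIGN() on a length within a page of ULONG_MAX wraps around to zero. A minimal userspace sketch of that wrap, assuming the usual 4 KiB page size:

        /* Sketch: PAGE_ALIGN() wraps to 0 near ULONG_MAX, so the
         * "if (!len)" test also catches the overflow case. */
        #include <stdio.h>
        #include <limits.h>

        #define PAGE_SIZE     4096UL
        #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

        int main(void)
        {
                unsigned long len = ULONG_MAX - 100;    /* absurd mmap length */
                printf("%lu\n", PAGE_ALIGN(len));       /* prints 0 */
                return 0;
        }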
@@ -949,24 +944,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	if (mm->map_count > sysctl_max_map_count)
 		return -ENOMEM;
 
-	if (flags & MAP_HUGETLB) {
-		struct user_struct *user = NULL;
-		if (file)
-			return -EINVAL;
-
-		/*
-		 * VM_NORESERVE is used because the reservations will be
-		 * taken when vm_ops->mmap() is called
-		 * A dummy user value is used because we are not locking
-		 * memory so no accounting is necessary
-		 */
-		len = ALIGN(len, huge_page_size(&default_hstate));
-		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
-					  &user, HUGETLB_ANONHUGE_INODE);
-		if (IS_ERR(file))
-			return PTR_ERR(file);
-	}
-
 	/* Obtain the address to map to. we verify (or select) it and ensure
 	 * that it represents a valid section of the address space.
 	 */
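Note: the MAP_HUGETLB handling presumably moves out to the mmap_pgoff() syscall wrapper rather than vanishing, so in-kernel callers of do_mmap_pgoff() no longer pay for it. A sketch of that caller-side shape, assuming that destination (the wrapper itself is not part of this diff):

        /* Assumed caller-side handling, not shown in this diff: the
         * syscall wrapper builds the anonymous hugetlb file before
         * do_mmap_pgoff() ever sees the request. */
        if (flags & MAP_HUGETLB) {
                struct user_struct *user = NULL;

                len = ALIGN(len, huge_page_size(&default_hstate));
                file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
                                          &user, HUGETLB_ANONHUGE_INODE);
                if (IS_ERR(file))
                        return PTR_ERR(file);
        }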
@@ -1061,9 +1038,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
 	if (error)
 		return error;
-	error = ima_file_mmap(file, prot);
-	if (error)
-		return error;
 
 	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
 }
@@ -1224,8 +1198,20 @@ munmap_back:
 		goto free_vma;
 	}
 
-	if (vma_wants_writenotify(vma))
+	if (vma_wants_writenotify(vma)) {
+		pgprot_t pprot = vma->vm_page_prot;
+
+		/* Can vma->vm_page_prot have changed??
+		 *
+		 * Answer: Yes, drivers may have changed it in their
+		 *         f_op->mmap method.
+		 *
+		 * Ensures that vmas marked as uncached stay that way.
+		 */
 		vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
+		if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
+			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	}
 
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 	file = vma->vm_file;
@@ -1459,6 +1445,14 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 	unsigned long (*get_area)(struct file *, unsigned long,
 				  unsigned long, unsigned long, unsigned long);
 
+	unsigned long error = arch_mmap_check(addr, len, flags);
+	if (error)
+		return error;
+
+	/* Careful about overflows.. */
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
 	get_area = current->mm->get_unmapped_area;
 	if (file && file->f_op && file->f_op->get_unmapped_area)
 		get_area = file->f_op->get_unmapped_area;
@@ -1829,10 +1823,10 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 /*
- * Split a vma into two pieces at address 'addr', a new vma is allocated
- * either for the first part or the tail.
+ * __split_vma() bypasses sysctl_max_map_count checking.  We use this on the
+ * munmap path where it doesn't make sense to fail.
  */
-int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 	      unsigned long addr, int new_below)
 {
 	struct mempolicy *pol;
@@ -1842,9 +1836,6 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 	      ~(huge_page_mask(hstate_vma(vma)))))
 		return -EINVAL;
 
-	if (mm->map_count >= sysctl_max_map_count)
-		return -ENOMEM;
-
 	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!new)
 		return -ENOMEM;
@@ -1884,6 +1875,19 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 	return 0;
 }
 
+/*
+ * Split a vma into two pieces at address 'addr', a new vma is allocated
+ * either for the first part or the tail.
+ */
+int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+	      unsigned long addr, int new_below)
+{
+	if (mm->map_count >= sysctl_max_map_count)
+		return -ENOMEM;
+
+	return __split_vma(mm, vma, addr, new_below);
+}
+
 /* Munmap is split into 2 main parts -- this part which finds
  * what needs doing, and the areas themselves, which do the
  * work.  This now handles partial unmappings.
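Note: external callers keep the map_count limit through the split_vma() wrapper, while munmap calls __split_vma() directly. A sketch of a typical non-munmap call site (mprotect/mlock-style fixup paths, not part of this diff):

        /* Sketch: non-munmap callers still hit the cap cleanly. */
        if (start != vma->vm_start) {
                error = split_vma(mm, vma, start, 1);
                if (error)
                        return error;   /* may be -ENOMEM at the cap */
        }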
@@ -1919,7 +1923,17 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 	 * places tmp vma above, and higher split_vma places tmp vma below.
 	 */
 	if (start > vma->vm_start) {
-		int error = split_vma(mm, vma, start, 0);
+		int error;
+
+		/*
+		 * Make sure that map_count on return from munmap() will
+		 * not exceed its limit; but let map_count go just above
+		 * its limit temporarily, to help free resources as expected.
+		 */
+		if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
+			return -ENOMEM;
+
+		error = __split_vma(mm, vma, start, 0);
 		if (error)
 			return error;
 		prev = vma;
@@ -1928,7 +1942,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 	/* Does it split the last one? */
 	last = find_vma(mm, end);
 	if (last && end > last->vm_start) {
-		int error = split_vma(mm, last, end, 1);
+		int error = __split_vma(mm, last, end, 1);
 		if (error)
 			return error;
 	}
@@ -2003,20 +2017,14 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	if (!len)
 		return addr;
 
-	if ((addr + len) > TASK_SIZE || (addr + len) < addr)
-		return -EINVAL;
-
-	if (is_hugepage_only_range(mm, addr, len))
-		return -EINVAL;
-
 	error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
 	if (error)
 		return error;
 
 	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
-	error = arch_mmap_check(addr, len, flags);
-	if (error)
-		return error;
+	error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
+	if (error & ~PAGE_MASK)
+		return error;
 
 	/*
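Note: do_brk() now validates its fixed address the way mmap does. Since addr is already known, MAP_FIXED makes get_unmapped_area() a pure validation pass; one observable nuance is that an out-of-range length now yields -ENOMEM where the open-coded test returned -EINVAL. Roughly, the single call stands in for the removed logic; a sketch of the equivalence, assuming the generic path (architecture get_unmapped_area implementations are expected to cover the is_hugepage_only_range() case):

        /* Sketch: what get_unmapped_area(NULL, addr, len, 0, MAP_FIXED)
         * now performs on do_brk()'s behalf, approximately. */
        error = arch_mmap_check(addr, len, MAP_FIXED);
        if (error)
                return error;
        if (len > TASK_SIZE)
                return -ENOMEM;
        /* ...then the MAP_FIXED lookup validates addr itself. */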