Diffstat (limited to 'mm/mmap.c')
-rw-r--r--  mm/mmap.c  36 ++++++++++++++++++++++++++++--------
1 file changed, 28 insertions(+), 8 deletions(-)
@@ -1811,10 +1811,10 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 /*
- * Split a vma into two pieces at address 'addr', a new vma is allocated
- * either for the first part or the tail.
+ * __split_vma() bypasses sysctl_max_map_count checking.  We use this on the
+ * munmap path where it doesn't make sense to fail.
  */
-int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 	      unsigned long addr, int new_below)
 {
 	struct mempolicy *pol;
@@ -1824,9 +1824,6 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 	      ~(huge_page_mask(hstate_vma(vma)))))
 		return -EINVAL;
 
-	if (mm->map_count >= sysctl_max_map_count)
-		return -ENOMEM;
-
 	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!new)
 		return -ENOMEM;
@@ -1866,6 +1863,19 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 	return 0;
 }
 
+/*
+ * Split a vma into two pieces at address 'addr', a new vma is allocated
+ * either for the first part or the tail.
+ */
+int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+	      unsigned long addr, int new_below)
+{
+	if (mm->map_count >= sysctl_max_map_count)
+		return -ENOMEM;
+
+	return __split_vma(mm, vma, addr, new_below);
+}
+
 /* Munmap is split into 2 main parts -- this part which finds
  * what needs doing, and the areas themselves, which do the
  * work.  This now handles partial unmappings.
@@ -1901,7 +1911,17 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 	 * places tmp vma above, and higher split_vma places tmp vma below.
 	 */
 	if (start > vma->vm_start) {
-		int error = split_vma(mm, vma, start, 0);
+		int error;
+
+		/*
+		 * Make sure that map_count on return from munmap() will
+		 * not exceed its limit; but let map_count go just above
+		 * its limit temporarily, to help free resources as expected.
+		 */
+		if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
+			return -ENOMEM;
+
+		error = __split_vma(mm, vma, start, 0);
 		if (error)
 			return error;
 		prev = vma;
@@ -1910,7 +1930,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 	/* Does it split the last one? */
 	last = find_vma(mm, end);
 	if (last && end > last->vm_start) {
-		int error = split_vma(mm, last, end, 1);
+		int error = __split_vma(mm, last, end, 1);
 		if (error)
 			return error;
 	}
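
The situation the new comment in do_munmap() describes can be reproduced from userspace: a munmap() that covers only part of an existing mapping forces a VMA split, so map_count has to rise briefly even when the call as a whole frees address space. Below is a minimal illustrative sketch, not part of the patch (file layout and messages are invented); it trims the tail of a two-page mapping, the case that previously could fail with -ENOMEM once map_count had reached sysctl_max_map_count.

/*
 * Illustrative userspace sketch only. Unmapping the tail of an existing
 * mapping makes the kernel split the VMA: map_count rises by one while the
 * tail is cut off and drops back once the upper piece is removed, so the
 * net count is unchanged. Before this change the temporary split could
 * fail with ENOMEM at the sysctl_max_map_count limit even though the
 * process was releasing memory; with __split_vma() on the munmap path it
 * is allowed to go just above the limit for the duration of the unmap.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* One 2-page anonymous mapping: a single VMA. */
	char *p = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/* Trim the second page: do_munmap() splits the VMA at p + page and
	 * then removes the upper piece, leaving map_count where it started. */
	if (munmap(p + page, page) != 0) {
		perror("munmap");
		return EXIT_FAILURE;
	}

	printf("tail page unmapped; mapping trimmed without raising map_count\n");
	return EXIT_SUCCESS;
}

Note that the new end < vma->vm_end test in do_munmap() keeps the other case, punching a hole in the middle of a mapping, failing at the limit as before, since that operation leaves one more VMA behind than it started with; the limit is still honoured on return from munmap().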