diff options
Diffstat (limited to 'mm/mmap.c')
-rw-r--r-- | mm/mmap.c | 39 |
1 files changed, 31 insertions, 8 deletions
@@ -936,6 +936,19 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags, | |||
936 | #endif /* CONFIG_PROC_FS */ | 936 | #endif /* CONFIG_PROC_FS */ |
937 | 937 | ||
938 | /* | 938 | /* |
939 | * If a hint addr is less than mmap_min_addr change hint to be as | ||
940 | * low as possible but still greater than mmap_min_addr | ||
941 | */ | ||
942 | static inline unsigned long round_hint_to_min(unsigned long hint) | ||
943 | { | ||
944 | hint &= PAGE_MASK; | ||
945 | if (((void *)hint != NULL) && | ||
946 | (hint < mmap_min_addr)) | ||
947 | return PAGE_ALIGN(mmap_min_addr); | ||
948 | return hint; | ||
949 | } | ||
950 | |||
951 | /* | ||
939 | * The caller must hold down_write(&current->mm->mmap_sem). | 952 | * The caller must hold down_write(&current->mm->mmap_sem). |
940 | */ | 953 | */ |
941 | 954 | ||
@@ -1235,7 +1248,7 @@ munmap_back: | |||
1235 | */ | 1248 | */ |
1236 | if (accountable_mapping(file, vm_flags)) { | 1249 | if (accountable_mapping(file, vm_flags)) { |
1237 | charged = len >> PAGE_SHIFT; | 1250 | charged = len >> PAGE_SHIFT; |
1238 | if (security_vm_enough_memory(charged)) | 1251 | if (security_vm_enough_memory_mm(mm, charged)) |
1239 | return -ENOMEM; | 1252 | return -ENOMEM; |
1240 | vm_flags |= VM_ACCOUNT; | 1253 | vm_flags |= VM_ACCOUNT; |
1241 | } | 1254 | } |
@@ -1266,8 +1279,9 @@ munmap_back: | |||
1266 | vma->vm_pgoff = pgoff; | 1279 | vma->vm_pgoff = pgoff; |
1267 | INIT_LIST_HEAD(&vma->anon_vma_chain); | 1280 | INIT_LIST_HEAD(&vma->anon_vma_chain); |
1268 | 1281 | ||
1282 | error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */ | ||
1283 | |||
1269 | if (file) { | 1284 | if (file) { |
1270 | error = -EINVAL; | ||
1271 | if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) | 1285 | if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) |
1272 | goto free_vma; | 1286 | goto free_vma; |
1273 | if (vm_flags & VM_DENYWRITE) { | 1287 | if (vm_flags & VM_DENYWRITE) { |
@@ -1293,6 +1307,8 @@ munmap_back: | |||
1293 | pgoff = vma->vm_pgoff; | 1307 | pgoff = vma->vm_pgoff; |
1294 | vm_flags = vma->vm_flags; | 1308 | vm_flags = vma->vm_flags; |
1295 | } else if (vm_flags & VM_SHARED) { | 1309 | } else if (vm_flags & VM_SHARED) { |
1310 | if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP))) | ||
1311 | goto free_vma; | ||
1296 | error = shmem_zero_setup(vma); | 1312 | error = shmem_zero_setup(vma); |
1297 | if (error) | 1313 | if (error) |
1298 | goto free_vma; | 1314 | goto free_vma; |
@@ -1605,7 +1621,6 @@ EXPORT_SYMBOL(find_vma); | |||
1605 | 1621 | ||
1606 | /* | 1622 | /* |
1607 | * Same as find_vma, but also return a pointer to the previous VMA in *pprev. | 1623 | * Same as find_vma, but also return a pointer to the previous VMA in *pprev. |
1608 | * Note: pprev is set to NULL when return value is NULL. | ||
1609 | */ | 1624 | */ |
1610 | struct vm_area_struct * | 1625 | struct vm_area_struct * |
1611 | find_vma_prev(struct mm_struct *mm, unsigned long addr, | 1626 | find_vma_prev(struct mm_struct *mm, unsigned long addr, |
@@ -1614,7 +1629,16 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr, | |||
1614 | struct vm_area_struct *vma; | 1629 | struct vm_area_struct *vma; |
1615 | 1630 | ||
1616 | vma = find_vma(mm, addr); | 1631 | vma = find_vma(mm, addr); |
1617 | *pprev = vma ? vma->vm_prev : NULL; | 1632 | if (vma) { |
1633 | *pprev = vma->vm_prev; | ||
1634 | } else { | ||
1635 | struct rb_node *rb_node = mm->mm_rb.rb_node; | ||
1636 | *pprev = NULL; | ||
1637 | while (rb_node) { | ||
1638 | *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb); | ||
1639 | rb_node = rb_node->rb_right; | ||
1640 | } | ||
1641 | } | ||
1618 | return vma; | 1642 | return vma; |
1619 | } | 1643 | } |
1620 | 1644 | ||
@@ -2169,7 +2193,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) | |||
2169 | if (mm->map_count > sysctl_max_map_count) | 2193 | if (mm->map_count > sysctl_max_map_count) |
2170 | return -ENOMEM; | 2194 | return -ENOMEM; |
2171 | 2195 | ||
2172 | if (security_vm_enough_memory(len >> PAGE_SHIFT)) | 2196 | if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) |
2173 | return -ENOMEM; | 2197 | return -ENOMEM; |
2174 | 2198 | ||
2175 | /* Can we just expand an old private anonymous mapping? */ | 2199 | /* Can we just expand an old private anonymous mapping? */ |
@@ -2213,7 +2237,6 @@ void exit_mmap(struct mm_struct *mm) | |||
2213 | struct mmu_gather tlb; | 2237 | struct mmu_gather tlb; |
2214 | struct vm_area_struct *vma; | 2238 | struct vm_area_struct *vma; |
2215 | unsigned long nr_accounted = 0; | 2239 | unsigned long nr_accounted = 0; |
2216 | unsigned long end; | ||
2217 | 2240 | ||
2218 | /* mm's last user has gone, and its about to be pulled down */ | 2241 | /* mm's last user has gone, and its about to be pulled down */ |
2219 | mmu_notifier_release(mm); | 2242 | mmu_notifier_release(mm); |
@@ -2238,11 +2261,11 @@ void exit_mmap(struct mm_struct *mm) | |||
2238 | tlb_gather_mmu(&tlb, mm, 1); | 2261 | tlb_gather_mmu(&tlb, mm, 1); |
2239 | /* update_hiwater_rss(mm) here? but nobody should be looking */ | 2262 | /* update_hiwater_rss(mm) here? but nobody should be looking */ |
2240 | /* Use -1 here to ensure all VMAs in the mm are unmapped */ | 2263 | /* Use -1 here to ensure all VMAs in the mm are unmapped */ |
2241 | end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL); | 2264 | unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL); |
2242 | vm_unacct_memory(nr_accounted); | 2265 | vm_unacct_memory(nr_accounted); |
2243 | 2266 | ||
2244 | free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0); | 2267 | free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0); |
2245 | tlb_finish_mmu(&tlb, 0, end); | 2268 | tlb_finish_mmu(&tlb, 0, -1); |
2246 | 2269 | ||
2247 | /* | 2270 | /* |
2248 | * Walk the list again, actually closing and freeing it, | 2271 | * Walk the list again, actually closing and freeing it, |