Diffstat (limited to 'mm/mmap.c')
-rw-r--r--	mm/mmap.c	83
1 file changed, 49 insertions(+), 34 deletions(-)
@@ -144,7 +144,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 		 */
 		free -= global_page_state(NR_SHMEM);
 
-		free += nr_swap_pages;
+		free += get_nr_swap_pages();
 
 		/*
 		 * Any slabs which are created with the
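
Aside: the switch to get_nr_swap_pages() tracks a separate change that made nr_swap_pages an atomic counter (previously a plain long updated under the swap lock). The accessor in <linux/swap.h> is a one-line atomic read; a minimal sketch, assuming the atomic_long_t conversion:

static inline long get_nr_swap_pages(void)
{
	/* consistent snapshot for lockless readers like __vm_enough_memory() */
	return atomic_long_read(&nr_swap_pages);
}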
@@ -256,6 +256,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	unsigned long newbrk, oldbrk;
 	struct mm_struct *mm = current->mm;
 	unsigned long min_brk;
+	bool populate;
 
 	down_write(&mm->mmap_sem);
 
@@ -305,8 +306,15 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	/* Ok, looks good - let it rip. */
 	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
 		goto out;
+
 set_brk:
 	mm->brk = brk;
+	populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
+	up_write(&mm->mmap_sem);
+	if (populate)
+		mm_populate(oldbrk, newbrk - oldbrk);
+	return brk;
+
 out:
 	retval = mm->brk;
 	up_write(&mm->mmap_sem);
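
The point of the brk() rework above: the populate decision is taken while mmap_sem is still held, but the actual fault-in via mm_populate() happens only after up_write(), so page faults no longer run under the write lock. Userspace semantics are unchanged; a minimal demo of the path that now triggers population (Linux-specific, assumes a 4096-byte page size, error handling mostly elided):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* MCL_FUTURE sets VM_LOCKED in mm->def_flags, so the brk()
	 * path above takes the new populate branch. */
	if (mlockall(MCL_CURRENT | MCL_FUTURE))
		perror("mlockall");

	void *old = sbrk(0);
	sbrk(16 * 4096);	/* heap grows; kernel runs mm_populate() */
	printf("heap grew from %p to %p\n", old, sbrk(0));
	return 0;
}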
@@ -801,7 +809,7 @@ again:		remove_next = 1 + (end > next->vm_end);
 		anon_vma_interval_tree_post_update_vma(vma);
 		if (adjust_next)
 			anon_vma_interval_tree_post_update_vma(next);
-		anon_vma_unlock(anon_vma);
+		anon_vma_unlock_write(anon_vma);
 	}
 	if (mapping)
 		mutex_unlock(&mapping->i_mmap_mutex);
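
anon_vma_unlock() becomes anon_vma_unlock_write() as part of the conversion of the anon_vma root lock from a mutex to an rwsem, which gains separate read-side and write-side helpers. The renamed helper should be roughly this <linux/rmap.h> one-liner (quoted from memory, not verbatim):

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
	up_write(&anon_vma->root->rwsem);
}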
@@ -1154,12 +1162,15 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
 
 unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 			unsigned long len, unsigned long prot,
-			unsigned long flags, unsigned long pgoff)
+			unsigned long flags, unsigned long pgoff,
+			unsigned long *populate)
 {
 	struct mm_struct * mm = current->mm;
 	struct inode *inode;
 	vm_flags_t vm_flags;
 
+	*populate = 0;
+
 	/*
 	 * Does the application expect PROT_READ to imply PROT_EXEC?
 	 *
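
The new *populate out-parameter is the heart of the series: do_mmap_pgoff() no longer populates anything itself, it only reports how many bytes the caller should prefault once it has dropped mmap_sem. A caller then looks like the vm_mmap_pgoff() from the same series; this is a sketch from memory, so treat the security hook placement as an assumption:

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);	/* no locks held */
	}
	return ret;
}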
@@ -1280,7 +1291,24 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 		}
 	}
 
-	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
+	/*
+	 * Set 'VM_NORESERVE' if we should not account for the
+	 * memory use of this mapping.
+	 */
+	if (flags & MAP_NORESERVE) {
+		/* We honor MAP_NORESERVE if allowed to overcommit */
+		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
+			vm_flags |= VM_NORESERVE;
+
+		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
+		if (file && is_file_hugepages(file))
+			vm_flags |= VM_NORESERVE;
+	}
+
+	addr = mmap_region(file, addr, len, vm_flags, pgoff);
+	if (!IS_ERR_VALUE(addr) && (vm_flags & VM_POPULATE))
+		*populate = len;
+	return addr;
 }
 
 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
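
Two policies move into do_mmap_pgoff() here: the MAP_NORESERVE handling (lifted out of mmap_region(), see the removal below) and the populate decision, carried by the transient VM_POPULATE vma flag instead of re-checking MAP_POPULATE/MAP_NONBLOCK at each call site. For reference, the overcommit modes tested above are defined in <linux/mman.h>:

#define OVERCOMMIT_GUESS	0	/* heuristic overcommit (default) */
#define OVERCOMMIT_ALWAYS	1	/* never refuse an allocation */
#define OVERCOMMIT_NEVER	2	/* strict commit accounting */

Under OVERCOMMIT_NEVER, MAP_NORESERVE is deliberately ignored so that strict accounting stays strict.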
@@ -1395,8 +1423,7 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
 }
 
 unsigned long mmap_region(struct file *file, unsigned long addr,
-		unsigned long len, unsigned long flags,
-		vm_flags_t vm_flags, unsigned long pgoff)
+		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma, *prev;
@@ -1420,20 +1447,6 @@ munmap_back:
 		return -ENOMEM;
 
 	/*
-	 * Set 'VM_NORESERVE' if we should not account for the
-	 * memory use of this mapping.
-	 */
-	if ((flags & MAP_NORESERVE)) {
-		/* We honor MAP_NORESERVE if allowed to overcommit */
-		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
-			vm_flags |= VM_NORESERVE;
-
-		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
-		if (file && is_file_hugepages(file))
-			vm_flags |= VM_NORESERVE;
-	}
-
-	/*
 	 * Private writable mapping: check memory availability
 	 */
 	if (accountable_mapping(file, vm_flags)) {
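
The block removed here is the code that moved, essentially verbatim (minus one pair of redundant parentheses), into do_mmap_pgoff() above: vm_flags must be final before it reaches mmap_region(), whose `flags` parameter is gone. The surviving context feeds into accountable_mapping(), which decides whether the mapping is charged against the commit limit; the v3.8-era helper, reconstructed from memory:

static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
{
	/*
	 * hugetlb has its own accounting separate from the core VM.
	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
	 */
	if (file && is_file_hugepages(file))
		return 0;

	/* only private writable mappings need a commitment */
	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}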
@@ -1531,10 +1544,12 @@ out:
 
 	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
 	if (vm_flags & VM_LOCKED) {
-		if (!mlock_vma_pages_range(vma, addr, addr + len))
+		if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
+					vma == get_gate_vma(current->mm)))
 			mm->locked_vm += (len >> PAGE_SHIFT);
-	} else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
-		make_pages_present(addr, addr + len);
+		else
+			vma->vm_flags &= ~VM_LOCKED;
+	}
 
 	if (file)
 		uprobe_mmap(vma);
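
mmap_region() now only does the locked_vm bookkeeping; the fault-in of a VM_LOCKED or MAP_POPULATE mapping happens later through mm_populate(). The exclusions mirror what mlock itself refuses to lock (special mappings, hugetlb, the gate vma), in which case VM_LOCKED is simply dropped. For reference, VM_SPECIAL in this era expands to the unmergeable/unmlockable flag set from <linux/mm.h>:

#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)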
@@ -2187,9 +2202,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 		return vma;
 	if (!prev || expand_stack(prev, addr))
 		return NULL;
-	if (prev->vm_flags & VM_LOCKED) {
-		mlock_vma_pages_range(prev, addr, prev->vm_end);
-	}
+	if (prev->vm_flags & VM_LOCKED)
+		__mlock_vma_pages_range(prev, addr, prev->vm_end, NULL);
 	return prev;
 }
 #else
@@ -2215,9 +2229,8 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
 	start = vma->vm_start;
 	if (expand_stack(vma, addr))
 		return NULL;
-	if (vma->vm_flags & VM_LOCKED) {
-		mlock_vma_pages_range(vma, addr, start);
-	}
+	if (vma->vm_flags & VM_LOCKED)
+		__mlock_vma_pages_range(vma, addr, start, NULL);
 	return vma;
 }
 #endif
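
Both stack-growth paths (CONFIG_STACK_GROWSUP and the default grows-down case) now call the double-underscore helper directly rather than the mlock_vma_pages_range() wrapper, whose filtering and locked_vm accounting have moved to the callers. Its contract, as best I can reconstruct it from mm/internal.h of this era (treat the exact signature as an assumption):

/*
 * Fault in the pages of [start, end) within an already VM_LOCKED vma.
 * Called with mmap_sem held. The last argument is an "int *nonblocking"
 * out-parameter for get_user_pages(); passing NULL, as above, asks for
 * a plain blocking fault-in without dropping mmap_sem.
 */
long __mlock_vma_pages_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking);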
@@ -2590,10 +2603,8 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
 out:
 	perf_event_mmap(vma);
 	mm->total_vm += len >> PAGE_SHIFT;
-	if (flags & VM_LOCKED) {
-		if (!mlock_vma_pages_range(vma, addr, addr + len))
-			mm->locked_vm += (len >> PAGE_SHIFT);
-	}
+	if (flags & VM_LOCKED)
+		mm->locked_vm += (len >> PAGE_SHIFT);
 	return addr;
 }
 
@@ -2601,10 +2612,14 @@ unsigned long vm_brk(unsigned long addr, unsigned long len)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long ret;
+	bool populate;
 
 	down_write(&mm->mmap_sem);
 	ret = do_brk(addr, len);
+	populate = ((mm->def_flags & VM_LOCKED) != 0);
 	up_write(&mm->mmap_sem);
+	if (populate)
+		mm_populate(addr, len);
 	return ret;
 }
 EXPORT_SYMBOL(vm_brk);
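
mm_populate() is the entry point all of these call sites converge on: a thin <linux/mm.h> wrapper that deliberately swallows errors, since callers like vm_brk() populate only opportunistically. A sketch of the wrapper as introduced by this series:

static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}

__mm_populate() retakes mmap_sem for read while faulting the range in, which is why every caller in this patch drops the write lock first.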
@@ -3002,7 +3017,7 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
 		if (!__test_and_clear_bit(0, (unsigned long *)
 					  &anon_vma->root->rb_root.rb_node))
 			BUG();
-		anon_vma_unlock(anon_vma);
+		anon_vma_unlock_write(anon_vma);
 	}
 }
 