Diffstat (limited to 'mm/mmap.c')
 mm/mmap.c | 84 +++++++++++++++++++++++++++++++++++++++++++++++---------------------------------------
 1 file changed, 45 insertions(+), 39 deletions(-)
@@ -70,7 +70,7 @@ static void unmap_region(struct mm_struct *mm,
  * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
  *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
  *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
- * 
+ *
  * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
  *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
  *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
@@ -268,7 +268,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len);
 
 SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
-	unsigned long rlim, retval;
+	unsigned long retval;
 	unsigned long newbrk, oldbrk;
 	struct mm_struct *mm = current->mm;
 	unsigned long min_brk;
@@ -298,9 +298,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	 * segment grow beyond its set limit the in case where the limit is
 	 * not page aligned -Ram Gupta
 	 */
-	rlim = rlimit(RLIMIT_DATA);
-	if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
-			(mm->end_data - mm->start_data) > rlim)
+	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
+			      mm->end_data, mm->start_data))
 		goto out;
 
 	newbrk = PAGE_ALIGN(brk);
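
For reference, the check_data_rlimit() helper that replaces the open-coded test lives in include/linux/mm.h; as of this kernel it reads approximately:

	static inline int check_data_rlimit(unsigned long rlim,
					    unsigned long new,
					    unsigned long start,
					    unsigned long end_data,
					    unsigned long start_data)
	{
		if (rlim < RLIM_INFINITY) {
			if (((new - start) + (end_data - start_data)) > rlim)
				return -ENOSPC;
		}

		return 0;
	}

The logic is the same RLIMIT_DATA check as before (refuse the brk() when the prospective data segment would exceed the limit); it is factored out so that other callers, such as the prctl(PR_SET_MM) path, can share it.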
@@ -369,20 +368,22 @@ static int browse_rb(struct rb_root *root)
 		struct vm_area_struct *vma;
 		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
 		if (vma->vm_start < prev) {
-			pr_info("vm_start %lx prev %lx\n", vma->vm_start, prev);
+			pr_emerg("vm_start %lx < prev %lx\n",
+				  vma->vm_start, prev);
 			bug = 1;
 		}
 		if (vma->vm_start < pend) {
-			pr_info("vm_start %lx pend %lx\n", vma->vm_start, pend);
+			pr_emerg("vm_start %lx < pend %lx\n",
+				  vma->vm_start, pend);
 			bug = 1;
 		}
 		if (vma->vm_start > vma->vm_end) {
-			pr_info("vm_end %lx < vm_start %lx\n",
-				vma->vm_end, vma->vm_start);
+			pr_emerg("vm_start %lx > vm_end %lx\n",
+				  vma->vm_start, vma->vm_end);
 			bug = 1;
 		}
 		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
-			pr_info("free gap %lx, correct %lx\n",
+			pr_emerg("free gap %lx, correct %lx\n",
				 vma->rb_subtree_gap,
				 vma_compute_subtree_gap(vma));
			bug = 1;
@@ -396,7 +397,7 @@ static int browse_rb(struct rb_root *root)
 	for (nd = pn; nd; nd = rb_prev(nd))
 		j++;
 	if (i != j) {
-		pr_info("backwards %d, forwards %d\n", j, i);
+		pr_emerg("backwards %d, forwards %d\n", j, i);
 		bug = 1;
 	}
 	return bug ? -1 : i;
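
The pr_info() to pr_emerg() conversion in browse_rb() changes only the log level of these corruption reports. Both are plain printk() wrappers from include/linux/printk.h:

	#define pr_emerg(fmt, ...) \
		printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
	#define pr_info(fmt, ...) \
		printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

KERN_EMERG is the highest severity, so the diagnostics still reach the console when console_loglevel filters out KERN_INFO, which matters here because validate_mm() is about to BUG() on the same condition.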
@@ -409,8 +410,9 @@ static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
 	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
 		struct vm_area_struct *vma;
 		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
-		BUG_ON(vma != ignore &&
-		       vma->rb_subtree_gap != vma_compute_subtree_gap(vma));
+		VM_BUG_ON_VMA(vma != ignore &&
+			vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
+			vma);
 	}
 }
 
@@ -420,8 +422,10 @@ static void validate_mm(struct mm_struct *mm)
 	int i = 0;
 	unsigned long highest_address = 0;
 	struct vm_area_struct *vma = mm->mmap;
+
 	while (vma) {
 		struct anon_vma_chain *avc;
+
 		vma_lock_anon_vma(vma);
 		list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
 			anon_vma_interval_tree_verify(avc);
@@ -431,20 +435,21 @@ static void validate_mm(struct mm_struct *mm)
 		i++;
 	}
 	if (i != mm->map_count) {
-		pr_info("map_count %d vm_next %d\n", mm->map_count, i);
+		pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
 		bug = 1;
 	}
 	if (highest_address != mm->highest_vm_end) {
-		pr_info("mm->highest_vm_end %lx, found %lx\n",
+		pr_emerg("mm->highest_vm_end %lx, found %lx\n",
			mm->highest_vm_end, highest_address);
 		bug = 1;
 	}
 	i = browse_rb(&mm->mm_rb);
 	if (i != mm->map_count) {
-		pr_info("map_count %d rb %d\n", mm->map_count, i);
+		if (i != -1)
+			pr_emerg("map_count %d rb %d\n", mm->map_count, i);
 		bug = 1;
 	}
-	BUG_ON(bug);
+	VM_BUG_ON_MM(bug, mm);
 }
 #else
 #define validate_mm_rb(root, ignore) do { } while (0)
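
Beyond the log level, two changes of substance here: browse_rb() returns -1 once it has already reported an inconsistency, so the new if (i != -1) guard avoids stacking a redundant map_count message on top of the earlier reports; and BUG_ON(bug) becomes VM_BUG_ON_MM(bug, mm), the mm_struct counterpart of VM_BUG_ON_VMA() above, which under CONFIG_DEBUG_VM is roughly:

	#define VM_BUG_ON_MM(cond, mm)					\
		do {							\
			if (unlikely(cond)) {				\
				dump_mm(mm);				\
				BUG();					\
			}						\
		} while (0)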
@@ -741,7 +746,7 @@ again: remove_next = 1 + (end > next->vm_end);
 			 * split_vma inserting another: so it must be
 			 * mprotect case 4 shifting the boundary down.
 			 */
-			adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
+			adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
 			exporter = vma;
 			importer = next;
 		}
@@ -787,8 +792,8 @@ again: remove_next = 1 + (end > next->vm_end);
 	if (!anon_vma && adjust_next)
 		anon_vma = next->anon_vma;
 	if (anon_vma) {
-		VM_BUG_ON(adjust_next && next->anon_vma &&
-			  anon_vma != next->anon_vma);
+		VM_BUG_ON_VMA(adjust_next && next->anon_vma &&
+			  anon_vma != next->anon_vma, next);
 		anon_vma_lock_write(anon_vma);
 		anon_vma_interval_tree_pre_update_vma(vma);
 		if (adjust_next)
@@ -1010,7 +1015,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
 struct vm_area_struct *vma_merge(struct mm_struct *mm,
 			struct vm_area_struct *prev, unsigned long addr,
 			unsigned long end, unsigned long vm_flags,
-		     	struct anon_vma *anon_vma, struct file *file,
+			struct anon_vma *anon_vma, struct file *file,
 			pgoff_t pgoff, struct mempolicy *policy)
 {
 	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
@@ -1036,7 +1041,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 	 * Can it merge with the predecessor?
 	 */
 	if (prev && prev->vm_end == addr &&
-  			mpol_equal(vma_policy(prev), policy) &&
+			mpol_equal(vma_policy(prev), policy) &&
 			can_vma_merge_after(prev, vm_flags,
 						anon_vma, file, pgoff)) {
 		/*
@@ -1064,7 +1069,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 	 * Can this new request be merged in front of next?
 	 */
 	if (next && end == next->vm_start &&
-  			mpol_equal(policy, vma_policy(next)) &&
+			mpol_equal(policy, vma_policy(next)) &&
 			can_vma_merge_before(next, vm_flags,
 					anon_vma, file, pgoff+pglen)) {
 		if (prev && addr < prev->vm_end)	/* case 4 */
@@ -1235,7 +1240,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 			unsigned long flags, unsigned long pgoff,
 			unsigned long *populate)
 {
-	struct mm_struct * mm = current->mm;
+	struct mm_struct *mm = current->mm;
 	vm_flags_t vm_flags;
 
 	*populate = 0;
@@ -1263,7 +1268,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 
 	/* offset overflow? */
 	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
-               return -EOVERFLOW;
+		return -EOVERFLOW;
 
 	/* Too many mappings? */
 	if (mm->map_count > sysctl_max_map_count)
@@ -1921,7 +1926,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	info.align_mask = 0;
 	return vm_unmapped_area(&info);
 }
-#endif	
+#endif
 
 /*
  * This mmap-allocator allocates new areas top-down from below the
@@ -2321,13 +2326,13 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)
 }
 
 struct vm_area_struct *
-find_extend_vma(struct mm_struct * mm, unsigned long addr)
+find_extend_vma(struct mm_struct *mm, unsigned long addr)
 {
-	struct vm_area_struct * vma;
+	struct vm_area_struct *vma;
 	unsigned long start;
 
 	addr &= PAGE_MASK;
-	vma = find_vma(mm,addr);
+	vma = find_vma(mm, addr);
 	if (!vma)
 		return NULL;
 	if (vma->vm_start <= addr)
@@ -2376,7 +2381,7 @@ static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		unsigned long start, unsigned long end)
 {
-	struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
+	struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
 	struct mmu_gather tlb;
 
 	lru_add_drain();
@@ -2423,7 +2428,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
  * __split_vma() bypasses sysctl_max_map_count checking.  We use this on the
  * munmap path where it doesn't make sense to fail.
  */
-static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	      unsigned long addr, int new_below)
 {
 	struct vm_area_struct *new;
@@ -2512,7 +2517,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 	if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
 		return -EINVAL;
 
-	if ((len = PAGE_ALIGN(len)) == 0)
+	len = PAGE_ALIGN(len);
+	if (len == 0)
 		return -EINVAL;
 
 	/* Find the first overlapping VMA */
@@ -2558,7 +2564,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 		if (error)
 			return error;
 	}
-	vma = prev? prev->vm_next: mm->mmap;
+	vma = prev ? prev->vm_next : mm->mmap;
 
 	/*
 	 * unlock any mlock()ed ranges before detaching vmas
@@ -2621,10 +2627,10 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
  */
 static unsigned long do_brk(unsigned long addr, unsigned long len)
 {
-	struct mm_struct * mm = current->mm;
-	struct vm_area_struct * vma, * prev;
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma, *prev;
 	unsigned long flags;
-	struct rb_node ** rb_link, * rb_parent;
+	struct rb_node **rb_link, *rb_parent;
 	pgoff_t pgoff = addr >> PAGE_SHIFT;
 	int error;
 
@@ -2848,7 +2854,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 			 * safe. It is only safe to keep the vm_pgoff
 			 * linear if there are no pages mapped yet.
 			 */
-			VM_BUG_ON(faulted_in_anon_vma);
+			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
 			*vmap = vma = new_vma;
 		}
 		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
@@ -3196,7 +3202,7 @@ void __init mmap_init(void)
 {
 	int ret;
 
-	ret = percpu_counter_init(&vm_committed_as, 0);
+	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
 	VM_BUG_ON(ret);
 }
 
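percpu_counter_init() gained a gfp_t argument in this release, so callers now state the allocation context for the counter's per-cpu storage; mmap_init() runs in process context at boot, hence GFP_KERNEL. A minimal usage sketch of the new signature (demo_counter and demo_setup() are hypothetical names, not part of mm/mmap.c):

	#include <linux/percpu_counter.h>

	static struct percpu_counter demo_counter;

	static int demo_setup(void)
	{
		/* allocates the per-cpu storage; GFP_KERNEL may sleep */
		int ret = percpu_counter_init(&demo_counter, 0, GFP_KERNEL);

		if (ret)
			return ret;
		percpu_counter_add(&demo_counter, 1);	/* normal use is unchanged */
		return 0;
	}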