Diffstat (limited to 'mm/mmap.c')
 -rw-r--r--  mm/mmap.c  62
 1 files changed, 37 insertions, 25 deletions
@@ -28,7 +28,7 @@
 #include <linux/mempolicy.h>
 #include <linux/rmap.h>
 #include <linux/mmu_notifier.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -88,9 +88,6 @@ int sysctl_overcommit_ratio = 50;	/* default is 50% */
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
 struct percpu_counter vm_committed_as;
 
-/* amount of vm to protect from userspace access */
-unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
-
 /*
  * Check that a process has enough memory to allocate a new virtual
  * mapping. 0 means there is enough memory for the allocation to
@@ -573,9 +570,9 @@ again:		remove_next = 1 + (end > next->vm_end);
 
 	/*
 	 * When changing only vma->vm_end, we don't really need
-	 * anon_vma lock: but is that case worth optimizing out?
+	 * anon_vma lock.
 	 */
-	if (vma->anon_vma)
+	if (vma->anon_vma && (insert || importer || start != vma->vm_start))
 		anon_vma = vma->anon_vma;
 	if (anon_vma) {
 		spin_lock(&anon_vma->lock);
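
The tightened test above takes the lock only when the adjustment can actually disturb rmap: a vma being inserted, pages migrating to another vma (importer), or a shifted vm_start. A minimal sketch restating it as a predicate; needs_anon_vma_lock() is a hypothetical name, not a kernel symbol:

    /* Hedged restatement of the condition above. A pure vm_end change
     * inserts nothing and migrates no pages between vmas, so rmap
     * walkers remain safe without taking anon_vma->lock. */
    static inline int needs_anon_vma_lock(struct vm_area_struct *vma,
                                          unsigned long start,
                                          struct vm_area_struct *insert,
                                          struct vm_area_struct *importer)
    {
            return vma->anon_vma &&
                   (insert || importer || start != vma->vm_start);
    }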
@@ -659,9 +656,6 @@ again:		remove_next = 1 + (end > next->vm_end);
 	validate_mm(mm);
 }
 
-/* Flags that can be inherited from an existing mapping when merging */
-#define VM_MERGEABLE_FLAGS (VM_CAN_NONLINEAR)
-
 /*
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
@@ -669,7 +663,8 @@ again:		remove_next = 1 + (end > next->vm_end);
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
 			struct file *file, unsigned long vm_flags)
 {
-	if ((vma->vm_flags ^ vm_flags) & ~VM_MERGEABLE_FLAGS)
+	/* VM_CAN_NONLINEAR may get set later by f_op->mmap() */
+	if ((vma->vm_flags ^ vm_flags) & ~VM_CAN_NONLINEAR)
 		return 0;
 	if (vma->vm_file != file)
 		return 0;
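
The xor picks out every bit on which the two flag sets disagree; masking out VM_CAN_NONLINEAR means a disagreement in that bit alone no longer blocks a merge, since f_op->mmap() may still set it after this check runs. A small worked example:

    /* Illustrative only: two flag words differing solely in
     * VM_CAN_NONLINEAR still compare as mergeable. */
    unsigned long a = VM_READ | VM_WRITE | VM_CAN_NONLINEAR;
    unsigned long b = VM_READ | VM_WRITE;

    /* (a ^ b) == VM_CAN_NONLINEAR; masked out, the test yields 0 */
    int mergeable = !((a ^ b) & ~VM_CAN_NONLINEAR);   /* -> 1 */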
@@ -908,7 +903,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
 #endif /* CONFIG_PROC_FS */
 
 /*
- * The caller must hold down_write(current->mm->mmap_sem).
+ * The caller must hold down_write(&current->mm->mmap_sem).
  */
 
 unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
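
The comment change only adds the missing & (down_write() takes a pointer to the semaphore). At a call site, the rule it documents looks roughly like this; a minimal sketch with file, addr, len, prot, flags and pgoff assumed in scope:

    /* Sketch of the documented locking rule around do_mmap_pgoff() */
    down_write(&current->mm->mmap_sem);
    addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
    up_write(&current->mm->mmap_sem);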
@@ -954,6 +949,24 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	if (mm->map_count > sysctl_max_map_count)
 		return -ENOMEM;
 
+	if (flags & MAP_HUGETLB) {
+		struct user_struct *user = NULL;
+		if (file)
+			return -EINVAL;
+
+		/*
+		 * VM_NORESERVE is used because the reservations will be
+		 * taken when vm_ops->mmap() is called
+		 * A dummy user value is used because we are not locking
+		 * memory so no accounting is necessary
+		 */
+		len = ALIGN(len, huge_page_size(&default_hstate));
+		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
+						&user, HUGETLB_ANONHUGE_INODE);
+		if (IS_ERR(file))
+			return PTR_ERR(file);
+	}
+
 	/* Obtain the address to map to. we verify (or select) it and ensure
 	 * that it represents a valid section of the address space.
 	 */
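
This new branch backs an anonymous MAP_HUGETLB request with an unlinked hugetlbfs file, so userspace no longer needs a hugetlbfs mount to get huge-page-backed anonymous memory. A hedged userspace sketch, assuming a kernel that exposes MAP_HUGETLB and a default huge page size of 2 MB:

    #include <sys/mman.h>

    /* Anonymous huge-page mapping via the new flag; the kernel path
     * above rounds len up to the huge page size. */
    void *p = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
    /* p == MAP_FAILED if, e.g., no huge pages are reserved via
     * /proc/sys/vm/nr_hugepages */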
@@ -968,11 +981,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
 			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 
-	if (flags & MAP_LOCKED) {
+	if (flags & MAP_LOCKED)
 		if (!can_do_mlock())
 			return -EPERM;
-		vm_flags |= VM_LOCKED;
-	}
 
 	/* mlock MCL_FUTURE? */
 	if (vm_flags & VM_LOCKED) {
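
The explicit vm_flags |= VM_LOCKED can go because calc_vm_flag_bits(), called a few lines up, already translates MAP_LOCKED into VM_LOCKED; only the capability check remains here. For reference, an approximate rendering of the contemporary helper in include/linux/mman.h:

    /* Approximate, for illustration; see include/linux/mman.h */
    static inline unsigned long calc_vm_flag_bits(unsigned long flags)
    {
            return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
                   _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
                   _calc_vm_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE) |
                   _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    );
    }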
@@ -1198,21 +1209,21 @@ munmap_back:
 			goto unmap_and_free_vma;
 		if (vm_flags & VM_EXECUTABLE)
 			added_exe_file_vma(mm);
+
+		/* Can addr have changed??
+		 *
+		 * Answer: Yes, several device drivers can do it in their
+		 *         f_op->mmap method. -DaveM
+		 */
+		addr = vma->vm_start;
+		pgoff = vma->vm_pgoff;
+		vm_flags = vma->vm_flags;
 	} else if (vm_flags & VM_SHARED) {
 		error = shmem_zero_setup(vma);
 		if (error)
 			goto free_vma;
 	}
 
-	/* Can addr have changed??
-	 *
-	 * Answer: Yes, several device drivers can do it in their
-	 *         f_op->mmap method. -DaveM
-	 */
-	addr = vma->vm_start;
-	pgoff = vma->vm_pgoff;
-	vm_flags = vma->vm_flags;
-
 	if (vma_wants_writenotify(vma))
 		vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
 
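
The re-read of addr, pgoff and vm_flags only matters after file->f_op->mmap() has run, so it moves inside the file-backed branch; shmem_zero_setup() does not relocate the vma. A hypothetical driver shows why the re-read exists at all (mydrv_mmap and MYDRV_PFN are made-up names):

    /* Hypothetical driver mmap method: it may rewrite vma fields,
     * which is exactly what the caller must re-read afterwards. */
    static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
    {
            vma->vm_flags |= VM_IO;   /* vm_flags changed */
            vma->vm_pgoff = 0;        /* pgoff changed */
            return remap_pfn_range(vma, vma->vm_start, MYDRV_PFN,
                                   vma->vm_end - vma->vm_start,
                                   vma->vm_page_prot);
    }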
@@ -1223,7 +1234,7 @@ munmap_back:
 	if (correct_wcount)
 		atomic_inc(&inode->i_writecount);
 out:
-	perf_counter_mmap(vma);
+	perf_event_mmap(vma);
 
 	mm->total_vm += len >> PAGE_SHIFT;
 	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
@@ -2114,6 +2125,7 @@ void exit_mmap(struct mm_struct *mm)
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
+
 	free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(tlb, 0, end);
 
@@ -2311,7 +2323,7 @@ int install_special_mapping(struct mm_struct *mm,
 
 	mm->total_vm += len >> PAGE_SHIFT;
 
-	perf_counter_mmap(vma);
+	perf_event_mmap(vma);
 
 	return 0;
 }