Diffstat (limited to 'mm/mmap.c')
-rw-r--r--	mm/mmap.c	57
1 file changed, 36 insertions(+), 21 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 26892e346d8f..21d4029a07b3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -28,7 +28,7 @@
 #include <linux/mempolicy.h>
 #include <linux/rmap.h>
 #include <linux/mmu_notifier.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -570,9 +570,9 @@ again:		remove_next = 1 + (end > next->vm_end);
 
 	/*
 	 * When changing only vma->vm_end, we don't really need
-	 * anon_vma lock: but is that case worth optimizing out?
+	 * anon_vma lock.
 	 */
-	if (vma->anon_vma)
+	if (vma->anon_vma && (insert || importer || start != vma->vm_start))
 		anon_vma = vma->anon_vma;
 	if (anon_vma) {
 		spin_lock(&anon_vma->lock);
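The new condition narrows when the anon_vma lock is taken: adjusting only vma->vm_end cannot change which pages rmap reaches through the anon_vma, so the lock is needed only when a vma is being inserted, an importer takes over pages, or vm_start moves. A minimal sketch of that rule as a hypothetical helper (the real check stays open-coded in vma_adjust(); assumes kernel context for the types):

/* Hypothetical restatement of the new locking rule in vma_adjust():
 * growing or shrinking only vm_end leaves the rmap interval intact,
 * so the anon_vma lock may be skipped in that case.
 */
static int needs_anon_vma_lock(struct vm_area_struct *vma,
			       unsigned long start,
			       struct vm_area_struct *insert,
			       struct vm_area_struct *importer)
{
	return vma->anon_vma &&
		(insert || importer || start != vma->vm_start);
}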
@@ -656,9 +656,6 @@ again:		remove_next = 1 + (end > next->vm_end);
 	validate_mm(mm);
 }
 
-/* Flags that can be inherited from an existing mapping when merging */
-#define VM_MERGEABLE_FLAGS (VM_CAN_NONLINEAR)
-
 /*
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
@@ -666,7 +663,8 @@ again:		remove_next = 1 + (end > next->vm_end);
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
 			struct file *file, unsigned long vm_flags)
 {
-	if ((vma->vm_flags ^ vm_flags) & ~VM_MERGEABLE_FLAGS)
+	/* VM_CAN_NONLINEAR may get set later by f_op->mmap() */
+	if ((vma->vm_flags ^ vm_flags) & ~VM_CAN_NONLINEAR)
 		return 0;
 	if (vma->vm_file != file)
 		return 0;
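Masking with ~VM_CAN_NONLINEAR rather than requiring an exact flag match keeps two mappings mergeable even when only one of them has had VM_CAN_NONLINEAR set by its f_op->mmap(). A small worked example of the bit test (illustration only; assumes kernel context for the VM_* definitions):

/* Two mappings that differ solely in VM_CAN_NONLINEAR still pass
 * the mergeability test.
 */
static int example_merge_check(void)
{
	unsigned long a = VM_READ | VM_WRITE | VM_CAN_NONLINEAR;
	unsigned long b = VM_READ | VM_WRITE;

	/* (a ^ b) == VM_CAN_NONLINEAR, so masking it out yields 0 */
	return !((a ^ b) & ~VM_CAN_NONLINEAR);	/* 1: merge allowed */
}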
@@ -951,6 +949,24 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	if (mm->map_count > sysctl_max_map_count)
 		return -ENOMEM;
 
+	if (flags & MAP_HUGETLB) {
+		struct user_struct *user = NULL;
+		if (file)
+			return -EINVAL;
+
+		/*
+		 * VM_NORESERVE is used because the reservations will be
+		 * taken when vm_ops->mmap() is called
+		 * A dummy user value is used because we are not locking
+		 * memory so no accounting is necessary
+		 */
+		len = ALIGN(len, huge_page_size(&default_hstate));
+		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
+						&user, HUGETLB_ANONHUGE_INODE);
+		if (IS_ERR(file))
+			return PTR_ERR(file);
+	}
+
 	/* Obtain the address to map to. we verify (or select) it and ensure
 	 * that it represents a valid section of the address space.
 	 */
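With this hunk, an anonymous mmap() can request huge pages directly: do_mmap_pgoff() synthesizes a hugetlbfs file on the caller's behalf. A minimal userspace sketch of the resulting API (assumes x86 and a configured 2 MiB huge page pool; MAP_HUGETLB's numeric value is arch-specific):

#include <sys/mman.h>
#include <stdio.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB	0x40000		/* x86 value; check your headers */
#endif

int main(void)
{
	size_t len = 2 * 1024 * 1024;	/* one 2 MiB huge page */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {		/* fails if no huge pages are free */
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	munmap(p, len);
	return 0;
}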
@@ -965,11 +981,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
 			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 
-	if (flags & MAP_LOCKED) {
+	if (flags & MAP_LOCKED)
 		if (!can_do_mlock())
 			return -EPERM;
-		vm_flags |= VM_LOCKED;
-	}
 
 	/* mlock MCL_FUTURE? */
 	if (vm_flags & VM_LOCKED) {
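Dropping the explicit vm_flags |= VM_LOCKED relies on calc_vm_flag_bits() having already translated MAP_LOCKED into VM_LOCKED a few lines above, leaving the MAP_LOCKED branch to enforce only the permission check. A sketch of that translation in the style of _calc_vm_trans() from <linux/mman.h> (the exact flag set may differ by tree):

/* Sketch of the MAP_* -> VM_* translation this change assumes;
 * the kernel builds it with the _calc_vm_trans() macro.
 */
static inline unsigned long calc_vm_flag_bits_sketch(unsigned long flags)
{
	return ((flags & MAP_GROWSDOWN)  ? VM_GROWSDOWN  : 0) |
	       ((flags & MAP_DENYWRITE)  ? VM_DENYWRITE  : 0) |
	       ((flags & MAP_EXECUTABLE) ? VM_EXECUTABLE : 0) |
	       ((flags & MAP_LOCKED)     ? VM_LOCKED     : 0);
}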
@@ -1195,21 +1209,21 @@ munmap_back:
 			goto unmap_and_free_vma;
 		if (vm_flags & VM_EXECUTABLE)
 			added_exe_file_vma(mm);
+
+		/* Can addr have changed??
+		 *
+		 * Answer: Yes, several device drivers can do it in their
+		 *         f_op->mmap method. -DaveM
+		 */
+		addr = vma->vm_start;
+		pgoff = vma->vm_pgoff;
+		vm_flags = vma->vm_flags;
 	} else if (vm_flags & VM_SHARED) {
 		error = shmem_zero_setup(vma);
 		if (error)
 			goto free_vma;
 	}
 
-	/* Can addr have changed??
-	 *
-	 * Answer: Yes, several device drivers can do it in their
-	 *         f_op->mmap method. -DaveM
-	 */
-	addr = vma->vm_start;
-	pgoff = vma->vm_pgoff;
-	vm_flags = vma->vm_flags;
-
 	if (vma_wants_writenotify(vma))
 		vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
 
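Re-reading addr, pgoff and vm_flags belongs inside the file-backed branch, since only a driver's f_op->mmap is expected to change them. A hypothetical driver handler showing why the caller cannot trust its own copies after the call (names are invented; assumes a 2.6.31-era kernel context):

/* Hypothetical driver mmap handler: f_op->mmap() may legitimately
 * rewrite fields of the vma it is handed, so mmap_region() must
 * re-read them afterwards.
 */
static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_IO | VM_RESERVED;	/* vm_flags changes... */
	vma->vm_pgoff = 0;			/* ...and so can pgoff */
	return 0;
}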
@@ -1220,7 +1234,7 @@ munmap_back:
 	if (correct_wcount)
 		atomic_inc(&inode->i_writecount);
 out:
-	perf_counter_mmap(vma);
+	perf_event_mmap(vma);
 
 	mm->total_vm += len >> PAGE_SHIFT;
 	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
@@ -2111,6 +2125,7 @@ void exit_mmap(struct mm_struct *mm)
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
+
 	free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(tlb, 0, end);
 
@@ -2308,7 +2323,7 @@ int install_special_mapping(struct mm_struct *mm,
 
 	mm->total_vm += len >> PAGE_SHIFT;
 
-	perf_counter_mmap(vma);
+	perf_event_mmap(vma);
 
 	return 0;
 }