Diffstat (limited to 'mm/mmap.c')
-rw-r--r--	mm/mmap.c	109
1 file changed, 54 insertions, 55 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index 8d95902e9a38..00ced3ee49a8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -658,6 +658,9 @@ again: remove_next = 1 + (end > next->vm_end);
 	validate_mm(mm);
 }
 
+/* Flags that can be inherited from an existing mapping when merging */
+#define VM_MERGEABLE_FLAGS (VM_CAN_NONLINEAR)
+
 /*
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
@@ -665,7 +668,7 @@ again: remove_next = 1 + (end > next->vm_end);
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
 			struct file *file, unsigned long vm_flags)
 {
-	if (vma->vm_flags != vm_flags)
+	if ((vma->vm_flags ^ vm_flags) & ~VM_MERGEABLE_FLAGS)
 		return 0;
 	if (vma->vm_file != file)
 		return 0;
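
The rewritten check uses a standard bit trick: a ^ b has bits set exactly where the two flag words differ, and masking with ~VM_MERGEABLE_FLAGS discards differences in VM_CAN_NONLINEAR while still rejecting every other mismatch. A minimal userspace sketch of the same predicate, with made-up flag values rather than the kernel's:

#include <assert.h>

#define FLAG_READ	0x1	/* hypothetical flag bits, illustration only */
#define FLAG_WRITE	0x2
#define FLAG_IGNORED	0x4	/* stands in for VM_MERGEABLE_FLAGS */

/* Mirrors the patched check: two flag words are compatible if they
 * differ only in bits covered by the ignored mask. */
static int flags_compatible(unsigned long a, unsigned long b)
{
	return ((a ^ b) & ~FLAG_IGNORED) == 0;
}

int main(void)
{
	assert(flags_compatible(FLAG_READ, FLAG_READ));
	assert(flags_compatible(FLAG_READ, FLAG_READ | FLAG_IGNORED));
	assert(!flags_compatible(FLAG_READ, FLAG_READ | FLAG_WRITE));
	return 0;
}
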
@@ -915,7 +918,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	struct inode *inode;
 	unsigned int vm_flags;
 	int error;
-	int accountable = 1;
 	unsigned long reqprot = prot;
 
 	/*
@@ -1016,8 +1018,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 			return -EPERM;
 		vm_flags &= ~VM_MAYEXEC;
 	}
-	if (is_file_hugepages(file))
-		accountable = 0;
 
 	if (!file->f_op || !file->f_op->mmap)
 		return -ENODEV;
@@ -1050,8 +1050,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	if (error)
 		return error;
 
-	return mmap_region(file, addr, len, flags, vm_flags, pgoff,
-			   accountable);
+	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
 }
 EXPORT_SYMBOL(do_mmap_pgoff);
 
@@ -1087,10 +1086,25 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
 		mapping_cap_account_dirty(vma->vm_file->f_mapping);
 }
 
+/*
+ * We account for memory if it's a private writeable mapping,
+ * not hugepages and VM_NORESERVE wasn't set.
+ */
+static inline int accountable_mapping(struct file *file, unsigned int vm_flags)
+{
+	/*
+	 * hugetlb has its own accounting separate from the core VM
+	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
+	 */
+	if (file && is_file_hugepages(file))
+		return 0;
+
+	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
+}
+
 unsigned long mmap_region(struct file *file, unsigned long addr,
 			  unsigned long len, unsigned long flags,
-			  unsigned int vm_flags, unsigned long pgoff,
-			  int accountable)
+			  unsigned int vm_flags, unsigned long pgoff)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma, *prev;
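
accountable_mapping() collapses the old nested tests into one mask comparison: it is true only when VM_WRITE is set and both VM_SHARED and VM_NORESERVE are clear, i.e. for a private writable mapping that did not opt out of reservation. A small sketch of the predicate, using illustrative stand-in values for the flag bits rather than the kernel's definitions:

#include <assert.h>

#define VM_WRITE	0x2	/* hypothetical stand-ins for the kernel bits */
#define VM_SHARED	0x8
#define VM_NORESERVE	0x200000

/* Same shape as accountable_mapping()'s final test. */
static int accountable(unsigned long vm_flags)
{
	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}

int main(void)
{
	assert(accountable(VM_WRITE));			/* private writable: charge it */
	assert(!accountable(VM_WRITE | VM_SHARED));	/* shared: object, not vma, is accounted */
	assert(!accountable(VM_WRITE | VM_NORESERVE));	/* caller opted out of reservation */
	assert(!accountable(0));			/* read-only private: nothing to charge */
	return 0;
}
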
@@ -1114,38 +1128,38 @@ munmap_back:
 	if (!may_expand_vm(mm, len >> PAGE_SHIFT))
 		return -ENOMEM;
 
-	if (flags & MAP_NORESERVE)
-		vm_flags |= VM_NORESERVE;
+	/*
+	 * Set 'VM_NORESERVE' if we should not account for the
+	 * memory use of this mapping.
+	 */
+	if ((flags & MAP_NORESERVE)) {
+		/* We honor MAP_NORESERVE if allowed to overcommit */
+		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
+			vm_flags |= VM_NORESERVE;
 
-	if (accountable && (!(flags & MAP_NORESERVE) ||
-			    sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
-		if (vm_flags & VM_SHARED) {
-			/* Check memory availability in shmem_file_setup? */
-			vm_flags |= VM_ACCOUNT;
-		} else if (vm_flags & VM_WRITE) {
-			/*
-			 * Private writable mapping: check memory availability
-			 */
-			charged = len >> PAGE_SHIFT;
-			if (security_vm_enough_memory(charged))
-				return -ENOMEM;
-			vm_flags |= VM_ACCOUNT;
-		}
+		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
+		if (file && is_file_hugepages(file))
+			vm_flags |= VM_NORESERVE;
 	}
 
 	/*
-	 * Can we just expand an old private anonymous mapping?
-	 * The VM_SHARED test is necessary because shmem_zero_setup
-	 * will create the file object for a shared anonymous map below.
+	 * Private writable mapping: check memory availability
 	 */
-	if (!file && !(vm_flags & VM_SHARED)) {
-		vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
-				NULL, NULL, pgoff, NULL);
-		if (vma)
-			goto out;
+	if (accountable_mapping(file, vm_flags)) {
+		charged = len >> PAGE_SHIFT;
+		if (security_vm_enough_memory(charged))
+			return -ENOMEM;
+		vm_flags |= VM_ACCOUNT;
 	}
 
 	/*
+	 * Can we just expand an old mapping?
+	 */
+	vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
+	if (vma)
+		goto out;
+
+	/*
 	 * Determine the object being mapped and call the appropriate
 	 * specific mapper. the address has already been validated, but
 	 * not unmapped, but the maps are removed from the list.
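
The MAP_NORESERVE behaviour this preserves can be exercised from userspace: under heuristic overcommit the mapping below is not charged against the commit limit, while with vm.overcommit_memory set to 2 (OVERCOMMIT_NEVER) the flag is ignored and the charge is made anyway. A rough demonstration, leaving the /proc/meminfo comparison to the reader:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1UL << 30;	/* 1 GiB of private writable address space */

	/* With MAP_NORESERVE the commit charge is skipped when the
	 * overcommit policy permits it; under OVERCOMMIT_NEVER the
	 * kernel still accounts the mapping despite the flag. */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* Compare Committed_AS in /proc/meminfo with and without
	 * MAP_NORESERVE to observe the accounting difference. */
	munmap(p, len);
	return 0;
}
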
@@ -1186,14 +1200,6 @@ munmap_back:
 		goto free_vma;
 	}
 
-	/* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
-	 * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
-	 * that memory reservation must be checked; but that reservation
-	 * belongs to shared memory object, not to vma: so now clear it.
-	 */
-	if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT))
-		vma->vm_flags &= ~VM_ACCOUNT;
-
 	/* Can addr have changed??
 	 *
 	 * Answer: Yes, several device drivers can do it in their
@@ -1206,17 +1212,8 @@ munmap_back:
 	if (vma_wants_writenotify(vma))
 		vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
 
-	if (file && vma_merge(mm, prev, addr, vma->vm_end,
-			vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
-		mpol_put(vma_policy(vma));
-		kmem_cache_free(vm_area_cachep, vma);
-		fput(file);
-		if (vm_flags & VM_EXECUTABLE)
-			removed_exe_file_vma(mm);
-	} else {
-		vma_link(mm, vma, prev, rb_link, rb_parent);
-		file = vma->vm_file;
-	}
+	vma_link(mm, vma, prev, rb_link, rb_parent);
+	file = vma->vm_file;
 
 	/* Once vma denies write, undo our temporary denial count */
 	if (correct_wcount)
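
Since accounting is now settled before the vma is built, vm_flags no longer changes between the early vma_merge() attempt and vma_link(), and the late merge with its rollback path (mpol_put(), kmem_cache_free(), fput()) becomes unnecessary. A toy model of the resulting merge-first, allocate-second shape, independent of any kernel data structure:

#include <stdio.h>

struct range { unsigned long start, end; };

static struct range pool[16];
static int nranges;

/* Try to extend an existing adjacent range in place ("merge"). */
static struct range *try_merge(unsigned long start, unsigned long end)
{
	for (int i = 0; i < nranges; i++) {
		if (pool[i].end == start) {
			pool[i].end = end;
			return &pool[i];
		}
	}
	return NULL;
}

/* Merge first; only commit to a new entry when no merge is possible,
 * so there is never a half-built entry to roll back. */
static struct range *map_range(unsigned long start, unsigned long end)
{
	struct range *r = try_merge(start, end);
	if (r)
		return r;
	r = &pool[nranges++];
	r->start = start;
	r->end = end;
	return r;
}

int main(void)
{
	map_range(0x1000, 0x2000);
	map_range(0x2000, 0x3000);	/* adjacent: extends the first range */
	printf("%d range(s)\n", nranges);	/* prints: 1 range(s) */
	return 0;
}
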
@@ -2087,12 +2084,8 @@ void exit_mmap(struct mm_struct *mm)
 	unsigned long end;
 
 	/* mm's last user has gone, and its about to be pulled down */
-	arch_exit_mmap(mm);
 	mmu_notifier_release(mm);
 
-	if (!mm->mmap)	/* Can happen if dup_mmap() received an OOM */
-		return;
-
 	if (mm->locked_vm) {
 		vma = mm->mmap;
 		while (vma) {
@@ -2101,7 +2094,13 @@ void exit_mmap(struct mm_struct *mm)
 			vma = vma->vm_next;
 		}
 	}
+
+	arch_exit_mmap(mm);
+
 	vma = mm->mmap;
+	if (!vma)	/* Can happen if dup_mmap() received an OOM */
+		return;
+
 	lru_add_drain();
 	flush_cache_mm(mm);
 	tlb = tlb_gather_mmu(mm, 1);