about summary refs log tree commit diff stats
path: root/mm/mmap.c
diff options
context:
space:
mode:
authorJames Morris <jmorris@namei.org>2009-03-23 19:52:46 -0400
committerJames Morris <jmorris@namei.org>2009-03-23 19:52:46 -0400
commit703a3cd72817e99201cef84a8a7aecc60b2b3581 (patch)
tree3e943755178ff410694722bb031f523136fbc432 /mm/mmap.c
parentdf7f54c012b92ec93d56b68547351dcdf8a163d3 (diff)
parent8e0ee43bc2c3e19db56a4adaa9a9b04ce885cd84 (diff)
Merge branch 'master' into next
Diffstat (limited to 'mm/mmap.c')
-rw-r--r-- mm/mmap.c | 48
1 file changed, 28 insertions, 20 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index 3b3ed0bb9fdb..1abb9185a686 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -919,7 +919,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
919 struct inode *inode; 919 struct inode *inode;
920 unsigned int vm_flags; 920 unsigned int vm_flags;
921 int error; 921 int error;
922 int accountable = 1;
923 unsigned long reqprot = prot; 922 unsigned long reqprot = prot;
924 923
925 /* 924 /*
@@ -1020,8 +1019,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1020 return -EPERM; 1019 return -EPERM;
1021 vm_flags &= ~VM_MAYEXEC; 1020 vm_flags &= ~VM_MAYEXEC;
1022 } 1021 }
1023 if (is_file_hugepages(file))
1024 accountable = 0;
1025 1022
1026 if (!file->f_op || !file->f_op->mmap) 1023 if (!file->f_op || !file->f_op->mmap)
1027 return -ENODEV; 1024 return -ENODEV;
@@ -1057,8 +1054,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1057 if (error) 1054 if (error)
1058 return error; 1055 return error;
1059 1056
1060 return mmap_region(file, addr, len, flags, vm_flags, pgoff, 1057 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
1061 accountable);
1062} 1058}
1063EXPORT_SYMBOL(do_mmap_pgoff); 1059EXPORT_SYMBOL(do_mmap_pgoff);
1064 1060
@@ -1096,17 +1092,23 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
1096 1092
1097/* 1093/*
1098 * We account for memory if it's a private writeable mapping, 1094 * We account for memory if it's a private writeable mapping,
1099 * and VM_NORESERVE wasn't set. 1095 * not hugepages and VM_NORESERVE wasn't set.
1100 */ 1096 */
1101static inline int accountable_mapping(unsigned int vm_flags) 1097static inline int accountable_mapping(struct file *file, unsigned int vm_flags)
1102{ 1098{
1099 /*
1100 * hugetlb has its own accounting separate from the core VM
1101 * VM_HUGETLB may not be set yet so we cannot check for that flag.
1102 */
1103 if (file && is_file_hugepages(file))
1104 return 0;
1105
1103 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE; 1106 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1104} 1107}
1105 1108
1106unsigned long mmap_region(struct file *file, unsigned long addr, 1109unsigned long mmap_region(struct file *file, unsigned long addr,
1107 unsigned long len, unsigned long flags, 1110 unsigned long len, unsigned long flags,
1108 unsigned int vm_flags, unsigned long pgoff, 1111 unsigned int vm_flags, unsigned long pgoff)
1109 int accountable)
1110{ 1112{
1111 struct mm_struct *mm = current->mm; 1113 struct mm_struct *mm = current->mm;
1112 struct vm_area_struct *vma, *prev; 1114 struct vm_area_struct *vma, *prev;
@@ -1132,18 +1134,22 @@ munmap_back:
1132 1134
1133 /* 1135 /*
1134 * Set 'VM_NORESERVE' if we should not account for the 1136 * Set 'VM_NORESERVE' if we should not account for the
1135 * memory use of this mapping. We only honor MAP_NORESERVE 1137 * memory use of this mapping.
1136 * if we're allowed to overcommit memory.
1137 */ 1138 */
1138 if ((flags & MAP_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) 1139 if ((flags & MAP_NORESERVE)) {
1139 vm_flags |= VM_NORESERVE; 1140 /* We honor MAP_NORESERVE if allowed to overcommit */
1140 if (!accountable) 1141 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
1141 vm_flags |= VM_NORESERVE; 1142 vm_flags |= VM_NORESERVE;
1143
1144 /* hugetlb applies strict overcommit unless MAP_NORESERVE */
1145 if (file && is_file_hugepages(file))
1146 vm_flags |= VM_NORESERVE;
1147 }
1142 1148
1143 /* 1149 /*
1144 * Private writable mapping: check memory availability 1150 * Private writable mapping: check memory availability
1145 */ 1151 */
1146 if (accountable_mapping(vm_flags)) { 1152 if (accountable_mapping(file, vm_flags)) {
1147 charged = len >> PAGE_SHIFT; 1153 charged = len >> PAGE_SHIFT;
1148 if (security_vm_enough_memory(charged)) 1154 if (security_vm_enough_memory(charged))
1149 return -ENOMEM; 1155 return -ENOMEM;
@@ -2082,12 +2088,8 @@ void exit_mmap(struct mm_struct *mm)
2082 unsigned long end; 2088 unsigned long end;
2083 2089
2084 /* mm's last user has gone, and its about to be pulled down */ 2090 /* mm's last user has gone, and its about to be pulled down */
2085 arch_exit_mmap(mm);
2086 mmu_notifier_release(mm); 2091 mmu_notifier_release(mm);
2087 2092
2088 if (!mm->mmap) /* Can happen if dup_mmap() received an OOM */
2089 return;
2090
2091 if (mm->locked_vm) { 2093 if (mm->locked_vm) {
2092 vma = mm->mmap; 2094 vma = mm->mmap;
2093 while (vma) { 2095 while (vma) {
@@ -2096,7 +2098,13 @@ void exit_mmap(struct mm_struct *mm)
2096 vma = vma->vm_next; 2098 vma = vma->vm_next;
2097 } 2099 }
2098 } 2100 }
2101
2102 arch_exit_mmap(mm);
2103
2099 vma = mm->mmap; 2104 vma = mm->mmap;
2105 if (!vma) /* Can happen if dup_mmap() received an OOM */
2106 return;
2107
2100 lru_add_drain(); 2108 lru_add_drain();
2101 flush_cache_mm(mm); 2109 flush_cache_mm(mm);
2102 tlb = tlb_gather_mmu(mm, 1); 2110 tlb = tlb_gather_mmu(mm, 1);