path: root/mm/mmap.c
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--  mm/mmap.c  52
1 file changed, 32 insertions(+), 20 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 214b6a258eeb..1abb9185a686 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -20,6 +20,7 @@
 #include <linux/fs.h>
 #include <linux/personality.h>
 #include <linux/security.h>
+#include <linux/ima.h>
 #include <linux/hugetlb.h>
 #include <linux/profile.h>
 #include <linux/module.h>
@@ -918,7 +919,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	struct inode *inode;
 	unsigned int vm_flags;
 	int error;
-	int accountable = 1;
 	unsigned long reqprot = prot;
 
 	/*
@@ -1019,8 +1019,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 				return -EPERM;
 			vm_flags &= ~VM_MAYEXEC;
 		}
-		if (is_file_hugepages(file))
-			accountable = 0;
 
 		if (!file->f_op || !file->f_op->mmap)
 			return -ENODEV;
@@ -1052,9 +1050,11 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
 	if (error)
 		return error;
+	error = ima_file_mmap(file, prot);
+	if (error)
+		return error;
 
-	return mmap_region(file, addr, len, flags, vm_flags, pgoff,
-			   accountable);
+	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
 }
 EXPORT_SYMBOL(do_mmap_pgoff);
 
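The hunks above wire the new IMA hook into do_mmap_pgoff() directly after the LSM check. For kernels built without CONFIG_IMA, <linux/ima.h> is expected to provide a no-op stub along these lines; this is a sketch only, the guard and stub body are assumptions based on the usual pattern for such hooks, while the signature is taken from the call site above:

/* Sketch, not part of this patch: no-op stub so the new call compiles away
 * when IMA is not configured. */
#ifndef CONFIG_IMA
static inline int ima_file_mmap(struct file *file, unsigned long prot)
{
	return 0;	/* nothing to measure, never veto the mapping */
}
#endif

With CONFIG_IMA enabled, a non-zero return from ima_file_mmap() now fails the mmap() in exactly the same way as a security_file_mmap() denial.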
@@ -1092,17 +1092,23 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
 
 /*
  * We account for memory if it's a private writeable mapping,
- * and VM_NORESERVE wasn't set.
+ * not hugepages and VM_NORESERVE wasn't set.
  */
-static inline int accountable_mapping(unsigned int vm_flags)
+static inline int accountable_mapping(struct file *file, unsigned int vm_flags)
 {
+	/*
+	 * hugetlb has its own accounting separate from the core VM
+	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
+	 */
+	if (file && is_file_hugepages(file))
+		return 0;
+
 	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
 }
 
 unsigned long mmap_region(struct file *file, unsigned long addr,
 			  unsigned long len, unsigned long flags,
-			  unsigned int vm_flags, unsigned long pgoff,
-			  int accountable)
+			  unsigned int vm_flags, unsigned long pgoff)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma, *prev;
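The flag test in accountable_mapping() is easy to misread, so here is a small stand-alone illustration of the same expression. Illustrative only: the VM_* values are copied from the usual include/linux/mm.h definitions so the snippet builds in user space, and the hugetlb check is left out.

#include <stdio.h>

/* Values as commonly defined in include/linux/mm.h, copied here so this
 * builds outside the kernel. */
#define VM_WRITE	0x00000002
#define VM_SHARED	0x00000008
#define VM_NORESERVE	0x00200000

/* Same expression as accountable_mapping() above, minus the hugetlb check. */
static int accountable(unsigned int vm_flags)
{
	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}

int main(void)
{
	printf("private writable            -> %d\n", accountable(VM_WRITE));
	printf("private read-only           -> %d\n", accountable(0));
	printf("shared writable             -> %d\n", accountable(VM_WRITE | VM_SHARED));
	printf("private writable, noreserve -> %d\n", accountable(VM_WRITE | VM_NORESERVE));
	return 0;
}

Only the first case prints 1: a mapping is charged only when it is writable, private, and not marked VM_NORESERVE. With the new file argument, a hugetlbfs backing file short-circuits the function to 0 before this expression is even reached.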
@@ -1128,18 +1134,22 @@ munmap_back:
 
 	/*
 	 * Set 'VM_NORESERVE' if we should not account for the
-	 * memory use of this mapping. We only honor MAP_NORESERVE
-	 * if we're allowed to overcommit memory.
+	 * memory use of this mapping.
 	 */
-	if ((flags & MAP_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER)
-		vm_flags |= VM_NORESERVE;
-	if (!accountable)
-		vm_flags |= VM_NORESERVE;
+	if ((flags & MAP_NORESERVE)) {
+		/* We honor MAP_NORESERVE if allowed to overcommit */
+		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
+			vm_flags |= VM_NORESERVE;
+
+		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
+		if (file && is_file_hugepages(file))
+			vm_flags |= VM_NORESERVE;
+	}
 
 	/*
 	 * Private writable mapping: check memory availability
 	 */
-	if (accountable_mapping(vm_flags)) {
+	if (accountable_mapping(file, vm_flags)) {
 		charged = len >> PAGE_SHIFT;
 		if (security_vm_enough_memory(charged))
 			return -ENOMEM;
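From user space the effect of these rules can be seen with a few mmap() calls. This is a rough sketch, not from the patch: the hugetlbfs mount point and 2 MB huge page size are assumptions, and error handling is omitted.

#include <fcntl.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 1 << 20;

	/* Private writable anonymous mapping: charged by the core VM. */
	void *charged = mmap(NULL, len, PROT_READ | PROT_WRITE,
			     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* MAP_NORESERVE: VM_NORESERVE is set, so no charge, as long as the
	 * overcommit policy is not OVERCOMMIT_NEVER. */
	void *lazy = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);

	/* hugetlbfs-backed mapping: never charged by the core VM; hugetlb
	 * keeps its own reservation accounting, relaxed only by
	 * MAP_NORESERVE. (Assumed hugetlbfs mount at /dev/hugepages.) */
	int fd = open("/dev/hugepages/example", O_CREAT | O_RDWR, 0600);
	void *huge = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);

	(void)charged; (void)lazy; (void)huge;
	return 0;
}

Only the first mapping reaches security_vm_enough_memory() with its size; in the other two cases the charge is skipped.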
@@ -2078,12 +2088,8 @@ void exit_mmap(struct mm_struct *mm)
 	unsigned long end;
 
 	/* mm's last user has gone, and its about to be pulled down */
-	arch_exit_mmap(mm);
 	mmu_notifier_release(mm);
 
-	if (!mm->mmap)	/* Can happen if dup_mmap() received an OOM */
-		return;
-
 	if (mm->locked_vm) {
 		vma = mm->mmap;
 		while (vma) {
@@ -2092,7 +2098,13 @@ void exit_mmap(struct mm_struct *mm)
 			vma = vma->vm_next;
 		}
 	}
+
+	arch_exit_mmap(mm);
+
 	vma = mm->mmap;
+	if (!vma)	/* Can happen if dup_mmap() received an OOM */
+		return;
+
 	lru_add_drain();
 	flush_cache_mm(mm);
 	tlb = tlb_gather_mmu(mm, 1);
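Read together, the two exit_mmap() hunks reorder the teardown path; a consolidated sketch of the post-patch flow, with the unchanged munlock loop and the tail of the function elided as comments:

/* Sketch of exit_mmap() after both hunks above are applied
 * (kernel-context excerpt, not a complete function body). */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	/* mm's last user has gone, and its about to be pulled down */
	mmu_notifier_release(mm);

	if (mm->locked_vm) {
		/* walk mm->mmap and munlock every locked vma (unchanged loop) */
	}

	arch_exit_mmap(mm);	/* now runs after the munlock pass */

	vma = mm->mmap;
	if (!vma)	/* Can happen if dup_mmap() received an OOM */
		return;

	lru_add_drain();
	flush_cache_mm(mm);
	/* tlb_gather_mmu() and the rest of the unmap/teardown follow as before */
}

The net change is that arch_exit_mmap() now runs after the munlock pass, and the empty-mm early return moves below it, keyed off the cached vma pointer.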