author	James Morris <jmorris@namei.org>	2009-02-05 19:01:45 -0500
committer	James Morris <jmorris@namei.org>	2009-02-05 19:01:45 -0500
commit	cb5629b10d64a8006622ce3a52bc887d91057d69 (patch)
tree	7c06d8f30783115e3384721046258ce615b129c5 /mm/mmap.c
parent	8920d5ad6ba74ae8ab020e90cc4d976980e68701 (diff)
parent	f01d1d546abb2f4028b5299092f529eefb01253a (diff)
Merge branch 'master' into next
Conflicts:
	fs/namei.c

Manually merged per:

diff --cc fs/namei.c
index 734f2b5,bbc15c2..0000000
--- a/fs/namei.c
+++ b/fs/namei.c
@@@ -860,9 -848,8 +849,10 @@@ static int __link_path_walk(const char
 		nd->flags |= LOOKUP_CONTINUE;
 		err = exec_permission_lite(inode);
 		if (err == -EAGAIN)
-			err = vfs_permission(nd, MAY_EXEC);
+			err = inode_permission(nd->path.dentry->d_inode,
+					       MAY_EXEC);
+		if (!err)
+			err = ima_path_check(&nd->path, MAY_EXEC);
 		if (err)
 			break;

@@@ -1525,14 -1506,9 +1509,14 @@@ int may_open(struct path *path, int acc
 		flag &= ~O_TRUNC;
 	}

-	error = vfs_permission(nd, acc_mode);
+	error = inode_permission(inode, acc_mode);
 	if (error)
 		return error;
+
-	error = ima_path_check(&nd->path,
++	error = ima_path_check(path,
+			acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC));
+	if (error)
+		return error;

 	/*
	 * An append-only file must be opened in append mode for writing.
	 */

Signed-off-by: James Morris <jmorris@namei.org>
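Both resolved hunks converge on the same pattern: the DAC check (inode_permission) runs first, and the IMA measurement hook (ima_path_check) is chained directly after it, with either failure aborting the path walk or open. Condensed from the diff above as a sketch, not the full may_open() logic:

	/* Sketch of the merged check order in may_open(): both checks
	 * must pass before the open proceeds. */
	error = inode_permission(inode, acc_mode);
	if (error)
		return error;
	error = ima_path_check(path, acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC));
	if (error)
		return error;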
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--	mm/mmap.c	117
1 files changed, 60 insertions, 57 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index c3647f3b0621..3b3ed0bb9fdb 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3,7 +3,7 @@
  *
  * Written by obz.
  *
- * Address space accounting code	<alan@redhat.com>
+ * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
  */
 
 #include <linux/slab.h>
@@ -246,7 +246,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
 	return next;
 }
 
-asmlinkage unsigned long sys_brk(unsigned long brk)
+SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
 	unsigned long rlim, retval;
 	unsigned long newbrk, oldbrk;
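For readers who haven't met the macro: SYSCALL_DEFINEn from <linux/syscalls.h> replaces open-coded asmlinkage syscall definitions so that argument-width fixups and tracing metadata can be attached in one place. With those options configured out it reduces to roughly the old form; a simplification, not the exact expansion:

	/* Approximately what SYSCALL_DEFINE1(brk, unsigned long, brk)
	 * expands to with syscall wrappers/tracing disabled. Note the
	 * return type becomes the standardized 'long' rather than the
	 * old 'unsigned long'. */
	asmlinkage long sys_brk(unsigned long brk);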
@@ -414,7 +414,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
 
 static void __vma_link_file(struct vm_area_struct *vma)
 {
-	struct file * file;
+	struct file *file;
 
 	file = vma->vm_file;
 	if (file) {
@@ -475,11 +475,10 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
  * insert vm structure into list and rbtree and anon_vma,
  * but it has already been inserted into prio_tree earlier.
  */
-static void
-__insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-	struct vm_area_struct * __vma, * prev;
-	struct rb_node ** rb_link, * rb_parent;
+	struct vm_area_struct *__vma, *prev;
+	struct rb_node **rb_link, *rb_parent;
 
 	__vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
 	BUG_ON(__vma && __vma->vm_start < vma->vm_end);
@@ -660,6 +659,9 @@ again:			remove_next = 1 + (end > next->vm_end);
 	validate_mm(mm);
 }
 
+/* Flags that can be inherited from an existing mapping when merging */
+#define VM_MERGEABLE_FLAGS (VM_CAN_NONLINEAR)
+
 /*
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
@@ -667,7 +669,7 @@ again:			remove_next = 1 + (end > next->vm_end);
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
 			struct file *file, unsigned long vm_flags)
 {
-	if (vma->vm_flags != vm_flags)
+	if ((vma->vm_flags ^ vm_flags) & ~VM_MERGEABLE_FLAGS)
 		return 0;
 	if (vma->vm_file != file)
 		return 0;
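The rewritten test is the XOR-and-mask idiom: a ^ b yields exactly the bits where two flag words differ, and masking with ~VM_MERGEABLE_FLAGS discards the differences we tolerate. A standalone illustration, with a hypothetical helper name:

	/* Hypothetical helper (not kernel code): nonzero iff a and b
	 * differ in any bit outside the 'ignore' mask. */
	static inline unsigned long flags_differ_outside(unsigned long a,
			unsigned long b, unsigned long ignore)
	{
		return (a ^ b) & ~ignore;	/* XOR isolates differing bits */
	}

With ignore = VM_MERGEABLE_FLAGS this is exactly the new is_mergeable_vma() condition: mappings that differ only in VM_CAN_NONLINEAR may still merge.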
@@ -909,7 +911,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
  * The caller must hold down_write(current->mm->mmap_sem).
  */
 
-unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
+unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 			unsigned long len, unsigned long prot,
 			unsigned long flags, unsigned long pgoff)
 {
@@ -1092,6 +1094,15 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
 		mapping_cap_account_dirty(vma->vm_file->f_mapping);
 }
 
+/*
+ * We account for memory if it's a private writeable mapping,
+ * and VM_NORESERVE wasn't set.
+ */
+static inline int accountable_mapping(unsigned int vm_flags)
+{
+	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
+}
+
 unsigned long mmap_region(struct file *file, unsigned long addr,
 			unsigned long len, unsigned long flags,
 			unsigned int vm_flags, unsigned long pgoff,
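The mask-and-compare in accountable_mapping() packs three conditions into a single branch-free test; an equivalent long-hand form, for illustration only:

	/* Spelled-out equivalent of the accountable_mapping() test:
	 * accountable == writable AND private AND not VM_NORESERVE. */
	static inline int accountable_mapping_longhand(unsigned int vm_flags)
	{
		return (vm_flags & VM_WRITE) &&		/* writable, and */
		       !(vm_flags & VM_SHARED) &&	/* private, and  */
		       !(vm_flags & VM_NORESERVE);	/* accounted     */
	}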
@@ -1119,36 +1130,32 @@ munmap_back:
 	if (!may_expand_vm(mm, len >> PAGE_SHIFT))
 		return -ENOMEM;
 
-	if (flags & MAP_NORESERVE)
+	/*
+	 * Set 'VM_NORESERVE' if we should not account for the
+	 * memory use of this mapping. We only honor MAP_NORESERVE
+	 * if we're allowed to overcommit memory.
+	 */
+	if ((flags & MAP_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER)
+		vm_flags |= VM_NORESERVE;
+	if (!accountable)
 		vm_flags |= VM_NORESERVE;
 
-	if (accountable && (!(flags & MAP_NORESERVE) ||
-			    sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
-		if (vm_flags & VM_SHARED) {
-			/* Check memory availability in shmem_file_setup? */
-			vm_flags |= VM_ACCOUNT;
-		} else if (vm_flags & VM_WRITE) {
-			/*
-			 * Private writable mapping: check memory availability
-			 */
-			charged = len >> PAGE_SHIFT;
-			if (security_vm_enough_memory(charged))
-				return -ENOMEM;
-			vm_flags |= VM_ACCOUNT;
-		}
+	/*
+	 * Private writable mapping: check memory availability
+	 */
+	if (accountable_mapping(vm_flags)) {
+		charged = len >> PAGE_SHIFT;
+		if (security_vm_enough_memory(charged))
+			return -ENOMEM;
+		vm_flags |= VM_ACCOUNT;
 	}
 
 	/*
-	 * Can we just expand an old private anonymous mapping?
-	 * The VM_SHARED test is necessary because shmem_zero_setup
-	 * will create the file object for a shared anonymous map below.
+	 * Can we just expand an old mapping?
 	 */
-	if (!file && !(vm_flags & VM_SHARED)) {
-		vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
-					NULL, NULL, pgoff, NULL);
-		if (vma)
-			goto out;
-	}
+	vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
+	if (vma)
+		goto out;
 
 	/*
 	 * Determine the object being mapped and call the appropriate
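From userspace, the behaviour reworked above is driven by mmap(2)'s MAP_NORESERVE flag, which is now honored only while vm.overcommit_memory is not 2 (OVERCOMMIT_NEVER). A minimal caller sketch, assuming Linux and glibc:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		/* Ask for a large private mapping without reserving swap.
		 * Per the kernel logic above, VM_NORESERVE is applied only
		 * when overcommit is permitted (vm.overcommit_memory != 2). */
		size_t len = 1UL << 30;	/* 1 GiB */
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		munmap(p, len);
		return 0;
	}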
@@ -1191,14 +1198,6 @@ munmap_back:
 		goto free_vma;
 	}
 
-	/* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
-	 * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
-	 * that memory reservation must be checked; but that reservation
-	 * belongs to shared memory object, not to vma: so now clear it.
-	 */
-	if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT))
-		vma->vm_flags &= ~VM_ACCOUNT;
-
 	/* Can addr have changed??
 	 *
 	 * Answer: Yes, several device drivers can do it in their
@@ -1211,17 +1210,8 @@ munmap_back:
 	if (vma_wants_writenotify(vma))
 		vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
 
-	if (file && vma_merge(mm, prev, addr, vma->vm_end,
-			vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
-		mpol_put(vma_policy(vma));
-		kmem_cache_free(vm_area_cachep, vma);
-		fput(file);
-		if (vm_flags & VM_EXECUTABLE)
-			removed_exe_file_vma(mm);
-	} else {
-		vma_link(mm, vma, prev, rb_link, rb_parent);
-		file = vma->vm_file;
-	}
+	vma_link(mm, vma, prev, rb_link, rb_parent);
+	file = vma->vm_file;
 
 	/* Once vma denies write, undo our temporary denial count */
 	if (correct_wcount)
@@ -1468,7 +1458,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 EXPORT_SYMBOL(get_unmapped_area);
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
-struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
+struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
 	struct vm_area_struct *vma = NULL;
 
@@ -1511,7 +1501,7 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
 			struct vm_area_struct **pprev)
 {
 	struct vm_area_struct *vma = NULL, *prev = NULL;
-	struct rb_node * rb_node;
+	struct rb_node *rb_node;
 	if (!mm)
 		goto out;
 
@@ -1545,7 +1535,7 @@ out:
  * update accounting. This is shared with both the
  * grow-up and grow-down cases.
  */
-static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct rlimit *rlim = current->signal->rlim;
@@ -1953,7 +1943,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 
 EXPORT_SYMBOL(do_munmap);
 
-asmlinkage long sys_munmap(unsigned long addr, size_t len)
+SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
 {
 	int ret;
 	struct mm_struct *mm = current->mm;
@@ -2095,6 +2085,9 @@ void exit_mmap(struct mm_struct *mm)
 	arch_exit_mmap(mm);
 	mmu_notifier_release(mm);
 
+	if (!mm->mmap)	/* Can happen if dup_mmap() received an OOM */
+		return;
+
 	if (mm->locked_vm) {
 		vma = mm->mmap;
 		while (vma) {
@@ -2107,7 +2100,7 @@ void exit_mmap(struct mm_struct *mm)
 	lru_add_drain();
 	flush_cache_mm(mm);
 	tlb = tlb_gather_mmu(mm, 1);
-	/* Don't update_hiwater_rss(mm) here, do_exit already did */
+	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
@@ -2474,3 +2467,13 @@ void mm_drop_all_locks(struct mm_struct *mm)
 
 	mutex_unlock(&mm_all_locks_mutex);
 }
+
+/*
+ * initialise the VMA slab
+ */
+void __init mmap_init(void)
+{
+	vm_area_cachep = kmem_cache_create("vm_area_struct",
+			sizeof(struct vm_area_struct), 0,
+			SLAB_PANIC, NULL);
+}
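The kmem_cache_create() arguments in mmap_init() are, in order: cache name, object size, alignment (0 selects the default), slab flags, and an optional constructor; SLAB_PANIC turns creation failure into a boot-time panic, which is why the return value goes unchecked. A hypothetical cache for some other structure would follow the same shape:

	#include <linux/init.h>
	#include <linux/slab.h>

	struct foo {			/* hypothetical object type */
		int bar;
	};

	static struct kmem_cache *foo_cachep;

	static void __init foo_cache_init(void)
	{
		/* name, size, align (0 = default), flags, constructor */
		foo_cachep = kmem_cache_create("foo_struct",
				sizeof(struct foo), 0, SLAB_PANIC, NULL);
	}

	/* Objects are then allocated and released via:
	 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
	 *	kmem_cache_free(foo_cachep, f);
	 */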