Diffstat (limited to 'mm/mmap.c')
-rw-r--r--	mm/mmap.c	19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 497e502dfd..9717337293 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -188,7 +188,7 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
 		struct file *file, struct address_space *mapping)
 {
 	if (vma->vm_flags & VM_DENYWRITE)
-		atomic_inc(&file->f_dentry->d_inode->i_writecount);
+		atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
 	if (vma->vm_flags & VM_SHARED)
 		mapping->i_mmap_writable--;
 
@@ -399,7 +399,7 @@ static inline void __vma_link_file(struct vm_area_struct *vma)
 	struct address_space *mapping = file->f_mapping;
 
 	if (vma->vm_flags & VM_DENYWRITE)
-		atomic_dec(&file->f_dentry->d_inode->i_writecount);
+		atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
 	if (vma->vm_flags & VM_SHARED)
 		mapping->i_mmap_writable++;
 
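These two hunks, together with the inode lookup in do_mmap_pgoff() below, are the same mechanical conversion: struct file no longer exposes a bare f_dentry pointer, so the dentry is reached through the embedded struct path at file->f_path. A minimal sketch of the access pattern, assuming that layout (the helper name is hypothetical, not part of the patch):

	/* Hypothetical helper: fetch the inode backing an open file.
	 * Old form: file->f_dentry->d_inode */
	static inline struct inode *file_backing_inode(struct file *file)
	{
		return file->f_path.dentry->d_inode;
	}
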
@@ -907,7 +907,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 	 * mounted, in which case we dont add PROT_EXEC.)
 	 */
 	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
-		if (!(file && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC)))
+		if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
 			prot |= PROT_EXEC;
 
 	if (!len)
@@ -960,7 +960,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 		return -EAGAIN;
 	}
 
-	inode = file ? file->f_dentry->d_inode : NULL;
+	inode = file ? file->f_path.dentry->d_inode : NULL;
 
 	if (file) {
 		switch (flags & MAP_TYPE) {
@@ -989,7 +989,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 		case MAP_PRIVATE:
			if (!(file->f_mode & FMODE_READ))
 				return -EACCES;
-			if (file->f_vfsmnt->mnt_flags & MNT_NOEXEC) {
+			if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
 				if (vm_flags & VM_EXEC)
 					return -EPERM;
 				vm_flags &= ~VM_MAYEXEC;
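The MNT_NOEXEC tests above make the companion change on the mount side: file->f_vfsmnt becomes file->f_path.mnt, since the vfsmount also lives inside the embedded struct path. Sketched the same way (illustrative helper, not from the patch):

	/* Hypothetical helper: does the file's mount forbid executable
	 * mappings?  Old form: file->f_vfsmnt->mnt_flags & MNT_NOEXEC */
	static inline int file_mnt_noexec(struct file *file)
	{
		return file->f_path.mnt->mnt_flags & MNT_NOEXEC;
	}
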
@@ -1379,7 +1379,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		 * Check if the given range is hugepage aligned, and
 		 * can be made suitable for hugepages.
 		 */
-		ret = prepare_hugepage_range(addr, len);
+		ret = prepare_hugepage_range(addr, len, pgoff);
 	} else {
 		/*
 		 * Ensure that a normal request is not falling in a
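prepare_hugepage_range() now takes the mapping's page offset as a third argument, so alignment can be validated against the file offset as well as addr and len. One plausible shape for the extra check, as a sketch only (HPAGE_MASK and PAGE_SHIFT are the usual kernel constants; this is not necessarily the patch's implementation):

	/* Illustrative: a hugepage mapping's file offset, measured in
	 * small pages, must itself be hugepage aligned. */
	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
		return -EINVAL;
	if (addr & ~HPAGE_MASK || len & ~HPAGE_MASK)
		return -EINVAL;
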
@@ -1736,7 +1736,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 	if (mm->map_count >= sysctl_max_map_count)
 		return -ENOMEM;
 
-	new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!new)
 		return -ENOMEM;
 
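SLAB_KERNEL was a legacy alias for GFP_KERNEL, so kmem_cache_alloc() callers now pass the gfp_t flag directly; the copy_vma() hunk at the end of this diff makes the identical substitution. The call pattern, sketched (vm_area_cachep is the kernel's slab cache for VMAs, as used above):

	/* Allocate a vm_area_struct from its dedicated cache.  GFP_KERNEL
	 * may sleep, which is safe in this process context. */
	struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new)
		return -ENOMEM;
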
@@ -1880,6 +1880,9 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	if ((addr + len) > TASK_SIZE || (addr + len) < addr)
 		return -EINVAL;
 
+	if (is_hugepage_only_range(mm, addr, len))
+		return -EINVAL;
+
 	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
 	error = arch_mmap_check(addr, len, flags);
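The one functional change in this diff: do_brk() now refuses anonymous expansions that would fall inside an address range reserved exclusively for hugepages. is_hugepage_only_range() is architecture-defined; a sketch of the assumed fallback on architectures without such segregated regions (the #define is illustrative, not quoted from a header):

	/* Assumed default: with no hugepage-only windows, nothing ever
	 * matches and the new do_brk() check compiles away. */
	#ifndef is_hugepage_only_range
	#define is_hugepage_only_range(mm, addr, len)	0
	#endif
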
@@ -2054,7 +2057,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 			vma_start < new_vma->vm_end)
 		*vmap = new_vma;
 	} else {
-		new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 		if (new_vma) {
 			*new_vma = *vma;
 			pol = mpol_copy(vma_policy(vma));