Diffstat (limited to 'mm/mmap.c')
-rw-r--r--  mm/mmap.c | 89 ++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 84 insertions(+), 5 deletions(-)
@@ -188,7 +188,7 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
 		struct file *file, struct address_space *mapping)
 {
 	if (vma->vm_flags & VM_DENYWRITE)
-		atomic_inc(&file->f_dentry->d_inode->i_writecount);
+		atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
 	if (vma->vm_flags & VM_SHARED)
 		mapping->i_mmap_writable--;
 
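This hunk and the four that follow are mechanical renames: the separate dentry and vfsmount pointers of struct file were consolidated into an embedded struct path, so f_dentry becomes f_path.dentry and f_vfsmnt becomes f_path.mnt. A minimal sketch of the layout this assumes, trimmed to the members this diff touches (not the full fs.h declarations):

struct path {
        struct vfsmount *mnt;
        struct dentry *dentry;
};

struct file {
        struct path f_path;     /* replaces separate f_dentry/f_vfsmnt */
        /* ... remaining members unchanged ... */
};

With transition macros such as #define f_dentry f_path.dentry in place, the old spellings still compile; the edits here simply move mm/mmap.c to the new names directly.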
@@ -399,7 +399,7 @@ static inline void __vma_link_file(struct vm_area_struct *vma)
 		struct address_space *mapping = file->f_mapping;
 
 		if (vma->vm_flags & VM_DENYWRITE)
-			atomic_dec(&file->f_dentry->d_inode->i_writecount);
+			atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
 		if (vma->vm_flags & VM_SHARED)
 			mapping->i_mmap_writable++;
 
@@ -907,7 +907,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 	 * mounted, in which case we dont add PROT_EXEC.)
 	 */
 	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
-		if (!(file && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC)))
+		if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
 			prot |= PROT_EXEC;
 
 	if (!len)
@@ -960,7 +960,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 		return -EAGAIN;
 	}
 
-	inode = file ? file->f_dentry->d_inode : NULL;
+	inode = file ? file->f_path.dentry->d_inode : NULL;
 
 	if (file) {
 		switch (flags & MAP_TYPE) {
@@ -989,7 +989,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 		case MAP_PRIVATE:
 			if (!(file->f_mode & FMODE_READ))
 				return -EACCES;
-			if (file->f_vfsmnt->mnt_flags & MNT_NOEXEC) {
+			if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
 				if (vm_flags & VM_EXEC)
 					return -EPERM;
 				vm_flags &= ~VM_MAYEXEC;
@@ -1477,6 +1477,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, unsigned long grow)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct rlimit *rlim = current->signal->rlim;
+	unsigned long new_start;
 
 	/* address space limit tests */
 	if (!may_expand_vm(mm, grow))
@@ -1496,6 +1497,12 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, unsigned long grow)
 		return -ENOMEM;
 	}
 
+	/* Check to ensure the stack will not grow into a hugetlb-only region */
+	new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
+			vma->vm_end - size;
+	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
+		return -EFAULT;
+
 	/*
 	 * Overcommit.. This must be the final test, as it will
 	 * update security statistics.
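For the hugetlb check just added: new_start is the starting address of the vma after growth, fixed at vm_start for an upward-growing (VM_GROWSUP) stack and moved down to vm_end - size for a downward-growing one, so [new_start, new_start + size) covers the full range the grown stack would occupy, which is exactly what is_hugepage_only_range() must inspect. A standalone sketch of the arithmetic with illustrative addresses (a userspace toy, not kernel code):

#include <assert.h>

/* Mirror of the new_start expression in the hunk above. */
static unsigned long grown_stack_start(unsigned long vm_start,
                                       unsigned long vm_end,
                                       unsigned long size, int grows_up)
{
        /* Upward growth moves only vm_end; downward growth moves the start. */
        return grows_up ? vm_start : vm_end - size;
}

int main(void)
{
        /* Downward stack [0xbf000000, 0xc0000000) growing to 0x1001000
         * bytes total: its start dips one page below the old vm_start. */
        assert(grown_stack_start(0xbf000000UL, 0xc0000000UL,
                                 0x1001000UL, 0) == 0xbefff000UL);

        /* Upward (VM_GROWSUP) stack: the start never moves. */
        assert(grown_stack_start(0x10000000UL, 0x10100000UL,
                                 0x200000UL, 1) == 0x10000000UL);
        return 0;
}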
@@ -2094,3 +2101,75 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
 		return 0;
 	return 1;
 }
+
+
+static struct page *special_mapping_nopage(struct vm_area_struct *vma,
+					   unsigned long address, int *type)
+{
+	struct page **pages;
+
+	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+
+	address -= vma->vm_start;
+	for (pages = vma->vm_private_data; address > 0 && *pages; ++pages)
+		address -= PAGE_SIZE;
+
+	if (*pages) {
+		struct page *page = *pages;
+		get_page(page);
+		return page;
+	}
+
+	return NOPAGE_SIGBUS;
+}
+
+/*
+ * Having a close hook prevents vma merging regardless of flags.
+ */
+static void special_mapping_close(struct vm_area_struct *vma)
+{
+}
+
+static struct vm_operations_struct special_mapping_vmops = {
+	.close = special_mapping_close,
+	.nopage = special_mapping_nopage,
+};
+
+/*
+ * Called with mm->mmap_sem held for writing.
+ * Insert a new vma covering the given region, with the given flags.
+ * Its pages are supplied by the given array of struct page *.
+ * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
+ * The region past the last page supplied will always produce SIGBUS.
+ * The array pointer and the pages it points to are assumed to stay alive
+ * for as long as this mapping might exist.
+ */
+int install_special_mapping(struct mm_struct *mm,
+			    unsigned long addr, unsigned long len,
+			    unsigned long vm_flags, struct page **pages)
+{
+	struct vm_area_struct *vma;
+
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+	if (unlikely(vma == NULL))
+		return -ENOMEM;
+
+	vma->vm_mm = mm;
+	vma->vm_start = addr;
+	vma->vm_end = addr + len;
+
+	vma->vm_flags = vm_flags | mm->def_flags;
+	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+
+	vma->vm_ops = &special_mapping_vmops;
+	vma->vm_private_data = pages;
+
+	if (unlikely(insert_vm_struct(mm, vma))) {
+		kmem_cache_free(vm_area_cachep, vma);
+		return -ENOMEM;
+	}
+
+	mm->total_vm += len >> PAGE_SHIFT;
+
+	return 0;
+}
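A sketch of how an architecture might consume the new hook, modeled on the vDSO-style use case it was written for; the names map_vdso and vdso_pages are illustrative, not part of this diff:

/* One arch-supplied page plus the NULL terminator that
 * special_mapping_nopage() uses to bound its array walk. */
static struct page *vdso_pages[2];

static int map_vdso(struct mm_struct *mm, unsigned long addr)
{
        /* Per the comment on install_special_mapping() above,
         * mm->mmap_sem must be held for writing here. */
        return install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ | VM_EXEC |
                                       VM_MAYREAD | VM_MAYEXEC,
                                       vdso_pages);
}

At fault time, an access at vma->vm_start + N * PAGE_SIZE walks N entries into the pages array; hitting the NULL terminator first yields NOPAGE_SIGBUS, which is how the region past the last supplied page stays unbacked. Two design details are worth noting: the empty .close hook exists purely so the vma is never merged with a neighbour (per the comment above, any close hook prevents vma merging), and protection_map[vma->vm_flags & 7] derives the page protections from just the VM_READ/VM_WRITE/VM_EXEC bits of the supplied flags.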