Diffstat (limited to 'fs/hugetlbfs/inode.c')
 -rw-r--r--  fs/hugetlbfs/inode.c  46
 1 files changed, 19 insertions, 27 deletions
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 4ee3f006b861..7f4756963d05 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -62,24 +62,19 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	loff_t len, vma_len;
 	int ret;

-	if (vma->vm_pgoff & (HPAGE_SIZE / PAGE_SIZE - 1))
-		return -EINVAL;
-
-	if (vma->vm_start & ~HPAGE_MASK)
-		return -EINVAL;
-
-	if (vma->vm_end & ~HPAGE_MASK)
-		return -EINVAL;
-
-	if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
-		return -EINVAL;
+	/*
+	 * vma alignment has already been checked by prepare_hugepage_range.
+	 * If you add any error returns here, do so after setting VM_HUGETLB,
+	 * so is_vm_hugetlb_page tests below unmap_region go the right way
+	 * when do_mmap_pgoff unwinds (may be important on powerpc and ia64).
+	 */
+	vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
+	vma->vm_ops = &hugetlb_vm_ops;

 	vma_len = (loff_t)(vma->vm_end - vma->vm_start);

 	mutex_lock(&inode->i_mutex);
 	file_accessed(file);
-	vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
-	vma->vm_ops = &hugetlb_vm_ops;

 	ret = -ENOMEM;
 	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
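
The hunk above removes the open-coded sanity checks: as the new comment says, vma alignment has already been verified by prepare_hugepage_range, and VM_HUGETLB | VM_RESERVED is now set before any possible error return so that is_vm_hugetlb_page() gives the right answer if do_mmap_pgoff() has to unwind the mapping. For reference, here is a minimal user-space sketch of the arithmetic the deleted checks performed (and which prepare_hugepage_range is now relied on to cover). The constants (4 KB base pages, 2 MB huge pages) and the helper name are illustrative only, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative values: 4 KB base pages, 2 MB huge pages. */
#define PAGE_SHIFT   12UL
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define HPAGE_SHIFT  21UL
#define HPAGE_SIZE   (1UL << HPAGE_SHIFT)
#define HPAGE_MASK   (~(HPAGE_SIZE - 1))

/* The checks the old hugetlbfs_file_mmap performed inline. */
static bool hugepage_range_ok(unsigned long start, unsigned long end,
                              unsigned long pgoff)
{
        if (pgoff & (HPAGE_SIZE / PAGE_SIZE - 1))  /* file offset huge-page aligned? */
                return false;
        if (start & ~HPAGE_MASK)                   /* vm_start aligned? */
                return false;
        if (end & ~HPAGE_MASK)                     /* vm_end aligned? */
                return false;
        if (end - start < HPAGE_SIZE)              /* at least one huge page long? */
                return false;
        return true;
}

int main(void)
{
        printf("%d\n", hugepage_range_ok(0x40000000UL, 0x40200000UL, 0));  /* 1 */
        printf("%d\n", hugepage_range_ok(0x40001000UL, 0x40200000UL, 0));  /* 0 */
        return 0;
}
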
@@ -271,26 +266,24 @@ static void hugetlbfs_drop_inode(struct inode *inode)
 		hugetlbfs_forget_inode(inode);
 }

-/*
- * h_pgoff is in HPAGE_SIZE units.
- * vma->vm_pgoff is in PAGE_SIZE units.
- */
 static inline void
-hugetlb_vmtruncate_list(struct prio_tree_root *root, unsigned long h_pgoff)
+hugetlb_vmtruncate_list(struct prio_tree_root *root, pgoff_t pgoff)
 {
 	struct vm_area_struct *vma;
 	struct prio_tree_iter iter;

-	vma_prio_tree_foreach(vma, &iter, root, h_pgoff, ULONG_MAX) {
-		unsigned long h_vm_pgoff;
+	vma_prio_tree_foreach(vma, &iter, root, pgoff, ULONG_MAX) {
 		unsigned long v_offset;

-		h_vm_pgoff = vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT);
-		v_offset = (h_pgoff - h_vm_pgoff) << HPAGE_SHIFT;
 		/*
-		 * Is this VMA fully outside the truncation point?
+		 * Can the expression below overflow on 32-bit arches?
+		 * No, because the prio_tree returns us only those vmas
+		 * which overlap the truncated area starting at pgoff,
+		 * and no vma on a 32-bit arch can span beyond the 4GB.
 		 */
-		if (h_vm_pgoff >= h_pgoff)
+		if (vma->vm_pgoff < pgoff)
+			v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
+		else
 			v_offset = 0;

 		__unmap_hugepage_range(vma,
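
This hunk changes hugetlb_vmtruncate_list() to take pgoff in PAGE_SIZE units, the same unit as vma->vm_pgoff, so the per-vma conversion to huge-page units disappears and v_offset, the byte offset inside the vma at which unmapping starts, falls out of one subtraction and shift. The new comment's overflow argument is that the prio_tree only returns vmas overlapping the range starting at pgoff, so pgoff - vm_pgoff is bounded by the vma's own length in pages, which on a 32-bit arch stays below 4 GB. Below is a standalone sketch of the new arithmetic; the helper name, the 4 KB page size and the sample values are hypothetical, not kernel code.

#include <stdio.h>

#define PAGE_SHIFT 12UL   /* 4 KB base pages, for illustration */

/* Mirror of the new v_offset computation in hugetlb_vmtruncate_list. */
static unsigned long truncate_v_offset(unsigned long vm_pgoff,
                                       unsigned long pgoff)
{
        /* A vma returned by the prio_tree overlaps [pgoff, ULONG_MAX]; if it
         * starts at or after pgoff, unmapping begins at its very start. */
        if (vm_pgoff < pgoff)
                return (pgoff - vm_pgoff) << PAGE_SHIFT;
        return 0;
}

int main(void)
{
        /* vma maps the file from 2 MB (vm_pgoff 512), truncation at 4 MB
         * (pgoff 1024): unmapping starts 2 MB into the vma. */
        printf("%lu\n", truncate_v_offset(512, 1024));   /* 2097152 */

        /* vma maps the file from 6 MB, truncation at 4 MB: unmap it all. */
        printf("%lu\n", truncate_v_offset(1536, 1024));  /* 0 */
        return 0;
}
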
@@ -303,14 +296,14 @@ hugetlb_vmtruncate_list(struct prio_tree_root *root, unsigned long h_pgoff)
  */
 static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
 {
-	unsigned long pgoff;
+	pgoff_t pgoff;
 	struct address_space *mapping = inode->i_mapping;

 	if (offset > inode->i_size)
 		return -EINVAL;

 	BUG_ON(offset & ~HPAGE_MASK);
-	pgoff = offset >> HPAGE_SHIFT;
+	pgoff = offset >> PAGE_SHIFT;

 	inode->i_size = offset;
 	spin_lock(&mapping->i_mmap_lock);
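
hugetlb_vmtruncate() is adjusted to match: pgoff becomes a pgoff_t derived with PAGE_SHIFT rather than HPAGE_SHIFT, i.e. it now counts base pages instead of huge pages, while the BUG_ON still guarantees the truncation offset is huge-page aligned. A trivial sketch of the unit change, with illustrative shifts (4 KB base pages, 2 MB huge pages) assumed here rather than taken from any particular arch:

#include <stdio.h>

#define PAGE_SHIFT  12
#define HPAGE_SHIFT 21

int main(void)
{
        long long offset = 4LL << 20;   /* truncate the file at 4 MB */

        printf("old pgoff (HPAGE_SIZE units) = %lld\n", offset >> HPAGE_SHIFT); /* 2 */
        printf("new pgoff (PAGE_SIZE units)  = %lld\n", offset >> PAGE_SHIFT);  /* 1024 */
        return 0;
}
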
@@ -624,7 +617,6 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
 				do_div(size, 100);
 				rest++;
 			}
-			size &= HPAGE_MASK;
 			pconfig->nr_blocks = (size >> HPAGE_SHIFT);
 			value = rest;
 		} else if (!strcmp(opt,"nr_inodes")) {
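
The final hunk drops "size &= HPAGE_MASK;" from mount-option parsing. The rounding it was meant to do is already performed by the following shift, and, presumably the point of the removal, on a 32-bit kernel HPAGE_MASK is only an unsigned long, so ANDing it into the 64-bit size would also clear bits 32..63 and mangle any "size=" of 4 GB or more. A small sketch of that effect, modelling the 32-bit case with fixed-width types; the constants (4 MB huge pages) are illustrative and this reading of the motivation is an assumption, not taken from the changelog.

#include <stdio.h>
#include <stdint.h>

/* Model a 32-bit kernel: HPAGE_MASK is an unsigned long, i.e. 32 bits wide,
 * while the parsed mount size is a 64-bit value. 4 MB huge pages assumed. */
#define HPAGE_SHIFT    22
#define HPAGE_SIZE_32  (UINT32_C(1) << HPAGE_SHIFT)
#define HPAGE_MASK_32  ((uint32_t)~(HPAGE_SIZE_32 - 1))   /* 0xffc00000 */

int main(void)
{
        uint64_t size = UINT64_C(6) << 30;   /* "size=6G" */

        /* Old code: the 32-bit mask zero-extends, clearing bits 32..63. */
        uint64_t old_blocks = (size & HPAGE_MASK_32) >> HPAGE_SHIFT;
        /* New code: the shift alone rounds down to whole huge pages. */
        uint64_t new_blocks = size >> HPAGE_SHIFT;

        printf("old nr_blocks = %llu\n", (unsigned long long)old_blocks); /* 512, i.e. 2 GB */
        printf("new nr_blocks = %llu\n", (unsigned long long)new_blocks); /* 1536, i.e. 6 GB */
        return 0;
}
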