author     Hugh Dickins <hugh@veritas.com>            2006-11-14 05:03:32 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>   2006-11-14 12:09:27 -0500
commit     68589bc353037f233fe510ad9ff432338c95db66 (patch)
tree       dedc58ff66134f54796642917e2a2a26ac6802b0 /fs/hugetlbfs/inode.c
parent     69ae9e3ee4ce99140a7db424bebf55d8d180da2f (diff)
[PATCH] hugetlb: prepare_hugepage_range check offset too
(David:) If hugetlbfs_file_mmap() returns a failure to do_mmap_pgoff() - for example, because the given file offset is not hugepage aligned - then do_mmap_pgoff will go to the unmap_and_free_vma backout path.  But at this stage the vma hasn't been marked as hugepage, and the backout path will call unmap_region() on it.  That will eventually call down to the non-hugepage version of unmap_page_range().

On ppc64, at least, that will cause serious problems if there are any existing hugepage pagetable entries in the vicinity - for example if there are any other hugepage mappings under the same PUD.  unmap_page_range() will trigger a bad_pud() on the hugepage pud entries.  I suspect this will also cause bad problems on ia64, though I don't have a machine to test it on.

(Hugh:) prepare_hugepage_range() should check file offset alignment when it checks virtual address and length, to stop MAP_FIXED with a bad huge offset from unmapping before it fails further down.  PowerPC should apply the same prepare_hugepage_range alignment checks as ia64 and all the others do.

Then none of the alignment checks in hugetlbfs_file_mmap are required (nor is the check for too small a mapping); but even so, move up setting of VM_HUGETLB and add a comment to warn of what David Gibson discovered - if hugetlbfs_file_mmap fails before setting it, do_mmap_pgoff's unmap_region when unwinding from error will go the non-huge way, which may cause bad behaviour on architectures (powerpc and ia64) which segregate their huge mappings into a separate region of the address space.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Acked-by: Adam Litke <agl@us.ibm.com>
Acked-by: David Gibson <david@gibson.dropbear.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
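For reference, a minimal sketch of the offset check described above, modelled on the generic prepare_hugepage_range() fallback this change adds outside fs/hugetlbfs/inode.c (those hunks are not covered by the diffstat below); the signature and the new pgoff argument here are an assumption based on the 2.6.19-era code, not part of this file's diff:

	/* Sketch only: assumed generic fallback taking the new pgoff argument. */
	static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
						 pgoff_t pgoff)
	{
		if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))  /* file offset must be hugepage aligned */
			return -EINVAL;
		if (len & ~HPAGE_MASK)                    /* length must be a hugepage multiple */
			return -EINVAL;
		if (addr & ~HPAGE_MASK)                   /* virtual address must be hugepage aligned */
			return -EINVAL;
		return 0;
	}

With this check in place, a MAP_FIXED request with a bad huge offset fails here, before do_mmap_pgoff unmaps anything.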
Diffstat (limited to 'fs/hugetlbfs/inode.c')
-rw-r--r--   fs/hugetlbfs/inode.c   21
1 file changed, 8 insertions(+), 13 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 0bea6a619e1..7f4756963d0 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -62,24 +62,19 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	loff_t len, vma_len;
 	int ret;
 
-	if (vma->vm_pgoff & (HPAGE_SIZE / PAGE_SIZE - 1))
-		return -EINVAL;
-
-	if (vma->vm_start & ~HPAGE_MASK)
-		return -EINVAL;
-
-	if (vma->vm_end & ~HPAGE_MASK)
-		return -EINVAL;
-
-	if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
-		return -EINVAL;
+	/*
+	 * vma alignment has already been checked by prepare_hugepage_range.
+	 * If you add any error returns here, do so after setting VM_HUGETLB,
+	 * so is_vm_hugetlb_page tests below unmap_region go the right way
+	 * when do_mmap_pgoff unwinds (may be important on powerpc and ia64).
+	 */
+	vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
+	vma->vm_ops = &hugetlb_vm_ops;
 
 	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
 
 	mutex_lock(&inode->i_mutex);
 	file_accessed(file);
-	vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
-	vma->vm_ops = &hugetlb_vm_ops;
 
 	ret = -ENOMEM;
 	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
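Why the new comment insists on setting VM_HUGETLB before any error return: when do_mmap_pgoff() unwinds through unmap_region(), the teardown dispatches on is_vm_hugetlb_page().  A simplified sketch of that dispatch, loosely after the 2.6.19-era unmap_vmas() in mm/memory.c (not the literal source; argument lists trimmed):

	if (unlikely(is_vm_hugetlb_page(vma)))
		/* huge mappings are torn down with hugepage-aware page table walks */
		unmap_hugepage_range(vma, start, end);
	else
		/* without VM_HUGETLB the normal walk runs and can trip bad_pud()
		 * on hugepage PUD entries (seen on ppc64, likely ia64 as well) */
		unmap_page_range(tlb, vma, start, end, details);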