diff options
| author | David Gibson <david@gibson.dropbear.id.au> | 2007-08-31 02:56:40 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-08-31 04:42:23 -0400 |
| commit | dec4ad86c2fbea062e9ef9caa6d6e79f7c5e0b12 (patch) | |
| tree | 9882d3b1f59fb293cf0f70afc80bdc7bb1e0021e | |
| parent | 4a58448b0a375f7198de34dd0d3e2881afeaf025 (diff) | |
hugepage: fix broken check for offset alignment in hugepage mappings
For hugepage mappings, the file offset, like the address and size, needs to
be aligned to the size of a hugepage.
In commit 68589bc353037f233fe510ad9ff432338c95db66, the check for this was
moved into prepare_hugepage_range() along with the address and size checks.
But since BenH's rework of the get_unmapped_area() paths leading up to
commit 4b1d89290b62bb2db476c94c82cf7442aab440c8, prepare_hugepage_range()
is only called for MAP_FIXED mappings, not for other mappings. This means
we're no longer ever checking for an aligned offset - I've confirmed that
mmap() will (apparently) succeed with a misaligned offset on both powerpc
and i386 at least.
This patch restores the check, removing it from prepare_hugepage_range()
and putting it back into hugetlbfs_file_mmap(). I'm putting it there,
rather than in the get_unmapped_area() path, so it only needs to go in one
place, rather than separately in the half-dozen or so arch-specific
implementations of hugetlb_get_unmapped_area().
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Cc: Adam Litke <agl@us.ibm.com>
Cc: Andi Kleen <ak@suse.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| -rw-r--r-- | arch/i386/mm/hugetlbpage.c | 2 | ||||
| -rw-r--r-- | arch/ia64/mm/hugetlbpage.c | 6 | ||||
| -rw-r--r-- | arch/sparc64/mm/hugetlbpage.c | 2 | ||||
| -rw-r--r-- | fs/hugetlbfs/inode.c | 15 | ||||
| -rw-r--r-- | include/linux/hugetlb.h | 10 |
5 files changed, 17 insertions, 18 deletions
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c index efdf95ac8031..6c06d9c0488e 100644 --- a/arch/i386/mm/hugetlbpage.c +++ b/arch/i386/mm/hugetlbpage.c | |||
| @@ -367,7 +367,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | |||
| 367 | return -ENOMEM; | 367 | return -ENOMEM; |
| 368 | 368 | ||
| 369 | if (flags & MAP_FIXED) { | 369 | if (flags & MAP_FIXED) { |
| 370 | if (prepare_hugepage_range(addr, len, pgoff)) | 370 | if (prepare_hugepage_range(addr, len)) |
| 371 | return -EINVAL; | 371 | return -EINVAL; |
| 372 | return addr; | 372 | return addr; |
| 373 | } | 373 | } |
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index d22861c5b04c..a9ff685aea25 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c | |||
| @@ -75,10 +75,8 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) | |||
| 75 | * Don't actually need to do any preparation, but need to make sure | 75 | * Don't actually need to do any preparation, but need to make sure |
| 76 | * the address is in the right region. | 76 | * the address is in the right region. |
| 77 | */ | 77 | */ |
| 78 | int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff) | 78 | int prepare_hugepage_range(unsigned long addr, unsigned long len) |
| 79 | { | 79 | { |
| 80 | if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT)) | ||
| 81 | return -EINVAL; | ||
| 82 | if (len & ~HPAGE_MASK) | 80 | if (len & ~HPAGE_MASK) |
| 83 | return -EINVAL; | 81 | return -EINVAL; |
| 84 | if (addr & ~HPAGE_MASK) | 82 | if (addr & ~HPAGE_MASK) |
| @@ -151,7 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u | |||
| 151 | 149 | ||
| 152 | /* Handle MAP_FIXED */ | 150 | /* Handle MAP_FIXED */ |
| 153 | if (flags & MAP_FIXED) { | 151 | if (flags & MAP_FIXED) { |
| 154 | if (prepare_hugepage_range(addr, len, pgoff)) | 152 | if (prepare_hugepage_range(addr, len)) |
| 155 | return -EINVAL; | 153 | return -EINVAL; |
| 156 | return addr; | 154 | return addr; |
| 157 | } | 155 | } |
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c index eaba9b70b184..6cfab2e4d340 100644 --- a/arch/sparc64/mm/hugetlbpage.c +++ b/arch/sparc64/mm/hugetlbpage.c | |||
| @@ -175,7 +175,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | |||
| 175 | return -ENOMEM; | 175 | return -ENOMEM; |
| 176 | 176 | ||
| 177 | if (flags & MAP_FIXED) { | 177 | if (flags & MAP_FIXED) { |
| 178 | if (prepare_hugepage_range(addr, len, pgoff)) | 178 | if (prepare_hugepage_range(addr, len)) |
| 179 | return -EINVAL; | 179 | return -EINVAL; |
| 180 | return addr; | 180 | return addr; |
| 181 | } | 181 | } |
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index c848a191525d..950c2fbb815b 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c | |||
| @@ -82,14 +82,19 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) | |||
| 82 | int ret; | 82 | int ret; |
| 83 | 83 | ||
| 84 | /* | 84 | /* |
| 85 | * vma alignment has already been checked by prepare_hugepage_range. | 85 | * vma address alignment (but not the pgoff alignment) has |
| 86 | * If you add any error returns here, do so after setting VM_HUGETLB, | 86 | * already been checked by prepare_hugepage_range. If you add |
| 87 | * so is_vm_hugetlb_page tests below unmap_region go the right way | 87 | * any error returns here, do so after setting VM_HUGETLB, so |
| 88 | * when do_mmap_pgoff unwinds (may be important on powerpc and ia64). | 88 | * is_vm_hugetlb_page tests below unmap_region go the right |
| 89 | * way when do_mmap_pgoff unwinds (may be important on powerpc | ||
| 90 | * and ia64). | ||
| 89 | */ | 91 | */ |
| 90 | vma->vm_flags |= VM_HUGETLB | VM_RESERVED; | 92 | vma->vm_flags |= VM_HUGETLB | VM_RESERVED; |
| 91 | vma->vm_ops = &hugetlb_vm_ops; | 93 | vma->vm_ops = &hugetlb_vm_ops; |
| 92 | 94 | ||
| 95 | if (vma->vm_pgoff & ~(HPAGE_MASK >> PAGE_SHIFT)) | ||
| 96 | return -EINVAL; | ||
| 97 | |||
| 93 | vma_len = (loff_t)(vma->vm_end - vma->vm_start); | 98 | vma_len = (loff_t)(vma->vm_end - vma->vm_start); |
| 94 | 99 | ||
| 95 | mutex_lock(&inode->i_mutex); | 100 | mutex_lock(&inode->i_mutex); |
| @@ -132,7 +137,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | |||
| 132 | return -ENOMEM; | 137 | return -ENOMEM; |
| 133 | 138 | ||
| 134 | if (flags & MAP_FIXED) { | 139 | if (flags & MAP_FIXED) { |
| 135 | if (prepare_hugepage_range(addr, len, pgoff)) | 140 | if (prepare_hugepage_range(addr, len)) |
| 136 | return -EINVAL; | 141 | return -EINVAL; |
| 137 | return addr; | 142 | return addr; |
| 138 | } | 143 | } |
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index e6a71c82d204..3a19b032c0eb 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
| @@ -66,11 +66,8 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr, | |||
| 66 | * If the arch doesn't supply something else, assume that hugepage | 66 | * If the arch doesn't supply something else, assume that hugepage |
| 67 | * size aligned regions are ok without further preparation. | 67 | * size aligned regions are ok without further preparation. |
| 68 | */ | 68 | */ |
| 69 | static inline int prepare_hugepage_range(unsigned long addr, unsigned long len, | 69 | static inline int prepare_hugepage_range(unsigned long addr, unsigned long len) |
| 70 | pgoff_t pgoff) | ||
| 71 | { | 70 | { |
| 72 | if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT)) | ||
| 73 | return -EINVAL; | ||
| 74 | if (len & ~HPAGE_MASK) | 71 | if (len & ~HPAGE_MASK) |
| 75 | return -EINVAL; | 72 | return -EINVAL; |
| 76 | if (addr & ~HPAGE_MASK) | 73 | if (addr & ~HPAGE_MASK) |
| @@ -78,8 +75,7 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len, | |||
| 78 | return 0; | 75 | return 0; |
| 79 | } | 76 | } |
| 80 | #else | 77 | #else |
| 81 | int prepare_hugepage_range(unsigned long addr, unsigned long len, | 78 | int prepare_hugepage_range(unsigned long addr, unsigned long len); |
| 82 | pgoff_t pgoff); | ||
| 83 | #endif | 79 | #endif |
| 84 | 80 | ||
| 85 | #ifndef ARCH_HAS_SETCLEAR_HUGE_PTE | 81 | #ifndef ARCH_HAS_SETCLEAR_HUGE_PTE |
| @@ -117,7 +113,7 @@ static inline unsigned long hugetlb_total_pages(void) | |||
| 117 | #define hugetlb_report_meminfo(buf) 0 | 113 | #define hugetlb_report_meminfo(buf) 0 |
| 118 | #define hugetlb_report_node_meminfo(n, buf) 0 | 114 | #define hugetlb_report_node_meminfo(n, buf) 0 |
| 119 | #define follow_huge_pmd(mm, addr, pmd, write) NULL | 115 | #define follow_huge_pmd(mm, addr, pmd, write) NULL |
| 120 | #define prepare_hugepage_range(addr,len,pgoff) (-EINVAL) | 116 | #define prepare_hugepage_range(addr,len) (-EINVAL) |
| 121 | #define pmd_huge(x) 0 | 117 | #define pmd_huge(x) 0 |
| 122 | #define is_hugepage_only_range(mm, addr, len) 0 | 118 | #define is_hugepage_only_range(mm, addr, len) 0 |
| 123 | #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) | 119 | #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) |
