-rw-r--r--  arch/ia64/mm/hugetlbpage.c      4
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c   8
-rw-r--r--  fs/hugetlbfs/inode.c           21
-rw-r--r--  include/linux/hugetlb.h        10
-rw-r--r--  mm/mmap.c                       2
5 files changed, 25 insertions(+), 20 deletions(-)
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index eee5c1cfbe32..f3a9585e98a8 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -70,8 +70,10 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
  * Don't actually need to do any preparation, but need to make sure
  * the address is in the right region.
  */
-int prepare_hugepage_range(unsigned long addr, unsigned long len)
+int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
 {
+        if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
+                return -EINVAL;
         if (len & ~HPAGE_MASK)
                 return -EINVAL;
         if (addr & ~HPAGE_MASK)
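Note: the new pgoff check works because vm_pgoff counts base pages, so a file offset is hugepage-aligned exactly when the low (HPAGE_SHIFT - PAGE_SHIFT) bits of pgoff are zero, and ~HPAGE_MASK >> PAGE_SHIFT is precisely that bitmask. A minimal userspace sketch of the arithmetic, assuming 4 KiB base pages and 4 MiB huge pages (both per-arch constants, not taken from this patch):

/* Sketch only: PAGE_SHIFT/HPAGE_SHIFT values are assumptions. */
#include <stdio.h>

#define PAGE_SHIFT  12
#define HPAGE_SHIFT 22
#define HPAGE_MASK  (~((1UL << HPAGE_SHIFT) - 1))

int main(void)
{
        unsigned long pgoff_ok  = 1UL << (HPAGE_SHIFT - PAGE_SHIFT); /* one hugepage into the file */
        unsigned long pgoff_bad = pgoff_ok + 1;                      /* off by one base page */

        /* 0x3ff here: pgoff must be a multiple of 1024 base pages */
        printf("mask = %#lx\n", ~HPAGE_MASK >> PAGE_SHIFT);
        printf("aligned:   %s\n", (pgoff_ok  & (~HPAGE_MASK >> PAGE_SHIFT)) ? "-EINVAL" : "ok");
        printf("unaligned: %s\n", (pgoff_bad & (~HPAGE_MASK >> PAGE_SHIFT)) ? "-EINVAL" : "ok");
        return 0;
}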
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index fd68b74c07c3..506d89768d45 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -491,11 +491,15 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
         return 0;
 }
 
-int prepare_hugepage_range(unsigned long addr, unsigned long len)
+int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
 {
         int err = 0;
 
-        if ( (addr+len) < addr )
+        if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
+                return -EINVAL;
+        if (len & ~HPAGE_MASK)
+                return -EINVAL;
+        if (addr & ~HPAGE_MASK)
                 return -EINVAL;
 
         if (addr < 0x100000000UL)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 0bea6a619e10..7f4756963d05 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -62,24 +62,19 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
         loff_t len, vma_len;
         int ret;
 
-        if (vma->vm_pgoff & (HPAGE_SIZE / PAGE_SIZE - 1))
-                return -EINVAL;
-
-        if (vma->vm_start & ~HPAGE_MASK)
-                return -EINVAL;
-
-        if (vma->vm_end & ~HPAGE_MASK)
-                return -EINVAL;
-
-        if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
-                return -EINVAL;
+        /*
+         * vma alignment has already been checked by prepare_hugepage_range.
+         * If you add any error returns here, do so after setting VM_HUGETLB,
+         * so is_vm_hugetlb_page tests below unmap_region go the right way
+         * when do_mmap_pgoff unwinds (may be important on powerpc and ia64).
+         */
+        vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
+        vma->vm_ops = &hugetlb_vm_ops;
 
         vma_len = (loff_t)(vma->vm_end - vma->vm_start);
 
         mutex_lock(&inode->i_mutex);
         file_accessed(file);
-        vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
-        vma->vm_ops = &hugetlb_vm_ops;
 
         ret = -ENOMEM;
         len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
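The effect of moving these checks is visible from userspace: an mmap() of a hugetlbfs file at an offset that is not a multiple of the hugepage size is now rejected with EINVAL before ->mmap runs. A hedged demonstration, assuming a hugetlbfs mount at /dev/hugepages and 2 MiB huge pages (both system-specific assumptions, not part of this patch):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE (2UL * 1024 * 1024)  /* assumed: 2 MiB huge pages */

int main(void)
{
        int fd = open("/dev/hugepages/demo", O_CREAT | O_RDWR, 0600);
        if (fd < 0) {
                perror("open");  /* no hugetlbfs mounted here */
                return 1;
        }

        /* Offset of one base page (4 KiB): not hugepage-aligned. */
        void *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 4096);
        if (p == MAP_FAILED)
                printf("unaligned offset rejected: %s\n", strerror(errno));
        else
                munmap(p, HPAGE_SIZE);

        close(fd);
        unlink("/dev/hugepages/demo");
        return 0;
}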
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 5081d27bfa27..ace64e57e17f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -60,8 +60,11 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
  */
-static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
+                                                pgoff_t pgoff)
 {
+        if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
+                return -EINVAL;
         if (len & ~HPAGE_MASK)
                 return -EINVAL;
         if (addr & ~HPAGE_MASK)
@@ -69,7 +72,8 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
         return 0;
 }
 #else
-int prepare_hugepage_range(unsigned long addr, unsigned long len);
+int prepare_hugepage_range(unsigned long addr, unsigned long len,
+                                                pgoff_t pgoff);
 #endif
 
 #ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
@@ -107,7 +111,7 @@ static inline unsigned long hugetlb_total_pages(void)
 #define hugetlb_report_meminfo(buf)            0
 #define hugetlb_report_node_meminfo(n, buf)    0
 #define follow_huge_pmd(mm, addr, pmd, write)  NULL
-#define prepare_hugepage_range(addr, len)      (-EINVAL)
+#define prepare_hugepage_range(addr,len,pgoff) (-EINVAL)
 #define pmd_huge(x)    0
 #define is_hugepage_only_range(mm, addr, len)  0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
diff --git a/mm/mmap.c b/mm/mmap.c
index 497e502dfd6b..bdace87d7c01 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1379,7 +1379,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                 * Check if the given range is hugepage aligned, and
                 * can be made suitable for hugepages.
                 */
-               ret = prepare_hugepage_range(addr, len);
+               ret = prepare_hugepage_range(addr, len, pgoff);
        } else {
                /*
                 * Ensure that a normal request is not falling in a
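For context, the pgoff arriving here is the mmap() file offset already expressed in base-page units, so the whole check composes as below. A sketch only, assuming 4 KiB base pages and 2 MiB huge pages (check_pgoff is a hypothetical helper, not a kernel function):

#include <errno.h>
#include <stdio.h>

#define PAGE_SHIFT  12
#define HPAGE_SHIFT 21
#define HPAGE_MASK  (~((1UL << HPAGE_SHIFT) - 1))

static int check_pgoff(unsigned long off)  /* byte offset given to mmap() */
{
        unsigned long pgoff = off >> PAGE_SHIFT;  /* scaled to base pages */
        return (pgoff & (~HPAGE_MASK >> PAGE_SHIFT)) ? -EINVAL : 0;
}

int main(void)
{
        printf("offset 2 MiB -> %d\n", check_pgoff(2UL << 20));  /* 0: aligned */
        printf("offset 4 KiB -> %d\n", check_pgoff(4096));       /* -EINVAL */
        return 0;
}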