diff options

 fs/hugetlbfs/inode.c | 17
 mm/hugetlb.c         |  7

 2 files changed, 21 insertions(+), 3 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 8fe1b0aa2896..b9a254dcc0e7 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -108,6 +108,16 @@ static void huge_pagevec_release(struct pagevec *pvec)
| 108 | pagevec_reinit(pvec); | 108 | pagevec_reinit(pvec); |
| 109 | } | 109 | } |
| 110 | 110 | ||
| 111 | /* | ||
| 112 | * Mask used when checking the page offset value passed in via system | ||
| 113 | * calls. This value will be converted to a loff_t which is signed. | ||
| 114 | * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the | ||
| 115 | * value. The extra bit (- 1 in the shift value) is to take the sign | ||
| 116 | * bit into account. | ||
| 117 | */ | ||
| 118 | #define PGOFF_LOFFT_MAX \ | ||
| 119 | (((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1))) | ||
| 120 | |||
| 111 | static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) | 121 | static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) |
| 112 | { | 122 | { |
| 113 | struct inode *inode = file_inode(file); | 123 | struct inode *inode = file_inode(file); |
@@ -127,12 +137,13 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
| 127 | vma->vm_ops = &hugetlb_vm_ops; | 137 | vma->vm_ops = &hugetlb_vm_ops; |
| 128 | 138 | ||
| 129 | /* | 139 | /* |
| 130 | * Offset passed to mmap (before page shift) could have been | 140 | * page based offset in vm_pgoff could be sufficiently large to |
| 131 | * negative when represented as a (l)off_t. | 141 | * overflow a (l)off_t when converted to byte offset. |
| 132 | */ | 142 | */ |
| 133 | if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0) | 143 | if (vma->vm_pgoff & PGOFF_LOFFT_MAX) |
| 134 | return -EINVAL; | 144 | return -EINVAL; |
| 135 | 145 | ||
| 146 | /* must be huge page aligned */ | ||
| 136 | if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) | 147 | if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) |
| 137 | return -EINVAL; | 148 | return -EINVAL; |
| 138 | 149 | ||
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a963f2034dfc..976bbc5646fe 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -18,6 +18,7 @@
| 18 | #include <linux/bootmem.h> | 18 | #include <linux/bootmem.h> |
| 19 | #include <linux/sysfs.h> | 19 | #include <linux/sysfs.h> |
| 20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
| 21 | #include <linux/mmdebug.h> | ||
| 21 | #include <linux/sched/signal.h> | 22 | #include <linux/sched/signal.h> |
| 22 | #include <linux/rmap.h> | 23 | #include <linux/rmap.h> |
| 23 | #include <linux/string_helpers.h> | 24 | #include <linux/string_helpers.h> |
@@ -4374,6 +4375,12 @@ int hugetlb_reserve_pages(struct inode *inode,
| 4374 | struct resv_map *resv_map; | 4375 | struct resv_map *resv_map; |
| 4375 | long gbl_reserve; | 4376 | long gbl_reserve; |
| 4376 | 4377 | ||
| 4378 | /* This should never happen */ | ||
| 4379 | if (from > to) { | ||
| 4380 | VM_WARN(1, "%s called with a negative range\n", __func__); | ||
| 4381 | return -EINVAL; | ||
| 4382 | } | ||
| 4383 | |||
| 4377 | /* | 4384 | /* |
| 4378 | * Only apply hugepage reservation if asked. At fault time, an | 4385 | * Only apply hugepage reservation if asked. At fault time, an |
| 4379 | * attempt will be made for VM_NORESERVE to allocate a page | 4386 | * attempt will be made for VM_NORESERVE to allocate a page |
