aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMike Kravetz <mike.kravetz@oracle.com>2018-03-22 19:17:13 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-03-22 20:07:01 -0400
commit63489f8e821144000e0bdca7e65a8d1cc23a7ee7 (patch)
tree0304c60c01b205b545c02822331fb72b2b5fc6f7
parent2e517d681632326ed98399cb4dd99519efe3e32c (diff)
hugetlbfs: check for pgoff value overflow
A vma with vm_pgoff large enough to overflow a loff_t type when converted to a byte offset can be passed via the remap_file_pages system call. The hugetlbfs mmap routine uses the byte offset to calculate reservations and file size. A sequence such as: mmap(0x20a00000, 0x600000, 0, 0x66033, -1, 0); remap_file_pages(0x20a00000, 0x600000, 0, 0x20000000000000, 0); will result in the following when task exits/file closed, kernel BUG at mm/hugetlb.c:749! Call Trace: hugetlbfs_evict_inode+0x2f/0x40 evict+0xcb/0x190 __dentry_kill+0xcb/0x150 __fput+0x164/0x1e0 task_work_run+0x84/0xa0 exit_to_usermode_loop+0x7d/0x80 do_syscall_64+0x18b/0x190 entry_SYSCALL_64_after_hwframe+0x3d/0xa2 The overflowed pgoff value causes hugetlbfs to try to set up a mapping with a negative range (end < start) that leaves invalid state which causes the BUG. The previous overflow fix to this code was incomplete and did not take the remap_file_pages system call into account. [mike.kravetz@oracle.com: v3] Link: http://lkml.kernel.org/r/20180309002726.7248-1-mike.kravetz@oracle.com [akpm@linux-foundation.org: include mmdebug.h] [akpm@linux-foundation.org: fix -ve left shift count on sh] Link: http://lkml.kernel.org/r/20180308210502.15952-1-mike.kravetz@oracle.com Fixes: 045c7a3f53d9 ("hugetlbfs: fix offset overflow in hugetlbfs mmap") Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com> Reported-by: Nic Losby <blurbdust@gmail.com> Acked-by: Michal Hocko <mhocko@suse.com> Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com> Cc: Yisheng Xie <xieyisheng1@huawei.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--fs/hugetlbfs/inode.c17
-rw-r--r--mm/hugetlb.c7
2 files changed, 21 insertions, 3 deletions
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 8fe1b0aa2896..b9a254dcc0e7 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -108,6 +108,16 @@ static void huge_pagevec_release(struct pagevec *pvec)
 		pagevec_reinit(pvec);
 }
 
+/*
+ * Mask used when checking the page offset value passed in via system
+ * calls. This value will be converted to a loff_t which is signed.
+ * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
+ * value. The extra bit (- 1 in the shift value) is to take the sign
+ * bit into account.
+ */
+#define PGOFF_LOFFT_MAX \
+	(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
+
 static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct inode *inode = file_inode(file);
@@ -127,12 +137,13 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	vma->vm_ops = &hugetlb_vm_ops;
 
 	/*
-	 * Offset passed to mmap (before page shift) could have been
-	 * negative when represented as a (l)off_t.
+	 * page based offset in vm_pgoff could be sufficiently large to
+	 * overflow a (l)off_t when converted to byte offset.
 	 */
-	if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0)
+	if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
 		return -EINVAL;
 
+	/* must be huge page aligned */
 	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
 		return -EINVAL;
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a963f2034dfc..976bbc5646fe 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -18,6 +18,7 @@
 #include <linux/bootmem.h>
 #include <linux/sysfs.h>
 #include <linux/slab.h>
+#include <linux/mmdebug.h>
 #include <linux/sched/signal.h>
 #include <linux/rmap.h>
 #include <linux/string_helpers.h>
@@ -4374,6 +4375,12 @@ int hugetlb_reserve_pages(struct inode *inode,
 	struct resv_map *resv_map;
 	long gbl_reserve;
 
+	/* This should never happen */
+	if (from > to) {
+		VM_WARN(1, "%s called with a negative range\n", __func__);
+		return -EINVAL;
+	}
+
 	/*
 	 * Only apply hugepage reservation if asked. At fault time, an
 	 * attempt will be made for VM_NORESERVE to allocate a page