author	Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>	2013-05-07 19:18:13 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-07 21:38:27 -0400
commit	af73e4d9506d3b797509f3c030e7dcd554f7d9c4 (patch)
tree	f9f1bf7483495b66b6cf2cfb3c676791133733b1 /mm
parent	1ab4ce762370b82870834899e49c08129d7ae271 (diff)
hugetlbfs: fix mmap failure in unaligned size request
The current kernel returns -EINVAL unless a given mmap length is "almost" hugepage aligned.  This is because in sys_mmap_pgoff() the given length is passed to vm_mmap_pgoff() as it is, without being aligned to a hugepage boundary.

This is a regression introduced in commit 40716e29243d ("hugetlbfs: fix alignment of huge page requests"), where the alignment code was pushed into hugetlb_file_setup() and the variable len on the caller side was left unchanged.

To fix this, this patch partially reverts that commit and adds the alignment code on the caller side.  It also introduces hstate_sizelog() in order to get the proper hstate for a specified hugepage size.

Addresses https://bugzilla.kernel.org/show_bug.cgi?id=56881

[akpm@linux-foundation.org: fix warning when CONFIG_HUGETLB_PAGE=n]
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reported-by: <iceman_dvd@yahoo.com>
Cc: Steven Truelove <steven.truelove@utoronto.ca>
Cc: Jianguo Wu <wujianguo@huawei.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
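For illustration only (not part of this commit): a minimal userspace sketch of the failing request, assuming a 2 MB default hugepage size and preallocated hugepages (vm.nr_hugepages > 0). On kernels with the regression the unaligned length makes mmap() return EINVAL; with this fix the kernel rounds the length up to the next hugepage boundary.

#define _GNU_SOURCE
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* deliberately NOT a multiple of the (assumed 2 MB) hugepage size */
	size_t len = 2 * 1024 * 1024 + 4096;

	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");	/* EINVAL on kernels with the regression */
		return 1;
	}
	/* on fixed kernels the mapping is silently rounded up to 4 MB */
	return 0;
}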
Diffstat (limited to 'mm')
-rw-r--r--	mm/mmap.c	7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index da3e9c04bf37..1ae21d645c68 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1363,15 +1363,20 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 		file = fget(fd);
 		if (!file)
 			goto out;
+		if (is_file_hugepages(file))
+			len = ALIGN(len, huge_page_size(hstate_file(file)));
 	} else if (flags & MAP_HUGETLB) {
 		struct user_struct *user = NULL;
+
+		len = ALIGN(len, huge_page_size(hstate_sizelog(
+			(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK)));
 		/*
 		 * VM_NORESERVE is used because the reservations will be
 		 * taken when vm_ops->mmap() is called
 		 * A dummy user value is used because we are not locking
 		 * memory so no accounting is necessary
 		 */
-		file = hugetlb_file_setup(HUGETLB_ANON_FILE, addr, len,
+		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
 				VM_NORESERVE,
 				&user, HUGETLB_ANONHUGE_INODE,
 				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
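The hstate_sizelog() helper used by the new MAP_HUGETLB branch is introduced by this commit in include/linux/hugetlb.h, so it does not appear in the mm/ diffstat above. A rough sketch of its behaviour, assuming the existing default_hstate and size_to_hstate() symbols (with a trivial stub when CONFIG_HUGETLB_PAGE=n): if no page-size log is encoded in the mmap flags it falls back to the default hugepage size, otherwise it looks up the hstate for 2^page_size_log bytes.

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	/* no size encoded in the MAP_HUGE_* bits: use the default hugepage size */
	if (!page_size_log)
		return &default_hstate;
	/* otherwise look up the hstate matching 2^page_size_log bytes */
	return size_to_hstate(1UL << page_size_log);
}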