author	Zhang, Yanmin <yanmin.zhang@intel.com>	2006-07-10 07:44:49 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-07-10 16:24:21 -0400
commit	b6174df5eec9cdfd598c03d6d0807e344e109213 (patch)
tree	d61c8627138a8feee31de8320e337251d567fca9	/fs/hugetlbfs/inode.c
parent	d6b7d3b62069be60d5b13358bac8670dacdd7a81 (diff)
[PATCH] mmap zero-length hugetlb file with PROT_NONE to protect a hugetlb virtual area
Sometimes, applications need the call below to succeed even though
"/mnt/hugepages/file1" does not exist yet:

	fd = open("/mnt/hugepages/file1", O_CREAT|O_RDWR, 0755);
	*addr = mmap(NULL, 0x1024*1024*256, PROT_NONE, 0, fd, 0);

For regular pages (files), the call above works, but for huge pages it
fails, because hugetlbfs_file_mmap bails out when
(!(vma->vm_flags & VM_WRITE) && len > inode->i_size).

This capability for huge pages is useful on ia64 when a process wants to
protect an area in region 4 so that other threads cannot read or write it.
A well-known JVM (Java Virtual Machine) implementation on IA64 needs this
capability.

Signed-off-by: Zhang Yanmin <yanmin.zhang@intel.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Cc: Hugh Dickins <hugh@veritas.com>
[ Expand-on-mmap semantics again... this time matching normal fs's.  wli ]
Acked-by: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
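For reference, a minimal userspace sketch of the scenario this patch enables.
The mount point /mnt/hugepages, the 256 MB length, and the use of MAP_SHARED
(the commit message literally passes 0 for the flags argument) are assumptions
for illustration, not part of the patch.

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		/* Assumed hugetlbfs mount point; adjust to the local setup. */
		int fd = open("/mnt/hugepages/file1", O_CREAT | O_RDWR, 0755);
		if (fd < 0) {
			perror("open");
			return EXIT_FAILURE;
		}

		/*
		 * Reserve 256 MB of hugetlb virtual address space with no access
		 * rights. Before this patch the kernel rejected this mapping
		 * because the file is still zero-length.
		 */
		size_t len = 256UL * 1024 * 1024;
		void *addr = mmap(NULL, len, PROT_NONE, MAP_SHARED, fd, 0);
		if (addr == MAP_FAILED) {
			perror("mmap");
			close(fd);
			return EXIT_FAILURE;
		}

		munmap(addr, len);
		close(fd);
		return EXIT_SUCCESS;
	}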
Diffstat (limited to 'fs/hugetlbfs/inode.c')
-rw-r--r--	fs/hugetlbfs/inode.c	4
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 6449cb697967..c3920c96dadf 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -83,8 +83,6 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 
 	ret = -ENOMEM;
 	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
-	if (!(vma->vm_flags & VM_WRITE) && len > inode->i_size)
-		goto out;
 
 	if (vma->vm_flags & VM_MAYSHARE &&
 	    hugetlb_reserve_pages(inode, vma->vm_pgoff >> (HPAGE_SHIFT-PAGE_SHIFT),
@@ -93,7 +91,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 
 	ret = 0;
 	hugetlb_prefault_arch_hook(vma->vm_mm);
-	if (inode->i_size < len)
+	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
 		inode->i_size = len;
 out:
 	mutex_unlock(&inode->i_mutex);
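For contrast, a hedged sketch of the expand-on-mmap path the second hunk
preserves: with a writable shared mapping, hugetlbfs_file_mmap still grows
inode->i_size to cover the mapping, so fstat() right after mmap() reports the
extended size. The file name and the 4 MB length (two assumed 2 MB huge pages)
are illustrative assumptions.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <sys/stat.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical hugetlbfs file; the mount point is an assumption. */
		int fd = open("/mnt/hugepages/file2", O_CREAT | O_RDWR, 0755);
		size_t len = 4UL * 1024 * 1024;	/* two 2 MB huge pages, assumed */
		struct stat st;

		if (fd < 0) {
			perror("open");
			return 1;
		}

		/*
		 * A writable shared mapping past EOF: the VM_WRITE branch above
		 * expands inode->i_size to cover the mapping.
		 */
		void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (addr == MAP_FAILED) {
			perror("mmap");
			close(fd);
			return 1;
		}

		fstat(fd, &st);
		printf("i_size after writable mmap: %lld\n", (long long)st.st_size);

		munmap(addr, len);
		close(fd);
		return 0;
	}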