about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--fs/hugetlbfs/inode.c63
1 files changed, 53 insertions, 10 deletions
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 2627efe767cf..e026c807e6b3 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -45,9 +45,58 @@ static struct backing_dev_info hugetlbfs_backing_dev_info = {
45 45
46int sysctl_hugetlb_shm_group; 46int sysctl_hugetlb_shm_group;
47 47
48static void huge_pagevec_release(struct pagevec *pvec)
49{
50 int i;
51
52 for (i = 0; i < pagevec_count(pvec); ++i)
53 put_page(pvec->pages[i]);
54
55 pagevec_reinit(pvec);
56}
57
/*
 * huge_pages_needed tries to determine the number of new huge pages that
 * will be required to fully populate this VMA. This will be equal to
 * the size of the VMA in huge pages minus the number of huge pages
 * (covered by this VMA) that are found in the page cache.
 *
 * Result is in bytes to be compatible with is_hugepage_mem_enough()
 */
unsigned long
huge_pages_needed(struct address_space *mapping, struct vm_area_struct *vma)
{
	int i;
	struct pagevec pvec;
	unsigned long start = vma->vm_start;
	unsigned long end = vma->vm_end;
	/* Worst case: every huge page in the VMA must be newly allocated. */
	unsigned long hugepages = (end - start) >> HPAGE_SHIFT;
	/* Page-cache indices covered by this VMA, in PAGE_SIZE units. */
	pgoff_t next = vma->vm_pgoff;
	pgoff_t endpg = next + ((end - start) >> PAGE_SHIFT);

	pagevec_init(&pvec, 0);
	while (next < endpg) {
		/* No pages at or beyond 'next': nothing more in the cache. */
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			/*
			 * pagevec_lookup can skip over holes, so jump 'next'
			 * forward to the index actually found before testing
			 * it against the end of the VMA's range.
			 */
			if (page->index > next)
				next = page->index;
			if (page->index >= endpg)
				break;
			next++;
			/*
			 * One cached page means one fewer huge page to
			 * allocate.
			 *
			 * NOTE(review): this decrements once per cache entry
			 * and advances 'next' by one, which assumes
			 * page->index strides in huge-page-sized steps while
			 * endpg was computed with PAGE_SHIFT — confirm the
			 * index units used by hugetlbfs's page cache here.
			 */
			hugepages--;
		}
		/* Drop the refs pagevec_lookup took before looping again. */
		huge_pagevec_release(&pvec);
	}
	return hugepages << HPAGE_SHIFT;
}
94
48static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) 95static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
49{ 96{
50 struct inode *inode = file->f_dentry->d_inode; 97 struct inode *inode = file->f_dentry->d_inode;
98 struct address_space *mapping = inode->i_mapping;
99 unsigned long bytes;
51 loff_t len, vma_len; 100 loff_t len, vma_len;
52 int ret; 101 int ret;
53 102
@@ -66,6 +115,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
66 if (vma->vm_end - vma->vm_start < HPAGE_SIZE) 115 if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
67 return -EINVAL; 116 return -EINVAL;
68 117
118 bytes = huge_pages_needed(mapping, vma);
119 if (!is_hugepage_mem_enough(bytes))
120 return -ENOMEM;
121
69 vma_len = (loff_t)(vma->vm_end - vma->vm_start); 122 vma_len = (loff_t)(vma->vm_end - vma->vm_start);
70 123
71 down(&inode->i_sem); 124 down(&inode->i_sem);
@@ -168,16 +221,6 @@ static int hugetlbfs_commit_write(struct file *file,
168 return -EINVAL; 221 return -EINVAL;
169} 222}
170 223
/* Drop the reference on each page in @pvec and reinitialise the pagevec. */
static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}
180
181static void truncate_huge_page(struct page *page) 224static void truncate_huge_page(struct page *page)
182{ 225{
183 clear_page_dirty(page); 226 clear_page_dirty(page);