diff options
author | Adam Litke <agl@us.ibm.com> | 2005-10-29 21:16:47 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-10-30 00:40:43 -0400 |
commit | 2e9b367c2273ed21c9852a04d90944d472c4f3e6 (patch) | |
tree | 75e802f07a8c4f0554547e8dd795f544c7e9d7e8 /fs/hugetlbfs/inode.c | |
parent | 4c887265977213985091476be40ab11dfdcb4caf (diff) |
[PATCH] hugetlb: overcommit accounting check
Basic overcommit checking for hugetlb_file_map() based on an implementation
used with demand faulting in SLES9.
Since demand faulting can't guarantee the availability of pages at mmap
time, this patch implements a basic sanity check to ensure that the number
of huge pages required to satisfy the mmap is currently available.
Despite the obvious race, I think it is a good start on doing proper
accounting. I'd like to work towards an accounting system that mimics the
semantics of normal pages (especially for the MAP_PRIVATE/COW case). That
work is underway and builds on what this patch starts.
Huge page shared memory segments are simpler and still maintain their
commit on shmget semantics.
Signed-off-by: Adam Litke <agl@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs/hugetlbfs/inode.c')
-rw-r--r-- | fs/hugetlbfs/inode.c | 63 |
1 files changed, 53 insertions, 10 deletions
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 2627efe767cf..e026c807e6b3 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c | |||
@@ -45,9 +45,58 @@ static struct backing_dev_info hugetlbfs_backing_dev_info = { | |||
45 | 45 | ||
46 | int sysctl_hugetlb_shm_group; | 46 | int sysctl_hugetlb_shm_group; |
47 | 47 | ||
48 | static void huge_pagevec_release(struct pagevec *pvec) | ||
49 | { | ||
50 | int i; | ||
51 | |||
52 | for (i = 0; i < pagevec_count(pvec); ++i) | ||
53 | put_page(pvec->pages[i]); | ||
54 | |||
55 | pagevec_reinit(pvec); | ||
56 | } | ||
57 | |||
/*
 * huge_pages_needed tries to determine the number of new huge pages that
 * will be required to fully populate this VMA. This will be equal to
 * the size of the VMA in huge pages minus the number of huge pages
 * (covered by this VMA) that are found in the page cache.
 *
 * Result is in bytes to be compatible with is_hugepage_mem_enough()
 *
 * NOTE(review): the count is a snapshot of the page cache taken without
 * any lock that would pin it; the result is inherently racy and only a
 * best-effort sanity check, not a reservation.
 */
unsigned long
huge_pages_needed(struct address_space *mapping, struct vm_area_struct *vma)
{
	int i;
	struct pagevec pvec;
	unsigned long start = vma->vm_start;
	unsigned long end = vma->vm_end;
	/* Upper bound: the whole VMA span expressed in huge pages. */
	unsigned long hugepages = (end - start) >> HPAGE_SHIFT;
	/* Page-cache index range covered by this VMA, starting at vm_pgoff.
	 * NOTE(review): endpg is derived with PAGE_SHIFT while hugepages uses
	 * HPAGE_SHIFT — this assumes the hugetlbfs page cache is indexed in
	 * the same units as vm_pgoff; confirm against the filesystem's
	 * readpage/index convention. */
	pgoff_t next = vma->vm_pgoff;
	pgoff_t endpg = next + ((end - start) >> PAGE_SHIFT);

	pagevec_init(&pvec, 0);
	while (next < endpg) {
		/* No more cached pages at or beyond 'next': stop scanning. */
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			/* Lookup may skip holes; jump 'next' forward to the
			 * index actually returned. */
			if (page->index > next)
				next = page->index;
			/* Page lies past the VMA's range: done with this batch. */
			if (page->index >= endpg)
				break;
			next++;
			/* One page already in the cache means one fewer page
			 * that the mmap will need to allocate. */
			hugepages--;
		}
		/* Release the references pagevec_lookup() took. */
		huge_pagevec_release(&pvec);
	}
	/* Convert the remaining deficit to bytes for the caller's
	 * is_hugepage_mem_enough() check. */
	return hugepages << HPAGE_SHIFT;
}
94 | |||
48 | static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) | 95 | static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) |
49 | { | 96 | { |
50 | struct inode *inode = file->f_dentry->d_inode; | 97 | struct inode *inode = file->f_dentry->d_inode; |
98 | struct address_space *mapping = inode->i_mapping; | ||
99 | unsigned long bytes; | ||
51 | loff_t len, vma_len; | 100 | loff_t len, vma_len; |
52 | int ret; | 101 | int ret; |
53 | 102 | ||
@@ -66,6 +115,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) | |||
66 | if (vma->vm_end - vma->vm_start < HPAGE_SIZE) | 115 | if (vma->vm_end - vma->vm_start < HPAGE_SIZE) |
67 | return -EINVAL; | 116 | return -EINVAL; |
68 | 117 | ||
118 | bytes = huge_pages_needed(mapping, vma); | ||
119 | if (!is_hugepage_mem_enough(bytes)) | ||
120 | return -ENOMEM; | ||
121 | |||
69 | vma_len = (loff_t)(vma->vm_end - vma->vm_start); | 122 | vma_len = (loff_t)(vma->vm_end - vma->vm_start); |
70 | 123 | ||
71 | down(&inode->i_sem); | 124 | down(&inode->i_sem); |
@@ -168,16 +221,6 @@ static int hugetlbfs_commit_write(struct file *file, | |||
168 | return -EINVAL; | 221 | return -EINVAL; |
169 | } | 222 | } |
170 | 223 | ||
171 | static void huge_pagevec_release(struct pagevec *pvec) | ||
172 | { | ||
173 | int i; | ||
174 | |||
175 | for (i = 0; i < pagevec_count(pvec); ++i) | ||
176 | put_page(pvec->pages[i]); | ||
177 | |||
178 | pagevec_reinit(pvec); | ||
179 | } | ||
180 | |||
181 | static void truncate_huge_page(struct page *page) | 224 | static void truncate_huge_page(struct page *page) |
182 | { | 225 | { |
183 | clear_page_dirty(page); | 226 | clear_page_dirty(page); |