path: root/fs/hugetlbfs
author	Badari Pulavarty <pbadari@us.ibm.com>	2007-10-16 04:26:22 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 12:43:03 -0400
commit	e63e1e5a6b75416ab758025fd80e247c4f103b1b (patch)
tree	cfc15590308980ebc1794d5a44cb02120868a025 /fs/hugetlbfs
parent	7aa91e104028b87ff13f5eeb7a0d7ffe7b5a2348 (diff)
hugetlbfs read() support
Support for reading from hugetlbfs files. libhugetlbfs lets application
text/data be placed in large pages. When we do that, oprofile doesn't work,
since libbfd tries to read from the hugetlbfs file. This code is very
similar to what do_generic_mapping_read() does, but I can't use it since it
has PAGE_CACHE_SIZE assumptions.

[akpm@linux-foundation.org: cleanups, fix leak]
[bunk@stusta.de: make hugetlbfs_read() static]
Signed-off-by: Badari Pulavarty <pbadari@us.ibm.com>
Acked-by: William Irwin <bill.irwin@oracle.com>
Tested-by: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
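
From user space, the effect of the patch is simply that read(2) starts
working on hugetlbfs-backed files (the file_operations previously had no
.read method, so the VFS rejected the call). A minimal sketch of the
consumer side is below; the mount point /mnt/huge and the file name are
hypothetical and not part of the patch.

/*
 * User-space sketch: read the first bytes of a hugetlbfs-backed file,
 * much as libbfd/oprofile would. Assumes a hugetlbfs mount, e.g.
 * "mount -t hugetlbfs none /mnt/huge", and a file already placed there.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("/mnt/huge/app.text", O_RDONLY);	/* hypothetical path */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Without this patch the read() fails; with it, the call behaves
	 * like a read from any regular file (holes read back as zeroes). */
	ssize_t n = read(fd, buf, sizeof(buf));
	if (n < 0)
		perror("read");
	else
		printf("read %zd bytes\n", n);
	close(fd);
	return 0;
}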
Diffstat (limited to 'fs/hugetlbfs')
-rw-r--r--	fs/hugetlbfs/inode.c	125
1 file changed, 125 insertions(+), 0 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 8f8e13385e96..04598e12c489 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -179,6 +179,130 @@ full_search:
 }
 #endif
 
+static int
+hugetlbfs_read_actor(struct page *page, unsigned long offset,
+			char __user *buf, unsigned long count,
+			unsigned long size)
+{
+	char *kaddr;
+	unsigned long left, copied = 0;
+	int i, chunksize;
+
+	if (size > count)
+		size = count;
+
+	/* Find which 4k chunk and offset with in that chunk */
+	i = offset >> PAGE_CACHE_SHIFT;
+	offset = offset & ~PAGE_CACHE_MASK;
+
+	while (size) {
+		chunksize = PAGE_CACHE_SIZE;
+		if (offset)
+			chunksize -= offset;
+		if (chunksize > size)
+			chunksize = size;
+		kaddr = kmap(&page[i]);
+		left = __copy_to_user(buf, kaddr + offset, chunksize);
+		kunmap(&page[i]);
+		if (left) {
+			copied += (chunksize - left);
+			break;
+		}
+		offset = 0;
+		size -= chunksize;
+		buf += chunksize;
+		copied += chunksize;
+		i++;
+	}
+	return copied ? copied : -EFAULT;
+}
+
+/*
+ * Support for read() - Find the page attached to f_mapping and copy out the
+ * data. Its *very* similar to do_generic_mapping_read(), we can't use that
+ * since it has PAGE_CACHE_SIZE assumptions.
+ */
+static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
+			size_t len, loff_t *ppos)
+{
+	struct address_space *mapping = filp->f_mapping;
+	struct inode *inode = mapping->host;
+	unsigned long index = *ppos >> HPAGE_SHIFT;
+	unsigned long offset = *ppos & ~HPAGE_MASK;
+	unsigned long end_index;
+	loff_t isize;
+	ssize_t retval = 0;
+
+	mutex_lock(&inode->i_mutex);
+
+	/* validate length */
+	if (len == 0)
+		goto out;
+
+	isize = i_size_read(inode);
+	if (!isize)
+		goto out;
+
+	end_index = (isize - 1) >> HPAGE_SHIFT;
+	for (;;) {
+		struct page *page;
+		int nr, ret;
+
+		/* nr is the maximum number of bytes to copy from this page */
+		nr = HPAGE_SIZE;
+		if (index >= end_index) {
+			if (index > end_index)
+				goto out;
+			nr = ((isize - 1) & ~HPAGE_MASK) + 1;
+			if (nr <= offset) {
+				goto out;
+			}
+		}
+		nr = nr - offset;
+
+		/* Find the page */
+		page = find_get_page(mapping, index);
+		if (unlikely(page == NULL)) {
+			/*
+			 * We have a HOLE, zero out the user-buffer for the
+			 * length of the hole or request.
+			 */
+			ret = len < nr ? len : nr;
+			if (clear_user(buf, ret))
+				ret = -EFAULT;
+		} else {
+			/*
+			 * We have the page, copy it to user space buffer.
+			 */
+			ret = hugetlbfs_read_actor(page, offset, buf, len, nr);
+		}
+		if (ret < 0) {
+			if (retval == 0)
+				retval = ret;
+			if (page)
+				page_cache_release(page);
+			goto out;
+		}
+
+		offset += ret;
+		retval += ret;
+		len -= ret;
+		index += offset >> HPAGE_SHIFT;
+		offset &= ~HPAGE_MASK;
+
+		if (page)
+			page_cache_release(page);
+
+		/* short read or no more work */
+		if ((ret != nr) || (len == 0))
+			break;
+	}
+out:
+	*ppos = ((loff_t)index << HPAGE_SHIFT) + offset;
+	mutex_unlock(&inode->i_mutex);
+	return retval;
+}
+
 /*
  * Read a page. Again trivial. If it didn't already exist
  * in the page cache, it is zero-filled.
@@ -581,6 +705,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 }
 
 const struct file_operations hugetlbfs_file_operations = {
+	.read			= hugetlbfs_read,
 	.mmap			= hugetlbfs_file_mmap,
 	.fsync			= simple_sync_file,
 	.get_unmapped_area	= hugetlb_get_unmapped_area,
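
A note on the copy helper: hugetlbfs_read_actor() copies a huge page out in
base-page-sized chunks because kmap() maps one 4k subpage of the compound
page at a time, so the whole huge page cannot be mapped and copied in one
go. The stand-alone sketch below mirrors only that chunk/offset arithmetic
in plain user-space C; CHUNK, copy_in_chunks() and the buffer sizes are
illustrative stand-ins for PAGE_CACHE_SIZE and the kernel helpers, not
kernel interfaces.

/*
 * Stand-alone sketch of the chunking arithmetic used by
 * hugetlbfs_read_actor(): walk a region in 4k chunks starting at an
 * arbitrary byte offset. memcpy() stands in for the per-chunk
 * kmap() + __copy_to_user() done in the kernel.
 */
#include <stdio.h>
#include <string.h>

#define CHUNK 4096UL

static unsigned long copy_in_chunks(char *dst, const char *src,
				    unsigned long offset, unsigned long size)
{
	unsigned long copied = 0;
	unsigned long i = offset / CHUNK;	/* which 4k chunk */
	offset &= (CHUNK - 1);			/* offset within that chunk */

	while (size) {
		unsigned long chunksize = CHUNK - offset;
		if (chunksize > size)
			chunksize = size;
		memcpy(dst + copied, src + i * CHUNK + offset, chunksize);
		offset = 0;			/* later chunks start at 0 */
		size -= chunksize;
		copied += chunksize;
		i++;
	}
	return copied;
}

int main(void)
{
	static char src[3 * CHUNK], dst[3 * CHUNK];

	memset(src, 'x', sizeof(src));
	/* copy 6000 bytes starting 100 bytes into the second 4k chunk */
	unsigned long n = copy_in_chunks(dst, src, CHUNK + 100, 6000);
	printf("copied %lu bytes\n", n);
	return 0;
}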