aboutsummaryrefslogtreecommitdiffstats
path: root/fs/splice.c
diff options
context:
space:
mode:
authorFengguang Wu <wfg@mail.ustc.edu.cn>2007-07-19 04:48:06 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-07-19 13:04:44 -0400
commitd8983910a4045fa21022cfccf76ed13eb40fd7f5 (patch)
tree81902a5157ace03a94aa4b62599a20bc87f7a1c0 /fs/splice.c
parent431a4820bfcdf7ff530e745230bafb06c9bf2d6d (diff)
readahead: pass real splice size
Pass real splice size to page_cache_readahead_ondemand(). The splice code works in chunks of 16 pages internally. The readahead code should be told of the overall splice size, instead of the internal chunk size. Otherwise bad things may happen. Imagine some 17-page random splice reads. The code before this patch will result in two readahead calls: readahead(16); readahead(1); That leads to one 16-page I/O and one 32-page I/O: one extra I/O and 31 readahead miss pages. Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn> Cc: Jens Axboe <jens.axboe@oracle.com> Cc: Rusty Russell <rusty@rustcorp.com.au> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/splice.c')
-rw-r--r--fs/splice.c12
1 files changed, 5 insertions, 7 deletions
diff --git a/fs/splice.c b/fs/splice.c
index 421b3b821152..6ddd0329f866 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -265,7 +265,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
265 unsigned int flags) 265 unsigned int flags)
266{ 266{
267 struct address_space *mapping = in->f_mapping; 267 struct address_space *mapping = in->f_mapping;
268 unsigned int loff, nr_pages; 268 unsigned int loff, nr_pages, req_pages;
269 struct page *pages[PIPE_BUFFERS]; 269 struct page *pages[PIPE_BUFFERS];
270 struct partial_page partial[PIPE_BUFFERS]; 270 struct partial_page partial[PIPE_BUFFERS];
271 struct page *page; 271 struct page *page;
@@ -281,10 +281,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
281 281
282 index = *ppos >> PAGE_CACHE_SHIFT; 282 index = *ppos >> PAGE_CACHE_SHIFT;
283 loff = *ppos & ~PAGE_CACHE_MASK; 283 loff = *ppos & ~PAGE_CACHE_MASK;
284 nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 284 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
285 285 nr_pages = min(req_pages, (unsigned)PIPE_BUFFERS);
286 if (nr_pages > PIPE_BUFFERS)
287 nr_pages = PIPE_BUFFERS;
288 286
289 /* 287 /*
290 * Lookup the (hopefully) full range of pages we need. 288 * Lookup the (hopefully) full range of pages we need.
@@ -298,7 +296,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
298 */ 296 */
299 if (spd.nr_pages < nr_pages) 297 if (spd.nr_pages < nr_pages)
300 page_cache_readahead_ondemand(mapping, &in->f_ra, in, 298 page_cache_readahead_ondemand(mapping, &in->f_ra, in,
301 NULL, index, nr_pages - spd.nr_pages); 299 NULL, index, req_pages - spd.nr_pages);
302 300
303 error = 0; 301 error = 0;
304 while (spd.nr_pages < nr_pages) { 302 while (spd.nr_pages < nr_pages) {
@@ -355,7 +353,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
355 353
356 if (PageReadahead(page)) 354 if (PageReadahead(page))
357 page_cache_readahead_ondemand(mapping, &in->f_ra, in, 355 page_cache_readahead_ondemand(mapping, &in->f_ra, in,
358 page, index, nr_pages - page_nr); 356 page, index, req_pages - page_nr);
359 357
360 /* 358 /*
361 * If the page isn't uptodate, we may need to start io on it 359 * If the page isn't uptodate, we may need to start io on it