Diffstat:
-rw-r--r--	fs/splice.c	99
-rw-r--r--	include/linux/pagemap.h	2
-rw-r--r--	mm/filemap.c	32
3 files changed, 101 insertions(+), 32 deletions(-)
diff --git a/fs/splice.c b/fs/splice.c
index 447ebc0a37..a46ddd2856 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -279,7 +279,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 	pgoff_t index, end_index;
 	loff_t isize;
 	size_t total_len;
-	int error;
+	int error, page_nr;
 	struct splice_pipe_desc spd = {
 		.pages = pages,
 		.partial = partial,
@@ -299,47 +299,75 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 	 * read-ahead if this is a non-zero offset (we are likely doing small
 	 * chunk splice and the page is already there) for a single page.
 	 */
-	if (!loff || spd.nr_pages > 1)
-		do_page_cache_readahead(mapping, in, index, spd.nr_pages);
+	if (!loff || nr_pages > 1)
+		page_cache_readahead(mapping, &in->f_ra, in, index, nr_pages);
 
 	/*
 	 * Now fill in the holes:
 	 */
 	error = 0;
 	total_len = 0;
-	for (spd.nr_pages = 0; spd.nr_pages < nr_pages; spd.nr_pages++, index++) {
-		unsigned int this_len;
 
-		if (!len)
-			break;
+	/*
+	 * Lookup the (hopefully) full range of pages we need.
+	 */
+	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);
 
+	/*
+	 * If find_get_pages_contig() returned fewer pages than we needed,
+	 * allocate the rest.
+	 */
+	index += spd.nr_pages;
+	while (spd.nr_pages < nr_pages) {
 		/*
-		 * this_len is the max we'll use from this page
-		 */
-		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
-find_page:
-		/*
-		 * lookup the page for this index
+		 * Page could be there, find_get_pages_contig() breaks on
+		 * the first hole.
 		 */
 		page = find_get_page(mapping, index);
 		if (!page) {
 			/*
-			 * page didn't exist, allocate one
+			 * page didn't exist, allocate one.
 			 */
 			page = page_cache_alloc_cold(mapping);
 			if (!page)
 				break;
 
 			error = add_to_page_cache_lru(page, mapping, index,
 						mapping_gfp_mask(mapping));
 			if (unlikely(error)) {
 				page_cache_release(page);
 				break;
 			}
-
-			goto readpage;
+			/*
+			 * add_to_page_cache() locks the page, unlock it
+			 * to avoid convoluting the logic below even more.
+			 */
+			unlock_page(page);
 		}
 
+		pages[spd.nr_pages++] = page;
+		index++;
+	}
+
+	/*
+	 * Now loop over the map and see if we need to start IO on any
+	 * pages, fill in the partial map, etc.
+	 */
+	index = *ppos >> PAGE_CACHE_SHIFT;
+	nr_pages = spd.nr_pages;
+	spd.nr_pages = 0;
+	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
+		unsigned int this_len;
+
+		if (!len)
+			break;
+
+		/*
+		 * this_len is the max we'll use from this page
+		 */
+		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
+		page = pages[page_nr];
+
 		/*
 		 * If the page isn't uptodate, we may need to start io on it
 		 */
@@ -360,7 +388,6 @@ find_page:
 			 */
 			if (!page->mapping) {
 				unlock_page(page);
-				page_cache_release(page);
 				break;
 			}
 			/*
@@ -371,16 +398,20 @@ find_page:
 				goto fill_it;
 			}
 
-readpage:
 			/*
 			 * need to read in the page
 			 */
 			error = mapping->a_ops->readpage(in, page);
-
 			if (unlikely(error)) {
-				page_cache_release(page);
+				/*
+				 * We really should re-lookup the page here,
+				 * but it complicates things a lot. Instead
+				 * let's just do what we already stored, and
+				 * we'll get it the next time we are called.
+				 */
 				if (error == AOP_TRUNCATED_PAGE)
-					goto find_page;
+					error = 0;
+
 				break;
 			}
 
@@ -389,10 +420,8 @@ readpage:
 			 */
 			isize = i_size_read(mapping->host);
 			end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
-			if (unlikely(!isize || index > end_index)) {
-				page_cache_release(page);
+			if (unlikely(!isize || index > end_index))
 				break;
-			}
 
 			/*
 			 * if this is the last page, see if we need to shrink
@@ -400,27 +429,33 @@ readpage:
 			 */
 			if (end_index == index) {
 				loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK);
-				if (total_len + loff > isize) {
-					page_cache_release(page);
+				if (total_len + loff > isize)
 					break;
-				}
 				/*
 				 * force quit after adding this page
 				 */
-				nr_pages = spd.nr_pages;
+				len = this_len;
 				this_len = min(this_len, loff);
 				loff = 0;
 			}
 		}
fill_it:
-		pages[spd.nr_pages] = page;
-		partial[spd.nr_pages].offset = loff;
-		partial[spd.nr_pages].len = this_len;
+		partial[page_nr].offset = loff;
+		partial[page_nr].len = this_len;
 		len -= this_len;
 		total_len += this_len;
 		loff = 0;
+		spd.nr_pages++;
+		index++;
 	}
 
+	/*
+	 * Release any pages at the end, if we quit early. 'page_nr' is how
+	 * far we got, 'nr_pages' is how many pages are in the map.
+	 */
+	while (page_nr < nr_pages)
+		page_cache_release(pages[page_nr++]);
+
 	if (spd.nr_pages)
 		return splice_to_pipe(pipe, &spd);
 
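In short, the new read side works in two phases: find_get_pages_contig() grabs as many already-cached pages as it can in one batched lookup, and a follow-up loop allocates pages only for the holes that remain, before any per-page IO decisions are made. Below is a minimal userspace sketch of that batch-then-fill pattern; cache_lookup_contig() and the malloc-backed slot array are hypothetical stand-ins for the page cache, illustrating the control flow only, not the kernel API.

/*
 * Userspace analogy: a NULL slot plays the role of a missing page,
 * and cache_lookup_contig() mimics find_get_pages_contig() by
 * stopping at the first hole.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_SLOTS 8

static int *cache[NR_SLOTS];	/* NULL means "hole" */

/* Copy out consecutive slots starting at 'index', stop at first hole. */
static unsigned cache_lookup_contig(unsigned index, unsigned nr, int **out)
{
	unsigned i;

	for (i = 0; i < nr && index + i < NR_SLOTS; i++) {
		if (!cache[index + i])
			break;		/* first hole ends the gang */
		out[i] = cache[index + i];
	}
	return i;
}

int main(void)
{
	int *found[NR_SLOTS];
	unsigned nr = 6, got, i;

	/* Populate slots 0, 1 and 3; slot 2 is a hole. */
	for (i = 0; i < 4; i++) {
		if (i == 2)
			continue;
		cache[i] = malloc(sizeof(int));
		*cache[i] = (int)i;
	}

	/* Phase 1: batched lookup, stops at the hole in slot 2. */
	got = cache_lookup_contig(0, nr, found);
	printf("contig lookup got %u of %u\n", got, nr);

	/* Phase 2: fill the holes one by one, like the new while-loop. */
	for (i = got; i < nr; i++) {
		if (!cache[i]) {
			cache[i] = malloc(sizeof(int));
			*cache[i] = (int)i;
		}
		found[i] = cache[i];
	}

	for (i = 0; i < nr; i++)
		printf("slot %u -> %d\n", i, *found[i]);
	for (i = 0; i < NR_SLOTS; i++)
		free(cache[i]);
	return 0;
}

The point of the split is that in the common case, where every page is already cached, the lookup structure is walked once for the whole range instead of once per page.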
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 9539efd4f7..7a1af574de 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -78,6 +78,8 @@ extern struct page * find_or_create_page(struct address_space *mapping,
 				unsigned long index, gfp_t gfp_mask);
 unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 			unsigned int nr_pages, struct page **pages);
+unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
+			unsigned int nr_pages, struct page **pages);
 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
 			int tag, unsigned int nr_pages, struct page **pages);
 
diff --git a/mm/filemap.c b/mm/filemap.c
index 3ef20739e7..fd57442186 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -697,6 +697,38 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 	return ret;
 }
 
+/**
+ * find_get_pages_contig - gang contiguous pagecache lookup
+ * @mapping:	The address_space to search
+ * @index:	The starting page index
+ * @nr_pages:	The maximum number of pages
+ * @pages:	Where the resulting pages are placed
+ *
+ * find_get_pages_contig() works exactly like find_get_pages(), except
+ * that the returned pages are guaranteed to be contiguous.
+ *
+ * find_get_pages_contig() returns the number of pages which were found.
+ */
+unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
+			       unsigned int nr_pages, struct page **pages)
+{
+	unsigned int i;
+	unsigned int ret;
+
+	read_lock_irq(&mapping->tree_lock);
+	ret = radix_tree_gang_lookup(&mapping->page_tree,
+				(void **)pages, index, nr_pages);
+	for (i = 0; i < ret; i++) {
+		if (pages[i]->mapping == NULL || pages[i]->index != index)
+			break;
+
+		page_cache_get(pages[i]);
+		index++;
+	}
+	read_unlock_irq(&mapping->tree_lock);
+	return i;
+}
+
 /*
  * Like find_get_pages, except we only return pages which are tagged with
  * `tag'. We update *index to index the next page for the traversal.
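Note that find_get_pages_contig() takes a reference on every page it returns (page_cache_get() under the tree lock) and stops at the first hole, or at the first page whose ->index falls out of sequence, so the caller must release each returned page. A caller-side sketch against the 2.6.x API shown above; show_contig_usage() is a hypothetical illustration, not part of this commit:

/*
 * Sketch: grab up to 16 contiguous pages starting at 'start'.
 * Every returned page carries a reference that we must drop.
 */
static void show_contig_usage(struct address_space *mapping, pgoff_t start)
{
	struct page *pages[16];
	unsigned int nr, i;

	nr = find_get_pages_contig(mapping, start, 16, pages);

	/*
	 * nr may be less than 16: the gang lookup ends at the first
	 * hole or at the first page that breaks the contiguous run.
	 */
	for (i = 0; i < nr; i++) {
		/* ... use pages[i] ... */
		page_cache_release(pages[i]);
	}
}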