author     Nick Piggin <npiggin@suse.de>        2007-03-27 02:55:08 -0400
committer  Jens Axboe <jens.axboe@oracle.com>   2007-03-27 02:55:08 -0400
commit     485ddb4b9741bafb70b22e5c1f9b4f37dc3e85bd
tree       9d666e849cdf9c495d446df242d87e798d4baec9
parent     1ffb96c587fa2af0d690dc3548a4a781c477bfb7
1/2 splice: dont steal
Stealing pages with splice is problematic because we cannot just insert
an uptodate page into the pagecache and hope the filesystem can take
care of it later. We also cannot just ClearPageUptodate, then hope
prepare_write does not write anything into the page, because I don't
think prepare_write gives that guarantee.

Remove support for SPLICE_F_MOVE for now. If we really want to bring it
back, we might be able to do so with the new filesystem buffered write
aops APIs I'm working on. If we really don't want to bring it back, then
we should decide that sooner rather than later, and remove the flag and
all the stealing infrastructure before anybody starts using it.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--	fs/splice.c	101
1 file changed, 38 insertions(+), 63 deletions(-)
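For context, SPLICE_F_MOVE is the userspace-visible splice(2) flag whose page-stealing behaviour this change disables; the flag stays accepted but becomes a hint only. A minimal, hypothetical userspace sketch of the pipe-to-file path that the stealing code used to optimise (file names, the 64k length, and error handling are illustrative and not taken from the patch):

/*
 * Hypothetical sketch, not part of this patch: how SPLICE_F_MOVE reaches
 * the pipe_to_file() path changed in the hunk below.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int pfd[2];
	int in = open("in.dat", O_RDONLY);
	int out = open("out.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (in < 0 || out < 0 || pipe(pfd) < 0) {
		perror("setup");
		return 1;
	}

	/* file -> pipe: stage up to 64k of input pages in the pipe */
	ssize_t n = splice(in, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
	if (n <= 0) {
		perror("splice in");
		return 1;
	}

	/*
	 * pipe -> file: with this patch SPLICE_F_MOVE is only a hint, so
	 * the data is copied into out.dat's pagecache rather than the
	 * pipe buffer page being stolen.
	 */
	if (splice(pfd[0], NULL, out, NULL, n, SPLICE_F_MOVE) < 0) {
		perror("splice out");
		return 1;
	}

	return 0;
}

Existing callers that pass SPLICE_F_MOVE keep working; they simply fall through to the copy path shown in the hunk below.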
diff --git a/fs/splice.c b/fs/splice.c
index 2fca6ebf4cc2..badc78ff1246 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -576,76 +576,51 @@ static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 	if (this_len + offset > PAGE_CACHE_SIZE)
 		this_len = PAGE_CACHE_SIZE - offset;
 
-	/*
-	 * Reuse buf page, if SPLICE_F_MOVE is set and we are doing a full
-	 * page.
-	 */
-	if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
-		/*
-		 * If steal succeeds, buf->page is now pruned from the
-		 * pagecache and we can reuse it. The page will also be
-		 * locked on successful return.
-		 */
-		if (buf->ops->steal(pipe, buf))
-			goto find_page;
-
-		page = buf->page;
-		if (add_to_page_cache(page, mapping, index, GFP_KERNEL)) {
-			unlock_page(page);
-			goto find_page;
-		}
-
-		page_cache_get(page);
-
-		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
-			lru_cache_add(page);
-	} else {
 find_page:
-		page = find_lock_page(mapping, index);
-		if (!page) {
-			ret = -ENOMEM;
-			page = page_cache_alloc_cold(mapping);
-			if (unlikely(!page))
-				goto out_ret;
+	page = find_lock_page(mapping, index);
+	if (!page) {
+		ret = -ENOMEM;
+		page = page_cache_alloc_cold(mapping);
+		if (unlikely(!page))
+			goto out_ret;
 
-			/*
-			 * This will also lock the page
-			 */
-			ret = add_to_page_cache_lru(page, mapping, index,
-						GFP_KERNEL);
-			if (unlikely(ret))
-				goto out;
-		}
+		/*
+		 * This will also lock the page
+		 */
+		ret = add_to_page_cache_lru(page, mapping, index,
+					GFP_KERNEL);
+		if (unlikely(ret))
+			goto out;
+	}
 
-		/*
-		 * We get here with the page locked. If the page is also
-		 * uptodate, we don't need to do more. If it isn't, we
-		 * may need to bring it in if we are not going to overwrite
-		 * the full page.
-		 */
-		if (!PageUptodate(page)) {
-			if (this_len < PAGE_CACHE_SIZE) {
-				ret = mapping->a_ops->readpage(file, page);
-				if (unlikely(ret))
-					goto out;
+	/*
+	 * We get here with the page locked. If the page is also
+	 * uptodate, we don't need to do more. If it isn't, we
+	 * may need to bring it in if we are not going to overwrite
+	 * the full page.
+	 */
+	if (!PageUptodate(page)) {
+		if (this_len < PAGE_CACHE_SIZE) {
+			ret = mapping->a_ops->readpage(file, page);
+			if (unlikely(ret))
+				goto out;
 
-				lock_page(page);
+			lock_page(page);
 
-				if (!PageUptodate(page)) {
-					/*
-					 * Page got invalidated, repeat.
-					 */
-					if (!page->mapping) {
-						unlock_page(page);
-						page_cache_release(page);
-						goto find_page;
-					}
-					ret = -EIO;
-					goto out;
-				}
-			} else
-				SetPageUptodate(page);
-		}
+			if (!PageUptodate(page)) {
+				/*
+				 * Page got invalidated, repeat.
+				 */
+				if (!page->mapping) {
+					unlock_page(page);
+					page_cache_release(page);
+					goto find_page;
+				}
+				ret = -EIO;
+				goto out;
+			}
+		} else
+			SetPageUptodate(page);
 	}
 
 	ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);