author		Jens Axboe <axboe@suse.de>	2006-05-01 13:50:48 -0400
committer	Jens Axboe <axboe@suse.de>	2006-05-01 13:50:48 -0400
commit		0568b409c74f7a125d92a09a3f386785700ef688
tree		79125b2f4755e98949f0d941a0092e5d3367bbff /fs
parent		46e678c96bbd775abd05d3ddbe2fd334794f9157
[PATCH] splice: fix bugs in pipe_to_file()
Found by Oleg Nesterov <oleg@tv-sign.ru>, fixed by me.
- Only allow full pages to go to the page cache.
- Check page != buf->page instead of using PIPE_BUF_FLAG_STOLEN.
- Remember to clear 'stolen' if add_to_page_cache() fails.
And as a cleanup on that:
- Make the bottom fall-through logic a little less convoluted. Also make
the steal path hold an extra reference to the page, so we don't have
to differentiate between stolen and non-stolen at the end.
Signed-off-by: Jens Axboe <axboe@suse.de>
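
The last cleanup point is the interesting one, so here is a minimal user-space sketch of the refcounting idea, not kernel code: struct upage, upage_get()/upage_put() and move_page() are hypothetical stand-ins for struct page, page_cache_get()/page_cache_release() and pipe_to_file(). If the steal path takes an extra reference on the page it reuses, the common exit path can drop exactly one reference whether the page was stolen or freshly allocated, and no STOLEN flag is needed.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct upage {
	int  refcount;
	char data[64];
};

static struct upage *upage_alloc(void)
{
	struct upage *p = calloc(1, sizeof(*p));

	if (p)
		p->refcount = 1;
	return p;
}

static void upage_get(struct upage *p)
{
	p->refcount++;
}

static void upage_put(struct upage *p)
{
	if (--p->refcount == 0)
		free(p);
}

/* Move src's contents into *dst, stealing src itself when possible. */
static void move_page(struct upage *src, struct upage **dst, int may_steal)
{
	struct upage *page;

	if (may_steal && src->refcount == 1) {
		/*
		 * Steal path: reuse src directly, but grab an extra
		 * reference so the unconditional upage_put() below is
		 * balanced -- the same trick as the added page_cache_get().
		 */
		page = src;
		upage_get(page);
	} else {
		/* Copy path: fresh page, we own its initial reference. */
		page = upage_alloc();
		if (!page)
			return;
		memcpy(page->data, src->data, sizeof(page->data));
	}

	/* "Insert into the destination": the destination takes its own ref. */
	*dst = page;
	upage_get(page);

	/* Common exit: drop our local reference, no STOLEN flag needed. */
	upage_put(page);
}

int main(void)
{
	struct upage *src = upage_alloc();
	struct upage *dst = NULL;

	strcpy(src->data, "spliced data");
	move_page(src, &dst, 1);
	printf("dst: \"%s\", stolen: %s\n", dst->data,
	       dst == src ? "yes" : "no");

	upage_put(dst);		/* destination's reference */
	upage_put(src);		/* caller's original reference */
	return 0;
}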
Diffstat (limited to 'fs')
-rw-r--r--	fs/pipe.c	 3
-rw-r--r--	fs/splice.c	37
2 files changed, 19 insertions, 21 deletions
diff --git a/fs/pipe.c b/fs/pipe.c
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -99,8 +99,6 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
 {
 	struct page *page = buf->page;
 
-	buf->flags &= ~PIPE_BUF_FLAG_STOLEN;
-
 	/*
 	 * If nobody else uses this page, and we don't already have a
 	 * temporary page, let's keep track of it as a one-deep
@@ -130,7 +128,6 @@ static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
 	struct page *page = buf->page;
 
 	if (page_count(page) == 1) {
-		buf->flags |= PIPE_BUF_FLAG_STOLEN;
 		lock_page(page);
 		return 0;
 	}
diff --git a/fs/splice.c b/fs/splice.c
index 9df28d30efa0..1633778f3652 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -78,7 +78,7 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
 		return 1;
 	}
 
-	buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
+	buf->flags |= PIPE_BUF_FLAG_LRU;
 	return 0;
 }
 
@@ -87,7 +87,7 @@ static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
 {
 	page_cache_release(buf->page);
 	buf->page = NULL;
-	buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
+	buf->flags &= ~PIPE_BUF_FLAG_LRU;
 }
 
 static void *page_cache_pipe_buf_map(struct file *file,
@@ -587,9 +587,10 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
 		this_len = PAGE_CACHE_SIZE - offset;
 
 	/*
-	 * Reuse buf page, if SPLICE_F_MOVE is set.
+	 * Reuse buf page, if SPLICE_F_MOVE is set and we are doing a full
+	 * page.
 	 */
-	if (sd->flags & SPLICE_F_MOVE) {
+	if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
 		/*
 		 * If steal succeeds, buf->page is now pruned from the vm
 		 * side (LRU and page cache) and we can reuse it. The page
@@ -604,6 +605,8 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
 			goto find_page;
 		}
 
+		page_cache_get(page);
+
 		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
 			lru_cache_add(page);
 	} else {
@@ -662,7 +665,7 @@ find_page:
 	} else if (ret)
 		goto out;
 
-	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
+	if (buf->page != page) {
 		char *dst = kmap_atomic(page, KM_USER0);
 
 		memcpy(dst + offset, src + buf->offset, this_len);
@@ -671,22 +674,20 @@ find_page:
 	}
 
 	ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
-	if (ret == AOP_TRUNCATED_PAGE) {
+	if (!ret) {
+		/*
+		 * Return the number of bytes written and mark page as
+		 * accessed, we are now done!
+		 */
+		ret = this_len;
+		mark_page_accessed(page);
+		balance_dirty_pages_ratelimited(mapping);
+	} else if (ret == AOP_TRUNCATED_PAGE) {
 		page_cache_release(page);
 		goto find_page;
-	} else if (ret)
-		goto out;
-
-	/*
-	 * Return the number of bytes written.
-	 */
-	ret = this_len;
-	mark_page_accessed(page);
-	balance_dirty_pages_ratelimited(mapping);
+	}
 out:
-	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN))
-		page_cache_release(page);
-
+	page_cache_release(page);
 	unlock_page(page);
 out_nomem:
 	buf->ops->unmap(info, buf);
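
For context, pipe_to_file() runs on the pipe-to-file leg of splice(2), which is the path the SPLICE_F_MOVE handling above services. A minimal user-space program along these lines drives it (file names are placeholders, error handling abbreviated); whether pages are actually moved rather than copied depends on the checks patched above (full page, successful steal).

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	/* Placeholder paths; pass real ones on the command line. */
	const char *in_path  = argc > 1 ? argv[1] : "in.dat";
	const char *out_path = argc > 2 ? argv[2] : "out.dat";
	int in_fd, out_fd, pfd[2];
	ssize_t n;

	in_fd  = open(in_path, O_RDONLY);
	out_fd = open(out_path, O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (in_fd < 0 || out_fd < 0 || pipe(pfd) < 0) {
		perror("setup");
		return 1;
	}

	for (;;) {
		/* file -> pipe */
		n = splice(in_fd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
		if (n <= 0)
			break;
		/* pipe -> file: this leg runs pipe_to_file() in the kernel */
		while (n > 0) {
			ssize_t m = splice(pfd[0], NULL, out_fd, NULL, n,
					   SPLICE_F_MOVE);
			if (m <= 0) {
				perror("splice");
				return 1;
			}
			n -= m;
		}
	}
	return n < 0 ? 1 : 0;
}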