Diffstat (limited to 'fs/splice.c')
 -rw-r--r--  fs/splice.c  166
 1 file changed, 83 insertions, 83 deletions
diff --git a/fs/splice.c b/fs/splice.c
index a46ddd28561e..b150493b6fc3 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -78,7 +78,7 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
 		return 1;
 	}
 
-	buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
+	buf->flags |= PIPE_BUF_FLAG_LRU;
 	return 0;
 }
 
@@ -87,12 +87,11 @@ static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
 {
 	page_cache_release(buf->page);
 	buf->page = NULL;
-	buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
+	buf->flags &= ~PIPE_BUF_FLAG_LRU;
 }
 
-static void *page_cache_pipe_buf_map(struct file *file,
-				     struct pipe_inode_info *info,
-				     struct pipe_buffer *buf)
+static int page_cache_pipe_buf_pin(struct pipe_inode_info *info,
+				   struct pipe_buffer *buf)
 {
 	struct page *page = buf->page;
 	int err;
@@ -118,64 +117,44 @@ static void *page_cache_pipe_buf_map(struct file *file,
 		}
 
 		/*
-		 * Page is ok afterall, fall through to mapping.
+		 * Page is ok afterall, we are done.
 		 */
 		unlock_page(page);
 	}
 
-	return kmap(page);
+	return 0;
 error:
 	unlock_page(page);
-	return ERR_PTR(err);
-}
-
-static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
-				      struct pipe_buffer *buf)
-{
-	kunmap(buf->page);
-}
-
-static void *user_page_pipe_buf_map(struct file *file,
-				    struct pipe_inode_info *pipe,
-				    struct pipe_buffer *buf)
-{
-	return kmap(buf->page);
-}
-
-static void user_page_pipe_buf_unmap(struct pipe_inode_info *pipe,
-				     struct pipe_buffer *buf)
-{
-	kunmap(buf->page);
-}
-
-static void page_cache_pipe_buf_get(struct pipe_inode_info *info,
-				    struct pipe_buffer *buf)
-{
-	page_cache_get(buf->page);
+	return err;
 }
 
 static struct pipe_buf_operations page_cache_pipe_buf_ops = {
 	.can_merge = 0,
-	.map = page_cache_pipe_buf_map,
-	.unmap = page_cache_pipe_buf_unmap,
+	.map = generic_pipe_buf_map,
+	.unmap = generic_pipe_buf_unmap,
+	.pin = page_cache_pipe_buf_pin,
 	.release = page_cache_pipe_buf_release,
 	.steal = page_cache_pipe_buf_steal,
-	.get = page_cache_pipe_buf_get,
+	.get = generic_pipe_buf_get,
 };
 
 static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
 				    struct pipe_buffer *buf)
 {
-	return 1;
+	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
+		return 1;
+
+	return 0;
 }
 
 static struct pipe_buf_operations user_page_pipe_buf_ops = {
 	.can_merge = 0,
-	.map = user_page_pipe_buf_map,
-	.unmap = user_page_pipe_buf_unmap,
+	.map = generic_pipe_buf_map,
+	.unmap = generic_pipe_buf_unmap,
+	.pin = generic_pipe_buf_pin,
 	.release = page_cache_pipe_buf_release,
 	.steal = user_page_pipe_buf_steal,
-	.get = page_cache_pipe_buf_get,
+	.get = generic_pipe_buf_get,
 };
 
 /*
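
[Annotation] Both ops tables now point at generic_pipe_buf_{map,unmap,pin,get} helpers, which this diff assumes are provided by fs/pipe.c. A rough sketch of the contract they implement, reconstructed from pipe code of this vintage and not part of this diff (the PIPE_BUF_FLAG_ATOMIC bookkeeping is an assumption about how ->unmap() learns which kind of mapping it is undoing):

void *generic_pipe_buf_map(struct pipe_inode_info *info,
			   struct pipe_buffer *buf, int atomic)
{
	/* ->pin() has already made the page uptodate; just map it */
	if (atomic) {
		buf->flags |= PIPE_BUF_FLAG_ATOMIC;
		return kmap_atomic(buf->page, KM_USER0);
	}

	return kmap(buf->page);
}

void generic_pipe_buf_unmap(struct pipe_inode_info *info,
			    struct pipe_buffer *buf, void *map_data)
{
	if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
		buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
		kunmap_atomic(map_data, KM_USER0);
	} else
		kunmap(buf->page);
}

int generic_pipe_buf_pin(struct pipe_inode_info *info, struct pipe_buffer *buf)
{
	/* anonymous pipe pages are always uptodate and in memory */
	return 0;
}

void generic_pipe_buf_get(struct pipe_inode_info *info, struct pipe_buffer *buf)
{
	page_cache_get(buf->page);
}
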
@@ -210,6 +189,9 @@ static ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
 			buf->offset = spd->partial[page_nr].offset;
 			buf->len = spd->partial[page_nr].len;
 			buf->ops = spd->ops;
+			if (spd->flags & SPLICE_F_GIFT)
+				buf->flags |= PIPE_BUF_FLAG_GIFT;
+
 			pipe->nrbufs++;
 			page_nr++;
 			ret += buf->len;
@@ -326,6 +308,12 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 		page = find_get_page(mapping, index);
 		if (!page) {
 			/*
+			 * Make sure the read-ahead engine is notified
+			 * about this failure.
+			 */
+			handle_ra_miss(mapping, &in->f_ra, index);
+
+			/*
 			 * page didn't exist, allocate one.
 			 */
 			page = page_cache_alloc_cold(mapping);
@@ -517,26 +505,16 @@ static int pipe_to_sendpage(struct pipe_inode_info *info,
 {
 	struct file *file = sd->file;
 	loff_t pos = sd->pos;
-	ssize_t ret;
-	void *ptr;
-	int more;
-
-	/*
-	 * Sub-optimal, but we are limited by the pipe ->map. We don't
-	 * need a kmap'ed buffer here, we just want to make sure we
-	 * have the page pinned if the pipe page originates from the
-	 * page cache.
-	 */
-	ptr = buf->ops->map(file, info, buf);
-	if (IS_ERR(ptr))
-		return PTR_ERR(ptr);
+	int ret, more;
 
-	more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
+	ret = buf->ops->pin(info, buf);
+	if (!ret) {
+		more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
 
-	ret = file->f_op->sendpage(file, buf->page, buf->offset, sd->len,
-				   &pos, more);
+		ret = file->f_op->sendpage(file, buf->page, buf->offset,
+					   sd->len, &pos, more);
+	}
 
-	buf->ops->unmap(info, buf);
 	return ret;
 }
 
@@ -569,15 +547,14 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
 	unsigned int offset, this_len;
 	struct page *page;
 	pgoff_t index;
-	char *src;
 	int ret;
 
 	/*
 	 * make sure the data in this buffer is uptodate
 	 */
-	src = buf->ops->map(file, info, buf);
-	if (IS_ERR(src))
-		return PTR_ERR(src);
+	ret = buf->ops->pin(info, buf);
+	if (unlikely(ret))
+		return ret;
 
 	index = sd->pos >> PAGE_CACHE_SHIFT;
 	offset = sd->pos & ~PAGE_CACHE_MASK;
@@ -587,9 +564,10 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
 		this_len = PAGE_CACHE_SIZE - offset;
 
 	/*
-	 * Reuse buf page, if SPLICE_F_MOVE is set.
+	 * Reuse buf page, if SPLICE_F_MOVE is set and we are doing a full
+	 * page.
 	 */
-	if (sd->flags & SPLICE_F_MOVE) {
+	if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
 		/*
 		 * If steal succeeds, buf->page is now pruned from the vm
 		 * side (LRU and page cache) and we can reuse it. The page
@@ -599,8 +577,12 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
 			goto find_page;
 
 		page = buf->page;
-		if (add_to_page_cache(page, mapping, index, gfp_mask))
+		if (add_to_page_cache(page, mapping, index, gfp_mask)) {
+			unlock_page(page);
 			goto find_page;
+		}
+
+		page_cache_get(page);
 
 		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
 			lru_cache_add(page);
@@ -660,34 +642,36 @@ find_page:
 	} else if (ret)
 		goto out;
 
-	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
-		char *dst = kmap_atomic(page, KM_USER0);
+	if (buf->page != page) {
+		/*
+		 * Careful, ->map() uses KM_USER0!
+		 */
+		char *src = buf->ops->map(info, buf, 1);
+		char *dst = kmap_atomic(page, KM_USER1);
 
 		memcpy(dst + offset, src + buf->offset, this_len);
 		flush_dcache_page(page);
-		kunmap_atomic(dst, KM_USER0);
+		kunmap_atomic(dst, KM_USER1);
+		buf->ops->unmap(info, buf, src);
 	}
 
 	ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
-	if (ret == AOP_TRUNCATED_PAGE) {
+	if (!ret) {
+		/*
+		 * Return the number of bytes written and mark page as
+		 * accessed, we are now done!
+		 */
+		ret = this_len;
+		mark_page_accessed(page);
+		balance_dirty_pages_ratelimited(mapping);
+	} else if (ret == AOP_TRUNCATED_PAGE) {
 		page_cache_release(page);
 		goto find_page;
-	} else if (ret)
-		goto out;
-
-	/*
-	 * Return the number of bytes written.
-	 */
-	ret = this_len;
-	mark_page_accessed(page);
-	balance_dirty_pages_ratelimited(mapping);
+	}
 out:
-	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN))
-		page_cache_release(page);
-
+	page_cache_release(page);
 	unlock_page(page);
 out_nomem:
-	buf->ops->unmap(info, buf);
 	return ret;
 }
 
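
[Annotation] The KM_USER0/KM_USER1 split in the copy path above is load-bearing: the atomic ->map() grabs KM_USER0, so mapping the destination page in the same slot would silently reuse the source's fixmap slot. A minimal standalone illustration of the two-slot pattern (hypothetical helper, same-era highmem API):

static void copy_between_pages_atomic(struct page *dst_page, unsigned int doff,
				      struct page *src_page, unsigned int soff,
				      unsigned int len)
{
	char *src = kmap_atomic(src_page, KM_USER0);
	char *dst = kmap_atomic(dst_page, KM_USER1);	/* must not be KM_USER0 */

	memcpy(dst + doff, src + soff, len);
	flush_dcache_page(dst_page);

	/* unmap in reverse order of mapping */
	kunmap_atomic(dst, KM_USER1);
	kunmap_atomic(src, KM_USER0);
}
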
@@ -1095,7 +1079,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
  */
 static int get_iovec_page_array(const struct iovec __user *iov,
 				unsigned int nr_vecs, struct page **pages,
-				struct partial_page *partial)
+				struct partial_page *partial, int aligned)
 {
 	int buffers = 0, error = 0;
 
@@ -1135,6 +1119,15 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 		 * in the user pages.
 		 */
 		off = (unsigned long) base & ~PAGE_MASK;
+
+		/*
+		 * If asked for alignment, the offset must be zero and the
+		 * length a multiple of the PAGE_SIZE.
+		 */
+		error = -EINVAL;
+		if (aligned && (off || len & ~PAGE_MASK))
+			break;
+
 		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		if (npages > PIPE_BUFFERS - buffers)
 			npages = PIPE_BUFFERS - buffers;
@@ -1228,7 +1221,8 @@ static long do_vmsplice(struct file *file, const struct iovec __user *iov,
 	else if (unlikely(!nr_segs))
 		return 0;
 
-	spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial);
+	spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial,
+					    flags & SPLICE_F_GIFT);
 	if (spd.nr_pages <= 0)
 		return spd.nr_pages;
 
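
[Annotation] With the aligned argument wired up to SPLICE_F_GIFT, a gifting vmsplice(2) now requires the iovec to start on a page boundary and cover whole pages, since gifted pages may be stolen wholesale into the page cache. A user-space sketch (gift_one_page is hypothetical; vmsplice and SPLICE_F_GIFT come from <fcntl.h> with _GNU_SOURCE on modern glibc):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

static ssize_t gift_one_page(int pipe_wr_fd)
{
	long pgsz = sysconf(_SC_PAGESIZE);
	struct iovec iov;
	void *buf;

	/* base must be page-aligned, or the kernel now returns -EINVAL */
	if (posix_memalign(&buf, pgsz, pgsz))
		return -1;
	memset(buf, 'x', pgsz);

	iov.iov_base = buf;
	iov.iov_len = pgsz;	/* and len must be a PAGE_SIZE multiple */

	/* after a successful gift, treat buf as no longer ours to touch */
	return vmsplice(pipe_wr_fd, &iov, 1, SPLICE_F_GIFT);
}
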
@@ -1336,6 +1330,12 @@ static int link_pipe(struct pipe_inode_info *ipipe,
 		obuf = opipe->bufs + nbuf;
 		*obuf = *ibuf;
 
+		/*
+		 * Don't inherit the gift flag, we need to
+		 * prevent multiple steals of this page.
+		 */
+		obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
+
 		if (obuf->len > len)
 			obuf->len = len;
 