aboutsummaryrefslogtreecommitdiffstats
path: root/fs/splice.c
diff options
context:
space:
mode:
authorJens Axboe <axboe@suse.de>2006-05-01 13:59:03 -0400
committerJens Axboe <axboe@suse.de>2006-05-01 13:59:03 -0400
commitf84d751994441292593523c7069ed147176f6cab (patch)
treea1a0c4836289df86bb62e7eae5c80c66fca1643a /fs/splice.c
parent0568b409c74f7a125d92a09a3f386785700ef688 (diff)
[PATCH] pipe: introduce ->pin() buffer operation
The ->map() function is really expensive on highmem machines right now, since it has to use the slower kmap() instead of kmap_atomic(). Splice rarely needs to access the virtual address of a page, so it's a waste of time doing it. Introduce ->pin() to take over the responsibility of making sure the page data is valid. ->map() is then reduced to just kmap(). That way we can also share a most of the pipe buffer ops between pipe.c and splice.c Signed-off-by: Jens Axboe <axboe@suse.de>
Diffstat (limited to 'fs/splice.c')
-rw-r--r--fs/splice.c91
1 file changed, 30 insertions, 61 deletions
diff --git a/fs/splice.c b/fs/splice.c
index 1633778f3652..d7538d83c367 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -90,9 +90,8 @@ static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
90 buf->flags &= ~PIPE_BUF_FLAG_LRU; 90 buf->flags &= ~PIPE_BUF_FLAG_LRU;
91} 91}
92 92
93static void *page_cache_pipe_buf_map(struct file *file, 93static int page_cache_pipe_buf_pin(struct pipe_inode_info *info,
94 struct pipe_inode_info *info, 94 struct pipe_buffer *buf)
95 struct pipe_buffer *buf)
96{ 95{
97 struct page *page = buf->page; 96 struct page *page = buf->page;
98 int err; 97 int err;
@@ -118,49 +117,25 @@ static void *page_cache_pipe_buf_map(struct file *file,
118 } 117 }
119 118
120 /* 119 /*
121 * Page is ok afterall, fall through to mapping. 120 * Page is ok afterall, we are done.
122 */ 121 */
123 unlock_page(page); 122 unlock_page(page);
124 } 123 }
125 124
126 return kmap(page); 125 return 0;
127error: 126error:
128 unlock_page(page); 127 unlock_page(page);
129 return ERR_PTR(err); 128 return err;
130}
131
132static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
133 struct pipe_buffer *buf)
134{
135 kunmap(buf->page);
136}
137
138static void *user_page_pipe_buf_map(struct file *file,
139 struct pipe_inode_info *pipe,
140 struct pipe_buffer *buf)
141{
142 return kmap(buf->page);
143}
144
145static void user_page_pipe_buf_unmap(struct pipe_inode_info *pipe,
146 struct pipe_buffer *buf)
147{
148 kunmap(buf->page);
149}
150
151static void page_cache_pipe_buf_get(struct pipe_inode_info *info,
152 struct pipe_buffer *buf)
153{
154 page_cache_get(buf->page);
155} 129}
156 130
157static struct pipe_buf_operations page_cache_pipe_buf_ops = { 131static struct pipe_buf_operations page_cache_pipe_buf_ops = {
158 .can_merge = 0, 132 .can_merge = 0,
159 .map = page_cache_pipe_buf_map, 133 .map = generic_pipe_buf_map,
160 .unmap = page_cache_pipe_buf_unmap, 134 .unmap = generic_pipe_buf_unmap,
135 .pin = page_cache_pipe_buf_pin,
161 .release = page_cache_pipe_buf_release, 136 .release = page_cache_pipe_buf_release,
162 .steal = page_cache_pipe_buf_steal, 137 .steal = page_cache_pipe_buf_steal,
163 .get = page_cache_pipe_buf_get, 138 .get = generic_pipe_buf_get,
164}; 139};
165 140
166static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe, 141static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
@@ -171,11 +146,12 @@ static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
171 146
172static struct pipe_buf_operations user_page_pipe_buf_ops = { 147static struct pipe_buf_operations user_page_pipe_buf_ops = {
173 .can_merge = 0, 148 .can_merge = 0,
174 .map = user_page_pipe_buf_map, 149 .map = generic_pipe_buf_map,
175 .unmap = user_page_pipe_buf_unmap, 150 .unmap = generic_pipe_buf_unmap,
151 .pin = generic_pipe_buf_pin,
176 .release = page_cache_pipe_buf_release, 152 .release = page_cache_pipe_buf_release,
177 .steal = user_page_pipe_buf_steal, 153 .steal = user_page_pipe_buf_steal,
178 .get = page_cache_pipe_buf_get, 154 .get = generic_pipe_buf_get,
179}; 155};
180 156
181/* 157/*
@@ -517,26 +493,16 @@ static int pipe_to_sendpage(struct pipe_inode_info *info,
517{ 493{
518 struct file *file = sd->file; 494 struct file *file = sd->file;
519 loff_t pos = sd->pos; 495 loff_t pos = sd->pos;
520 ssize_t ret; 496 int ret, more;
521 void *ptr;
522 int more;
523 497
524 /* 498 ret = buf->ops->pin(info, buf);
525 * Sub-optimal, but we are limited by the pipe ->map. We don't 499 if (!ret) {
526 * need a kmap'ed buffer here, we just want to make sure we 500 more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
527 * have the page pinned if the pipe page originates from the
528 * page cache.
529 */
530 ptr = buf->ops->map(file, info, buf);
531 if (IS_ERR(ptr))
532 return PTR_ERR(ptr);
533
534 more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
535 501
536 ret = file->f_op->sendpage(file, buf->page, buf->offset, sd->len, 502 ret = file->f_op->sendpage(file, buf->page, buf->offset,
537 &pos, more); 503 sd->len, &pos, more);
504 }
538 505
539 buf->ops->unmap(info, buf);
540 return ret; 506 return ret;
541} 507}
542 508
@@ -569,15 +535,14 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
569 unsigned int offset, this_len; 535 unsigned int offset, this_len;
570 struct page *page; 536 struct page *page;
571 pgoff_t index; 537 pgoff_t index;
572 char *src;
573 int ret; 538 int ret;
574 539
575 /* 540 /*
576 * make sure the data in this buffer is uptodate 541 * make sure the data in this buffer is uptodate
577 */ 542 */
578 src = buf->ops->map(file, info, buf); 543 ret = buf->ops->pin(info, buf);
579 if (IS_ERR(src)) 544 if (unlikely(ret))
580 return PTR_ERR(src); 545 return ret;
581 546
582 index = sd->pos >> PAGE_CACHE_SHIFT; 547 index = sd->pos >> PAGE_CACHE_SHIFT;
583 offset = sd->pos & ~PAGE_CACHE_MASK; 548 offset = sd->pos & ~PAGE_CACHE_MASK;
@@ -666,11 +631,16 @@ find_page:
666 goto out; 631 goto out;
667 632
668 if (buf->page != page) { 633 if (buf->page != page) {
669 char *dst = kmap_atomic(page, KM_USER0); 634 /*
635 * Careful, ->map() uses KM_USER0!
636 */
637 char *src = buf->ops->map(info, buf);
638 char *dst = kmap_atomic(page, KM_USER1);
670 639
671 memcpy(dst + offset, src + buf->offset, this_len); 640 memcpy(dst + offset, src + buf->offset, this_len);
672 flush_dcache_page(page); 641 flush_dcache_page(page);
673 kunmap_atomic(dst, KM_USER0); 642 kunmap_atomic(dst, KM_USER1);
643 buf->ops->unmap(info, buf);
674 } 644 }
675 645
676 ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len); 646 ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
@@ -690,7 +660,6 @@ out:
690 page_cache_release(page); 660 page_cache_release(page);
691 unlock_page(page); 661 unlock_page(page);
692out_nomem: 662out_nomem:
693 buf->ops->unmap(info, buf);
694 return ret; 663 return ret;
695} 664}
696 665