Diffstat (limited to 'fs')
 -rw-r--r--   fs/fuse/dev.c |  6
 -rw-r--r--   fs/pipe.c     | 70
 -rw-r--r--   fs/splice.c   | 24
 3 files changed, 27 insertions, 73 deletions
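
The change repeated throughout the hunks below is mechanical: callers stop going through the pipe_buf_operations ->map()/->unmap() hooks and map buf->page directly, using kmap_atomic()/kunmap_atomic() on atomic paths and kmap()/kunmap() on paths that may sleep. As a minimal sketch of the before/after shape of a caller, assuming a hypothetical copy_from_pipe_buf() helper that is not part of this diff (the BEFORE_THIS_PATCH guard is purely illustrative):

#include <linux/highmem.h>
#include <linux/pipe_fs_i.h>
#include <linux/string.h>

/* Illustrative helper only: copy 'len' bytes out of a confirmed pipe buffer. */
static void copy_from_pipe_buf(struct pipe_inode_info *pipe,
                               struct pipe_buffer *buf, void *dst, size_t len)
{
#ifdef BEFORE_THIS_PATCH
        /* Old shape: go through the per-buffer operations. */
        char *src = buf->ops->map(pipe, buf, 1);

        memcpy(dst, src + buf->offset, len);
        buf->ops->unmap(pipe, buf, src);
#else
        /* New shape: map the page directly. */
        char *src = kmap_atomic(buf->page);

        memcpy(dst, src + buf->offset, len);
        kunmap_atomic(src);
#endif
}

The struct pipe_buf_operations hunks below simply drop the now-unused .map/.unmap initializers.
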
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index f90af6fe6884..aac71ce373e4 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -667,7 +667,7 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
                 struct pipe_buffer *buf = cs->currbuf;
 
                 if (!cs->write) {
-                        buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
+                        kunmap_atomic(cs->mapaddr);
                 } else {
                         kunmap_atomic(cs->mapaddr);
                         buf->len = PAGE_SIZE - cs->len;
@@ -706,7 +706,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 
                         BUG_ON(!cs->nr_segs);
                         cs->currbuf = buf;
-                        cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
+                        cs->mapaddr = kmap_atomic(buf->page);
                         cs->len = buf->len;
                         cs->buf = cs->mapaddr + buf->offset;
                         cs->pipebufs++;
@@ -874,7 +874,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
 out_fallback_unlock:
         unlock_page(newpage);
 out_fallback:
-        cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
+        cs->mapaddr = kmap_atomic(buf->page);
         cs->buf = cs->mapaddr + buf->offset;
 
         err = lock_request(cs->fc, cs->req);
diff --git a/fs/pipe.c b/fs/pipe.c
index 78fd0d0788db..6679c95eb4c3 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -226,52 +226,6 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
 }
 
 /**
- * generic_pipe_buf_map - virtually map a pipe buffer
- * @pipe: the pipe that the buffer belongs to
- * @buf: the buffer that should be mapped
- * @atomic: whether to use an atomic map
- *
- * Description:
- *    This function returns a kernel virtual address mapping for the
- *    pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided
- *    and the caller has to be careful not to fault before calling
- *    the unmap function.
- *
- *    Note that this function calls kmap_atomic() if @atomic != 0.
- */
-void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
-                           struct pipe_buffer *buf, int atomic)
-{
-        if (atomic) {
-                buf->flags |= PIPE_BUF_FLAG_ATOMIC;
-                return kmap_atomic(buf->page);
-        }
-
-        return kmap(buf->page);
-}
-EXPORT_SYMBOL(generic_pipe_buf_map);
-
-/**
- * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
- * @pipe: the pipe that the buffer belongs to
- * @buf: the buffer that should be unmapped
- * @map_data: the data that the mapping function returned
- *
- * Description:
- *    This function undoes the mapping that ->map() provided.
- */
-void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
-                            struct pipe_buffer *buf, void *map_data)
-{
-        if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
-                buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
-                kunmap_atomic(map_data);
-        } else
-                kunmap(buf->page);
-}
-EXPORT_SYMBOL(generic_pipe_buf_unmap);
-
-/**
  * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
  * @pipe: the pipe that the buffer belongs to
  * @buf: the buffer to attempt to steal
@@ -351,8 +305,6 @@ EXPORT_SYMBOL(generic_pipe_buf_release);
 
 static const struct pipe_buf_operations anon_pipe_buf_ops = {
         .can_merge = 1,
-        .map = generic_pipe_buf_map,
-        .unmap = generic_pipe_buf_unmap,
         .confirm = generic_pipe_buf_confirm,
         .release = anon_pipe_buf_release,
         .steal = generic_pipe_buf_steal,
@@ -361,8 +313,6 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = {
 
 static const struct pipe_buf_operations packet_pipe_buf_ops = {
         .can_merge = 0,
-        .map = generic_pipe_buf_map,
-        .unmap = generic_pipe_buf_unmap,
         .confirm = generic_pipe_buf_confirm,
         .release = anon_pipe_buf_release,
         .steal = generic_pipe_buf_steal,
@@ -410,9 +360,15 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
 
                         atomic = !iov_fault_in_pages_write(iov, chars);
 redo:
-                        addr = ops->map(pipe, buf, atomic);
+                        if (atomic)
+                                addr = kmap_atomic(buf->page);
+                        else
+                                addr = kmap(buf->page);
                         error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
-                        ops->unmap(pipe, buf, addr);
+                        if (atomic)
+                                kunmap_atomic(addr);
+                        else
+                                kunmap(buf->page);
                         if (unlikely(error)) {
                                 /*
                                  * Just retry with the slow path if we failed.
@@ -538,10 +494,16 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
 
                         iov_fault_in_pages_read(iov, chars);
 redo1:
-                        addr = ops->map(pipe, buf, atomic);
+                        if (atomic)
+                                addr = kmap_atomic(buf->page);
+                        else
+                                addr = kmap(buf->page);
                         error = pipe_iov_copy_from_user(offset + addr, iov,
                                                         chars, atomic);
-                        ops->unmap(pipe, buf, addr);
+                        if (atomic)
+                                kunmap_atomic(addr);
+                        else
+                                kunmap(buf->page);
                         ret = error;
                         do_wakeup = 1;
                         if (error) {
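
Note that pipe_read() and pipe_write() above now open-code the atomic/non-atomic choice on the caller's `atomic` flag. That is equivalent to what the deleted helpers did: generic_pipe_buf_map() recorded the choice in PIPE_BUF_FLAG_ATOMIC so that generic_pipe_buf_unmap() knew which unmap to call, whereas the caller already knows which variant it picked, so the flag bookkeeping can go away. A condensed, purely illustrative comparison (map_old/unmap_old restate the deleted helpers, unmap_new the open-coded form; none of these names exist in the tree):

#include <linux/highmem.h>
#include <linux/pipe_fs_i.h>

/* Old shape: the buffer flag remembers how the page was mapped. */
static void *map_old(struct pipe_buffer *buf, int atomic)
{
        if (atomic) {
                buf->flags |= PIPE_BUF_FLAG_ATOMIC;
                return kmap_atomic(buf->page);
        }
        return kmap(buf->page);
}

static void unmap_old(struct pipe_buffer *buf, void *addr)
{
        if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
                buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
                kunmap_atomic(addr);
        } else
                kunmap(buf->page);
}

/* New shape: the caller branches on what it already knows. */
static void unmap_new(struct pipe_buffer *buf, void *addr, int atomic)
{
        if (atomic)
                kunmap_atomic(addr);
        else
                kunmap(buf->page);
}
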
diff --git a/fs/splice.c b/fs/splice.c
index 12028fa41def..ca3bfbd970f3 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -136,8 +136,6 @@ error:
 
 const struct pipe_buf_operations page_cache_pipe_buf_ops = {
         .can_merge = 0,
-        .map = generic_pipe_buf_map,
-        .unmap = generic_pipe_buf_unmap,
         .confirm = page_cache_pipe_buf_confirm,
         .release = page_cache_pipe_buf_release,
         .steal = page_cache_pipe_buf_steal,
@@ -156,8 +154,6 @@ static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
 
 static const struct pipe_buf_operations user_page_pipe_buf_ops = {
         .can_merge = 0,
-        .map = generic_pipe_buf_map,
-        .unmap = generic_pipe_buf_unmap,
         .confirm = generic_pipe_buf_confirm,
         .release = page_cache_pipe_buf_release,
         .steal = user_page_pipe_buf_steal,
@@ -547,8 +543,6 @@ EXPORT_SYMBOL(generic_file_splice_read);
 
 static const struct pipe_buf_operations default_pipe_buf_ops = {
         .can_merge = 0,
-        .map = generic_pipe_buf_map,
-        .unmap = generic_pipe_buf_unmap,
         .confirm = generic_pipe_buf_confirm,
         .release = generic_pipe_buf_release,
         .steal = generic_pipe_buf_steal,
@@ -564,8 +558,6 @@ static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
 /* Pipe buffer operations for a socket and similar. */
 const struct pipe_buf_operations nosteal_pipe_buf_ops = {
         .can_merge = 0,
-        .map = generic_pipe_buf_map,
-        .unmap = generic_pipe_buf_unmap,
         .confirm = generic_pipe_buf_confirm,
         .release = generic_pipe_buf_release,
         .steal = generic_pipe_buf_nosteal,
@@ -767,13 +759,13 @@ int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
                 goto out;
 
         if (buf->page != page) {
-                char *src = buf->ops->map(pipe, buf, 1);
+                char *src = kmap_atomic(buf->page);
                 char *dst = kmap_atomic(page);
 
                 memcpy(dst + offset, src + buf->offset, this_len);
                 flush_dcache_page(page);
                 kunmap_atomic(dst);
-                buf->ops->unmap(pipe, buf, src);
+                kunmap_atomic(src);
         }
         ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,
                                 page, fsdata);
@@ -1067,9 +1059,9 @@ static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
         void *data;
         loff_t tmp = sd->pos;
 
-        data = buf->ops->map(pipe, buf, 0);
+        data = kmap(buf->page);
         ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp);
-        buf->ops->unmap(pipe, buf, data);
+        kunmap(buf->page);
 
         return ret;
 }
@@ -1536,10 +1528,10 @@ static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
          * pages and doing an atomic copy
          */
         if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) {
-                src = buf->ops->map(pipe, buf, 1);
+                src = kmap_atomic(buf->page);
                 ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset,
                                               sd->len);
-                buf->ops->unmap(pipe, buf, src);
+                kunmap_atomic(src);
                 if (!ret) {
                         ret = sd->len;
                         goto out;
@@ -1549,13 +1541,13 @@ static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
         /*
          * No dice, use slow non-atomic map and copy
          */
-        src = buf->ops->map(pipe, buf, 0);
+        src = kmap(buf->page);
 
         ret = sd->len;
         if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
                 ret = -EFAULT;
 
-        buf->ops->unmap(pipe, buf, src);
+        kunmap(buf->page);
 out:
         if (ret > 0)
                 sd->u.userptr += ret;
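
The two pipe_to_user() hunks above keep splice's existing two-step copy strategy and only swap the mapping calls: prefault the destination and try an atomic map plus __copy_to_user_inatomic(), then fall back to a sleeping kmap() plus copy_to_user() if that fails. A condensed sketch of that control flow, assuming a simplified copy_buf_to_user() actor that is not part of this diff:

#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/uaccess.h>

/* Illustrative only: mirrors the shape of pipe_to_user() after this patch. */
static int copy_buf_to_user(struct pipe_buffer *buf,
                            char __user *userptr, int len)
{
        char *src;
        int ret;

        /*
         * Fast path: prefault the destination, then copy from an atomic
         * mapping.  __copy_to_user_inatomic() may still fail, in which
         * case we fall through to the sleeping path below.
         */
        if (!fault_in_pages_writeable(userptr, len)) {
                src = kmap_atomic(buf->page);
                ret = __copy_to_user_inatomic(userptr, src + buf->offset, len);
                kunmap_atomic(src);
                if (!ret)
                        return len;
        }

        /* Slow path: non-atomic map, copy_to_user() may sleep. */
        src = kmap(buf->page);
        ret = copy_to_user(userptr, src + buf->offset, len) ? -EFAULT : len;
        kunmap(buf->page);
        return ret;
}
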