| field | value | date |
|---|---|---|
| author | Al Viro <viro@zeniv.linux.org.uk> | 2014-04-03 15:05:18 -0400 |
| committer | Al Viro <viro@zeniv.linux.org.uk> | 2014-05-06 17:39:42 -0400 |
| commit | f0d1bec9d58d4c038d0ac958c9af82be6eb18045 | |
| tree | dd8f6896941a030b723fedbaff6e64e3073ba6fc | |
| parent | 84c3d55cc474f9c234c023c92e2769f940d5548c | |
new helper: copy_page_from_iter()
parallel to copy_page_to_iter(). pipe_write() switched to it (and became
->write_iter()).
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
| file | mode | lines changed |
|---|---|---|
| fs/pipe.c | -rw-r--r-- | 129 |
| include/linux/uio.h | -rw-r--r-- | 2 |
| mm/iov_iter.c | -rw-r--r-- | 78 |

3 files changed, 99 insertions, 110 deletions
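As a reading aid before the diff itself: the sketch below shows how a ->write_iter()-style caller is expected to use the new helper, mirroring the pipe_write() change further down. The wrapper name fill_one_page() and the include list are illustrative assumptions, not part of this commit; only copy_page_from_iter(), iov_iter_count() and the "short copy while data remains means a fault" convention come from the patch.

```c
#include <linux/mm.h>      /* struct page, PAGE_SIZE */
#include <linux/uio.h>     /* struct iov_iter, copy_page_from_iter(), iov_iter_count() */
#include <linux/errno.h>

/*
 * Hypothetical caller, for illustration only: fill one page from an
 * iov_iter.  copy_page_from_iter() copies up to PAGE_SIZE bytes into
 * the page, advances the iterator, and returns the number of bytes
 * actually copied; a short copy while the iterator still holds data
 * means a user-space fault occurred.
 */
static ssize_t fill_one_page(struct page *page, struct iov_iter *from)
{
	size_t copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);

	if (unlikely(copied < PAGE_SIZE && iov_iter_count(from)))
		return -EFAULT;	/* faulted partway through the user buffer */
	return copied;
}
```

Note that the helper advances the iterator itself, so the caller never has to touch iovec bookkeeping.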
```diff
diff --git a/fs/pipe.c b/fs/pipe.c
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -116,50 +116,6 @@ void pipe_wait(struct pipe_inode_info *pipe)
 	pipe_lock(pipe);
 }
 
-static int
-pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
-			int atomic)
-{
-	unsigned long copy;
-
-	while (len > 0) {
-		while (!iov->iov_len)
-			iov++;
-		copy = min_t(unsigned long, len, iov->iov_len);
-
-		if (atomic) {
-			if (__copy_from_user_inatomic(to, iov->iov_base, copy))
-				return -EFAULT;
-		} else {
-			if (copy_from_user(to, iov->iov_base, copy))
-				return -EFAULT;
-		}
-		to += copy;
-		len -= copy;
-		iov->iov_base += copy;
-		iov->iov_len -= copy;
-	}
-	return 0;
-}
-
-/*
- * Pre-fault in the user memory, so we can use atomic copies.
- */
-static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
-{
-	while (!iov->iov_len)
-		iov++;
-
-	while (len > 0) {
-		unsigned long this_len;
-
-		this_len = min_t(unsigned long, len, iov->iov_len);
-		fault_in_pages_readable(iov->iov_base, this_len);
-		len -= this_len;
-		iov++;
-	}
-}
-
 static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
 				  struct pipe_buffer *buf)
 {
@@ -380,24 +336,19 @@ static inline int is_packetized(struct file *file)
 }
 
 static ssize_t
-pipe_write(struct kiocb *iocb, const struct iovec *_iov,
-	   unsigned long nr_segs, loff_t ppos)
+pipe_write(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *filp = iocb->ki_filp;
 	struct pipe_inode_info *pipe = filp->private_data;
-	ssize_t ret;
-	int do_wakeup;
-	struct iovec *iov = (struct iovec *)_iov;
-	size_t total_len;
+	ssize_t ret = 0;
+	int do_wakeup = 0;
+	size_t total_len = iov_iter_count(from);
 	ssize_t chars;
 
-	total_len = iov_length(iov, nr_segs);
 	/* Null write succeeds. */
 	if (unlikely(total_len == 0))
 		return 0;
 
-	do_wakeup = 0;
-	ret = 0;
 	__pipe_lock(pipe);
 
 	if (!pipe->readers) {
@@ -416,38 +367,19 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
 		int offset = buf->offset + buf->len;
 
 		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
-			int error, atomic = 1;
-			void *addr;
-
-			error = ops->confirm(pipe, buf);
+			int error = ops->confirm(pipe, buf);
 			if (error)
 				goto out;
 
-			iov_fault_in_pages_read(iov, chars);
-redo1:
-			if (atomic)
-				addr = kmap_atomic(buf->page);
-			else
-				addr = kmap(buf->page);
-			error = pipe_iov_copy_from_user(offset + addr, iov,
-							chars, atomic);
-			if (atomic)
-				kunmap_atomic(addr);
-			else
-				kunmap(buf->page);
-			ret = error;
-			do_wakeup = 1;
-			if (error) {
-				if (atomic) {
-					atomic = 0;
-					goto redo1;
-				}
+			ret = copy_page_from_iter(buf->page, offset, chars, from);
+			if (unlikely(ret < chars)) {
+				error = -EFAULT;
 				goto out;
 			}
+			do_wakeup = 1;
 			buf->len += chars;
-			total_len -= chars;
 			ret = chars;
-			if (!total_len)
+			if (!iov_iter_count(from))
 				goto out;
 		}
 	}
@@ -466,8 +398,7 @@ redo1:
 		int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
 		struct pipe_buffer *buf = pipe->bufs + newbuf;
 		struct page *page = pipe->tmp_page;
-		char *src;
-		int error, atomic = 1;
+		int copied;
 
 		if (!page) {
 			page = alloc_page(GFP_HIGHUSER);
@@ -483,40 +414,19 @@ redo1:
 			 * FIXME! Is this really true?
 			 */
 			do_wakeup = 1;
-			chars = PAGE_SIZE;
-			if (chars > total_len)
-				chars = total_len;
-
-			iov_fault_in_pages_read(iov, chars);
-redo2:
-			if (atomic)
-				src = kmap_atomic(page);
-			else
-				src = kmap(page);
-
-			error = pipe_iov_copy_from_user(src, iov, chars,
-							atomic);
-			if (atomic)
-				kunmap_atomic(src);
-			else
-				kunmap(page);
-
-			if (unlikely(error)) {
-				if (atomic) {
-					atomic = 0;
-					goto redo2;
-				}
+			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
+			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
 				if (!ret)
-					ret = error;
+					ret = -EFAULT;
 				break;
 			}
-			ret += chars;
+			ret += copied;
 
 			/* Insert it into the buffer array */
 			buf->page = page;
 			buf->ops = &anon_pipe_buf_ops;
 			buf->offset = 0;
-			buf->len = chars;
+			buf->len = copied;
 			buf->flags = 0;
 			if (is_packetized(filp)) {
 				buf->ops = &packet_pipe_buf_ops;
@@ -525,8 +435,7 @@ redo2:
 			pipe->nrbufs = ++bufs;
 			pipe->tmp_page = NULL;
 
-			total_len -= chars;
-			if (!total_len)
+			if (!iov_iter_count(from))
 				break;
 		}
 		if (bufs < pipe->buffers)
@@ -1040,8 +949,8 @@ const struct file_operations pipefifo_fops = {
 	.llseek = no_llseek,
 	.read = new_sync_read,
 	.read_iter = pipe_read,
-	.write = do_sync_write,
-	.aio_write = pipe_write,
+	.write = new_sync_write,
+	.write_iter = pipe_write,
 	.poll = pipe_poll,
 	.unlocked_ioctl = pipe_ioctl,
 	.release = pipe_release,
```
```diff
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 532f59d0adbb..66012352d333 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -68,6 +68,8 @@ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i);
+size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i);
 unsigned long iov_iter_alignment(const struct iov_iter *i);
 void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
 			unsigned long nr_segs, size_t count);
```
```diff
diff --git a/mm/iov_iter.c b/mm/iov_iter.c
index a5c691c1a283..081e3273085b 100644
--- a/mm/iov_iter.c
+++ b/mm/iov_iter.c
@@ -82,6 +82,84 @@ done:
 }
 EXPORT_SYMBOL(copy_page_to_iter);
 
+size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i)
+{
+	size_t skip, copy, left, wanted;
+	const struct iovec *iov;
+	char __user *buf;
+	void *kaddr, *to;
+
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	wanted = bytes;
+	iov = i->iov;
+	skip = i->iov_offset;
+	buf = iov->iov_base + skip;
+	copy = min(bytes, iov->iov_len - skip);
+
+	if (!fault_in_pages_readable(buf, copy)) {
+		kaddr = kmap_atomic(page);
+		to = kaddr + offset;
+
+		/* first chunk, usually the only one */
+		left = __copy_from_user_inatomic(to, buf, copy);
+		copy -= left;
+		skip += copy;
+		to += copy;
+		bytes -= copy;
+
+		while (unlikely(!left && bytes)) {
+			iov++;
+			buf = iov->iov_base;
+			copy = min(bytes, iov->iov_len);
+			left = __copy_from_user_inatomic(to, buf, copy);
+			copy -= left;
+			skip = copy;
+			to += copy;
+			bytes -= copy;
+		}
+		if (likely(!bytes)) {
+			kunmap_atomic(kaddr);
+			goto done;
+		}
+		offset = to - kaddr;
+		buf += copy;
+		kunmap_atomic(kaddr);
+		copy = min(bytes, iov->iov_len - skip);
+	}
+	/* Too bad - revert to non-atomic kmap */
+	kaddr = kmap(page);
+	to = kaddr + offset;
+	left = __copy_from_user(to, buf, copy);
+	copy -= left;
+	skip += copy;
+	to += copy;
+	bytes -= copy;
+	while (unlikely(!left && bytes)) {
+		iov++;
+		buf = iov->iov_base;
+		copy = min(bytes, iov->iov_len);
+		left = __copy_from_user(to, buf, copy);
+		copy -= left;
+		skip = copy;
+		to += copy;
+		bytes -= copy;
+	}
+	kunmap(page);
+done:
+	i->count -= wanted - bytes;
+	i->nr_segs -= iov - i->iov;
+	i->iov = iov;
+	i->iov_offset = skip;
+	return wanted - bytes;
+}
+EXPORT_SYMBOL(copy_page_from_iter);
+
 static size_t __iovec_copy_from_user_inatomic(char *vaddr,
 			const struct iovec *iov, size_t base, size_t bytes)
 {
```
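The helper above follows the usual optimistic pattern: pre-fault the first user chunk, copy under kmap_atomic(), and drop back to the sleeping kmap()/__copy_from_user() path only when the atomic copy cannot finish. A deliberately simplified, single-segment sketch of that strategy follows; the function name and include list are assumptions for illustration, and unlike the real copy_page_from_iter() it does not walk multiple iovec segments or update iterator state.

```c
#include <linux/highmem.h>   /* kmap(), kmap_atomic() */
#include <linux/pagemap.h>   /* fault_in_pages_readable() */
#include <linux/uaccess.h>   /* __copy_from_user(), __copy_from_user_inatomic() */

/*
 * Illustrative single-segment version of the copy strategy used above:
 * pre-fault the user buffer, attempt the copy under kmap_atomic(), and
 * fall back to the sleeping kmap()/__copy_from_user() path only if the
 * atomic copy could not complete.  Returns the number of bytes copied.
 */
static size_t copy_user_to_page_sketch(struct page *page, size_t offset,
				       const char __user *buf, size_t bytes)
{
	void *kaddr;
	size_t left;

	if (!fault_in_pages_readable(buf, bytes)) {
		kaddr = kmap_atomic(page);
		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
		kunmap_atomic(kaddr);
		if (!left)
			return bytes;	/* fast path: everything copied atomically */
	}
	/* Too bad - revert to non-atomic kmap and redo the whole range */
	kaddr = kmap(page);
	left = __copy_from_user(kaddr + offset, buf, bytes);
	kunmap(page);
	return bytes - left;
}
```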
