author	Matthew Wilcox <willy@linux.intel.com>	2014-08-01 09:27:22 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2014-10-09 02:39:03 -0400
commit	c35e02480014f7a86e264a2fda39a568690163da (patch)
tree	6b1f1afc1c19bcbfc9c96f707c93627b0f4acbf3
parent	475d0db742e3755c6b267f48577ff7cbb7dfda0d (diff)
Add copy_to_iter(), copy_from_iter() and iov_iter_zero()
For DAX, we want to be able to copy between iovecs and kernel addresses
that don't necessarily have a struct page.  This is a fairly simple
rearrangement for bvec iters to kmap the pages outside and pass them in,
but for user iovecs it gets more complicated because we might try various
different ways to kmap the memory.  Duplicating the existing logic works
out best in this case.

We need to be able to write zeroes to an iovec for reads from unwritten
ranges in a file.  This is performed by the new iov_iter_zero() function,
again patterned after the existing code that handles iovec iterators.

[AV: and export the buggers...]

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
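[Editorial note: for illustration, here is a minimal sketch of how a
DAX-style read path might use the two exported helpers.  Every name in
it is hypothetical, invented for this example; only copy_to_iter() and
iov_iter_zero() come from this patch.  The kernel address is assumed to
come straight from the block device, with no struct page behind it:

	/* Hypothetical caller: transfer one extent of a file into the
	 * iterator, or zero it if the extent is unwritten. */
	static size_t example_read_extent(void *kaddr, size_t len,
					  bool unwritten, struct iov_iter *iter)
	{
		if (unwritten)
			return iov_iter_zero(len, iter);   /* hole reads as zeroes */
		return copy_to_iter(kaddr, len, iter);     /* no struct page needed */
	}

Both helpers return the number of bytes actually transferred and advance
the iterator past them, so a caller can detect a short transfer (e.g. a
faulting user address) by comparing the return value against len.]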
-rw-r--r--	include/linux/uio.h	  3
-rw-r--r--	mm/iov_iter.c		240
2 files changed, 229 insertions(+), 14 deletions(-)
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 290fbf0b6b8a..9b1581414cd4 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -80,6 +80,9 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 			struct iov_iter *i);
 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 			struct iov_iter *i);
+size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i);
+size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
+size_t iov_iter_zero(size_t bytes, struct iov_iter *);
 unsigned long iov_iter_alignment(const struct iov_iter *i);
 void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
 			unsigned long nr_segs, size_t count);
diff --git a/mm/iov_iter.c b/mm/iov_iter.c
index 9a09f2034fcc..eafcf60f6b83 100644
--- a/mm/iov_iter.c
+++ b/mm/iov_iter.c
@@ -4,6 +4,96 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 
+static size_t copy_to_iter_iovec(void *from, size_t bytes, struct iov_iter *i)
+{
+	size_t skip, copy, left, wanted;
+	const struct iovec *iov;
+	char __user *buf;
+
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	wanted = bytes;
+	iov = i->iov;
+	skip = i->iov_offset;
+	buf = iov->iov_base + skip;
+	copy = min(bytes, iov->iov_len - skip);
+
+	left = __copy_to_user(buf, from, copy);
+	copy -= left;
+	skip += copy;
+	from += copy;
+	bytes -= copy;
+	while (unlikely(!left && bytes)) {
+		iov++;
+		buf = iov->iov_base;
+		copy = min(bytes, iov->iov_len);
+		left = __copy_to_user(buf, from, copy);
+		copy -= left;
+		skip = copy;
+		from += copy;
+		bytes -= copy;
+	}
+
+	if (skip == iov->iov_len) {
+		iov++;
+		skip = 0;
+	}
+	i->count -= wanted - bytes;
+	i->nr_segs -= iov - i->iov;
+	i->iov = iov;
+	i->iov_offset = skip;
+	return wanted - bytes;
+}
+
+static size_t copy_from_iter_iovec(void *to, size_t bytes, struct iov_iter *i)
+{
+	size_t skip, copy, left, wanted;
+	const struct iovec *iov;
+	char __user *buf;
+
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	wanted = bytes;
+	iov = i->iov;
+	skip = i->iov_offset;
+	buf = iov->iov_base + skip;
+	copy = min(bytes, iov->iov_len - skip);
+
+	left = __copy_from_user(to, buf, copy);
+	copy -= left;
+	skip += copy;
+	to += copy;
+	bytes -= copy;
+	while (unlikely(!left && bytes)) {
+		iov++;
+		buf = iov->iov_base;
+		copy = min(bytes, iov->iov_len);
+		left = __copy_from_user(to, buf, copy);
+		copy -= left;
+		skip = copy;
+		to += copy;
+		bytes -= copy;
+	}
+
+	if (skip == iov->iov_len) {
+		iov++;
+		skip = 0;
+	}
+	i->count -= wanted - bytes;
+	i->nr_segs -= iov - i->iov;
+	i->iov = iov;
+	i->iov_offset = skip;
+	return wanted - bytes;
+}
+
 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
@@ -166,6 +256,50 @@ done:
 	return wanted - bytes;
 }
 
+static size_t zero_iovec(size_t bytes, struct iov_iter *i)
+{
+	size_t skip, copy, left, wanted;
+	const struct iovec *iov;
+	char __user *buf;
+
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	wanted = bytes;
+	iov = i->iov;
+	skip = i->iov_offset;
+	buf = iov->iov_base + skip;
+	copy = min(bytes, iov->iov_len - skip);
+
+	left = __clear_user(buf, copy);
+	copy -= left;
+	skip += copy;
+	bytes -= copy;
+
+	while (unlikely(!left && bytes)) {
+		iov++;
+		buf = iov->iov_base;
+		copy = min(bytes, iov->iov_len);
+		left = __clear_user(buf, copy);
+		copy -= left;
+		skip = copy;
+		bytes -= copy;
+	}
+
+	if (skip == iov->iov_len) {
+		iov++;
+		skip = 0;
+	}
+	i->count -= wanted - bytes;
+	i->nr_segs -= iov - i->iov;
+	i->iov = iov;
+	i->iov_offset = skip;
+	return wanted - bytes;
+}
+
 static size_t __iovec_copy_from_user_inatomic(char *vaddr,
 			const struct iovec *iov, size_t base, size_t bytes)
 {
@@ -414,12 +548,17 @@ static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t
 	kunmap_atomic(to);
 }
 
-static size_t copy_page_to_iter_bvec(struct page *page, size_t offset, size_t bytes,
-					struct iov_iter *i)
+static void memzero_page(struct page *page, size_t offset, size_t len)
+{
+	char *addr = kmap_atomic(page);
+	memset(addr + offset, 0, len);
+	kunmap_atomic(addr);
+}
+
+static size_t copy_to_iter_bvec(void *from, size_t bytes, struct iov_iter *i)
 {
 	size_t skip, copy, wanted;
 	const struct bio_vec *bvec;
-	void *kaddr, *from;
 
 	if (unlikely(bytes > i->count))
 		bytes = i->count;
@@ -432,8 +571,6 @@ static size_t copy_page_to_iter_bvec(struct page *page, size_t offset, size_t by
 	skip = i->iov_offset;
 	copy = min_t(size_t, bytes, bvec->bv_len - skip);
 
-	kaddr = kmap_atomic(page);
-	from = kaddr + offset;
 	memcpy_to_page(bvec->bv_page, skip + bvec->bv_offset, from, copy);
 	skip += copy;
 	from += copy;
@@ -446,7 +583,6 @@ static size_t copy_page_to_iter_bvec(struct page *page, size_t offset, size_t by
 		from += copy;
 		bytes -= copy;
 	}
-	kunmap_atomic(kaddr);
 	if (skip == bvec->bv_len) {
 		bvec++;
 		skip = 0;
@@ -458,12 +594,10 @@ static size_t copy_page_to_iter_bvec(struct page *page, size_t offset, size_t by
 	return wanted - bytes;
 }
 
-static size_t copy_page_from_iter_bvec(struct page *page, size_t offset, size_t bytes,
-					struct iov_iter *i)
+static size_t copy_from_iter_bvec(void *to, size_t bytes, struct iov_iter *i)
 {
 	size_t skip, copy, wanted;
 	const struct bio_vec *bvec;
-	void *kaddr, *to;
 
 	if (unlikely(bytes > i->count))
 		bytes = i->count;
@@ -475,10 +609,6 @@ static size_t copy_page_from_iter_bvec(struct page *page, size_t offset, size_t
 	bvec = i->bvec;
 	skip = i->iov_offset;
 
-	kaddr = kmap_atomic(page);
-
-	to = kaddr + offset;
-
 	copy = min(bytes, bvec->bv_len - skip);
 
 	memcpy_from_page(to, bvec->bv_page, bvec->bv_offset + skip, copy);
@@ -495,7 +625,6 @@ static size_t copy_page_from_iter_bvec(struct page *page, size_t offset, size_t
 		to += copy;
 		bytes -= copy;
 	}
-	kunmap_atomic(kaddr);
 	if (skip == bvec->bv_len) {
 		bvec++;
 		skip = 0;
@@ -507,6 +636,61 @@ static size_t copy_page_from_iter_bvec(struct page *page, size_t offset, size_t
 	return wanted;
 }
 
+static size_t copy_page_to_iter_bvec(struct page *page, size_t offset,
+					size_t bytes, struct iov_iter *i)
+{
+	void *kaddr = kmap_atomic(page);
+	size_t wanted = copy_to_iter_bvec(kaddr + offset, bytes, i);
+	kunmap_atomic(kaddr);
+	return wanted;
+}
+
+static size_t copy_page_from_iter_bvec(struct page *page, size_t offset,
+					size_t bytes, struct iov_iter *i)
+{
+	void *kaddr = kmap_atomic(page);
+	size_t wanted = copy_from_iter_bvec(kaddr + offset, bytes, i);
+	kunmap_atomic(kaddr);
+	return wanted;
+}
+
+static size_t zero_bvec(size_t bytes, struct iov_iter *i)
+{
+	size_t skip, copy, wanted;
+	const struct bio_vec *bvec;
+
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	wanted = bytes;
+	bvec = i->bvec;
+	skip = i->iov_offset;
+	copy = min_t(size_t, bytes, bvec->bv_len - skip);
+
+	memzero_page(bvec->bv_page, skip + bvec->bv_offset, copy);
+	skip += copy;
+	bytes -= copy;
+	while (bytes) {
+		bvec++;
+		copy = min(bytes, (size_t)bvec->bv_len);
+		memzero_page(bvec->bv_page, bvec->bv_offset, copy);
+		skip = copy;
+		bytes -= copy;
+	}
+	if (skip == bvec->bv_len) {
+		bvec++;
+		skip = 0;
+	}
+	i->count -= wanted - bytes;
+	i->nr_segs -= bvec - i->bvec;
+	i->bvec = bvec;
+	i->iov_offset = skip;
+	return wanted - bytes;
+}
+
 static size_t copy_from_user_bvec(struct page *page,
 		struct iov_iter *i, unsigned long offset, size_t bytes)
 {
@@ -672,6 +856,34 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 }
 EXPORT_SYMBOL(copy_page_from_iter);
 
+size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
+{
+	if (i->type & ITER_BVEC)
+		return copy_to_iter_bvec(addr, bytes, i);
+	else
+		return copy_to_iter_iovec(addr, bytes, i);
+}
+EXPORT_SYMBOL(copy_to_iter);
+
+size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+{
+	if (i->type & ITER_BVEC)
+		return copy_from_iter_bvec(addr, bytes, i);
+	else
+		return copy_from_iter_iovec(addr, bytes, i);
+}
+EXPORT_SYMBOL(copy_from_iter);
+
+size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
+{
+	if (i->type & ITER_BVEC) {
+		return zero_bvec(bytes, i);
+	} else {
+		return zero_iovec(bytes, i);
+	}
+}
+EXPORT_SYMBOL(iov_iter_zero);
+
 size_t iov_iter_copy_from_user_atomic(struct page *page,
 		struct iov_iter *i, unsigned long offset, size_t bytes)
 {
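
[Editorial note: the iovec variants added above all share one walk
pattern: consume min(bytes, iov_len - skip) from the current segment,
advance to the next segment only while the previous step completed in
full, then write the surviving segment pointer and offset back into the
iterator.  As a clarifying aside, this stand-alone user-space analogue
(plain memcpy() over a struct iovec array; the function is invented for
illustration and is not part of the patch) shows the same bookkeeping
without the fault handling that __copy_to_user() requires:

	#include <string.h>
	#include <sys/uio.h>

	/* Copy "bytes" bytes from "from" across an iovec array, starting
	 * "skip" bytes into the first segment.  Returns the number of bytes
	 * consumed; *iovp and *skipp are updated the way the kernel code
	 * updates i->iov and i->iov_offset.  memcpy() cannot fault, so the
	 * short-copy ("left") handling of the kernel version is omitted.
	 * Assumes no zero-length segments and that the segments cover at
	 * least "bytes" (the kernel clamps bytes to i->count instead). */
	static size_t copy_to_iovecs(const char *from, size_t bytes,
				     const struct iovec **iovp, size_t *skipp)
	{
		const struct iovec *iov = *iovp;
		size_t skip = *skipp;
		size_t wanted = bytes;

		while (bytes) {
			size_t copy = iov->iov_len - skip;

			if (copy > bytes)
				copy = bytes;
			memcpy((char *)iov->iov_base + skip, from, copy);
			from += copy;
			bytes -= copy;
			skip += copy;
			if (skip == iov->iov_len) {	/* segment exhausted */
				iov++;
				skip = 0;
			}
		}
		*iovp = iov;
		*skipp = skip;
		return wanted - bytes;
	}
]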