Diffstat (limited to 'lib/iov_iter.c')
-rw-r--r--   lib/iov_iter.c | 267
1 file changed, 215 insertions, 52 deletions
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f2bd21b93dfc..60abc44385b7 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1,4 +1,5 @@
 #include <linux/export.h>
+#include <linux/bvec.h>
 #include <linux/uio.h>
 #include <linux/pagemap.h>
 #include <linux/slab.h>
@@ -72,19 +73,21 @@
 }
 
 #define iterate_all_kinds(i, n, v, I, B, K) { \
-	size_t skip = i->iov_offset; \
-	if (unlikely(i->type & ITER_BVEC)) { \
-		struct bio_vec v; \
-		struct bvec_iter __bi; \
-		iterate_bvec(i, n, v, __bi, skip, (B)) \
-	} else if (unlikely(i->type & ITER_KVEC)) { \
-		const struct kvec *kvec; \
-		struct kvec v; \
-		iterate_kvec(i, n, v, kvec, skip, (K)) \
-	} else { \
-		const struct iovec *iov; \
-		struct iovec v; \
-		iterate_iovec(i, n, v, iov, skip, (I)) \
+	if (likely(n)) { \
+		size_t skip = i->iov_offset; \
+		if (unlikely(i->type & ITER_BVEC)) { \
+			struct bio_vec v; \
+			struct bvec_iter __bi; \
+			iterate_bvec(i, n, v, __bi, skip, (B)) \
+		} else if (unlikely(i->type & ITER_KVEC)) { \
+			const struct kvec *kvec; \
+			struct kvec v; \
+			iterate_kvec(i, n, v, kvec, skip, (K)) \
+		} else { \
+			const struct iovec *iov; \
+			struct iovec v; \
+			iterate_iovec(i, n, v, iov, skip, (I)) \
+		} \
 	} \
 }
 
@@ -568,6 +571,31 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(copy_from_iter);
 
+bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
+{
+	char *to = addr;
+	if (unlikely(i->type & ITER_PIPE)) {
+		WARN_ON(1);
+		return false;
+	}
+	if (unlikely(i->count < bytes))
+		return false;
+
+	iterate_all_kinds(i, bytes, v, ({
+		if (__copy_from_user((to += v.iov_len) - v.iov_len,
+				     v.iov_base, v.iov_len))
+			return false;
+		0;}),
+		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+				 v.bv_offset, v.bv_len),
+		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+	)
+
+	iov_iter_advance(i, bytes);
+	return true;
+}
+EXPORT_SYMBOL(copy_from_iter_full);
+
 size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
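
The *_full() variants added above are all-or-nothing: they return true and advance the iterator only if every byte could be copied, and return false without advancing otherwise. A minimal caller-side sketch of that contract (hypothetical helper and struct, not part of this patch):

	/* Hypothetical example of the all-or-nothing contract of
	 * copy_from_iter_full(); assumes a caller that needs a fixed-size
	 * header from a user-supplied iov_iter. */
	struct req_hdr {
		u32 opcode;
		u32 len;
	};

	static int read_req_hdr(struct req_hdr *hdr, struct iov_iter *from)
	{
		/* Either the whole header is copied and the iterator advances
		 * by sizeof(*hdr), or nothing is consumed and we report a
		 * fault. */
		if (!copy_from_iter_full(hdr, sizeof(*hdr), from))
			return -EFAULT;
		return 0;
	}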
@@ -587,6 +615,30 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(copy_from_iter_nocache);
 
+bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
+{
+	char *to = addr;
+	if (unlikely(i->type & ITER_PIPE)) {
+		WARN_ON(1);
+		return false;
+	}
+	if (unlikely(i->count < bytes))
+		return false;
+	iterate_all_kinds(i, bytes, v, ({
+		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+					     v.iov_base, v.iov_len))
+			return false;
+		0;}),
+		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+				 v.bv_offset, v.bv_len),
+		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+	)
+
+	iov_iter_advance(i, bytes);
+	return true;
+}
+EXPORT_SYMBOL(copy_from_iter_full_nocache);
+
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 		struct iov_iter *i)
 {
@@ -678,43 +730,50 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 }
 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
 
+static inline void pipe_truncate(struct iov_iter *i)
+{
+	struct pipe_inode_info *pipe = i->pipe;
+	if (pipe->nrbufs) {
+		size_t off = i->iov_offset;
+		int idx = i->idx;
+		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
+		if (off) {
+			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
+			idx = next_idx(idx, pipe);
+			nrbufs++;
+		}
+		while (pipe->nrbufs > nrbufs) {
+			pipe_buf_release(pipe, &pipe->bufs[idx]);
+			idx = next_idx(idx, pipe);
+			pipe->nrbufs--;
+		}
+	}
+}
+
 static void pipe_advance(struct iov_iter *i, size_t size)
 {
 	struct pipe_inode_info *pipe = i->pipe;
-	struct pipe_buffer *buf;
-	int idx = i->idx;
-	size_t off = i->iov_offset, orig_sz;
-
 	if (unlikely(i->count < size))
 		size = i->count;
-	orig_sz = size;
-
 	if (size) {
+		struct pipe_buffer *buf;
+		size_t off = i->iov_offset, left = size;
+		int idx = i->idx;
 		if (off) /* make it relative to the beginning of buffer */
-			size += off - pipe->bufs[idx].offset;
+			left += off - pipe->bufs[idx].offset;
 		while (1) {
 			buf = &pipe->bufs[idx];
-			if (size <= buf->len)
+			if (left <= buf->len)
 				break;
-			size -= buf->len;
+			left -= buf->len;
 			idx = next_idx(idx, pipe);
 		}
-		buf->len = size;
 		i->idx = idx;
-		off = i->iov_offset = buf->offset + size;
+		i->iov_offset = buf->offset + left;
 	}
-	if (off)
-		idx = next_idx(idx, pipe);
-	if (pipe->nrbufs) {
-		int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
-		/* [curbuf,unused) is in use. Free [idx,unused) */
-		while (idx != unused) {
-			pipe_buf_release(pipe, &pipe->bufs[idx]);
-			idx = next_idx(idx, pipe);
-			pipe->nrbufs--;
-		}
-	}
-	i->count -= orig_sz;
+	i->count -= size;
+	/* ... and discard everything past that point */
+	pipe_truncate(i);
 }
 
 void iov_iter_advance(struct iov_iter *i, size_t size)
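
pipe_advance() now only computes the new (idx, iov_offset) position; releasing every pipe buffer past that point is factored out into pipe_truncate(), which iov_iter_revert() below reuses. The index arithmetic relies on pipe->buffers being a power of two, so wrap-around is a mask rather than a modulo. A sketch of the wrap-around helper this code depends on (the helper defined earlier in this file is assumed to be equivalent):

	/* Sketch of the wrap-around helper used by pipe_truncate() and
	 * pipe_advance(); assumes pipe->buffers is always a power of two. */
	static inline int next_idx(int idx, struct pipe_inode_info *pipe)
	{
		return (idx + 1) & (pipe->buffers - 1);
	}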
@@ -727,6 +786,68 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
 }
 EXPORT_SYMBOL(iov_iter_advance);
 
+void iov_iter_revert(struct iov_iter *i, size_t unroll)
+{
+	if (!unroll)
+		return;
+	i->count += unroll;
+	if (unlikely(i->type & ITER_PIPE)) {
+		struct pipe_inode_info *pipe = i->pipe;
+		int idx = i->idx;
+		size_t off = i->iov_offset;
+		while (1) {
+			size_t n = off - pipe->bufs[idx].offset;
+			if (unroll < n) {
+				off -= (n - unroll);
+				break;
+			}
+			unroll -= n;
+			if (!unroll && idx == i->start_idx) {
+				off = 0;
+				break;
+			}
+			if (!idx--)
+				idx = pipe->buffers - 1;
+			off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
+		}
+		i->iov_offset = off;
+		i->idx = idx;
+		pipe_truncate(i);
+		return;
+	}
+	if (unroll <= i->iov_offset) {
+		i->iov_offset -= unroll;
+		return;
+	}
+	unroll -= i->iov_offset;
+	if (i->type & ITER_BVEC) {
+		const struct bio_vec *bvec = i->bvec;
+		while (1) {
+			size_t n = (--bvec)->bv_len;
+			i->nr_segs++;
+			if (unroll <= n) {
+				i->bvec = bvec;
+				i->iov_offset = n - unroll;
+				return;
+			}
+			unroll -= n;
+		}
+	} else { /* same logics for iovec and kvec */
+		const struct iovec *iov = i->iov;
+		while (1) {
+			size_t n = (--iov)->iov_len;
+			i->nr_segs++;
+			if (unroll <= n) {
+				i->iov = iov;
+				i->iov_offset = n - unroll;
+				return;
+			}
+			unroll -= n;
+		}
+	}
+}
+EXPORT_SYMBOL(iov_iter_revert);
+
 /*
  * Return the count of just the current iov_iter segment.
  */
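
iov_iter_revert() walks the iterator backwards by unroll bytes, so a caller can hand an iterator to an operation that may consume part of it and then restore the original position on failure. A hedged caller-side sketch (hypothetical helper, not taken from this patch):

	/* Hypothetical: copy exactly len bytes or leave the iterator where it
	 * started, built on the new iov_iter_revert() primitive. */
	static int copy_exactly(void *buf, size_t len, struct iov_iter *from)
	{
		size_t copied = copy_from_iter(buf, len, from);

		if (copied != len) {
			/* undo whatever copy_from_iter() consumed */
			iov_iter_revert(from, copied);
			return -EFAULT;
		}
		return 0;
	}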
@@ -774,11 +895,13 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
 			size_t count)
 {
 	BUG_ON(direction != ITER_PIPE);
+	WARN_ON(pipe->nrbufs == pipe->buffers);
 	i->type = direction;
 	i->pipe = pipe;
 	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
 	i->iov_offset = 0;
 	i->count = count;
+	i->start_idx = i->idx;
 }
 EXPORT_SYMBOL(iov_iter_pipe);
 
@@ -787,11 +910,8 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
 	unsigned long res = 0;
 	size_t size = i->count;
 
-	if (!size)
-		return 0;
-
 	if (unlikely(i->type & ITER_PIPE)) {
-		if (i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
+		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
 			return size | i->iov_offset;
 		return size;
 	}
@@ -806,10 +926,8 @@ EXPORT_SYMBOL(iov_iter_alignment);
 
 unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
 {
 	unsigned long res = 0;
 	size_t size = i->count;
-	if (!size)
-		return 0;
 
 	if (unlikely(i->type & ITER_PIPE)) {
 		WARN_ON(1);
@@ -824,7 +942,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
 		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
 			(size != v.iov_len ? size : 0))
 		);
-	return res;
+	return res;
 }
 EXPORT_SYMBOL(iov_iter_gap_alignment);
 
@@ -858,6 +976,9 @@ static ssize_t pipe_get_pages(struct iov_iter *i,
 	size_t capacity;
 	int idx;
 
+	if (!maxsize)
+		return 0;
+
 	if (!sanity(i))
 		return -EFAULT;
 
@@ -876,9 +997,6 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
 	if (maxsize > i->count)
 		maxsize = i->count;
 
-	if (!maxsize)
-		return 0;
-
 	if (unlikely(i->type & ITER_PIPE))
 		return pipe_get_pages(i, pages, maxsize, maxpages, start);
 	iterate_all_kinds(i, maxsize, v, ({
@@ -925,6 +1043,9 @@ static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
 	int idx;
 	int npages;
 
+	if (!maxsize)
+		return 0;
+
 	if (!sanity(i))
 		return -EFAULT;
 
@@ -956,9 +1077,6 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
 	if (maxsize > i->count)
 		maxsize = i->count;
 
-	if (!maxsize)
-		return 0;
-
 	if (unlikely(i->type & ITER_PIPE))
 		return pipe_get_pages_alloc(i, pages, maxsize, start);
 	iterate_all_kinds(i, maxsize, v, ({
@@ -1008,7 +1126,7 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
 	}
 	iterate_and_advance(i, bytes, v, ({
 		int err = 0;
-		next = csum_and_copy_from_user(v.iov_base,
+		next = csum_and_copy_from_user(v.iov_base,
 					       (to += v.iov_len) - v.iov_len,
 					       v.iov_len, 0, &err);
 		if (!err) {
@@ -1037,6 +1155,51 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
 }
 EXPORT_SYMBOL(csum_and_copy_from_iter);
 
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
+			       struct iov_iter *i)
+{
+	char *to = addr;
+	__wsum sum, next;
+	size_t off = 0;
+	sum = *csum;
+	if (unlikely(i->type & ITER_PIPE)) {
+		WARN_ON(1);
+		return false;
+	}
+	if (unlikely(i->count < bytes))
+		return false;
+	iterate_all_kinds(i, bytes, v, ({
+		int err = 0;
+		next = csum_and_copy_from_user(v.iov_base,
+					       (to += v.iov_len) - v.iov_len,
+					       v.iov_len, 0, &err);
+		if (err)
+			return false;
+		sum = csum_block_add(sum, next, off);
+		off += v.iov_len;
+		0;
+	}), ({
+		char *p = kmap_atomic(v.bv_page);
+		next = csum_partial_copy_nocheck(p + v.bv_offset,
+						 (to += v.bv_len) - v.bv_len,
+						 v.bv_len, 0);
+		kunmap_atomic(p);
+		sum = csum_block_add(sum, next, off);
+		off += v.bv_len;
+	}),({
+		next = csum_partial_copy_nocheck(v.iov_base,
+						 (to += v.iov_len) - v.iov_len,
+						 v.iov_len, 0);
+		sum = csum_block_add(sum, next, off);
+		off += v.iov_len;
+	})
+	)
+	*csum = sum;
+	iov_iter_advance(i, bytes);
+	return true;
+}
+EXPORT_SYMBOL(csum_and_copy_from_iter_full);
+
 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
 			     struct iov_iter *i)
 {
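
Like copy_from_iter_full(), the checksumming variant above only advances the iterator when the whole range was copied; *csum is used both as the initial value and to return the accumulated sum, and csum_block_add() is given the byte offset of each segment so odd-length segments fold in correctly. A simplified caller sketch (hypothetical helper, not from this patch):

	/* Hypothetical: pull len bytes out of a user iov_iter while
	 * accumulating the Internet checksum of the copied data in one pass. */
	static int copy_payload_with_csum(void *buf, size_t len, __wsum *csum,
					  struct iov_iter *from)
	{
		*csum = 0;	/* start the running sum at zero */
		if (!csum_and_copy_from_iter_full(buf, len, csum, from))
			return -EFAULT;	/* iterator was not advanced */
		return 0;
	}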
@@ -1051,7 +1214,7 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
 	iterate_and_advance(i, bytes, v, ({
 		int err = 0;
 		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
-					     v.iov_base,
+					     v.iov_base,
 					     v.iov_len, 0, &err);
 		if (!err) {
 			sum = csum_block_add(sum, next, off);