author		David Howells <dhowells@redhat.com>	2018-10-22 08:07:28 -0400
committer	David Howells <dhowells@redhat.com>	2018-10-23 19:40:44 -0400
commit		00e23707442a75b404392cef1405ab4fd498de6b (patch)
tree		d2d16e7863306dd0ccf6b7d528958bd9dd497820 /lib
parent		1fcb748d187d0c7732a75a509e924ead6d070e04 (diff)
iov_iter: Use accessor function
Use accessor functions to access an iterator's type and direction.  This
allows for the possibility of using some other method of determining the
type of iterator than if-chains with bitwise-AND conditions.

Signed-off-by: David Howells <dhowells@redhat.com>
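For illustration, a minimal sketch of what such predicate accessors could look like if they are layered over the existing bitmask 'type' field (the real helpers live in include/linux/uio.h and their exact bodies are not part of this diff, so treat the definitions below as assumptions rather than the kernel's actual code):

/* Hedged sketch only -- assumes iov_iter::type is still a bitmask
 * carrying the ITER_* kind flags alongside the READ/WRITE direction.
 */
static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return i->type & ITER_PIPE;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return i->type & ITER_BVEC;
}

Callers then write iov_iter_is_pipe(i) instead of i->type & ITER_PIPE, so a later change in how the iterator kind is stored (for example a separate enum field) only has to touch these helpers, not every call site.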
Diffstat (limited to 'lib')
-rw-r--r--	lib/iov_iter.c	56
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 8be175df3075..42d39116a556 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -558,7 +558,7 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
 	const char *from = addr;
-	if (unlikely(i->type & ITER_PIPE))
+	if (unlikely(iov_iter_is_pipe(i)))
 		return copy_pipe_to_iter(addr, bytes, i);
 	if (iter_is_iovec(i))
 		might_fault();
@@ -658,7 +658,7 @@ size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
 	const char *from = addr;
 	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
 
-	if (unlikely(i->type & ITER_PIPE))
+	if (unlikely(iov_iter_is_pipe(i)))
 		return copy_pipe_to_iter_mcsafe(addr, bytes, i);
 	if (iter_is_iovec(i))
 		might_fault();
@@ -692,7 +692,7 @@ EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return 0;
 	}
@@ -712,7 +712,7 @@ EXPORT_SYMBOL(_copy_from_iter);
 bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return false;
 	}
@@ -739,7 +739,7 @@ EXPORT_SYMBOL(_copy_from_iter_full);
 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return 0;
 	}
@@ -773,7 +773,7 @@ EXPORT_SYMBOL(_copy_from_iter_nocache);
 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return 0;
 	}
@@ -794,7 +794,7 @@ EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
 bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return false;
 	}
@@ -836,7 +836,7 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
 		kunmap_atomic(kaddr);
 		return wanted;
-	} else if (likely(!(i->type & ITER_PIPE)))
+	} else if (likely(!iov_iter_is_pipe(i)))
 		return copy_page_to_iter_iovec(page, offset, bytes, i);
 	else
 		return copy_page_to_iter_pipe(page, offset, bytes, i);
@@ -848,7 +848,7 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 {
 	if (unlikely(!page_copy_sane(page, offset, bytes)))
 		return 0;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return 0;
 	}
@@ -888,7 +888,7 @@ static size_t pipe_zero(size_t bytes, struct iov_iter *i)
 
 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 {
-	if (unlikely(i->type & ITER_PIPE))
+	if (unlikely(iov_iter_is_pipe(i)))
 		return pipe_zero(bytes, i);
 	iterate_and_advance(i, bytes, v,
 		clear_user(v.iov_base, v.iov_len),
@@ -908,7 +908,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 		kunmap_atomic(kaddr);
 		return 0;
 	}
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		kunmap_atomic(kaddr);
 		WARN_ON(1);
 		return 0;
@@ -972,7 +972,7 @@ static void pipe_advance(struct iov_iter *i, size_t size)
 
 void iov_iter_advance(struct iov_iter *i, size_t size)
 {
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		pipe_advance(i, size);
 		return;
 	}
@@ -987,7 +987,7 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
 	if (WARN_ON(unroll > MAX_RW_COUNT))
 		return;
 	i->count += unroll;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		struct pipe_inode_info *pipe = i->pipe;
 		int idx = i->idx;
 		size_t off = i->iov_offset;
@@ -1016,7 +1016,7 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
 		return;
 	}
 	unroll -= i->iov_offset;
-	if (i->type & ITER_BVEC) {
+	if (iov_iter_is_bvec(i)) {
 		const struct bio_vec *bvec = i->bvec;
 		while (1) {
 			size_t n = (--bvec)->bv_len;
@@ -1049,11 +1049,11 @@ EXPORT_SYMBOL(iov_iter_revert);
  */
 size_t iov_iter_single_seg_count(const struct iov_iter *i)
 {
-	if (unlikely(i->type & ITER_PIPE))
+	if (unlikely(iov_iter_is_pipe(i)))
 		return i->count;	// it is a silly place, anyway
 	if (i->nr_segs == 1)
 		return i->count;
-	else if (i->type & ITER_BVEC)
+	else if (iov_iter_is_bvec(i))
 		return min(i->count, i->bvec->bv_len - i->iov_offset);
 	else
 		return min(i->count, i->iov->iov_len - i->iov_offset);
@@ -1106,7 +1106,7 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
 	unsigned long res = 0;
 	size_t size = i->count;
 
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
 			return size | i->iov_offset;
 		return size;
@@ -1125,7 +1125,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
 	unsigned long res = 0;
 	size_t size = i->count;
 
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return ~0U;
 	}
@@ -1193,7 +1193,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
 	if (maxsize > i->count)
 		maxsize = i->count;
 
-	if (unlikely(i->type & ITER_PIPE))
+	if (unlikely(iov_iter_is_pipe(i)))
 		return pipe_get_pages(i, pages, maxsize, maxpages, start);
 	iterate_all_kinds(i, maxsize, v, ({
 		unsigned long addr = (unsigned long)v.iov_base;
@@ -1205,7 +1205,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
 			len = maxpages * PAGE_SIZE;
 		addr &= ~(PAGE_SIZE - 1);
 		n = DIV_ROUND_UP(len, PAGE_SIZE);
-		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
+		res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, pages);
 		if (unlikely(res < 0))
 			return res;
 		return (res == n ? len : res * PAGE_SIZE) - *start;
@@ -1270,7 +1270,7 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
 	if (maxsize > i->count)
 		maxsize = i->count;
 
-	if (unlikely(i->type & ITER_PIPE))
+	if (unlikely(iov_iter_is_pipe(i)))
 		return pipe_get_pages_alloc(i, pages, maxsize, start);
 	iterate_all_kinds(i, maxsize, v, ({
 		unsigned long addr = (unsigned long)v.iov_base;
@@ -1283,7 +1283,7 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
 		p = get_pages_array(n);
 		if (!p)
 			return -ENOMEM;
-		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
+		res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, p);
 		if (unlikely(res < 0)) {
 			kvfree(p);
 			return res;
@@ -1313,7 +1313,7 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
 	__wsum sum, next;
 	size_t off = 0;
 	sum = *csum;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return 0;
 	}
@@ -1355,7 +1355,7 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
 	__wsum sum, next;
 	size_t off = 0;
 	sum = *csum;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return false;
 	}
@@ -1400,7 +1400,7 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
 	__wsum sum, next;
 	size_t off = 0;
 	sum = *csum;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);	/* for now */
 		return 0;
 	}
@@ -1443,7 +1443,7 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
 	if (!size)
 		return 0;
 
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		struct pipe_inode_info *pipe = i->pipe;
 		size_t off;
 		int idx;
@@ -1481,11 +1481,11 @@ EXPORT_SYMBOL(iov_iter_npages);
 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
 {
 	*new = *old;
-	if (unlikely(new->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(new))) {
 		WARN_ON(1);
 		return NULL;
 	}
-	if (new->type & ITER_BVEC)
+	if (iov_iter_is_bvec(new))
 		return new->bvec = kmemdup(new->bvec,
 				new->nr_segs * sizeof(struct bio_vec),
 				flags);
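The two get_user_pages_fast() hunks above also replace the open-coded direction test (i->type & WRITE) != WRITE with iov_iter_rw(i) != WRITE. As a hedged sketch under the same bitmask assumption (the real definition lives in include/linux/uio.h), the direction accessor could simply mask out everything but the READ/WRITE bit:

/* Sketch only; not necessarily the kernel's actual definition. */
static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->type & (READ | WRITE);	/* data direction */
}

With that, get_user_pages_fast() is asked for writable pages exactly when the iterator's direction is not WRITE, i.e. when data will be copied into the user buffers.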