author    Al Viro <viro@zeniv.linux.org.uk>  2014-11-27 13:51:41 -0500
committer Al Viro <viro@zeniv.linux.org.uk>  2014-11-27 18:44:10 -0500
commit    04a311655b06163e2a94e429fe79eb8616fc5e01 (patch)
tree      441e212766a0d2ad42a3c796b4beea999dbdd0d6
parent    5d01410fe4d92081f349b013a2e7a95429e4f2c9 (diff)
iov_iter.c: macros for iterating over iov_iter
iterate_all_kinds(iter, size, ident, step_iovec, step_bvec) iterates through the ranges covered by iter (up to size bytes total), repeating step_iovec or step_bvec for each of them. ident is declared inside the expansion of the macro, as either a struct iovec or a struct bio_vec, and holds the range we are currently looking at. step_bvec should be a void expression; step_iovec a size_t one, with a non-zero value meaning "stop here, that many bytes of this range are left over". When the macro is done, the amount actually handled is stored back in size.

iov_iter_copy_from_user_atomic() and iov_iter_alignment() are converted to it.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--  mm/iov_iter.c | 212
1 file changed, 86 insertions(+), 126 deletions(-)
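To make the calling convention concrete, here is a minimal sketch of a caller (not part of this patch; total_bytes() is a hypothetical helper): the iovec step is a size_t expression where evaluating to 0 means "fully consumed", while the bvec step is evaluated as void.

	static size_t total_bytes(struct iov_iter *i, size_t bytes)
	{
		size_t seen = 0;

		/* v is declared by the macro as struct iovec or struct bio_vec */
		iterate_all_kinds(i, bytes, v,
			(seen += v.iov_len, 0),	/* size_t step; 0 = nothing left over */
			seen += v.bv_len	/* void step */
		)
		/* the macro has stored the amount actually covered back in bytes */
		return seen;
	}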
diff --git a/mm/iov_iter.c b/mm/iov_iter.c
index e34a3cb6aad6..798fcb4294e7 100644
--- a/mm/iov_iter.c
+++ b/mm/iov_iter.c
@@ -4,6 +4,72 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 
+#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
+	size_t left;					\
+	size_t wanted = n;				\
+	__p = i->iov;					\
+	__v.iov_len = min(n, __p->iov_len - skip);	\
+	if (likely(__v.iov_len)) {			\
+		__v.iov_base = __p->iov_base + skip;	\
+		left = (STEP);				\
+		__v.iov_len -= left;			\
+		skip += __v.iov_len;			\
+		n -= __v.iov_len;			\
+	} else {					\
+		left = 0;				\
+	}						\
+	while (unlikely(!left && n)) {			\
+		__p++;					\
+		__v.iov_len = min(n, __p->iov_len);	\
+		if (unlikely(!__v.iov_len))		\
+			continue;			\
+		__v.iov_base = __p->iov_base;		\
+		left = (STEP);				\
+		__v.iov_len -= left;			\
+		skip = __v.iov_len;			\
+		n -= __v.iov_len;			\
+	}						\
+	n = wanted - n;					\
+}
+
+#define iterate_bvec(i, n, __v, __p, skip, STEP) {	\
+	size_t wanted = n;				\
+	__p = i->bvec;					\
+	__v.bv_len = min_t(size_t, n, __p->bv_len - skip);	\
+	if (likely(__v.bv_len)) {			\
+		__v.bv_page = __p->bv_page;		\
+		__v.bv_offset = __p->bv_offset + skip;	\
+		(void)(STEP);				\
+		skip += __v.bv_len;			\
+		n -= __v.bv_len;			\
+	}						\
+	while (unlikely(n)) {				\
+		__p++;					\
+		__v.bv_len = min_t(size_t, n, __p->bv_len);	\
+		if (unlikely(!__v.bv_len))		\
+			continue;			\
+		__v.bv_page = __p->bv_page;		\
+		__v.bv_offset = __p->bv_offset;		\
+		(void)(STEP);				\
+		skip = __v.bv_len;			\
+		n -= __v.bv_len;			\
+	}						\
+	n = wanted;					\
+}
+
+#define iterate_all_kinds(i, n, v, I, B) {		\
+	size_t skip = i->iov_offset;			\
+	if (unlikely(i->type & ITER_BVEC)) {		\
+		const struct bio_vec *bvec;		\
+		struct bio_vec v;			\
+		iterate_bvec(i, n, v, bvec, skip, (B))	\
+	} else {					\
+		const struct iovec *iov;		\
+		struct iovec v;				\
+		iterate_iovec(i, n, v, iov, skip, (I))	\
+	}						\
+}
+
 static size_t copy_to_iter_iovec(void *from, size_t bytes, struct iov_iter *i)
 {
 	size_t skip, copy, left, wanted;
@@ -300,54 +366,6 @@ static size_t zero_iovec(size_t bytes, struct iov_iter *i)
 	return wanted - bytes;
 }
 
-static size_t __iovec_copy_from_user_inatomic(char *vaddr,
-			const struct iovec *iov, size_t base, size_t bytes)
-{
-	size_t copied = 0, left = 0;
-
-	while (bytes) {
-		char __user *buf = iov->iov_base + base;
-		int copy = min(bytes, iov->iov_len - base);
-
-		base = 0;
-		left = __copy_from_user_inatomic(vaddr, buf, copy);
-		copied += copy;
-		bytes -= copy;
-		vaddr += copy;
-		iov++;
-
-		if (unlikely(left))
-			break;
-	}
-	return copied - left;
-}
-
-/*
- * Copy as much as we can into the page and return the number of bytes which
- * were successfully copied. If a fault is encountered then return the number of
- * bytes which were copied.
- */
-static size_t copy_from_user_atomic_iovec(struct page *page,
-		struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-	char *kaddr;
-	size_t copied;
-
-	kaddr = kmap_atomic(page);
-	if (likely(i->nr_segs == 1)) {
-		int left;
-		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
-		copied = bytes - left;
-	} else {
-		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-						i->iov, i->iov_offset, bytes);
-	}
-	kunmap_atomic(kaddr);
-
-	return copied;
-}
-
 static void advance_iovec(struct iov_iter *i, size_t bytes)
 {
 	BUG_ON(i->count < bytes);
@@ -404,30 +422,6 @@ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
 }
 EXPORT_SYMBOL(iov_iter_fault_in_readable);
 
-static unsigned long alignment_iovec(const struct iov_iter *i)
-{
-	const struct iovec *iov = i->iov;
-	unsigned long res;
-	size_t size = i->count;
-	size_t n;
-
-	if (!size)
-		return 0;
-
-	res = (unsigned long)iov->iov_base + i->iov_offset;
-	n = iov->iov_len - i->iov_offset;
-	if (n >= size)
-		return res | size;
-	size -= n;
-	res |= n;
-	while (size > (++iov)->iov_len) {
-		res |= (unsigned long)iov->iov_base | iov->iov_len;
-		size -= iov->iov_len;
-	}
-	res |= (unsigned long)iov->iov_base | size;
-	return res;
-}
-
 void iov_iter_init(struct iov_iter *i, int direction,
 			const struct iovec *iov, unsigned long nr_segs,
 			size_t count)
@@ -691,28 +685,6 @@ static size_t zero_bvec(size_t bytes, struct iov_iter *i)
 	return wanted - bytes;
 }
 
-static size_t copy_from_user_bvec(struct page *page,
-		struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-	char *kaddr;
-	size_t left;
-	const struct bio_vec *bvec;
-	size_t base = i->iov_offset;
-
-	kaddr = kmap_atomic(page);
-	for (left = bytes, bvec = i->bvec; left; bvec++, base = 0) {
-		size_t copy = min(left, bvec->bv_len - base);
-		if (!bvec->bv_len)
-			continue;
-		memcpy_from_page(kaddr + offset, bvec->bv_page,
-				 bvec->bv_offset + base, copy);
-		offset += copy;
-		left -= copy;
-	}
-	kunmap_atomic(kaddr);
-	return bytes;
-}
-
 static void advance_bvec(struct iov_iter *i, size_t bytes)
 {
 	BUG_ON(i->count < bytes);
@@ -749,30 +721,6 @@ static void advance_bvec(struct iov_iter *i, size_t bytes)
 	}
 }
 
-static unsigned long alignment_bvec(const struct iov_iter *i)
-{
-	const struct bio_vec *bvec = i->bvec;
-	unsigned long res;
-	size_t size = i->count;
-	size_t n;
-
-	if (!size)
-		return 0;
-
-	res = bvec->bv_offset + i->iov_offset;
-	n = bvec->bv_len - i->iov_offset;
-	if (n >= size)
-		return res | size;
-	size -= n;
-	res |= n;
-	while (size > (++bvec)->bv_len) {
-		res |= bvec->bv_offset | bvec->bv_len;
-		size -= bvec->bv_len;
-	}
-	res |= bvec->bv_offset | size;
-	return res;
-}
-
 static ssize_t get_pages_bvec(struct iov_iter *i,
 		struct page **pages, size_t maxsize, unsigned maxpages,
 		size_t *start)
@@ -887,10 +835,15 @@ EXPORT_SYMBOL(iov_iter_zero);
 size_t iov_iter_copy_from_user_atomic(struct page *page,
 		struct iov_iter *i, unsigned long offset, size_t bytes)
 {
-	if (i->type & ITER_BVEC)
-		return copy_from_user_bvec(page, i, offset, bytes);
-	else
-		return copy_from_user_atomic_iovec(page, i, offset, bytes);
+	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
+	iterate_all_kinds(i, bytes, v,
+		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
+					  v.iov_base, v.iov_len),
+		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
+				 v.bv_offset, v.bv_len)
+	)
+	kunmap_atomic(kaddr);
+	return bytes;
 }
 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
 
@@ -919,10 +872,17 @@ EXPORT_SYMBOL(iov_iter_single_seg_count);
 
 unsigned long iov_iter_alignment(const struct iov_iter *i)
 {
-	if (i->type & ITER_BVEC)
-		return alignment_bvec(i);
-	else
-		return alignment_iovec(i);
+	unsigned long res = 0;
+	size_t size = i->count;
+
+	if (!size)
+		return 0;
+
+	iterate_all_kinds(i, size, v,
+		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
+		res |= v.bv_offset | v.bv_len
+	)
+	return res;
 }
 EXPORT_SYMBOL(iov_iter_alignment);
 
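A note on the converted iov_iter_alignment(): OR-ing every segment's base address and length into res means a low bit is set in the result iff some segment starts or ends at that offset, so a caller can test the alignment of the whole iterator with a single mask. A sketch of the typical direct-I/O style check (blkbits and the error value here are illustrative, not taken from this patch):

	unsigned blkbits = 9;	/* e.g. 512-byte logical blocks */
	unsigned long blocksize_mask = (1UL << blkbits) - 1;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;	/* some segment is misaligned */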