-rw-r--r--  include/linux/uio.h |  1 +
-rw-r--r--  mm/iov_iter.c       | 82 +++++++++++++++++++++++++++++++++-------
2 files changed, 70 insertions(+), 13 deletions(-)
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 9b1581414cd4..6e16945ec832 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -31,6 +31,7 @@ struct iov_iter {
 	size_t count;
 	union {
 		const struct iovec *iov;
+		const struct kvec *kvec;
 		const struct bio_vec *bvec;
 	};
 	unsigned long nr_segs;
diff --git a/mm/iov_iter.c b/mm/iov_iter.c
index 666654498ccf..1618e378277e 100644
--- a/mm/iov_iter.c
+++ b/mm/iov_iter.c
@@ -32,6 +32,29 @@
 	n = wanted - n;					\
 }
 
+#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
+	size_t wanted = n;				\
+	__p = i->kvec;					\
+	__v.iov_len = min(n, __p->iov_len - skip);	\
+	if (likely(__v.iov_len)) {			\
+		__v.iov_base = __p->iov_base + skip;	\
+		(void)(STEP);				\
+		skip += __v.iov_len;			\
+		n -= __v.iov_len;			\
+	}						\
+	while (unlikely(n)) {				\
+		__p++;					\
+		__v.iov_len = min(n, __p->iov_len);	\
+		if (unlikely(!__v.iov_len))		\
+			continue;			\
+		__v.iov_base = __p->iov_base;		\
+		(void)(STEP);				\
+		skip = __v.iov_len;			\
+		n -= __v.iov_len;			\
+	}						\
+	n = wanted;					\
+}
+
 #define iterate_bvec(i, n, __v, __p, skip, STEP) {	\
 	size_t wanted = n;				\
 	__p = i->bvec;					\
@@ -57,12 +80,16 @@
 	n = wanted;					\
 }
 
-#define iterate_all_kinds(i, n, v, I, B) {		\
+#define iterate_all_kinds(i, n, v, I, B, K) {		\
 	size_t skip = i->iov_offset;			\
 	if (unlikely(i->type & ITER_BVEC)) {		\
 		const struct bio_vec *bvec;		\
 		struct bio_vec v;			\
 		iterate_bvec(i, n, v, bvec, skip, (B))	\
+	} else if (unlikely(i->type & ITER_KVEC)) {	\
+		const struct kvec *kvec;		\
+		struct kvec v;				\
+		iterate_kvec(i, n, v, kvec, skip, (K))	\
 	} else {					\
 		const struct iovec *iov;		\
 		struct iovec v;				\
@@ -70,7 +97,7 @@
 	}						\
 }
 
-#define iterate_and_advance(i, n, v, I, B) {		\
+#define iterate_and_advance(i, n, v, I, B, K) {	\
 	size_t skip = i->iov_offset;			\
 	if (unlikely(i->type & ITER_BVEC)) {		\
 		const struct bio_vec *bvec;		\
@@ -82,6 +109,16 @@
 		}					\
 		i->nr_segs -= bvec - i->bvec;		\
 		i->bvec = bvec;				\
+	} else if (unlikely(i->type & ITER_KVEC)) {	\
+		const struct kvec *kvec;		\
+		struct kvec v;				\
+		iterate_kvec(i, n, v, kvec, skip, (K))	\
+		if (skip == kvec->iov_len) {		\
+			kvec++;				\
+			skip = 0;			\
+		}					\
+		i->nr_segs -= kvec - i->kvec;		\
+		i->kvec = kvec;				\
 	} else {					\
 		const struct iovec *iov;		\
 		struct iovec v;				\
@@ -270,7 +307,7 @@ done:
  */
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
 {
-	if (!(i->type & ITER_BVEC)) {
+	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
 		char __user *buf = i->iov->iov_base + i->iov_offset;
 		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
 		return fault_in_pages_readable(buf, bytes);
@@ -284,10 +321,14 @@ void iov_iter_init(struct iov_iter *i, int direction,
 			size_t count)
 {
 	/* It will get better. Eventually... */
-	if (segment_eq(get_fs(), KERNEL_DS))
+	if (segment_eq(get_fs(), KERNEL_DS)) {
 		direction |= ITER_KVEC;
-	i->type = direction;
-	i->iov = iov;
+		i->type = direction;
+		i->kvec = (struct kvec *)iov;
+	} else {
+		i->type = direction;
+		i->iov = iov;
+	}
 	i->nr_segs = nr_segs;
 	i->iov_offset = 0;
 	i->count = count;
@@ -328,7 +369,8 @@ size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
 		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
 			       v.iov_len),
 		memcpy_to_page(v.bv_page, v.bv_offset,
-			       (from += v.bv_len) - v.bv_len, v.bv_len)
+			       (from += v.bv_len) - v.bv_len, v.bv_len),
+		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
 	)
 
 	return bytes;
@@ -348,7 +390,8 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
 				 v.iov_len),
 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len)
+				 v.bv_offset, v.bv_len),
+		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 	)
 
 	return bytes;
@@ -371,7 +414,7 @@ EXPORT_SYMBOL(copy_page_to_iter);
 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 			   struct iov_iter *i)
 {
-	if (i->type & ITER_BVEC) {
+	if (i->type & (ITER_BVEC|ITER_KVEC)) {
 		void *kaddr = kmap_atomic(page);
 		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
 		kunmap_atomic(kaddr);
@@ -391,7 +434,8 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 
 	iterate_and_advance(i, bytes, v,
 		__clear_user(v.iov_base, v.iov_len),
-		memzero_page(v.bv_page, v.bv_offset, v.bv_len)
+		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
+		memset(v.iov_base, 0, v.iov_len)
 	)
 
 	return bytes;
@@ -406,7 +450,8 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
 					  v.iov_base, v.iov_len),
 		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len)
+				 v.bv_offset, v.bv_len),
+		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 	)
 	kunmap_atomic(kaddr);
 	return bytes;
@@ -415,7 +460,7 @@ EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
 
 void iov_iter_advance(struct iov_iter *i, size_t size)
 {
-	iterate_and_advance(i, size, v, 0, 0)
+	iterate_and_advance(i, size, v, 0, 0, 0)
 }
 EXPORT_SYMBOL(iov_iter_advance);
 
@@ -443,7 +488,8 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
 
 	iterate_all_kinds(i, size, v,
 		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
-		res |= v.bv_offset | v.bv_len
+		res |= v.bv_offset | v.bv_len,
+		res |= (unsigned long)v.iov_base | v.iov_len
 	)
 	return res;
 }
@@ -478,6 +524,8 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
 		*start = v.bv_offset;
 		get_page(*pages = v.bv_page);
 		return v.bv_len;
+	}),({
+		return -EFAULT;
 	})
 	)
 	return 0;
@@ -530,6 +578,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
 			return -ENOMEM;
 		get_page(*p = v.bv_page);
 		return v.bv_len;
+	}),({
+		return -EFAULT;
 	})
 	)
 	return 0;
@@ -554,6 +604,12 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
 		npages++;
 		if (npages >= maxpages)
 			return maxpages;
+	}),({
+		unsigned long p = (unsigned long)v.iov_base;
+		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
+			- p / PAGE_SIZE;
+		if (npages >= maxpages)
+			return maxpages;
 	})
 	)
 	return npages;
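
For context, a minimal sketch of how the new kvec leg gets exercised at this point in the series. It is not part of the patch: kvec_copy_example() is a hypothetical caller, while iov_iter_init(), copy_to_iter(), get_fs()/set_fs() and KERNEL_DS are the real interfaces the patch builds on. ITER_KVEC iterators are still created implicitly, by iov_iter_init() whenever the caller runs under set_fs(KERNEL_DS):

#include <linux/fs.h>      /* READ */
#include <linux/uaccess.h> /* get_fs()/set_fs(), KERNEL_DS, mm_segment_t */
#include <linux/uio.h>     /* struct kvec, struct iov_iter */

/*
 * Hypothetical illustration: copy @len bytes from the kernel buffer @src
 * into the kernel buffer @dst via the iov_iter API.  Under KERNEL_DS,
 * iov_iter_init() sets ITER_KVEC and stores the segment array in i->kvec,
 * so copy_to_iter() lands in the new memcpy() callback of
 * iterate_and_advance() rather than in __copy_to_user().
 */
static size_t kvec_copy_example(void *dst, const void *src, size_t len)
{
	struct kvec vec = { .iov_base = dst, .iov_len = len };
	struct iov_iter iter;
	mm_segment_t old_fs = get_fs();
	size_t copied;

	set_fs(KERNEL_DS);
	/* The cast is the inverse of the one iov_iter_init() applies when
	 * it stores the pointer back into i->kvec under KERNEL_DS. */
	iov_iter_init(&iter, READ, (const struct iovec *)&vec, 1, len);
	copied = copy_to_iter((void *)src, len, &iter);
	set_fs(old_fs);
	return copied;
}

A dedicated iov_iter_kvec() constructor arrives later in the same series, removing the need for the set_fs() dance above.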