summaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorAl Viro <viro@zeniv.linux.org.uk>2017-02-17 20:16:34 -0500
committerAl Viro <viro@zeniv.linux.org.uk>2017-04-02 12:10:57 -0400
commit3278682123811dd8ef07de5eb701fc4548fcebf2 (patch)
treef455af5923c286b055f8b2577f66b624a911855b /net
parent27c0e3748e41ca79171ffa3e97415a20af6facd0 (diff)
make skb_copy_datagram_msg() et.al. preserve ->msg_iter on error
Fixes the mess observed in e.g. rsync over a noisy link we'd been seeing since last Summer. What happens is that we copy part of a datagram before noticing a checksum mismatch. Datagram will be resent, all right, but we want the next try to go into the same place, not after it... All this family of primitives (copy/checksum and copy a datagram into destination) is "all or nothing" sort of interface - either we get 0 (meaning that copy had been successful) or we get an error (and no way to tell how much had been copied before we ran into whatever error it had been). Make all of them leave iterator unadvanced in case of errors - all callers must be able to cope with that (an error might've been caught before the iterator had been advanced), it costs very little to arrange, it's safer for callers and actually fixes at least one bug in said callers. Cc: stable@vger.kernel.org Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'net')
-rw-r--r--net/core/datagram.c23
1 file changed, 14 insertions, 9 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c
index ea633342ab0d..f4947e737f34 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -398,7 +398,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
398 struct iov_iter *to, int len) 398 struct iov_iter *to, int len)
399{ 399{
400 int start = skb_headlen(skb); 400 int start = skb_headlen(skb);
401 int i, copy = start - offset; 401 int i, copy = start - offset, start_off = offset, n;
402 struct sk_buff *frag_iter; 402 struct sk_buff *frag_iter;
403 403
404 trace_skb_copy_datagram_iovec(skb, len); 404 trace_skb_copy_datagram_iovec(skb, len);
@@ -407,11 +407,12 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
407 if (copy > 0) { 407 if (copy > 0) {
408 if (copy > len) 408 if (copy > len)
409 copy = len; 409 copy = len;
410 if (copy_to_iter(skb->data + offset, copy, to) != copy) 410 n = copy_to_iter(skb->data + offset, copy, to);
411 offset += n;
412 if (n != copy)
411 goto short_copy; 413 goto short_copy;
412 if ((len -= copy) == 0) 414 if ((len -= copy) == 0)
413 return 0; 415 return 0;
414 offset += copy;
415 } 416 }
416 417
417 /* Copy paged appendix. Hmm... why does this look so complicated? */ 418 /* Copy paged appendix. Hmm... why does this look so complicated? */
@@ -425,13 +426,14 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
425 if ((copy = end - offset) > 0) { 426 if ((copy = end - offset) > 0) {
426 if (copy > len) 427 if (copy > len)
427 copy = len; 428 copy = len;
428 if (copy_page_to_iter(skb_frag_page(frag), 429 n = copy_page_to_iter(skb_frag_page(frag),
429 frag->page_offset + offset - 430 frag->page_offset + offset -
430 start, copy, to) != copy) 431 start, copy, to);
432 offset += n;
433 if (n != copy)
431 goto short_copy; 434 goto short_copy;
432 if (!(len -= copy)) 435 if (!(len -= copy))
433 return 0; 436 return 0;
434 offset += copy;
435 } 437 }
436 start = end; 438 start = end;
437 } 439 }
@@ -463,6 +465,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
463 */ 465 */
464 466
465fault: 467fault:
468 iov_iter_revert(to, offset - start_off);
466 return -EFAULT; 469 return -EFAULT;
467 470
468short_copy: 471short_copy:
@@ -613,7 +616,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
613 __wsum *csump) 616 __wsum *csump)
614{ 617{
615 int start = skb_headlen(skb); 618 int start = skb_headlen(skb);
616 int i, copy = start - offset; 619 int i, copy = start - offset, start_off = offset;
617 struct sk_buff *frag_iter; 620 struct sk_buff *frag_iter;
618 int pos = 0; 621 int pos = 0;
619 int n; 622 int n;
@@ -623,11 +626,11 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
623 if (copy > len) 626 if (copy > len)
624 copy = len; 627 copy = len;
625 n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to); 628 n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
629 offset += n;
626 if (n != copy) 630 if (n != copy)
627 goto fault; 631 goto fault;
628 if ((len -= copy) == 0) 632 if ((len -= copy) == 0)
629 return 0; 633 return 0;
630 offset += copy;
631 pos = copy; 634 pos = copy;
632 } 635 }
633 636
@@ -649,12 +652,12 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
649 offset - start, copy, 652 offset - start, copy,
650 &csum2, to); 653 &csum2, to);
651 kunmap(page); 654 kunmap(page);
655 offset += n;
652 if (n != copy) 656 if (n != copy)
653 goto fault; 657 goto fault;
654 *csump = csum_block_add(*csump, csum2, pos); 658 *csump = csum_block_add(*csump, csum2, pos);
655 if (!(len -= copy)) 659 if (!(len -= copy))
656 return 0; 660 return 0;
657 offset += copy;
658 pos += copy; 661 pos += copy;
659 } 662 }
660 start = end; 663 start = end;
@@ -687,6 +690,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
687 return 0; 690 return 0;
688 691
689fault: 692fault:
693 iov_iter_revert(to, offset - start_off);
690 return -EFAULT; 694 return -EFAULT;
691} 695}
692 696
@@ -771,6 +775,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
771 } 775 }
772 return 0; 776 return 0;
773csum_error: 777csum_error:
778 iov_iter_revert(&msg->msg_iter, chunk);
774 return -EINVAL; 779 return -EINVAL;
775fault: 780fault:
776 return -EFAULT; 781 return -EFAULT;