aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorAl Viro <viro@zeniv.linux.org.uk>2017-02-17 20:16:34 -0500
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2017-04-21 03:31:21 -0400
commit0c6172ccbb59e7fab17c19dc7c9a753c3429cfd7 (patch)
treec791377a004f5be8a6baf87273d83677ecc9a631 /net
parentff76ab9e03a50a4df26329e547e75f865a2bfa9f (diff)
make skb_copy_datagram_msg() et.al. preserve ->msg_iter on error
commit 3278682123811dd8ef07de5eb701fc4548fcebf2 upstream. Fixes the mess observed in e.g. rsync over a noisy link we'd been seeing since last Summer. What happens is that we copy part of a datagram before noticing a checksum mismatch. Datagram will be resent, all right, but we want the next try go into the same place, not after it... All this family of primitives (copy/checksum and copy a datagram into destination) is "all or nothing" sort of interface - either we get 0 (meaning that copy had been successful) or we get an error (and no way to tell how much had been copied before we ran into whatever error it had been). Make all of them leave iterator unadvanced in case of errors - all callers must be able to cope with that (an error might've been caught before the iterator had been advanced), it costs very little to arrange, it's safer for callers and actually fixes at least one bug in said callers. Signed-off-by: Al Viro <viro@zeniv.linux.org.uk> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'net')
-rw-r--r--net/core/datagram.c23
1 file changed, 14 insertions, 9 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b7de71f8d5d3..963732e775df 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -378,7 +378,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
378 struct iov_iter *to, int len) 378 struct iov_iter *to, int len)
379{ 379{
380 int start = skb_headlen(skb); 380 int start = skb_headlen(skb);
381 int i, copy = start - offset; 381 int i, copy = start - offset, start_off = offset, n;
382 struct sk_buff *frag_iter; 382 struct sk_buff *frag_iter;
383 383
384 trace_skb_copy_datagram_iovec(skb, len); 384 trace_skb_copy_datagram_iovec(skb, len);
@@ -387,11 +387,12 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
387 if (copy > 0) { 387 if (copy > 0) {
388 if (copy > len) 388 if (copy > len)
389 copy = len; 389 copy = len;
390 if (copy_to_iter(skb->data + offset, copy, to) != copy) 390 n = copy_to_iter(skb->data + offset, copy, to);
391 offset += n;
392 if (n != copy)
391 goto short_copy; 393 goto short_copy;
392 if ((len -= copy) == 0) 394 if ((len -= copy) == 0)
393 return 0; 395 return 0;
394 offset += copy;
395 } 396 }
396 397
397 /* Copy paged appendix. Hmm... why does this look so complicated? */ 398 /* Copy paged appendix. Hmm... why does this look so complicated? */
@@ -405,13 +406,14 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
405 if ((copy = end - offset) > 0) { 406 if ((copy = end - offset) > 0) {
406 if (copy > len) 407 if (copy > len)
407 copy = len; 408 copy = len;
408 if (copy_page_to_iter(skb_frag_page(frag), 409 n = copy_page_to_iter(skb_frag_page(frag),
409 frag->page_offset + offset - 410 frag->page_offset + offset -
410 start, copy, to) != copy) 411 start, copy, to);
412 offset += n;
413 if (n != copy)
411 goto short_copy; 414 goto short_copy;
412 if (!(len -= copy)) 415 if (!(len -= copy))
413 return 0; 416 return 0;
414 offset += copy;
415 } 417 }
416 start = end; 418 start = end;
417 } 419 }
@@ -443,6 +445,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
443 */ 445 */
444 446
445fault: 447fault:
448 iov_iter_revert(to, offset - start_off);
446 return -EFAULT; 449 return -EFAULT;
447 450
448short_copy: 451short_copy:
@@ -593,7 +596,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
593 __wsum *csump) 596 __wsum *csump)
594{ 597{
595 int start = skb_headlen(skb); 598 int start = skb_headlen(skb);
596 int i, copy = start - offset; 599 int i, copy = start - offset, start_off = offset;
597 struct sk_buff *frag_iter; 600 struct sk_buff *frag_iter;
598 int pos = 0; 601 int pos = 0;
599 int n; 602 int n;
@@ -603,11 +606,11 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
603 if (copy > len) 606 if (copy > len)
604 copy = len; 607 copy = len;
605 n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to); 608 n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
609 offset += n;
606 if (n != copy) 610 if (n != copy)
607 goto fault; 611 goto fault;
608 if ((len -= copy) == 0) 612 if ((len -= copy) == 0)
609 return 0; 613 return 0;
610 offset += copy;
611 pos = copy; 614 pos = copy;
612 } 615 }
613 616
@@ -629,12 +632,12 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
629 offset - start, copy, 632 offset - start, copy,
630 &csum2, to); 633 &csum2, to);
631 kunmap(page); 634 kunmap(page);
635 offset += n;
632 if (n != copy) 636 if (n != copy)
633 goto fault; 637 goto fault;
634 *csump = csum_block_add(*csump, csum2, pos); 638 *csump = csum_block_add(*csump, csum2, pos);
635 if (!(len -= copy)) 639 if (!(len -= copy))
636 return 0; 640 return 0;
637 offset += copy;
638 pos += copy; 641 pos += copy;
639 } 642 }
640 start = end; 643 start = end;
@@ -667,6 +670,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
667 return 0; 670 return 0;
668 671
669fault: 672fault:
673 iov_iter_revert(to, offset - start_off);
670 return -EFAULT; 674 return -EFAULT;
671} 675}
672 676
@@ -751,6 +755,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
751 } 755 }
752 return 0; 756 return 0;
753csum_error: 757csum_error:
758 iov_iter_revert(&msg->msg_iter, chunk);
754 return -EINVAL; 759 return -EINVAL;
755fault: 760fault:
756 return -EFAULT; 761 return -EFAULT;