aboutsummaryrefslogtreecommitdiffstats
path: root/net/tls/tls_device.c
diff options
context:
space:
mode:
authorJakub Kicinski <jakub.kicinski@netronome.com>2019-04-25 20:35:10 -0400
committerDavid S. Miller <davem@davemloft.net>2019-04-27 20:17:19 -0400
commiteb3d38d5adb520435d4e4af32529ccb13ccc9935 (patch)
tree19cc2e28953e3f4b42b2e96fb83fca36f877659a /net/tls/tls_device.c
parent97e1caa517e22d62a283b876fb8aa5f4672c83dd (diff)
net/tls: fix copy to fragments in reencrypt
Fragments may contain data from other records so we have to account for that when we calculate the destination and max length of copy we can perform. Note that 'offset' is the offset within the message, so it can't be passed as offset within the frag.. Here skb_store_bits() would have realised the call is wrong and simply not copy data. Fixes: 4799ac81e52a ("tls: Add rx inline crypto offload") Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com> Reviewed-by: John Hurley <john.hurley@netronome.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tls/tls_device.c')
-rw-r--r--net/tls/tls_device.c29
1 file changed, 22 insertions, 7 deletions
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 96357060addc..14dedb24fa7b 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -597,7 +597,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
597static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb) 597static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
598{ 598{
599 struct strp_msg *rxm = strp_msg(skb); 599 struct strp_msg *rxm = strp_msg(skb);
600 int err = 0, offset = rxm->offset, copy, nsg; 600 int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
601 struct sk_buff *skb_iter, *unused; 601 struct sk_buff *skb_iter, *unused;
602 struct scatterlist sg[1]; 602 struct scatterlist sg[1];
603 char *orig_buf, *buf; 603 char *orig_buf, *buf;
@@ -628,9 +628,10 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
628 else 628 else
629 err = 0; 629 err = 0;
630 630
631 data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;
632
631 if (skb_pagelen(skb) > offset) { 633 if (skb_pagelen(skb) > offset) {
632 copy = min_t(int, skb_pagelen(skb) - offset, 634 copy = min_t(int, skb_pagelen(skb) - offset, data_len);
633 rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
634 635
635 if (skb->decrypted) 636 if (skb->decrypted)
636 skb_store_bits(skb, offset, buf, copy); 637 skb_store_bits(skb, offset, buf, copy);
@@ -639,16 +640,30 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
639 buf += copy; 640 buf += copy;
640 } 641 }
641 642
643 pos = skb_pagelen(skb);
642 skb_walk_frags(skb, skb_iter) { 644 skb_walk_frags(skb, skb_iter) {
643 copy = min_t(int, skb_iter->len, 645 int frag_pos;
644 rxm->full_len - offset + rxm->offset - 646
645 TLS_CIPHER_AES_GCM_128_TAG_SIZE); 647 /* Practically all frags must belong to msg if reencrypt
648 * is needed with current strparser and coalescing logic,
649 * but strparser may "get optimized", so let's be safe.
650 */
651 if (pos + skb_iter->len <= offset)
652 goto done_with_frag;
653 if (pos >= data_len + rxm->offset)
654 break;
655
656 frag_pos = offset - pos;
657 copy = min_t(int, skb_iter->len - frag_pos,
658 data_len + rxm->offset - offset);
646 659
647 if (skb_iter->decrypted) 660 if (skb_iter->decrypted)
648 skb_store_bits(skb_iter, offset, buf, copy); 661 skb_store_bits(skb_iter, frag_pos, buf, copy);
649 662
650 offset += copy; 663 offset += copy;
651 buf += copy; 664 buf += copy;
665done_with_frag:
666 pos += skb_iter->len;
652 } 667 }
653 668
654free_buf: 669free_buf: