author    Jakub Kicinski <jakub.kicinski@netronome.com>    2019-04-25 20:35:09 -0400
committer David S. Miller <davem@davemloft.net>            2019-04-27 20:17:19 -0400
commit    97e1caa517e22d62a283b876fb8aa5f4672c83dd (patch)
tree      49c0a9a8eaf6ec2c1dc548f0e68fcc2c2f8ed91b
parent    b2a20fd0725e8b259c528820033e29fdb3724549 (diff)
net/tls: don't copy negative amounts of data in reencrypt
There is no guarantee the record starts before the skb frags. If we
don't check for this condition, the copy amount will go negative,
leading to reads and writes to random memory locations. Familiar
hilarity ensues.

Fixes: 4799ac81e52a ("tls: Add rx inline crypto offload")
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: John Hurley <john.hurley@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
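To see why the unguarded arithmetic is dangerous, consider a record that begins past the end of the skb's paged data, i.e. offset exceeds skb_pagelen(skb). The following is a minimal userspace sketch, not the kernel code: the lengths are made-up values and min_t() is modeled after the kernel macro. It shows how the unsigned subtraction wraps and the signed cast inside min_t() then propagates a negative copy length:

	#include <stdio.h>

	/* Modeled after the kernel's min_t() macro. */
	#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

	int main(void)
	{
		/* Hypothetical values: the record starts at offset 1400, but
		 * the skb's linear + paged data only covers 1000 bytes. */
		unsigned int pagelen = 1000;	/* skb_pagelen(skb) */
		unsigned int offset  = 1400;	/* start of the record */
		int payload = 256;		/* full_len - TAG_SIZE */

		/* pagelen - offset wraps around as unsigned; the cast to int
		 * inside min_t() turns it into -400, which then "wins" the
		 * comparison against the positive payload length. */
		int copy = min_t(int, pagelen - offset, payload);

		printf("copy = %d\n", copy);	/* prints: copy = -400 */
		return 0;
	}

A negative copy passed on to skb_store_bits() is what produces the out-of-bounds reads and writes the commit message describes.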
-rw-r--r--    net/tls/tls_device.c    14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index cc0256939eb6..96357060addc 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -628,14 +628,16 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
 	else
 		err = 0;
 
-	copy = min_t(int, skb_pagelen(skb) - offset,
-		     rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+	if (skb_pagelen(skb) > offset) {
+		copy = min_t(int, skb_pagelen(skb) - offset,
+			     rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
 
-	if (skb->decrypted)
-		skb_store_bits(skb, offset, buf, copy);
+		if (skb->decrypted)
+			skb_store_bits(skb, offset, buf, copy);
 
-	offset += copy;
-	buf += copy;
+		offset += copy;
+		buf += copy;
+	}
 
 	skb_walk_frags(skb, skb_iter) {
 		copy = min_t(int, skb_iter->len,
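For contrast, the same userspace sketch with the patch's guard applied, again under made-up values. When the record begins beyond skb_pagelen(skb), the whole copy-back of the linear/paged portion is skipped and copy never goes negative:

	#include <stdio.h>

	#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

	int main(void)
	{
		unsigned int pagelen = 1000;	/* skb_pagelen(skb) */
		unsigned int offset  = 1400;	/* record starts past the frags */
		int payload = 256;		/* full_len - TAG_SIZE */
		int copy    = 0;

		/* The patch's guard: only touch the paged area when the
		 * record actually begins inside it. */
		if (pagelen > offset) {
			copy = min_t(int, pagelen - offset, payload);
			/* ... copy back into the skb, advance offset/buf ... */
		}

		printf("copy = %d\n", copy);	/* prints: copy = 0 */
		return 0;
	}

Note the design choice in the diff: rather than clamping copy at zero after the fact, the guard wraps the whole block, so the skb_store_bits() call and the offset/buf updates are also skipped when there is nothing to copy from the paged area.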