path: root/net/tls
author	Doron Roberts-Kedes <doronrk@fb.com>	2018-08-28 19:33:57 -0400
committer	David S. Miller <davem@davemloft.net>	2018-08-29 22:57:04 -0400
commit	0927f71dbcfb59131b289d7d518e9472e51d4830 (patch)
tree	1db0618375ce8e2d11e28e25aee8d20d78416387 /net/tls
parent	7e8d5755be0e6c92d3b86a85e54c6a550b1910c5 (diff)
net/tls: Calculate nsg for zerocopy path without skb_cow_data.
decrypt_skb fails if the number of sg elements required to map it is
greater than MAX_SKB_FRAGS. nsg must always be calculated, but
skb_cow_data adds unnecessary memcpy's for the zerocopy case.

The new function skb_nsg calculates the number of scatterlist elements
required to map the skb without the extra overhead of skb_cow_data.
This patch reduces memcpy by 50% on my encrypted NBD benchmarks.

Reported-by: Vakul Garg <Vakul.garg@nxp.com>
Reviewed-by: Vakul Garg <Vakul.garg@nxp.com>
Tested-by: Vakul Garg <Vakul.garg@nxp.com>
Signed-off-by: Doron Roberts-Kedes <doronrk@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
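
For orientation, the hunks below boil down to the following call pattern on
the zerocopy decrypt path. This is an excerpt-style sketch, not additional
patch content: all identifiers come from the diff that follows, and the
surrounding decrypt_internal() body is elided.

	/* Zerocopy branch: count the scatterlist entries needed to map the
	 * ciphertext (record payload minus the TLS prepend) without the
	 * copies skb_cow_data() would make.  skb_nsg() returns -EMSGSIZE
	 * when the frag-list nesting is too deep, which the existing
	 * n_sgin < 1 check below turns into -EBADMSG.
	 */
	n_sgin = skb_nsg(skb, rxm->offset + tls_ctx->rx.prepend_size,
			 rxm->full_len - tls_ctx->rx.prepend_size);
	if (n_sgin < 1)
		return -EBADMSG;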
Diffstat (limited to 'net/tls')
-rw-r--r--	net/tls/tls_sw.c	80
1 file changed, 79 insertions(+), 1 deletion(-)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 52fbe727d7c1..4ba62cd00a94 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -43,6 +43,82 @@
 
 #define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE
 
+static int __skb_nsg(struct sk_buff *skb, int offset, int len,
+		     unsigned int recursion_level)
+{
+	int start = skb_headlen(skb);
+	int i, chunk = start - offset;
+	struct sk_buff *frag_iter;
+	int elt = 0;
+
+	if (unlikely(recursion_level >= 24))
+		return -EMSGSIZE;
+
+	if (chunk > 0) {
+		if (chunk > len)
+			chunk = len;
+		elt++;
+		len -= chunk;
+		if (len == 0)
+			return elt;
+		offset += chunk;
+	}
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		chunk = end - offset;
+		if (chunk > 0) {
+			if (chunk > len)
+				chunk = len;
+			elt++;
+			len -= chunk;
+			if (len == 0)
+				return elt;
+			offset += chunk;
+		}
+		start = end;
+	}
+
+	if (unlikely(skb_has_frag_list(skb))) {
+		skb_walk_frags(skb, frag_iter) {
+			int end, ret;
+
+			WARN_ON(start > offset + len);
+
+			end = start + frag_iter->len;
+			chunk = end - offset;
+			if (chunk > 0) {
+				if (chunk > len)
+					chunk = len;
+				ret = __skb_nsg(frag_iter, offset - start, chunk,
+						recursion_level + 1);
+				if (unlikely(ret < 0))
+					return ret;
+				elt += ret;
+				len -= chunk;
+				if (len == 0)
+					return elt;
+				offset += chunk;
+			}
+			start = end;
+		}
+	}
+	BUG_ON(len);
+	return elt;
+}
+
+/* Return the number of scatterlist elements required to completely map the
+ * skb, or -EMSGSIZE if the recursion depth is exceeded.
+ */
+static int skb_nsg(struct sk_buff *skb, int offset, int len)
+{
+	return __skb_nsg(skb, offset, len, 0);
+}
+
 static int tls_do_decryption(struct sock *sk,
 			     struct scatterlist *sgin,
 			     struct scatterlist *sgout,
@@ -678,12 +754,14 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
 			n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
 		else
 			n_sgout = sg_nents(out_sg);
+		n_sgin = skb_nsg(skb, rxm->offset + tls_ctx->rx.prepend_size,
+				 rxm->full_len - tls_ctx->rx.prepend_size);
 	} else {
 		n_sgout = 0;
 		*zc = false;
+		n_sgin = skb_cow_data(skb, 0, &unused);
 	}
 
-	n_sgin = skb_cow_data(skb, 0, &unused);
 	if (n_sgin < 1)
 		return -EBADMSG;
 