author	Jakub Kicinski <jakub.kicinski@netronome.com>	2019-04-10 14:04:31 -0400
committer	David S. Miller <davem@davemloft.net>	2019-04-10 16:07:02 -0400
commit	35b71a34ada62c9573847a324bf06a133fe11b11 (patch)
tree	1161595f0333fca7a5f419b3200b15eee8a21ce3 /net/tls/tls_main.c
parent	5a03bc73abed6ae196c15e9950afde19d48be12c (diff)
net/tls: don't leak partially sent record in device mode
David reports that tls triggers warnings related to sk->sk_forward_alloc not being zero at destruction time:

WARNING: CPU: 5 PID: 6831 at net/core/stream.c:206 sk_stream_kill_queues+0x103/0x110
WARNING: CPU: 5 PID: 6831 at net/ipv4/af_inet.c:160 inet_sock_destruct+0x15b/0x170

The warnings trigger when the sender fills up the write buffer and dies from SIGPIPE, because the device implementation does not clean up the partially_sent_record. Commit a42055e8d2c3 ("net/tls: Add support for async encryption of records for performance") moved the partial record cleanup to the SW-only path.

Fixes: a42055e8d2c3 ("net/tls: Add support for async encryption of records for performance")
Reported-by: David Beckett <david.beckett@netronome.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Reviewed-by: Simon Horman <simon.horman@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tls/tls_main.c')
-rw-r--r--	net/tls/tls_main.c	22
1 file changed, 22 insertions(+), 0 deletions(-)
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index df921a2904b9..a3cca1ef0098 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -208,6 +208,26 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
 	return tls_push_sg(sk, ctx, sg, offset, flags);
 }
 
+bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
+{
+	struct scatterlist *sg;
+
+	sg = ctx->partially_sent_record;
+	if (!sg)
+		return false;
+
+	while (1) {
+		put_page(sg_page(sg));
+		sk_mem_uncharge(sk, sg->length);
+
+		if (sg_is_last(sg))
+			break;
+		sg++;
+	}
+	ctx->partially_sent_record = NULL;
+	return true;
+}
+
 static void tls_write_space(struct sock *sk)
 {
 	struct tls_context *ctx = tls_get_ctx(sk);
@@ -267,6 +287,8 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
 		kfree(ctx->tx.rec_seq);
 		kfree(ctx->tx.iv);
 		tls_sw_free_resources_tx(sk);
+	} else if (ctx->tx_conf == TLS_HW) {
+		tls_device_free_resources_tx(sk);
 	}
 
 	if (ctx->rx_conf == TLS_SW) {
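
The tls_sk_proto_close() hunk above calls tls_device_free_resources_tx(), which lives in net/tls/tls_device.c and is therefore not shown in this diffstat (limited to net/tls/tls_main.c). A minimal sketch of how that helper might wire the new tls_free_partial_record() into the device path, assuming it only needs to drop the leftover partial record:

	void tls_device_free_resources_tx(struct sock *sk)
	{
		struct tls_context *tls_ctx = tls_get_ctx(sk);

		/* Release the pages of a record that was built but never fully
		 * transmitted, so the memory charged to the socket is returned
		 * before sk_stream_kill_queues() checks sk_forward_alloc.
		 */
		tls_free_partial_record(sk, tls_ctx);
	}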