summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJakub Kicinski <jakub.kicinski@netronome.com>2019-09-07 01:29:59 -0400
committerDavid S. Miller <davem@davemloft.net>2019-09-07 12:10:34 -0400
commite7b159a48ba6f7c243881c7ef3afa6e8785c0826 (patch)
tree82dd0d59e0d2de03e3da81eb237131657684cdce
parentd4774ac0d49ae92c5176c9848db555e89a5a4e45 (diff)
net/tls: remove the record tail optimization
For TLS device offload the tag/message authentication code are filled in by the device. The kernel merely reserves space for them. Because the device overwrites it, the contents of the tag do not matter. Current code tries to save space by reusing the header as the tag. This, however, leads to an additional frag being created and defeats buffer coalescing (which trickles all the way down to the drivers). Remove this optimization, and try to allocate the space for the tag in the usual way, leaving the memory uninitialized. If memory allocation fails, rewind the record pointer so that we use the already copied user data as the tag. Note that the optimization was actually buggy, as the tag for TLS 1.2 is 16 bytes, but the header is just 13, so the reuse may have looked past the end of the page. Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com> Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--net/tls/tls_device.c67
1 file changed, 47 insertions, 20 deletions
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index b11355e00514..916c3c0a99f0 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -256,29 +256,13 @@ static int tls_push_record(struct sock *sk,
256 struct tls_context *ctx, 256 struct tls_context *ctx,
257 struct tls_offload_context_tx *offload_ctx, 257 struct tls_offload_context_tx *offload_ctx,
258 struct tls_record_info *record, 258 struct tls_record_info *record,
259 struct page_frag *pfrag, 259 int flags)
260 int flags,
261 unsigned char record_type)
262{ 260{
263 struct tls_prot_info *prot = &ctx->prot_info; 261 struct tls_prot_info *prot = &ctx->prot_info;
264 struct tcp_sock *tp = tcp_sk(sk); 262 struct tcp_sock *tp = tcp_sk(sk);
265 struct page_frag dummy_tag_frag;
266 skb_frag_t *frag; 263 skb_frag_t *frag;
267 int i; 264 int i;
268 265
269 /* fill prepend */
270 frag = &record->frags[0];
271 tls_fill_prepend(ctx,
272 skb_frag_address(frag),
273 record->len - prot->prepend_size,
274 record_type,
275 prot->version);
276
277 /* HW doesn't care about the data in the tag, because it fills it. */
278 dummy_tag_frag.page = skb_frag_page(frag);
279 dummy_tag_frag.offset = 0;
280
281 tls_append_frag(record, &dummy_tag_frag, prot->tag_size);
282 record->end_seq = tp->write_seq + record->len; 266 record->end_seq = tp->write_seq + record->len;
283 list_add_tail_rcu(&record->list, &offload_ctx->records_list); 267 list_add_tail_rcu(&record->list, &offload_ctx->records_list);
284 offload_ctx->open_record = NULL; 268 offload_ctx->open_record = NULL;
@@ -302,6 +286,38 @@ static int tls_push_record(struct sock *sk,
302 return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags); 286 return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
303} 287}
304 288
289static int tls_device_record_close(struct sock *sk,
290 struct tls_context *ctx,
291 struct tls_record_info *record,
292 struct page_frag *pfrag,
293 unsigned char record_type)
294{
295 struct tls_prot_info *prot = &ctx->prot_info;
296 int ret;
297
298 /* append tag
299 * device will fill in the tag, we just need to append a placeholder
300 * use socket memory to improve coalescing (re-using a single buffer
301 * increases frag count)
302 * if we can't allocate memory now, steal some back from data
303 */
304 if (likely(skb_page_frag_refill(prot->tag_size, pfrag,
305 sk->sk_allocation))) {
306 ret = 0;
307 tls_append_frag(record, pfrag, prot->tag_size);
308 } else {
309 ret = prot->tag_size;
310 if (record->len <= prot->overhead_size)
311 return -ENOMEM;
312 }
313
314 /* fill prepend */
315 tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
316 record->len - prot->overhead_size,
317 record_type, prot->version);
318 return ret;
319}
320
305static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx, 321static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
306 struct page_frag *pfrag, 322 struct page_frag *pfrag,
307 size_t prepend_size) 323 size_t prepend_size)
@@ -452,13 +468,24 @@ last_record:
452 468
453 if (done || record->len >= max_open_record_len || 469 if (done || record->len >= max_open_record_len ||
454 (record->num_frags >= MAX_SKB_FRAGS - 1)) { 470 (record->num_frags >= MAX_SKB_FRAGS - 1)) {
471 rc = tls_device_record_close(sk, tls_ctx, record,
472 pfrag, record_type);
473 if (rc) {
474 if (rc > 0) {
475 size += rc;
476 } else {
477 size = orig_size;
478 destroy_record(record);
479 ctx->open_record = NULL;
480 break;
481 }
482 }
483
455 rc = tls_push_record(sk, 484 rc = tls_push_record(sk,
456 tls_ctx, 485 tls_ctx,
457 ctx, 486 ctx,
458 record, 487 record,
459 pfrag, 488 tls_push_record_flags);
460 tls_push_record_flags,
461 record_type);
462 if (rc < 0) 489 if (rc < 0)
463 break; 490 break;
464 } 491 }