Diffstat (limited to 'net/tls/tls_device.c')
-rw-r--r--  net/tls/tls_device.c | 24 ++++++++++++++----------
1 file changed, 14 insertions(+), 10 deletions(-)
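The hunks below replace per-direction size fields kept in ctx->tx with a connection-level struct tls_prot_info reached through the tls_context. The companion change to include/net/tls.h is not shown in this file's diff; the following is only a sketch of the fields this file dereferences, with u16 assumed from the local declarations in tls_set_device_offload, and the real definition may carry additional members.

/* Sketch only -- the authoritative definition lives in include/net/tls.h. */
struct tls_prot_info {
	u16 prepend_size;	/* TLS record header + explicit nonce */
	u16 tag_size;		/* authentication tag appended to each record */
	u16 overhead_size;	/* prepend_size + tag_size */
	u16 iv_size;
	u16 rec_seq_size;
};

struct tls_context {
	struct tls_prot_info prot_info;	/* embedded, hence &ctx->prot_info below */
	/* ... existing members such as crypto_send and tx ... */
};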
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 7ee9008b2187..a5c17c47d08a 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -247,6 +247,7 @@ static int tls_push_record(struct sock *sk,
 			   int flags,
 			   unsigned char record_type)
 {
+	struct tls_prot_info *prot = &ctx->prot_info;
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct page_frag dummy_tag_frag;
 	skb_frag_t *frag;
@@ -256,7 +257,7 @@ static int tls_push_record(struct sock *sk,
 	frag = &record->frags[0];
 	tls_fill_prepend(ctx,
 			 skb_frag_address(frag),
-			 record->len - ctx->tx.prepend_size,
+			 record->len - prot->prepend_size,
 			 record_type,
 			 ctx->crypto_send.info.version);
 
@@ -264,7 +265,7 @@ static int tls_push_record(struct sock *sk,
 	dummy_tag_frag.page = skb_frag_page(frag);
 	dummy_tag_frag.offset = 0;
 
-	tls_append_frag(record, &dummy_tag_frag, ctx->tx.tag_size);
+	tls_append_frag(record, &dummy_tag_frag, prot->tag_size);
 	record->end_seq = tp->write_seq + record->len;
 	spin_lock_irq(&offload_ctx->lock);
 	list_add_tail(&record->list, &offload_ctx->records_list);
@@ -347,6 +348,7 @@ static int tls_push_data(struct sock *sk,
 			 unsigned char record_type)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
+	struct tls_prot_info *prot = &tls_ctx->prot_info;
 	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
 	int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
 	int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
@@ -376,10 +378,10 @@ static int tls_push_data(struct sock *sk,
 	 * we need to leave room for an authentication tag.
 	 */
 	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
-			      tls_ctx->tx.prepend_size;
+			      prot->prepend_size;
 	do {
 		rc = tls_do_allocation(sk, ctx, pfrag,
-				       tls_ctx->tx.prepend_size);
+				       prot->prepend_size);
 		if (rc) {
 			rc = sk_stream_wait_memory(sk, &timeo);
 			if (!rc)
@@ -397,7 +399,7 @@ handle_error:
 		size = orig_size;
 		destroy_record(record);
 		ctx->open_record = NULL;
-	} else if (record->len > tls_ctx->tx.prepend_size) {
+	} else if (record->len > prot->prepend_size) {
 		goto last_record;
 	}
 
@@ -658,6 +660,8 @@ int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
 int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 {
 	u16 nonce_size, tag_size, iv_size, rec_seq_size;
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+	struct tls_prot_info *prot = &tls_ctx->prot_info;
 	struct tls_record_info *start_marker_record;
 	struct tls_offload_context_tx *offload_ctx;
 	struct tls_crypto_info *crypto_info;
@@ -703,10 +707,10 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 		goto free_offload_ctx;
 	}
 
-	ctx->tx.prepend_size = TLS_HEADER_SIZE + nonce_size;
-	ctx->tx.tag_size = tag_size;
-	ctx->tx.overhead_size = ctx->tx.prepend_size + ctx->tx.tag_size;
-	ctx->tx.iv_size = iv_size;
+	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
+	prot->tag_size = tag_size;
+	prot->overhead_size = prot->prepend_size + prot->tag_size;
+	prot->iv_size = iv_size;
 	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
 			     GFP_KERNEL);
 	if (!ctx->tx.iv) {
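For orientation, a worked example under the assumption that the cipher being configured is TLS 1.2 AES-GCM-128 (the suite the surrounding salt/IV handling refers to): TLS_HEADER_SIZE is 5 bytes, the explicit nonce is 8 bytes, and the tag is 16 bytes, so the assignments above evaluate to the values sketched below.

/* Illustrative arithmetic only, assuming TLS 1.2 AES-GCM-128:
 * nonce_size = 8, tag_size = 16, TLS_HEADER_SIZE = 5.
 */
prot->prepend_size  = 5 + 8;	/* 13: record header + explicit nonce */
prot->tag_size      = 16;
prot->overhead_size = 13 + 16;	/* 29 bytes added to every record */
prot->iv_size       = 8;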
@@ -716,7 +720,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 
 	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
 
-	ctx->tx.rec_seq_size = rec_seq_size;
+	prot->rec_seq_size = rec_seq_size;
 	ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
 	if (!ctx->tx.rec_seq) {
 		rc = -ENOMEM;
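After this refactor, code that only needs the protocol constants can reach them through the tls_context without touching the per-direction TX cipher state, mirroring the access pattern tls_push_data uses above. A minimal, hypothetical helper to illustrate that pattern; it is not part of this patch:

/* Hypothetical helper, for illustration only; not part of this patch. */
static inline u16 tls_record_overhead(struct sock *sk)
{
	struct tls_prot_info *prot = &tls_get_ctx(sk)->prot_info;

	return prot->overhead_size;
}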