aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorDave Watson <davejwatson@fb.com>2019-01-30 16:58:31 -0500
committerDavid S. Miller <davem@davemloft.net>2019-02-01 18:00:55 -0500
commit130b392c6cd6b2aed1b7eb32253d4920babb4891 (patch)
tree99a8b337cdf5fcb3f23374b3100ed8e3ea295e19 /net
parentfedf201e12960bd2fab0596422851b20a8d80d20 (diff)
net: tls: Add tls 1.3 support
TLS 1.3 has minor changes from TLS 1.2 at the record layer. * Header now hardcodes the same version and application content type in the header. * The real content type is appended after the data, before encryption (or after decryption). * The IV is xored with the sequence number, instead of concatenating four bytes of IV with the explicit IV. * Zero-padding: No explicit length is given, we search backwards from the end of the decrypted data for the first non-zero byte, which is the content type. Currently recv supports reading zero-padding, but there is no way for send to add zero padding. Signed-off-by: Dave Watson <davejwatson@fb.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/tls/tls_device.c5
-rw-r--r--net/tls/tls_device_fallback.c3
-rw-r--r--net/tls/tls_main.c3
-rw-r--r--net/tls/tls_sw.c116
4 files changed, 101 insertions, 26 deletions
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index d753e362d2d9..7ee9008b2187 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -257,7 +257,8 @@ static int tls_push_record(struct sock *sk,
257 tls_fill_prepend(ctx, 257 tls_fill_prepend(ctx,
258 skb_frag_address(frag), 258 skb_frag_address(frag),
259 record->len - ctx->tx.prepend_size, 259 record->len - ctx->tx.prepend_size,
260 record_type); 260 record_type,
261 ctx->crypto_send.info.version);
261 262
262 /* HW doesn't care about the data in the tag, because it fills it. */ 263 /* HW doesn't care about the data in the tag, because it fills it. */
263 dummy_tag_frag.page = skb_frag_page(frag); 264 dummy_tag_frag.page = skb_frag_page(frag);
@@ -270,7 +271,7 @@ static int tls_push_record(struct sock *sk,
270 spin_unlock_irq(&offload_ctx->lock); 271 spin_unlock_irq(&offload_ctx->lock);
271 offload_ctx->open_record = NULL; 272 offload_ctx->open_record = NULL;
272 set_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags); 273 set_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
273 tls_advance_record_sn(sk, &ctx->tx); 274 tls_advance_record_sn(sk, &ctx->tx, ctx->crypto_send.info.version);
274 275
275 for (i = 0; i < record->num_frags; i++) { 276 for (i = 0; i < record->num_frags; i++) {
276 frag = &record->frags[i]; 277 frag = &record->frags[i];
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index 450a6dbc5a88..54c3a758f2a7 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -73,7 +73,8 @@ static int tls_enc_record(struct aead_request *aead_req,
73 len -= TLS_CIPHER_AES_GCM_128_IV_SIZE; 73 len -= TLS_CIPHER_AES_GCM_128_IV_SIZE;
74 74
75 tls_make_aad(aad, len - TLS_CIPHER_AES_GCM_128_TAG_SIZE, 75 tls_make_aad(aad, len - TLS_CIPHER_AES_GCM_128_TAG_SIZE,
76 (char *)&rcd_sn, sizeof(rcd_sn), buf[0]); 76 (char *)&rcd_sn, sizeof(rcd_sn), buf[0],
77 TLS_1_2_VERSION);
77 78
78 memcpy(iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, buf + TLS_HEADER_SIZE, 79 memcpy(iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, buf + TLS_HEADER_SIZE,
79 TLS_CIPHER_AES_GCM_128_IV_SIZE); 80 TLS_CIPHER_AES_GCM_128_IV_SIZE);
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 0f028cfdf835..d1c2fd9a3f63 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -463,7 +463,8 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
463 } 463 }
464 464
465 /* check version */ 465 /* check version */
466 if (crypto_info->version != TLS_1_2_VERSION) { 466 if (crypto_info->version != TLS_1_2_VERSION &&
467 crypto_info->version != TLS_1_3_VERSION) {
467 rc = -ENOTSUPP; 468 rc = -ENOTSUPP;
468 goto err_crypto_info; 469 goto err_crypto_info;
469 } 470 }
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 34f3523f668e..06d7ae97b929 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -120,6 +120,34 @@ static int skb_nsg(struct sk_buff *skb, int offset, int len)
120 return __skb_nsg(skb, offset, len, 0); 120 return __skb_nsg(skb, offset, len, 0);
121} 121}
122 122
123static int padding_length(struct tls_sw_context_rx *ctx,
124 struct tls_context *tls_ctx, struct sk_buff *skb)
125{
126 struct strp_msg *rxm = strp_msg(skb);
127 int sub = 0;
128
129 /* Determine zero-padding length */
130 if (tls_ctx->crypto_recv.info.version == TLS_1_3_VERSION) {
131 char content_type = 0;
132 int err;
133 int back = 17;
134
135 while (content_type == 0) {
136 if (back > rxm->full_len)
137 return -EBADMSG;
138 err = skb_copy_bits(skb,
139 rxm->offset + rxm->full_len - back,
140 &content_type, 1);
141 if (content_type)
142 break;
143 sub++;
144 back++;
145 }
146 ctx->control = content_type;
147 }
148 return sub;
149}
150
123static void tls_decrypt_done(struct crypto_async_request *req, int err) 151static void tls_decrypt_done(struct crypto_async_request *req, int err)
124{ 152{
125 struct aead_request *aead_req = (struct aead_request *)req; 153 struct aead_request *aead_req = (struct aead_request *)req;
@@ -142,7 +170,7 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
142 tls_err_abort(skb->sk, err); 170 tls_err_abort(skb->sk, err);
143 } else { 171 } else {
144 struct strp_msg *rxm = strp_msg(skb); 172 struct strp_msg *rxm = strp_msg(skb);
145 173 rxm->full_len -= padding_length(ctx, tls_ctx, skb);
146 rxm->offset += tls_ctx->rx.prepend_size; 174 rxm->offset += tls_ctx->rx.prepend_size;
147 rxm->full_len -= tls_ctx->rx.overhead_size; 175 rxm->full_len -= tls_ctx->rx.overhead_size;
148 } 176 }
@@ -448,6 +476,8 @@ static int tls_do_encryption(struct sock *sk,
448 int rc; 476 int rc;
449 477
450 memcpy(rec->iv_data, tls_ctx->tx.iv, sizeof(rec->iv_data)); 478 memcpy(rec->iv_data, tls_ctx->tx.iv, sizeof(rec->iv_data));
479 xor_iv_with_seq(tls_ctx->crypto_send.info.version, rec->iv_data,
480 tls_ctx->tx.rec_seq);
451 481
452 sge->offset += tls_ctx->tx.prepend_size; 482 sge->offset += tls_ctx->tx.prepend_size;
453 sge->length -= tls_ctx->tx.prepend_size; 483 sge->length -= tls_ctx->tx.prepend_size;
@@ -483,7 +513,8 @@ static int tls_do_encryption(struct sock *sk,
483 513
484 /* Unhook the record from context if encryption is not failure */ 514 /* Unhook the record from context if encryption is not failure */
485 ctx->open_rec = NULL; 515 ctx->open_rec = NULL;
486 tls_advance_record_sn(sk, &tls_ctx->tx); 516 tls_advance_record_sn(sk, &tls_ctx->tx,
517 tls_ctx->crypto_send.info.version);
487 return rc; 518 return rc;
488} 519}
489 520
@@ -640,7 +671,17 @@ static int tls_push_record(struct sock *sk, int flags,
640 671
641 i = msg_pl->sg.end; 672 i = msg_pl->sg.end;
642 sk_msg_iter_var_prev(i); 673 sk_msg_iter_var_prev(i);
643 sg_mark_end(sk_msg_elem(msg_pl, i)); 674
675 rec->content_type = record_type;
676 if (tls_ctx->crypto_send.info.version == TLS_1_3_VERSION) {
677 /* Add content type to end of message. No padding added */
678 sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
679 sg_mark_end(&rec->sg_content_type);
680 sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
681 &rec->sg_content_type);
682 } else {
683 sg_mark_end(sk_msg_elem(msg_pl, i));
684 }
644 685
645 i = msg_pl->sg.start; 686 i = msg_pl->sg.start;
646 sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ? 687 sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ?
@@ -653,18 +694,22 @@ static int tls_push_record(struct sock *sk, int flags,
653 i = msg_en->sg.start; 694 i = msg_en->sg.start;
654 sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]); 695 sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
655 696
656 tls_make_aad(rec->aad_space, msg_pl->sg.size, 697 tls_make_aad(rec->aad_space, msg_pl->sg.size + tls_ctx->tx.tail_size,
657 tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size, 698 tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
658 record_type); 699 record_type,
700 tls_ctx->crypto_send.info.version);
659 701
660 tls_fill_prepend(tls_ctx, 702 tls_fill_prepend(tls_ctx,
661 page_address(sg_page(&msg_en->sg.data[i])) + 703 page_address(sg_page(&msg_en->sg.data[i])) +
662 msg_en->sg.data[i].offset, msg_pl->sg.size, 704 msg_en->sg.data[i].offset,
663 record_type); 705 msg_pl->sg.size + tls_ctx->tx.tail_size,
706 record_type,
707 tls_ctx->crypto_send.info.version);
664 708
665 tls_ctx->pending_open_record_frags = false; 709 tls_ctx->pending_open_record_frags = false;
666 710
667 rc = tls_do_encryption(sk, tls_ctx, ctx, req, msg_pl->sg.size, i); 711 rc = tls_do_encryption(sk, tls_ctx, ctx, req,
712 msg_pl->sg.size + tls_ctx->tx.tail_size, i);
668 if (rc < 0) { 713 if (rc < 0) {
669 if (rc != -EINPROGRESS) { 714 if (rc != -EINPROGRESS) {
670 tls_err_abort(sk, EBADMSG); 715 tls_err_abort(sk, EBADMSG);
@@ -1292,7 +1337,8 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
1292 u8 *aad, *iv, *mem = NULL; 1337 u8 *aad, *iv, *mem = NULL;
1293 struct scatterlist *sgin = NULL; 1338 struct scatterlist *sgin = NULL;
1294 struct scatterlist *sgout = NULL; 1339 struct scatterlist *sgout = NULL;
1295 const int data_len = rxm->full_len - tls_ctx->rx.overhead_size; 1340 const int data_len = rxm->full_len - tls_ctx->rx.overhead_size +
1341 tls_ctx->rx.tail_size;
1296 1342
1297 if (*zc && (out_iov || out_sg)) { 1343 if (*zc && (out_iov || out_sg)) {
1298 if (out_iov) 1344 if (out_iov)
@@ -1343,12 +1389,20 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
1343 kfree(mem); 1389 kfree(mem);
1344 return err; 1390 return err;
1345 } 1391 }
1346 memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE); 1392 if (tls_ctx->crypto_recv.info.version == TLS_1_3_VERSION)
1393 memcpy(iv, tls_ctx->rx.iv, crypto_aead_ivsize(ctx->aead_recv));
1394 else
1395 memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
1396
1397 xor_iv_with_seq(tls_ctx->crypto_recv.info.version, iv,
1398 tls_ctx->rx.rec_seq);
1347 1399
1348 /* Prepare AAD */ 1400 /* Prepare AAD */
1349 tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size, 1401 tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size +
1402 tls_ctx->rx.tail_size,
1350 tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size, 1403 tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size,
1351 ctx->control); 1404 ctx->control,
1405 tls_ctx->crypto_recv.info.version);
1352 1406
1353 /* Prepare sgin */ 1407 /* Prepare sgin */
1354 sg_init_table(sgin, n_sgin); 1408 sg_init_table(sgin, n_sgin);
@@ -1405,6 +1459,7 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
1405{ 1459{
1406 struct tls_context *tls_ctx = tls_get_ctx(sk); 1460 struct tls_context *tls_ctx = tls_get_ctx(sk);
1407 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1461 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1462 int version = tls_ctx->crypto_recv.info.version;
1408 struct strp_msg *rxm = strp_msg(skb); 1463 struct strp_msg *rxm = strp_msg(skb);
1409 int err = 0; 1464 int err = 0;
1410 1465
@@ -1417,13 +1472,17 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
1417 err = decrypt_internal(sk, skb, dest, NULL, chunk, zc, async); 1472 err = decrypt_internal(sk, skb, dest, NULL, chunk, zc, async);
1418 if (err < 0) { 1473 if (err < 0) {
1419 if (err == -EINPROGRESS) 1474 if (err == -EINPROGRESS)
1420 tls_advance_record_sn(sk, &tls_ctx->rx); 1475 tls_advance_record_sn(sk, &tls_ctx->rx,
1476 version);
1421 1477
1422 return err; 1478 return err;
1423 } 1479 }
1480
1481 rxm->full_len -= padding_length(ctx, tls_ctx, skb);
1482
1424 rxm->offset += tls_ctx->rx.prepend_size; 1483 rxm->offset += tls_ctx->rx.prepend_size;
1425 rxm->full_len -= tls_ctx->rx.overhead_size; 1484 rxm->full_len -= tls_ctx->rx.overhead_size;
1426 tls_advance_record_sn(sk, &tls_ctx->rx); 1485 tls_advance_record_sn(sk, &tls_ctx->rx, version);
1427 ctx->decrypted = true; 1486 ctx->decrypted = true;
1428 ctx->saved_data_ready(sk); 1487 ctx->saved_data_ready(sk);
1429 } else { 1488 } else {
@@ -1611,7 +1670,8 @@ int tls_sw_recvmsg(struct sock *sk,
1611 to_decrypt = rxm->full_len - tls_ctx->rx.overhead_size; 1670 to_decrypt = rxm->full_len - tls_ctx->rx.overhead_size;
1612 1671
1613 if (to_decrypt <= len && !is_kvec && !is_peek && 1672 if (to_decrypt <= len && !is_kvec && !is_peek &&
1614 ctx->control == TLS_RECORD_TYPE_DATA) 1673 ctx->control == TLS_RECORD_TYPE_DATA &&
1674 tls_ctx->crypto_recv.info.version != TLS_1_3_VERSION)
1615 zc = true; 1675 zc = true;
1616 1676
1617 err = decrypt_skb_update(sk, skb, &msg->msg_iter, 1677 err = decrypt_skb_update(sk, skb, &msg->msg_iter,
@@ -1835,9 +1895,12 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
1835 1895
1836 data_len = ((header[4] & 0xFF) | (header[3] << 8)); 1896 data_len = ((header[4] & 0xFF) | (header[3] << 8));
1837 1897
1838 cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size; 1898 cipher_overhead = tls_ctx->rx.tag_size;
1899 if (tls_ctx->crypto_recv.info.version != TLS_1_3_VERSION)
1900 cipher_overhead += tls_ctx->rx.iv_size;
1839 1901
1840 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) { 1902 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
1903 tls_ctx->rx.tail_size) {
1841 ret = -EMSGSIZE; 1904 ret = -EMSGSIZE;
1842 goto read_failure; 1905 goto read_failure;
1843 } 1906 }
@@ -1846,12 +1909,12 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
1846 goto read_failure; 1909 goto read_failure;
1847 } 1910 }
1848 1911
1849 if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) || 1912 /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
1850 header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) { 1913 if (header[1] != TLS_1_2_VERSION_MINOR ||
1914 header[2] != TLS_1_2_VERSION_MAJOR) {
1851 ret = -EINVAL; 1915 ret = -EINVAL;
1852 goto read_failure; 1916 goto read_failure;
1853 } 1917 }
1854
1855#ifdef CONFIG_TLS_DEVICE 1918#ifdef CONFIG_TLS_DEVICE
1856 handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset, 1919 handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
1857 *(u64*)tls_ctx->rx.rec_seq); 1920 *(u64*)tls_ctx->rx.rec_seq);
@@ -2100,10 +2163,19 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
2100 goto free_priv; 2163 goto free_priv;
2101 } 2164 }
2102 2165
2103 cctx->aad_size = TLS_AAD_SPACE_SIZE; 2166 if (crypto_info->version == TLS_1_3_VERSION) {
2167 nonce_size = 0;
2168 cctx->aad_size = TLS_HEADER_SIZE;
2169 cctx->tail_size = 1;
2170 } else {
2171 cctx->aad_size = TLS_AAD_SPACE_SIZE;
2172 cctx->tail_size = 0;
2173 }
2174
2104 cctx->prepend_size = TLS_HEADER_SIZE + nonce_size; 2175 cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
2105 cctx->tag_size = tag_size; 2176 cctx->tag_size = tag_size;
2106 cctx->overhead_size = cctx->prepend_size + cctx->tag_size; 2177 cctx->overhead_size = cctx->prepend_size + cctx->tag_size +
2178 cctx->tail_size;
2107 cctx->iv_size = iv_size; 2179 cctx->iv_size = iv_size;
2108 cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE, 2180 cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
2109 GFP_KERNEL); 2181 GFP_KERNEL);