summaryrefslogtreecommitdiffstats
path: root/net/tls
diff options
context:
space:
mode:
author    David S. Miller <davem@davemloft.net>  2018-05-21 16:01:54 -0400
committer David S. Miller <davem@davemloft.net>  2018-05-21 16:01:54 -0400
commit    6f6e434aa267a6030477876d89444fe3a6b7a48d (patch)
tree      67755c422f1e85451aa646eae21d4c6f3f389d9f /net/tls
parent    44c752fe584d8b9f6e0756ecffa8691677471862 (diff)
parent    6741c4bb389da103c0d79ad1961884628900bfe6 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
S390 bpf_jit.S is removed in net-next and had changes in 'net', since that code isn't used any more take the removal. TLS data structures split the TX and RX components in 'net-next', put the new struct members from the bug fix in 'net' into the RX part. The 'net-next' tree had some reworking of how the ERSPAN code works in the GRE tunneling code, overlapping with a one-line headroom calculation fix in 'net'. Overlapping changes in __sock_map_ctx_update_elem(), keep the bits that read the prog members via READ_ONCE() into local variables before using them. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tls')
-rw-r--r--  net/tls/tls_sw.c  |  9 +++++----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 5c3909c311f1..839e1e165a0c 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -680,7 +680,6 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
 	struct scatterlist *sgin = &sgin_arr[0];
 	struct strp_msg *rxm = strp_msg(skb);
 	int ret, nsg = ARRAY_SIZE(sgin_arr);
-	char aad_recv[TLS_AAD_SPACE_SIZE];
 	struct sk_buff *unused;
 
 	ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
@@ -697,13 +696,13 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
 	}
 
 	sg_init_table(sgin, nsg);
-	sg_set_buf(&sgin[0], aad_recv, sizeof(aad_recv));
+	sg_set_buf(&sgin[0], ctx->rx_aad_ciphertext, TLS_AAD_SPACE_SIZE);
 
 	nsg = skb_to_sgvec(skb, &sgin[1],
 			   rxm->offset + tls_ctx->rx.prepend_size,
 			   rxm->full_len - tls_ctx->rx.prepend_size);
 
-	tls_make_aad(aad_recv,
+	tls_make_aad(ctx->rx_aad_ciphertext,
 		     rxm->full_len - tls_ctx->rx.overhead_size,
 		     tls_ctx->rx.rec_seq,
 		     tls_ctx->rx.rec_seq_size,
@@ -802,12 +801,12 @@ int tls_sw_recvmsg(struct sock *sk,
 		if (to_copy <= len && page_count < MAX_SKB_FRAGS &&
 		    likely(!(flags & MSG_PEEK))) {
 			struct scatterlist sgin[MAX_SKB_FRAGS + 1];
-			char unused[21];
 			int pages = 0;
 
 			zc = true;
 			sg_init_table(sgin, MAX_SKB_FRAGS + 1);
-			sg_set_buf(&sgin[0], unused, 13);
+			sg_set_buf(&sgin[0], ctx->rx_aad_plaintext,
+				   TLS_AAD_SPACE_SIZE);
 
 			err = zerocopy_from_iter(sk, &msg->msg_iter,
 						 to_copy, &pages,