aboutsummaryrefslogtreecommitdiffstats
path: root/net/tls
diff options
context:
space:
mode:
authorJohn Fastabend <john.fastabend@gmail.com>2018-09-14 16:01:46 -0400
committerDavid S. Miller <davem@davemloft.net>2018-09-17 11:01:36 -0400
commit7a3dd8c8979ce48b99cb0e9b7435a97f0716138a (patch)
tree6679927b1b42441bacd11519152a54f7a676f545 /net/tls
parenta82738adff167593bbb9df90b4201ce4b3407d21 (diff)
tls: async support causes out-of-bounds access in crypto APIs
When async support was added it needed to access the sk from the async callback to report errors up the stack. The patch tried to use space after the aead request struct by directly setting the reqsize field in aead_request. This is an internal field that should not be used outside the crypto APIs. It is used by the crypto code to define extra space for private structures used in the crypto context. Users of the API then use crypto_aead_reqsize() and add the returned amount of bytes to the end of the request memory allocation before posting the request to encrypt/decrypt APIs.

So this breaks (with general protection fault and KASAN error, if enabled) because the request sent to decrypt is shorter than required, causing the crypto API out-of-bounds errors. Also it seems unlikely the sk is even valid by the time it gets to the callback because of memset in the crypto layer.

Anyways, fix this by holding the sk in the skb->sk field when the callback is set up; because the skb is already passed through to the callback handler via void* we can access it in the handler. Then in the handler we need to be careful to NULL the pointer again before kfree_skb. I added comments on both the setup (in tls_do_decryption) and where we clear it in the crypto callback handler tls_decrypt_done(). After this, selftests pass again and the KASAN errors/warnings are fixed.

Fixes: 94524d8fc965 ("net/tls: Add support for async decryption of tls records")
Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Reviewed-by: Vakul Garg <Vakul.garg@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tls')
-rw-r--r--  net/tls/tls_sw.c | 39 ++++++++++++++++++++----------------
1 file changed, 23 insertions(+), 16 deletions(-)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 8aa4c1dafd6a..f4aa7cdb01ca 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -122,25 +122,32 @@ static int skb_nsg(struct sk_buff *skb, int offset, int len)
 static void tls_decrypt_done(struct crypto_async_request *req, int err)
 {
 	struct aead_request *aead_req = (struct aead_request *)req;
-	struct decrypt_req_ctx *req_ctx =
-		(struct decrypt_req_ctx *)(aead_req + 1);
-
 	struct scatterlist *sgout = aead_req->dst;
-
-	struct tls_context *tls_ctx = tls_get_ctx(req_ctx->sk);
-	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
-	int pending = atomic_dec_return(&ctx->decrypt_pending);
+	struct tls_sw_context_rx *ctx;
+	struct tls_context *tls_ctx;
 	struct scatterlist *sg;
+	struct sk_buff *skb;
 	unsigned int pages;
+	int pending;
+
+	skb = (struct sk_buff *)req->data;
+	tls_ctx = tls_get_ctx(skb->sk);
+	ctx = tls_sw_ctx_rx(tls_ctx);
+	pending = atomic_dec_return(&ctx->decrypt_pending);
 
 	/* Propagate if there was an err */
 	if (err) {
 		ctx->async_wait.err = err;
-		tls_err_abort(req_ctx->sk, err);
+		tls_err_abort(skb->sk, err);
 	}
 
+	/* After using skb->sk to propagate sk through crypto async callback
+	 * we need to NULL it again.
+	 */
+	skb->sk = NULL;
+
 	/* Release the skb, pages and memory allocated for crypto req */
-	kfree_skb(req->data);
+	kfree_skb(skb);
 
 	/* Skip the first S/G entry as it points to AAD */
 	for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
@@ -175,11 +182,13 @@ static int tls_do_decryption(struct sock *sk,
 			       (u8 *)iv_recv);
 
 	if (async) {
-		struct decrypt_req_ctx *req_ctx;
-
-		req_ctx = (struct decrypt_req_ctx *)(aead_req + 1);
-		req_ctx->sk = sk;
-
+		/* Using skb->sk to push sk through to crypto async callback
+		 * handler. This allows propagating errors up to the socket
+		 * if needed. It _must_ be cleared in the async handler
+		 * before kfree_skb is called. We _know_ skb->sk is NULL
+		 * because it is a clone from strparser.
+		 */
+		skb->sk = sk;
 		aead_request_set_callback(aead_req,
 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
 					  tls_decrypt_done, skb);
@@ -1463,8 +1472,6 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 		goto free_aead;
 
 	if (sw_ctx_rx) {
-		(*aead)->reqsize = sizeof(struct decrypt_req_ctx);
-
 		/* Set up strparser */
 		memset(&cb, 0, sizeof(cb));
 		cb.rcv_msg = tls_queue;