summaryrefslogtreecommitdiffstats
path: root/net/tls
diff options
context:
space:
mode:
authorDaniel Borkmann <daniel@iogearbox.net>2018-06-14 21:07:45 -0400
committerDavid S. Miller <davem@davemloft.net>2018-06-15 12:14:30 -0400
commita447da7d00410278c90d3576782a43f8b675d7be (patch)
tree1ea7b2b56e13cae18573b37ab40bb134a4fcc8c4 /net/tls
parent695ad876d091530e3eb5def7827f8d0106ca3e9f (diff)
tls: fix use-after-free in tls_push_record
syzkaller managed to trigger a use-after-free in tls like the following: BUG: KASAN: use-after-free in tls_push_record.constprop.15+0x6a2/0x810 [tls] Write of size 1 at addr ffff88037aa08000 by task a.out/2317 CPU: 3 PID: 2317 Comm: a.out Not tainted 4.17.0+ #144 Hardware name: LENOVO 20FBCTO1WW/20FBCTO1WW, BIOS N1FET47W (1.21 ) 11/28/2016 Call Trace: dump_stack+0x71/0xab print_address_description+0x6a/0x280 kasan_report+0x258/0x380 ? tls_push_record.constprop.15+0x6a2/0x810 [tls] tls_push_record.constprop.15+0x6a2/0x810 [tls] tls_sw_push_pending_record+0x2e/0x40 [tls] tls_sk_proto_close+0x3fe/0x710 [tls] ? tcp_check_oom+0x4c0/0x4c0 ? tls_write_space+0x260/0x260 [tls] ? kmem_cache_free+0x88/0x1f0 inet_release+0xd6/0x1b0 __sock_release+0xc0/0x240 sock_close+0x11/0x20 __fput+0x22d/0x660 task_work_run+0x114/0x1a0 do_exit+0x71a/0x2780 ? mm_update_next_owner+0x650/0x650 ? handle_mm_fault+0x2f5/0x5f0 ? __do_page_fault+0x44f/0xa50 ? mm_fault_error+0x2d0/0x2d0 do_group_exit+0xde/0x300 __x64_sys_exit_group+0x3a/0x50 do_syscall_64+0x9a/0x300 ? page_fault+0x8/0x30 entry_SYSCALL_64_after_hwframe+0x44/0xa9 This happened through fault injection where aead_req allocation in tls_do_encryption() eventually failed and we returned -ENOMEM from the function. Turns out that the use-after-free is triggered from tls_sw_sendmsg() in the second tls_push_record(). The error then triggers a jump to waiting for memory in sk_stream_wait_memory() resp. returning immediately in case of MSG_DONTWAIT. What follows is the trim_both_sgl(sk, orig_size), which drops elements from the sg list added via tls_sw_sendmsg(). Now the use-after-free gets triggered when the socket is being closed, where tls_sk_proto_close() callback is invoked. The tls_complete_pending_work() will figure that there's a pending closed tls record to be flushed and thus calls into the tls_push_pending_closed_record() from there. 
ctx->push_pending_record() is called from the latter, which is the tls_sw_push_pending_record() from the sw path. This again calls into tls_push_record(). And here the tls_fill_prepend() will panic since the buffer address has been freed earlier via trim_both_sgl(). One way to fix it is to move the aead request allocation out of tls_do_encryption() early into tls_push_record(). This means we don't prep the tls header and advance state to TLS_PENDING_CLOSED_RECORD before an allocation that could potentially fail has happened. That fixes the issue on my side. Fixes: 3c4d7559159b ("tls: kernel TLS support") Reported-by: syzbot+5c74af81c547738e1684@syzkaller.appspotmail.com Reported-by: syzbot+709f2810a6a05f11d4d3@syzkaller.appspotmail.com Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Acked-by: Dave Watson <davejwatson@fb.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tls')
-rw-r--r--net/tls/tls_sw.c26
1 files changed, 13 insertions, 13 deletions
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 34895b7c132d..2945a3bd538c 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -191,18 +191,12 @@ static void tls_free_both_sg(struct sock *sk)
191} 191}
192 192
193static int tls_do_encryption(struct tls_context *tls_ctx, 193static int tls_do_encryption(struct tls_context *tls_ctx,
194 struct tls_sw_context_tx *ctx, size_t data_len, 194 struct tls_sw_context_tx *ctx,
195 gfp_t flags) 195 struct aead_request *aead_req,
196 size_t data_len)
196{ 197{
197 unsigned int req_size = sizeof(struct aead_request) +
198 crypto_aead_reqsize(ctx->aead_send);
199 struct aead_request *aead_req;
200 int rc; 198 int rc;
201 199
202 aead_req = kzalloc(req_size, flags);
203 if (!aead_req)
204 return -ENOMEM;
205
206 ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size; 200 ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
207 ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size; 201 ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;
208 202
@@ -219,7 +213,6 @@ static int tls_do_encryption(struct tls_context *tls_ctx,
219 ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size; 213 ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
220 ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size; 214 ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;
221 215
222 kfree(aead_req);
223 return rc; 216 return rc;
224} 217}
225 218
@@ -228,8 +221,14 @@ static int tls_push_record(struct sock *sk, int flags,
228{ 221{
229 struct tls_context *tls_ctx = tls_get_ctx(sk); 222 struct tls_context *tls_ctx = tls_get_ctx(sk);
230 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 223 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
224 struct aead_request *req;
231 int rc; 225 int rc;
232 226
227 req = kzalloc(sizeof(struct aead_request) +
228 crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation);
229 if (!req)
230 return -ENOMEM;
231
233 sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1); 232 sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
234 sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1); 233 sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);
235 234
@@ -245,15 +244,14 @@ static int tls_push_record(struct sock *sk, int flags,
245 tls_ctx->pending_open_record_frags = 0; 244 tls_ctx->pending_open_record_frags = 0;
246 set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags); 245 set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);
247 246
248 rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size, 247 rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
249 sk->sk_allocation);
250 if (rc < 0) { 248 if (rc < 0) {
251 /* If we are called from write_space and 249 /* If we are called from write_space and
252 * we fail, we need to set this SOCK_NOSPACE 250 * we fail, we need to set this SOCK_NOSPACE
253 * to trigger another write_space in the future. 251 * to trigger another write_space in the future.
254 */ 252 */
255 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 253 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
256 return rc; 254 goto out_req;
257 } 255 }
258 256
259 free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem, 257 free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
@@ -268,6 +266,8 @@ static int tls_push_record(struct sock *sk, int flags,
268 tls_err_abort(sk, EBADMSG); 266 tls_err_abort(sk, EBADMSG);
269 267
270 tls_advance_record_sn(sk, &tls_ctx->tx); 268 tls_advance_record_sn(sk, &tls_ctx->tx);
269out_req:
270 kfree(req);
271 return rc; 271 return rc;
272} 272}
273 273