aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorStephan Mueller <smueller@chronox.de>2016-12-05 09:26:19 -0500
committerHerbert Xu <herbert@gondor.apana.org.au>2016-12-07 06:55:36 -0500
commit0c1e16cd1ec41987cc6671a2bff46ac958c41eb5 (patch)
tree45dfa74c15b77ad815e8ac60bd92b23d8a7adc03
parent39eaf759466f4e3fbeaa39075512f4f345dffdc8 (diff)
crypto: algif_aead - fix AEAD tag memory handling
For encryption, the AEAD ciphers require AAD || PT as input and generate AAD || CT || Tag as output and vice versa for decryption. Prior to this patch, the AF_ALG interface for AEAD ciphers requires the buffer to be present as input for encryption. Similarly, the output buffer for decryption required the presence of the tag buffer too. This implies that the kernel reads / writes data buffers from/to kernel space even though this operation is not required. This patch changes the AF_ALG AEAD interface to be consistent with the in-kernel AEAD cipher requirements. Due to this handling, the changes are transparent to user space with one exception: the return code of recv indicates the amount of output data. That output buffer has a different size compared to before the patch which implies that the return code of recv will also be different. For example, a decryption operation uses 16 bytes AAD, 16 bytes CT and 16 bytes tag, the AF_ALG AEAD interface before showed a recv return code of 48 (bytes) whereas after this patch, the return code is 32 since the tag is not returned any more. Reported-by: Mat Martineau <mathew.j.martineau@linux.intel.com> Signed-off-by: Stephan Mueller <smueller@chronox.de> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--crypto/algif_aead.c57
1 files changed, 36 insertions, 21 deletions
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 80a0f1a78551..2fbf239bb1f2 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -81,7 +81,11 @@ static inline bool aead_sufficient_data(struct aead_ctx *ctx)
81{ 81{
82 unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req)); 82 unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
83 83
84 return ctx->used >= ctx->aead_assoclen + as; 84 /*
85 * The minimum amount of memory needed for an AEAD cipher is
86 * the AAD and in case of decryption the tag.
87 */
88 return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
85} 89}
86 90
87static void aead_reset_ctx(struct aead_ctx *ctx) 91static void aead_reset_ctx(struct aead_ctx *ctx)
@@ -426,12 +430,15 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
426 goto unlock; 430 goto unlock;
427 } 431 }
428 432
429 used = ctx->used;
430 outlen = used;
431
432 if (!aead_sufficient_data(ctx)) 433 if (!aead_sufficient_data(ctx))
433 goto unlock; 434 goto unlock;
434 435
436 used = ctx->used;
437 if (ctx->enc)
438 outlen = used + as;
439 else
440 outlen = used - as;
441
435 req = sock_kmalloc(sk, reqlen, GFP_KERNEL); 442 req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
436 if (unlikely(!req)) 443 if (unlikely(!req))
437 goto unlock; 444 goto unlock;
@@ -445,7 +452,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
445 aead_request_set_ad(req, ctx->aead_assoclen); 452 aead_request_set_ad(req, ctx->aead_assoclen);
446 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 453 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
447 aead_async_cb, sk); 454 aead_async_cb, sk);
448 used -= ctx->aead_assoclen + (ctx->enc ? as : 0); 455 used -= ctx->aead_assoclen;
449 456
450 /* take over all tx sgls from ctx */ 457 /* take over all tx sgls from ctx */
451 areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur, 458 areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur,
@@ -461,7 +468,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
461 areq->tsgls = sgl->cur; 468 areq->tsgls = sgl->cur;
462 469
463 /* create rx sgls */ 470 /* create rx sgls */
464 while (iov_iter_count(&msg->msg_iter)) { 471 while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
465 size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter), 472 size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
466 (outlen - usedpages)); 473 (outlen - usedpages));
467 474
@@ -491,16 +498,14 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
491 498
492 last_rsgl = rsgl; 499 last_rsgl = rsgl;
493 500
494 /* we do not need more iovecs as we have sufficient memory */
495 if (outlen <= usedpages)
496 break;
497
498 iov_iter_advance(&msg->msg_iter, err); 501 iov_iter_advance(&msg->msg_iter, err);
499 } 502 }
500 err = -EINVAL; 503
501 /* ensure output buffer is sufficiently large */ 504 /* ensure output buffer is sufficiently large */
502 if (usedpages < outlen) 505 if (usedpages < outlen) {
503 goto free; 506 err = -EINVAL;
507 goto unlock;
508 }
504 509
505 aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used, 510 aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
506 areq->iv); 511 areq->iv);
@@ -571,6 +576,7 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
571 goto unlock; 576 goto unlock;
572 } 577 }
573 578
579 /* data length provided by caller via sendmsg/sendpage */
574 used = ctx->used; 580 used = ctx->used;
575 581
576 /* 582 /*
@@ -585,16 +591,27 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
585 if (!aead_sufficient_data(ctx)) 591 if (!aead_sufficient_data(ctx))
586 goto unlock; 592 goto unlock;
587 593
588 outlen = used; 594 /*
595 * Calculate the minimum output buffer size holding the result of the
596 * cipher operation. When encrypting data, the receiving buffer is
597 * larger by the tag length compared to the input buffer as the
598 * encryption operation generates the tag. For decryption, the input
599 * buffer provides the tag which is consumed resulting in only the
600 * plaintext without a buffer for the tag returned to the caller.
601 */
602 if (ctx->enc)
603 outlen = used + as;
604 else
605 outlen = used - as;
589 606
590 /* 607 /*
591 * The cipher operation input data is reduced by the associated data 608 * The cipher operation input data is reduced by the associated data
592 * length as this data is processed separately later on. 609 * length as this data is processed separately later on.
593 */ 610 */
594 used -= ctx->aead_assoclen + (ctx->enc ? as : 0); 611 used -= ctx->aead_assoclen;
595 612
596 /* convert iovecs of output buffers into scatterlists */ 613 /* convert iovecs of output buffers into scatterlists */
597 while (iov_iter_count(&msg->msg_iter)) { 614 while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
598 size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter), 615 size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
599 (outlen - usedpages)); 616 (outlen - usedpages));
600 617
@@ -621,16 +638,14 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
621 638
622 last_rsgl = rsgl; 639 last_rsgl = rsgl;
623 640
624 /* we do not need more iovecs as we have sufficient memory */
625 if (outlen <= usedpages)
626 break;
627 iov_iter_advance(&msg->msg_iter, err); 641 iov_iter_advance(&msg->msg_iter, err);
628 } 642 }
629 643
630 err = -EINVAL;
631 /* ensure output buffer is sufficiently large */ 644 /* ensure output buffer is sufficiently large */
632 if (usedpages < outlen) 645 if (usedpages < outlen) {
646 err = -EINVAL;
633 goto unlock; 647 goto unlock;
648 }
634 649
635 sg_mark_end(sgl->sg + sgl->cur - 1); 650 sg_mark_end(sgl->sg + sgl->cur - 1);
636 aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg, 651 aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,