diff options
author | tadeusz.struk@intel.com <tadeusz.struk@intel.com> | 2015-04-01 16:53:06 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-04-01 22:59:28 -0400 |
commit | 033f46b3c13d4072d8ee6b26dd1e90fdd06895d0 (patch) | |
tree | 635f3c1f4d3e5f39da720d0e28e0171a0063a226 /crypto/algif_skcipher.c | |
parent | 99949a74aa8f1b0b1befbd1afaa6959a3654cd72 (diff) |
crypto: algif - explicitly mark end of data
After the TX sgl is expanded we need to explicitly mark end of data
at the last buffer that contains data.
Changes in v2:
- use type 'bool' and true/false for 'mark'.
Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'crypto/algif_skcipher.c')
-rw-r--r-- | crypto/algif_skcipher.c | 12 |
1 file changed, 8 insertions, 4 deletions
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 2db1eb776932..0aa02635ceda 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c | |||
@@ -509,11 +509,11 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg, | |||
509 | struct skcipher_async_req *sreq; | 509 | struct skcipher_async_req *sreq; |
510 | struct ablkcipher_request *req; | 510 | struct ablkcipher_request *req; |
511 | struct skcipher_async_rsgl *last_rsgl = NULL; | 511 | struct skcipher_async_rsgl *last_rsgl = NULL; |
512 | unsigned int len = 0, tx_nents = skcipher_all_sg_nents(ctx); | 512 | unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx); |
513 | unsigned int reqlen = sizeof(struct skcipher_async_req) + | 513 | unsigned int reqlen = sizeof(struct skcipher_async_req) + |
514 | GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx); | 514 | GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx); |
515 | int i = 0; | ||
516 | int err = -ENOMEM; | 515 | int err = -ENOMEM; |
516 | bool mark = false; | ||
517 | 517 | ||
518 | lock_sock(sk); | 518 | lock_sock(sk); |
519 | req = kmalloc(reqlen, GFP_KERNEL); | 519 | req = kmalloc(reqlen, GFP_KERNEL); |
@@ -555,7 +555,7 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg, | |||
555 | iov_iter_count(&msg->msg_iter)); | 555 | iov_iter_count(&msg->msg_iter)); |
556 | used = min_t(unsigned long, used, sg->length); | 556 | used = min_t(unsigned long, used, sg->length); |
557 | 557 | ||
558 | if (i == tx_nents) { | 558 | if (txbufs == tx_nents) { |
559 | struct scatterlist *tmp; | 559 | struct scatterlist *tmp; |
560 | int x; | 560 | int x; |
561 | /* Ran out of tx slots in async request | 561 | /* Ran out of tx slots in async request |
@@ -573,10 +573,11 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg, | |||
573 | kfree(sreq->tsg); | 573 | kfree(sreq->tsg); |
574 | sreq->tsg = tmp; | 574 | sreq->tsg = tmp; |
575 | tx_nents *= 2; | 575 | tx_nents *= 2; |
576 | mark = true; | ||
576 | } | 577 | } |
577 | /* Need to take over the tx sgl from ctx | 578 | /* Need to take over the tx sgl from ctx |
578 | * to the asynch req - these sgls will be freed later */ | 579 | * to the asynch req - these sgls will be freed later */ |
579 | sg_set_page(sreq->tsg + i++, sg_page(sg), sg->length, | 580 | sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length, |
580 | sg->offset); | 581 | sg->offset); |
581 | 582 | ||
582 | if (list_empty(&sreq->list)) { | 583 | if (list_empty(&sreq->list)) { |
@@ -604,6 +605,9 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg, | |||
604 | iov_iter_advance(&msg->msg_iter, used); | 605 | iov_iter_advance(&msg->msg_iter, used); |
605 | } | 606 | } |
606 | 607 | ||
608 | if (mark) | ||
609 | sg_mark_end(sreq->tsg + txbufs - 1); | ||
610 | |||
607 | ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg, | 611 | ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg, |
608 | len, sreq->iv); | 612 | len, sreq->iv); |
609 | err = ctx->enc ? crypto_ablkcipher_encrypt(req) : | 613 | err = ctx->enc ? crypto_ablkcipher_encrypt(req) : |