author    Gilad Ben-Yossef <gilad@benyossef.com>	2019-04-18 09:39:01 -0400
committer Herbert Xu <herbert@gondor.apana.org.au>	2019-04-25 03:38:15 -0400
commit    05c292afb0c0545c0cf084172db13e544eeb8f56 (patch)
tree      317214521e23bc1e88d80aebadcc97d62ea46aa4
parent    c776f7d37b6bf3663c838b2d2223f8ec1b523b12 (diff)
crypto: ccree - zap entire sg on aead request unmap
We were trying to be clever and zap from the cache only the required
length of the scatter list on AEAD request completion, and got it
wrong. As Knuth said: "when in doubt, use brute force". Zap the whole
length of the scatter list.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Cc: stable@vger.kernel.org # v4.19+
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
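For background, the DMA API requires that dma_unmap_sg() be handed the
same entry count that was used when the list was mapped; unmapping
fewer entries can leave stale cache lines covering the tail of the
buffer on non-coherent systems. Below is a minimal sketch of that
pairing (the function name sg_dma_roundtrip is hypothetical, not taken
from this driver):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch: map a scatterlist for DMA, then unmap it in full.
 * Passing a smaller nents to dma_unmap_sg() than was mapped
 * (e.g. via sg_nents_for_len() with an underestimated length)
 * would skip the trailing entries entirely. */
static int sg_dma_roundtrip(struct device *dev, struct scatterlist *sgl)
{
	int nents = sg_nents(sgl);	/* every entry in the list */

	if (!dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL))
		return -ENOMEM;

	/* ... device reads/writes the buffer here ... */

	dma_unmap_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
	return 0;
}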
-rw-r--r--	drivers/crypto/ccree/cc_buffer_mgr.c | 13
1 file changed, 2 insertions(+), 11 deletions(-)
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index fa625bdde3f9..09dceec7d828 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -517,9 +517,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
-	u32 size_to_unmap = 0;
 
 	if (areq_ctx->mac_buf_dma_addr) {
 		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -576,19 +574,12 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
 		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
 		areq_ctx->assoclen, req->cryptlen);
-	size_to_unmap = areq_ctx->assoclen + req->cryptlen;
-	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
-		size_to_unmap += areq_ctx->req_authsize;
-	if (areq_ctx->is_gcm4543)
-		size_to_unmap += crypto_aead_ivsize(tfm);
 
-	dma_unmap_sg(dev, req->src, sg_nents_for_len(req->src, size_to_unmap),
-		     DMA_BIDIRECTIONAL);
+	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
 	if (req->src != req->dst) {
 		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 			sg_virt(req->dst));
-		dma_unmap_sg(dev, req->dst,
-			     sg_nents_for_len(req->dst, size_to_unmap),
-			     DMA_BIDIRECTIONAL);
+		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
+			     DMA_BIDIRECTIONAL);
 	}
 	if (drvdata->coherent &&
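To make the difference concrete: sg_nents() counts every entry in a
scatterlist, while sg_nents_for_len() counts only as many entries as
are needed to cover the given byte length. If the computed
size_to_unmap fell short of what was actually mapped, the old code's
dma_unmap_sg() calls silently skipped the trailing entries. A hedged
sketch of the two counts (a hypothetical three-page list, not driver
code):

#include <linux/printk.h>
#include <linux/scatterlist.h>

static void sg_count_demo(struct page *pages[3])
{
	struct scatterlist sgl[3];
	int i;

	sg_init_table(sgl, 3);
	for (i = 0; i < 3; i++)
		sg_set_page(&sgl[i], pages[i], PAGE_SIZE, 0);

	/* sg_nents(sgl) == 3: the whole 12 KiB list.
	 * sg_nents_for_len(sgl, 2 * PAGE_SIZE) == 2: only the first
	 * 8 KiB. Unmapping with the latter after mapping all three
	 * entries leaves the third page's cache lines untouched;
	 * that is the class of bug this patch removes. */
	pr_info("all=%d partial=%d\n",
		sg_nents(sgl), sg_nents_for_len(sgl, 2 * PAGE_SIZE));
}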