author	Harsh Jain <harsh@chelsio.com>	2017-01-24 00:04:32 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2017-02-03 04:45:44 -0500
commit	94e1dab1c94715e18bb0bada503de3f3d7593076 (patch)
tree	2bfaeb25c1875f4b26ccdfa7ae2666d875da1c0e
parent	685ce0626840e2673fe64ea8807684f7324fec5f (diff)
crypto: chcr - Fix panic on dma_unmap_sg

Save DMA mapped sg list addresses to request context buffer.

Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--	drivers/crypto/chelsio/chcr_algo.c	49
-rw-r--r--	drivers/crypto/chelsio/chcr_crypto.h	3
2 files changed, 29 insertions(+), 23 deletions(-)
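As a reading aid before the hunks: the submit paths built the forwarded destination scatterlist in a stack-local dst_sg[2] and mapped that, while the completion handler unmapped req->dst, so the unmap no longer matched the mapping once the stack frame was gone. The patch moves the ffwd backing storage and the mapped pointer into chcr_aead_reqctx so both paths use the same list. Below is a minimal, self-contained sketch of that ownership pattern; struct demo_reqctx and the demo_* helpers are hypothetical, simplified names, not the driver's actual code.

/*
 * Simplified sketch (hypothetical names) of the pattern this patch adopts:
 * the scatterlist produced by scatterwalk_ffwd() is backed by storage owned
 * by the per-request context, and the resulting pointer plus entry count are
 * saved there, so the completion handler unmaps exactly what was mapped.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>

struct demo_reqctx {                    /* mirrors the new chcr_aead_reqctx fields */
	struct scatterlist *dst;        /* list handed to dma_map_sg() */
	struct scatterlist dstffwd[2];  /* backing storage for scatterwalk_ffwd() */
	int dst_nents;
};

/* Submit path: skip the AAD, map the result, remember what was mapped. */
static int demo_map_dst(struct device *dev, struct aead_request *req,
			struct demo_reqctx *reqctx)
{
	reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst, req->assoclen);
	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen);
	if (reqctx->dst_nents <= 0)
		return -EINVAL;
	if (!dma_map_sg(dev, reqctx->dst, reqctx->dst_nents, DMA_FROM_DEVICE))
		return -ENOMEM;
	return 0;
}

/* Completion path: unmap the saved list, never the raw req->dst. */
static void demo_unmap_dst(struct device *dev, struct demo_reqctx *reqctx)
{
	dma_unmap_sg(dev, reqctx->dst, reqctx->dst_nents, DMA_FROM_DEVICE);
}

This is the relationship the first chcr_algo.c hunk restores: chcr_handle_resp() now unmaps reqctx->dst with reqctx->dst_nents instead of the original req->dst.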
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 2ed1e24b44a8..d29c2b45d3ab 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -158,7 +158,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 	case CRYPTO_ALG_TYPE_AEAD:
 		ctx_req.req.aead_req = (struct aead_request *)req;
 		ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
-		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
+		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
 			     ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
 		if (ctx_req.ctx.reqctx->skb) {
 			kfree_skb(ctx_req.ctx.reqctx->skb);
@@ -1362,8 +1362,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct phys_sge_parm sg_param;
-	struct scatterlist *src, *dst;
-	struct scatterlist src_sg[2], dst_sg[2];
+	struct scatterlist *src;
 	unsigned int frags = 0, transhdr_len;
 	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
 	unsigned int kctx_len = 0;
@@ -1383,19 +1382,21 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 
 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
 		goto err;
-	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-	dst = src;
+	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+	reqctx->dst = src;
+
 	if (req->src != req->dst) {
 		err = chcr_copy_assoc(req, aeadctx);
 		if (err)
 			return ERR_PTR(err);
-		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+					       req->assoclen);
 	}
 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
 		null = 1;
 		assoclen = 0;
 	}
-	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
 					     (op_type ? -authsize : authsize));
 	if (reqctx->dst_nents <= 0) {
 		pr_err("AUTHENC:Invalid Destination sg entries\n");
@@ -1460,7 +1461,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
 	sg_param.qid = qid;
 	sg_param.align = 0;
-	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
 				 &sg_param))
 		goto dstmap_fail;
 
@@ -1711,8 +1712,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct phys_sge_parm sg_param;
-	struct scatterlist *src, *dst;
-	struct scatterlist src_sg[2], dst_sg[2];
+	struct scatterlist *src;
 	unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
 	unsigned int dst_size = 0, kctx_len;
 	unsigned int sub_type;
@@ -1728,17 +1728,19 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
 		goto err;
 	sub_type = get_aead_subtype(tfm);
-	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-	dst = src;
+	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+	reqctx->dst = src;
+
 	if (req->src != req->dst) {
 		err = chcr_copy_assoc(req, aeadctx);
 		if (err) {
 			pr_err("AAD copy to destination buffer fails\n");
 			return ERR_PTR(err);
 		}
-		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+					       req->assoclen);
 	}
-	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
 					     (op_type ? -authsize : authsize));
 	if (reqctx->dst_nents <= 0) {
 		pr_err("CCM:Invalid Destination sg entries\n");
@@ -1777,7 +1779,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
 	sg_param.qid = qid;
 	sg_param.align = 0;
-	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
 				 &sg_param))
 		goto dstmap_fail;
 
@@ -1809,8 +1811,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct phys_sge_parm sg_param;
-	struct scatterlist *src, *dst;
-	struct scatterlist src_sg[2], dst_sg[2];
+	struct scatterlist *src;
 	unsigned int frags = 0, transhdr_len;
 	unsigned int ivsize = AES_BLOCK_SIZE;
 	unsigned int dst_size = 0, kctx_len;
@@ -1832,13 +1833,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
 		goto err;
 
-	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-	dst = src;
+	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+	reqctx->dst = src;
 	if (req->src != req->dst) {
 		err = chcr_copy_assoc(req, aeadctx);
 		if (err)
 			return ERR_PTR(err);
-		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+					       req->assoclen);
 	}
 
 	if (!req->cryptlen)
@@ -1848,7 +1850,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 		crypt_len = AES_BLOCK_SIZE;
 	else
 		crypt_len = req->cryptlen;
-	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
 					     (op_type ? -authsize : authsize));
 	if (reqctx->dst_nents <= 0) {
 		pr_err("GCM:Invalid Destination sg entries\n");
@@ -1923,7 +1925,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
 	sg_param.qid = qid;
 	sg_param.align = 0;
-	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
 				 &sg_param))
 		goto dstmap_fail;
 
@@ -1937,7 +1939,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 		write_sg_to_skb(skb, &frags, src, req->cryptlen);
 	} else {
 		aes_gcm_empty_pld_pad(req->dst, authsize - 1);
-		write_sg_to_skb(skb, &frags, dst, crypt_len);
+		write_sg_to_skb(skb, &frags, reqctx->dst, crypt_len);
+
 	}
 
 	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index d5af7d64a763..7ec0a8f12475 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -158,6 +158,9 @@ struct ablk_ctx {
 };
 struct chcr_aead_reqctx {
 	struct sk_buff *skb;
+	struct scatterlist *dst;
+	struct scatterlist srcffwd[2];
+	struct scatterlist dstffwd[2];
 	short int dst_nents;
 	u16 verify;
 	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];