author		Gilad Ben-Yossef <gilad@benyossef.com>	2019-04-18 09:38:48 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2019-04-25 03:38:14 -0400
commit		c4b22bf51b815fb61a35a27fc847a88bc28ebb63 (patch)
tree		e1710a71bafd51b9060e8ed8f4f03a65af2bb32c
parent		151ded73a6c4cfaa96a563bb7a2db5341d157188 (diff)
crypto: ccree - remove special handling of chained sg
We were handling chained scattergather lists with specialized code
needlessly, as the regular sg APIs handle them just fine. The code
handling this also had an (unused) code path with a use-before-init
error, flagged by Coverity.

Remove all special handling of chained sg and leave their handling to
the regular sg APIs.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Cc: stable@vger.kernel.org # v4.19+
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
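For reference, the regular scatterlist API already walks chained lists
transparently: sg_next() recognises a chain entry and follows the link
to the next descriptor array, so a plain walk needs no sg_page()
chasing. A minimal sketch of such a walk (illustrative only, not part
of this patch; the helper name count_sg_bytes is made up) could look
like this:

#include <linux/scatterlist.h>

/*
 * Illustrative helper (not from this patch): sum the data bytes in a
 * scatterlist that may have been built with sg_chain().  sg_next()
 * transparently skips chain entries and returns NULL after the last
 * entry, so chained and unchained lists are walked the same way.
 */
static unsigned int count_sg_bytes(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	unsigned int bytes = 0;

	for (sg = sgl; sg; sg = sg_next(sg))
		bytes += sg->length;

	return bytes;
}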
-rw-r--r--	drivers/crypto/ccree/cc_buffer_mgr.c	98

1 file changed, 22 insertions, 76 deletions
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index adef3cfa1251..7c92373df351 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -83,24 +83,17 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
  */
 static unsigned int cc_get_sgl_nents(struct device *dev,
 				     struct scatterlist *sg_list,
-				     unsigned int nbytes, u32 *lbytes,
-				     bool *is_chained)
+				     unsigned int nbytes, u32 *lbytes)
 {
 	unsigned int nents = 0;
 
 	while (nbytes && sg_list) {
-		if (sg_list->length) {
-			nents++;
-			/* get the number of bytes in the last entry */
-			*lbytes = nbytes;
-			nbytes -= (sg_list->length > nbytes) ?
-					nbytes : sg_list->length;
-			sg_list = sg_next(sg_list);
-		} else {
-			sg_list = (struct scatterlist *)sg_page(sg_list);
-			if (is_chained)
-				*is_chained = true;
-		}
+		nents++;
+		/* get the number of bytes in the last entry */
+		*lbytes = nbytes;
+		nbytes -= (sg_list->length > nbytes) ?
+				nbytes : sg_list->length;
+		sg_list = sg_next(sg_list);
 	}
 	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
 	return nents;
@@ -142,7 +135,7 @@ void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
 {
 	u32 nents, lbytes;
 
-	nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
+	nents = cc_get_sgl_nents(dev, sg, end, &lbytes);
 	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
 		       (direct == CC_SG_TO_BUF));
 }
@@ -314,40 +307,10 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 	sgl_data->num_of_buffers++;
 }
 
-static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
-			 enum dma_data_direction direction)
-{
-	u32 i, j;
-	struct scatterlist *l_sg = sg;
-
-	for (i = 0; i < nents; i++) {
-		if (!l_sg)
-			break;
-		if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
-			dev_err(dev, "dma_map_page() sg buffer failed\n");
-			goto err;
-		}
-		l_sg = sg_next(l_sg);
-	}
-	return nents;
-
-err:
-	/* Restore mapped parts */
-	for (j = 0; j < i; j++) {
-		if (!sg)
-			break;
-		dma_unmap_sg(dev, sg, 1, direction);
-		sg = sg_next(sg);
-	}
-	return 0;
-}
-
 static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 		     unsigned int nbytes, int direction, u32 *nents,
 		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 {
-	bool is_chained = false;
-
 	if (sg_is_last(sg)) {
 		/* One entry only case -set to DLLI */
 		if (dma_map_sg(dev, sg, 1, direction) != 1) {
@@ -361,35 +324,21 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 		*nents = 1;
 		*mapped_nents = 1;
 	} else {  /*sg_is_last*/
-		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
-					  &is_chained);
+		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
 		if (*nents > max_sg_nents) {
 			*nents = 0;
 			dev_err(dev, "Too many fragments. current %d max %d\n",
 				*nents, max_sg_nents);
 			return -ENOMEM;
 		}
-		if (!is_chained) {
-			/* In case of mmu the number of mapped nents might
-			 * be changed from the original sgl nents
-			 */
-			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
-			if (*mapped_nents == 0) {
-				*nents = 0;
-				dev_err(dev, "dma_map_sg() sg buffer failed\n");
-				return -ENOMEM;
-			}
-		} else {
-			/*In this case the driver maps entry by entry so it
-			 * must have the same nents before and after map
-			 */
-			*mapped_nents = cc_dma_map_sg(dev, sg, *nents,
-						      direction);
-			if (*mapped_nents != *nents) {
-				*nents = *mapped_nents;
-				dev_err(dev, "dma_map_sg() sg buffer failed\n");
-				return -ENOMEM;
-			}
+		/* In case of mmu the number of mapped nents might
+		 * be changed from the original sgl nents
+		 */
+		*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
+		if (*mapped_nents == 0) {
+			*nents = 0;
+			dev_err(dev, "dma_map_sg() sg buffer failed\n");
+			return -ENOMEM;
 		}
 	}
 
@@ -571,7 +520,6 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 	u32 dummy;
-	bool chained;
 	u32 size_to_unmap = 0;
 
 	if (areq_ctx->mac_buf_dma_addr) {
@@ -636,15 +584,14 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 		size_to_unmap += crypto_aead_ivsize(tfm);
 
 	dma_unmap_sg(dev, req->src,
-		     cc_get_sgl_nents(dev, req->src, size_to_unmap,
-				      &dummy, &chained),
+		     cc_get_sgl_nents(dev, req->src, size_to_unmap, &dummy),
 		     DMA_BIDIRECTIONAL);
 	if (req->src != req->dst) {
 		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 			sg_virt(req->dst));
 		dma_unmap_sg(dev, req->dst,
 			     cc_get_sgl_nents(dev, req->dst, size_to_unmap,
-					      &dummy, &chained),
+					      &dummy),
 			     DMA_BIDIRECTIONAL);
 	}
 	if (drvdata->coherent &&
@@ -1022,7 +969,6 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	unsigned int size_for_map = req->assoclen + req->cryptlen;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	u32 sg_index = 0;
-	bool chained = false;
 	bool is_gcm4543 = areq_ctx->is_gcm4543;
 	u32 size_to_skip = req->assoclen;
 
@@ -1043,7 +989,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 			authsize : 0;
 	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
-					    &src_last_bytes, &chained);
+					    &src_last_bytes);
 	sg_index = areq_ctx->src_sgl->length;
 	//check where the data starts
 	while (sg_index <= size_to_skip) {
@@ -1083,7 +1029,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	}
 
 	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
-					    &dst_last_bytes, &chained);
+					    &dst_last_bytes);
 	sg_index = areq_ctx->dst_sgl->length;
 	offset = size_to_skip;
 
@@ -1484,7 +1430,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
 		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
 			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
 		areq_ctx->in_nents =
-			cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
+			cc_get_sgl_nents(dev, src, nbytes, &dummy);
 		sg_copy_to_buffer(src, areq_ctx->in_nents,
 				  &curr_buff[*curr_buff_cnt], nbytes);
 		*curr_buff_cnt += nbytes;