author    Hadar Gat <hadar.gat@arm.com>    2019-01-15 08:43:11 -0500
committer Herbert Xu <herbert@gondor.apana.org.au>    2019-01-25 05:41:50 -0500
commit ccba2f1112d4871982ae3f09d1984c0443c89a97 (patch)
tree   6b427d40166e4a03b693b8d18224824ce21a861d /drivers/crypto/ccree/cc_buffer_mgr.c
parent a0d608ee5ebfa9a9da0e69784e7aa0f86644a02e (diff)
crypto: ccree - improve error handling
Pass the returned error code up to the higher-level functions instead of overwriting it with -ENOMEM.

Signed-off-by: Hadar Gat <hadar.gat@arm.com>
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto/ccree/cc_buffer_mgr.c')
-rw-r--r--    drivers/crypto/ccree/cc_buffer_mgr.c | 74
1 file changed, 35 insertions(+), 39 deletions(-)
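Every hunk below applies the same pattern: helpers such as cc_map_sg(), cc_set_hash_buf(), and cc_generate_mlli() already return meaningful error codes, so instead of collapsing any failure into a hard-coded -ENOMEM, the return value is captured in rc and propagated unchanged. A minimal standalone sketch of the before/after shape follows; the helper do_map() and the ERR_* codes are hypothetical stand-ins, purely for illustration:

    #include <stdio.h>

    /* Hypothetical stand-ins for -EINVAL and -ENOMEM. */
    #define ERR_INVAL (-22)
    #define ERR_NOMEM (-12)

    /* Hypothetical helper that fails with a specific error code. */
    static int do_map(int fail_code)
    {
            return fail_code;
    }

    /* Old shape: the helper's code is discarded; callers see -ENOMEM. */
    static int map_old(int fail_code)
    {
            int rc = 0;

            if (do_map(fail_code)) {
                    rc = ERR_NOMEM;
                    goto out;
            }
    out:
            return rc;
    }

    /* New shape: whatever the helper returned travels up unchanged. */
    static int map_new(int fail_code)
    {
            int rc;

            rc = do_map(fail_code);
            if (rc)
                    goto out;
    out:
            return rc;
    }

    int main(void)
    {
            /* An EINVAL-style failure: the old shape masks it as -12,
             * the new shape reports the real -22. */
            printf("old: %d, new: %d\n",
                   map_old(ERR_INVAL), map_new(ERR_INVAL));
            return 0;
    }

With this in place, callers higher up the stack can distinguish, say, a bad-parameter failure from a genuine allocation failure rather than always seeing -ENOMEM.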
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index dd948e1df9e5..32d2df36ced2 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -511,10 +511,8 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 	/* Map the src SGL */
 	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
 		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
-	if (rc) {
-		rc = -ENOMEM;
+	if (rc)
 		goto cipher_exit;
-	}
 	if (mapped_nents > 1)
 		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 
@@ -528,12 +526,11 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 		}
 	} else {
 		/* Map the dst sg */
-		if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
-			      &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
-			      &dummy, &mapped_nents)) {
-			rc = -ENOMEM;
+		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+			       &dummy, &mapped_nents);
+		if (rc)
 			goto cipher_exit;
-		}
 		if (mapped_nents > 1)
 			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 
@@ -1078,10 +1075,8 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 				  &areq_ctx->dst.nents,
 				  LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
 				  &dst_mapped_nents);
-		if (rc) {
-			rc = -ENOMEM;
+		if (rc)
 			goto chain_data_exit;
-		}
 	}
 
 	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
@@ -1235,11 +1230,10 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 		}
 		areq_ctx->ccm_iv0_dma_addr = dma_addr;
 
-		if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
-					 &sg_data, req->assoclen)) {
-			rc = -ENOMEM;
+		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
+					  &sg_data, req->assoclen);
+		if (rc)
 			goto aead_map_failure;
-		}
 	}
 
 	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
@@ -1299,10 +1293,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 			       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
 				LLI_MAX_NUM_OF_DATA_ENTRIES),
 			       &dummy, &mapped_nents);
-		if (rc) {
-			rc = -ENOMEM;
+		if (rc)
 			goto aead_map_failure;
-		}
 
 	if (areq_ctx->is_single_pass) {
 		/*
@@ -1386,6 +1378,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
 	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
 	struct buffer_array sg_data;
 	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+	int rc = 0;
 	u32 dummy = 0;
 	u32 mapped_nents = 0;
 
@@ -1405,18 +1398,18 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
 	/*TODO: copy data in case that buffer is enough for operation */
 	/* map the previous buffer */
 	if (*curr_buff_cnt) {
-		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
-				    &sg_data)) {
-			return -ENOMEM;
-		}
+		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+				     &sg_data);
+		if (rc)
+			return rc;
 	}
 
 	if (src && nbytes > 0 && do_update) {
-		if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
-			      &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
-			      &dummy, &mapped_nents)) {
+		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+			       &dummy, &mapped_nents);
+		if (rc)
 			goto unmap_curr_buff;
-		}
 		if (src && mapped_nents == 1 &&
 		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
 			memcpy(areq_ctx->buff_sg, src,
@@ -1435,7 +1428,8 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
 		/* add the src data to the sg_data */
 		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
 				0, true, &areq_ctx->mlli_nents);
-		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+		if (rc)
 			goto fail_unmap_din;
 	}
 	/* change the buffer index for the unmap function */
@@ -1451,7 +1445,7 @@ unmap_curr_buff:
 	if (*curr_buff_cnt)
 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
 
-	return -ENOMEM;
+	return rc;
 }
 
 int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
@@ -1470,6 +1464,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
 	struct buffer_array sg_data;
 	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 	unsigned int swap_index = 0;
+	int rc = 0;
 	u32 dummy = 0;
 	u32 mapped_nents = 0;
 
@@ -1514,21 +1509,21 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
 	}
 
 	if (*curr_buff_cnt) {
-		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
-				    &sg_data)) {
-			return -ENOMEM;
-		}
+		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+				     &sg_data);
+		if (rc)
+			return rc;
 		/* change the buffer index for next operation */
 		swap_index = 1;
 	}
 
 	if (update_data_len > *curr_buff_cnt) {
-		if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
-			      DMA_TO_DEVICE, &areq_ctx->in_nents,
-			      LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
-			      &mapped_nents)) {
+		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+			       DMA_TO_DEVICE, &areq_ctx->in_nents,
+			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+			       &mapped_nents);
+		if (rc)
 			goto unmap_curr_buff;
-		}
 		if (mapped_nents == 1 &&
 		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
 			/* only one entry in the SG and no previous data */
@@ -1548,7 +1543,8 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
 		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
 				(update_data_len - *curr_buff_cnt), 0, true,
 				&areq_ctx->mlli_nents);
-		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+		if (rc)
 			goto fail_unmap_din;
 	}
 	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
@@ -1562,7 +1558,7 @@ unmap_curr_buff:
 	if (*curr_buff_cnt)
 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
 
-	return -ENOMEM;
+	return rc;
 }
 
 void cc_unmap_hash_request(struct device *dev, void *ctx,