Diffstat (limited to 'drivers/crypto/caam/caamalg.c')
-rw-r--r--	drivers/crypto/caam/caamalg.c	80
1 file changed, 72 insertions(+), 8 deletions(-)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index c09ce1f040d3..a80ea853701d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -97,6 +97,13 @@ static inline void append_dec_op1(u32 *desc, u32 type)
 {
 	u32 *jump_cmd, *uncond_jump_cmd;
 
+	/* DK bit is valid only for AES */
+	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
+		append_operation(desc, type | OP_ALG_AS_INITFINAL |
+				 OP_ALG_DECRYPT);
+		return;
+	}
+
 	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
 	append_operation(desc, type | OP_ALG_AS_INITFINAL |
 			 OP_ALG_DECRYPT);
@@ -786,7 +793,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
@@ -1313,8 +1320,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 					 DMA_FROM_DEVICE, dst_chained);
 	}
 
-	/* Check if data are contiguous */
 	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, iv_dma)) {
+		dev_err(jrdev, "unable to map IV\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Check if data are contiguous */
 	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
 	    iv_dma || src_nents || iv_dma + ivsize !=
 	    sg_dma_address(req->src)) {
@@ -1345,8 +1357,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
 			 desc_bytes;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
 	*all_contig_ptr = all_contig;
 
 	sec4_sg_index = 0;
@@ -1369,6 +1379,12 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		sg_to_sec4_sg_last(req->dst, dst_nents,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 	}
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		return ERR_PTR(-ENOMEM);
+	}
 
 	return edesc;
 }
@@ -1494,8 +1510,13 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 					 DMA_FROM_DEVICE, dst_chained);
 	}
 
-	/* Check if data are contiguous */
 	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, iv_dma)) {
+		dev_err(jrdev, "unable to map IV\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Check if data are contiguous */
 	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
 	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
 		contig &= ~GIV_SRC_CONTIG;
@@ -1534,8 +1555,6 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
 			 desc_bytes;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
 	*contig_ptr = contig;
 
 	sec4_sg_index = 0;
@@ -1559,6 +1578,12 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 		sg_to_sec4_sg_last(req->dst, dst_nents,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 	}
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		return ERR_PTR(-ENOMEM);
+	}
 
 	return edesc;
 }
@@ -1650,11 +1675,16 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 					 DMA_FROM_DEVICE, dst_chained);
 	}
 
+	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, iv_dma)) {
+		dev_err(jrdev, "unable to map IV\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
 	/*
 	 * Check if iv can be contiguous with source and destination.
 	 * If so, include it. If not, create scatterlist.
 	 */
-	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
 	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
 		iv_contig = true;
 	else
@@ -1693,6 +1723,11 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 
 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
 					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
 	edesc->iv_dma = iv_dma;
 
 #ifdef DEBUG
@@ -2441,8 +2476,37 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
 
 static int __init caam_algapi_init(void)
 {
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
+	void *priv;
 	int i = 0, err = 0;
 
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return -ENODEV;
+	}
+
+	pdev = of_find_device_by_node(dev_node);
+	if (!pdev) {
+		of_node_put(dev_node);
+		return -ENODEV;
+	}
+
+	ctrldev = &pdev->dev;
+	priv = dev_get_drvdata(ctrldev);
+	of_node_put(dev_node);
+
+	/*
+	 * If priv is NULL, it's probably because the caam driver wasn't
+	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+	 */
+	if (!priv)
+		return -ENODEV;
+
+
 	INIT_LIST_HEAD(&alg_list);
 
 	/* register crypto algorithms the device supports */
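Every hunk above applies the same pattern: check each dma_map_single() result with dma_mapping_error() before the bus address is used in a descriptor, and map the sec4 S/G table only after sg_to_sec4_sg_last() has finished populating it. The sketch below isolates that check in a self-contained helper; caam_map_iv() and its parameters are illustrative names for this note only, not code from caamalg.c.

/*
 * Illustrative sketch of the dma_map_single()/dma_mapping_error() pattern
 * this patch applies. caam_map_iv() is a hypothetical helper, not part of
 * the driver.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

static int caam_map_iv(struct device *jrdev, void *iv, size_t ivsize,
		       dma_addr_t *iv_dma)
{
	/* Map the IV for device reads; the returned cookie may be invalid. */
	*iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);

	/* Never hand the address to hardware before checking the mapping. */
	if (dma_mapping_error(jrdev, *iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return -ENOMEM;
	}

	return 0;
}

The caam_algapi_init() hunk extends the same defensive idea to module init: it looks up the fsl,sec-v4.0 (or fsl,sec4.0) controller node and returns -ENODEV when the controller's driver data is missing, so the algorithms are never registered against a CAAM block that failed to initialize.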