author		Linus Torvalds <torvalds@linux-foundation.org>	2014-08-04 12:52:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-04 12:52:51 -0400
commit		3e7a716a92a0e051f5502c7b689f8c9127c37c33 (patch)
tree		2ebb892eb3a024f108e68a9577c767a53b955a4a /drivers/crypto
parent		c2df436bd2504f52808c10ab7d7da832f61ad3f0 (diff)
parent		ce5481d01f67ad304908ec2113515440c0fa86eb (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
- CTR(AES) optimisation on x86_64 using "by8" AVX
- arm64 support for the CCP driver
- Intel QAT crypto driver
- Qualcomm crypto engine (qce) driver
- x86_64 assembly optimisation for 3DES
- CTR(3DES) speed test
- move the FIPS panic from module.c so that it only triggers on crypto
  modules
- SP800-90A Deterministic Random Bit Generator (drbg)
- more test vectors for ghash
- tweak self-tests to catch partial-block bugs
- misc fixes
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (94 commits)
crypto: drbg - fix failure of generating multiple of 2**16 bytes
crypto: ccp - Do not sign extend input data to CCP
crypto: testmgr - add missing spaces to drbg error strings
crypto: atmel-tdes - Switch to managed version of kzalloc
crypto: atmel-sha - Switch to managed version of kzalloc
crypto: testmgr - use chunks smaller than algo block size in chunk tests
crypto: qat - Fixed SKU1 dev issue
crypto: qat - Use hweight for bit counting
crypto: qat - Updated print outputs
crypto: qat - change ae_num to ae_id
crypto: qat - change slice->regions to slice->region
crypto: qat - use min_t macro
crypto: qat - remove unnecessary parentheses
crypto: qat - remove unneeded header
crypto: qat - checkpatch blank lines
crypto: qat - remove unnecessary return codes
crypto: Resolve shadow warnings
crypto: ccp - Remove "select OF" from Kconfig
crypto: caam - fix DECO RSR polling
crypto: qce - Let 'DEV_QCE' depend on both HAS_DMA and HAS_IOMEM
...
Diffstat (limited to 'drivers/crypto')
74 files changed, 14524 insertions(+), 180 deletions(-)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 02f177aeb16c..2fb0fdfc87df 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -391,7 +391,7 @@ config CRYPTO_DEV_ATMEL_SHA
 
 config CRYPTO_DEV_CCP
 	bool "Support for AMD Cryptographic Coprocessor"
-	depends on X86 && PCI
+	depends on (X86 && PCI) || ARM64
 	default n
 	help
 	  The AMD Cryptographic Coprocessor provides hardware support
@@ -418,4 +418,22 @@ config CRYPTO_DEV_MXS_DCP
 	  To compile this driver as a module, choose M here: the module
 	  will be called mxs-dcp.
 
+source "drivers/crypto/qat/Kconfig"
+
+config CRYPTO_DEV_QCE
+	tristate "Qualcomm crypto engine accelerator"
+	depends on (ARCH_QCOM || COMPILE_TEST) && HAS_DMA && HAS_IOMEM
+	select CRYPTO_AES
+	select CRYPTO_DES
+	select CRYPTO_ECB
+	select CRYPTO_CBC
+	select CRYPTO_XTS
+	select CRYPTO_CTR
+	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
+	help
+	  This driver supports Qualcomm crypto engine accelerator
+	  hardware. To compile this driver as a module, choose M here. The
+	  module will be called qcrypto.
+
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 482f090d16d0..3924f93d5774 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -23,3 +23,5 @@ obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
 obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
+obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 37f9cc98ba17..e4c6c58fbb03 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1292,7 +1292,7 @@ static struct platform_driver crypto4xx_driver = {
 		.of_match_table = crypto4xx_match,
 	},
 	.probe = crypto4xx_probe,
-	.remove = crypto4xx_remove,
+	.remove = __exit_p(crypto4xx_remove),
 };
 
 module_platform_driver(crypto4xx_driver);
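
Note on the crypto4xx change: wrapping the remove callback in __exit_p() pairs with marking the remove function __exit (presumably done elsewhere in the same patch). When the driver is built into the kernel, __exit functions are discarded at link time and __exit_p() evaluates to NULL, so the driver never references the dropped symbol; in a modular build it expands to the function pointer unchanged. A minimal sketch of the pattern, with hypothetical foo_* names:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

/* Discarded from the image when the driver is built in */
static int __exit foo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
	},
	.probe	= foo_probe,
	/* NULL when built in, &foo_remove when modular */
	.remove	= __exit_p(foo_remove),
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");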
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 0618be06b9fb..9a4f69eaa5e0 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1353,7 +1353,6 @@ static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
 					GFP_KERNEL);
 	if (!pdata->dma_slave) {
 		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
-		devm_kfree(&pdev->dev, pdata);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1375,7 +1374,8 @@ static int atmel_sha_probe(struct platform_device *pdev)
 	unsigned long sha_phys_size;
 	int err;
 
-	sha_dd = kzalloc(sizeof(struct atmel_sha_dev), GFP_KERNEL);
+	sha_dd = devm_kzalloc(&pdev->dev, sizeof(struct atmel_sha_dev),
+				GFP_KERNEL);
 	if (sha_dd == NULL) {
 		dev_err(dev, "unable to alloc data struct.\n");
 		err = -ENOMEM;
@@ -1490,8 +1490,6 @@ clk_err:
 	free_irq(sha_dd->irq, sha_dd);
 res_err:
 	tasklet_kill(&sha_dd->done_task);
-	kfree(sha_dd);
-	sha_dd = NULL;
 sha_dd_err:
 	dev_err(dev, "initialization failed.\n");
 
@@ -1523,9 +1521,6 @@ static int atmel_sha_remove(struct platform_device *pdev)
 	if (sha_dd->irq >= 0)
 		free_irq(sha_dd->irq, sha_dd);
 
-	kfree(sha_dd);
-	sha_dd = NULL;
-
 	return 0;
 }
 
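
Note on the atmel-sha hunks (atmel-tdes below gets the same treatment): this is the managed-allocation (devm_*) conversion. Memory obtained with devm_kzalloc() is tied to the device and released automatically by the driver core when probe fails or the device is removed, which is why the explicit devm_kfree()/kfree() calls in the error and remove paths can simply be dropped. A condensed sketch of the resulting probe shape, with hypothetical foo_* names:

#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_dev {
	int irq;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_dev *dd;

	/* Freed by the driver core on probe failure or removal,
	 * so no error path or remove path needs a kfree(). */
	dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	platform_set_drvdata(pdev, dd);
	return 0;
}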
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 6cde5b530c69..d3a9041938ea 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1337,7 +1337,6 @@ static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
 					GFP_KERNEL);
 	if (!pdata->dma_slave) {
 		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
-		devm_kfree(&pdev->dev, pdata);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1359,7 +1358,7 @@ static int atmel_tdes_probe(struct platform_device *pdev)
 	unsigned long tdes_phys_size;
 	int err;
 
-	tdes_dd = kzalloc(sizeof(struct atmel_tdes_dev), GFP_KERNEL);
+	tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
 	if (tdes_dd == NULL) {
 		dev_err(dev, "unable to alloc data struct.\n");
 		err = -ENOMEM;
@@ -1483,8 +1482,6 @@ tdes_irq_err:
 res_err:
 	tasklet_kill(&tdes_dd->done_task);
 	tasklet_kill(&tdes_dd->queue_task);
-	kfree(tdes_dd);
-	tdes_dd = NULL;
tdes_dd_err:
 	dev_err(dev, "initialization failed.\n");
 
@@ -1519,9 +1516,6 @@ static int atmel_tdes_remove(struct platform_device *pdev)
 	if (tdes_dd->irq >= 0)
 		free_irq(tdes_dd->irq, tdes_dd);
 
-	kfree(tdes_dd);
-	tdes_dd = NULL;
-
 	return 0;
 }
 
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index c09ce1f040d3..a80ea853701d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -97,6 +97,13 @@ static inline void append_dec_op1(u32 *desc, u32 type)
 {
 	u32 *jump_cmd, *uncond_jump_cmd;
 
+	/* DK bit is valid only for AES */
+	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
+		append_operation(desc, type | OP_ALG_AS_INITFINAL |
+				 OP_ALG_DECRYPT);
+		return;
+	}
+
 	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
 	append_operation(desc, type | OP_ALG_AS_INITFINAL |
 			 OP_ALG_DECRYPT);
@@ -786,7 +793,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
@@ -1313,8 +1320,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 					 DMA_FROM_DEVICE, dst_chained);
 	}
 
-	/* Check if data are contiguous */
 	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, iv_dma)) {
+		dev_err(jrdev, "unable to map IV\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Check if data are contiguous */
 	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
 	    iv_dma || src_nents || iv_dma + ivsize !=
 	    sg_dma_address(req->src)) {
@@ -1345,8 +1357,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
 			 desc_bytes;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
 	*all_contig_ptr = all_contig;
 
 	sec4_sg_index = 0;
@@ -1369,6 +1379,12 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		sg_to_sec4_sg_last(req->dst, dst_nents,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 	}
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		return ERR_PTR(-ENOMEM);
+	}
 
 	return edesc;
 }
@@ -1494,8 +1510,13 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 					 DMA_FROM_DEVICE, dst_chained);
 	}
 
-	/* Check if data are contiguous */
 	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, iv_dma)) {
+		dev_err(jrdev, "unable to map IV\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Check if data are contiguous */
 	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
 	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
 		contig &= ~GIV_SRC_CONTIG;
@@ -1534,8 +1555,6 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
 			 desc_bytes;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
 	*contig_ptr = contig;
 
 	sec4_sg_index = 0;
@@ -1559,6 +1578,12 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 		sg_to_sec4_sg_last(req->dst, dst_nents,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 	}
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		return ERR_PTR(-ENOMEM);
+	}
 
 	return edesc;
 }
@@ -1650,11 +1675,16 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 					 DMA_FROM_DEVICE, dst_chained);
 	}
 
+	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, iv_dma)) {
+		dev_err(jrdev, "unable to map IV\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
 	/*
 	 * Check if iv can be contiguous with source and destination.
 	 * If so, include it. If not, create scatterlist.
 	 */
-	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
 	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
 		iv_contig = true;
 	else
@@ -1693,6 +1723,11 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 
 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
 					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
 	edesc->iv_dma = iv_dma;
 
 #ifdef DEBUG
@@ -2441,8 +2476,37 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
 
 static int __init caam_algapi_init(void)
 {
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
+	void *priv;
 	int i = 0, err = 0;
 
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return -ENODEV;
+	}
+
+	pdev = of_find_device_by_node(dev_node);
+	if (!pdev) {
+		of_node_put(dev_node);
+		return -ENODEV;
+	}
+
+	ctrldev = &pdev->dev;
+	priv = dev_get_drvdata(ctrldev);
+	of_node_put(dev_node);
+
+	/*
+	 * If priv is NULL, it's probably because the caam driver wasn't
+	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+	 */
+	if (!priv)
+		return -ENODEV;
+
+
 	INIT_LIST_HEAD(&alg_list);
 
 	/* register crypto algorithms the device supports */
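
Note on the recurring caamalg fix: dma_map_single() can fail (for example when an IOMMU runs out of space), and the returned dma_addr_t is an opaque cookie, so failure must be detected with dma_mapping_error() rather than a NULL check. The hunks also move the S/G-table mapping after the code that fills the table in, so the CPU writes land before the buffer is handed to the device, and fix the copy-paste bug where the decrypt descriptor's mapping was checked through sh_desc_enc_dma. A minimal sketch of the check, with hypothetical names:

#include <linux/dma-mapping.h>

static int foo_map_buf(struct device *dev, void *buf, size_t len,
		       dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	/* The handle is not a pointer; only dma_mapping_error()
	 * can tell whether the mapping succeeded. */
	if (dma_mapping_error(dev, *handle)) {
		dev_err(dev, "unable to map buffer\n");
		return -ENOMEM;
	}
	return 0;
}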
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 0d9284ef96a8..b464d03ebf40 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -137,13 +137,20 @@ struct caam_hash_state {
 /* Common job descriptor seq in/out ptr routines */
 
 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
-static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
+static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				       struct caam_hash_state *state,
				       int ctx_len)
 {
 	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
 					ctx_len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(jrdev, state->ctx_dma)) {
+		dev_err(jrdev, "unable to map ctx\n");
+		return -ENOMEM;
+	}
+
 	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
+
+	return 0;
 }
 
 /* Map req->result, and append seq_out_ptr command that points to it */
@@ -201,14 +208,19 @@ try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
 }
 
 /* Map state->caam_ctx, and add it to link table */
-static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
-				      struct caam_hash_state *state,
-				      int ctx_len,
-				      struct sec4_sg_entry *sec4_sg,
-				      u32 flag)
+static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
+				     struct caam_hash_state *state, int ctx_len,
+				     struct sec4_sg_entry *sec4_sg, u32 flag)
 {
 	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
+	if (dma_mapping_error(jrdev, state->ctx_dma)) {
+		dev_err(jrdev, "unable to map ctx\n");
+		return -ENOMEM;
+	}
+
 	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
+
+	return 0;
 }
 
 /* Common shared descriptor commands */
@@ -487,11 +499,11 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 			       digestsize, 1);
 #endif
 	}
-	*keylen = digestsize;
-
 	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
 	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
 
+	*keylen = digestsize;
+
 	kfree(desc);
 
 	return ret;
@@ -706,7 +718,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
 	if (err)
 		caam_jr_strstatus(jrdev, err);
 
-	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
 	kfree(edesc);
 
 #ifdef DEBUG
@@ -741,7 +753,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
 	if (err)
 		caam_jr_strstatus(jrdev, err);
 
-	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
 	kfree(edesc);
 
 #ifdef DEBUG
@@ -808,12 +820,11 @@ static int ahash_update_ctx(struct ahash_request *req)
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
 		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 				 DESC_JOB_IO_LEN;
-		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-						    sec4_sg_bytes,
-						    DMA_TO_DEVICE);
 
-		ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
-				   edesc->sec4_sg, DMA_BIDIRECTIONAL);
+		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
+		if (ret)
+			return ret;
 
 		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
 							edesc->sec4_sg + 1,
@@ -839,6 +850,14 @@ static int ahash_update_ctx(struct ahash_request *req)
 		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
 				     HDR_REVERSE);
 
+		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+						    sec4_sg_bytes,
+						    DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+			dev_err(jrdev, "unable to map S/G table\n");
+			return -ENOMEM;
+		}
+
 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
 				       to_hash, LDST_SGF);
 
@@ -911,23 +930,34 @@ static int ahash_final_ctx(struct ahash_request *req)
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 			 DESC_JOB_IO_LEN;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
 	edesc->src_nents = 0;
 
-	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
-			   DMA_TO_DEVICE);
+	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+				 edesc->sec4_sg, DMA_TO_DEVICE);
+	if (ret)
+		return ret;
 
 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
 						buf, state->buf_dma, buflen,
 						last_buflen);
 	(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
 
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		return -ENOMEM;
+	}
+
 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
 			  LDST_SGF);
 
 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		return -ENOMEM;
+	}
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -989,11 +1019,11 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 			 DESC_JOB_IO_LEN;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
 
-	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
-			   DMA_TO_DEVICE);
+	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+				 edesc->sec4_sg, DMA_TO_DEVICE);
+	if (ret)
+		return ret;
 
 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
 						buf, state->buf_dma, buflen,
@@ -1002,11 +1032,22 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
 			   sec4_sg_src_index, chained);
 
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		return -ENOMEM;
+	}
+
 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
 			       buflen + req->nbytes, LDST_SGF);
 
 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		return -ENOMEM;
+	}
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1056,8 +1097,7 @@ static int ahash_digest(struct ahash_request *req)
 	}
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 			  DESC_JOB_IO_LEN;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->src_nents = src_nents;
 	edesc->chained = chained;
 
@@ -1067,6 +1107,12 @@ static int ahash_digest(struct ahash_request *req)
 
 	if (src_nents) {
 		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+						    sec4_sg_bytes, DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+			dev_err(jrdev, "unable to map S/G table\n");
+			return -ENOMEM;
+		}
 		src_dma = edesc->sec4_sg_dma;
 		options = LDST_SGF;
 	} else {
@@ -1077,6 +1123,10 @@ static int ahash_digest(struct ahash_request *req)
 
 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		return -ENOMEM;
+	}
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1125,11 +1175,19 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
 
 	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, state->buf_dma)) {
+		dev_err(jrdev, "unable to map src\n");
+		return -ENOMEM;
+	}
 
 	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
 
 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		return -ENOMEM;
+	}
 	edesc->src_nents = 0;
 
 #ifdef DEBUG
@@ -1197,9 +1255,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
 		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 				 DESC_JOB_IO_LEN;
-		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-						    sec4_sg_bytes,
-						    DMA_TO_DEVICE);
+		edesc->dst_dma = 0;
 
 		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
 						    buf, *buflen);
@@ -1216,9 +1272,19 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
 				     HDR_REVERSE);
 
+		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+						    sec4_sg_bytes,
+						    DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+			dev_err(jrdev, "unable to map S/G table\n");
+			return -ENOMEM;
+		}
+
 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
 
-		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+		if (ret)
+			return ret;
 
 #ifdef DEBUG
 		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1297,8 +1363,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 			 DESC_JOB_IO_LEN;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
 
 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
 						state->buf_dma, buflen,
@@ -1307,11 +1371,22 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
 			   chained);
 
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		return -ENOMEM;
+	}
+
 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
 			       req->nbytes, LDST_SGF);
 
 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		return -ENOMEM;
+	}
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1380,13 +1455,19 @@ static int ahash_update_first(struct ahash_request *req)
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
 		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 				 DESC_JOB_IO_LEN;
-		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-						    sec4_sg_bytes,
-						    DMA_TO_DEVICE);
+		edesc->dst_dma = 0;
 
 		if (src_nents) {
 			sg_to_sec4_sg_last(req->src, src_nents,
 					   edesc->sec4_sg, 0);
+			edesc->sec4_sg_dma = dma_map_single(jrdev,
+							    edesc->sec4_sg,
+							    sec4_sg_bytes,
+							    DMA_TO_DEVICE);
+			if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+				dev_err(jrdev, "unable to map S/G table\n");
+				return -ENOMEM;
+			}
 			src_dma = edesc->sec4_sg_dma;
 			options = LDST_SGF;
 		} else {
@@ -1404,7 +1485,9 @@ static int ahash_update_first(struct ahash_request *req)
 
 		append_seq_in_ptr(desc, src_dma, to_hash, options);
 
-		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+		if (ret)
+			return ret;
 
 #ifdef DEBUG
 		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1453,6 +1536,7 @@ static int ahash_init(struct ahash_request *req)
 	state->final = ahash_final_no_ctx;
 
 	state->current_buf = 0;
+	state->buf_dma = 0;
 
 	return 0;
 }
@@ -1787,8 +1871,36 @@ caam_hash_alloc(struct caam_hash_template *template,
 
 static int __init caam_algapi_hash_init(void)
 {
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
+	void *priv;
 	int i = 0, err = 0;
 
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return -ENODEV;
+	}
+
+	pdev = of_find_device_by_node(dev_node);
+	if (!pdev) {
+		of_node_put(dev_node);
+		return -ENODEV;
+	}
+
+	ctrldev = &pdev->dev;
+	priv = dev_get_drvdata(ctrldev);
+	of_node_put(dev_node);
+
+	/*
+	 * If priv is NULL, it's probably because the caam driver wasn't
+	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+	 */
+	if (!priv)
+		return -ENODEV;
+
 	INIT_LIST_HEAD(&hash_list);
 
 	/* register crypto algorithms the device supports */
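
Note on the caamhash init change (the same guard is added to caamalg.c above and caamrng.c below): the module init function now verifies that the CAAM controller actually finished probing before registering algorithms, since registering against a half-initialized controller (e.g. after an RNG4 instantiation failure) would fail later. A condensed sketch of the guard, assuming the "fsl,sec-v4.0" binding used above:

#include <linux/of.h>
#include <linux/of_platform.h>

static int foo_check_ctrl_ready(void)
{
	struct device_node *np;
	struct platform_device *pdev;

	np = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!np)
		return -ENODEV;

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev)
		return -ENODEV;

	/* NULL drvdata means the controller driver never
	 * completed its probe; bail out. */
	if (!dev_get_drvdata(&pdev->dev))
		return -ENODEV;

	return 0;
}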
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 8c07d3153f12..ae31e555793c 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -185,7 +185,7 @@ static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
 			       max - copied_idx, false);
 }
 
-static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
+static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
 {
 	struct device *jrdev = ctx->jrdev;
 	u32 *desc = ctx->sh_desc;
@@ -203,13 +203,18 @@ static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
 
 	ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
 					  DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
 		       desc, desc_bytes(desc), 1);
 #endif
+	return 0;
 }
 
-static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
+static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
 {
 	struct device *jrdev = ctx->jrdev;
 	struct buf_data *bd = &ctx->bufs[buf_id];
@@ -220,12 +225,17 @@ static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
 			 HDR_REVERSE);
 
 	bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(jrdev, bd->addr)) {
+		dev_err(jrdev, "unable to map dst\n");
+		return -ENOMEM;
+	}
 
 	append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
 		       desc, desc_bytes(desc), 1);
 #endif
+	return 0;
 }
 
 static void caam_cleanup(struct hwrng *rng)
@@ -242,24 +252,44 @@ static void caam_cleanup(struct hwrng *rng)
 	rng_unmap_ctx(rng_ctx);
 }
 
-static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
+static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
 {
 	struct buf_data *bd = &ctx->bufs[buf_id];
+	int err;
+
+	err = rng_create_job_desc(ctx, buf_id);
+	if (err)
+		return err;
 
-	rng_create_job_desc(ctx, buf_id);
 	atomic_set(&bd->empty, BUF_EMPTY);
 	submit_job(ctx, buf_id == ctx->current_buf);
 	wait_for_completion(&bd->filled);
+
+	return 0;
 }
 
-static void caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
+static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
 {
+	int err;
+
 	ctx->jrdev = jrdev;
-	rng_create_sh_desc(ctx);
+
+	err = rng_create_sh_desc(ctx);
+	if (err)
+		return err;
+
 	ctx->current_buf = 0;
 	ctx->cur_buf_idx = 0;
-	caam_init_buf(ctx, 0);
-	caam_init_buf(ctx, 1);
+
+	err = caam_init_buf(ctx, 0);
+	if (err)
+		return err;
+
+	err = caam_init_buf(ctx, 1);
+	if (err)
+		return err;
+
+	return 0;
 }
 
 static struct hwrng caam_rng = {
@@ -278,6 +308,35 @@ static void __exit caam_rng_exit(void)
 static int __init caam_rng_init(void)
 {
 	struct device *dev;
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
+	void *priv;
+	int err;
+
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return -ENODEV;
+	}
+
+	pdev = of_find_device_by_node(dev_node);
+	if (!pdev) {
+		of_node_put(dev_node);
+		return -ENODEV;
+	}
+
+	ctrldev = &pdev->dev;
+	priv = dev_get_drvdata(ctrldev);
+	of_node_put(dev_node);
+
+	/*
+	 * If priv is NULL, it's probably because the caam driver wasn't
+	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+	 */
+	if (!priv)
+		return -ENODEV;
 
 	dev = caam_jr_alloc();
 	if (IS_ERR(dev)) {
@@ -287,7 +346,9 @@ static int __init caam_rng_init(void)
 	rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
 	if (!rng_ctx)
 		return -ENOMEM;
-	caam_init_rng(rng_ctx, dev);
+	err = caam_init_rng(rng_ctx, dev);
+	if (err)
+		return err;
 
 	dev_info(dev, "registering rng-caam\n");
 	return hwrng_register(&caam_rng);
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 1c38f86bf63a..3cade79ea41e 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -5,6 +5,7 @@
  * Copyright 2008-2012 Freescale Semiconductor, Inc.
  */
 
+#include <linux/device.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 
@@ -87,6 +88,17 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
 
 	/* Set the bit to request direct access to DECO0 */
 	topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+
+	if (ctrlpriv->virt_en == 1) {
+		setbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+
+		while (!(rd_reg32(&topregs->ctrl.deco_rsr) & DECORSR_VALID) &&
+		       --timeout)
+			cpu_relax();
+
+		timeout = 100000;
+	}
+
 	setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
 
 	while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) &&
@@ -129,6 +141,9 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
 	*status = rd_reg32(&topregs->deco.op_status_hi) &
 		  DECO_OP_STATUS_HI_ERR_MASK;
 
+	if (ctrlpriv->virt_en == 1)
+		clrbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+
 	/* Mark the DECO as free */
 	clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
 
@@ -295,9 +310,6 @@ static int caam_remove(struct platform_device *pdev)
 	/* Unmap controller region */
 	iounmap(&topregs->ctrl);
 
-	kfree(ctrlpriv->jrpdev);
-	kfree(ctrlpriv);
-
 	return ret;
 }
 
@@ -380,9 +392,11 @@ static int caam_probe(struct platform_device *pdev)
 #ifdef CONFIG_DEBUG_FS
 	struct caam_perfmon *perfmon;
 #endif
-	u64 cha_vid;
+	u32 scfgr, comp_params;
+	u32 cha_vid_ls;
 
-	ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
+	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
+				GFP_KERNEL);
 	if (!ctrlpriv)
 		return -ENOMEM;
 
@@ -413,13 +427,40 @@ static int caam_probe(struct platform_device *pdev)
 	setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
 		  (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
 
+	/*
+	 * Read the Compile Time parameters and SCFGR to determine
+	 * if Virtualization is enabled for this platform
+	 */
+	comp_params = rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms);
+	scfgr = rd_reg32(&topregs->ctrl.scfgr);
+
+	ctrlpriv->virt_en = 0;
+	if (comp_params & CTPR_MS_VIRT_EN_INCL) {
+		/* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
+		 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
+		 */
+		if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
+		    (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
+		     (scfgr & SCFGR_VIRT_EN)))
+			ctrlpriv->virt_en = 1;
+	} else {
+		/* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
+		if (comp_params & CTPR_MS_VIRT_EN_POR)
+			ctrlpriv->virt_en = 1;
+	}
+
+	if (ctrlpriv->virt_en == 1)
+		setbits32(&topregs->ctrl.jrstart, JRSTART_JR0_START |
+			  JRSTART_JR1_START | JRSTART_JR2_START |
+			  JRSTART_JR3_START);
+
 	if (sizeof(dma_addr_t) == sizeof(u64))
 		if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
-			dma_set_mask(dev, DMA_BIT_MASK(40));
+			dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
 		else
-			dma_set_mask(dev, DMA_BIT_MASK(36));
+			dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
 	else
-		dma_set_mask(dev, DMA_BIT_MASK(32));
+		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 
 	/*
 	 * Detect and enable JobRs
@@ -432,8 +473,9 @@ static int caam_probe(struct platform_device *pdev)
 		    of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
 			rspec++;
 
-	ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
-								GFP_KERNEL);
+	ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev,
+					sizeof(struct platform_device *) * rspec,
+					GFP_KERNEL);
 	if (ctrlpriv->jrpdev == NULL) {
 		iounmap(&topregs->ctrl);
 		return -ENOMEM;
@@ -456,8 +498,9 @@ static int caam_probe(struct platform_device *pdev)
 	}
 
 	/* Check to see if QI present. If so, enable */
-	ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
-				  CTPR_QI_MASK);
+	ctrlpriv->qi_present =
+			!!(rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms) &
+			   CTPR_MS_QI_MASK);
 	if (ctrlpriv->qi_present) {
 		ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
 		/* This is all that's required to physically enable QI */
@@ -471,13 +514,13 @@ static int caam_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
-	cha_vid = rd_reg64(&topregs->ctrl.perfmon.cha_id);
+	cha_vid_ls = rd_reg32(&topregs->ctrl.perfmon.cha_id_ls);
 
 	/*
 	 * If SEC has RNG version >= 4 and RNG state handle has not been
 	 * already instantiated, do RNG instantiation
 	 */
-	if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) {
+	if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
 		ctrlpriv->rng4_sh_init =
 			rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
 		/*
@@ -531,7 +574,8 @@ static int caam_probe(struct platform_device *pdev)
 
 	/* NOTE: RTIC detection ought to go here, around Si time */
 
-	caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id);
+	caam_id = (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ms) << 32 |
+		  (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ls);
 
 	/* Report "alive" for developer to see */
 	dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
@@ -547,7 +591,7 @@ static int caam_probe(struct platform_device *pdev)
 	 */
 	perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
 
-	ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
+	ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
 	ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
 
 	/* Controller-level - performance monitor counters */
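
Note on the ctrl.c DMA-mask hunks: dma_set_mask() only sets the streaming mask, leaving the coherent mask at its 32-bit default; dma_set_mask_and_coherent() sets both in one call, which is what a device doing both streaming and coherent allocations wants. A one-line sketch, with a hypothetical width check:

#include <linux/dma-mapping.h>

static int foo_set_masks(struct device *dev, bool wide)
{
	/* Sets the streaming and the coherent mask together */
	return dma_set_mask_and_coherent(dev,
			wide ? DMA_BIT_MASK(40) : DMA_BIT_MASK(32));
}

The 64-bit perfmon reads are likewise split into explicit _ms/_ls 32-bit reads, so the code no longer leans on rd_reg64() for what the hardware defines as a pair of 32-bit registers.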
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 7e4500f18df6..d397ff9d56fd 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -321,7 +321,6 @@ struct sec4_sg_entry {
 /* Continue - Not the last FIFO store to come */
 #define FIFOST_CONT_SHIFT	23
 #define FIFOST_CONT_MASK	(1 << FIFOST_CONT_SHIFT)
-#define FIFOST_CONT_MASK	(1 << FIFOST_CONT_SHIFT)
 
 /*
  * Extended Length - use 32-bit extended length that
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 6d85fcc5bd0a..97363db4e56e 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -82,6 +82,7 @@ struct caam_drv_private {
 	u8 total_jobrs;		/* Total Job Rings in device */
 	u8 qi_present;		/* Nonzero if QI present in device */
 	int secvio_irq;		/* Security violation interrupt number */
+	int virt_en;		/* Virtualization enabled in CAAM */
 
 #define	RNG4_MAX_HANDLES 2
 	/* RNG4 block */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index b512a4ba7569..4d18e27ffa9e 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
@@ -476,11 +476,11 @@ static int caam_jr_probe(struct platform_device *pdev) | |||
476 | 476 | ||
477 | if (sizeof(dma_addr_t) == sizeof(u64)) | 477 | if (sizeof(dma_addr_t) == sizeof(u64)) |
478 | if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring")) | 478 | if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring")) |
479 | dma_set_mask(jrdev, DMA_BIT_MASK(40)); | 479 | dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40)); |
480 | else | 480 | else |
481 | dma_set_mask(jrdev, DMA_BIT_MASK(36)); | 481 | dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36)); |
482 | else | 482 | else |
483 | dma_set_mask(jrdev, DMA_BIT_MASK(32)); | 483 | dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32)); |
484 | 484 | ||
485 | /* Identify the interrupt */ | 485 | /* Identify the interrupt */ |
486 | jrpriv->irq = irq_of_parse_and_map(nprop, 0); | 486 | jrpriv->irq = irq_of_parse_and_map(nprop, 0); |
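Editor's note: the job-ring probe now sets the streaming and coherent DMA masks in one call via dma_set_mask_and_coherent(). The width argument comes from DMA_BIT_MASK(); reproducing that macro standalone (as defined in <linux/dma-mapping.h>) shows the three masks chosen above:

    #include <stdio.h>

    /* DMA_BIT_MASK() as defined in <linux/dma-mapping.h>. */
    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    int main(void)
    {
        printf("sec-v5.0 ring: 0x%016llx\n", DMA_BIT_MASK(40));
        printf("older rings:   0x%016llx\n", DMA_BIT_MASK(36));
        printf("32-bit DMA:    0x%016llx\n", DMA_BIT_MASK(32));
        return 0;
    }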
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index cbde8b95a6f8..f48e344ffc39 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h | |||
@@ -84,6 +84,7 @@ | |||
84 | #endif | 84 | #endif |
85 | 85 | ||
86 | #ifndef CONFIG_64BIT | 86 | #ifndef CONFIG_64BIT |
87 | #ifdef __BIG_ENDIAN | ||
87 | static inline void wr_reg64(u64 __iomem *reg, u64 data) | 88 | static inline void wr_reg64(u64 __iomem *reg, u64 data) |
88 | { | 89 | { |
89 | wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32); | 90 | wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32); |
@@ -95,6 +96,21 @@ static inline u64 rd_reg64(u64 __iomem *reg) | |||
95 | return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) | | 96 | return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) | |
96 | ((u64)rd_reg32((u32 __iomem *)reg + 1)); | 97 | ((u64)rd_reg32((u32 __iomem *)reg + 1)); |
97 | } | 98 | } |
99 | #else | ||
100 | #ifdef __LITTLE_ENDIAN | ||
101 | static inline void wr_reg64(u64 __iomem *reg, u64 data) | ||
102 | { | ||
103 | wr_reg32((u32 __iomem *)reg + 1, (data & 0xffffffff00000000ull) >> 32); | ||
104 | wr_reg32((u32 __iomem *)reg, data & 0x00000000ffffffffull); | ||
105 | } | ||
106 | |||
107 | static inline u64 rd_reg64(u64 __iomem *reg) | ||
108 | { | ||
109 | return (((u64)rd_reg32((u32 __iomem *)reg + 1)) << 32) | | ||
110 | ((u64)rd_reg32((u32 __iomem *)reg)); | ||
111 | } | ||
112 | #endif | ||
113 | #endif | ||
98 | #endif | 114 | #endif |
99 | 115 | ||
100 | /* | 116 | /* |
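Editor's note: the new little-endian variants above split each 64-bit register access into two 32-bit halves, with the upper half at the higher word address, the mirror image of the pre-existing big-endian helpers. A host-side sketch that round-trips a value through that layout (plain memory stands in for MMIO; helper names mirror the driver's):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t mmio;   /* plain memory standing in for a register pair */

    static void wr_reg32(uint32_t *reg, uint32_t v) { *reg = v; }
    static uint32_t rd_reg32(uint32_t *reg) { return *reg; }

    /* Little-endian layout: upper 32 bits at reg + 1, lower at reg. */
    static void wr_reg64(uint64_t *reg, uint64_t data)
    {
        wr_reg32((uint32_t *)reg + 1, (data & 0xffffffff00000000ull) >> 32);
        wr_reg32((uint32_t *)reg, data & 0x00000000ffffffffull);
    }

    static uint64_t rd_reg64(uint64_t *reg)
    {
        return ((uint64_t)rd_reg32((uint32_t *)reg + 1) << 32) |
               (uint64_t)rd_reg32((uint32_t *)reg);
    }

    int main(void)
    {
        wr_reg64(&mmio, 0x1122334455667788ull);
        printf("read back 0x%016llx\n",
               (unsigned long long)rd_reg64(&mmio));
        return 0;
    }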
@@ -114,45 +130,45 @@ struct jr_outentry { | |||
114 | */ | 130 | */ |
115 | 131 | ||
116 | /* Number of DECOs */ | 132 | /* Number of DECOs */ |
117 | #define CHA_NUM_DECONUM_SHIFT 56 | 133 | #define CHA_NUM_MS_DECONUM_SHIFT 24 |
118 | #define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT) | 134 | #define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT) |
119 | 135 | ||
120 | /* CHA Version IDs */ | 136 | /* CHA Version IDs */ |
121 | #define CHA_ID_AES_SHIFT 0 | 137 | #define CHA_ID_LS_AES_SHIFT 0 |
122 | #define CHA_ID_AES_MASK (0xfull << CHA_ID_AES_SHIFT) | 138 | #define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT) |
123 | 139 | ||
124 | #define CHA_ID_DES_SHIFT 4 | 140 | #define CHA_ID_LS_DES_SHIFT 4 |
125 | #define CHA_ID_DES_MASK (0xfull << CHA_ID_DES_SHIFT) | 141 | #define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT) |
126 | 142 | ||
127 | #define CHA_ID_ARC4_SHIFT 8 | 143 | #define CHA_ID_LS_ARC4_SHIFT 8 |
128 | #define CHA_ID_ARC4_MASK (0xfull << CHA_ID_ARC4_SHIFT) | 144 | #define CHA_ID_LS_ARC4_MASK (0xfull << CHA_ID_LS_ARC4_SHIFT) |
129 | 145 | ||
130 | #define CHA_ID_MD_SHIFT 12 | 146 | #define CHA_ID_LS_MD_SHIFT 12 |
131 | #define CHA_ID_MD_MASK (0xfull << CHA_ID_MD_SHIFT) | 147 | #define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT) |
132 | 148 | ||
133 | #define CHA_ID_RNG_SHIFT 16 | 149 | #define CHA_ID_LS_RNG_SHIFT 16 |
134 | #define CHA_ID_RNG_MASK (0xfull << CHA_ID_RNG_SHIFT) | 150 | #define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT) |
135 | 151 | ||
136 | #define CHA_ID_SNW8_SHIFT 20 | 152 | #define CHA_ID_LS_SNW8_SHIFT 20 |
137 | #define CHA_ID_SNW8_MASK (0xfull << CHA_ID_SNW8_SHIFT) | 153 | #define CHA_ID_LS_SNW8_MASK (0xfull << CHA_ID_LS_SNW8_SHIFT) |
138 | 154 | ||
139 | #define CHA_ID_KAS_SHIFT 24 | 155 | #define CHA_ID_LS_KAS_SHIFT 24 |
140 | #define CHA_ID_KAS_MASK (0xfull << CHA_ID_KAS_SHIFT) | 156 | #define CHA_ID_LS_KAS_MASK (0xfull << CHA_ID_LS_KAS_SHIFT) |
141 | 157 | ||
142 | #define CHA_ID_PK_SHIFT 28 | 158 | #define CHA_ID_LS_PK_SHIFT 28 |
143 | #define CHA_ID_PK_MASK (0xfull << CHA_ID_PK_SHIFT) | 159 | #define CHA_ID_LS_PK_MASK (0xfull << CHA_ID_LS_PK_SHIFT) |
144 | 160 | ||
145 | #define CHA_ID_CRC_SHIFT 32 | 161 | #define CHA_ID_MS_CRC_SHIFT 0 |
146 | #define CHA_ID_CRC_MASK (0xfull << CHA_ID_CRC_SHIFT) | 162 | #define CHA_ID_MS_CRC_MASK (0xfull << CHA_ID_MS_CRC_SHIFT) |
147 | 163 | ||
148 | #define CHA_ID_SNW9_SHIFT 36 | 164 | #define CHA_ID_MS_SNW9_SHIFT 4 |
149 | #define CHA_ID_SNW9_MASK (0xfull << CHA_ID_SNW9_SHIFT) | 165 | #define CHA_ID_MS_SNW9_MASK (0xfull << CHA_ID_MS_SNW9_SHIFT) |
150 | 166 | ||
151 | #define CHA_ID_DECO_SHIFT 56 | 167 | #define CHA_ID_MS_DECO_SHIFT 24 |
152 | #define CHA_ID_DECO_MASK (0xfull << CHA_ID_DECO_SHIFT) | 168 | #define CHA_ID_MS_DECO_MASK (0xfull << CHA_ID_MS_DECO_SHIFT) |
153 | 169 | ||
154 | #define CHA_ID_JR_SHIFT 60 | 170 | #define CHA_ID_MS_JR_SHIFT 28 |
155 | #define CHA_ID_JR_MASK (0xfull << CHA_ID_JR_SHIFT) | 171 | #define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT) |
156 | 172 | ||
157 | struct sec_vid { | 173 | struct sec_vid { |
158 | u16 ip_id; | 174 | u16 ip_id; |
@@ -172,10 +188,14 @@ struct caam_perfmon { | |||
172 | u64 rsvd[13]; | 188 | u64 rsvd[13]; |
173 | 189 | ||
174 | /* CAAM Hardware Instantiation Parameters fa0-fbf */ | 190 | /* CAAM Hardware Instantiation Parameters fa0-fbf */ |
175 | u64 cha_rev; /* CRNR - CHA Revision Number */ | 191 | u32 cha_rev_ms; /* CRNR - CHA Rev No. Most significant half*/ |
176 | #define CTPR_QI_SHIFT 57 | 192 | u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/ |
177 | #define CTPR_QI_MASK (0x1ull << CTPR_QI_SHIFT) | 193 | #define CTPR_MS_QI_SHIFT 25 |
178 | u64 comp_parms; /* CTPR - Compile Parameters Register */ | 194 | #define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT) |
195 | #define CTPR_MS_VIRT_EN_INCL 0x00000001 | ||
196 | #define CTPR_MS_VIRT_EN_POR 0x00000002 | ||
197 | u32 comp_parms_ms; /* CTPR - Compile Parameters Register */ | ||
198 | u32 comp_parms_ls; /* CTPR - Compile Parameters Register */ | ||
179 | u64 rsvd1[2]; | 199 | u64 rsvd1[2]; |
180 | 200 | ||
181 | /* CAAM Global Status fc0-fdf */ | 201 | /* CAAM Global Status fc0-fdf */ |
@@ -189,9 +209,12 @@ struct caam_perfmon { | |||
189 | /* Component Instantiation Parameters fe0-fff */ | 209 | /* Component Instantiation Parameters fe0-fff */ |
190 | u32 rtic_id; /* RVID - RTIC Version ID */ | 210 | u32 rtic_id; /* RVID - RTIC Version ID */ |
191 | u32 ccb_id; /* CCBVID - CCB Version ID */ | 211 | u32 ccb_id; /* CCBVID - CCB Version ID */ |
192 | u64 cha_id; /* CHAVID - CHA Version ID */ | 212 | u32 cha_id_ms; /* CHAVID - CHA Version ID Most Significant*/ |
193 | u64 cha_num; /* CHANUM - CHA Number */ | 213 | u32 cha_id_ls; /* CHAVID - CHA Version ID Least Significant*/ |
194 | u64 caam_id; /* CAAMVID - CAAM Version ID */ | 214 | u32 cha_num_ms; /* CHANUM - CHA Number Most Significant */ |
215 | u32 cha_num_ls; /* CHANUM - CHA Number Least Significant*/ | ||
216 | u32 caam_id_ms; /* CAAMVID - CAAM Version ID MS */ | ||
217 | u32 caam_id_ls; /* CAAMVID - CAAM Version ID LS */ | ||
195 | }; | 218 | }; |
196 | 219 | ||
197 | /* LIODN programming for DMA configuration */ | 220 | /* LIODN programming for DMA configuration */ |
@@ -304,9 +327,12 @@ struct caam_ctrl { | |||
304 | /* Bus Access Configuration Section 010-11f */ | 327 | /* Bus Access Configuration Section 010-11f */ |
305 | /* Read/Writable */ | 328 | /* Read/Writable */ |
306 | struct masterid jr_mid[4]; /* JRxLIODNR - JobR LIODN setup */ | 329 | struct masterid jr_mid[4]; /* JRxLIODNR - JobR LIODN setup */ |
307 | u32 rsvd3[12]; | 330 | u32 rsvd3[11]; |
331 | u32 jrstart; /* JRSTART - Job Ring Start Register */ | ||
308 | struct masterid rtic_mid[4]; /* RTICxLIODNR - RTIC LIODN setup */ | 332 | struct masterid rtic_mid[4]; /* RTICxLIODNR - RTIC LIODN setup */ |
309 | u32 rsvd4[7]; | 333 | u32 rsvd4[5]; |
334 | u32 deco_rsr; /* DECORSR - Deco Request Source */ | ||
335 | u32 rsvd11; | ||
310 | u32 deco_rq; /* DECORR - DECO Request */ | 336 | u32 deco_rq; /* DECORR - DECO Request */ |
311 | struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */ | 337 | struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */ |
312 | u32 rsvd5[22]; | 338 | u32 rsvd5[22]; |
@@ -347,7 +373,10 @@ struct caam_ctrl { | |||
347 | #define MCFGR_DMA_RESET 0x10000000 | 373 | #define MCFGR_DMA_RESET 0x10000000 |
348 | #define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */ | 374 | #define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */ |
349 | #define SCFGR_RDBENABLE 0x00000400 | 375 | #define SCFGR_RDBENABLE 0x00000400 |
376 | #define SCFGR_VIRT_EN 0x00008000 | ||
350 | #define DECORR_RQD0ENABLE 0x00000001 /* Enable DECO0 for direct access */ | 377 | #define DECORR_RQD0ENABLE 0x00000001 /* Enable DECO0 for direct access */ |
378 | #define DECORSR_JR0 0x00000001 /* JR to supply TZ, SDID, ICID */ | ||
379 | #define DECORSR_VALID 0x80000000 | ||
351 | #define DECORR_DEN0 0x00010000 /* DECO0 available for access*/ | 380 | #define DECORR_DEN0 0x00010000 /* DECO0 available for access*/ |
352 | 381 | ||
353 | /* AXI read cache control */ | 382 | /* AXI read cache control */ |
@@ -365,6 +394,12 @@ struct caam_ctrl { | |||
365 | #define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */ | 394 | #define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */ |
366 | #define MCFGR_BURST_64 0x00000001 /* Max burst size */ | 395 | #define MCFGR_BURST_64 0x00000001 /* Max burst size */ |
367 | 396 | ||
397 | /* JRSTART register offsets */ | ||
398 | #define JRSTART_JR0_START 0x00000001 /* Start Job ring 0 */ | ||
399 | #define JRSTART_JR1_START 0x00000002 /* Start Job ring 1 */ | ||
400 | #define JRSTART_JR2_START 0x00000004 /* Start Job ring 2 */ | ||
401 | #define JRSTART_JR3_START 0x00000008 /* Start Job ring 3 */ | ||
402 | |||
368 | /* | 403 | /* |
369 | * caam_job_ring - direct job ring setup | 404 | * caam_job_ring - direct job ring setup |
370 | * 1-4 possible per instantiation, base + 1000/2000/3000/4000 | 405 | * 1-4 possible per instantiation, base + 1000/2000/3000/4000 |
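Editor's note: with CHAVID split into 32-bit halves, version fields are now extracted from the least-significant word with the *_LS_* masks, as the RNG check in caam_probe() does. A standalone sketch using the definitions above (the register value is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define CHA_ID_LS_RNG_SHIFT 16
    #define CHA_ID_LS_RNG_MASK  (0xfull << CHA_ID_LS_RNG_SHIFT)

    int main(void)
    {
        uint32_t cha_vid_ls = 0x00040100;   /* hypothetical CHAVID_LS */
        unsigned int rng_vid = (cha_vid_ls & CHA_ID_LS_RNG_MASK) >>
                               CHA_ID_LS_RNG_SHIFT;

        if (rng_vid >= 4)
            printf("RNG version %u >= 4: run RNG4 instantiation\n",
                   rng_vid);
        return 0;
    }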
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index d3505a018720..7f592d8d07bb 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile | |||
@@ -1,6 +1,11 @@ | |||
1 | obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o | 1 | obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o |
2 | ccp-objs := ccp-dev.o ccp-ops.o | 2 | ccp-objs := ccp-dev.o ccp-ops.o |
3 | ifdef CONFIG_X86 | ||
3 | ccp-objs += ccp-pci.o | 4 | ccp-objs += ccp-pci.o |
5 | endif | ||
6 | ifdef CONFIG_ARM64 | ||
7 | ccp-objs += ccp-platform.o | ||
8 | endif | ||
4 | 9 | ||
5 | obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o | 10 | obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o |
6 | ccp-crypto-objs := ccp-crypto-main.o \ | 11 | ccp-crypto-objs := ccp-crypto-main.o \ |
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 2c7816149b01..a7d110652a74 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c | |||
@@ -20,7 +20,9 @@ | |||
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/hw_random.h> | 21 | #include <linux/hw_random.h> |
22 | #include <linux/cpu.h> | 22 | #include <linux/cpu.h> |
23 | #ifdef CONFIG_X86 | ||
23 | #include <asm/cpu_device_id.h> | 24 | #include <asm/cpu_device_id.h> |
25 | #endif | ||
24 | #include <linux/ccp.h> | 26 | #include <linux/ccp.h> |
25 | 27 | ||
26 | #include "ccp-dev.h" | 28 | #include "ccp-dev.h" |
@@ -360,6 +362,12 @@ int ccp_init(struct ccp_device *ccp) | |||
360 | /* Build queue interrupt mask (two interrupts per queue) */ | 362 | /* Build queue interrupt mask (two interrupts per queue) */ |
361 | qim |= cmd_q->int_ok | cmd_q->int_err; | 363 | qim |= cmd_q->int_ok | cmd_q->int_err; |
362 | 364 | ||
365 | #ifdef CONFIG_ARM64 | ||
366 | /* For arm64 set the recommended queue cache settings */ | ||
367 | iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE + | ||
368 | (CMD_Q_CACHE_INC * i)); | ||
369 | #endif | ||
370 | |||
363 | dev_dbg(dev, "queue #%u available\n", i); | 371 | dev_dbg(dev, "queue #%u available\n", i); |
364 | } | 372 | } |
365 | if (ccp->cmd_q_count == 0) { | 373 | if (ccp->cmd_q_count == 0) { |
@@ -558,12 +566,15 @@ bool ccp_queues_suspended(struct ccp_device *ccp) | |||
558 | } | 566 | } |
559 | #endif | 567 | #endif |
560 | 568 | ||
569 | #ifdef CONFIG_X86 | ||
561 | static const struct x86_cpu_id ccp_support[] = { | 570 | static const struct x86_cpu_id ccp_support[] = { |
562 | { X86_VENDOR_AMD, 22, }, | 571 | { X86_VENDOR_AMD, 22, }, |
563 | }; | 572 | }; |
573 | #endif | ||
564 | 574 | ||
565 | static int __init ccp_mod_init(void) | 575 | static int __init ccp_mod_init(void) |
566 | { | 576 | { |
577 | #ifdef CONFIG_X86 | ||
567 | struct cpuinfo_x86 *cpuinfo = &boot_cpu_data; | 578 | struct cpuinfo_x86 *cpuinfo = &boot_cpu_data; |
568 | int ret; | 579 | int ret; |
569 | 580 | ||
@@ -589,12 +600,30 @@ static int __init ccp_mod_init(void) | |||
589 | 600 | ||
590 | break; | 601 | break; |
591 | } | 602 | } |
603 | #endif | ||
604 | |||
605 | #ifdef CONFIG_ARM64 | ||
606 | int ret; | ||
607 | |||
608 | ret = ccp_platform_init(); | ||
609 | if (ret) | ||
610 | return ret; | ||
611 | |||
612 | /* Don't leave the driver loaded if init failed */ | ||
613 | if (!ccp_get_device()) { | ||
614 | ccp_platform_exit(); | ||
615 | return -ENODEV; | ||
616 | } | ||
617 | |||
618 | return 0; | ||
619 | #endif | ||
592 | 620 | ||
593 | return -ENODEV; | 621 | return -ENODEV; |
594 | } | 622 | } |
595 | 623 | ||
596 | static void __exit ccp_mod_exit(void) | 624 | static void __exit ccp_mod_exit(void) |
597 | { | 625 | { |
626 | #ifdef CONFIG_X86 | ||
598 | struct cpuinfo_x86 *cpuinfo = &boot_cpu_data; | 627 | struct cpuinfo_x86 *cpuinfo = &boot_cpu_data; |
599 | 628 | ||
600 | switch (cpuinfo->x86) { | 629 | switch (cpuinfo->x86) { |
@@ -602,6 +631,11 @@ static void __exit ccp_mod_exit(void) | |||
602 | ccp_pci_exit(); | 631 | ccp_pci_exit(); |
603 | break; | 632 | break; |
604 | } | 633 | } |
634 | #endif | ||
635 | |||
636 | #ifdef CONFIG_ARM64 | ||
637 | ccp_platform_exit(); | ||
638 | #endif | ||
605 | } | 639 | } |
606 | 640 | ||
607 | module_init(ccp_mod_init); | 641 | module_init(ccp_mod_init); |
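Editor's note: on the arm64 path, ccp_mod_init() registers the platform driver and then backs the registration out if no device actually bound. A toy standalone sketch of that register-then-verify pattern (all helpers below are stand-ins, not the driver's):

    #include <stdio.h>

    #define ENODEV 19

    static int registered;
    static int platform_init(void) { registered = 1; return 0; }
    static void platform_exit(void) { registered = 0; }
    static void *get_device(void) { return NULL; }  /* nothing bound */

    static int mod_init(void)
    {
        int ret = platform_init();

        if (ret)
            return ret;

        /* Don't leave the driver loaded if init failed */
        if (!get_device()) {
            platform_exit();
            return -ENODEV;
        }
        return 0;
    }

    int main(void)
    {
        printf("init -> %d, registered = %d\n", mod_init(), registered);
        return 0;
    }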
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 7ec536e702ec..62ff35a6b9ec 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h | |||
@@ -23,8 +23,6 @@ | |||
23 | #include <linux/hw_random.h> | 23 | #include <linux/hw_random.h> |
24 | 24 | ||
25 | 25 | ||
26 | #define IO_OFFSET 0x20000 | ||
27 | |||
28 | #define MAX_DMAPOOL_NAME_LEN 32 | 26 | #define MAX_DMAPOOL_NAME_LEN 32 |
29 | 27 | ||
30 | #define MAX_HW_QUEUES 5 | 28 | #define MAX_HW_QUEUES 5 |
@@ -32,6 +30,9 @@ | |||
32 | 30 | ||
33 | #define TRNG_RETRIES 10 | 31 | #define TRNG_RETRIES 10 |
34 | 32 | ||
33 | #define CACHE_NONE 0x00 | ||
34 | #define CACHE_WB_NO_ALLOC 0xb7 | ||
35 | |||
35 | 36 | ||
36 | /****** Register Mappings ******/ | 37 | /****** Register Mappings ******/ |
37 | #define Q_MASK_REG 0x000 | 38 | #define Q_MASK_REG 0x000 |
@@ -50,7 +51,7 @@ | |||
50 | #define CMD_Q_INT_STATUS_BASE 0x214 | 51 | #define CMD_Q_INT_STATUS_BASE 0x214 |
51 | #define CMD_Q_STATUS_INCR 0x20 | 52 | #define CMD_Q_STATUS_INCR 0x20 |
52 | 53 | ||
53 | #define CMD_Q_CACHE 0x228 | 54 | #define CMD_Q_CACHE_BASE 0x228 |
54 | #define CMD_Q_CACHE_INC 0x20 | 55 | #define CMD_Q_CACHE_INC 0x20 |
55 | 56 | ||
56 | #define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f); | 57 | #define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f); |
@@ -194,6 +195,7 @@ struct ccp_device { | |||
194 | void *dev_specific; | 195 | void *dev_specific; |
195 | int (*get_irq)(struct ccp_device *ccp); | 196 | int (*get_irq)(struct ccp_device *ccp); |
196 | void (*free_irq)(struct ccp_device *ccp); | 197 | void (*free_irq)(struct ccp_device *ccp); |
198 | unsigned int irq; | ||
197 | 199 | ||
198 | /* | 200 | /* |
199 | * I/O area used for device communication. The register mapping | 201 | * I/O area used for device communication. The register mapping |
@@ -254,12 +256,18 @@ struct ccp_device { | |||
254 | /* Suspend support */ | 256 | /* Suspend support */ |
255 | unsigned int suspending; | 257 | unsigned int suspending; |
256 | wait_queue_head_t suspend_queue; | 258 | wait_queue_head_t suspend_queue; |
259 | |||
260 | /* DMA caching attribute support */ | ||
261 | unsigned int axcache; | ||
257 | }; | 262 | }; |
258 | 263 | ||
259 | 264 | ||
260 | int ccp_pci_init(void); | 265 | int ccp_pci_init(void); |
261 | void ccp_pci_exit(void); | 266 | void ccp_pci_exit(void); |
262 | 267 | ||
268 | int ccp_platform_init(void); | ||
269 | void ccp_platform_exit(void); | ||
270 | |||
263 | struct ccp_device *ccp_alloc_struct(struct device *dev); | 271 | struct ccp_device *ccp_alloc_struct(struct device *dev); |
264 | int ccp_init(struct ccp_device *ccp); | 272 | int ccp_init(struct ccp_device *ccp); |
265 | void ccp_destroy(struct ccp_device *ccp); | 273 | void ccp_destroy(struct ccp_device *ccp); |
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 9ae006d69df4..8729364261d7 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c | |||
@@ -1606,7 +1606,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1606 | goto e_ksb; | 1606 | goto e_ksb; |
1607 | 1607 | ||
1608 | ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, CCP_KSB_BYTES, | 1608 | ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, CCP_KSB_BYTES, |
1609 | true); | 1609 | false); |
1610 | ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key, | 1610 | ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key, |
1611 | CCP_PASSTHRU_BYTESWAP_NOOP); | 1611 | CCP_PASSTHRU_BYTESWAP_NOOP); |
1612 | if (ret) { | 1612 | if (ret) { |
@@ -1623,10 +1623,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1623 | goto e_exp; | 1623 | goto e_exp; |
1624 | 1624 | ||
1625 | ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, CCP_KSB_BYTES, | 1625 | ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, CCP_KSB_BYTES, |
1626 | true); | 1626 | false); |
1627 | src.address += o_len; /* Adjust the address for the copy operation */ | 1627 | src.address += o_len; /* Adjust the address for the copy operation */ |
1628 | ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, CCP_KSB_BYTES, | 1628 | ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, CCP_KSB_BYTES, |
1629 | true); | 1629 | false); |
1630 | src.address -= o_len; /* Reset the address to original value */ | 1630 | src.address -= o_len; /* Reset the address to original value */ |
1631 | 1631 | ||
1632 | /* Prepare the output area for the operation */ | 1632 | /* Prepare the output area for the operation */ |
@@ -1841,20 +1841,20 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1841 | 1841 | ||
1842 | /* Copy the ECC modulus */ | 1842 | /* Copy the ECC modulus */ |
1843 | ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len, | 1843 | ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len, |
1844 | CCP_ECC_OPERAND_SIZE, true); | 1844 | CCP_ECC_OPERAND_SIZE, false); |
1845 | src.address += CCP_ECC_OPERAND_SIZE; | 1845 | src.address += CCP_ECC_OPERAND_SIZE; |
1846 | 1846 | ||
1847 | /* Copy the first operand */ | 1847 | /* Copy the first operand */ |
1848 | ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1, | 1848 | ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1, |
1849 | ecc->u.mm.operand_1_len, | 1849 | ecc->u.mm.operand_1_len, |
1850 | CCP_ECC_OPERAND_SIZE, true); | 1850 | CCP_ECC_OPERAND_SIZE, false); |
1851 | src.address += CCP_ECC_OPERAND_SIZE; | 1851 | src.address += CCP_ECC_OPERAND_SIZE; |
1852 | 1852 | ||
1853 | if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) { | 1853 | if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) { |
1854 | /* Copy the second operand */ | 1854 | /* Copy the second operand */ |
1855 | ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2, | 1855 | ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2, |
1856 | ecc->u.mm.operand_2_len, | 1856 | ecc->u.mm.operand_2_len, |
1857 | CCP_ECC_OPERAND_SIZE, true); | 1857 | CCP_ECC_OPERAND_SIZE, false); |
1858 | src.address += CCP_ECC_OPERAND_SIZE; | 1858 | src.address += CCP_ECC_OPERAND_SIZE; |
1859 | } | 1859 | } |
1860 | 1860 | ||
@@ -1960,17 +1960,17 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1960 | 1960 | ||
1961 | /* Copy the ECC modulus */ | 1961 | /* Copy the ECC modulus */ |
1962 | ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len, | 1962 | ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len, |
1963 | CCP_ECC_OPERAND_SIZE, true); | 1963 | CCP_ECC_OPERAND_SIZE, false); |
1964 | src.address += CCP_ECC_OPERAND_SIZE; | 1964 | src.address += CCP_ECC_OPERAND_SIZE; |
1965 | 1965 | ||
1966 | /* Copy the first point X and Y coordinate */ | 1966 | /* Copy the first point X and Y coordinate */ |
1967 | ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x, | 1967 | ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x, |
1968 | ecc->u.pm.point_1.x_len, | 1968 | ecc->u.pm.point_1.x_len, |
1969 | CCP_ECC_OPERAND_SIZE, true); | 1969 | CCP_ECC_OPERAND_SIZE, false); |
1970 | src.address += CCP_ECC_OPERAND_SIZE; | 1970 | src.address += CCP_ECC_OPERAND_SIZE; |
1971 | ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y, | 1971 | ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y, |
1972 | ecc->u.pm.point_1.y_len, | 1972 | ecc->u.pm.point_1.y_len, |
1973 | CCP_ECC_OPERAND_SIZE, true); | 1973 | CCP_ECC_OPERAND_SIZE, false); |
1974 | src.address += CCP_ECC_OPERAND_SIZE; | 1974 | src.address += CCP_ECC_OPERAND_SIZE; |
1975 | 1975 | ||
1976 | /* Set the first point Z coordinate to 1 */ | 1976 | /* Set the first point Z coordinate to 1 */ |
@@ -1981,11 +1981,11 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1981 | /* Copy the second point X and Y coordinate */ | 1981 | /* Copy the second point X and Y coordinate */ |
1982 | ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x, | 1982 | ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x, |
1983 | ecc->u.pm.point_2.x_len, | 1983 | ecc->u.pm.point_2.x_len, |
1984 | CCP_ECC_OPERAND_SIZE, true); | 1984 | CCP_ECC_OPERAND_SIZE, false); |
1985 | src.address += CCP_ECC_OPERAND_SIZE; | 1985 | src.address += CCP_ECC_OPERAND_SIZE; |
1986 | ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y, | 1986 | ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y, |
1987 | ecc->u.pm.point_2.y_len, | 1987 | ecc->u.pm.point_2.y_len, |
1988 | CCP_ECC_OPERAND_SIZE, true); | 1988 | CCP_ECC_OPERAND_SIZE, false); |
1989 | src.address += CCP_ECC_OPERAND_SIZE; | 1989 | src.address += CCP_ECC_OPERAND_SIZE; |
1990 | 1990 | ||
1991 | /* Set the second point Z coordinate to 1 */ | 1991 | /* Set the second point Z coordinate to 1 */ |
@@ -1995,14 +1995,14 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1995 | /* Copy the Domain "a" parameter */ | 1995 | /* Copy the Domain "a" parameter */ |
1996 | ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a, | 1996 | ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a, |
1997 | ecc->u.pm.domain_a_len, | 1997 | ecc->u.pm.domain_a_len, |
1998 | CCP_ECC_OPERAND_SIZE, true); | 1998 | CCP_ECC_OPERAND_SIZE, false); |
1999 | src.address += CCP_ECC_OPERAND_SIZE; | 1999 | src.address += CCP_ECC_OPERAND_SIZE; |
2000 | 2000 | ||
2001 | if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) { | 2001 | if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) { |
2002 | /* Copy the scalar value */ | 2002 | /* Copy the scalar value */ |
2003 | ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar, | 2003 | ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar, |
2004 | ecc->u.pm.scalar_len, | 2004 | ecc->u.pm.scalar_len, |
2005 | CCP_ECC_OPERAND_SIZE, true); | 2005 | CCP_ECC_OPERAND_SIZE, false); |
2006 | src.address += CCP_ECC_OPERAND_SIZE; | 2006 | src.address += CCP_ECC_OPERAND_SIZE; |
2007 | } | 2007 | } |
2008 | } | 2008 | } |
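Editor's note: every flipped boolean above is the final argument of ccp_reverse_set_dm_area(); judging by the direction of the fix, true selected sign extension of the padded operand and false now zero-pads. The helper below is hypothetical, written only to show why 0xff padding corrupts an unsigned big-number operand whose top byte has the high bit set:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in: stage a byte-reversed operand into a
     * fixed-size work area, optionally sign-extending the padding. */
    static void reverse_fill(uint8_t *dst, size_t dst_len,
                             const uint8_t *src, size_t src_len,
                             int sign_extend)
    {
        uint8_t pad = (sign_extend && (src[0] & 0x80)) ? 0xff : 0x00;
        size_t i;

        for (i = 0; i < src_len; i++)
            dst[i] = src[src_len - 1 - i];  /* byte-reverse */
        memset(dst + src_len, pad, dst_len - src_len);
    }

    static void dump(const char *tag, const uint8_t *p, size_t n)
    {
        size_t i;

        printf("%s", tag);
        for (i = 0; i < n; i++)
            printf(" %02x", p[i]);
        printf("\n");
    }

    int main(void)
    {
        const uint8_t mod[] = { 0xc1, 0x00, 0x25 }; /* high bit set */
        uint8_t wa[8];

        reverse_fill(wa, sizeof(wa), mod, sizeof(mod), 1);
        dump("sign-extended (old):", wa, sizeof(wa)); /* value corrupted */

        reverse_fill(wa, sizeof(wa), mod, sizeof(mod), 0);
        dump("zero-padded   (new):", wa, sizeof(wa)); /* value preserved */
        return 0;
    }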
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c index 0d746236df5e..180cc87b4dbb 100644 --- a/drivers/crypto/ccp/ccp-pci.c +++ b/drivers/crypto/ccp/ccp-pci.c | |||
@@ -12,8 +12,10 @@ | |||
12 | 12 | ||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/device.h> | ||
15 | #include <linux/pci.h> | 16 | #include <linux/pci.h> |
16 | #include <linux/pci_ids.h> | 17 | #include <linux/pci_ids.h> |
18 | #include <linux/dma-mapping.h> | ||
17 | #include <linux/kthread.h> | 19 | #include <linux/kthread.h> |
18 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
19 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
@@ -24,6 +26,8 @@ | |||
24 | #include "ccp-dev.h" | 26 | #include "ccp-dev.h" |
25 | 27 | ||
26 | #define IO_BAR 2 | 28 | #define IO_BAR 2 |
29 | #define IO_OFFSET 0x20000 | ||
30 | |||
27 | #define MSIX_VECTORS 2 | 31 | #define MSIX_VECTORS 2 |
28 | 32 | ||
29 | struct ccp_msix { | 33 | struct ccp_msix { |
@@ -89,7 +93,8 @@ static int ccp_get_msi_irq(struct ccp_device *ccp) | |||
89 | if (ret) | 93 | if (ret) |
90 | return ret; | 94 | return ret; |
91 | 95 | ||
92 | ret = request_irq(pdev->irq, ccp_irq_handler, 0, "ccp", dev); | 96 | ccp->irq = pdev->irq; |
97 | ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev); | ||
93 | if (ret) { | 98 | if (ret) { |
94 | dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret); | 99 | dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret); |
95 | goto e_msi; | 100 | goto e_msi; |
@@ -136,7 +141,7 @@ static void ccp_free_irqs(struct ccp_device *ccp) | |||
136 | dev); | 141 | dev); |
137 | pci_disable_msix(pdev); | 142 | pci_disable_msix(pdev); |
138 | } else { | 143 | } else { |
139 | free_irq(pdev->irq, dev); | 144 | free_irq(ccp->irq, dev); |
140 | pci_disable_msi(pdev); | 145 | pci_disable_msi(pdev); |
141 | } | 146 | } |
142 | } | 147 | } |
@@ -147,21 +152,12 @@ static int ccp_find_mmio_area(struct ccp_device *ccp) | |||
147 | struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); | 152 | struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); |
148 | resource_size_t io_len; | 153 | resource_size_t io_len; |
149 | unsigned long io_flags; | 154 | unsigned long io_flags; |
150 | int bar; | ||
151 | 155 | ||
152 | io_flags = pci_resource_flags(pdev, IO_BAR); | 156 | io_flags = pci_resource_flags(pdev, IO_BAR); |
153 | io_len = pci_resource_len(pdev, IO_BAR); | 157 | io_len = pci_resource_len(pdev, IO_BAR); |
154 | if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800))) | 158 | if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800))) |
155 | return IO_BAR; | 159 | return IO_BAR; |
156 | 160 | ||
157 | for (bar = 0; bar < PCI_STD_RESOURCE_END; bar++) { | ||
158 | io_flags = pci_resource_flags(pdev, bar); | ||
159 | io_len = pci_resource_len(pdev, bar); | ||
160 | if ((io_flags & IORESOURCE_MEM) && | ||
161 | (io_len >= (IO_OFFSET + 0x800))) | ||
162 | return bar; | ||
163 | } | ||
164 | |||
165 | return -EIO; | 161 | return -EIO; |
166 | } | 162 | } |
167 | 163 | ||
@@ -214,20 +210,13 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
214 | } | 210 | } |
215 | ccp->io_regs = ccp->io_map + IO_OFFSET; | 211 | ccp->io_regs = ccp->io_map + IO_OFFSET; |
216 | 212 | ||
217 | ret = dma_set_mask(dev, DMA_BIT_MASK(48)); | 213 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); |
218 | if (ret == 0) { | 214 | if (ret) { |
219 | ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(48)); | 215 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); |
220 | if (ret) { | 216 | if (ret) { |
221 | dev_err(dev, | 217 | dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", |
222 | "pci_set_consistent_dma_mask failed (%d)\n", | ||
223 | ret); | 218 | ret); |
224 | goto e_bar0; | 219 | goto e_iomap; |
225 | } | ||
226 | } else { | ||
227 | ret = dma_set_mask(dev, DMA_BIT_MASK(32)); | ||
228 | if (ret) { | ||
229 | dev_err(dev, "pci_set_dma_mask failed (%d)\n", ret); | ||
230 | goto e_bar0; | ||
231 | } | 220 | } |
232 | } | 221 | } |
233 | 222 | ||
@@ -235,13 +224,13 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
235 | 224 | ||
236 | ret = ccp_init(ccp); | 225 | ret = ccp_init(ccp); |
237 | if (ret) | 226 | if (ret) |
238 | goto e_bar0; | 227 | goto e_iomap; |
239 | 228 | ||
240 | dev_notice(dev, "enabled\n"); | 229 | dev_notice(dev, "enabled\n"); |
241 | 230 | ||
242 | return 0; | 231 | return 0; |
243 | 232 | ||
244 | e_bar0: | 233 | e_iomap: |
245 | pci_iounmap(pdev, ccp->io_map); | 234 | pci_iounmap(pdev, ccp->io_map); |
246 | 235 | ||
247 | e_device: | 236 | e_device: |
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c new file mode 100644 index 000000000000..b0a2806908f1 --- /dev/null +++ b/drivers/crypto/ccp/ccp-platform.c | |||
@@ -0,0 +1,230 @@ | |||
1 | /* | ||
2 | * AMD Cryptographic Coprocessor (CCP) driver | ||
3 | * | ||
4 | * Copyright (C) 2014 Advanced Micro Devices, Inc. | ||
5 | * | ||
6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/device.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/ioport.h> | ||
18 | #include <linux/dma-mapping.h> | ||
19 | #include <linux/kthread.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/spinlock.h> | ||
23 | #include <linux/delay.h> | ||
24 | #include <linux/ccp.h> | ||
25 | #include <linux/of.h> | ||
26 | |||
27 | #include "ccp-dev.h" | ||
28 | |||
29 | |||
30 | static int ccp_get_irq(struct ccp_device *ccp) | ||
31 | { | ||
32 | struct device *dev = ccp->dev; | ||
33 | struct platform_device *pdev = container_of(dev, | ||
34 | struct platform_device, dev); | ||
35 | int ret; | ||
36 | |||
37 | ret = platform_get_irq(pdev, 0); | ||
38 | if (ret < 0) | ||
39 | return ret; | ||
40 | |||
41 | ccp->irq = ret; | ||
42 | ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev); | ||
43 | if (ret) { | ||
44 | dev_notice(dev, "unable to allocate IRQ (%d)\n", ret); | ||
45 | return ret; | ||
46 | } | ||
47 | |||
48 | return 0; | ||
49 | } | ||
50 | |||
51 | static int ccp_get_irqs(struct ccp_device *ccp) | ||
52 | { | ||
53 | struct device *dev = ccp->dev; | ||
54 | int ret; | ||
55 | |||
56 | ret = ccp_get_irq(ccp); | ||
57 | if (!ret) | ||
58 | return 0; | ||
59 | |||
60 | /* Couldn't get an interrupt */ | ||
61 | dev_notice(dev, "could not enable interrupts (%d)\n", ret); | ||
62 | |||
63 | return ret; | ||
64 | } | ||
65 | |||
66 | static void ccp_free_irqs(struct ccp_device *ccp) | ||
67 | { | ||
68 | struct device *dev = ccp->dev; | ||
69 | |||
70 | free_irq(ccp->irq, dev); | ||
71 | } | ||
72 | |||
73 | static struct resource *ccp_find_mmio_area(struct ccp_device *ccp) | ||
74 | { | ||
75 | struct device *dev = ccp->dev; | ||
76 | struct platform_device *pdev = container_of(dev, | ||
77 | struct platform_device, dev); | ||
78 | struct resource *ior; | ||
79 | |||
80 | ior = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
81 | if (ior && (resource_size(ior) >= 0x800)) | ||
82 | return ior; | ||
83 | |||
84 | return NULL; | ||
85 | } | ||
86 | |||
87 | static int ccp_platform_probe(struct platform_device *pdev) | ||
88 | { | ||
89 | struct ccp_device *ccp; | ||
90 | struct device *dev = &pdev->dev; | ||
91 | struct resource *ior; | ||
92 | int ret; | ||
93 | |||
94 | ret = -ENOMEM; | ||
95 | ccp = ccp_alloc_struct(dev); | ||
96 | if (!ccp) | ||
97 | goto e_err; | ||
98 | |||
99 | ccp->dev_specific = NULL; | ||
100 | ccp->get_irq = ccp_get_irqs; | ||
101 | ccp->free_irq = ccp_free_irqs; | ||
102 | |||
103 | ior = ccp_find_mmio_area(ccp); | ||
104 | ccp->io_map = devm_ioremap_resource(dev, ior); | ||
105 | if (IS_ERR(ccp->io_map)) { | ||
106 | ret = PTR_ERR(ccp->io_map); | ||
107 | goto e_free; | ||
108 | } | ||
109 | ccp->io_regs = ccp->io_map; | ||
110 | |||
111 | if (!dev->dma_mask) | ||
112 | dev->dma_mask = &dev->coherent_dma_mask; | ||
113 | *(dev->dma_mask) = DMA_BIT_MASK(48); | ||
114 | dev->coherent_dma_mask = DMA_BIT_MASK(48); | ||
115 | |||
116 | if (of_property_read_bool(dev->of_node, "dma-coherent")) | ||
117 | ccp->axcache = CACHE_WB_NO_ALLOC; | ||
118 | else | ||
119 | ccp->axcache = CACHE_NONE; | ||
120 | |||
121 | dev_set_drvdata(dev, ccp); | ||
122 | |||
123 | ret = ccp_init(ccp); | ||
124 | if (ret) | ||
125 | goto e_free; | ||
126 | |||
127 | dev_notice(dev, "enabled\n"); | ||
128 | |||
129 | return 0; | ||
130 | |||
131 | e_free: | ||
132 | kfree(ccp); | ||
133 | |||
134 | e_err: | ||
135 | dev_notice(dev, "initialization failed\n"); | ||
136 | return ret; | ||
137 | } | ||
138 | |||
139 | static int ccp_platform_remove(struct platform_device *pdev) | ||
140 | { | ||
141 | struct device *dev = &pdev->dev; | ||
142 | struct ccp_device *ccp = dev_get_drvdata(dev); | ||
143 | |||
144 | ccp_destroy(ccp); | ||
145 | |||
146 | kfree(ccp); | ||
147 | |||
148 | dev_notice(dev, "disabled\n"); | ||
149 | |||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | #ifdef CONFIG_PM | ||
154 | static int ccp_platform_suspend(struct platform_device *pdev, | ||
155 | pm_message_t state) | ||
156 | { | ||
157 | struct device *dev = &pdev->dev; | ||
158 | struct ccp_device *ccp = dev_get_drvdata(dev); | ||
159 | unsigned long flags; | ||
160 | unsigned int i; | ||
161 | |||
162 | spin_lock_irqsave(&ccp->cmd_lock, flags); | ||
163 | |||
164 | ccp->suspending = 1; | ||
165 | |||
166 | /* Wake all the queue kthreads to prepare for suspend */ | ||
167 | for (i = 0; i < ccp->cmd_q_count; i++) | ||
168 | wake_up_process(ccp->cmd_q[i].kthread); | ||
169 | |||
170 | spin_unlock_irqrestore(&ccp->cmd_lock, flags); | ||
171 | |||
172 | /* Wait for all queue kthreads to say they're done */ | ||
173 | while (!ccp_queues_suspended(ccp)) | ||
174 | wait_event_interruptible(ccp->suspend_queue, | ||
175 | ccp_queues_suspended(ccp)); | ||
176 | |||
177 | return 0; | ||
178 | } | ||
179 | |||
180 | static int ccp_platform_resume(struct platform_device *pdev) | ||
181 | { | ||
182 | struct device *dev = &pdev->dev; | ||
183 | struct ccp_device *ccp = dev_get_drvdata(dev); | ||
184 | unsigned long flags; | ||
185 | unsigned int i; | ||
186 | |||
187 | spin_lock_irqsave(&ccp->cmd_lock, flags); | ||
188 | |||
189 | ccp->suspending = 0; | ||
190 | |||
191 | /* Wake up all the kthreads */ | ||
192 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
193 | ccp->cmd_q[i].suspended = 0; | ||
194 | wake_up_process(ccp->cmd_q[i].kthread); | ||
195 | } | ||
196 | |||
197 | spin_unlock_irqrestore(&ccp->cmd_lock, flags); | ||
198 | |||
199 | return 0; | ||
200 | } | ||
201 | #endif | ||
202 | |||
203 | static const struct of_device_id ccp_platform_ids[] = { | ||
204 | { .compatible = "amd,ccp-seattle-v1a" }, | ||
205 | { }, | ||
206 | }; | ||
207 | |||
208 | static struct platform_driver ccp_platform_driver = { | ||
209 | .driver = { | ||
210 | .name = "AMD Cryptographic Coprocessor", | ||
211 | .owner = THIS_MODULE, | ||
212 | .of_match_table = ccp_platform_ids, | ||
213 | }, | ||
214 | .probe = ccp_platform_probe, | ||
215 | .remove = ccp_platform_remove, | ||
216 | #ifdef CONFIG_PM | ||
217 | .suspend = ccp_platform_suspend, | ||
218 | .resume = ccp_platform_resume, | ||
219 | #endif | ||
220 | }; | ||
221 | |||
222 | int ccp_platform_init(void) | ||
223 | { | ||
224 | return platform_driver_register(&ccp_platform_driver); | ||
225 | } | ||
226 | |||
227 | void ccp_platform_exit(void) | ||
228 | { | ||
229 | platform_driver_unregister(&ccp_platform_driver); | ||
230 | } | ||
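Editor's note: ccp_get_irq() and ccp_find_mmio_area() in the new file recover the platform_device from the bare struct device pointer with container_of(). A standalone reproduction of that pointer arithmetic, using a simplified container_of() and toy structs for illustration only:

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified form of the kernel's container_of(). */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct device { int id; };
    struct platform_device { const char *name; struct device dev; };

    int main(void)
    {
        struct platform_device pdev = { .name = "ccp", .dev = { 1 } };
        struct device *dev = &pdev.dev; /* what the driver keeps in ccp->dev */

        struct platform_device *back =
            container_of(dev, struct platform_device, dev);

        printf("recovered \"%s\" from its embedded device\n", back->name);
        return 0;
    }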
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c index 502edf0a2933..544f6d327ede 100644 --- a/drivers/crypto/nx/nx-842.c +++ b/drivers/crypto/nx/nx-842.c | |||
@@ -1247,7 +1247,7 @@ static struct vio_device_id nx842_driver_ids[] = { | |||
1247 | static struct vio_driver nx842_driver = { | 1247 | static struct vio_driver nx842_driver = { |
1248 | .name = MODULE_NAME, | 1248 | .name = MODULE_NAME, |
1249 | .probe = nx842_probe, | 1249 | .probe = nx842_probe, |
1250 | .remove = nx842_remove, | 1250 | .remove = __exit_p(nx842_remove), |
1251 | .get_desired_dma = nx842_get_desired_dma, | 1251 | .get_desired_dma = nx842_get_desired_dma, |
1252 | .id_table = nx842_driver_ids, | 1252 | .id_table = nx842_driver_ids, |
1253 | }; | 1253 | }; |
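Editor's note: the nx-842 change wraps the remove callback in __exit_p() so a built-in kernel can discard the __exit-marked function without leaving a dangling pointer. The macro's definition from <linux/init.h>, exercised standalone:

    #include <stdio.h>

    /* __exit_p() as defined in <linux/init.h>. */
    #ifdef MODULE
    #define __exit_p(x) x
    #else
    #define __exit_p(x) NULL
    #endif

    static int nx842_remove_stub(void) { return 0; }

    int main(void)
    {
        /* Compiled without -DMODULE the hook collapses to NULL, matching
         * a built-in image whose __exit sections were discarded. */
        void *hook = __exit_p((void *)nx842_remove_stub);

        printf("remove hook: %p\n", hook);
        return 0;
    }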
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig new file mode 100644 index 000000000000..49bede2a9f77 --- /dev/null +++ b/drivers/crypto/qat/Kconfig | |||
@@ -0,0 +1,23 @@ | |||
1 | config CRYPTO_DEV_QAT | ||
2 | tristate | ||
3 | select CRYPTO_AEAD | ||
4 | select CRYPTO_AUTHENC | ||
5 | select CRYPTO_ALGAPI | ||
6 | select CRYPTO_AES | ||
7 | select CRYPTO_CBC | ||
8 | select CRYPTO_SHA1 | ||
9 | select CRYPTO_SHA256 | ||
10 | select CRYPTO_SHA512 | ||
11 | select FW_LOADER | ||
12 | |||
13 | config CRYPTO_DEV_QAT_DH895xCC | ||
14 | tristate "Support for Intel(R) DH895xCC" | ||
15 | depends on X86 && PCI | ||
16 | default n | ||
17 | select CRYPTO_DEV_QAT | ||
18 | help | ||
19 | Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology | ||
20 | for accelerating crypto and compression workloads. | ||
21 | |||
22 | To compile this as a module, choose M here: the module | ||
23 | will be called qat_dh895xcc. | ||
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile new file mode 100644 index 000000000000..d11481be225e --- /dev/null +++ b/drivers/crypto/qat/Makefile | |||
@@ -0,0 +1,2 @@ | |||
1 | obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/ | ||
2 | obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/ | ||
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile new file mode 100644 index 000000000000..e0424dc382fe --- /dev/null +++ b/drivers/crypto/qat/qat_common/Makefile | |||
@@ -0,0 +1,14 @@ | |||
1 | obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o | ||
2 | intel_qat-objs := adf_cfg.o \ | ||
3 | adf_ctl_drv.o \ | ||
4 | adf_dev_mgr.o \ | ||
5 | adf_init.o \ | ||
6 | adf_accel_engine.o \ | ||
7 | adf_aer.o \ | ||
8 | adf_transport.o \ | ||
9 | qat_crypto.o \ | ||
10 | qat_algs.o \ | ||
11 | qat_uclo.o \ | ||
12 | qat_hal.o | ||
13 | |||
14 | intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o | ||
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h new file mode 100644 index 000000000000..9282381b03ce --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h | |||
@@ -0,0 +1,205 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_ACCEL_DEVICES_H_ | ||
48 | #define ADF_ACCEL_DEVICES_H_ | ||
49 | #include <linux/module.h> | ||
50 | #include <linux/atomic.h> | ||
51 | #include <linux/list.h> | ||
52 | #include <linux/proc_fs.h> | ||
53 | #include <linux/io.h> | ||
54 | #include "adf_cfg_common.h" | ||
55 | |||
56 | #define PCI_VENDOR_ID_INTEL 0x8086 | ||
57 | #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" | ||
58 | #define ADF_DH895XCC_PCI_DEVICE_ID 0x435 | ||
59 | #define ADF_DH895XCC_PMISC_BAR 1 | ||
60 | #define ADF_DH895XCC_ETR_BAR 2 | ||
61 | #define ADF_PCI_MAX_BARS 3 | ||
62 | #define ADF_DEVICE_NAME_LENGTH 32 | ||
63 | #define ADF_ETR_MAX_RINGS_PER_BANK 16 | ||
64 | #define ADF_MAX_MSIX_VECTOR_NAME 16 | ||
65 | #define ADF_DEVICE_NAME_PREFIX "qat_" | ||
66 | |||
67 | enum adf_accel_capabilities { | ||
68 | ADF_ACCEL_CAPABILITIES_NULL = 0, | ||
69 | ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1, | ||
70 | ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2, | ||
71 | ADF_ACCEL_CAPABILITIES_CIPHER = 4, | ||
72 | ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8, | ||
73 | ADF_ACCEL_CAPABILITIES_COMPRESSION = 32, | ||
74 | ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64, | ||
75 | ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128 | ||
76 | }; | ||
77 | |||
78 | struct adf_bar { | ||
79 | resource_size_t base_addr; | ||
80 | void __iomem *virt_addr; | ||
81 | resource_size_t size; | ||
82 | } __packed; | ||
83 | |||
84 | struct adf_accel_msix { | ||
85 | struct msix_entry *entries; | ||
86 | char **names; | ||
87 | } __packed; | ||
88 | |||
89 | struct adf_accel_pci { | ||
90 | struct pci_dev *pci_dev; | ||
91 | struct adf_accel_msix msix_entries; | ||
92 | struct adf_bar pci_bars[ADF_PCI_MAX_BARS]; | ||
93 | uint8_t revid; | ||
94 | uint8_t sku; | ||
95 | } __packed; | ||
96 | |||
97 | enum dev_state { | ||
98 | DEV_DOWN = 0, | ||
99 | DEV_UP | ||
100 | }; | ||
101 | |||
102 | enum dev_sku_info { | ||
103 | DEV_SKU_1 = 0, | ||
104 | DEV_SKU_2, | ||
105 | DEV_SKU_3, | ||
106 | DEV_SKU_4, | ||
107 | DEV_SKU_UNKNOWN, | ||
108 | }; | ||
109 | |||
110 | static inline const char *get_sku_info(enum dev_sku_info info) | ||
111 | { | ||
112 | switch (info) { | ||
113 | case DEV_SKU_1: | ||
114 | return "SKU1"; | ||
115 | case DEV_SKU_2: | ||
116 | return "SKU2"; | ||
117 | case DEV_SKU_3: | ||
118 | return "SKU3"; | ||
119 | case DEV_SKU_4: | ||
120 | return "SKU4"; | ||
121 | case DEV_SKU_UNKNOWN: | ||
122 | default: | ||
123 | break; | ||
124 | } | ||
125 | return "Unknown SKU"; | ||
126 | } | ||
127 | |||
128 | struct adf_hw_device_class { | ||
129 | const char *name; | ||
130 | const enum adf_device_type type; | ||
131 | uint32_t instances; | ||
132 | } __packed; | ||
133 | |||
134 | struct adf_cfg_device_data; | ||
135 | struct adf_accel_dev; | ||
136 | struct adf_etr_data; | ||
137 | struct adf_etr_ring_data; | ||
138 | |||
139 | struct adf_hw_device_data { | ||
140 | struct adf_hw_device_class *dev_class; | ||
141 | uint32_t (*get_accel_mask)(uint32_t fuse); | ||
142 | uint32_t (*get_ae_mask)(uint32_t fuse); | ||
143 | uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self); | ||
144 | uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self); | ||
145 | uint32_t (*get_num_aes)(struct adf_hw_device_data *self); | ||
146 | uint32_t (*get_num_accels)(struct adf_hw_device_data *self); | ||
147 | enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self); | ||
148 | void (*hw_arb_ring_enable)(struct adf_etr_ring_data *ring); | ||
149 | void (*hw_arb_ring_disable)(struct adf_etr_ring_data *ring); | ||
150 | int (*alloc_irq)(struct adf_accel_dev *accel_dev); | ||
151 | void (*free_irq)(struct adf_accel_dev *accel_dev); | ||
152 | void (*enable_error_correction)(struct adf_accel_dev *accel_dev); | ||
153 | const char *fw_name; | ||
154 | uint32_t pci_dev_id; | ||
155 | uint32_t fuses; | ||
156 | uint32_t accel_capabilities_mask; | ||
157 | uint16_t accel_mask; | ||
158 | uint16_t ae_mask; | ||
159 | uint16_t tx_rings_mask; | ||
160 | uint8_t tx_rx_gap; | ||
161 | uint8_t instance_id; | ||
162 | uint8_t num_banks; | ||
163 | uint8_t num_accel; | ||
164 | uint8_t num_logical_accel; | ||
165 | uint8_t num_engines; | ||
166 | } __packed; | ||
167 | |||
168 | /* CSR write macro */ | ||
169 | #define ADF_CSR_WR(csr_base, csr_offset, val) \ | ||
170 | __raw_writel(val, csr_base + csr_offset) | ||
171 | |||
172 | /* CSR read macro */ | ||
173 | #define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset) | ||
174 | |||
175 | #define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev) | ||
176 | #define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars) | ||
177 | #define GET_HW_DATA(accel_dev) (accel_dev->hw_device) | ||
178 | #define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks) | ||
179 | #define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines) | ||
180 | #define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev | ||
181 | |||
182 | struct adf_admin_comms; | ||
183 | struct icp_qat_fw_loader_handle; | ||
184 | struct adf_fw_loader_data { | ||
185 | struct icp_qat_fw_loader_handle *fw_loader; | ||
186 | const struct firmware *uof_fw; | ||
187 | }; | ||
188 | |||
189 | struct adf_accel_dev { | ||
190 | struct adf_etr_data *transport; | ||
191 | struct adf_hw_device_data *hw_device; | ||
192 | struct adf_cfg_device_data *cfg; | ||
193 | struct adf_fw_loader_data *fw_loader; | ||
194 | struct adf_admin_comms *admin; | ||
195 | struct list_head crypto_list; | ||
196 | unsigned long status; | ||
197 | atomic_t ref_count; | ||
198 | struct dentry *debugfs_dir; | ||
199 | struct list_head list; | ||
200 | struct module *owner; | ||
201 | uint8_t accel_id; | ||
202 | uint8_t numa_node; | ||
203 | struct adf_accel_pci accel_pci_dev; | ||
204 | } __packed; | ||
205 | #endif | ||
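Editor's note: the ADF_CSR_WR()/ADF_CSR_RD() accessors above take a byte-granular offset from the mapped BAR base. A host-side harness that substitutes plain memory for the BAR (the __raw_* stand-ins below are not the kernel's) shows the addressing:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the kernel's __raw_writel()/__raw_readl(). */
    #define __raw_writel(val, addr) (*(volatile uint32_t *)(addr) = (val))
    #define __raw_readl(addr)       (*(volatile uint32_t *)(addr))

    /* The accessors from adf_accel_devices.h. */
    #define ADF_CSR_WR(csr_base, csr_offset, val) \
        __raw_writel(val, csr_base + csr_offset)
    #define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)

    static uint32_t bar[0x100];     /* fake PMISC BAR */

    int main(void)
    {
        uint8_t *csr_base = (uint8_t *)bar; /* offsets are in bytes */

        ADF_CSR_WR(csr_base, 0x20, 0xdeadbeef);
        printf("CSR @0x20 = 0x%08x\n", ADF_CSR_RD(csr_base, 0x20));
        return 0;
    }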
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c new file mode 100644 index 000000000000..c77453b900a3 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c | |||
@@ -0,0 +1,168 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/firmware.h> | ||
48 | #include <linux/pci.h> | ||
49 | #include "adf_cfg.h" | ||
50 | #include "adf_accel_devices.h" | ||
51 | #include "adf_common_drv.h" | ||
52 | #include "icp_qat_uclo.h" | ||
53 | |||
54 | int adf_ae_fw_load(struct adf_accel_dev *accel_dev) | ||
55 | { | ||
56 | struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; | ||
57 | struct adf_hw_device_data *hw_device = accel_dev->hw_device; | ||
58 | void *uof_addr; | ||
59 | uint32_t uof_size; | ||
60 | |||
61 | if (request_firmware(&loader_data->uof_fw, hw_device->fw_name, | ||
62 | &accel_dev->accel_pci_dev.pci_dev->dev)) { | ||
63 | pr_err("QAT: Failed to load firmware %s\n", hw_device->fw_name); | ||
64 | return -EFAULT; | ||
65 | } | ||
66 | |||
67 | uof_size = loader_data->uof_fw->size; | ||
68 | uof_addr = (void *)loader_data->uof_fw->data; | ||
69 | if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) { | ||
70 | pr_err("QAT: Failed to map UOF\n"); | ||
71 | goto out_err; | ||
72 | } | ||
73 | if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) { | ||
74 | pr_err("QAT: Failed to map UOF\n"); | ||
75 | goto out_err; | ||
76 | } | ||
77 | return 0; | ||
78 | |||
79 | out_err: | ||
80 | release_firmware(loader_data->uof_fw); | ||
81 | return -EFAULT; | ||
82 | } | ||
83 | |||
84 | int adf_ae_fw_release(struct adf_accel_dev *accel_dev) | ||
85 | { | ||
86 | struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; | ||
87 | |||
88 | release_firmware(loader_data->uof_fw); | ||
89 | qat_uclo_del_uof_obj(loader_data->fw_loader); | ||
90 | qat_hal_deinit(loader_data->fw_loader); | ||
91 | loader_data->fw_loader = NULL; | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | int adf_ae_start(struct adf_accel_dev *accel_dev) | ||
96 | { | ||
97 | struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; | ||
98 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
99 | uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); | ||
100 | |||
101 | for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) { | ||
102 | if (hw_data->ae_mask & (1 << ae)) { | ||
103 | qat_hal_start(loader_data->fw_loader, ae, 0xFF); | ||
104 | ae_ctr++; | ||
105 | } | ||
106 | } | ||
107 | pr_info("QAT: qat_dev%d started %d acceleration engines\n", | ||
108 | accel_dev->accel_id, ae_ctr); | ||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | int adf_ae_stop(struct adf_accel_dev *accel_dev) | ||
113 | { | ||
114 | struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; | ||
115 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
116 | uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); | ||
117 | |||
118 | for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) { | ||
119 | if (hw_data->ae_mask & (1 << ae)) { | ||
120 | qat_hal_stop(loader_data->fw_loader, ae, 0xFF); | ||
121 | ae_ctr++; | ||
122 | } | ||
123 | } | ||
124 | pr_info("QAT: qat_dev%d stopped %d acceleration engines\n", | ||
125 | accel_dev->accel_id, ae_ctr); | ||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae) | ||
130 | { | ||
131 | struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; | ||
132 | |||
133 | qat_hal_reset(loader_data->fw_loader); | ||
134 | if (qat_hal_clr_reset(loader_data->fw_loader)) | ||
135 | return -EFAULT; | ||
136 | |||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | int adf_ae_init(struct adf_accel_dev *accel_dev) | ||
141 | { | ||
142 | struct adf_fw_loader_data *loader_data; | ||
143 | |||
144 | loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL); | ||
145 | if (!loader_data) | ||
146 | return -ENOMEM; | ||
147 | |||
148 | accel_dev->fw_loader = loader_data; | ||
149 | if (qat_hal_init(accel_dev)) { | ||
150 | pr_err("QAT: Failed to init the AEs\n"); | ||
151 | kfree(loader_data); | ||
152 | return -EFAULT; | ||
153 | } | ||
154 | if (adf_ae_reset(accel_dev, 0)) { | ||
155 | pr_err("QAT: Failed to reset the AEs\n"); | ||
156 | qat_hal_deinit(loader_data->fw_loader); | ||
157 | kfree(loader_data); | ||
158 | return -EFAULT; | ||
159 | } | ||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | int adf_ae_shutdown(struct adf_accel_dev *accel_dev) | ||
164 | { | ||
165 | kfree(accel_dev->fw_loader); | ||
166 | accel_dev->fw_loader = NULL; | ||
167 | return 0; | ||
168 | } | ||
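Editor's note: adf_ae_start() and adf_ae_stop() walk every engine slot up to GET_MAX_ACCELENGINES() and act only on engines present in the fuse-derived ae_mask. The same loop, extracted into a runnable sketch with an invented mask:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t ae_mask = 0x0f0f;  /* invented: engines 0-3 and 8-11 */
        unsigned int ae, ae_ctr = 0, max_aes = 12;

        for (ae = 0; ae < max_aes; ae++) {
            if (ae_mask & (1u << ae)) {
                printf("starting AE %u\n", ae);
                ae_ctr++;
            }
        }
        printf("started %u acceleration engines\n", ae_ctr);
        return 0;
    }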
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c new file mode 100644 index 000000000000..c29d4c3926bf --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_aer.c | |||
@@ -0,0 +1,259 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/kernel.h> | ||
48 | #include <linux/pci.h> | ||
49 | #include <linux/aer.h> | ||
50 | #include <linux/completion.h> | ||
51 | #include <linux/workqueue.h> | ||
52 | #include <linux/delay.h> | ||
53 | #include "adf_accel_devices.h" | ||
54 | #include "adf_common_drv.h" | ||
55 | |||
56 | static struct workqueue_struct *device_reset_wq; | ||
57 | |||
58 | static pci_ers_result_t adf_error_detected(struct pci_dev *pdev, | ||
59 | pci_channel_state_t state) | ||
60 | { | ||
61 | struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); | ||
62 | |||
63 | pr_info("QAT: Acceleration driver hardware error detected.\n"); | ||
64 | if (!accel_dev) { | ||
65 | pr_err("QAT: Can't find acceleration device\n"); | ||
66 | return PCI_ERS_RESULT_DISCONNECT; | ||
67 | } | ||
68 | |||
69 | if (state == pci_channel_io_perm_failure) { | ||
70 | pr_err("QAT: Can't recover from device error\n"); | ||
71 | return PCI_ERS_RESULT_DISCONNECT; | ||
72 | } | ||
73 | |||
74 | return PCI_ERS_RESULT_NEED_RESET; | ||
75 | } | ||
76 | |||
77 | /* reset dev data */ | ||
78 | struct adf_reset_dev_data { | ||
79 | int mode; | ||
80 | struct adf_accel_dev *accel_dev; | ||
81 | struct completion compl; | ||
82 | struct work_struct reset_work; | ||
83 | }; | ||
84 | |||
85 | #define PPDSTAT_OFFSET 0x7E | ||
86 | static void adf_dev_restore(struct adf_accel_dev *accel_dev) | ||
87 | { | ||
88 | struct pci_dev *pdev = accel_to_pci_dev(accel_dev); | ||
89 | struct pci_dev *parent = pdev->bus->self; | ||
90 | uint16_t ppdstat = 0, bridge_ctl = 0; | ||
91 | int pending = 0; | ||
92 | |||
93 | pr_info("QAT: Reseting device qat_dev%d\n", accel_dev->accel_id); | ||
94 | pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat); | ||
95 | pending = ppdstat & PCI_EXP_DEVSTA_TRPND; | ||
96 | if (pending) { | ||
97 | int ctr = 0; | ||
98 | |||
99 | do { | ||
100 | msleep(100); | ||
101 | pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat); | ||
102 | pending = ppdstat & PCI_EXP_DEVSTA_TRPND; | ||
103 | } while (pending && ctr++ < 10); | ||
104 | } | ||
105 | |||
106 | if (pending) | ||
107 | pr_info("QAT: Transaction still in progress. Proceeding\n"); | ||
108 | |||
109 | pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl); | ||
110 | bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET; | ||
111 | pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl); | ||
112 | msleep(100); | ||
113 | bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET; | ||
114 | pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl); | ||
115 | msleep(100); | ||
116 | pci_restore_state(pdev); | ||
117 | pci_save_state(pdev); | ||
118 | } | ||
119 | |||
120 | static void adf_device_reset_worker(struct work_struct *work) | ||
121 | { | ||
122 | struct adf_reset_dev_data *reset_data = | ||
123 | container_of(work, struct adf_reset_dev_data, reset_work); | ||
124 | struct adf_accel_dev *accel_dev = reset_data->accel_dev; | ||
125 | |||
126 | adf_dev_restarting_notify(accel_dev); | ||
127 | adf_dev_stop(accel_dev); | ||
128 | adf_dev_restore(accel_dev); | ||
129 | if (adf_dev_start(accel_dev)) { | ||
130 | /* The device hung and we can't restart it, so stop here */ | ||
131 | dev_err(&GET_DEV(accel_dev), "Restart device failed\n"); | ||
132 | kfree(reset_data); | ||
133 | WARN(1, "QAT: device restart failed. Device is unusable\n"); | ||
134 | return; | ||
135 | } | ||
136 | adf_dev_restarted_notify(accel_dev); | ||
137 | clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); | ||
138 | |||
139 | /* The dev is back alive. Notify the caller if in sync mode */ | ||
140 | if (reset_data->mode == ADF_DEV_RESET_SYNC) | ||
141 | complete(&reset_data->compl); | ||
142 | else | ||
143 | kfree(reset_data); | ||
144 | } | ||
145 | |||
146 | static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, | ||
147 | enum adf_dev_reset_mode mode) | ||
148 | { | ||
149 | struct adf_reset_dev_data *reset_data; | ||
150 | |||
151 | if (adf_dev_started(accel_dev) && | ||
152 | !test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) | ||
153 | return 0; | ||
154 | |||
155 | set_bit(ADF_STATUS_RESTARTING, &accel_dev->status); | ||
156 | reset_data = kzalloc(sizeof(*reset_data), GFP_ATOMIC); | ||
157 | if (!reset_data) | ||
158 | return -ENOMEM; | ||
159 | reset_data->accel_dev = accel_dev; | ||
160 | init_completion(&reset_data->compl); | ||
161 | reset_data->mode = mode; | ||
162 | INIT_WORK(&reset_data->reset_work, adf_device_reset_worker); | ||
163 | queue_work(device_reset_wq, &reset_data->reset_work); | ||
164 | |||
165 | /* If in sync mode wait for the result */ | ||
166 | if (mode == ADF_DEV_RESET_SYNC) { | ||
167 | int ret = 0; | ||
168 | /* Maximum device reset time is 10 seconds */ | ||
169 | unsigned long wait_jiffies = msecs_to_jiffies(10000); | ||
170 | unsigned long timeout = wait_for_completion_timeout( | ||
171 | &reset_data->compl, wait_jiffies); | ||
172 | if (!timeout) { | ||
173 | pr_err("QAT: Reset device timeout expired\n"); | ||
174 | ret = -EFAULT; | ||
175 | } | ||
176 | kfree(reset_data); | ||
177 | return ret; | ||
178 | } | ||
179 | return 0; | ||
180 | } | ||
181 | |||
182 | static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev) | ||
183 | { | ||
184 | struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); | ||
185 | |||
186 | if (!accel_dev) { | ||
187 | pr_err("QAT: Can't find acceleration device\n"); | ||
188 | return PCI_ERS_RESULT_DISCONNECT; | ||
189 | } | ||
190 | pci_cleanup_aer_uncorrect_error_status(pdev); | ||
191 | if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC)) | ||
192 | return PCI_ERS_RESULT_DISCONNECT; | ||
193 | |||
194 | return PCI_ERS_RESULT_RECOVERED; | ||
195 | } | ||
196 | |||
197 | static void adf_resume(struct pci_dev *pdev) | ||
198 | { | ||
199 | pr_info("QAT: Acceleration driver reset completed\n"); | ||
200 | pr_info("QAT: Device is up and runnig\n"); | ||
201 | } | ||
202 | |||
203 | static struct pci_error_handlers adf_err_handler = { | ||
204 | .error_detected = adf_error_detected, | ||
205 | .slot_reset = adf_slot_reset, | ||
206 | .resume = adf_resume, | ||
207 | }; | ||
208 | |||
209 | /** | ||
210 | * adf_enable_aer() - Enable Advanced Error Reporting for acceleration device | ||
211 | * @accel_dev: Pointer to acceleration device. | ||
212 | * @adf: PCI device driver owning the given acceleration device. | ||
213 | * | ||
214 | * Function enables PCI Advanced Error Reporting for the | ||
215 | * QAT acceleration device accel_dev. | ||
216 | * To be used by QAT device specific drivers. | ||
217 | * | ||
218 | * Return: 0 on success, error code otherwise. | ||
219 | */ | ||
220 | int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf) | ||
221 | { | ||
222 | struct pci_dev *pdev = accel_to_pci_dev(accel_dev); | ||
223 | |||
224 | adf->err_handler = &adf_err_handler; | ||
225 | pci_enable_pcie_error_reporting(pdev); | ||
226 | return 0; | ||
227 | } | ||
228 | EXPORT_SYMBOL_GPL(adf_enable_aer); | ||
229 | |||
230 | /** | ||
231 | * adf_disable_aer() - Disable Advanced Error Reporting for acceleration device | ||
232 | * @accel_dev: Pointer to acceleration device. | ||
233 | * | ||
234 | * Function disables PCI Advanced Error Reporting for the | ||
235 | * QAT acceleration device accel_dev. | ||
236 | * To be used by QAT device specific drivers. | ||
237 | * | ||
238 | * Return: void | ||
239 | */ | ||
240 | void adf_disable_aer(struct adf_accel_dev *accel_dev) | ||
241 | { | ||
242 | struct pci_dev *pdev = accel_to_pci_dev(accel_dev); | ||
243 | |||
244 | pci_disable_pcie_error_reporting(pdev); | ||
245 | } | ||
246 | EXPORT_SYMBOL_GPL(adf_disable_aer); | ||
247 | |||
248 | int adf_init_aer(void) | ||
249 | { | ||
250 | device_reset_wq = create_workqueue("qat_device_reset_wq"); | ||
251 | return (device_reset_wq == NULL) ? -EFAULT : 0; | ||
252 | } | ||
253 | |||
254 | void adf_exit_aer(void) | ||
255 | { | ||
256 | if (device_reset_wq) | ||
257 | destroy_workqueue(device_reset_wq); | ||
258 | device_reset_wq = NULL; | ||
259 | } | ||
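Taken together, adf_enable_aer() hooks the shared adf_err_handler into a device driver's struct pci_driver, adf_disable_aer() turns PCIe error reporting back off at teardown, and adf_init_aer()/adf_exit_aer() manage the reset workqueue at module scope. A minimal sketch of how a device-specific driver might wire this up; the my_qat_* names are hypothetical, and only the adf_* calls come from this file:

/* Sketch, assuming adf_accel_devices.h and adf_common_drv.h are included
 * and my_qat_alloc_dev() is a hypothetical helper that sets up accel_dev.
 */
static struct pci_driver my_qat_driver;		/* hypothetical driver */

static int my_qat_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct adf_accel_dev *accel_dev = my_qat_alloc_dev(pdev);

	if (!accel_dev)
		return -ENOMEM;
	/* Install the common AER handlers and enable PCIe error reporting. */
	return adf_enable_aer(accel_dev, &my_qat_driver);
}

static void my_qat_remove(struct pci_dev *pdev)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

	if (accel_dev)
		adf_disable_aer(accel_dev);
}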
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c new file mode 100644 index 000000000000..aba7f1d043fb --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg.c | |||
@@ -0,0 +1,361 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/mutex.h> | ||
48 | #include <linux/slab.h> | ||
49 | #include <linux/list.h> | ||
50 | #include <linux/seq_file.h> | ||
51 | #include "adf_accel_devices.h" | ||
52 | #include "adf_cfg.h" | ||
53 | |||
54 | static DEFINE_MUTEX(qat_cfg_read_lock); | ||
55 | |||
56 | static void *qat_dev_cfg_start(struct seq_file *sfile, loff_t *pos) | ||
57 | { | ||
58 | struct adf_cfg_device_data *dev_cfg = sfile->private; | ||
59 | |||
60 | mutex_lock(&qat_cfg_read_lock); | ||
61 | return seq_list_start(&dev_cfg->sec_list, *pos); | ||
62 | } | ||
63 | |||
64 | static int qat_dev_cfg_show(struct seq_file *sfile, void *v) | ||
65 | { | ||
66 | struct list_head *list; | ||
67 | struct adf_cfg_section *sec = | ||
68 | list_entry(v, struct adf_cfg_section, list); | ||
69 | |||
70 | seq_printf(sfile, "[%s]\n", sec->name); | ||
71 | list_for_each(list, &sec->param_head) { | ||
72 | struct adf_cfg_key_val *ptr = | ||
73 | list_entry(list, struct adf_cfg_key_val, list); | ||
74 | seq_printf(sfile, "%s = %s\n", ptr->key, ptr->val); | ||
75 | } | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | static void *qat_dev_cfg_next(struct seq_file *sfile, void *v, loff_t *pos) | ||
80 | { | ||
81 | struct adf_cfg_device_data *dev_cfg = sfile->private; | ||
82 | |||
83 | return seq_list_next(v, &dev_cfg->sec_list, pos); | ||
84 | } | ||
85 | |||
86 | static void qat_dev_cfg_stop(struct seq_file *sfile, void *v) | ||
87 | { | ||
88 | mutex_unlock(&qat_cfg_read_lock); | ||
89 | } | ||
90 | |||
91 | static const struct seq_operations qat_dev_cfg_sops = { | ||
92 | .start = qat_dev_cfg_start, | ||
93 | .next = qat_dev_cfg_next, | ||
94 | .stop = qat_dev_cfg_stop, | ||
95 | .show = qat_dev_cfg_show | ||
96 | }; | ||
97 | |||
98 | static int qat_dev_cfg_open(struct inode *inode, struct file *file) | ||
99 | { | ||
100 | int ret = seq_open(file, &qat_dev_cfg_sops); | ||
101 | |||
102 | if (!ret) { | ||
103 | struct seq_file *seq_f = file->private_data; | ||
104 | |||
105 | seq_f->private = inode->i_private; | ||
106 | } | ||
107 | return ret; | ||
108 | } | ||
109 | |||
110 | static const struct file_operations qat_dev_cfg_fops = { | ||
111 | .open = qat_dev_cfg_open, | ||
112 | .read = seq_read, | ||
113 | .llseek = seq_lseek, | ||
114 | .release = seq_release | ||
115 | }; | ||
116 | |||
117 | /** | ||
118 | * adf_cfg_dev_add() - Create an acceleration device configuration table. | ||
119 | * @accel_dev: Pointer to acceleration device. | ||
120 | * | ||
121 | * Function creates a configuration table for the given acceleration device. | ||
122 | * The table stores device specific config values. | ||
123 | * To be used by QAT device specific drivers. | ||
124 | * | ||
125 | * Return: 0 on success, error code otherwise. | ||
126 | */ | ||
127 | int adf_cfg_dev_add(struct adf_accel_dev *accel_dev) | ||
128 | { | ||
129 | struct adf_cfg_device_data *dev_cfg_data; | ||
130 | |||
131 | dev_cfg_data = kzalloc(sizeof(*dev_cfg_data), GFP_KERNEL); | ||
132 | if (!dev_cfg_data) | ||
133 | return -ENOMEM; | ||
134 | INIT_LIST_HEAD(&dev_cfg_data->sec_list); | ||
135 | init_rwsem(&dev_cfg_data->lock); | ||
136 | accel_dev->cfg = dev_cfg_data; | ||
137 | |||
138 | /* accel_dev->debugfs_dir should always be non-NULL here */ | ||
139 | dev_cfg_data->debug = debugfs_create_file("dev_cfg", S_IRUSR, | ||
140 | accel_dev->debugfs_dir, | ||
141 | dev_cfg_data, | ||
142 | &qat_dev_cfg_fops); | ||
143 | if (!dev_cfg_data->debug) { | ||
144 | pr_err("QAT: Failed to create qat cfg debugfs entry.\n"); | ||
145 | kfree(dev_cfg_data); | ||
146 | accel_dev->cfg = NULL; | ||
147 | return -EFAULT; | ||
148 | } | ||
149 | return 0; | ||
150 | } | ||
151 | EXPORT_SYMBOL_GPL(adf_cfg_dev_add); | ||
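For reference, the seq_file handlers above render the table in an INI-like form, so reading the dev_cfg debugfs node created here produces output shaped like this (section and values are illustrative):

[KERNEL]
NumberCyInstances = 2
NumberDcInstances = 1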
152 | |||
153 | static void adf_cfg_section_del_all(struct list_head *head); | ||
154 | |||
155 | void adf_cfg_del_all(struct adf_accel_dev *accel_dev) | ||
156 | { | ||
157 | struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; | ||
158 | |||
159 | down_write(&dev_cfg_data->lock); | ||
160 | adf_cfg_section_del_all(&dev_cfg_data->sec_list); | ||
161 | up_write(&dev_cfg_data->lock); | ||
162 | } | ||
163 | |||
164 | /** | ||
165 | * adf_cfg_dev_remove() - Clears acceleration device configuration table. | ||
166 | * @accel_dev: Pointer to acceleration device. | ||
167 | * | ||
168 | * Function removes configuration table from the given acceleration device | ||
169 | * and frees all allocated memory. | ||
170 | * To be used by QAT device specific drivers. | ||
171 | * | ||
172 | * Return: void | ||
173 | */ | ||
174 | void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev) | ||
175 | { | ||
176 | struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; | ||
177 | |||
178 | down_write(&dev_cfg_data->lock); | ||
179 | adf_cfg_section_del_all(&dev_cfg_data->sec_list); | ||
180 | up_write(&dev_cfg_data->lock); | ||
181 | debugfs_remove(dev_cfg_data->debug); | ||
182 | kfree(dev_cfg_data); | ||
183 | accel_dev->cfg = NULL; | ||
184 | } | ||
185 | EXPORT_SYMBOL_GPL(adf_cfg_dev_remove); | ||
186 | |||
187 | static void adf_cfg_keyval_add(struct adf_cfg_key_val *new, | ||
188 | struct adf_cfg_section *sec) | ||
189 | { | ||
190 | list_add_tail(&new->list, &sec->param_head); | ||
191 | } | ||
192 | |||
193 | static void adf_cfg_keyval_del_all(struct list_head *head) | ||
194 | { | ||
195 | struct list_head *list_ptr, *tmp; | ||
196 | |||
197 | list_for_each_prev_safe(list_ptr, tmp, head) { | ||
198 | struct adf_cfg_key_val *ptr = | ||
199 | list_entry(list_ptr, struct adf_cfg_key_val, list); | ||
200 | list_del(list_ptr); | ||
201 | kfree(ptr); | ||
202 | } | ||
203 | } | ||
204 | |||
205 | static void adf_cfg_section_del_all(struct list_head *head) | ||
206 | { | ||
207 | struct adf_cfg_section *ptr; | ||
208 | struct list_head *list, *tmp; | ||
209 | |||
210 | list_for_each_prev_safe(list, tmp, head) { | ||
211 | ptr = list_entry(list, struct adf_cfg_section, list); | ||
212 | adf_cfg_keyval_del_all(&ptr->param_head); | ||
213 | list_del(list); | ||
214 | kfree(ptr); | ||
215 | } | ||
216 | } | ||
217 | |||
218 | static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s, | ||
219 | const char *key) | ||
220 | { | ||
221 | struct list_head *list; | ||
222 | |||
223 | list_for_each(list, &s->param_head) { | ||
224 | struct adf_cfg_key_val *ptr = | ||
225 | list_entry(list, struct adf_cfg_key_val, list); | ||
226 | if (!strcmp(ptr->key, key)) | ||
227 | return ptr; | ||
228 | } | ||
229 | return NULL; | ||
230 | } | ||
231 | |||
232 | static struct adf_cfg_section *adf_cfg_sec_find(struct adf_accel_dev *accel_dev, | ||
233 | const char *sec_name) | ||
234 | { | ||
235 | struct adf_cfg_device_data *cfg = accel_dev->cfg; | ||
236 | struct list_head *list; | ||
237 | |||
238 | list_for_each(list, &cfg->sec_list) { | ||
239 | struct adf_cfg_section *ptr = | ||
240 | list_entry(list, struct adf_cfg_section, list); | ||
241 | if (!strcmp(ptr->name, sec_name)) | ||
242 | return ptr; | ||
243 | } | ||
244 | return NULL; | ||
245 | } | ||
246 | |||
247 | static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev, | ||
248 | const char *sec_name, | ||
249 | const char *key_name, | ||
250 | char *val) | ||
251 | { | ||
252 | struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, sec_name); | ||
253 | struct adf_cfg_key_val *keyval = NULL; | ||
254 | |||
255 | if (sec) | ||
256 | keyval = adf_cfg_key_value_find(sec, key_name); | ||
257 | if (keyval) { | ||
258 | memcpy(val, keyval->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES); | ||
259 | return 0; | ||
260 | } | ||
261 | return -1; | ||
262 | } | ||
263 | |||
264 | /** | ||
265 | * adf_cfg_add_key_value_param() - Add key-value config entry to config table. | ||
266 | * @accel_dev: Pointer to acceleration device. | ||
267 | * @section_name: Name of the section where the param will be added | ||
268 | * @key: The key string | ||
269 | * @val: Value for the given @key | ||
270 | * @type: Type - string, int or address | ||
271 | * | ||
272 | * Function adds a configuration key-value entry to the appropriate section | ||
273 | * of the given acceleration device. | ||
274 | * To be used by QAT device specific drivers. | ||
275 | * | ||
276 | * Return: 0 on success, error code otherwise. | ||
277 | */ | ||
278 | int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, | ||
279 | const char *section_name, | ||
280 | const char *key, const void *val, | ||
281 | enum adf_cfg_val_type type) | ||
282 | { | ||
283 | struct adf_cfg_device_data *cfg = accel_dev->cfg; | ||
284 | struct adf_cfg_key_val *key_val; | ||
285 | struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev, | ||
286 | section_name); | ||
287 | if (!section) | ||
288 | return -EFAULT; | ||
289 | |||
290 | key_val = kzalloc(sizeof(*key_val), GFP_KERNEL); | ||
291 | if (!key_val) | ||
292 | return -ENOMEM; | ||
293 | |||
294 | INIT_LIST_HEAD(&key_val->list); | ||
295 | strlcpy(key_val->key, key, sizeof(key_val->key)); | ||
296 | |||
297 | if (type == ADF_DEC) { | ||
298 | snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES, | ||
299 | "%ld", (*((long *)val))); | ||
300 | } else if (type == ADF_STR) { | ||
301 | strlcpy(key_val->val, (char *)val, sizeof(key_val->val)); | ||
302 | } else if (type == ADF_HEX) { | ||
303 | snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES, | ||
304 | "0x%lx", (unsigned long)val); | ||
305 | } else { | ||
306 | pr_err("QAT: Unknown type given.\n"); | ||
307 | kfree(key_val); | ||
308 | return -1; | ||
309 | } | ||
310 | key_val->type = type; | ||
311 | down_write(&cfg->lock); | ||
312 | adf_cfg_keyval_add(key_val, section); | ||
313 | up_write(&cfg->lock); | ||
314 | return 0; | ||
315 | } | ||
316 | EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param); | ||
317 | |||
318 | /** | ||
319 | * adf_cfg_section_add() - Add config section entry to config table. | ||
320 | * @accel_dev: Pointer to acceleration device. | ||
321 | * @name: Name of the section | ||
322 | * | ||
323 | * Function adds a configuration section in which key-value entries | ||
324 | * will be stored. | ||
325 | * To be used by QAT device specific drivers. | ||
326 | * | ||
327 | * Return: 0 on success, error code otherwise. | ||
328 | */ | ||
329 | int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name) | ||
330 | { | ||
331 | struct adf_cfg_device_data *cfg = accel_dev->cfg; | ||
332 | struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name); | ||
333 | |||
334 | if (sec) | ||
335 | return 0; | ||
336 | |||
337 | sec = kzalloc(sizeof(*sec), GFP_KERNEL); | ||
338 | if (!sec) | ||
339 | return -ENOMEM; | ||
340 | |||
341 | strlcpy(sec->name, name, sizeof(sec->name)); | ||
342 | INIT_LIST_HEAD(&sec->param_head); | ||
343 | down_write(&cfg->lock); | ||
344 | list_add_tail(&sec->list, &cfg->sec_list); | ||
345 | up_write(&cfg->lock); | ||
346 | return 0; | ||
347 | } | ||
348 | EXPORT_SYMBOL_GPL(adf_cfg_section_add); | ||
349 | |||
350 | int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev, | ||
351 | const char *section, const char *name, | ||
352 | char *value) | ||
353 | { | ||
354 | struct adf_cfg_device_data *cfg = accel_dev->cfg; | ||
355 | int ret; | ||
356 | |||
357 | down_read(&cfg->lock); | ||
358 | ret = adf_cfg_key_val_get(accel_dev, section, name, value); | ||
359 | up_read(&cfg->lock); | ||
360 | return ret; | ||
361 | } | ||
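Putting the table API together: a driver creates the table once, adds sections, then key-value pairs, and can later read values back as strings. A minimal sketch, assuming accel_dev is already set up and using key names from adf_cfg_strings.h (added later in this patch); error handling is abbreviated:

/* Sketch of the configuration round trip. */
static int example_configure(struct adf_accel_dev *accel_dev)
{
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	unsigned long num_cy = 2;	/* illustrative value */

	if (adf_cfg_dev_add(accel_dev))		/* table + debugfs node */
		return -EFAULT;
	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
		return -EFAULT;
	/* ADF_DEC values are passed by pointer and stored via "%ld". */
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					ADF_NUM_CY, &num_cy, ADF_DEC))
		return -EFAULT;
	/* Reads copy back the stored string representation. */
	return adf_cfg_get_param_value(accel_dev, ADF_KERNEL_SEC,
				       ADF_NUM_CY, val);
}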
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.h b/drivers/crypto/qat/qat_common/adf_cfg.h new file mode 100644 index 000000000000..6a9c6f6b5ec9 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg.h | |||
@@ -0,0 +1,87 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_CFG_H_ | ||
48 | #define ADF_CFG_H_ | ||
49 | |||
50 | #include <linux/list.h> | ||
51 | #include <linux/rwsem.h> | ||
52 | #include <linux/debugfs.h> | ||
53 | #include "adf_accel_devices.h" | ||
54 | #include "adf_cfg_common.h" | ||
55 | #include "adf_cfg_strings.h" | ||
56 | |||
57 | struct adf_cfg_key_val { | ||
58 | char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; | ||
59 | char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; | ||
60 | enum adf_cfg_val_type type; | ||
61 | struct list_head list; | ||
62 | }; | ||
63 | |||
64 | struct adf_cfg_section { | ||
65 | char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES]; | ||
66 | struct list_head list; | ||
67 | struct list_head param_head; | ||
68 | }; | ||
69 | |||
70 | struct adf_cfg_device_data { | ||
71 | struct list_head sec_list; | ||
72 | struct dentry *debug; | ||
73 | struct rw_semaphore lock; | ||
74 | }; | ||
75 | |||
76 | int adf_cfg_dev_add(struct adf_accel_dev *accel_dev); | ||
77 | void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev); | ||
78 | int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name); | ||
79 | void adf_cfg_del_all(struct adf_accel_dev *accel_dev); | ||
80 | int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, | ||
81 | const char *section_name, | ||
82 | const char *key, const void *val, | ||
83 | enum adf_cfg_val_type type); | ||
84 | int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev, | ||
85 | const char *section, const char *name, char *value); | ||
86 | |||
87 | #endif | ||
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h new file mode 100644 index 000000000000..88b82187ac35 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h | |||
@@ -0,0 +1,100 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_CFG_COMMON_H_ | ||
48 | #define ADF_CFG_COMMON_H_ | ||
49 | |||
50 | #include <linux/types.h> | ||
51 | #include <linux/ioctl.h> | ||
52 | |||
53 | #define ADF_CFG_MAX_STR_LEN 64 | ||
54 | #define ADF_CFG_MAX_KEY_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN | ||
55 | #define ADF_CFG_MAX_VAL_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN | ||
56 | #define ADF_CFG_MAX_SECTION_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN | ||
57 | #define ADF_CFG_BASE_DEC 10 | ||
58 | #define ADF_CFG_BASE_HEX 16 | ||
59 | #define ADF_CFG_ALL_DEVICES 0xFE | ||
60 | #define ADF_CFG_NO_DEVICE 0xFF | ||
61 | #define ADF_CFG_AFFINITY_WHATEVER 0xFF | ||
62 | #define MAX_DEVICE_NAME_SIZE 32 | ||
63 | #define ADF_MAX_DEVICES 32 | ||
64 | |||
65 | enum adf_cfg_val_type { | ||
66 | ADF_DEC, | ||
67 | ADF_HEX, | ||
68 | ADF_STR | ||
69 | }; | ||
70 | |||
71 | enum adf_device_type { | ||
72 | DEV_UNKNOWN = 0, | ||
73 | DEV_DH895XCC, | ||
74 | }; | ||
75 | |||
76 | struct adf_dev_status_info { | ||
77 | enum adf_device_type type; | ||
78 | uint8_t accel_id; | ||
79 | uint8_t instance_id; | ||
80 | uint8_t num_ae; | ||
81 | uint8_t num_accel; | ||
82 | uint8_t num_logical_accel; | ||
83 | uint8_t banks_per_accel; | ||
84 | uint8_t state; | ||
85 | uint8_t bus; | ||
86 | uint8_t dev; | ||
87 | uint8_t fun; | ||
88 | char name[MAX_DEVICE_NAME_SIZE]; | ||
89 | }; | ||
90 | |||
91 | #define ADF_CTL_IOC_MAGIC 'a' | ||
92 | #define IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS _IOW(ADF_CTL_IOC_MAGIC, 0, \ | ||
93 | struct adf_user_cfg_ctl_data) | ||
94 | #define IOCTL_STOP_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 1, \ | ||
95 | struct adf_user_cfg_ctl_data) | ||
96 | #define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \ | ||
97 | struct adf_user_cfg_ctl_data) | ||
98 | #define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, uint32_t) | ||
99 | #define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, int32_t) | ||
100 | #endif | ||
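The _IOW definitions above fix the control device's user-space ABI. A hedged user-space sketch of issuing one of them, assuming this header is visible to the program and that the control node is exposed as /dev/qat_adf_ctl (the DEVICE_NAME used by adf_ctl_drv.c later in this patch):

/* Query the number of acceleration devices from user space. */
#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "adf_cfg_common.h"	/* for IOCTL_GET_NUM_DEVICES */

int main(void)
{
	int32_t num = 0;
	int fd = open("/dev/qat_adf_ctl", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, IOCTL_GET_NUM_DEVICES, &num) == 0)
		printf("devices: %d\n", num);
	close(fd);
	return 0;
}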
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h new file mode 100644 index 000000000000..c7ac758ebc90 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h | |||
@@ -0,0 +1,83 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_CFG_STRINGS_H_ | ||
48 | #define ADF_CFG_STRINGS_H_ | ||
49 | |||
50 | #define ADF_GENERAL_SEC "GENERAL" | ||
51 | #define ADF_KERNEL_SEC "KERNEL" | ||
52 | #define ADF_ACCEL_SEC "Accelerator" | ||
53 | #define ADF_NUM_CY "NumberCyInstances" | ||
54 | #define ADF_NUM_DC "NumberDcInstances" | ||
55 | #define ADF_RING_SYM_SIZE "NumConcurrentSymRequests" | ||
56 | #define ADF_RING_ASYM_SIZE "NumConcurrentAsymRequests" | ||
57 | #define ADF_RING_DC_SIZE "NumConcurrentRequests" | ||
58 | #define ADF_RING_ASYM_TX "RingAsymTx" | ||
59 | #define ADF_RING_SYM_TX "RingSymTx" | ||
60 | #define ADF_RING_RND_TX "RingNrbgTx" | ||
61 | #define ADF_RING_ASYM_RX "RingAsymRx" | ||
62 | #define ADF_RING_SYM_RX "RingSymRx" | ||
63 | #define ADF_RING_RND_RX "RingNrbgRx" | ||
64 | #define ADF_RING_DC_TX "RingTx" | ||
65 | #define ADF_RING_DC_RX "RingRx" | ||
66 | #define ADF_ETRMGR_BANK "Bank" | ||
67 | #define ADF_RING_BANK_NUM "BankNumber" | ||
68 | #define ADF_CY "Cy" | ||
69 | #define ADF_DC "Dc" | ||
70 | #define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled" | ||
71 | #define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \ | ||
72 | ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCING_ENABLED | ||
73 | #define ADF_ETRMGR_COALESCE_TIMER "InterruptCoalescingTimerNs" | ||
74 | #define ADF_ETRMGR_COALESCE_TIMER_FORMAT \ | ||
75 | ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCE_TIMER | ||
76 | #define ADF_ETRMGR_COALESCING_MSG_ENABLED "InterruptCoalescingNumResponses" | ||
77 | #define ADF_ETRMGR_COALESCING_MSG_ENABLED_FORMAT \ | ||
78 | ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCING_MSG_ENABLED | ||
79 | #define ADF_ETRMGR_CORE_AFFINITY "CoreAffinity" | ||
80 | #define ADF_ETRMGR_CORE_AFFINITY_FORMAT \ | ||
81 | ADF_ETRMGR_BANK"%d"ADF_ETRMGR_CORE_AFFINITY | ||
82 | #define ADF_ACCEL_STR "Accelerator%d" | ||
83 | #endif | ||
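The *_FORMAT macros rely on C string-literal concatenation, so ADF_ETRMGR_COALESCE_TIMER_FORMAT expands to "Bank%dInterruptCoalescingTimerNs". A small sketch of building a per-bank key from it, assuming ADF_CFG_MAX_KEY_LEN_IN_BYTES from adf_cfg_common.h for the buffer size:

/* Builds "Bank3InterruptCoalescingTimerNs" for bank 3. */
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
int bank = 3;

snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, bank);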
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h new file mode 100644 index 000000000000..0c38a155a865 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg_user.h | |||
@@ -0,0 +1,94 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_CFG_USER_H_ | ||
48 | #define ADF_CFG_USER_H_ | ||
49 | |||
50 | #include "adf_cfg_common.h" | ||
51 | #include "adf_cfg_strings.h" | ||
52 | |||
53 | struct adf_user_cfg_key_val { | ||
54 | char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; | ||
55 | char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; | ||
56 | union { | ||
57 | char *user_val_ptr; | ||
58 | uint64_t padding1; | ||
59 | }; | ||
60 | union { | ||
61 | struct adf_user_cfg_key_val *prev; | ||
62 | uint64_t padding2; | ||
63 | }; | ||
64 | union { | ||
65 | struct adf_user_cfg_key_val *next; | ||
66 | uint64_t padding3; | ||
67 | }; | ||
68 | enum adf_cfg_val_type type; | ||
69 | }; | ||
70 | |||
71 | struct adf_user_cfg_section { | ||
72 | char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES]; | ||
73 | union { | ||
74 | struct adf_user_cfg_key_val *params; | ||
75 | uint64_t padding1; | ||
76 | }; | ||
77 | union { | ||
78 | struct adf_user_cfg_section *prev; | ||
79 | uint64_t padding2; | ||
80 | }; | ||
81 | union { | ||
82 | struct adf_user_cfg_section *next; | ||
83 | uint64_t padding3; | ||
84 | }; | ||
85 | }; | ||
86 | |||
87 | struct adf_user_cfg_ctl_data { | ||
88 | union { | ||
89 | struct adf_user_cfg_section *config_section; | ||
90 | uint64_t padding; | ||
91 | }; | ||
92 | uint8_t device_id; | ||
93 | }; | ||
94 | #endif | ||
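The pointer/uint64_t unions keep each structure the same size and layout for 32-bit and 64-bit user space, which is what lets the driver below register the same handler for both .unlocked_ioctl and .compat_ioctl. A user-space sketch of chaining one section with one parameter; the values are purely illustrative:

/* Sketch: a single-section, single-parameter config request. */
struct adf_user_cfg_key_val kv = {
	.key  = ADF_NUM_CY,
	.val  = "2",
	.type = ADF_STR,
	.next = NULL,			/* end of the parameter list */
};
struct adf_user_cfg_section sec = {
	.name   = ADF_KERNEL_SEC,
	.params = &kv,
	.next   = NULL,			/* end of the section list */
};
struct adf_user_cfg_ctl_data req = {
	.config_section = &sec,
	.device_id      = 0,		/* first acceleration device */
};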
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h new file mode 100644 index 000000000000..5e8f9d431e5d --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h | |||
@@ -0,0 +1,192 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_DRV_H | ||
48 | #define ADF_DRV_H | ||
49 | |||
50 | #include <linux/list.h> | ||
51 | #include <linux/pci.h> | ||
52 | #include "adf_accel_devices.h" | ||
53 | #include "icp_qat_fw_loader_handle.h" | ||
54 | #include "icp_qat_hal.h" | ||
55 | |||
56 | #define ADF_STATUS_RESTARTING 0 | ||
57 | #define ADF_STATUS_STARTING 1 | ||
58 | #define ADF_STATUS_CONFIGURED 2 | ||
59 | #define ADF_STATUS_STARTED 3 | ||
60 | #define ADF_STATUS_AE_INITIALISED 4 | ||
61 | #define ADF_STATUS_AE_UCODE_LOADED 5 | ||
62 | #define ADF_STATUS_AE_STARTED 6 | ||
63 | #define ADF_STATUS_ORPHAN_TH_RUNNING 7 | ||
64 | #define ADF_STATUS_IRQ_ALLOCATED 8 | ||
65 | |||
66 | enum adf_dev_reset_mode { | ||
67 | ADF_DEV_RESET_ASYNC = 0, | ||
68 | ADF_DEV_RESET_SYNC | ||
69 | }; | ||
70 | |||
71 | enum adf_event { | ||
72 | ADF_EVENT_INIT = 0, | ||
73 | ADF_EVENT_START, | ||
74 | ADF_EVENT_STOP, | ||
75 | ADF_EVENT_SHUTDOWN, | ||
76 | ADF_EVENT_RESTARTING, | ||
77 | ADF_EVENT_RESTARTED, | ||
78 | }; | ||
79 | |||
80 | struct service_hndl { | ||
81 | int (*event_hld)(struct adf_accel_dev *accel_dev, | ||
82 | enum adf_event event); | ||
83 | unsigned long init_status; | ||
84 | unsigned long start_status; | ||
85 | char *name; | ||
86 | struct list_head list; | ||
87 | int admin; | ||
88 | }; | ||
89 | |||
90 | int adf_service_register(struct service_hndl *service); | ||
91 | int adf_service_unregister(struct service_hndl *service); | ||
92 | |||
93 | int adf_dev_init(struct adf_accel_dev *accel_dev); | ||
94 | int adf_dev_start(struct adf_accel_dev *accel_dev); | ||
95 | int adf_dev_stop(struct adf_accel_dev *accel_dev); | ||
96 | int adf_dev_shutdown(struct adf_accel_dev *accel_dev); | ||
97 | |||
98 | int adf_ctl_dev_register(void); | ||
99 | void adf_ctl_dev_unregister(void); | ||
100 | int adf_processes_dev_register(void); | ||
101 | void adf_processes_dev_unregister(void); | ||
102 | |||
103 | int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev); | ||
104 | void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev); | ||
105 | struct list_head *adf_devmgr_get_head(void); | ||
106 | struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id); | ||
107 | struct adf_accel_dev *adf_devmgr_get_first(void); | ||
108 | struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev); | ||
109 | int adf_devmgr_verify_id(uint32_t id); | ||
110 | void adf_devmgr_get_num_dev(uint32_t *num); | ||
111 | int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev); | ||
112 | int adf_dev_started(struct adf_accel_dev *accel_dev); | ||
113 | int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev); | ||
114 | int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev); | ||
115 | int adf_ae_init(struct adf_accel_dev *accel_dev); | ||
116 | int adf_ae_shutdown(struct adf_accel_dev *accel_dev); | ||
117 | int adf_ae_fw_load(struct adf_accel_dev *accel_dev); | ||
118 | int adf_ae_fw_release(struct adf_accel_dev *accel_dev); | ||
119 | int adf_ae_start(struct adf_accel_dev *accel_dev); | ||
120 | int adf_ae_stop(struct adf_accel_dev *accel_dev); | ||
121 | |||
122 | int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf); | ||
123 | void adf_disable_aer(struct adf_accel_dev *accel_dev); | ||
124 | int adf_init_aer(void); | ||
125 | void adf_exit_aer(void); | ||
126 | |||
127 | int adf_dev_get(struct adf_accel_dev *accel_dev); | ||
128 | void adf_dev_put(struct adf_accel_dev *accel_dev); | ||
129 | int adf_dev_in_use(struct adf_accel_dev *accel_dev); | ||
130 | int adf_init_etr_data(struct adf_accel_dev *accel_dev); | ||
131 | void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev); | ||
132 | int qat_crypto_register(void); | ||
133 | int qat_crypto_unregister(void); | ||
134 | struct qat_crypto_instance *qat_crypto_get_instance_node(int node); | ||
135 | void qat_crypto_put_instance(struct qat_crypto_instance *inst); | ||
136 | void qat_alg_callback(void *resp); | ||
137 | int qat_algs_init(void); | ||
138 | void qat_algs_exit(void); | ||
139 | int qat_algs_register(void); | ||
140 | int qat_algs_unregister(void); | ||
141 | |||
142 | int qat_hal_init(struct adf_accel_dev *accel_dev); | ||
143 | void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle); | ||
144 | void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae, | ||
145 | unsigned int ctx_mask); | ||
146 | void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae, | ||
147 | unsigned int ctx_mask); | ||
148 | void qat_hal_reset(struct icp_qat_fw_loader_handle *handle); | ||
149 | int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle); | ||
150 | void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle, | ||
151 | unsigned char ae, unsigned int ctx_mask); | ||
152 | int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle, | ||
153 | unsigned char ae, enum icp_qat_uof_regtype lm_type, | ||
154 | unsigned char mode); | ||
155 | int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle, | ||
156 | unsigned char ae, unsigned char mode); | ||
157 | int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle, | ||
158 | unsigned char ae, unsigned char mode); | ||
159 | void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, | ||
160 | unsigned char ae, unsigned int ctx_mask, unsigned int upc); | ||
161 | void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, | ||
162 | unsigned char ae, unsigned int uaddr, | ||
163 | unsigned int words_num, uint64_t *uword); | ||
164 | void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae, | ||
165 | unsigned int uword_addr, unsigned int words_num, | ||
166 | unsigned int *data); | ||
167 | int qat_hal_get_ins_num(void); | ||
168 | int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle, | ||
169 | unsigned char ae, | ||
170 | struct icp_qat_uof_batch_init *lm_init_header); | ||
171 | int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle, | ||
172 | unsigned char ae, unsigned char ctx_mask, | ||
173 | enum icp_qat_uof_regtype reg_type, | ||
174 | unsigned short reg_num, unsigned int regdata); | ||
175 | int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle, | ||
176 | unsigned char ae, unsigned char ctx_mask, | ||
177 | enum icp_qat_uof_regtype reg_type, | ||
178 | unsigned short reg_num, unsigned int regdata); | ||
179 | int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle, | ||
180 | unsigned char ae, unsigned char ctx_mask, | ||
181 | enum icp_qat_uof_regtype reg_type, | ||
182 | unsigned short reg_num, unsigned int regdata); | ||
183 | int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle, | ||
184 | unsigned char ae, unsigned char ctx_mask, | ||
185 | unsigned short reg_num, unsigned int regdata); | ||
186 | int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle, | ||
187 | unsigned char ae, unsigned short lm_addr, unsigned int value); | ||
188 | int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle); | ||
189 | void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle); | ||
190 | int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, | ||
191 | void *addr_ptr, int mem_size); | ||
192 | #endif | ||
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c new file mode 100644 index 000000000000..d97069b8a8e4 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c | |||
@@ -0,0 +1,490 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/module.h> | ||
48 | #include <linux/mutex.h> | ||
49 | #include <linux/slab.h> | ||
50 | #include <linux/fs.h> | ||
51 | #include <linux/bitops.h> | ||
52 | #include <linux/pci.h> | ||
53 | #include <linux/cdev.h> | ||
54 | #include <linux/uaccess.h> | ||
55 | |||
56 | #include "adf_accel_devices.h" | ||
57 | #include "adf_common_drv.h" | ||
58 | #include "adf_cfg.h" | ||
59 | #include "adf_cfg_common.h" | ||
60 | #include "adf_cfg_user.h" | ||
61 | |||
62 | #define DEVICE_NAME "qat_adf_ctl" | ||
63 | |||
64 | static DEFINE_MUTEX(adf_ctl_lock); | ||
65 | static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg); | ||
66 | |||
67 | static const struct file_operations adf_ctl_ops = { | ||
68 | .owner = THIS_MODULE, | ||
69 | .unlocked_ioctl = adf_ctl_ioctl, | ||
70 | .compat_ioctl = adf_ctl_ioctl, | ||
71 | }; | ||
72 | |||
73 | struct adf_ctl_drv_info { | ||
74 | unsigned int major; | ||
75 | struct cdev drv_cdev; | ||
76 | struct class *drv_class; | ||
77 | }; | ||
78 | |||
79 | static struct adf_ctl_drv_info adt_ctl_drv; | ||
80 | |||
81 | static void adf_chr_drv_destroy(void) | ||
82 | { | ||
83 | device_destroy(adt_ctl_drv.drv_class, MKDEV(adt_ctl_drv.major, 0)); | ||
84 | cdev_del(&adt_ctl_drv.drv_cdev); | ||
85 | class_destroy(adt_ctl_drv.drv_class); | ||
86 | unregister_chrdev_region(MKDEV(adt_ctl_drv.major, 0), 1); | ||
87 | } | ||
88 | |||
89 | static int adf_chr_drv_create(void) | ||
90 | { | ||
91 | dev_t dev_id; | ||
92 | struct device *drv_device; | ||
93 | |||
94 | if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) { | ||
95 | pr_err("QAT: unable to allocate chrdev region\n"); | ||
96 | return -EFAULT; | ||
97 | } | ||
98 | |||
99 | adt_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME); | ||
100 | if (IS_ERR(adt_ctl_drv.drv_class)) { | ||
101 | pr_err("QAT: class_create failed for adf_ctl\n"); | ||
102 | goto err_chrdev_unreg; | ||
103 | } | ||
104 | adt_ctl_drv.major = MAJOR(dev_id); | ||
105 | cdev_init(&adt_ctl_drv.drv_cdev, &adf_ctl_ops); | ||
106 | if (cdev_add(&adt_ctl_drv.drv_cdev, dev_id, 1)) { | ||
107 | pr_err("QAT: cdev add failed\n"); | ||
108 | goto err_class_destr; | ||
109 | } | ||
110 | |||
111 | drv_device = device_create(adt_ctl_drv.drv_class, NULL, | ||
112 | MKDEV(adt_ctl_drv.major, 0), | ||
113 | NULL, DEVICE_NAME); | ||
114 | if (IS_ERR(drv_device)) { | ||
115 | pr_err("QAT: failed to create device\n"); | ||
116 | goto err_cdev_del; | ||
117 | } | ||
118 | return 0; | ||
119 | err_cdev_del: | ||
120 | cdev_del(&adt_ctl_drv.drv_cdev); | ||
121 | err_class_destr: | ||
122 | class_destroy(adt_ctl_drv.drv_class); | ||
123 | err_chrdev_unreg: | ||
124 | unregister_chrdev_region(dev_id, 1); | ||
125 | return -EFAULT; | ||
126 | } | ||
127 | |||
128 | static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data, | ||
129 | unsigned long arg) | ||
130 | { | ||
131 | struct adf_user_cfg_ctl_data *cfg_data; | ||
132 | |||
133 | cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL); | ||
134 | if (!cfg_data) | ||
135 | return -ENOMEM; | ||
136 | |||
137 | /* Initialize device id to NO DEVICE as 0 is a valid device id */ | ||
138 | cfg_data->device_id = ADF_CFG_NO_DEVICE; | ||
139 | |||
140 | if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) { | ||
141 | pr_err("QAT: failed to copy from user cfg_data.\n"); | ||
142 | kfree(cfg_data); | ||
143 | return -EFAULT; | ||
144 | } | ||
145 | |||
146 | *ctl_data = cfg_data; | ||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | static int adf_add_key_value_data(struct adf_accel_dev *accel_dev, | ||
151 | const char *section, | ||
152 | const struct adf_user_cfg_key_val *key_val) | ||
153 | { | ||
154 | if (key_val->type == ADF_HEX) { | ||
155 | long *ptr = (long *)key_val->val; | ||
156 | long val = *ptr; | ||
157 | |||
158 | if (adf_cfg_add_key_value_param(accel_dev, section, | ||
159 | key_val->key, (void *)val, | ||
160 | key_val->type)) { | ||
161 | pr_err("QAT: failed to add keyvalue.\n"); | ||
162 | return -EFAULT; | ||
163 | } | ||
164 | } else { | ||
165 | if (adf_cfg_add_key_value_param(accel_dev, section, | ||
166 | key_val->key, key_val->val, | ||
167 | key_val->type)) { | ||
168 | pr_err("QAT: failed to add keyvalue.\n"); | ||
169 | return -EFAULT; | ||
170 | } | ||
171 | } | ||
172 | return 0; | ||
173 | } | ||
174 | |||
175 | static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev, | ||
176 | struct adf_user_cfg_ctl_data *ctl_data) | ||
177 | { | ||
178 | struct adf_user_cfg_key_val key_val; | ||
179 | struct adf_user_cfg_key_val *params_head; | ||
180 | struct adf_user_cfg_section section, *section_head; | ||
181 | |||
182 | section_head = ctl_data->config_section; | ||
183 | |||
184 | while (section_head) { | ||
185 | if (copy_from_user(§ion, (void __user *)section_head, | ||
186 | sizeof(*section_head))) { | ||
187 | pr_err("QAT: failed to copy section info\n"); | ||
188 | goto out_err; | ||
189 | } | ||
190 | |||
191 | if (adf_cfg_section_add(accel_dev, section.name)) { | ||
192 | pr_err("QAT: failed to add section.\n"); | ||
193 | goto out_err; | ||
194 | } | ||
195 | |||
196 | params_head = section.params; | ||
197 | |||
198 | while (params_head) { | ||
199 | if (copy_from_user(&key_val, (void __user *)params_head, | ||
200 | sizeof(key_val))) { | ||
201 | pr_err("QAT: Failed to copy keyvalue.\n"); | ||
202 | goto out_err; | ||
203 | } | ||
204 | if (adf_add_key_value_data(accel_dev, section.name, | ||
205 | &key_val)) { | ||
206 | goto out_err; | ||
207 | } | ||
208 | params_head = key_val.next; | ||
209 | } | ||
210 | section_head = section.next; | ||
211 | } | ||
212 | return 0; | ||
213 | out_err: | ||
214 | adf_cfg_del_all(accel_dev); | ||
215 | return -EFAULT; | ||
216 | } | ||
217 | |||
218 | static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd, | ||
219 | unsigned long arg) | ||
220 | { | ||
221 | int ret; | ||
222 | struct adf_user_cfg_ctl_data *ctl_data; | ||
223 | struct adf_accel_dev *accel_dev; | ||
224 | |||
225 | ret = adf_ctl_alloc_resources(&ctl_data, arg); | ||
226 | if (ret) | ||
227 | return ret; | ||
228 | |||
229 | accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id); | ||
230 | if (!accel_dev) { | ||
231 | ret = -EFAULT; | ||
232 | goto out; | ||
233 | } | ||
234 | |||
235 | if (adf_dev_started(accel_dev)) { | ||
236 | ret = -EFAULT; | ||
237 | goto out; | ||
238 | } | ||
239 | |||
240 | if (adf_copy_key_value_data(accel_dev, ctl_data)) { | ||
241 | ret = -EFAULT; | ||
242 | goto out; | ||
243 | } | ||
244 | set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); | ||
245 | out: | ||
246 | kfree(ctl_data); | ||
247 | return ret; | ||
248 | } | ||
249 | |||
250 | static int adf_ctl_is_device_in_use(int id) | ||
251 | { | ||
252 | struct list_head *itr, *head = adf_devmgr_get_head(); | ||
253 | |||
254 | list_for_each(itr, head) { | ||
255 | struct adf_accel_dev *dev = | ||
256 | list_entry(itr, struct adf_accel_dev, list); | ||
257 | |||
258 | if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) { | ||
259 | if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) { | ||
260 | pr_info("QAT: device qat_dev%d is busy\n", | ||
261 | dev->accel_id); | ||
262 | return -EBUSY; | ||
263 | } | ||
264 | } | ||
265 | } | ||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | static int adf_ctl_stop_devices(uint32_t id) | ||
270 | { | ||
271 | struct list_head *itr, *head = adf_devmgr_get_head(); | ||
272 | int ret = 0; | ||
273 | |||
274 | list_for_each(itr, head) { | ||
275 | struct adf_accel_dev *accel_dev = | ||
276 | list_entry(itr, struct adf_accel_dev, list); | ||
277 | if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) { | ||
278 | if (!adf_dev_started(accel_dev)) | ||
279 | continue; | ||
280 | |||
281 | if (adf_dev_stop(accel_dev)) { | ||
282 | pr_err("QAT: Failed to stop qat_dev%d\n", id); | ||
283 | ret = -EFAULT; | ||
284 | } | ||
285 | } | ||
286 | } | ||
287 | return ret; | ||
288 | } | ||
289 | |||
290 | static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd, | ||
291 | unsigned long arg) | ||
292 | { | ||
293 | int ret; | ||
294 | struct adf_user_cfg_ctl_data *ctl_data; | ||
295 | |||
296 | ret = adf_ctl_alloc_resources(&ctl_data, arg); | ||
297 | if (ret) | ||
298 | return ret; | ||
299 | |||
300 | if (adf_devmgr_verify_id(ctl_data->device_id)) { | ||
301 | pr_err("QAT: Device %d not found\n", ctl_data->device_id); | ||
302 | ret = -ENODEV; | ||
303 | goto out; | ||
304 | } | ||
305 | |||
306 | ret = adf_ctl_is_device_in_use(ctl_data->device_id); | ||
307 | if (ret) | ||
308 | goto out; | ||
309 | |||
310 | if (ctl_data->device_id == ADF_CFG_ALL_DEVICES) | ||
311 | pr_info("QAT: Stopping all acceleration devices.\n"); | ||
312 | else | ||
313 | pr_info("QAT: Stopping acceleration device qat_dev%d.\n", | ||
314 | ctl_data->device_id); | ||
315 | |||
316 | ret = adf_ctl_stop_devices(ctl_data->device_id); | ||
317 | if (ret) | ||
318 | pr_err("QAT: failed to stop device.\n"); | ||
319 | out: | ||
320 | kfree(ctl_data); | ||
321 | return ret; | ||
322 | } | ||
323 | |||
324 | static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd, | ||
325 | unsigned long arg) | ||
326 | { | ||
327 | int ret; | ||
328 | struct adf_user_cfg_ctl_data *ctl_data; | ||
329 | struct adf_accel_dev *accel_dev; | ||
330 | |||
331 | ret = adf_ctl_alloc_resources(&ctl_data, arg); | ||
332 | if (ret) | ||
333 | return ret; | ||
334 | |||
335 | accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id); | ||
336 | if (!accel_dev) { | ||
337 | pr_err("QAT: Device %d not found\n", ctl_data->device_id); | ||
338 | ret = -ENODEV; | ||
339 | goto out; | ||
340 | } | ||
341 | |||
342 | if (!adf_dev_started(accel_dev)) { | ||
343 | pr_info("QAT: Starting acceleration device qat_dev%d.\n", | ||
344 | ctl_data->device_id); | ||
345 | ret = adf_dev_start(accel_dev); | ||
346 | } else { | ||
347 | pr_info("QAT: Acceleration device qat_dev%d already started.\n", | ||
348 | ctl_data->device_id); | ||
349 | } | ||
350 | if (ret) { | ||
351 | pr_err("QAT: Failed to start qat_dev%d\n", ctl_data->device_id); | ||
352 | adf_dev_stop(accel_dev); | ||
353 | } | ||
354 | out: | ||
355 | kfree(ctl_data); | ||
356 | return ret; | ||
357 | } | ||
358 | |||
359 | static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd, | ||
360 | unsigned long arg) | ||
361 | { | ||
362 | uint32_t num_devices = 0; | ||
363 | |||
364 | adf_devmgr_get_num_dev(&num_devices); | ||
365 | if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices))) | ||
366 | return -EFAULT; | ||
367 | |||
368 | return 0; | ||
369 | } | ||
370 | |||
371 | static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd, | ||
372 | unsigned long arg) | ||
373 | { | ||
374 | struct adf_hw_device_data *hw_data; | ||
375 | struct adf_dev_status_info dev_info; | ||
376 | struct adf_accel_dev *accel_dev; | ||
377 | |||
378 | if (copy_from_user(&dev_info, (void __user *)arg, | ||
379 | sizeof(struct adf_dev_status_info))) { | ||
380 | pr_err("QAT: failed to copy from user.\n"); | ||
381 | return -EFAULT; | ||
382 | } | ||
383 | |||
384 | accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id); | ||
385 | if (!accel_dev) { | ||
386 | pr_err("QAT: Device %d not found\n", dev_info.accel_id); | ||
387 | return -ENODEV; | ||
388 | } | ||
389 | hw_data = accel_dev->hw_device; | ||
390 | dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN; | ||
391 | dev_info.num_ae = hw_data->get_num_aes(hw_data); | ||
392 | dev_info.num_accel = hw_data->get_num_accels(hw_data); | ||
393 | dev_info.num_logical_accel = hw_data->num_logical_accel; | ||
394 | dev_info.banks_per_accel = hw_data->num_banks | ||
395 | / hw_data->num_logical_accel; | ||
396 | strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name)); | ||
397 | dev_info.instance_id = hw_data->instance_id; | ||
398 | dev_info.type = hw_data->dev_class->type; | ||
399 | dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number; | ||
400 | dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn); | ||
401 | dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn); | ||
402 | |||
403 | if (copy_to_user((void __user *)arg, &dev_info, | ||
404 | sizeof(struct adf_dev_status_info))) { | ||
405 | pr_err("QAT: failed to copy status.\n"); | ||
406 | return -EFAULT; | ||
407 | } | ||
408 | return 0; | ||
409 | } | ||
410 | |||
411 | static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) | ||
412 | { | ||
413 | int ret; | ||
414 | |||
415 | if (mutex_lock_interruptible(&adf_ctl_lock)) | ||
416 | return -ERESTARTSYS; | ||
417 | |||
418 | switch (cmd) { | ||
419 | case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS: | ||
420 | ret = adf_ctl_ioctl_dev_config(fp, cmd, arg); | ||
421 | break; | ||
422 | |||
423 | case IOCTL_STOP_ACCEL_DEV: | ||
424 | ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg); | ||
425 | break; | ||
426 | |||
427 | case IOCTL_START_ACCEL_DEV: | ||
428 | ret = adf_ctl_ioctl_dev_start(fp, cmd, arg); | ||
429 | break; | ||
430 | |||
431 | case IOCTL_GET_NUM_DEVICES: | ||
432 | ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg); | ||
433 | break; | ||
434 | |||
435 | case IOCTL_STATUS_ACCEL_DEV: | ||
436 | ret = adf_ctl_ioctl_get_status(fp, cmd, arg); | ||
437 | break; | ||
438 | default: | ||
439 | pr_err("QAT: Invalid ioctl\n"); | ||
440 | ret = -EFAULT; | ||
441 | break; | ||
442 | } | ||
443 | mutex_unlock(&adf_ctl_lock); | ||
444 | return ret; | ||
445 | } | ||
446 | |||
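The switch above is the entire userspace control surface of the driver. As a rough illustration, a minimal userspace client could look like the sketch below. This is not part of the patch: the /dev/qat_adf_ctl node name (derived from DEVICE_NAME via udev) and the visibility of the IOCTL_* macros and struct adf_dev_status_info to userspace through adf_cfg_user.h are assumptions.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "adf_cfg_user.h"	/* assumed to be exported to userspace */

int main(void)
{
	uint32_t num = 0;
	struct adf_dev_status_info info = { .accel_id = 0 };
	int fd = open("/dev/qat_adf_ctl", O_RDWR);

	if (fd < 0)
		return 1;
	/* IOCTL_GET_NUM_DEVICES copies a uint32_t back to userspace */
	if (!ioctl(fd, IOCTL_GET_NUM_DEVICES, &num))
		printf("devices: %u\n", num);
	/* IOCTL_STATUS_ACCEL_DEV fills in the adf_dev_status_info struct */
	if (num && !ioctl(fd, IOCTL_STATUS_ACCEL_DEV, &info))
		printf("qat_dev0 is %s\n", info.state == DEV_UP ? "up" : "down");
	close(fd);
	return 0;
}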
447 | static int __init adf_register_ctl_device_driver(void) | ||
448 | { | ||
449 | mutex_init(&adf_ctl_lock); | ||
450 | |||
451 | if (qat_algs_init()) | ||
452 | goto err_algs_init; | ||
453 | |||
454 | if (adf_chr_drv_create()) | ||
455 | goto err_chr_dev; | ||
456 | |||
457 | if (adf_init_aer()) | ||
458 | goto err_aer; | ||
459 | |||
460 | if (qat_crypto_register()) | ||
461 | goto err_crypto_register; | ||
462 | |||
463 | return 0; | ||
464 | |||
465 | err_crypto_register: | ||
466 | adf_exit_aer(); | ||
467 | err_aer: | ||
468 | adf_chr_drv_destroy(); | ||
469 | err_chr_dev: | ||
470 | qat_algs_exit(); | ||
471 | err_algs_init: | ||
472 | mutex_destroy(&adf_ctl_lock); | ||
473 | return -EFAULT; | ||
474 | } | ||
475 | |||
476 | static void __exit adf_unregister_ctl_device_driver(void) | ||
477 | { | ||
478 | adf_chr_drv_destroy(); | ||
479 | adf_exit_aer(); | ||
480 | qat_crypto_unregister(); | ||
481 | qat_algs_exit(); | ||
482 | mutex_destroy(&adf_ctl_lock); | ||
483 | } | ||
484 | |||
485 | module_init(adf_register_ctl_device_driver); | ||
486 | module_exit(adf_unregister_ctl_device_driver); | ||
487 | MODULE_LICENSE("Dual BSD/GPL"); | ||
488 | MODULE_AUTHOR("Intel"); | ||
489 | MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); | ||
490 | MODULE_ALIAS("intel_qat"); | ||
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c new file mode 100644 index 000000000000..ae71555c0868 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c | |||
@@ -0,0 +1,215 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/mutex.h> | ||
48 | #include <linux/list.h> | ||
49 | #include "adf_cfg.h" | ||
50 | #include "adf_common_drv.h" | ||
51 | |||
52 | static LIST_HEAD(accel_table); | ||
53 | static DEFINE_MUTEX(table_lock); | ||
54 | static uint32_t num_devices; | ||
55 | |||
56 | /** | ||
57 | * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework | ||
58 | * @accel_dev: Pointer to acceleration device. | ||
59 | * | ||
60 | * Function adds acceleration device to the acceleration framework. | ||
61 | * To be used by QAT device specific drivers. | ||
62 | * | ||
63 | * Return: 0 on success, error code otherwise. | ||
64 | */ | ||
65 | int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev) | ||
66 | { | ||
67 | struct list_head *itr; | ||
68 | |||
69 | mutex_lock(&table_lock); | ||
70 | if (num_devices == ADF_MAX_DEVICES) { | ||
71 | mutex_unlock(&table_lock); | ||
72 | pr_err("QAT: Only support up to %d devices\n", ADF_MAX_DEVICES); | ||
73 | return -EFAULT; | ||
74 | } | ||
75 | list_for_each(itr, &accel_table) { | ||
76 | struct adf_accel_dev *ptr = | ||
77 | list_entry(itr, struct adf_accel_dev, list); | ||
78 | |||
79 | if (ptr == accel_dev) { | ||
80 | mutex_unlock(&table_lock); | ||
81 | return -EEXIST; | ||
82 | } | ||
83 | } | ||
84 | atomic_set(&accel_dev->ref_count, 0); | ||
85 | list_add_tail(&accel_dev->list, &accel_table); | ||
86 | accel_dev->accel_id = num_devices++; | ||
87 | mutex_unlock(&table_lock); | ||
88 | return 0; | ||
89 | } | ||
90 | EXPORT_SYMBOL_GPL(adf_devmgr_add_dev); | ||
91 | |||
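For context, a device-specific probe routine would hand its freshly allocated accel_dev to this function before bringing the device up. The sketch below is illustrative only; the probe context and full error handling are assumed, not shown by this patch.

static int example_probe(struct adf_accel_dev *accel_dev)
{
	int ret;

	/* The framework assigns accel_dev->accel_id and zeroes the ref count */
	ret = adf_devmgr_add_dev(accel_dev);
	if (ret) {
		pr_err("QAT: failed to add device to framework\n");
		return ret;
	}
	/* ... map BARs, set up IRQs, then adf_dev_start(accel_dev) ... */
	return 0;
}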
92 | struct list_head *adf_devmgr_get_head(void) | ||
93 | { | ||
94 | return &accel_table; | ||
95 | } | ||
96 | |||
97 | /** | ||
98 | * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework. | ||
99 | * @accel_dev: Pointer to acceleration device. | ||
100 | * | ||
101 | * Function removes acceleration device from the acceleration framework. | ||
102 | * To be used by QAT device specific drivers. | ||
103 | * | ||
104 | * Return: void | ||
105 | */ | ||
106 | void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev) | ||
107 | { | ||
108 | mutex_lock(&table_lock); | ||
109 | list_del(&accel_dev->list); | ||
110 | num_devices--; | ||
111 | mutex_unlock(&table_lock); | ||
112 | } | ||
113 | EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev); | ||
114 | |||
115 | struct adf_accel_dev *adf_devmgr_get_first(void) | ||
116 | { | ||
117 | struct adf_accel_dev *dev = NULL; | ||
118 | |||
119 | if (!list_empty(&accel_table)) | ||
120 | dev = list_first_entry(&accel_table, struct adf_accel_dev, | ||
121 | list); | ||
122 | return dev; | ||
123 | } | ||
124 | |||
125 | /** | ||
126 | * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev. | ||
127 | * @pci_dev: Pointer to PCI device. | ||
128 | * | ||
129 | * Function returns acceleration device associated with the given pci device. | ||
130 | * To be used by QAT device specific drivers. | ||
131 | * | ||
132 | * Return: pointer to accel_dev or NULL if not found. | ||
133 | */ | ||
134 | struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev) | ||
135 | { | ||
136 | struct list_head *itr; | ||
137 | mutex_lock(&table_lock); | ||
138 | list_for_each(itr, &accel_table) { | ||
139 | struct adf_accel_dev *ptr = | ||
140 | list_entry(itr, struct adf_accel_dev, list); | ||
141 | if (ptr->accel_pci_dev.pci_dev == pci_dev) { | ||
142 | mutex_unlock(&table_lock); | ||
143 | return ptr; | ||
144 | } | ||
145 | } | ||
146 | mutex_unlock(&table_lock); | ||
147 | return NULL; | ||
148 | } | ||
149 | EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev); | ||
150 | |||
151 | struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id) | ||
152 | { | ||
153 | struct list_head *itr; | ||
154 | mutex_lock(&table_lock); | ||
155 | list_for_each(itr, &accel_table) { | ||
156 | struct adf_accel_dev *ptr = | ||
157 | list_entry(itr, struct adf_accel_dev, list); | ||
158 | if (ptr->accel_id == id) { | ||
159 | mutex_unlock(&table_lock); | ||
160 | return ptr; | ||
161 | } | ||
162 | } | ||
163 | mutex_unlock(&table_lock); | ||
164 | return NULL; | ||
165 | } | ||
166 | |||
167 | int adf_devmgr_verify_id(uint32_t id) | ||
168 | { | ||
169 | if (id == ADF_CFG_ALL_DEVICES) | ||
170 | return 0; | ||
171 | |||
172 | if (adf_devmgr_get_dev_by_id(id)) | ||
173 | return 0; | ||
174 | |||
175 | return -ENODEV; | ||
176 | } | ||
177 | |||
178 | void adf_devmgr_get_num_dev(uint32_t *num) | ||
179 | { | ||
180 | struct list_head *itr; | ||
181 | |||
182 | *num = 0; | ||
183 | list_for_each(itr, &accel_table) { | ||
184 | (*num)++; | ||
185 | } | ||
186 | } | ||
187 | |||
188 | int adf_dev_in_use(struct adf_accel_dev *accel_dev) | ||
189 | { | ||
190 | return atomic_read(&accel_dev->ref_count) != 0; | ||
191 | } | ||
192 | |||
193 | int adf_dev_get(struct adf_accel_dev *accel_dev) | ||
194 | { | ||
195 | if (atomic_add_return(1, &accel_dev->ref_count) == 1) | ||
196 | if (!try_module_get(accel_dev->owner)) | ||
197 | return -EFAULT; | ||
198 | return 0; | ||
199 | } | ||
200 | |||
201 | void adf_dev_put(struct adf_accel_dev *accel_dev) | ||
202 | { | ||
203 | if (atomic_sub_return(1, &accel_dev->ref_count) == 0) | ||
204 | module_put(accel_dev->owner); | ||
205 | } | ||
206 | |||
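The get/put pair above pins the owning module while work is outstanding. A minimal sketch of the intended discipline, with a caller-supplied work function standing in for a real request path:

static int example_submit(struct adf_accel_dev *accel_dev,
			  int (*do_request)(struct adf_accel_dev *))
{
	int ret;

	/* The first reference also takes a module ref on accel_dev->owner */
	if (adf_dev_get(accel_dev))
		return -EFAULT;
	ret = do_request(accel_dev);
	/* Dropping the last reference releases the module ref */
	adf_dev_put(accel_dev);
	return ret;
}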
207 | int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev) | ||
208 | { | ||
209 | return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status); | ||
210 | } | ||
211 | |||
212 | int adf_dev_started(struct adf_accel_dev *accel_dev) | ||
213 | { | ||
214 | return test_bit(ADF_STATUS_STARTED, &accel_dev->status); | ||
215 | } | ||
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c new file mode 100644 index 000000000000..5c0e47a00a87 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_init.c | |||
@@ -0,0 +1,388 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/mutex.h> | ||
48 | #include <linux/list.h> | ||
49 | #include <linux/bitops.h> | ||
50 | #include <linux/delay.h> | ||
51 | #include "adf_accel_devices.h" | ||
52 | #include "adf_cfg.h" | ||
53 | #include "adf_common_drv.h" | ||
54 | |||
55 | static LIST_HEAD(service_table); | ||
56 | static DEFINE_MUTEX(service_lock); | ||
57 | |||
58 | static void adf_service_add(struct service_hndl *service) | ||
59 | { | ||
60 | mutex_lock(&service_lock); | ||
61 | list_add(&service->list, &service_table); | ||
62 | mutex_unlock(&service_lock); | ||
63 | } | ||
64 | |||
65 | /** | ||
66 | * adf_service_register() - Register acceleration service in the accel framework | ||
67 | * @service: Pointer to the service | ||
68 | * | ||
69 | * Function adds the acceleration service to the acceleration framework. | ||
70 | * To be used by QAT device specific drivers. | ||
71 | * | ||
72 | * Return: 0 on success, error code otherwise. | ||
73 | */ | ||
74 | int adf_service_register(struct service_hndl *service) | ||
75 | { | ||
76 | service->init_status = 0; | ||
77 | service->start_status = 0; | ||
78 | adf_service_add(service); | ||
79 | return 0; | ||
80 | } | ||
81 | EXPORT_SYMBOL_GPL(adf_service_register); | ||
82 | |||
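A registered service is just a named event handler plus the bookkeeping bitmaps manipulated below. The following sketch of a non-admin service is illustrative only; it assumes enum adf_event and struct service_hndl as declared in adf_common_drv.h and used throughout this file.

static int example_event_handler(struct adf_accel_dev *accel_dev,
				 enum adf_event event)
{
	switch (event) {
	case ADF_EVENT_INIT:		/* allocate per-device state */
	case ADF_EVENT_START:		/* begin submitting work */
	case ADF_EVENT_STOP:		/* quiesce; -EAGAIN asks for a grace wait */
	case ADF_EVENT_SHUTDOWN:	/* free per-device state */
	default:
		return 0;
	}
}

static struct service_hndl example_service = {
	.name = "example_service",
	.admin = 0,
	.event_hld = example_event_handler,
};

/* Registration: adf_service_register(&example_service); */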
83 | static void adf_service_remove(struct service_hndl *service) | ||
84 | { | ||
85 | mutex_lock(&service_lock); | ||
86 | list_del(&service->list); | ||
87 | mutex_unlock(&service_lock); | ||
88 | } | ||
89 | |||
90 | /** | ||
91 | * adf_service_unregister() - Unregister acceleration service from the framework | ||
92 | * @service: Pointer to the service | ||
93 | * | ||
94 | * Function removes the acceleration service from the acceleration framework. | ||
95 | * To be used by QAT device specific drivers. | ||
96 | * | ||
97 | * Return: 0 on success, error code otherwise. | ||
98 | */ | ||
99 | int adf_service_unregister(struct service_hndl *service) | ||
100 | { | ||
101 | if (service->init_status || service->start_status) { | ||
102 | pr_err("QAT: Could not remove active service\n"); | ||
103 | return -EFAULT; | ||
104 | } | ||
105 | adf_service_remove(service); | ||
106 | return 0; | ||
107 | } | ||
108 | EXPORT_SYMBOL_GPL(adf_service_unregister); | ||
109 | |||
110 | /** | ||
111 | * adf_dev_start() - Start acceleration service for the given accel device | ||
112 | * @accel_dev: Pointer to acceleration device. | ||
113 | * | ||
114 | * Function notifies all the registered services that the acceleration device | ||
115 | * is ready to be used. | ||
116 | * To be used by QAT device specific drivers. | ||
117 | * | ||
118 | * Return: 0 on success, error code otherwise. | ||
119 | */ | ||
120 | int adf_dev_start(struct adf_accel_dev *accel_dev) | ||
121 | { | ||
122 | struct service_hndl *service; | ||
123 | struct list_head *list_itr; | ||
124 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
125 | |||
126 | if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) { | ||
127 | pr_info("QAT: Device not configured\n"); | ||
128 | return -EFAULT; | ||
129 | } | ||
130 | set_bit(ADF_STATUS_STARTING, &accel_dev->status); | ||
131 | |||
132 | if (adf_ae_init(accel_dev)) { | ||
133 | pr_err("QAT: Failed to initialise Acceleration Engine\n"); | ||
134 | return -EFAULT; | ||
135 | } | ||
136 | set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status); | ||
137 | |||
138 | if (adf_ae_fw_load(accel_dev)) { | ||
139 | pr_err("QAT: Failed to load acceleration FW\n"); | ||
140 | adf_ae_fw_release(accel_dev); | ||
141 | return -EFAULT; | ||
142 | } | ||
143 | set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status); | ||
144 | |||
145 | if (hw_data->alloc_irq(accel_dev)) { | ||
146 | pr_err("QAT: Failed to allocate interrupts\n"); | ||
147 | return -EFAULT; | ||
148 | } | ||
149 | set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); | ||
150 | |||
151 | /* | ||
152 | * Subservice initialisation is divided into two stages: init and start. | ||
153 | * This is to facilitate any ordering dependencies between services | ||
154 | * prior to starting any of the accelerators. | ||
155 | */ | ||
156 | list_for_each(list_itr, &service_table) { | ||
157 | service = list_entry(list_itr, struct service_hndl, list); | ||
158 | if (!service->admin) | ||
159 | continue; | ||
160 | if (service->event_hld(accel_dev, ADF_EVENT_INIT)) { | ||
161 | pr_err("QAT: Failed to initialise service %s\n", | ||
162 | service->name); | ||
163 | return -EFAULT; | ||
164 | } | ||
165 | set_bit(accel_dev->accel_id, &service->init_status); | ||
166 | } | ||
167 | list_for_each(list_itr, &service_table) { | ||
168 | service = list_entry(list_itr, struct service_hndl, list); | ||
169 | if (service->admin) | ||
170 | continue; | ||
171 | if (service->event_hld(accel_dev, ADF_EVENT_INIT)) { | ||
172 | pr_err("QAT: Failed to initialise service %s\n", | ||
173 | service->name); | ||
174 | return -EFAULT; | ||
175 | } | ||
176 | set_bit(accel_dev->accel_id, &service->init_status); | ||
177 | } | ||
178 | |||
179 | hw_data->enable_error_correction(accel_dev); | ||
180 | |||
181 | if (adf_ae_start(accel_dev)) { | ||
182 | pr_err("QAT: AE Start Failed\n"); | ||
183 | return -EFAULT; | ||
184 | } | ||
185 | set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); | ||
186 | |||
187 | list_for_each(list_itr, &service_table) { | ||
188 | service = list_entry(list_itr, struct service_hndl, list); | ||
189 | if (!service->admin) | ||
190 | continue; | ||
191 | if (service->event_hld(accel_dev, ADF_EVENT_START)) { | ||
192 | pr_err("QAT: Failed to start service %s\n", | ||
193 | service->name); | ||
194 | return -EFAULT; | ||
195 | } | ||
196 | set_bit(accel_dev->accel_id, &service->start_status); | ||
197 | } | ||
198 | list_for_each(list_itr, &service_table) { | ||
199 | service = list_entry(list_itr, struct service_hndl, list); | ||
200 | if (service->admin) | ||
201 | continue; | ||
202 | if (service->event_hld(accel_dev, ADF_EVENT_START)) { | ||
203 | pr_err("QAT: Failed to start service %s\n", | ||
204 | service->name); | ||
205 | return -EFAULT; | ||
206 | } | ||
207 | set_bit(accel_dev->accel_id, &service->start_status); | ||
208 | } | ||
209 | |||
210 | clear_bit(ADF_STATUS_STARTING, &accel_dev->status); | ||
211 | set_bit(ADF_STATUS_STARTED, &accel_dev->status); | ||
212 | |||
213 | if (qat_algs_register()) { | ||
214 | pr_err("QAT: Failed to register crypto algs\n"); | ||
215 | set_bit(ADF_STATUS_STARTING, &accel_dev->status); | ||
216 | clear_bit(ADF_STATUS_STARTED, &accel_dev->status); | ||
217 | return -EFAULT; | ||
218 | } | ||
219 | return 0; | ||
220 | } | ||
221 | EXPORT_SYMBOL_GPL(adf_dev_start); | ||
222 | |||
223 | /** | ||
224 | * adf_dev_stop() - Stop acceleration service for the given accel device | ||
225 | * @accel_dev: Pointer to acceleration device. | ||
226 | * | ||
227 | * Function notifies all the registered services that the acceleration device | ||
228 | * is shutting down. | ||
229 | * To be used by QAT device specific drivers. | ||
230 | * | ||
231 | * Return: 0 on success, error code otherwise. | ||
232 | */ | ||
233 | int adf_dev_stop(struct adf_accel_dev *accel_dev) | ||
234 | { | ||
235 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
236 | struct service_hndl *service; | ||
237 | struct list_head *list_itr; | ||
238 | int ret, wait = 0; | ||
239 | |||
240 | if (!adf_dev_started(accel_dev) && | ||
241 | !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) { | ||
242 | return 0; | ||
243 | } | ||
244 | clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); | ||
245 | clear_bit(ADF_STATUS_STARTING, &accel_dev->status); | ||
246 | clear_bit(ADF_STATUS_STARTED, &accel_dev->status); | ||
247 | |||
248 | if (qat_algs_unregister()) | ||
249 | pr_err("QAT: Failed to unregister crypto algs\n"); | ||
250 | |||
251 | list_for_each(list_itr, &service_table) { | ||
252 | service = list_entry(list_itr, struct service_hndl, list); | ||
253 | if (service->admin) | ||
254 | continue; | ||
255 | if (!test_bit(accel_dev->accel_id, &service->start_status)) | ||
256 | continue; | ||
257 | ret = service->event_hld(accel_dev, ADF_EVENT_STOP); | ||
258 | if (!ret) { | ||
259 | clear_bit(accel_dev->accel_id, &service->start_status); | ||
260 | } else if (ret == -EAGAIN) { | ||
261 | wait = 1; | ||
262 | clear_bit(accel_dev->accel_id, &service->start_status); | ||
263 | } | ||
264 | } | ||
265 | list_for_each(list_itr, &service_table) { | ||
266 | service = list_entry(list_itr, struct service_hndl, list); | ||
267 | if (!service->admin) | ||
268 | continue; | ||
269 | if (!test_bit(accel_dev->accel_id, &service->start_status)) | ||
270 | continue; | ||
271 | if (service->event_hld(accel_dev, ADF_EVENT_STOP)) | ||
272 | pr_err("QAT: Failed to shutdown service %s\n", | ||
273 | service->name); | ||
274 | else | ||
275 | clear_bit(accel_dev->accel_id, &service->start_status); | ||
276 | } | ||
277 | |||
278 | if (wait) | ||
279 | msleep(100); | ||
280 | |||
281 | if (adf_dev_started(accel_dev)) { | ||
282 | if (adf_ae_stop(accel_dev)) | ||
283 | pr_err("QAT: failed to stop AE\n"); | ||
284 | else | ||
285 | clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); | ||
286 | } | ||
287 | |||
288 | if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) { | ||
289 | if (adf_ae_fw_release(accel_dev)) | ||
290 | pr_err("QAT: Failed to release the ucode\n"); | ||
291 | else | ||
292 | clear_bit(ADF_STATUS_AE_UCODE_LOADED, | ||
293 | &accel_dev->status); | ||
294 | } | ||
295 | |||
296 | if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) { | ||
297 | if (adf_ae_shutdown(accel_dev)) | ||
298 | pr_err("QAT: Failed to shutdown Accel Engine\n"); | ||
299 | else | ||
300 | clear_bit(ADF_STATUS_AE_INITIALISED, | ||
301 | &accel_dev->status); | ||
302 | } | ||
303 | |||
304 | list_for_each(list_itr, &service_table) { | ||
305 | service = list_entry(list_itr, struct service_hndl, list); | ||
306 | if (service->admin) | ||
307 | continue; | ||
308 | if (!test_bit(accel_dev->accel_id, &service->init_status)) | ||
309 | continue; | ||
310 | if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) | ||
311 | pr_err("QAT: Failed to shutdown service %s\n", | ||
312 | service->name); | ||
313 | else | ||
314 | clear_bit(accel_dev->accel_id, &service->init_status); | ||
315 | } | ||
316 | list_for_each(list_itr, &service_table) { | ||
317 | service = list_entry(list_itr, struct service_hndl, list); | ||
318 | if (!service->admin) | ||
319 | continue; | ||
320 | if (!test_bit(accel_dev->accel_id, &service->init_status)) | ||
321 | continue; | ||
322 | if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) | ||
323 | pr_err("QAT: Failed to shutdown service %s\n", | ||
324 | service->name); | ||
325 | else | ||
326 | clear_bit(accel_dev->accel_id, &service->init_status); | ||
327 | } | ||
328 | |||
329 | if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) { | ||
330 | hw_data->free_irq(accel_dev); | ||
331 | clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); | ||
332 | } | ||
333 | |||
334 | /* Delete configuration only if not restarting */ | ||
335 | if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) | ||
336 | adf_cfg_del_all(accel_dev); | ||
337 | |||
338 | return 0; | ||
339 | } | ||
340 | EXPORT_SYMBOL_GPL(adf_dev_stop); | ||
341 | |||
342 | int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) | ||
343 | { | ||
344 | struct service_hndl *service; | ||
345 | struct list_head *list_itr; | ||
346 | |||
347 | list_for_each(list_itr, &service_table) { | ||
348 | service = list_entry(list_itr, struct service_hndl, list); | ||
349 | if (service->admin) | ||
350 | continue; | ||
351 | if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) | ||
352 | pr_err("QAT: Failed to restart service %s.\n", | ||
353 | service->name); | ||
354 | } | ||
355 | list_for_each(list_itr, &service_table) { | ||
356 | service = list_entry(list_itr, struct service_hndl, list); | ||
357 | if (!service->admin) | ||
358 | continue; | ||
359 | if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) | ||
360 | pr_err("QAT: Failed to restart service %s.\n", | ||
361 | service->name); | ||
362 | } | ||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) | ||
367 | { | ||
368 | struct service_hndl *service; | ||
369 | struct list_head *list_itr; | ||
370 | |||
371 | list_for_each(list_itr, &service_table) { | ||
372 | service = list_entry(list_itr, struct service_hndl, list); | ||
373 | if (service->admin) | ||
374 | continue; | ||
375 | if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) | ||
376 | pr_err("QAT: Failed to restart service %s.\n", | ||
377 | service->name); | ||
378 | } | ||
379 | list_for_each(list_itr, &service_table) { | ||
380 | service = list_entry(list_itr, struct service_hndl, list); | ||
381 | if (!service->admin) | ||
382 | continue; | ||
383 | if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) | ||
384 | pr_err("QAT: Failed to restart service %s.\n", | ||
385 | service->name); | ||
386 | } | ||
387 | return 0; | ||
388 | } | ||
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c new file mode 100644 index 000000000000..5f3fa45348b4 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_transport.c | |||
@@ -0,0 +1,567 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/delay.h> | ||
48 | #include "adf_accel_devices.h" | ||
49 | #include "adf_transport_internal.h" | ||
50 | #include "adf_transport_access_macros.h" | ||
51 | #include "adf_cfg.h" | ||
52 | #include "adf_common_drv.h" | ||
53 | |||
54 | static inline uint32_t adf_modulo(uint32_t data, uint32_t shift) | ||
55 | { | ||
56 | uint32_t div = data >> shift; | ||
57 | uint32_t mult = div << shift; | ||
58 | |||
59 | return data - mult; | ||
60 | } | ||
61 | |||
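Since ring sizes are powers of two, adf_modulo(data, shift) is simply data mod 2^shift computed without a divide; for example adf_modulo(0x1234, 10) == 0x234. An equivalent masked form, shown only for illustration:

static inline uint32_t adf_modulo_ref(uint32_t data, uint32_t shift)
{
	/* Identical result to adf_modulo(): keep the low 'shift' bits */
	return data & ((1U << shift) - 1);
}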
62 | static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size) | ||
63 | { | ||
64 | if (((size - 1) & addr) != 0) | ||
65 | return -EFAULT; | ||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num) | ||
70 | { | ||
71 | int i = ADF_MIN_RING_SIZE; | ||
72 | |||
73 | for (; i <= ADF_MAX_RING_SIZE; i++) | ||
74 | if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) | ||
75 | return i; | ||
76 | |||
77 | return ADF_DEFAULT_RING_SIZE; | ||
78 | } | ||
79 | |||
80 | static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) | ||
81 | { | ||
82 | spin_lock(&bank->lock); | ||
83 | if (bank->ring_mask & (1 << ring)) { | ||
84 | spin_unlock(&bank->lock); | ||
85 | return -EFAULT; | ||
86 | } | ||
87 | bank->ring_mask |= (1 << ring); | ||
88 | spin_unlock(&bank->lock); | ||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) | ||
93 | { | ||
94 | spin_lock(&bank->lock); | ||
95 | bank->ring_mask &= ~(1 << ring); | ||
96 | spin_unlock(&bank->lock); | ||
97 | } | ||
98 | |||
99 | static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) | ||
100 | { | ||
101 | spin_lock_bh(&bank->lock); | ||
102 | bank->irq_mask |= (1 << ring); | ||
103 | spin_unlock_bh(&bank->lock); | ||
104 | WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask); | ||
105 | WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number, | ||
106 | bank->irq_coalesc_timer); | ||
107 | } | ||
108 | |||
109 | static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) | ||
110 | { | ||
111 | spin_lock_bh(&bank->lock); | ||
112 | bank->irq_mask &= ~(1 << ring); | ||
113 | spin_unlock_bh(&bank->lock); | ||
114 | WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask); | ||
115 | } | ||
116 | |||
117 | int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg) | ||
118 | { | ||
119 | if (atomic_add_return(1, ring->inflights) > | ||
120 | ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) { | ||
121 | atomic_dec(ring->inflights); | ||
122 | return -EAGAIN; | ||
123 | } | ||
124 | spin_lock_bh(&ring->lock); | ||
125 | memcpy(ring->base_addr + ring->tail, msg, | ||
126 | ADF_MSG_SIZE_TO_BYTES(ring->msg_size)); | ||
127 | |||
128 | ring->tail = adf_modulo(ring->tail + | ||
129 | ADF_MSG_SIZE_TO_BYTES(ring->msg_size), | ||
130 | ADF_RING_SIZE_MODULO(ring->ring_size)); | ||
131 | WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number, | ||
132 | ring->ring_number, ring->tail); | ||
133 | spin_unlock_bh(&ring->lock); | ||
134 | return 0; | ||
135 | } | ||
136 | |||
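adf_send_message() fails fast with -EAGAIN once the ring's inflight budget is exhausted rather than blocking, so callers own the retry policy. A hypothetical producer might look like this (a real caller would bound the retry or back off):

static int example_send(struct adf_etr_ring_data *tx_ring, uint32_t *req)
{
	int ret;

	do {
		ret = adf_send_message(tx_ring, req);	/* copies one message */
	} while (ret == -EAGAIN);
	return ret;
}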
137 | static int adf_handle_response(struct adf_etr_ring_data *ring) | ||
138 | { | ||
139 | uint32_t msg_counter = 0; | ||
140 | uint32_t *msg = (uint32_t *)(ring->base_addr + ring->head); | ||
141 | |||
142 | while (*msg != ADF_RING_EMPTY_SIG) { | ||
143 | ring->callback((uint32_t *)msg); | ||
144 | *msg = ADF_RING_EMPTY_SIG; | ||
145 | ring->head = adf_modulo(ring->head + | ||
146 | ADF_MSG_SIZE_TO_BYTES(ring->msg_size), | ||
147 | ADF_RING_SIZE_MODULO(ring->ring_size)); | ||
148 | msg_counter++; | ||
149 | msg = (uint32_t *)(ring->base_addr + ring->head); | ||
150 | } | ||
151 | if (msg_counter > 0) { | ||
152 | WRITE_CSR_RING_HEAD(ring->bank->csr_addr, | ||
153 | ring->bank->bank_number, | ||
154 | ring->ring_number, ring->head); | ||
155 | atomic_sub(msg_counter, ring->inflights); | ||
156 | } | ||
157 | return 0; | ||
158 | } | ||
159 | |||
160 | static void adf_configure_tx_ring(struct adf_etr_ring_data *ring) | ||
161 | { | ||
162 | uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size); | ||
163 | |||
164 | WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number, | ||
165 | ring->ring_number, ring_config); | ||
166 | } | ||
167 | |||
168 | static void adf_configure_rx_ring(struct adf_etr_ring_data *ring) | ||
169 | { | ||
170 | uint32_t ring_config = | ||
171 | BUILD_RESP_RING_CONFIG(ring->ring_size, | ||
172 | ADF_RING_NEAR_WATERMARK_512, | ||
173 | ADF_RING_NEAR_WATERMARK_0); | ||
174 | |||
175 | WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number, | ||
176 | ring->ring_number, ring_config); | ||
177 | } | ||
178 | |||
179 | static int adf_init_ring(struct adf_etr_ring_data *ring) | ||
180 | { | ||
181 | struct adf_etr_bank_data *bank = ring->bank; | ||
182 | struct adf_accel_dev *accel_dev = bank->accel_dev; | ||
183 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
184 | uint64_t ring_base; | ||
185 | uint32_t ring_size_bytes = | ||
186 | ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size); | ||
187 | |||
188 | ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes); | ||
189 | ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev), | ||
190 | ring_size_bytes, &ring->dma_addr, | ||
191 | GFP_KERNEL); | ||
192 | if (!ring->base_addr) | ||
193 | return -ENOMEM; | ||
194 | |||
195 | memset(ring->base_addr, 0x7F, ring_size_bytes); /* mark all slots empty */ | ||
196 | /* The base_addr has to be aligned to the size of the buffer */ | ||
197 | if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) { | ||
198 | pr_err("QAT: Ring address not aligned\n"); | ||
199 | dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes, | ||
200 | ring->base_addr, ring->dma_addr); | ||
201 | return -EFAULT; | ||
202 | } | ||
203 | |||
204 | if (hw_data->tx_rings_mask & (1 << ring->ring_number)) | ||
205 | adf_configure_tx_ring(ring); | ||
206 | else | ||
207 | adf_configure_rx_ring(ring); | ||
208 | |||
210 | ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size); | ||
211 | WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number, | ||
212 | ring->ring_number, ring_base); | ||
213 | spin_lock_init(&ring->lock); | ||
214 | return 0; | ||
215 | } | ||
216 | |||
217 | static void adf_cleanup_ring(struct adf_etr_ring_data *ring) | ||
218 | { | ||
219 | uint32_t ring_size_bytes = | ||
220 | ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size); | ||
221 | ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes); | ||
222 | |||
223 | if (ring->base_addr) { | ||
224 | memset(ring->base_addr, 0x7F, ring_size_bytes); | ||
225 | dma_free_coherent(&GET_DEV(ring->bank->accel_dev), | ||
226 | ring_size_bytes, ring->base_addr, | ||
227 | ring->dma_addr); | ||
228 | } | ||
229 | } | ||
230 | |||
231 | int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, | ||
232 | uint32_t bank_num, uint32_t num_msgs, | ||
233 | uint32_t msg_size, const char *ring_name, | ||
234 | adf_callback_fn callback, int poll_mode, | ||
235 | struct adf_etr_ring_data **ring_ptr) | ||
236 | { | ||
237 | struct adf_etr_data *transport_data = accel_dev->transport; | ||
238 | struct adf_etr_bank_data *bank; | ||
239 | struct adf_etr_ring_data *ring; | ||
240 | char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; | ||
241 | uint32_t ring_num; | ||
242 | int ret; | ||
243 | |||
244 | if (bank_num >= GET_MAX_BANKS(accel_dev)) { | ||
245 | pr_err("QAT: Invalid bank number\n"); | ||
246 | return -EFAULT; | ||
247 | } | ||
248 | if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) { | ||
249 | pr_err("QAT: Invalid msg size\n"); | ||
250 | return -EFAULT; | ||
251 | } | ||
252 | if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs), | ||
253 | ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) { | ||
254 | pr_err("QAT: Invalid ring size for given msg size\n"); | ||
255 | return -EFAULT; | ||
256 | } | ||
257 | if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) { | ||
258 | pr_err("QAT: Section %s, no such entry : %s\n", | ||
259 | section, ring_name); | ||
260 | return -EFAULT; | ||
261 | } | ||
262 | if (kstrtouint(val, 10, &ring_num)) { | ||
263 | pr_err("QAT: Can't get ring number\n"); | ||
264 | return -EFAULT; | ||
265 | } | ||
266 | |||
267 | bank = &transport_data->banks[bank_num]; | ||
268 | if (adf_reserve_ring(bank, ring_num)) { | ||
269 | pr_err("QAT: Ring %d, %s already exists.\n", | ||
270 | ring_num, ring_name); | ||
271 | return -EFAULT; | ||
272 | } | ||
273 | ring = &bank->rings[ring_num]; | ||
274 | ring->ring_number = ring_num; | ||
275 | ring->bank = bank; | ||
276 | ring->callback = callback; | ||
277 | ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size); | ||
278 | ring->ring_size = adf_verify_ring_size(msg_size, num_msgs); | ||
279 | ring->head = 0; | ||
280 | ring->tail = 0; | ||
281 | atomic_set(ring->inflights, 0); | ||
282 | ret = adf_init_ring(ring); | ||
283 | if (ret) | ||
284 | goto err; | ||
285 | |||
286 | /* Enable HW arbitration for the given ring */ | ||
287 | accel_dev->hw_device->hw_arb_ring_enable(ring); | ||
288 | |||
289 | if (adf_ring_debugfs_add(ring, ring_name)) { | ||
290 | pr_err("QAT: Couldn't add ring debugfs entry\n"); | ||
291 | ret = -EFAULT; | ||
292 | goto err; | ||
293 | } | ||
294 | |||
295 | /* Enable interrupts if needed */ | ||
296 | if (callback && (!poll_mode)) | ||
297 | adf_enable_ring_irq(bank, ring->ring_number); | ||
298 | *ring_ptr = ring; | ||
299 | return 0; | ||
300 | err: | ||
301 | adf_cleanup_ring(ring); | ||
302 | adf_unreserve_ring(bank, ring_num); | ||
303 | accel_dev->hw_device->hw_arb_ring_disable(ring); | ||
304 | return ret; | ||
305 | } | ||
306 | |||
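Rings are created in TX/RX pairs by the services built on top of this transport. The sketch below mirrors that expected usage; the section name "KERNEL" and the Ring* key names are illustrative assumptions, since the real names come from the configuration written through the ctl device.

static int example_create_pair(struct adf_accel_dev *accel_dev,
			       adf_callback_fn resp_cb,
			       struct adf_etr_ring_data **tx,
			       struct adf_etr_ring_data **rx)
{
	int ret;

	/* TX ring: no callback, 64 messages of 64 bytes each */
	ret = adf_create_ring(accel_dev, "KERNEL", 0, 64, 64,
			      "RingTx", NULL, 0, tx);
	if (ret)
		return ret;
	/* RX ring: response callback, IRQ mode (poll_mode == 0) */
	return adf_create_ring(accel_dev, "KERNEL", 0, 64, 64,
			       "RingRx", resp_cb, 0, rx);
}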
307 | void adf_remove_ring(struct adf_etr_ring_data *ring) | ||
308 | { | ||
309 | struct adf_etr_bank_data *bank = ring->bank; | ||
310 | struct adf_accel_dev *accel_dev = bank->accel_dev; | ||
311 | |||
312 | /* Disable interrupts for the given ring */ | ||
313 | adf_disable_ring_irq(bank, ring->ring_number); | ||
314 | |||
315 | /* Clear the ring's CSR configuration and base registers */ | ||
316 | WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number, | ||
317 | ring->ring_number, 0); | ||
318 | WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number, | ||
319 | ring->ring_number, 0); | ||
320 | adf_ring_debugfs_rm(ring); | ||
321 | adf_unreserve_ring(bank, ring->ring_number); | ||
322 | /* Disable HW arbitration for the given ring */ | ||
323 | accel_dev->hw_device->hw_arb_ring_disable(ring); | ||
324 | adf_cleanup_ring(ring); | ||
325 | } | ||
326 | |||
327 | static void adf_ring_response_handler(struct adf_etr_bank_data *bank) | ||
328 | { | ||
329 | uint32_t empty_rings, i; | ||
330 | |||
331 | empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number); | ||
332 | empty_rings = ~empty_rings & bank->irq_mask; | ||
333 | |||
334 | for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) { | ||
335 | if (empty_rings & (1 << i)) | ||
336 | adf_handle_response(&bank->rings[i]); | ||
337 | } | ||
338 | } | ||
339 | |||
340 | /** | ||
341 | * adf_response_handler() - Bottom half response handler | ||
342 | * @bank_addr: Address of the ring bank for which the BH was scheduled. | ||
343 | * | ||
344 | * Function is the bottom half handler for the response from acceleration | ||
345 | * device. There is one handler for every ring bank. Function checks all | ||
346 | * communication rings in the bank. | ||
347 | * To be used by QAT device specific drivers. | ||
348 | * | ||
349 | * Return: void | ||
350 | */ | ||
351 | void adf_response_handler(unsigned long bank_addr) | ||
352 | { | ||
353 | struct adf_etr_bank_data *bank = (void *)bank_addr; | ||
354 | |||
355 | /* Handle all the responses and re-enable IRQs */ | ||
356 | adf_ring_response_handler(bank); | ||
357 | WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, | ||
358 | bank->irq_mask); | ||
359 | } | ||
360 | EXPORT_SYMBOL_GPL(adf_response_handler); | ||
361 | |||
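The handler above is meant to run in tasklet bottom-half context, one tasklet per bank. Hypothetical ISR wiring (names invented for illustration) would only schedule the tasklet and let the BH drain the rings and re-enable the bank IRQ:

#include <linux/interrupt.h>

static void example_setup_bh(struct adf_etr_bank_data *bank,
			     struct tasklet_struct *t)
{
	/* The bank address becomes the tasklet's 'unsigned long' argument */
	tasklet_init(t, adf_response_handler, (unsigned long)bank);
}

static irqreturn_t example_isr(int irq, void *bank_tasklet)
{
	tasklet_hi_schedule(bank_tasklet);
	return IRQ_HANDLED;
}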
362 | static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev, | ||
363 | const char *section, const char *format, | ||
364 | uint32_t key, uint32_t *value) | ||
365 | { | ||
366 | char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; | ||
367 | char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; | ||
368 | |||
369 | snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key); | ||
370 | |||
371 | if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf)) | ||
372 | return -EFAULT; | ||
373 | |||
374 | if (kstrtouint(val_buf, 10, value)) | ||
375 | return -EFAULT; | ||
376 | return 0; | ||
377 | } | ||
378 | |||
379 | static void adf_enable_coalesc(struct adf_etr_bank_data *bank, | ||
380 | const char *section, uint32_t bank_num_in_accel) | ||
381 | { | ||
382 | if (adf_get_cfg_int(bank->accel_dev, section, | ||
383 | ADF_ETRMGR_COALESCE_TIMER_FORMAT, | ||
384 | bank_num_in_accel, &bank->irq_coalesc_timer)) | ||
385 | bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME; | ||
386 | |||
387 | if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer || | ||
388 | ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer) | ||
389 | bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME; | ||
390 | } | ||
391 | |||
392 | static int adf_init_bank(struct adf_accel_dev *accel_dev, | ||
393 | struct adf_etr_bank_data *bank, | ||
394 | uint32_t bank_num, void __iomem *csr_addr) | ||
395 | { | ||
396 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
397 | struct adf_etr_ring_data *ring; | ||
398 | struct adf_etr_ring_data *tx_ring; | ||
399 | uint32_t i, coalesc_enabled; | ||
400 | |||
401 | memset(bank, 0, sizeof(*bank)); | ||
402 | bank->bank_number = bank_num; | ||
403 | bank->csr_addr = csr_addr; | ||
404 | bank->accel_dev = accel_dev; | ||
405 | spin_lock_init(&bank->lock); | ||
406 | |||
407 | /* Always enable IRQ coalescing. This allows use of the | ||
408 | * optimised flag and the coalescing register. | ||
409 | * If it is disabled in the config file, just use the min time value. */ | ||
410 | if (!adf_get_cfg_int(accel_dev, "Accelerator0", | ||
411 | ADF_ETRMGR_COALESCING_ENABLED_FORMAT, | ||
412 | bank_num, &coalesc_enabled) && coalesc_enabled) | ||
413 | adf_enable_coalesc(bank, "Accelerator0", bank_num); | ||
414 | else | ||
415 | bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME; | ||
416 | |||
417 | for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) { | ||
418 | WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0); | ||
419 | WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0); | ||
420 | ring = &bank->rings[i]; | ||
421 | if (hw_data->tx_rings_mask & (1 << i)) { | ||
422 | ring->inflights = kzalloc_node(sizeof(atomic_t), | ||
423 | GFP_KERNEL, | ||
424 | accel_dev->numa_node); | ||
425 | if (!ring->inflights) | ||
426 | goto err; | ||
427 | } else { | ||
428 | if (i < hw_data->tx_rx_gap) { | ||
429 | pr_err("QAT: Invalid tx rings mask config\n"); | ||
430 | goto err; | ||
431 | } | ||
432 | tx_ring = &bank->rings[i - hw_data->tx_rx_gap]; | ||
433 | ring->inflights = tx_ring->inflights; | ||
434 | } | ||
435 | } | ||
436 | if (adf_bank_debugfs_add(bank)) { | ||
437 | pr_err("QAT: Failed to add bank debugfs entry\n"); | ||
438 | goto err; | ||
439 | } | ||
440 | |||
441 | WRITE_CSR_INT_SRCSEL(csr_addr, bank_num); | ||
442 | return 0; | ||
443 | err: | ||
444 | for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) { | ||
445 | ring = &bank->rings[i]; | ||
446 | if (hw_data->tx_rings_mask & (1 << i) && ring->inflights) | ||
447 | kfree(ring->inflights); | ||
448 | } | ||
449 | return -ENOMEM; | ||
450 | } | ||
451 | |||
452 | /** | ||
453 | * adf_init_etr_data() - Initialize transport rings for acceleration device | ||
454 | * @accel_dev: Pointer to acceleration device. | ||
455 | * | ||
456 | * Function initializes the communication channels (rings) of the | ||
457 | * acceleration device accel_dev. | ||
458 | * To be used by QAT device specific drivers. | ||
459 | * | ||
460 | * Return: 0 on success, error code otherwise. | ||
461 | */ | ||
462 | int adf_init_etr_data(struct adf_accel_dev *accel_dev) | ||
463 | { | ||
464 | struct adf_etr_data *etr_data; | ||
465 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
466 | void __iomem *csr_addr; | ||
467 | uint32_t size; | ||
468 | uint32_t num_banks = 0; | ||
469 | int i, ret; | ||
470 | |||
471 | etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL, | ||
472 | accel_dev->numa_node); | ||
473 | if (!etr_data) | ||
474 | return -ENOMEM; | ||
475 | |||
476 | num_banks = GET_MAX_BANKS(accel_dev); | ||
477 | size = num_banks * sizeof(struct adf_etr_bank_data); | ||
478 | etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node); | ||
479 | if (!etr_data->banks) { | ||
480 | ret = -ENOMEM; | ||
481 | goto err_bank; | ||
482 | } | ||
483 | |||
484 | accel_dev->transport = etr_data; | ||
485 | i = hw_data->get_etr_bar_id(hw_data); | ||
486 | csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr; | ||
487 | |||
488 | /* accel_dev->debugfs_dir should always be non-NULL here */ | ||
489 | etr_data->debug = debugfs_create_dir("transport", | ||
490 | accel_dev->debugfs_dir); | ||
491 | if (!etr_data->debug) { | ||
492 | pr_err("QAT: Unable to create transport debugfs entry\n"); | ||
493 | ret = -ENOENT; | ||
494 | goto err_bank_debug; | ||
495 | } | ||
496 | |||
497 | for (i = 0; i < num_banks; i++) { | ||
498 | ret = adf_init_bank(accel_dev, &etr_data->banks[i], i, | ||
499 | csr_addr); | ||
500 | if (ret) | ||
501 | goto err_bank_all; | ||
502 | } | ||
503 | |||
504 | return 0; | ||
505 | |||
506 | err_bank_all: | ||
507 | debugfs_remove(etr_data->debug); | ||
508 | err_bank_debug: | ||
509 | kfree(etr_data->banks); | ||
510 | err_bank: | ||
511 | kfree(etr_data); | ||
512 | accel_dev->transport = NULL; | ||
513 | return ret; | ||
514 | } | ||
515 | EXPORT_SYMBOL_GPL(adf_init_etr_data); | ||
516 | |||
517 | static void cleanup_bank(struct adf_etr_bank_data *bank) | ||
518 | { | ||
519 | uint32_t i; | ||
520 | |||
521 | for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) { | ||
522 | struct adf_accel_dev *accel_dev = bank->accel_dev; | ||
523 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
524 | struct adf_etr_ring_data *ring = &bank->rings[i]; | ||
525 | |||
526 | if (bank->ring_mask & (1 << i)) | ||
527 | adf_cleanup_ring(ring); | ||
528 | |||
529 | if (hw_data->tx_rings_mask & (1 << i)) | ||
530 | kfree(ring->inflights); | ||
531 | } | ||
532 | adf_bank_debugfs_rm(bank); | ||
533 | memset(bank, 0, sizeof(*bank)); | ||
534 | } | ||
535 | |||
536 | static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev) | ||
537 | { | ||
538 | struct adf_etr_data *etr_data = accel_dev->transport; | ||
539 | uint32_t i, num_banks = GET_MAX_BANKS(accel_dev); | ||
540 | |||
541 | for (i = 0; i < num_banks; i++) | ||
542 | cleanup_bank(&etr_data->banks[i]); | ||
543 | } | ||
544 | |||
545 | /** | ||
546 | * adf_cleanup_etr_data() - Clear transport rings for acceleration device | ||
547 | * @accel_dev: Pointer to acceleration device. | ||
548 | * | ||
549 | * Function clears the communication channels (rings) of the | ||
550 | * acceleration device accel_dev. | ||
551 | * To be used by QAT device specific drivers. | ||
552 | * | ||
553 | * Return: void | ||
554 | */ | ||
555 | void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev) | ||
556 | { | ||
557 | struct adf_etr_data *etr_data = accel_dev->transport; | ||
558 | |||
559 | if (etr_data) { | ||
560 | adf_cleanup_etr_handles(accel_dev); | ||
561 | debugfs_remove(etr_data->debug); | ||
562 | kfree(etr_data->banks); | ||
563 | kfree(etr_data); | ||
564 | accel_dev->transport = NULL; | ||
565 | } | ||
566 | } | ||
567 | EXPORT_SYMBOL_GPL(adf_cleanup_etr_data); | ||
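A device-specific driver is expected to bracket its ring usage with this init/cleanup pair. A minimal sketch, assuming hypothetical wrapper names (nothing below is part of this patch):

	static int my_qat_dev_init(struct adf_accel_dev *accel_dev)
	{
		int ret;

		ret = adf_init_etr_data(accel_dev); /* sets accel_dev->transport */
		if (ret)
			return ret;

		/* ... adf_create_ring() calls, algorithm registration ... */
		return 0;
	}

	static void my_qat_dev_shutdown(struct adf_accel_dev *accel_dev)
	{
		/* ... adf_remove_ring() calls ... */
		adf_cleanup_etr_data(accel_dev); /* banks, debugfs, transport freed */
	}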
diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h new file mode 100644 index 000000000000..386485bd9c95 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_transport.h | |||
@@ -0,0 +1,63 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_TRANSPORT_H | ||
48 | #define ADF_TRANSPORT_H | ||
49 | |||
50 | #include "adf_accel_devices.h" | ||
51 | |||
52 | struct adf_etr_ring_data; | ||
53 | |||
54 | typedef void (*adf_callback_fn)(void *resp_msg); | ||
55 | |||
56 | int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, | ||
57 | uint32_t bank_num, uint32_t num_mgs, uint32_t msg_size, | ||
58 | const char *ring_name, adf_callback_fn callback, | ||
59 | int poll_mode, struct adf_etr_ring_data **ring_ptr); | ||
60 | |||
61 | int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg); | ||
62 | void adf_remove_ring(struct adf_etr_ring_data *ring); | ||
63 | #endif | ||
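The intended flow for this API is: create a tx/rx ring pair, post requests with adf_send_message(), and take responses through the callback. A sketch under assumptions: the section name "SYM0", the ring names, the callback, and the count/byte-size arguments are all illustrative (the copy semantics live in adf_transport.c, earlier in this patch):

	static void my_resp_handler(void *resp_msg)	/* hypothetical */
	{
		/* decode resp_msg and complete the matching request */
	}

	static int my_create_rings(struct adf_accel_dev *accel_dev)
	{
		struct adf_etr_ring_data *tx_ring, *rx_ring;
		uint32_t msg[32] = { 0 };	/* one 128-byte request */
		int ret;

		ret = adf_create_ring(accel_dev, "SYM0", 0, 64, 128,
				      "my_ring_tx", NULL, 0, &tx_ring);
		if (ret)
			return ret;

		ret = adf_create_ring(accel_dev, "SYM0", 0, 64, 32,
				      "my_ring_rx", my_resp_handler, 0,
				      &rx_ring);
		if (ret) {
			adf_remove_ring(tx_ring);
			return ret;
		}

		return adf_send_message(tx_ring, msg);	/* enqueue one request */
	}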
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h new file mode 100644 index 000000000000..91d88d676580 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h | |||
@@ -0,0 +1,160 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_TRANSPORT_ACCESS_MACROS_H | ||
48 | #define ADF_TRANSPORT_ACCESS_MACROS_H | ||
49 | |||
50 | #include "adf_accel_devices.h" | ||
51 | #define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL | ||
52 | #define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL | ||
53 | #define ADF_RING_CSR_RING_CONFIG 0x000 | ||
54 | #define ADF_RING_CSR_RING_LBASE 0x040 | ||
55 | #define ADF_RING_CSR_RING_UBASE 0x080 | ||
56 | #define ADF_RING_CSR_RING_HEAD 0x0C0 | ||
57 | #define ADF_RING_CSR_RING_TAIL 0x100 | ||
58 | #define ADF_RING_CSR_E_STAT 0x14C | ||
59 | #define ADF_RING_CSR_INT_SRCSEL 0x174 | ||
60 | #define ADF_RING_CSR_INT_SRCSEL_2 0x178 | ||
61 | #define ADF_RING_CSR_INT_COL_EN 0x17C | ||
62 | #define ADF_RING_CSR_INT_COL_CTL 0x180 | ||
63 | #define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 | ||
64 | #define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 | ||
65 | #define ADF_RING_BUNDLE_SIZE 0x1000 | ||
66 | #define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A | ||
67 | #define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05 | ||
68 | #define ADF_COALESCING_MIN_TIME 0x1FF | ||
69 | #define ADF_COALESCING_MAX_TIME 0xFFFFF | ||
70 | #define ADF_COALESCING_DEF_TIME 0x27FF | ||
71 | #define ADF_RING_NEAR_WATERMARK_512 0x08 | ||
72 | #define ADF_RING_NEAR_WATERMARK_0 0x00 | ||
73 | #define ADF_RING_EMPTY_SIG 0x7F7F7F7F | ||
74 | |||
75 | /* Valid internal ring size values */ | ||
76 | #define ADF_RING_SIZE_128 0x01 | ||
77 | #define ADF_RING_SIZE_256 0x02 | ||
78 | #define ADF_RING_SIZE_512 0x03 | ||
79 | #define ADF_RING_SIZE_4K 0x06 | ||
80 | #define ADF_RING_SIZE_16K 0x08 | ||
81 | #define ADF_RING_SIZE_4M 0x10 | ||
82 | #define ADF_MIN_RING_SIZE ADF_RING_SIZE_128 | ||
83 | #define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M | ||
84 | #define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K | ||
85 | |||
86 | /* Valid internal msg size values */ | ||
87 | #define ADF_MSG_SIZE_32 0x01 | ||
88 | #define ADF_MSG_SIZE_64 0x02 | ||
89 | #define ADF_MSG_SIZE_128 0x04 | ||
90 | #define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32 | ||
91 | #define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128 | ||
92 | |||
93 | /* Size to bytes conversion macros for ring and msg values */ | ||
94 | #define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5) | ||
95 | #define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5) | ||
96 | #define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7) | ||
97 | #define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7) | ||
98 | |||
99 | /* Minimum ring buffer size for memory allocation */ | ||
100 | #define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \ | ||
101 | ADF_RING_SIZE_4K : SIZE) | ||
102 | #define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6) | ||
103 | #define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \ | ||
104 | ((((1 << (RING_SIZE - 1)) << 4) >> MSG_SIZE) - 1) | ||
105 | #define BUILD_RING_CONFIG(size) \ | ||
106 | ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \ | ||
107 | | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \ | ||
108 | | size) | ||
109 | #define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \ | ||
110 | ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \ | ||
111 | | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \ | ||
112 | | size) | ||
113 | #define BUILD_RING_BASE_ADDR(addr, size) \ | ||
114 | ((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size)) | ||
115 | #define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ | ||
116 | ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
117 | ADF_RING_CSR_RING_HEAD + (ring << 2)) | ||
118 | #define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ | ||
119 | ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
120 | ADF_RING_CSR_RING_TAIL + (ring << 2)) | ||
121 | #define READ_CSR_E_STAT(csr_base_addr, bank) \ | ||
122 | ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
123 | ADF_RING_CSR_E_STAT) | ||
124 | #define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ | ||
125 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
126 | ADF_RING_CSR_RING_CONFIG + (ring << 2), value) | ||
127 | #define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ | ||
128 | do { \ | ||
129 | uint32_t l_base = 0, u_base = 0; \ | ||
130 | l_base = (uint32_t)(value & 0xFFFFFFFF); \ | ||
131 | u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \ | ||
132 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
133 | ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \ | ||
134 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
135 | ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \ | ||
136 | } while (0) | ||
137 | #define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ | ||
138 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
139 | ADF_RING_CSR_RING_HEAD + (ring << 2), value) | ||
140 | #define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ | ||
141 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
142 | ADF_RING_CSR_RING_TAIL + (ring << 2), value) | ||
143 | #define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ | ||
144 | do { \ | ||
145 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
146 | ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \ | ||
147 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
148 | ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \ | ||
149 | } while (0) | ||
150 | #define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ | ||
151 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
152 | ADF_RING_CSR_INT_COL_EN, value) | ||
153 | #define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ | ||
154 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
155 | ADF_RING_CSR_INT_COL_CTL, \ | ||
156 | ADF_RING_CSR_INT_COL_CTL_ENABLE | value) | ||
157 | #define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ | ||
158 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
159 | ADF_RING_CSR_INT_FLAG_AND_COL, value) | ||
160 | #endif | ||
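The encodings above are log2-based, so the conversions reduce to shifts. A few worked values, derived directly from the macro definitions:

	/*
	 * ADF_MSG_SIZE_TO_BYTES(ADF_MSG_SIZE_64)  = 0x02 << 5 = 64 bytes
	 * ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_16K)
	 *     = (1 << (0x08 - 1)) << 7 = 16384 bytes
	 * ADF_RING_SIZE_MODULO(ADF_RING_SIZE_16K) = 0x08 + 0x6 = 14,
	 *     i.e. log2 of the ring's byte size (2^14 = 16384)
	 *
	 * BUILD_RING_BASE_ADDR(addr, size) stores addr in 64-byte units
	 * (addr >> 6) and clears the low `size` bits of that value, so a
	 * ring's DMA base must be naturally aligned to its own byte size,
	 * 2^(6 + size): 16 KB alignment for an ADF_RING_SIZE_16K ring.
	 */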
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c new file mode 100644 index 000000000000..6b6974553514 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c | |||
@@ -0,0 +1,304 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/mutex.h> | ||
48 | #include <linux/slab.h> | ||
49 | #include <linux/seq_file.h> | ||
50 | #include "adf_accel_devices.h" | ||
51 | #include "adf_transport_internal.h" | ||
52 | #include "adf_transport_access_macros.h" | ||
53 | |||
54 | static DEFINE_MUTEX(ring_read_lock); | ||
55 | static DEFINE_MUTEX(bank_read_lock); | ||
56 | |||
57 | static void *adf_ring_start(struct seq_file *sfile, loff_t *pos) | ||
58 | { | ||
59 | struct adf_etr_ring_data *ring = sfile->private; | ||
60 | |||
61 | mutex_lock(&ring_read_lock); | ||
62 | if (*pos == 0) | ||
63 | return SEQ_START_TOKEN; | ||
64 | |||
65 | if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) / | ||
66 | ADF_MSG_SIZE_TO_BYTES(ring->msg_size))) | ||
67 | return NULL; | ||
68 | |||
69 | return ring->base_addr + | ||
70 | (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++); | ||
71 | } | ||
72 | |||
73 | static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos) | ||
74 | { | ||
75 | struct adf_etr_ring_data *ring = sfile->private; | ||
76 | |||
77 | if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) / | ||
78 | ADF_MSG_SIZE_TO_BYTES(ring->msg_size))) | ||
79 | return NULL; | ||
80 | |||
81 | return ring->base_addr + | ||
82 | (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++); | ||
83 | } | ||
84 | |||
85 | static int adf_ring_show(struct seq_file *sfile, void *v) | ||
86 | { | ||
87 | struct adf_etr_ring_data *ring = sfile->private; | ||
88 | struct adf_etr_bank_data *bank = ring->bank; | ||
89 | uint32_t *msg = v; | ||
90 | void __iomem *csr = ring->bank->csr_addr; | ||
91 | int i, x; | ||
92 | |||
93 | if (v == SEQ_START_TOKEN) { | ||
94 | int head, tail, empty; | ||
95 | |||
96 | head = READ_CSR_RING_HEAD(csr, bank->bank_number, | ||
97 | ring->ring_number); | ||
98 | tail = READ_CSR_RING_TAIL(csr, bank->bank_number, | ||
99 | ring->ring_number); | ||
100 | empty = READ_CSR_E_STAT(csr, bank->bank_number); | ||
101 | |||
102 | seq_puts(sfile, "------- Ring configuration -------\n"); | ||
103 | seq_printf(sfile, "ring num %d, bank num %d\n", | ||
104 | ring->ring_number, ring->bank->bank_number); | ||
105 | seq_printf(sfile, "head %x, tail %x, empty: %d\n", | ||
106 | head, tail, (empty & 1 << ring->ring_number) | ||
107 | >> ring->ring_number); | ||
108 | seq_printf(sfile, "ring size %d, msg size %d\n", | ||
109 | ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size), | ||
110 | ADF_MSG_SIZE_TO_BYTES(ring->msg_size)); | ||
111 | seq_puts(sfile, "----------- Ring data ------------\n"); | ||
112 | return 0; | ||
113 | } | ||
114 | seq_printf(sfile, "%p:", msg); | ||
115 | x = 0; | ||
116 | i = 0; | ||
117 | for (; i < (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2); i++) { | ||
118 | seq_printf(sfile, " %08X", *(msg + i)); | ||
119 | if ((ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2) != i + 1 && | ||
120 | (++x == 8)) { | ||
121 | seq_printf(sfile, "\n%p:", msg + i + 1); | ||
122 | x = 0; | ||
123 | } | ||
124 | } | ||
125 | seq_puts(sfile, "\n"); | ||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | static void adf_ring_stop(struct seq_file *sfile, void *v) | ||
130 | { | ||
131 | mutex_unlock(&ring_read_lock); | ||
132 | } | ||
133 | |||
134 | static const struct seq_operations adf_ring_sops = { | ||
135 | .start = adf_ring_start, | ||
136 | .next = adf_ring_next, | ||
137 | .stop = adf_ring_stop, | ||
138 | .show = adf_ring_show | ||
139 | }; | ||
140 | |||
141 | static int adf_ring_open(struct inode *inode, struct file *file) | ||
142 | { | ||
143 | int ret = seq_open(file, &adf_ring_sops); | ||
144 | |||
145 | if (!ret) { | ||
146 | struct seq_file *seq_f = file->private_data; | ||
147 | |||
148 | seq_f->private = inode->i_private; | ||
149 | } | ||
150 | return ret; | ||
151 | } | ||
152 | |||
153 | static const struct file_operations adf_ring_debug_fops = { | ||
154 | .open = adf_ring_open, | ||
155 | .read = seq_read, | ||
156 | .llseek = seq_lseek, | ||
157 | .release = seq_release | ||
158 | }; | ||
159 | |||
160 | int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name) | ||
161 | { | ||
162 | struct adf_etr_ring_debug_entry *ring_debug; | ||
163 | char entry_name[8]; | ||
164 | |||
165 | ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL); | ||
166 | if (!ring_debug) | ||
167 | return -ENOMEM; | ||
168 | |||
169 | strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name)); | ||
170 | snprintf(entry_name, sizeof(entry_name), "ring_%02d", | ||
171 | ring->ring_number); | ||
172 | |||
173 | ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR, | ||
174 | ring->bank->bank_debug_dir, | ||
175 | ring, &adf_ring_debug_fops); | ||
176 | if (!ring_debug->debug) { | ||
177 | pr_err("QAT: Failed to create ring debug entry.\n"); | ||
178 | kfree(ring_debug); | ||
179 | return -EFAULT; | ||
180 | } | ||
181 | ring->ring_debug = ring_debug; | ||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring) | ||
186 | { | ||
187 | if (ring->ring_debug) { | ||
188 | debugfs_remove(ring->ring_debug->debug); | ||
189 | kfree(ring->ring_debug); | ||
190 | ring->ring_debug = NULL; | ||
191 | } | ||
192 | } | ||
193 | |||
194 | static void *adf_bank_start(struct seq_file *sfile, loff_t *pos) | ||
195 | { | ||
196 | mutex_lock(&bank_read_lock); | ||
197 | if (*pos == 0) | ||
198 | return SEQ_START_TOKEN; | ||
199 | |||
200 | if (*pos >= ADF_ETR_MAX_RINGS_PER_BANK) | ||
201 | return NULL; | ||
202 | |||
203 | return pos; | ||
204 | } | ||
205 | |||
206 | static void *adf_bank_next(struct seq_file *sfile, void *v, loff_t *pos) | ||
207 | { | ||
208 | if (++(*pos) >= ADF_ETR_MAX_RINGS_PER_BANK) | ||
209 | return NULL; | ||
210 | |||
211 | return pos; | ||
212 | } | ||
213 | |||
214 | static int adf_bank_show(struct seq_file *sfile, void *v) | ||
215 | { | ||
216 | struct adf_etr_bank_data *bank = sfile->private; | ||
217 | |||
218 | if (v == SEQ_START_TOKEN) { | ||
219 | seq_printf(sfile, "------- Bank %d configuration -------\n", | ||
220 | bank->bank_number); | ||
221 | } else { | ||
222 | int ring_id = *((int *)v) - 1; | ||
223 | struct adf_etr_ring_data *ring = &bank->rings[ring_id]; | ||
224 | void __iomem *csr = bank->csr_addr; | ||
225 | int head, tail, empty; | ||
226 | |||
227 | if (!(bank->ring_mask & 1 << ring_id)) | ||
228 | return 0; | ||
229 | |||
230 | head = READ_CSR_RING_HEAD(csr, bank->bank_number, | ||
231 | ring->ring_number); | ||
232 | tail = READ_CSR_RING_TAIL(csr, bank->bank_number, | ||
233 | ring->ring_number); | ||
234 | empty = READ_CSR_E_STAT(csr, bank->bank_number); | ||
235 | |||
236 | seq_printf(sfile, | ||
237 | "ring num %02d, head %04x, tail %04x, empty: %d\n", | ||
238 | ring->ring_number, head, tail, | ||
239 | (empty & 1 << ring->ring_number) >> | ||
240 | ring->ring_number); | ||
241 | } | ||
242 | return 0; | ||
243 | } | ||
244 | |||
245 | static void adf_bank_stop(struct seq_file *sfile, void *v) | ||
246 | { | ||
247 | mutex_unlock(&bank_read_lock); | ||
248 | } | ||
249 | |||
250 | static const struct seq_operations adf_bank_sops = { | ||
251 | .start = adf_bank_start, | ||
252 | .next = adf_bank_next, | ||
253 | .stop = adf_bank_stop, | ||
254 | .show = adf_bank_show | ||
255 | }; | ||
256 | |||
257 | static int adf_bank_open(struct inode *inode, struct file *file) | ||
258 | { | ||
259 | int ret = seq_open(file, &adf_bank_sops); | ||
260 | |||
261 | if (!ret) { | ||
262 | struct seq_file *seq_f = file->private_data; | ||
263 | |||
264 | seq_f->private = inode->i_private; | ||
265 | } | ||
266 | return ret; | ||
267 | } | ||
268 | |||
269 | static const struct file_operations adf_bank_debug_fops = { | ||
270 | .open = adf_bank_open, | ||
271 | .read = seq_read, | ||
272 | .llseek = seq_lseek, | ||
273 | .release = seq_release | ||
274 | }; | ||
275 | |||
276 | int adf_bank_debugfs_add(struct adf_etr_bank_data *bank) | ||
277 | { | ||
278 | struct adf_accel_dev *accel_dev = bank->accel_dev; | ||
279 | struct dentry *parent = accel_dev->transport->debug; | ||
280 | char name[8]; | ||
281 | |||
282 | snprintf(name, sizeof(name), "bank_%02d", bank->bank_number); | ||
283 | bank->bank_debug_dir = debugfs_create_dir(name, parent); | ||
284 | if (!bank->bank_debug_dir) { | ||
285 | pr_err("QAT: Failed to create bank debug dir.\n"); | ||
286 | return -EFAULT; | ||
287 | } | ||
288 | |||
289 | bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR, | ||
290 | bank->bank_debug_dir, bank, | ||
291 | &adf_bank_debug_fops); | ||
292 | if (!bank->bank_debug_cfg) { | ||
293 | pr_err("QAT: Failed to create bank debug entry.\n"); | ||
294 | debugfs_remove(bank->bank_debug_dir); | ||
295 | return -EFAULT; | ||
296 | } | ||
297 | return 0; | ||
298 | } | ||
299 | |||
300 | void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank) | ||
301 | { | ||
302 | debugfs_remove(bank->bank_debug_cfg); | ||
303 | debugfs_remove(bank->bank_debug_dir); | ||
304 | } | ||
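With these seq_file hooks wired up, reading a bank's debugfs "config" entry produces output of the following shape, per the format strings in adf_bank_show() (the head/tail/empty values are illustrative, not taken from a real device):

	------- Bank 0 configuration -------
	ring num 00, head 0040, tail 0040, empty: 1
	ring num 01, head 0000, tail 0080, empty: 0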
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h new file mode 100644 index 000000000000..f854bac276b0 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h | |||
@@ -0,0 +1,118 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_TRANSPORT_INTRN_H | ||
48 | #define ADF_TRANSPORT_INTRN_H | ||
49 | |||
50 | #include <linux/interrupt.h> | ||
51 | #include <linux/atomic.h> | ||
52 | #include <linux/spinlock_types.h> | ||
53 | #include "adf_transport.h" | ||
54 | |||
55 | struct adf_etr_ring_debug_entry { | ||
56 | char ring_name[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; | ||
57 | struct dentry *debug; | ||
58 | }; | ||
59 | |||
60 | struct adf_etr_ring_data { | ||
61 | void *base_addr; | ||
62 | atomic_t *inflights; | ||
63 | spinlock_t lock; /* protects ring data struct */ | ||
64 | adf_callback_fn callback; | ||
65 | struct adf_etr_bank_data *bank; | ||
66 | dma_addr_t dma_addr; | ||
67 | uint16_t head; | ||
68 | uint16_t tail; | ||
69 | uint8_t ring_number; | ||
70 | uint8_t ring_size; | ||
71 | uint8_t msg_size; | ||
72 | uint8_t reserved; | ||
73 | struct adf_etr_ring_debug_entry *ring_debug; | ||
74 | } __packed; | ||
75 | |||
76 | struct adf_etr_bank_data { | ||
77 | struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK]; | ||
78 | struct tasklet_struct resp_hanlder; | ||
79 | void __iomem *csr_addr; | ||
80 | struct adf_accel_dev *accel_dev; | ||
81 | uint32_t irq_coalesc_timer; | ||
82 | uint16_t ring_mask; | ||
83 | uint16_t irq_mask; | ||
84 | spinlock_t lock; /* protects bank data struct */ | ||
85 | struct dentry *bank_debug_dir; | ||
86 | struct dentry *bank_debug_cfg; | ||
87 | uint32_t bank_number; | ||
88 | } __packed; | ||
89 | |||
90 | struct adf_etr_data { | ||
91 | struct adf_etr_bank_data *banks; | ||
92 | struct dentry *debug; | ||
93 | }; | ||
94 | |||
95 | void adf_response_handler(unsigned long bank_addr); | ||
96 | #ifdef CONFIG_DEBUG_FS | ||
97 | #include <linux/debugfs.h> | ||
98 | int adf_bank_debugfs_add(struct adf_etr_bank_data *bank); | ||
99 | void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank); | ||
100 | int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name); | ||
101 | void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring); | ||
102 | #else | ||
103 | static inline int adf_bank_debugfs_add(struct adf_etr_bank_data *bank) | ||
104 | { | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | #define adf_bank_debugfs_rm(bank) do {} while (0) | ||
109 | |||
110 | static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, | ||
111 | const char *name) | ||
112 | { | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | #define adf_ring_debugfs_rm(ring) do {} while (0) | ||
117 | #endif | ||
118 | #endif | ||
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h new file mode 100644 index 000000000000..f1e30e24a419 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h | |||
@@ -0,0 +1,316 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef _ICP_QAT_FW_H_ | ||
48 | #define _ICP_QAT_FW_H_ | ||
49 | #include <linux/types.h> | ||
50 | #include "icp_qat_hw.h" | ||
51 | |||
52 | #define QAT_FIELD_SET(flags, val, bitpos, mask) \ | ||
53 | { (flags) = (((flags) & (~((mask) << (bitpos)))) | \ | ||
54 | (((val) & (mask)) << (bitpos))) ; } | ||
55 | |||
56 | #define QAT_FIELD_GET(flags, bitpos, mask) \ | ||
57 | (((flags) >> (bitpos)) & (mask)) | ||
58 | |||
59 | #define ICP_QAT_FW_REQ_DEFAULT_SZ 128 | ||
60 | #define ICP_QAT_FW_RESP_DEFAULT_SZ 32 | ||
61 | #define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8 | ||
62 | #define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF | ||
63 | #define ICP_QAT_FW_NUM_LONGWORDS_1 1 | ||
64 | #define ICP_QAT_FW_NUM_LONGWORDS_2 2 | ||
65 | #define ICP_QAT_FW_NUM_LONGWORDS_3 3 | ||
66 | #define ICP_QAT_FW_NUM_LONGWORDS_4 4 | ||
67 | #define ICP_QAT_FW_NUM_LONGWORDS_5 5 | ||
68 | #define ICP_QAT_FW_NUM_LONGWORDS_6 6 | ||
69 | #define ICP_QAT_FW_NUM_LONGWORDS_7 7 | ||
70 | #define ICP_QAT_FW_NUM_LONGWORDS_10 10 | ||
71 | #define ICP_QAT_FW_NUM_LONGWORDS_13 13 | ||
72 | #define ICP_QAT_FW_NULL_REQ_SERV_ID 1 | ||
73 | |||
74 | enum icp_qat_fw_comn_resp_serv_id { | ||
75 | ICP_QAT_FW_COMN_RESP_SERV_NULL, | ||
76 | ICP_QAT_FW_COMN_RESP_SERV_CPM_FW, | ||
77 | ICP_QAT_FW_COMN_RESP_SERV_DELIMITER | ||
78 | }; | ||
79 | |||
80 | enum icp_qat_fw_comn_request_id { | ||
81 | ICP_QAT_FW_COMN_REQ_NULL = 0, | ||
82 | ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3, | ||
83 | ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4, | ||
84 | ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7, | ||
85 | ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9, | ||
86 | ICP_QAT_FW_COMN_REQ_DELIMITER | ||
87 | }; | ||
88 | |||
89 | struct icp_qat_fw_comn_req_hdr_cd_pars { | ||
90 | union { | ||
91 | struct { | ||
92 | uint64_t content_desc_addr; | ||
93 | uint16_t content_desc_resrvd1; | ||
94 | uint8_t content_desc_params_sz; | ||
95 | uint8_t content_desc_hdr_resrvd2; | ||
96 | uint32_t content_desc_resrvd3; | ||
97 | } s; | ||
98 | struct { | ||
99 | uint32_t serv_specif_fields[4]; | ||
100 | } s1; | ||
101 | } u; | ||
102 | }; | ||
103 | |||
104 | struct icp_qat_fw_comn_req_mid { | ||
105 | uint64_t opaque_data; | ||
106 | uint64_t src_data_addr; | ||
107 | uint64_t dest_data_addr; | ||
108 | uint32_t src_length; | ||
109 | uint32_t dst_length; | ||
110 | }; | ||
111 | |||
112 | struct icp_qat_fw_comn_req_cd_ctrl { | ||
113 | uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5]; | ||
114 | }; | ||
115 | |||
116 | struct icp_qat_fw_comn_req_hdr { | ||
117 | uint8_t resrvd1; | ||
118 | uint8_t service_cmd_id; | ||
119 | uint8_t service_type; | ||
120 | uint8_t hdr_flags; | ||
121 | uint16_t serv_specif_flags; | ||
122 | uint16_t comn_req_flags; | ||
123 | }; | ||
124 | |||
125 | struct icp_qat_fw_comn_req_rqpars { | ||
126 | uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13]; | ||
127 | }; | ||
128 | |||
129 | struct icp_qat_fw_comn_req { | ||
130 | struct icp_qat_fw_comn_req_hdr comn_hdr; | ||
131 | struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars; | ||
132 | struct icp_qat_fw_comn_req_mid comn_mid; | ||
133 | struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars; | ||
134 | struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl; | ||
135 | }; | ||
136 | |||
137 | struct icp_qat_fw_comn_error { | ||
138 | uint8_t xlat_err_code; | ||
139 | uint8_t cmp_err_code; | ||
140 | }; | ||
141 | |||
142 | struct icp_qat_fw_comn_resp_hdr { | ||
143 | uint8_t resrvd1; | ||
144 | uint8_t service_id; | ||
145 | uint8_t response_type; | ||
146 | uint8_t hdr_flags; | ||
147 | struct icp_qat_fw_comn_error comn_error; | ||
148 | uint8_t comn_status; | ||
149 | uint8_t cmd_id; | ||
150 | }; | ||
151 | |||
152 | struct icp_qat_fw_comn_resp { | ||
153 | struct icp_qat_fw_comn_resp_hdr comn_hdr; | ||
154 | uint64_t opaque_data; | ||
155 | uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; | ||
156 | }; | ||
157 | |||
158 | #define ICP_QAT_FW_COMN_REQ_FLAG_SET 1 | ||
159 | #define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0 | ||
160 | #define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7 | ||
161 | #define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1 | ||
162 | #define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F | ||
163 | |||
164 | #define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \ | ||
165 | icp_qat_fw_comn_req_hdr_t.service_type | ||
166 | |||
167 | #define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \ | ||
168 | icp_qat_fw_comn_req_hdr_t.service_type = val | ||
169 | |||
170 | #define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \ | ||
171 | icp_qat_fw_comn_req_hdr_t.service_cmd_id | ||
172 | |||
173 | #define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \ | ||
174 | icp_qat_fw_comn_req_hdr_t.service_cmd_id = val | ||
175 | |||
176 | #define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \ | ||
177 | ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags) | ||
178 | |||
179 | #define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \ | ||
180 | ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) | ||
181 | |||
182 | #define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \ | ||
183 | QAT_FIELD_GET(hdr_flags, \ | ||
184 | ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ | ||
185 | ICP_QAT_FW_COMN_VALID_FLAG_MASK) | ||
186 | |||
187 | #define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \ | ||
188 | (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK) | ||
189 | |||
190 | #define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \ | ||
191 | QAT_FIELD_SET((hdr_t.hdr_flags), (val), \ | ||
192 | ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ | ||
193 | ICP_QAT_FW_COMN_VALID_FLAG_MASK) | ||
194 | |||
195 | #define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \ | ||
196 | (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \ | ||
197 | ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) | ||
198 | |||
199 | #define QAT_COMN_PTR_TYPE_BITPOS 0 | ||
200 | #define QAT_COMN_PTR_TYPE_MASK 0x1 | ||
201 | #define QAT_COMN_CD_FLD_TYPE_BITPOS 1 | ||
202 | #define QAT_COMN_CD_FLD_TYPE_MASK 0x1 | ||
203 | #define QAT_COMN_PTR_TYPE_FLAT 0x0 | ||
204 | #define QAT_COMN_PTR_TYPE_SGL 0x1 | ||
205 | #define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0 | ||
206 | #define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1 | ||
207 | |||
208 | #define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \ | ||
209 | ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \ | ||
210 | | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS)) | ||
211 | |||
212 | #define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \ | ||
213 | QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK) | ||
214 | |||
215 | #define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \ | ||
216 | QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \ | ||
217 | QAT_COMN_CD_FLD_TYPE_MASK) | ||
218 | |||
219 | #define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \ | ||
220 | QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \ | ||
221 | QAT_COMN_PTR_TYPE_MASK) | ||
222 | |||
223 | #define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \ | ||
224 | QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \ | ||
225 | QAT_COMN_CD_FLD_TYPE_MASK) | ||
226 | |||
227 | #define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4 | ||
228 | #define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0 | ||
229 | #define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0 | ||
230 | #define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F | ||
231 | |||
232 | #define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \ | ||
233 | ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \ | ||
234 | >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) | ||
235 | |||
236 | #define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ | ||
237 | { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \ | ||
238 | & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ | ||
239 | ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ | ||
240 | & ICP_QAT_FW_COMN_NEXT_ID_MASK)); } | ||
241 | |||
242 | #define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \ | ||
243 | (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) | ||
244 | |||
245 | #define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \ | ||
246 | { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \ | ||
247 | & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ | ||
248 | ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); } | ||
249 | |||
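An illustrative use of the next/curr id packing: the cd_ctrl struct below is a stand-in (real cd_ctrl blocks are defined per service, not in this header), and the slice ids come from the icp_qat_fw_slice enum at the end of this file:

	struct fake_cd_ctrl { uint8_t next_curr_id; };	/* stand-in only */

	static void my_chain_cipher_then_auth(struct fake_cd_ctrl *ctrl)
	{
		ICP_QAT_FW_COMN_CURR_ID_SET(ctrl, ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(ctrl, ICP_QAT_FW_SLICE_AUTH);
		/* ctrl->next_curr_id == (2 << 4) | 1 == 0x21: run the
		 * cipher slice, then hand off to the auth slice */
	}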
250 | #define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7 | ||
251 | #define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1 | ||
252 | #define QAT_COMN_RESP_CMP_STATUS_BITPOS 5 | ||
253 | #define QAT_COMN_RESP_CMP_STATUS_MASK 0x1 | ||
254 | #define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4 | ||
255 | #define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1 | ||
256 | #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3 | ||
257 | #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1 | ||
258 | |||
259 | #define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \ | ||
260 | ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \ | ||
261 | QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \ | ||
262 | (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \ | ||
263 | QAT_COMN_RESP_CMP_STATUS_BITPOS) | \ | ||
264 | (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \ | ||
265 | QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \ | ||
266 | (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \ | ||
267 | QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS)) | ||
268 | |||
269 | #define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \ | ||
270 | QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \ | ||
271 | QAT_COMN_RESP_CRYPTO_STATUS_MASK) | ||
272 | |||
273 | #define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \ | ||
274 | QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \ | ||
275 | QAT_COMN_RESP_CMP_STATUS_MASK) | ||
276 | |||
277 | #define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \ | ||
278 | QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \ | ||
279 | QAT_COMN_RESP_XLAT_STATUS_MASK) | ||
280 | |||
281 | #define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \ | ||
282 | QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \ | ||
283 | QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) | ||
284 | |||
285 | #define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0 | ||
286 | #define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1 | ||
287 | #define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0 | ||
288 | #define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1 | ||
289 | #define ERR_CODE_NO_ERROR 0 | ||
290 | #define ERR_CODE_INVALID_BLOCK_TYPE -1 | ||
291 | #define ERR_CODE_NO_MATCH_ONES_COMP -2 | ||
292 | #define ERR_CODE_TOO_MANY_LEN_OR_DIS -3 | ||
293 | #define ERR_CODE_INCOMPLETE_LEN -4 | ||
294 | #define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5 | ||
295 | #define ERR_CODE_RPT_GT_SPEC_LEN -6 | ||
296 | #define ERR_CODE_INV_LIT_LEN_CODE_LEN -7 | ||
297 | #define ERR_CODE_INV_DIS_CODE_LEN -8 | ||
298 | #define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9 | ||
299 | #define ERR_CODE_DIS_TOO_FAR_BACK -10 | ||
300 | #define ERR_CODE_OVERFLOW_ERROR -11 | ||
301 | #define ERR_CODE_SOFT_ERROR -12 | ||
302 | #define ERR_CODE_FATAL_ERROR -13 | ||
303 | #define ERR_CODE_SSM_ERROR -14 | ||
304 | #define ERR_CODE_ENDPOINT_ERROR -15 | ||
305 | |||
306 | enum icp_qat_fw_slice { | ||
307 | ICP_QAT_FW_SLICE_NULL = 0, | ||
308 | ICP_QAT_FW_SLICE_CIPHER = 1, | ||
309 | ICP_QAT_FW_SLICE_AUTH = 2, | ||
310 | ICP_QAT_FW_SLICE_DRAM_RD = 3, | ||
311 | ICP_QAT_FW_SLICE_DRAM_WR = 4, | ||
312 | ICP_QAT_FW_SLICE_COMP = 5, | ||
313 | ICP_QAT_FW_SLICE_XLAT = 6, | ||
314 | ICP_QAT_FW_SLICE_DELIMITER | ||
315 | }; | ||
316 | #endif | ||
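A short sketch of how the common header flags compose; the resulting values follow directly from the macros above, while the helper itself is illustrative:

	static void my_fill_comn_hdr(struct icp_qat_fw_comn_req *req)
	{
		req->comn_hdr.hdr_flags =
			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
		/* == 1 << 7 == 0x80: only the valid bit set */

		req->comn_hdr.comn_req_flags =
			ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
						    QAT_COMN_PTR_TYPE_SGL);
		/* == (0 << 1) | 1 == 0x1: content descriptor referenced by
		 * 64-bit address, data buffers passed as scatter-gather lists */
	}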
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h new file mode 100644 index 000000000000..72a59faa9005 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h | |||
@@ -0,0 +1,131 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef _ICP_QAT_FW_INIT_ADMIN_H_ | ||
48 | #define _ICP_QAT_FW_INIT_ADMIN_H_ | ||
49 | |||
50 | #include "icp_qat_fw.h" | ||
51 | |||
52 | enum icp_qat_fw_init_admin_cmd_id { | ||
53 | ICP_QAT_FW_INIT_ME = 0, | ||
54 | ICP_QAT_FW_TRNG_ENABLE = 1, | ||
55 | ICP_QAT_FW_TRNG_DISABLE = 2, | ||
56 | ICP_QAT_FW_CONSTANTS_CFG = 3, | ||
57 | ICP_QAT_FW_STATUS_GET = 4, | ||
58 | ICP_QAT_FW_COUNTERS_GET = 5, | ||
59 | ICP_QAT_FW_LOOPBACK = 6, | ||
60 | ICP_QAT_FW_HEARTBEAT_SYNC = 7, | ||
61 | ICP_QAT_FW_HEARTBEAT_GET = 8 | ||
62 | }; | ||
63 | |||
64 | enum icp_qat_fw_init_admin_resp_status { | ||
65 | ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0, | ||
66 | ICP_QAT_FW_INIT_RESP_STATUS_FAIL | ||
67 | }; | ||
68 | |||
69 | struct icp_qat_fw_init_admin_req { | ||
70 | uint16_t init_cfg_sz; | ||
71 | uint8_t resrvd1; | ||
72 | uint8_t init_admin_cmd_id; | ||
73 | uint32_t resrvd2; | ||
74 | uint64_t opaque_data; | ||
75 | uint64_t init_cfg_ptr; | ||
76 | uint64_t resrvd3; | ||
77 | }; | ||
78 | |||
79 | struct icp_qat_fw_init_admin_resp_hdr { | ||
80 | uint8_t flags; | ||
81 | uint8_t resrvd1; | ||
82 | uint8_t status; | ||
83 | uint8_t init_admin_cmd_id; | ||
84 | }; | ||
85 | |||
86 | struct icp_qat_fw_init_admin_resp_pars { | ||
87 | union { | ||
88 | uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_4]; | ||
89 | struct { | ||
90 | uint32_t version_patch_num; | ||
91 | uint8_t context_id; | ||
92 | uint8_t ae_id; | ||
93 | uint16_t resrvd1; | ||
94 | uint64_t resrvd2; | ||
95 | } s1; | ||
96 | struct { | ||
97 | uint64_t req_rec_count; | ||
98 | uint64_t resp_sent_count; | ||
99 | } s2; | ||
100 | } u; | ||
101 | }; | ||
102 | |||
103 | struct icp_qat_fw_init_admin_resp { | ||
104 | struct icp_qat_fw_init_admin_resp_hdr init_resp_hdr; | ||
105 | union { | ||
106 | uint32_t resrvd2; | ||
107 | struct { | ||
108 | uint16_t version_minor_num; | ||
109 | uint16_t version_major_num; | ||
110 | } s; | ||
111 | } u; | ||
112 | uint64_t opaque_data; | ||
113 | struct icp_qat_fw_init_admin_resp_pars init_resp_pars; | ||
114 | }; | ||
115 | |||
116 | #define ICP_QAT_FW_COMN_HEARTBEAT_OK 0 | ||
117 | #define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1 | ||
118 | #define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0 | ||
119 | #define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1 | ||
120 | #define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE | ||
121 | #define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \ | ||
122 | ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags) | ||
123 | |||
124 | #define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \ | ||
125 | ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val) | ||
126 | |||
127 | #define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \ | ||
128 | QAT_FIELD_GET(flags, \ | ||
129 | ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \ | ||
130 | ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK) | ||
131 | #endif | ||
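A sketch of building an admin command with this layout; the helper name and cookie are assumptions, and the send path itself lives elsewhere in the driver:

	static void my_build_status_query(struct icp_qat_fw_init_admin_req *req,
					  uint64_t cookie)
	{
		memset(req, 0, sizeof(*req));
		req->init_admin_cmd_id = ICP_QAT_FW_STATUS_GET;
		req->opaque_data = cookie;	/* echoed back in the response */
		/* the matching icp_qat_fw_init_admin_resp carries the fw
		 * version in u.s plus the same opaque_data */
	}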
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h new file mode 100644 index 000000000000..c8d26697e8ea --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h | |||
@@ -0,0 +1,404 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef _ICP_QAT_FW_LA_H_ | ||
48 | #define _ICP_QAT_FW_LA_H_ | ||
49 | #include "icp_qat_fw.h" | ||
50 | |||
51 | enum icp_qat_fw_la_cmd_id { | ||
52 | ICP_QAT_FW_LA_CMD_CIPHER = 0, | ||
53 | ICP_QAT_FW_LA_CMD_AUTH = 1, | ||
54 | ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2, | ||
55 | ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3, | ||
56 | ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4, | ||
57 | ICP_QAT_FW_LA_CMD_TRNG_TEST = 5, | ||
58 | ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6, | ||
59 | ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7, | ||
60 | ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8, | ||
61 | ICP_QAT_FW_LA_CMD_MGF1 = 9, | ||
62 | ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10, | ||
63 | ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11, | ||
64 | ICP_QAT_FW_LA_CMD_DELIMITER = 12 | ||
65 | }; | ||
66 | |||
67 | #define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK | ||
68 | #define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR | ||
69 | #define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK | ||
70 | #define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR | ||
71 | |||
72 | struct icp_qat_fw_la_bulk_req { | ||
73 | struct icp_qat_fw_comn_req_hdr comn_hdr; | ||
74 | struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars; | ||
75 | struct icp_qat_fw_comn_req_mid comn_mid; | ||
76 | struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars; | ||
77 | struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl; | ||
78 | }; | ||
79 | |||
80 | #define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1 | ||
81 | #define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0 | ||
82 | #define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12 | ||
83 | #define ICP_QAT_FW_LA_ZUC_3G_PROTO 1 | ||
84 | #define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1 | ||
85 | #define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11 | ||
86 | #define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1 | ||
87 | #define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1 | ||
88 | #define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0 | ||
89 | #define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10 | ||
90 | #define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1 | ||
91 | #define ICP_QAT_FW_LA_SNOW_3G_PROTO 4 | ||
92 | #define ICP_QAT_FW_LA_GCM_PROTO 2 | ||
93 | #define ICP_QAT_FW_LA_CCM_PROTO 1 | ||
94 | #define ICP_QAT_FW_LA_NO_PROTO 0 | ||
95 | #define QAT_LA_PROTO_BITPOS 7 | ||
96 | #define QAT_LA_PROTO_MASK 0x7 | ||
97 | #define ICP_QAT_FW_LA_CMP_AUTH_RES 1 | ||
98 | #define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0 | ||
99 | #define QAT_LA_CMP_AUTH_RES_BITPOS 6 | ||
100 | #define QAT_LA_CMP_AUTH_RES_MASK 0x1 | ||
101 | #define ICP_QAT_FW_LA_RET_AUTH_RES 1 | ||
102 | #define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0 | ||
103 | #define QAT_LA_RET_AUTH_RES_BITPOS 5 | ||
104 | #define QAT_LA_RET_AUTH_RES_MASK 0x1 | ||
105 | #define ICP_QAT_FW_LA_UPDATE_STATE 1 | ||
106 | #define ICP_QAT_FW_LA_NO_UPDATE_STATE 0 | ||
107 | #define QAT_LA_UPDATE_STATE_BITPOS 4 | ||
108 | #define QAT_LA_UPDATE_STATE_MASK 0x1 | ||
109 | #define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0 | ||
110 | #define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1 | ||
111 | #define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3 | ||
112 | #define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1 | ||
113 | #define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0 | ||
114 | #define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1 | ||
115 | #define QAT_LA_CIPH_IV_FLD_BITPOS 2 | ||
116 | #define QAT_LA_CIPH_IV_FLD_MASK 0x1 | ||
117 | #define ICP_QAT_FW_LA_PARTIAL_NONE 0 | ||
118 | #define ICP_QAT_FW_LA_PARTIAL_START 1 | ||
119 | #define ICP_QAT_FW_LA_PARTIAL_MID 3 | ||
120 | #define ICP_QAT_FW_LA_PARTIAL_END 2 | ||
121 | #define QAT_LA_PARTIAL_BITPOS 0 | ||
122 | #define QAT_LA_PARTIAL_MASK 0x3 | ||
123 | #define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \ | ||
124 | cmp_auth, ret_auth, update_state, \ | ||
125 | ciph_iv, ciphcfg, partial) \ | ||
126 | (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \ | ||
127 | QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \ | ||
128 | ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \ | ||
129 | QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \ | ||
130 | ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \ | ||
131 | QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \ | ||
132 | ((proto & QAT_LA_PROTO_MASK) << \ | ||
133 | QAT_LA_PROTO_BITPOS) | \ | ||
134 | ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \ | ||
135 | QAT_LA_CMP_AUTH_RES_BITPOS) | \ | ||
136 | ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \ | ||
137 | QAT_LA_RET_AUTH_RES_BITPOS) | \ | ||
138 | ((update_state & QAT_LA_UPDATE_STATE_MASK) << \ | ||
139 | QAT_LA_UPDATE_STATE_BITPOS) | \ | ||
140 | ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \ | ||
141 | QAT_LA_CIPH_IV_FLD_BITPOS) | \ | ||
142 | ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \ | ||
143 | QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \ | ||
144 | ((partial & QAT_LA_PARTIAL_MASK) << \ | ||
145 | QAT_LA_PARTIAL_BITPOS)) | ||
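
The builder above packs every per-request option into a single flags word, masking each argument to its field width and shifting it to its bit position; note that the third argument, auth_rslt, actually lands in the digest-in-buffer field at bit 10. A minimal sketch of a constant flags word for a GCM request (values chosen for illustration only, not taken from any real request):

    /* Illustrative only: GCM protocol, 12-octet IV, 16-byte inline IV
     * field, return but do not compare the auth result, no partials. */
    static const uint16_t la_flags = ICP_QAT_FW_LA_FLAGS_BUILD(
            0,                                      /* zuc_proto */
            ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS,     /* gcm_iv_len */
            ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER,      /* auth_rslt (digest-in-buffer bit) */
            ICP_QAT_FW_LA_GCM_PROTO,                /* proto */
            ICP_QAT_FW_LA_NO_CMP_AUTH_RES,          /* cmp_auth */
            ICP_QAT_FW_LA_RET_AUTH_RES,             /* ret_auth */
            ICP_QAT_FW_LA_NO_UPDATE_STATE,          /* update_state */
            ICP_QAT_FW_CIPH_IV_16BYTE_DATA,         /* ciph_iv */
            ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP, /* ciphcfg */
            ICP_QAT_FW_LA_PARTIAL_NONE);            /* partial */
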
146 | |||
147 | #define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \ | ||
148 | QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \ | ||
149 | QAT_LA_CIPH_IV_FLD_MASK) | ||
150 | |||
151 | #define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \ | ||
152 | QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \ | ||
153 | QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) | ||
154 | |||
155 | #define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \ | ||
156 | QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \ | ||
157 | QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) | ||
158 | |||
159 | #define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \ | ||
160 | QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \ | ||
161 | QAT_LA_GCM_IV_LEN_FLAG_MASK) | ||
162 | |||
163 | #define ICP_QAT_FW_LA_PROTO_GET(flags) \ | ||
164 | QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK) | ||
165 | |||
166 | #define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \ | ||
167 | QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \ | ||
168 | QAT_LA_CMP_AUTH_RES_MASK) | ||
169 | |||
170 | #define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \ | ||
171 | QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \ | ||
172 | QAT_LA_RET_AUTH_RES_MASK) | ||
173 | |||
174 | #define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \ | ||
175 | QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ | ||
176 | QAT_LA_DIGEST_IN_BUFFER_MASK) | ||
177 | |||
178 | #define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \ | ||
179 | QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \ | ||
180 | QAT_LA_UPDATE_STATE_MASK) | ||
181 | |||
182 | #define ICP_QAT_FW_LA_PARTIAL_GET(flags) \ | ||
183 | QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \ | ||
184 | QAT_LA_PARTIAL_MASK) | ||
185 | |||
186 | #define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \ | ||
187 | QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \ | ||
188 | QAT_LA_CIPH_IV_FLD_MASK) | ||
189 | |||
190 | #define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \ | ||
191 | QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \ | ||
192 | QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) | ||
193 | |||
194 | #define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \ | ||
195 | QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \ | ||
196 | QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) | ||
197 | |||
198 | #define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \ | ||
199 | QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \ | ||
200 | QAT_LA_GCM_IV_LEN_FLAG_MASK) | ||
201 | |||
202 | #define ICP_QAT_FW_LA_PROTO_SET(flags, val) \ | ||
203 | QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \ | ||
204 | QAT_LA_PROTO_MASK) | ||
205 | |||
206 | #define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \ | ||
207 | QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \ | ||
208 | QAT_LA_CMP_AUTH_RES_MASK) | ||
209 | |||
210 | #define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \ | ||
211 | QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \ | ||
212 | QAT_LA_RET_AUTH_RES_MASK) | ||
213 | |||
214 | #define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \ | ||
215 | QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ | ||
216 | QAT_LA_DIGEST_IN_BUFFER_MASK) | ||
217 | |||
218 | #define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \ | ||
219 | QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \ | ||
220 | QAT_LA_UPDATE_STATE_MASK) | ||
221 | |||
222 | #define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \ | ||
223 | QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \ | ||
224 | QAT_LA_PARTIAL_MASK) | ||
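
These accessors all funnel through QAT_FIELD_GET/QAT_FIELD_SET from icp_qat_fw.h (not part of this hunk); assuming those are the usual mask-and-shift read-modify-write helpers, a round trip looks like this sketch:

    static uint16_t la_flags_roundtrip(void)
    {
            uint16_t flags = 0;

            ICP_QAT_FW_LA_PROTO_SET(flags, ICP_QAT_FW_LA_CCM_PROTO);
            ICP_QAT_FW_LA_RET_AUTH_SET(flags, ICP_QAT_FW_LA_RET_AUTH_RES);

            /* ICP_QAT_FW_LA_PROTO_GET(flags) now returns ICP_QAT_FW_LA_CCM_PROTO */
            return flags;
    }
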
225 | |||
226 | struct icp_qat_fw_cipher_req_hdr_cd_pars { | ||
227 | union { | ||
228 | struct { | ||
229 | uint64_t content_desc_addr; | ||
230 | uint16_t content_desc_resrvd1; | ||
231 | uint8_t content_desc_params_sz; | ||
232 | uint8_t content_desc_hdr_resrvd2; | ||
233 | uint32_t content_desc_resrvd3; | ||
234 | } s; | ||
235 | struct { | ||
236 | uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; | ||
237 | } s1; | ||
238 | } u; | ||
239 | }; | ||
240 | |||
241 | struct icp_qat_fw_cipher_auth_req_hdr_cd_pars { | ||
242 | union { | ||
243 | struct { | ||
244 | uint64_t content_desc_addr; | ||
245 | uint16_t content_desc_resrvd1; | ||
246 | uint8_t content_desc_params_sz; | ||
247 | uint8_t content_desc_hdr_resrvd2; | ||
248 | uint32_t content_desc_resrvd3; | ||
249 | } s; | ||
250 | struct { | ||
251 | uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; | ||
252 | } s1; | ||
253 | } u; | ||
254 | }; | ||
255 | |||
256 | struct icp_qat_fw_cipher_cd_ctrl_hdr { | ||
257 | uint8_t cipher_state_sz; | ||
258 | uint8_t cipher_key_sz; | ||
259 | uint8_t cipher_cfg_offset; | ||
260 | uint8_t next_curr_id; | ||
261 | uint8_t cipher_padding_sz; | ||
262 | uint8_t resrvd1; | ||
263 | uint16_t resrvd2; | ||
264 | uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3]; | ||
265 | }; | ||
266 | |||
267 | struct icp_qat_fw_auth_cd_ctrl_hdr { | ||
268 | uint32_t resrvd1; | ||
269 | uint8_t resrvd2; | ||
270 | uint8_t hash_flags; | ||
271 | uint8_t hash_cfg_offset; | ||
272 | uint8_t next_curr_id; | ||
273 | uint8_t resrvd3; | ||
274 | uint8_t outer_prefix_sz; | ||
275 | uint8_t final_sz; | ||
276 | uint8_t inner_res_sz; | ||
277 | uint8_t resrvd4; | ||
278 | uint8_t inner_state1_sz; | ||
279 | uint8_t inner_state2_offset; | ||
280 | uint8_t inner_state2_sz; | ||
281 | uint8_t outer_config_offset; | ||
282 | uint8_t outer_state1_sz; | ||
283 | uint8_t outer_res_sz; | ||
284 | uint8_t outer_prefix_offset; | ||
285 | }; | ||
286 | |||
287 | struct icp_qat_fw_cipher_auth_cd_ctrl_hdr { | ||
288 | uint8_t cipher_state_sz; | ||
289 | uint8_t cipher_key_sz; | ||
290 | uint8_t cipher_cfg_offset; | ||
291 | uint8_t next_curr_id_cipher; | ||
292 | uint8_t cipher_padding_sz; | ||
293 | uint8_t hash_flags; | ||
294 | uint8_t hash_cfg_offset; | ||
295 | uint8_t next_curr_id_auth; | ||
296 | uint8_t resrvd1; | ||
297 | uint8_t outer_prefix_sz; | ||
298 | uint8_t final_sz; | ||
299 | uint8_t inner_res_sz; | ||
300 | uint8_t resrvd2; | ||
301 | uint8_t inner_state1_sz; | ||
302 | uint8_t inner_state2_offset; | ||
303 | uint8_t inner_state2_sz; | ||
304 | uint8_t outer_config_offset; | ||
305 | uint8_t outer_state1_sz; | ||
306 | uint8_t outer_res_sz; | ||
307 | uint8_t outer_prefix_offset; | ||
308 | }; | ||
309 | |||
310 | #define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1 | ||
311 | #define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0 | ||
312 | #define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240 | ||
313 | #define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \ | ||
314 | (sizeof(struct icp_qat_fw_la_cipher_req_params)) | ||
315 | #define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0) | ||
316 | |||
317 | struct icp_qat_fw_la_cipher_req_params { | ||
318 | uint32_t cipher_offset; | ||
319 | uint32_t cipher_length; | ||
320 | union { | ||
321 | uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4]; | ||
322 | struct { | ||
323 | uint64_t cipher_IV_ptr; | ||
324 | uint64_t resrvd1; | ||
325 | } s; | ||
326 | } u; | ||
327 | }; | ||
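
The IV union mirrors the ciph_iv flag in the request header: the firmware reads either the 16 inline IV bytes or a 64-bit DMA pointer, and the flag says which arm is live. A hedged sketch of the pointer form (the helper name is hypothetical):

    /* Hypothetical helper: use the pointer arm of the IV union; the
     * request flags must then carry ICP_QAT_FW_CIPH_IV_64BIT_PTR. */
    static void qat_req_set_iv_ptr(struct icp_qat_fw_la_cipher_req_params *p,
                                   uint64_t iv_dma_addr)
    {
            p->u.s.cipher_IV_ptr = iv_dma_addr;
    }
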
328 | |||
329 | struct icp_qat_fw_la_auth_req_params { | ||
330 | uint32_t auth_off; | ||
331 | uint32_t auth_len; | ||
332 | union { | ||
333 | uint64_t auth_partial_st_prefix; | ||
334 | uint64_t aad_adr; | ||
335 | } u1; | ||
336 | uint64_t auth_res_addr; | ||
337 | union { | ||
338 | uint8_t inner_prefix_sz; | ||
339 | uint8_t aad_sz; | ||
340 | } u2; | ||
341 | uint8_t resrvd1; | ||
342 | uint8_t hash_state_sz; | ||
343 | uint8_t auth_res_sz; | ||
344 | } __packed; | ||
345 | |||
346 | struct icp_qat_fw_la_auth_req_params_resrvd_flds { | ||
347 | uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6]; | ||
348 | union { | ||
349 | uint8_t inner_prefix_sz; | ||
350 | uint8_t aad_sz; | ||
351 | } u2; | ||
352 | uint8_t resrvd1; | ||
353 | uint16_t resrvd2; | ||
354 | }; | ||
355 | |||
356 | struct icp_qat_fw_la_resp { | ||
357 | struct icp_qat_fw_comn_resp_hdr comn_resp; | ||
358 | uint64_t opaque_data; | ||
359 | uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; | ||
360 | }; | ||
361 | |||
362 | #define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \ | ||
363 | ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \ | ||
364 | ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) | ||
365 | |||
366 | #define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ | ||
367 | { (cd_ctrl_hdr_t)->next_curr_id_cipher = \ | ||
368 | ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ | ||
369 | & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ | ||
370 | ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ | ||
371 | & ICP_QAT_FW_COMN_NEXT_ID_MASK)) } | ||
372 | |||
373 | #define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \ | ||
374 | (((cd_ctrl_hdr_t)->next_curr_id_cipher) \ | ||
375 | & ICP_QAT_FW_COMN_CURR_ID_MASK) | ||
376 | |||
377 | #define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \ | ||
378 | { (cd_ctrl_hdr_t)->next_curr_id_cipher = \ | ||
379 | ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ | ||
380 | & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ | ||
381 | ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) } | ||
382 | |||
383 | #define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \ | ||
384 | ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \ | ||
385 | >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) | ||
386 | |||
387 | #define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ | ||
388 | { (cd_ctrl_hdr_t)->next_curr_id_auth = \ | ||
389 | ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ | ||
390 | & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ | ||
391 | ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ | ||
392 | & ICP_QAT_FW_COMN_NEXT_ID_MASK)) } | ||
393 | |||
394 | #define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \ | ||
395 | (((cd_ctrl_hdr_t)->next_curr_id_auth) \ | ||
396 | & ICP_QAT_FW_COMN_CURR_ID_MASK) | ||
397 | |||
398 | #define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \ | ||
399 | { (cd_ctrl_hdr_t)->next_curr_id_auth = \ | ||
400 | ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ | ||
401 | & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ | ||
402 | ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) } | ||
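
Each next_curr_id byte packs two slice IDs: the current slice in the low bits and the next slice above it (the masks and bit positions come from icp_qat_fw.h). A sketch of chaining the cipher slice into the auth slice for an encrypt descriptor, assuming the ICP_QAT_FW_SLICE_* IDs defined in icp_qat_fw.h:

    static void chain_cipher_then_auth(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd)
    {
            ICP_QAT_FW_CIPHER_CURR_ID_SET(cd, ICP_QAT_FW_SLICE_CIPHER);
            ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd, ICP_QAT_FW_SLICE_AUTH);
            ICP_QAT_FW_AUTH_CURR_ID_SET(cd, ICP_QAT_FW_SLICE_AUTH);
            ICP_QAT_FW_AUTH_NEXT_ID_SET(cd, ICP_QAT_FW_SLICE_DRAM_WR);
    }
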
403 | |||
404 | #endif | ||
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h new file mode 100644 index 000000000000..5e1aa40c0404 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h | |||
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef __ICP_QAT_FW_LOADER_HANDLE_H__ | ||
48 | #define __ICP_QAT_FW_LOADER_HANDLE_H__ | ||
49 | #include "icp_qat_uclo.h" | ||
50 | |||
51 | struct icp_qat_fw_loader_ae_data { | ||
52 | unsigned int state; | ||
53 | unsigned int ustore_size; | ||
54 | unsigned int free_addr; | ||
55 | unsigned int free_size; | ||
56 | unsigned int live_ctx_mask; | ||
57 | }; | ||
58 | |||
59 | struct icp_qat_fw_loader_hal_handle { | ||
60 | struct icp_qat_fw_loader_ae_data aes[ICP_QAT_UCLO_MAX_AE]; | ||
61 | unsigned int ae_mask; | ||
62 | unsigned int slice_mask; | ||
63 | unsigned int revision_id; | ||
64 | unsigned int ae_max_num; | ||
65 | unsigned int upc_mask; | ||
66 | unsigned int max_ustore; | ||
67 | }; | ||
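
ae_mask is a bitmap of the engines that are actually present, with ae_max_num bounding the scan. A minimal iteration sketch:

    static void hal_for_each_ae(struct icp_qat_fw_loader_hal_handle *hal)
    {
            unsigned int ae;

            for (ae = 0; ae < hal->ae_max_num; ae++) {
                    if (!(hal->ae_mask & (1 << ae)))
                            continue;
                    /* hal->aes[ae] describes this engine's ustore and state */
            }
    }
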
68 | |||
69 | struct icp_qat_fw_loader_handle { | ||
70 | struct icp_qat_fw_loader_hal_handle *hal_handle; | ||
71 | void *obj_handle; | ||
72 | void __iomem *hal_sram_addr_v; | ||
73 | void __iomem *hal_cap_g_ctl_csr_addr_v; | ||
74 | void __iomem *hal_cap_ae_xfer_csr_addr_v; | ||
75 | void __iomem *hal_cap_ae_local_csr_addr_v; | ||
76 | void __iomem *hal_ep_csr_addr_v; | ||
77 | }; | ||
78 | #endif | ||
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/qat/qat_common/icp_qat_hal.h new file mode 100644 index 000000000000..85b6d241ea82 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_hal.h | |||
@@ -0,0 +1,125 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef __ICP_QAT_HAL_H | ||
48 | #define __ICP_QAT_HAL_H | ||
49 | #include "icp_qat_fw_loader_handle.h" | ||
50 | |||
51 | enum hal_global_csr { | ||
52 | MISC_CONTROL = 0x04, | ||
53 | ICP_RESET = 0x0c, | ||
54 | ICP_GLOBAL_CLK_ENABLE = 0x50 | ||
55 | }; | ||
56 | |||
57 | enum hal_ae_csr { | ||
58 | USTORE_ADDRESS = 0x000, | ||
59 | USTORE_DATA_LOWER = 0x004, | ||
60 | USTORE_DATA_UPPER = 0x008, | ||
61 | ALU_OUT = 0x010, | ||
62 | CTX_ARB_CNTL = 0x014, | ||
63 | CTX_ENABLES = 0x018, | ||
64 | CC_ENABLE = 0x01c, | ||
65 | CSR_CTX_POINTER = 0x020, | ||
66 | CTX_STS_INDIRECT = 0x040, | ||
67 | ACTIVE_CTX_STATUS = 0x044, | ||
68 | CTX_SIG_EVENTS_INDIRECT = 0x048, | ||
69 | CTX_SIG_EVENTS_ACTIVE = 0x04c, | ||
70 | CTX_WAKEUP_EVENTS_INDIRECT = 0x050, | ||
71 | LM_ADDR_0_INDIRECT = 0x060, | ||
72 | LM_ADDR_1_INDIRECT = 0x068, | ||
73 | INDIRECT_LM_ADDR_0_BYTE_INDEX = 0x0e0, | ||
74 | INDIRECT_LM_ADDR_1_BYTE_INDEX = 0x0e8, | ||
75 | FUTURE_COUNT_SIGNAL_INDIRECT = 0x078, | ||
76 | TIMESTAMP_LOW = 0x0c0, | ||
77 | TIMESTAMP_HIGH = 0x0c4, | ||
78 | PROFILE_COUNT = 0x144, | ||
79 | SIGNATURE_ENABLE = 0x150, | ||
80 | AE_MISC_CONTROL = 0x160, | ||
81 | LOCAL_CSR_STATUS = 0x180, | ||
82 | }; | ||
83 | |||
84 | #define UA_ECS (0x1 << 31) | ||
85 | #define ACS_ABO_BITPOS 31 | ||
86 | #define ACS_ACNO 0x7 | ||
87 | #define CE_ENABLE_BITPOS 0x8 | ||
88 | #define CE_LMADDR_0_GLOBAL_BITPOS 16 | ||
89 | #define CE_LMADDR_1_GLOBAL_BITPOS 17 | ||
90 | #define CE_NN_MODE_BITPOS 20 | ||
91 | #define CE_REG_PAR_ERR_BITPOS 25 | ||
92 | #define CE_BREAKPOINT_BITPOS 27 | ||
93 | #define CE_CNTL_STORE_PARITY_ERROR_BITPOS 29 | ||
94 | #define CE_INUSE_CONTEXTS_BITPOS 31 | ||
95 | #define CE_NN_MODE (0x1 << CE_NN_MODE_BITPOS) | ||
96 | #define CE_INUSE_CONTEXTS (0x1 << CE_INUSE_CONTEXTS_BITPOS) | ||
97 | #define XCWE_VOLUNTARY (0x1) | ||
98 | #define LCS_STATUS (0x1) | ||
99 | #define MMC_SHARE_CS_BITPOS 2 | ||
100 | #define GLOBAL_CSR 0xA00 | ||
101 | |||
102 | #define SET_CAP_CSR(handle, csr, val) \ | ||
103 | ADF_CSR_WR(handle->hal_cap_g_ctl_csr_addr_v, csr, val) | ||
104 | #define GET_CAP_CSR(handle, csr) \ | ||
105 | ADF_CSR_RD(handle->hal_cap_g_ctl_csr_addr_v, csr) | ||
106 | #define SET_GLB_CSR(handle, csr, val) SET_CAP_CSR(handle, csr + GLOBAL_CSR, val) | ||
107 | #define GET_GLB_CSR(handle, csr) GET_CAP_CSR(handle, GLOBAL_CSR + csr) | ||
108 | #define AE_CSR(handle, ae) \ | ||
109 | (handle->hal_cap_ae_local_csr_addr_v + \ | ||
110 | ((ae & handle->hal_handle->ae_mask) << 12)) | ||
111 | #define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & csr)) | ||
112 | #define SET_AE_CSR(handle, ae, csr, val) \ | ||
113 | ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val) | ||
114 | #define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0) | ||
115 | #define AE_XFER(handle, ae) \ | ||
116 | (handle->hal_cap_ae_xfer_csr_addr_v + \ | ||
117 | ((ae & handle->hal_handle->ae_mask) << 12)) | ||
118 | #define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \ | ||
119 | ((reg & 0xff) << 2)) | ||
120 | #define SET_AE_XFER(handle, ae, reg, val) \ | ||
121 | ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val) | ||
122 | #define SRAM_WRITE(handle, addr, val) \ | ||
123 | ADF_CSR_WR(handle->hal_sram_addr_v, addr, val) | ||
124 | #define SRAM_READ(handle, addr) ADF_CSR_RD(handle->hal_sram_addr_v, addr) | ||
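
Each AE owns a 4 KB window of local CSRs (the << 12 in AE_CSR), and the register offset is clipped to that window by the 0x3ff mask. ADF_CSR_RD/ADF_CSR_WR are the driver's MMIO accessors defined elsewhere in this patch set; assuming those, a per-AE read-modify-write looks like:

    /* Sketch: enable next-neighbour mode on one engine. */
    static void hal_enable_nn_mode(struct icp_qat_fw_loader_handle *handle,
                                   unsigned char ae)
    {
            unsigned int ctx_enables = GET_AE_CSR(handle, ae, CTX_ENABLES);

            SET_AE_CSR(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
    }
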
125 | #endif | ||
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h new file mode 100644 index 000000000000..5031f8c10d75 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h | |||
@@ -0,0 +1,305 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef _ICP_QAT_HW_H_ | ||
48 | #define _ICP_QAT_HW_H_ | ||
49 | |||
50 | enum icp_qat_hw_ae_id { | ||
51 | ICP_QAT_HW_AE_0 = 0, | ||
52 | ICP_QAT_HW_AE_1 = 1, | ||
53 | ICP_QAT_HW_AE_2 = 2, | ||
54 | ICP_QAT_HW_AE_3 = 3, | ||
55 | ICP_QAT_HW_AE_4 = 4, | ||
56 | ICP_QAT_HW_AE_5 = 5, | ||
57 | ICP_QAT_HW_AE_6 = 6, | ||
58 | ICP_QAT_HW_AE_7 = 7, | ||
59 | ICP_QAT_HW_AE_8 = 8, | ||
60 | ICP_QAT_HW_AE_9 = 9, | ||
61 | ICP_QAT_HW_AE_10 = 10, | ||
62 | ICP_QAT_HW_AE_11 = 11, | ||
63 | ICP_QAT_HW_AE_DELIMITER = 12 | ||
64 | }; | ||
65 | |||
66 | enum icp_qat_hw_qat_id { | ||
67 | ICP_QAT_HW_QAT_0 = 0, | ||
68 | ICP_QAT_HW_QAT_1 = 1, | ||
69 | ICP_QAT_HW_QAT_2 = 2, | ||
70 | ICP_QAT_HW_QAT_3 = 3, | ||
71 | ICP_QAT_HW_QAT_4 = 4, | ||
72 | ICP_QAT_HW_QAT_5 = 5, | ||
73 | ICP_QAT_HW_QAT_DELIMITER = 6 | ||
74 | }; | ||
75 | |||
76 | enum icp_qat_hw_auth_algo { | ||
77 | ICP_QAT_HW_AUTH_ALGO_NULL = 0, | ||
78 | ICP_QAT_HW_AUTH_ALGO_SHA1 = 1, | ||
79 | ICP_QAT_HW_AUTH_ALGO_MD5 = 2, | ||
80 | ICP_QAT_HW_AUTH_ALGO_SHA224 = 3, | ||
81 | ICP_QAT_HW_AUTH_ALGO_SHA256 = 4, | ||
82 | ICP_QAT_HW_AUTH_ALGO_SHA384 = 5, | ||
83 | ICP_QAT_HW_AUTH_ALGO_SHA512 = 6, | ||
84 | ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7, | ||
85 | ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8, | ||
86 | ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9, | ||
87 | ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10, | ||
88 | ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11, | ||
89 | ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12, | ||
90 | ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13, | ||
91 | ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14, | ||
92 | ICP_QAT_HW_AUTH_RESERVED_1 = 15, | ||
93 | ICP_QAT_HW_AUTH_RESERVED_2 = 16, | ||
94 | ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17, | ||
95 | ICP_QAT_HW_AUTH_RESERVED_3 = 18, | ||
96 | ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19, | ||
97 | ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20 | ||
98 | }; | ||
99 | |||
100 | enum icp_qat_hw_auth_mode { | ||
101 | ICP_QAT_HW_AUTH_MODE0 = 0, | ||
102 | ICP_QAT_HW_AUTH_MODE1 = 1, | ||
103 | ICP_QAT_HW_AUTH_MODE2 = 2, | ||
104 | ICP_QAT_HW_AUTH_MODE_DELIMITER = 3 | ||
105 | }; | ||
106 | |||
107 | struct icp_qat_hw_auth_config { | ||
108 | uint32_t config; | ||
109 | uint32_t reserved; | ||
110 | }; | ||
111 | |||
112 | #define QAT_AUTH_MODE_BITPOS 4 | ||
113 | #define QAT_AUTH_MODE_MASK 0xF | ||
114 | #define QAT_AUTH_ALGO_BITPOS 0 | ||
115 | #define QAT_AUTH_ALGO_MASK 0xF | ||
116 | #define QAT_AUTH_CMP_BITPOS 8 | ||
117 | #define QAT_AUTH_CMP_MASK 0x7F | ||
118 | #define QAT_AUTH_SHA3_PADDING_BITPOS 16 | ||
119 | #define QAT_AUTH_SHA3_PADDING_MASK 0x1 | ||
120 | #define QAT_AUTH_ALGO_SHA3_BITPOS 22 | ||
121 | #define QAT_AUTH_ALGO_SHA3_MASK 0x3 | ||
122 | #define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \ | ||
123 | (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \ | ||
124 | ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \ | ||
125 | (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \ | ||
126 | QAT_AUTH_ALGO_SHA3_BITPOS) | \ | ||
127 | (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \ | ||
128 | (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \ | ||
129 | & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \ | ||
130 | ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS)) | ||
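
The builder packs mode, algorithm and comparison length into one 32-bit word; SHA-3 algorithms also set the dedicated SHA-3 field (algo >> 4) and the padding bit. An illustrative constant for HMAC-style SHA256:

    /* Illustrative: mode-1 SHA256 comparing the full 32-byte digest. */
    static const uint32_t auth_cfg =
            ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                         ICP_QAT_HW_AUTH_ALGO_SHA256,
                                         32 /* SHA256 digest size */);
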
131 | |||
132 | struct icp_qat_hw_auth_counter { | ||
133 | __be32 counter; | ||
134 | uint32_t reserved; | ||
135 | }; | ||
136 | |||
137 | #define QAT_AUTH_COUNT_MASK 0xFFFFFFFF | ||
138 | #define QAT_AUTH_COUNT_BITPOS 0 | ||
139 | #define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \ | ||
140 | (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS) | ||
141 | |||
142 | struct icp_qat_hw_auth_setup { | ||
143 | struct icp_qat_hw_auth_config auth_config; | ||
144 | struct icp_qat_hw_auth_counter auth_counter; | ||
145 | }; | ||
146 | |||
147 | #define QAT_HW_DEFAULT_ALIGNMENT 8 | ||
148 | #define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & ~((n) - 1)) | ||
149 | #define ICP_QAT_HW_NULL_STATE1_SZ 32 | ||
150 | #define ICP_QAT_HW_MD5_STATE1_SZ 16 | ||
151 | #define ICP_QAT_HW_SHA1_STATE1_SZ 20 | ||
152 | #define ICP_QAT_HW_SHA224_STATE1_SZ 32 | ||
153 | #define ICP_QAT_HW_SHA256_STATE1_SZ 32 | ||
154 | #define ICP_QAT_HW_SHA3_256_STATE1_SZ 32 | ||
155 | #define ICP_QAT_HW_SHA384_STATE1_SZ 64 | ||
156 | #define ICP_QAT_HW_SHA512_STATE1_SZ 64 | ||
157 | #define ICP_QAT_HW_SHA3_512_STATE1_SZ 64 | ||
158 | #define ICP_QAT_HW_SHA3_224_STATE1_SZ 28 | ||
159 | #define ICP_QAT_HW_SHA3_384_STATE1_SZ 48 | ||
160 | #define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16 | ||
161 | #define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16 | ||
162 | #define ICP_QAT_HW_AES_F9_STATE1_SZ 32 | ||
163 | #define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16 | ||
164 | #define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16 | ||
165 | #define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8 | ||
166 | #define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8 | ||
167 | #define ICP_QAT_HW_NULL_STATE2_SZ 32 | ||
168 | #define ICP_QAT_HW_MD5_STATE2_SZ 16 | ||
169 | #define ICP_QAT_HW_SHA1_STATE2_SZ 20 | ||
170 | #define ICP_QAT_HW_SHA224_STATE2_SZ 32 | ||
171 | #define ICP_QAT_HW_SHA256_STATE2_SZ 32 | ||
172 | #define ICP_QAT_HW_SHA3_256_STATE2_SZ 0 | ||
173 | #define ICP_QAT_HW_SHA384_STATE2_SZ 64 | ||
174 | #define ICP_QAT_HW_SHA512_STATE2_SZ 64 | ||
175 | #define ICP_QAT_HW_SHA3_512_STATE2_SZ 0 | ||
176 | #define ICP_QAT_HW_SHA3_224_STATE2_SZ 0 | ||
177 | #define ICP_QAT_HW_SHA3_384_STATE2_SZ 0 | ||
178 | #define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16 | ||
179 | #define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16 | ||
180 | #define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16 | ||
181 | #define ICP_QAT_HW_F9_IK_SZ 16 | ||
182 | #define ICP_QAT_HW_F9_FK_SZ 16 | ||
183 | #define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \ | ||
184 | ICP_QAT_HW_F9_FK_SZ) | ||
185 | #define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ | ||
186 | #define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24 | ||
187 | #define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32 | ||
188 | #define ICP_QAT_HW_GALOIS_H_SZ 16 | ||
189 | #define ICP_QAT_HW_GALOIS_LEN_A_SZ 8 | ||
190 | #define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16 | ||
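
QAT_HW_ROUND_UP above is the usual power-of-two round-up, used to pad odd state sizes to the 8-byte default alignment. For example:

    /* Illustrative: SHA1's 20-byte state1 pads to 24 bytes. */
    static const unsigned int sha1_state1_padded =
            QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ, QAT_HW_DEFAULT_ALIGNMENT);
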
191 | |||
192 | struct icp_qat_hw_auth_sha512 { | ||
193 | struct icp_qat_hw_auth_setup inner_setup; | ||
194 | uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ]; | ||
195 | struct icp_qat_hw_auth_setup outer_setup; | ||
196 | uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ]; | ||
197 | }; | ||
198 | |||
199 | struct icp_qat_hw_auth_algo_blk { | ||
200 | struct icp_qat_hw_auth_sha512 sha; | ||
201 | }; | ||
202 | |||
203 | #define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0 | ||
204 | #define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF | ||
205 | |||
206 | enum icp_qat_hw_cipher_algo { | ||
207 | ICP_QAT_HW_CIPHER_ALGO_NULL = 0, | ||
208 | ICP_QAT_HW_CIPHER_ALGO_DES = 1, | ||
209 | ICP_QAT_HW_CIPHER_ALGO_3DES = 2, | ||
210 | ICP_QAT_HW_CIPHER_ALGO_AES128 = 3, | ||
211 | ICP_QAT_HW_CIPHER_ALGO_AES192 = 4, | ||
212 | ICP_QAT_HW_CIPHER_ALGO_AES256 = 5, | ||
213 | ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6, | ||
214 | ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7, | ||
215 | ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8, | ||
216 | ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9, | ||
217 | ICP_QAT_HW_CIPHER_DELIMITER = 10 | ||
218 | }; | ||
219 | |||
220 | enum icp_qat_hw_cipher_mode { | ||
221 | ICP_QAT_HW_CIPHER_ECB_MODE = 0, | ||
222 | ICP_QAT_HW_CIPHER_CBC_MODE = 1, | ||
223 | ICP_QAT_HW_CIPHER_CTR_MODE = 2, | ||
224 | ICP_QAT_HW_CIPHER_F8_MODE = 3, | ||
225 | ICP_QAT_HW_CIPHER_XTS_MODE = 6, | ||
226 | ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7 | ||
227 | }; | ||
228 | |||
229 | struct icp_qat_hw_cipher_config { | ||
230 | uint32_t val; | ||
231 | uint32_t reserved; | ||
232 | }; | ||
233 | |||
234 | enum icp_qat_hw_cipher_dir { | ||
235 | ICP_QAT_HW_CIPHER_ENCRYPT = 0, | ||
236 | ICP_QAT_HW_CIPHER_DECRYPT = 1, | ||
237 | }; | ||
238 | |||
239 | enum icp_qat_hw_cipher_convert { | ||
240 | ICP_QAT_HW_CIPHER_NO_CONVERT = 0, | ||
241 | ICP_QAT_HW_CIPHER_KEY_CONVERT = 1, | ||
242 | }; | ||
243 | |||
244 | #define QAT_CIPHER_MODE_BITPOS 4 | ||
245 | #define QAT_CIPHER_MODE_MASK 0xF | ||
246 | #define QAT_CIPHER_ALGO_BITPOS 0 | ||
247 | #define QAT_CIPHER_ALGO_MASK 0xF | ||
248 | #define QAT_CIPHER_CONVERT_BITPOS 9 | ||
249 | #define QAT_CIPHER_CONVERT_MASK 0x1 | ||
250 | #define QAT_CIPHER_DIR_BITPOS 8 | ||
251 | #define QAT_CIPHER_DIR_MASK 0x1 | ||
252 | #define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2 | ||
253 | #define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2 | ||
254 | #define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \ | ||
255 | (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \ | ||
256 | ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \ | ||
257 | ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \ | ||
258 | ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS)) | ||
259 | #define ICP_QAT_HW_DES_BLK_SZ 8 | ||
260 | #define ICP_QAT_HW_3DES_BLK_SZ 8 | ||
261 | #define ICP_QAT_HW_NULL_BLK_SZ 8 | ||
262 | #define ICP_QAT_HW_AES_BLK_SZ 16 | ||
263 | #define ICP_QAT_HW_KASUMI_BLK_SZ 8 | ||
264 | #define ICP_QAT_HW_SNOW_3G_BLK_SZ 8 | ||
265 | #define ICP_QAT_HW_ZUC_3G_BLK_SZ 8 | ||
266 | #define ICP_QAT_HW_NULL_KEY_SZ 256 | ||
267 | #define ICP_QAT_HW_DES_KEY_SZ 8 | ||
268 | #define ICP_QAT_HW_3DES_KEY_SZ 24 | ||
269 | #define ICP_QAT_HW_AES_128_KEY_SZ 16 | ||
270 | #define ICP_QAT_HW_AES_192_KEY_SZ 24 | ||
271 | #define ICP_QAT_HW_AES_256_KEY_SZ 32 | ||
272 | #define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ | ||
273 | QAT_CIPHER_MODE_F8_KEY_SZ_MULT) | ||
274 | #define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \ | ||
275 | QAT_CIPHER_MODE_F8_KEY_SZ_MULT) | ||
276 | #define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ | ||
277 | QAT_CIPHER_MODE_F8_KEY_SZ_MULT) | ||
278 | #define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ | ||
279 | QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) | ||
280 | #define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ | ||
281 | QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) | ||
282 | #define ICP_QAT_HW_KASUMI_KEY_SZ 16 | ||
283 | #define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \ | ||
284 | QAT_CIPHER_MODE_F8_KEY_SZ_MULT) | ||
289 | #define ICP_QAT_HW_ARC4_KEY_SZ 256 | ||
290 | #define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16 | ||
291 | #define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16 | ||
292 | #define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16 | ||
293 | #define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16 | ||
294 | #define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2 | ||
295 | #define INIT_SHRAM_CONSTANTS_TABLE_SZ 1024 | ||
296 | |||
297 | struct icp_qat_hw_cipher_aes256_f8 { | ||
298 | struct icp_qat_hw_cipher_config cipher_config; | ||
299 | uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ]; | ||
300 | }; | ||
301 | |||
302 | struct icp_qat_hw_cipher_algo_blk { | ||
303 | struct icp_qat_hw_cipher_aes256_f8 aes; | ||
304 | }; | ||
305 | #endif | ||
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h new file mode 100644 index 000000000000..2132a8cbc4ec --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h | |||
@@ -0,0 +1,377 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef __ICP_QAT_UCLO_H__ | ||
48 | #define __ICP_QAT_UCLO_H__ | ||
49 | |||
50 | #define ICP_QAT_AC_C_CPU_TYPE 0x00400000 | ||
51 | #define ICP_QAT_UCLO_MAX_AE 12 | ||
52 | #define ICP_QAT_UCLO_MAX_CTX 8 | ||
53 | #define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX) | ||
54 | #define ICP_QAT_UCLO_MAX_USTORE 0x4000 | ||
55 | #define ICP_QAT_UCLO_MAX_XFER_REG 128 | ||
56 | #define ICP_QAT_UCLO_MAX_GPR_REG 128 | ||
57 | #define ICP_QAT_UCLO_MAX_NN_REG 128 | ||
58 | #define ICP_QAT_UCLO_MAX_LMEM_REG 1024 | ||
59 | #define ICP_QAT_UCLO_AE_ALL_CTX 0xff | ||
60 | #define ICP_QAT_UOF_OBJID_LEN 8 | ||
61 | #define ICP_QAT_UOF_FID 0xc6c2 | ||
62 | #define ICP_QAT_UOF_MAJVER 0x4 | ||
63 | #define ICP_QAT_UOF_MINVER 0x11 | ||
64 | #define ICP_QAT_UOF_NN_MODE_NOTCARE 0xff | ||
65 | #define ICP_QAT_UOF_OBJS "UOF_OBJS" | ||
66 | #define ICP_QAT_UOF_STRT "UOF_STRT" | ||
67 | #define ICP_QAT_UOF_GTID "UOF_GTID" | ||
68 | #define ICP_QAT_UOF_IMAG "UOF_IMAG" | ||
69 | #define ICP_QAT_UOF_IMEM "UOF_IMEM" | ||
70 | #define ICP_QAT_UOF_MSEG "UOF_MSEG" | ||
71 | #define ICP_QAT_UOF_LOCAL_SCOPE 1 | ||
72 | #define ICP_QAT_UOF_INIT_EXPR 0 | ||
73 | #define ICP_QAT_UOF_INIT_REG 1 | ||
74 | #define ICP_QAT_UOF_INIT_REG_CTX 2 | ||
75 | #define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP 3 | ||
76 | |||
77 | #define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf) | ||
78 | #define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf) | ||
79 | #define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1) | ||
80 | #define RELOADABLE_CTX_SHARED_MODE(ae_mode) (((ae_mode) >> 0xc) & 0x1) | ||
81 | |||
82 | #define ICP_QAT_LOC_MEM0_MODE(ae_mode) (((ae_mode) >> 0x8) & 0x1) | ||
83 | #define ICP_QAT_LOC_MEM1_MODE(ae_mode) (((ae_mode) >> 0x9) & 0x1) | ||
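
An image's ae_mode word is a packed bitfield: the context mode sits in the low nibble, the next-neighbour mode in the nibble above it, and single-bit local-memory, shared-ustore and reloadable flags higher up. A small decoding sketch:

    /* Sketch: true when the image runs with all eight contexts. */
    static int image_uses_eight_ctx(unsigned short ae_mode)
    {
            return ICP_QAT_CTX_MODE(ae_mode) == ICP_QAT_UCLO_MAX_CTX;
    }
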
84 | |||
85 | enum icp_qat_uof_mem_region { | ||
86 | ICP_QAT_UOF_SRAM_REGION = 0x0, | ||
87 | ICP_QAT_UOF_LMEM_REGION = 0x3, | ||
88 | ICP_QAT_UOF_UMEM_REGION = 0x5 | ||
89 | }; | ||
90 | |||
91 | enum icp_qat_uof_regtype { | ||
92 | ICP_NO_DEST, | ||
93 | ICP_GPA_REL, | ||
94 | ICP_GPA_ABS, | ||
95 | ICP_GPB_REL, | ||
96 | ICP_GPB_ABS, | ||
97 | ICP_SR_REL, | ||
98 | ICP_SR_RD_REL, | ||
99 | ICP_SR_WR_REL, | ||
100 | ICP_SR_ABS, | ||
101 | ICP_SR_RD_ABS, | ||
102 | ICP_SR_WR_ABS, | ||
103 | ICP_DR_REL, | ||
104 | ICP_DR_RD_REL, | ||
105 | ICP_DR_WR_REL, | ||
106 | ICP_DR_ABS, | ||
107 | ICP_DR_RD_ABS, | ||
108 | ICP_DR_WR_ABS, | ||
109 | ICP_LMEM, | ||
110 | ICP_LMEM0, | ||
111 | ICP_LMEM1, | ||
112 | ICP_NEIGH_REL, | ||
113 | }; | ||
114 | |||
115 | struct icp_qat_uclo_page { | ||
116 | struct icp_qat_uclo_encap_page *encap_page; | ||
117 | struct icp_qat_uclo_region *region; | ||
118 | unsigned int flags; | ||
119 | }; | ||
120 | |||
121 | struct icp_qat_uclo_region { | ||
122 | struct icp_qat_uclo_page *loaded; | ||
123 | struct icp_qat_uclo_page *page; | ||
124 | }; | ||
125 | |||
126 | struct icp_qat_uclo_aeslice { | ||
127 | struct icp_qat_uclo_region *region; | ||
128 | struct icp_qat_uclo_page *page; | ||
129 | struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX]; | ||
130 | struct icp_qat_uclo_encapme *encap_image; | ||
131 | unsigned int ctx_mask_assigned; | ||
132 | unsigned int new_uaddr[ICP_QAT_UCLO_MAX_CTX]; | ||
133 | }; | ||
134 | |||
135 | struct icp_qat_uclo_aedata { | ||
136 | unsigned int slice_num; | ||
137 | unsigned int eff_ustore_size; | ||
138 | struct icp_qat_uclo_aeslice ae_slices[ICP_QAT_UCLO_MAX_CTX]; | ||
139 | }; | ||
140 | |||
141 | struct icp_qat_uof_encap_obj { | ||
142 | char *beg_uof; | ||
143 | struct icp_qat_uof_objhdr *obj_hdr; | ||
144 | struct icp_qat_uof_chunkhdr *chunk_hdr; | ||
145 | struct icp_qat_uof_varmem_seg *var_mem_seg; | ||
146 | }; | ||
147 | |||
148 | struct icp_qat_uclo_encap_uwblock { | ||
149 | unsigned int start_addr; | ||
150 | unsigned int words_num; | ||
151 | uint64_t micro_words; | ||
152 | }; | ||
153 | |||
154 | struct icp_qat_uclo_encap_page { | ||
155 | unsigned int def_page; | ||
156 | unsigned int page_region; | ||
157 | unsigned int beg_addr_v; | ||
158 | unsigned int beg_addr_p; | ||
159 | unsigned int micro_words_num; | ||
160 | unsigned int uwblock_num; | ||
161 | struct icp_qat_uclo_encap_uwblock *uwblock; | ||
162 | }; | ||
163 | |||
164 | struct icp_qat_uclo_encapme { | ||
165 | struct icp_qat_uof_image *img_ptr; | ||
166 | struct icp_qat_uclo_encap_page *page; | ||
167 | unsigned int ae_reg_num; | ||
168 | struct icp_qat_uof_ae_reg *ae_reg; | ||
169 | unsigned int init_regsym_num; | ||
170 | struct icp_qat_uof_init_regsym *init_regsym; | ||
171 | unsigned int sbreak_num; | ||
172 | struct icp_qat_uof_sbreak *sbreak; | ||
173 | unsigned int uwords_num; | ||
174 | }; | ||
175 | |||
176 | struct icp_qat_uclo_init_mem_table { | ||
177 | unsigned int entry_num; | ||
178 | struct icp_qat_uof_initmem *init_mem; | ||
179 | }; | ||
180 | |||
181 | struct icp_qat_uclo_objhdr { | ||
182 | char *file_buff; | ||
183 | unsigned int checksum; | ||
184 | unsigned int size; | ||
185 | }; | ||
186 | |||
187 | struct icp_qat_uof_strtable { | ||
188 | unsigned int table_len; | ||
189 | unsigned int reserved; | ||
190 | uint64_t strings; | ||
191 | }; | ||
192 | |||
193 | struct icp_qat_uclo_objhandle { | ||
194 | unsigned int prod_type; | ||
195 | unsigned int prod_rev; | ||
196 | struct icp_qat_uclo_objhdr *obj_hdr; | ||
197 | struct icp_qat_uof_encap_obj encap_uof_obj; | ||
198 | struct icp_qat_uof_strtable str_table; | ||
199 | struct icp_qat_uclo_encapme ae_uimage[ICP_QAT_UCLO_MAX_UIMAGE]; | ||
200 | struct icp_qat_uclo_aedata ae_data[ICP_QAT_UCLO_MAX_AE]; | ||
201 | struct icp_qat_uclo_init_mem_table init_mem_tab; | ||
202 | struct icp_qat_uof_batch_init *lm_init_tab[ICP_QAT_UCLO_MAX_AE]; | ||
203 | struct icp_qat_uof_batch_init *umem_init_tab[ICP_QAT_UCLO_MAX_AE]; | ||
204 | int uimage_num; | ||
205 | int uword_in_bytes; | ||
206 | int global_inited; | ||
207 | unsigned int ae_num; | ||
208 | unsigned int ustore_phy_size; | ||
209 | void *obj_buf; | ||
210 | uint64_t *uword_buf; | ||
211 | }; | ||
212 | |||
213 | struct icp_qat_uof_uword_block { | ||
214 | unsigned int start_addr; | ||
215 | unsigned int words_num; | ||
216 | unsigned int uword_offset; | ||
217 | unsigned int reserved; | ||
218 | }; | ||
219 | |||
220 | struct icp_qat_uof_filehdr { | ||
221 | unsigned short file_id; | ||
222 | unsigned short reserved1; | ||
223 | char min_ver; | ||
224 | char maj_ver; | ||
225 | unsigned short reserved2; | ||
226 | unsigned short max_chunks; | ||
227 | unsigned short num_chunks; | ||
228 | }; | ||
229 | |||
230 | struct icp_qat_uof_filechunkhdr { | ||
231 | char chunk_id[ICP_QAT_UOF_OBJID_LEN]; | ||
232 | unsigned int checksum; | ||
233 | unsigned int offset; | ||
234 | unsigned int size; | ||
235 | }; | ||
236 | |||
237 | struct icp_qat_uof_objhdr { | ||
238 | unsigned int cpu_type; | ||
239 | unsigned short min_cpu_ver; | ||
240 | unsigned short max_cpu_ver; | ||
241 | short max_chunks; | ||
242 | short num_chunks; | ||
243 | unsigned int reserved1; | ||
244 | unsigned int reserved2; | ||
245 | }; | ||
246 | |||
247 | struct icp_qat_uof_chunkhdr { | ||
248 | char chunk_id[ICP_QAT_UOF_OBJID_LEN]; | ||
249 | unsigned int offset; | ||
250 | unsigned int size; | ||
251 | }; | ||
252 | |||
253 | struct icp_qat_uof_memvar_attr { | ||
254 | unsigned int offset_in_byte; | ||
255 | unsigned int value; | ||
256 | }; | ||
257 | |||
258 | struct icp_qat_uof_initmem { | ||
259 | unsigned int sym_name; | ||
260 | char region; | ||
261 | char scope; | ||
262 | unsigned short reserved1; | ||
263 | unsigned int addr; | ||
264 | unsigned int num_in_bytes; | ||
265 | unsigned int val_attr_num; | ||
266 | }; | ||
267 | |||
268 | struct icp_qat_uof_init_regsym { | ||
269 | unsigned int sym_name; | ||
270 | char init_type; | ||
271 | char value_type; | ||
272 | char reg_type; | ||
273 | unsigned char ctx; | ||
274 | unsigned int reg_addr; | ||
275 | unsigned int value; | ||
276 | }; | ||
277 | |||
278 | struct icp_qat_uof_varmem_seg { | ||
279 | unsigned int sram_base; | ||
280 | unsigned int sram_size; | ||
281 | unsigned int sram_alignment; | ||
282 | unsigned int sdram_base; | ||
283 | unsigned int sdram_size; | ||
284 | unsigned int sdram_alignment; | ||
285 | unsigned int sdram1_base; | ||
286 | unsigned int sdram1_size; | ||
287 | unsigned int sdram1_alignment; | ||
288 | unsigned int scratch_base; | ||
289 | unsigned int scratch_size; | ||
290 | unsigned int scratch_alignment; | ||
291 | }; | ||
292 | |||
293 | struct icp_qat_uof_gtid { | ||
294 | char tool_id[ICP_QAT_UOF_OBJID_LEN]; | ||
295 | int tool_ver; | ||
296 | unsigned int reserved1; | ||
297 | unsigned int reserved2; | ||
298 | }; | ||
299 | |||
300 | struct icp_qat_uof_sbreak { | ||
301 | unsigned int page_num; | ||
302 | unsigned int virt_uaddr; | ||
303 | unsigned char sbreak_type; | ||
304 | unsigned char reg_type; | ||
305 | unsigned short reserved1; | ||
306 | unsigned int addr_offset; | ||
307 | unsigned int reg_addr; | ||
308 | }; | ||
309 | |||
310 | struct icp_qat_uof_code_page { | ||
311 | unsigned int page_region; | ||
312 | unsigned int page_num; | ||
313 | unsigned char def_page; | ||
314 | unsigned char reserved2; | ||
315 | unsigned short reserved1; | ||
316 | unsigned int beg_addr_v; | ||
317 | unsigned int beg_addr_p; | ||
318 | unsigned int neigh_reg_tab_offset; | ||
319 | unsigned int uc_var_tab_offset; | ||
320 | unsigned int imp_var_tab_offset; | ||
321 | unsigned int imp_expr_tab_offset; | ||
322 | unsigned int code_area_offset; | ||
323 | }; | ||
324 | |||
325 | struct icp_qat_uof_image { | ||
326 | unsigned int img_name; | ||
327 | unsigned int ae_assigned; | ||
328 | unsigned int ctx_assigned; | ||
329 | unsigned int cpu_type; | ||
330 | unsigned int entry_address; | ||
331 | unsigned int fill_pattern[2]; | ||
332 | unsigned int reloadable_size; | ||
333 | unsigned char sensitivity; | ||
334 | unsigned char reserved; | ||
335 | unsigned short ae_mode; | ||
336 | unsigned short max_ver; | ||
337 | unsigned short min_ver; | ||
338 | unsigned short image_attrib; | ||
339 | unsigned short reserved2; | ||
340 | unsigned short page_region_num; | ||
341 | unsigned short numpages; | ||
342 | unsigned int reg_tab_offset; | ||
343 | unsigned int init_reg_sym_tab; | ||
344 | unsigned int sbreak_tab; | ||
345 | unsigned int app_metadata; | ||
346 | }; | ||
347 | |||
348 | struct icp_qat_uof_objtable { | ||
349 | unsigned int entry_num; | ||
350 | }; | ||
351 | |||
352 | struct icp_qat_uof_ae_reg { | ||
353 | unsigned int name; | ||
354 | unsigned int vis_name; | ||
355 | unsigned short type; | ||
356 | unsigned short addr; | ||
357 | unsigned short access_mode; | ||
358 | unsigned char visible; | ||
359 | unsigned char reserved1; | ||
360 | unsigned short ref_count; | ||
361 | unsigned short reserved2; | ||
362 | unsigned int xo_id; | ||
363 | }; | ||
364 | |||
365 | struct icp_qat_uof_code_area { | ||
366 | unsigned int micro_words_num; | ||
367 | unsigned int uword_block_tab; | ||
368 | }; | ||
369 | |||
370 | struct icp_qat_uof_batch_init { | ||
371 | unsigned int ae; | ||
372 | unsigned int addr; | ||
373 | unsigned int *value; | ||
374 | unsigned int size; | ||
375 | struct icp_qat_uof_batch_init *next; | ||
376 | }; | ||
377 | #endif | ||
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c new file mode 100644 index 000000000000..59df48872955 --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_algs.c | |||
@@ -0,0 +1,1038 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/module.h> | ||
48 | #include <linux/slab.h> | ||
49 | #include <linux/crypto.h> | ||
50 | #include <crypto/aead.h> | ||
51 | #include <crypto/aes.h> | ||
52 | #include <crypto/sha.h> | ||
53 | #include <crypto/hash.h> | ||
54 | #include <crypto/algapi.h> | ||
55 | #include <crypto/authenc.h> | ||
56 | #include <crypto/rng.h> | ||
57 | #include <linux/dma-mapping.h> | ||
58 | #include "adf_accel_devices.h" | ||
59 | #include "adf_transport.h" | ||
60 | #include "adf_common_drv.h" | ||
61 | #include "qat_crypto.h" | ||
62 | #include "icp_qat_hw.h" | ||
63 | #include "icp_qat_fw.h" | ||
64 | #include "icp_qat_fw_la.h" | ||
65 | |||
66 | #define QAT_AES_HW_CONFIG_ENC(alg) \ | ||
67 | ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ | ||
68 | ICP_QAT_HW_CIPHER_NO_CONVERT, \ | ||
69 | ICP_QAT_HW_CIPHER_ENCRYPT) | ||
70 | |||
71 | #define QAT_AES_HW_CONFIG_DEC(alg) \ | ||
72 | ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ | ||
73 | ICP_QAT_HW_CIPHER_KEY_CONVERT, \ | ||
74 | ICP_QAT_HW_CIPHER_DECRYPT) | ||
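
Both wrappers hardwire CBC mode. The decrypt wrapper additionally asks the hardware to convert the key, presumably because CBC decryption needs the inverse AES key schedule, while encryption uses the key as supplied. A sketch of a config block built from the encrypt wrapper:

    /* Sketch: cipher config for an AES-128-CBC encrypt session. */
    static const struct icp_qat_hw_cipher_config aes128_cbc_enc = {
            .val = QAT_AES_HW_CONFIG_ENC(ICP_QAT_HW_CIPHER_ALGO_AES128),
    };
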
75 | |||
76 | static atomic_t active_dev; | ||
77 | |||
78 | struct qat_alg_buf { | ||
79 | uint32_t len; | ||
80 | uint32_t resrvd; | ||
81 | uint64_t addr; | ||
82 | } __packed; | ||
83 | |||
84 | struct qat_alg_buf_list { | ||
85 | uint64_t resrvd; | ||
86 | uint32_t num_bufs; | ||
87 | uint32_t num_mapped_bufs; | ||
88 | struct qat_alg_buf bufers[]; | ||
89 | } __packed __aligned(64); | ||
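
bufers[] is a flexible array member, so a list for n scatter-gather entries is allocated as one block, header plus n descriptors. A hedged sketch (the helper name is hypothetical):

    static struct qat_alg_buf_list *qat_alg_buf_list_alloc(int n)
    {
            return kzalloc(sizeof(struct qat_alg_buf_list) +
                           n * sizeof(struct qat_alg_buf), GFP_KERNEL);
    }
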
90 | |||
91 | /* Common content descriptor */ | ||
92 | struct qat_alg_cd { | ||
93 | union { | ||
94 | struct qat_enc { /* Encrypt content desc */ | ||
95 | struct icp_qat_hw_cipher_algo_blk cipher; | ||
96 | struct icp_qat_hw_auth_algo_blk hash; | ||
97 | } qat_enc_cd; | ||
98 | struct qat_dec { /* Decrypt content desc */ | ||
99 | struct icp_qat_hw_auth_algo_blk hash; | ||
100 | struct icp_qat_hw_cipher_algo_blk cipher; | ||
101 | } qat_dec_cd; | ||
102 | }; | ||
103 | } __aligned(64); | ||
104 | |||
105 | #define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk) | ||
106 | |||
107 | struct qat_auth_state { | ||
108 | uint8_t data[MAX_AUTH_STATE_SIZE]; | ||
109 | } __aligned(64); | ||
110 | |||
111 | struct qat_alg_session_ctx { | ||
112 | struct qat_alg_cd *enc_cd; | ||
113 | dma_addr_t enc_cd_paddr; | ||
114 | struct qat_alg_cd *dec_cd; | ||
115 | dma_addr_t dec_cd_paddr; | ||
116 | struct qat_auth_state *auth_hw_state_enc; | ||
117 | dma_addr_t auth_state_enc_paddr; | ||
118 | struct qat_auth_state *auth_hw_state_dec; | ||
119 | dma_addr_t auth_state_dec_paddr; | ||
120 | struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl; | ||
121 | struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl; | ||
122 | struct qat_crypto_instance *inst; | ||
123 | struct crypto_tfm *tfm; | ||
124 | struct crypto_shash *hash_tfm; | ||
125 | enum icp_qat_hw_auth_algo qat_hash_alg; | ||
126 | uint8_t salt[AES_BLOCK_SIZE]; | ||
127 | spinlock_t lock; /* protects qat_alg_session_ctx struct */ | ||
128 | }; | ||
129 | |||
130 | static int get_current_node(void) | ||
131 | { | ||
132 | return cpu_data(current_thread_info()->cpu).phys_proc_id; | ||
133 | } | ||
134 | |||
135 | static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) | ||
136 | { | ||
137 | switch (qat_hash_alg) { | ||
138 | case ICP_QAT_HW_AUTH_ALGO_SHA1: | ||
139 | return ICP_QAT_HW_SHA1_STATE1_SZ; | ||
140 | case ICP_QAT_HW_AUTH_ALGO_SHA256: | ||
141 | return ICP_QAT_HW_SHA256_STATE1_SZ; | ||
142 | case ICP_QAT_HW_AUTH_ALGO_SHA512: | ||
143 | return ICP_QAT_HW_SHA512_STATE1_SZ; | ||
144 | default: | ||
145 | return -EFAULT; | ||
146 | } | ||
147 | return -EFAULT; | ||
148 | } | ||
149 | |||
150 | static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash, | ||
151 | struct qat_alg_session_ctx *ctx, | ||
152 | const uint8_t *auth_key, | ||
153 | unsigned int auth_keylen, uint8_t *auth_state) | ||
154 | { | ||
155 | struct { | ||
156 | struct shash_desc shash; | ||
157 | char ctx[crypto_shash_descsize(ctx->hash_tfm)]; | ||
158 | } desc; | ||
159 | struct sha1_state sha1; | ||
160 | struct sha256_state sha256; | ||
161 | struct sha512_state sha512; | ||
162 | int block_size = crypto_shash_blocksize(ctx->hash_tfm); | ||
163 | int digest_size = crypto_shash_digestsize(ctx->hash_tfm); | ||
164 | uint8_t *ipad = auth_state; | ||
165 | uint8_t *opad = ipad + block_size; | ||
166 | __be32 *hash_state_out; | ||
167 | __be64 *hash512_state_out; | ||
168 | int i, offset; | ||
169 | |||
170 | desc.shash.tfm = ctx->hash_tfm; | ||
171 | desc.shash.flags = 0x0; | ||
172 | |||
173 | if (auth_keylen > block_size) { | ||
174 | char buff[SHA512_BLOCK_SIZE]; | ||
175 | int ret = crypto_shash_digest(&desc.shash, auth_key, | ||
176 | auth_keylen, buff); | ||
177 | if (ret) | ||
178 | return ret; | ||
179 | |||
180 | memcpy(ipad, buff, digest_size); | ||
181 | memcpy(opad, buff, digest_size); | ||
182 | memset(ipad + digest_size, 0, block_size - digest_size); | ||
183 | memset(opad + digest_size, 0, block_size - digest_size); | ||
184 | } else { | ||
185 | memcpy(ipad, auth_key, auth_keylen); | ||
186 | memcpy(opad, auth_key, auth_keylen); | ||
187 | memset(ipad + auth_keylen, 0, block_size - auth_keylen); | ||
188 | memset(opad + auth_keylen, 0, block_size - auth_keylen); | ||
189 | } | ||
190 | |||
191 | for (i = 0; i < block_size; i++) { | ||
192 | char *ipad_ptr = ipad + i; | ||
193 | char *opad_ptr = opad + i; | ||
194 | *ipad_ptr ^= 0x36; | ||
195 | *opad_ptr ^= 0x5C; | ||
196 | } | ||
197 | |||
198 | if (crypto_shash_init(&desc.shash)) | ||
199 | return -EFAULT; | ||
200 | |||
201 | if (crypto_shash_update(&desc.shash, ipad, block_size)) | ||
202 | return -EFAULT; | ||
203 | |||
204 | hash_state_out = (__be32 *)hash->sha.state1; | ||
205 | hash512_state_out = (__be64 *)hash_state_out; | ||
206 | |||
207 | switch (ctx->qat_hash_alg) { | ||
208 | case ICP_QAT_HW_AUTH_ALGO_SHA1: | ||
209 | if (crypto_shash_export(&desc.shash, &sha1)) | ||
210 | return -EFAULT; | ||
211 | for (i = 0; i < digest_size >> 2; i++, hash_state_out++) | ||
212 | *hash_state_out = cpu_to_be32(*(sha1.state + i)); | ||
213 | break; | ||
214 | case ICP_QAT_HW_AUTH_ALGO_SHA256: | ||
215 | if (crypto_shash_export(&desc.shash, &sha256)) | ||
216 | return -EFAULT; | ||
217 | for (i = 0; i < digest_size >> 2; i++, hash_state_out++) | ||
218 | *hash_state_out = cpu_to_be32(*(sha256.state + i)); | ||
219 | break; | ||
220 | case ICP_QAT_HW_AUTH_ALGO_SHA512: | ||
221 | if (crypto_shash_export(&desc.shash, &sha512)) | ||
222 | return -EFAULT; | ||
223 | for (i = 0; i < digest_size >> 3; i++, hash512_state_out++) | ||
224 | *hash512_state_out = cpu_to_be64(*(sha512.state + i)); | ||
225 | break; | ||
226 | default: | ||
227 | return -EFAULT; | ||
228 | } | ||
229 | |||
230 | if (crypto_shash_init(&desc.shash)) | ||
231 | return -EFAULT; | ||
232 | |||
233 | if (crypto_shash_update(&desc.shash, opad, block_size)) | ||
234 | return -EFAULT; | ||
235 | |||
236 | offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8); | ||
237 | hash_state_out = (__be32 *)(hash->sha.state1 + offset); | ||
238 | hash512_state_out = (__be64 *)hash_state_out; | ||
239 | |||
240 | switch (ctx->qat_hash_alg) { | ||
241 | case ICP_QAT_HW_AUTH_ALGO_SHA1: | ||
242 | if (crypto_shash_export(&desc.shash, &sha1)) | ||
243 | return -EFAULT; | ||
244 | for (i = 0; i < digest_size >> 2; i++, hash_state_out++) | ||
245 | *hash_state_out = cpu_to_be32(*(sha1.state + i)); | ||
246 | break; | ||
247 | case ICP_QAT_HW_AUTH_ALGO_SHA256: | ||
248 | if (crypto_shash_export(&desc.shash, &sha256)) | ||
249 | return -EFAULT; | ||
250 | for (i = 0; i < digest_size >> 2; i++, hash_state_out++) | ||
251 | *hash_state_out = cpu_to_be32(*(sha256.state + i)); | ||
252 | break; | ||
253 | case ICP_QAT_HW_AUTH_ALGO_SHA512: | ||
254 | if (crypto_shash_export(&desc.shash, &sha512)) | ||
255 | return -EFAULT; | ||
256 | for (i = 0; i < digest_size >> 3; i++, hash512_state_out++) | ||
257 | *hash512_state_out = cpu_to_be64(*(sha512.state + i)); | ||
258 | break; | ||
259 | default: | ||
260 | return -EFAULT; | ||
261 | } | ||
262 | return 0; | ||
263 | } | ||
264 | |||
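qat_alg_do_precomputes() above is the standard HMAC (RFC 2104) key schedule: a key longer than the block size is first digested, the result is zero-padded, the pads are XORed with 0x36/0x5c, and the partial digests after hashing one block of ipad and opad are exported so the hardware can resume from those mid-states. A self-contained sketch of the pad derivation alone (fixed 64-byte block as for SHA-1/SHA-256; the driver handles SHA-512's 128-byte blocks the same way):

	#include <stdint.h>
	#include <string.h>

	/* Sketch: derive the HMAC inner/outer pads for a key that
	 * already fits in one block (over-long keys are hashed first). */
	static void hmac_pads(const uint8_t *key, size_t keylen,
			      uint8_t ipad[64], uint8_t opad[64])
	{
		size_t i;

		memset(ipad, 0, 64);
		memcpy(ipad, key, keylen);
		memcpy(opad, ipad, 64);
		for (i = 0; i < 64; i++) {
			ipad[i] ^= 0x36;
			opad[i] ^= 0x5c;
		}
		/* Hashing one block of ipad (resp. opad) yields the partial
		 * state that the code above exports into hash->sha.state1. */
	}
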
265 | static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header) | ||
266 | { | ||
267 | header->hdr_flags = | ||
268 | ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET); | ||
269 | header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA; | ||
270 | header->comn_req_flags = | ||
271 | ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR, | ||
272 | QAT_COMN_PTR_TYPE_SGL); | ||
273 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, | ||
274 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER); | ||
275 | ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags, | ||
276 | ICP_QAT_FW_LA_PARTIAL_NONE); | ||
277 | ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, | ||
278 | ICP_QAT_FW_CIPH_IV_16BYTE_DATA); | ||
279 | ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, | ||
280 | ICP_QAT_FW_LA_NO_PROTO); | ||
281 | ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags, | ||
282 | ICP_QAT_FW_LA_NO_UPDATE_STATE); | ||
283 | } | ||
284 | |||
285 | static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx, | ||
286 | int alg, struct crypto_authenc_keys *keys) | ||
287 | { | ||
288 | struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm); | ||
289 | unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize; | ||
290 | struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd; | ||
291 | struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher; | ||
292 | struct icp_qat_hw_auth_algo_blk *hash = | ||
293 | (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx + | ||
294 | sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen); | ||
295 | struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl; | ||
296 | struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; | ||
297 | struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; | ||
298 | void *ptr = &req_tmpl->cd_ctrl; | ||
299 | struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr; | ||
300 | struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; | ||
301 | struct icp_qat_fw_la_auth_req_params *auth_param = | ||
302 | (struct icp_qat_fw_la_auth_req_params *) | ||
303 | ((char *)&req_tmpl->serv_specif_rqpars + | ||
304 | sizeof(struct icp_qat_fw_la_cipher_req_params)); | ||
305 | |||
306 | /* CD setup */ | ||
307 | cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg); | ||
308 | memcpy(cipher->aes.key, keys->enckey, keys->enckeylen); | ||
309 | hash->sha.inner_setup.auth_config.config = | ||
310 | ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1, | ||
311 | ctx->qat_hash_alg, digestsize); | ||
312 | hash->sha.inner_setup.auth_counter.counter = | ||
313 | cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm)); | ||
314 | |||
315 | if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen, | ||
316 | (uint8_t *)ctx->auth_hw_state_enc)) | ||
317 | return -EFAULT; | ||
318 | |||
319 | /* Request setup */ | ||
320 | qat_alg_init_common_hdr(header); | ||
321 | header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH; | ||
322 | ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, | ||
323 | ICP_QAT_FW_LA_RET_AUTH_RES); | ||
324 | ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, | ||
325 | ICP_QAT_FW_LA_NO_CMP_AUTH_RES); | ||
326 | cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr; | ||
327 | cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3; | ||
328 | |||
329 | /* Cipher CD config setup */ | ||
330 | cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3; | ||
331 | cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3; | ||
332 | cipher_cd_ctrl->cipher_cfg_offset = 0; | ||
333 | ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); | ||
334 | ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH); | ||
335 | /* Auth CD config setup */ | ||
336 | hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3; | ||
337 | hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED; | ||
338 | hash_cd_ctrl->inner_res_sz = digestsize; | ||
339 | hash_cd_ctrl->final_sz = digestsize; | ||
340 | |||
341 | switch (ctx->qat_hash_alg) { | ||
342 | case ICP_QAT_HW_AUTH_ALGO_SHA1: | ||
343 | hash_cd_ctrl->inner_state1_sz = | ||
344 | round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8); | ||
345 | hash_cd_ctrl->inner_state2_sz = | ||
346 | round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8); | ||
347 | break; | ||
348 | case ICP_QAT_HW_AUTH_ALGO_SHA256: | ||
349 | hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ; | ||
350 | hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ; | ||
351 | break; | ||
352 | case ICP_QAT_HW_AUTH_ALGO_SHA512: | ||
353 | hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ; | ||
354 | hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ; | ||
355 | break; | ||
356 | default: | ||
357 | break; | ||
358 | } | ||
359 | hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset + | ||
360 | ((sizeof(struct icp_qat_hw_auth_setup) + | ||
361 | round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3); | ||
362 | auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr + | ||
363 | sizeof(struct icp_qat_hw_auth_counter) + | ||
364 | round_up(hash_cd_ctrl->inner_state1_sz, 8); | ||
365 | ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH); | ||
366 | ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR); | ||
367 | return 0; | ||
368 | } | ||
369 | |||
370 | static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx, | ||
371 | int alg, struct crypto_authenc_keys *keys) | ||
372 | { | ||
373 | struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm); | ||
374 | unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize; | ||
375 | struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd; | ||
376 | struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash; | ||
377 | struct icp_qat_hw_cipher_algo_blk *cipher = | ||
378 | (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx + | ||
379 | sizeof(struct icp_qat_hw_auth_setup) + | ||
380 | roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2); | ||
381 | struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl; | ||
382 | struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; | ||
383 | struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; | ||
384 | void *ptr = &req_tmpl->cd_ctrl; | ||
385 | struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr; | ||
386 | struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; | ||
387 | struct icp_qat_fw_la_auth_req_params *auth_param = | ||
388 | (struct icp_qat_fw_la_auth_req_params *) | ||
389 | ((char *)&req_tmpl->serv_specif_rqpars + | ||
390 | sizeof(struct icp_qat_fw_la_cipher_req_params)); | ||
391 | |||
392 | /* CD setup */ | ||
393 | cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg); | ||
394 | memcpy(cipher->aes.key, keys->enckey, keys->enckeylen); | ||
395 | hash->sha.inner_setup.auth_config.config = | ||
396 | ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1, | ||
397 | ctx->qat_hash_alg, | ||
398 | digestsize); | ||
399 | hash->sha.inner_setup.auth_counter.counter = | ||
400 | cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm)); | ||
401 | |||
402 | if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen, | ||
403 | (uint8_t *)ctx->auth_hw_state_dec)) | ||
404 | return -EFAULT; | ||
405 | |||
406 | /* Request setup */ | ||
407 | qat_alg_init_common_hdr(header); | ||
408 | header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER; | ||
409 | ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, | ||
410 | ICP_QAT_FW_LA_NO_RET_AUTH_RES); | ||
411 | ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, | ||
412 | ICP_QAT_FW_LA_CMP_AUTH_RES); | ||
413 | cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr; | ||
414 | cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3; | ||
415 | |||
416 | /* Cipher CD config setup */ | ||
417 | cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3; | ||
418 | cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3; | ||
419 | cipher_cd_ctrl->cipher_cfg_offset = | ||
420 | (sizeof(struct icp_qat_hw_auth_setup) + | ||
421 | roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3; | ||
422 | ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); | ||
423 | ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR); | ||
424 | |||
425 | /* Auth CD config setup */ | ||
426 | hash_cd_ctrl->hash_cfg_offset = 0; | ||
427 | hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED; | ||
428 | hash_cd_ctrl->inner_res_sz = digestsize; | ||
429 | hash_cd_ctrl->final_sz = digestsize; | ||
430 | |||
431 | switch (ctx->qat_hash_alg) { | ||
432 | case ICP_QAT_HW_AUTH_ALGO_SHA1: | ||
433 | hash_cd_ctrl->inner_state1_sz = | ||
434 | round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8); | ||
435 | hash_cd_ctrl->inner_state2_sz = | ||
436 | round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8); | ||
437 | break; | ||
438 | case ICP_QAT_HW_AUTH_ALGO_SHA256: | ||
439 | hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ; | ||
440 | hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ; | ||
441 | break; | ||
442 | case ICP_QAT_HW_AUTH_ALGO_SHA512: | ||
443 | hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ; | ||
444 | hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ; | ||
445 | break; | ||
446 | default: | ||
447 | break; | ||
448 | } | ||
449 | |||
450 | hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset + | ||
451 | ((sizeof(struct icp_qat_hw_auth_setup) + | ||
452 | round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3); | ||
453 | auth_param->u1.auth_partial_st_prefix = ctx->auth_state_dec_paddr + | ||
454 | sizeof(struct icp_qat_hw_auth_counter) + | ||
455 | round_up(hash_cd_ctrl->inner_state1_sz, 8); | ||
456 | auth_param->auth_res_sz = digestsize; | ||
457 | ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH); | ||
458 | ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); | ||
459 | return 0; | ||
460 | } | ||
461 | |||
462 | static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx, | ||
463 | const uint8_t *key, unsigned int keylen) | ||
464 | { | ||
465 | struct crypto_authenc_keys keys; | ||
466 | int alg; | ||
467 | |||
468 | if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE)) | ||
469 | return -EFAULT; | ||
470 | |||
471 | if (crypto_authenc_extractkeys(&keys, key, keylen)) | ||
472 | goto bad_key; | ||
473 | |||
474 | switch (keys.enckeylen) { | ||
475 | case AES_KEYSIZE_128: | ||
476 | alg = ICP_QAT_HW_CIPHER_ALGO_AES128; | ||
477 | break; | ||
478 | case AES_KEYSIZE_192: | ||
479 | alg = ICP_QAT_HW_CIPHER_ALGO_AES192; | ||
480 | break; | ||
481 | case AES_KEYSIZE_256: | ||
482 | alg = ICP_QAT_HW_CIPHER_ALGO_AES256; | ||
483 | break; | ||
484 | default: | ||
485 | goto bad_key; | ||
486 | break; | ||
487 | } | ||
488 | |||
489 | if (qat_alg_init_enc_session(ctx, alg, &keys)) | ||
490 | goto error; | ||
491 | |||
492 | if (qat_alg_init_dec_session(ctx, alg, &keys)) | ||
493 | goto error; | ||
494 | |||
495 | return 0; | ||
496 | bad_key: | ||
497 | crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
498 | return -EINVAL; | ||
499 | error: | ||
500 | return -EFAULT; | ||
501 | } | ||
502 | |||
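crypto_authenc_extractkeys() expects the combined key in the netlink-attribute layout used by the authenc() template: an rtattr whose payload carries the big-endian cipher-key length, followed by the authentication key and then the cipher key. A hedged sketch of packing such a blob (assuming the include/crypto/authenc.h layout; the helper name is hypothetical):

	#include <linux/rtnetlink.h>
	#include <crypto/authenc.h>

	/* Sketch: pack authkey||enckey into the authenc() key format. */
	static int pack_authenc_key(u8 *buf, unsigned int buflen,
				    const u8 *authkey, unsigned int authkeylen,
				    const u8 *enckey, unsigned int enckeylen)
	{
		struct rtattr *rta = (struct rtattr *)buf;
		struct crypto_authenc_key_param *param;

		if (buflen < RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen)
			return -EINVAL;
		rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
		rta->rta_len = RTA_LENGTH(sizeof(*param));
		param = RTA_DATA(rta);
		param->enckeylen = cpu_to_be32(enckeylen);
		memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
		memcpy(buf + RTA_SPACE(sizeof(*param)) + authkeylen,
		       enckey, enckeylen);
		return 0;
	}
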
503 | static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key, | ||
504 | unsigned int keylen) | ||
505 | { | ||
506 | struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm); | ||
507 | struct device *dev; | ||
508 | |||
509 | spin_lock(&ctx->lock); | ||
510 | if (ctx->enc_cd) { | ||
511 | /* rekeying */ | ||
512 | dev = &GET_DEV(ctx->inst->accel_dev); | ||
513 | memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd)); | ||
514 | memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd)); | ||
515 | memset(ctx->auth_hw_state_enc, 0, | ||
516 | sizeof(struct qat_auth_state)); | ||
517 | memset(ctx->auth_hw_state_dec, 0, | ||
518 | sizeof(struct qat_auth_state)); | ||
519 | memset(&ctx->enc_fw_req_tmpl, 0, | ||
520 | sizeof(struct icp_qat_fw_la_bulk_req)); | ||
521 | memset(&ctx->dec_fw_req_tmpl, 0, | ||
522 | sizeof(struct icp_qat_fw_la_bulk_req)); | ||
523 | } else { | ||
524 | /* new key */ | ||
525 | int node = get_current_node(); | ||
526 | struct qat_crypto_instance *inst = | ||
527 | qat_crypto_get_instance_node(node); | ||
528 | if (!inst) { | ||
529 | spin_unlock(&ctx->lock); | ||
530 | return -EINVAL; | ||
531 | } | ||
532 | |||
533 | dev = &GET_DEV(inst->accel_dev); | ||
534 | ctx->inst = inst; | ||
535 | ctx->enc_cd = dma_zalloc_coherent(dev, | ||
536 | sizeof(struct qat_alg_cd), | ||
537 | &ctx->enc_cd_paddr, | ||
538 | GFP_ATOMIC); | ||
539 | if (!ctx->enc_cd) { | ||
540 | spin_unlock(&ctx->lock); | ||
541 | return -ENOMEM; | ||
542 | } | ||
543 | ctx->dec_cd = dma_zalloc_coherent(dev, | ||
544 | sizeof(struct qat_alg_cd), | ||
545 | &ctx->dec_cd_paddr, | ||
546 | GFP_ATOMIC); | ||
547 | if (!ctx->dec_cd) { | ||
548 | spin_unlock(&ctx->lock); | ||
549 | goto out_free_enc; | ||
550 | } | ||
551 | ctx->auth_hw_state_enc = | ||
552 | dma_zalloc_coherent(dev, sizeof(struct qat_auth_state), | ||
553 | &ctx->auth_state_enc_paddr, | ||
554 | GFP_ATOMIC); | ||
555 | if (!ctx->auth_hw_state_enc) { | ||
556 | spin_unlock(&ctx->lock); | ||
557 | goto out_free_dec; | ||
558 | } | ||
559 | ctx->auth_hw_state_dec = | ||
560 | dma_zalloc_coherent(dev, sizeof(struct qat_auth_state), | ||
561 | &ctx->auth_state_dec_paddr, | ||
562 | GFP_ATOMIC); | ||
563 | if (!ctx->auth_hw_state_dec) { | ||
564 | spin_unlock(&ctx->lock); | ||
565 | goto out_free_auth_enc; | ||
566 | } | ||
567 | } | ||
568 | spin_unlock(&ctx->lock); | ||
569 | if (qat_alg_init_sessions(ctx, key, keylen)) | ||
570 | goto out_free_all; | ||
571 | |||
572 | return 0; | ||
573 | |||
574 | out_free_all: | ||
575 | dma_free_coherent(dev, sizeof(struct qat_auth_state), | ||
576 | ctx->auth_hw_state_dec, ctx->auth_state_dec_paddr); | ||
577 | ctx->auth_hw_state_dec = NULL; | ||
578 | out_free_auth_enc: | ||
579 | dma_free_coherent(dev, sizeof(struct qat_auth_state), | ||
580 | ctx->auth_hw_state_enc, ctx->auth_state_enc_paddr); | ||
581 | ctx->auth_hw_state_enc = NULL; | ||
582 | out_free_dec: | ||
583 | dma_free_coherent(dev, sizeof(struct qat_alg_cd), | ||
584 | ctx->dec_cd, ctx->dec_cd_paddr); | ||
585 | ctx->dec_cd = NULL; | ||
586 | out_free_enc: | ||
587 | dma_free_coherent(dev, sizeof(struct qat_alg_cd), | ||
588 | ctx->enc_cd, ctx->enc_cd_paddr); | ||
589 | ctx->enc_cd = NULL; | ||
590 | return -ENOMEM; | ||
591 | } | ||
592 | |||
593 | static void qat_alg_free_bufl(struct qat_crypto_instance *inst, | ||
594 | struct qat_crypto_request *qat_req) | ||
595 | { | ||
596 | struct device *dev = &GET_DEV(inst->accel_dev); | ||
597 | struct qat_alg_buf_list *bl = qat_req->buf.bl; | ||
598 | struct qat_alg_buf_list *blout = qat_req->buf.blout; | ||
599 | dma_addr_t blp = qat_req->buf.blp; | ||
600 | dma_addr_t blpout = qat_req->buf.bloutp; | ||
601 | size_t sz = qat_req->buf.sz; | ||
602 | int i, bufs = bl->num_bufs; | ||
603 | |||
604 | for (i = 0; i < bl->num_bufs; i++) | ||
605 | dma_unmap_single(dev, bl->bufers[i].addr, | ||
606 | bl->bufers[i].len, DMA_BIDIRECTIONAL); | ||
607 | |||
608 | dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); | ||
609 | kfree(bl); | ||
610 | if (blp != blpout) { | ||
611 | /* For out-of-place operation, DMA unmap only the data buffers */ | ||
612 | int bufless = bufs - blout->num_mapped_bufs; | ||
613 | |||
614 | for (i = bufless; i < bufs; i++) { | ||
615 | dma_unmap_single(dev, blout->bufers[i].addr, | ||
616 | blout->bufers[i].len, | ||
617 | DMA_BIDIRECTIONAL); | ||
618 | } | ||
619 | dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE); | ||
620 | kfree(blout); | ||
621 | } | ||
622 | } | ||
623 | |||
624 | static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | ||
625 | struct scatterlist *assoc, | ||
626 | struct scatterlist *sgl, | ||
627 | struct scatterlist *sglout, uint8_t *iv, | ||
628 | uint8_t ivlen, | ||
629 | struct qat_crypto_request *qat_req) | ||
630 | { | ||
631 | struct device *dev = &GET_DEV(inst->accel_dev); | ||
632 | int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc); | ||
633 | struct qat_alg_buf_list *bufl; | ||
634 | struct qat_alg_buf_list *buflout = NULL; | ||
635 | dma_addr_t blp; | ||
636 | dma_addr_t bloutp = 0; | ||
637 | struct scatterlist *sg; | ||
638 | size_t sz = sizeof(struct qat_alg_buf_list) + | ||
639 | ((1 + n + assoc_n) * sizeof(struct qat_alg_buf)); | ||
640 | |||
641 | if (unlikely(!n)) | ||
642 | return -EINVAL; | ||
643 | |||
644 | bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node); | ||
645 | if (unlikely(!bufl)) | ||
646 | return -ENOMEM; | ||
647 | |||
648 | blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); | ||
649 | if (unlikely(dma_mapping_error(dev, blp))) | ||
650 | goto err; | ||
651 | |||
652 | for_each_sg(assoc, sg, assoc_n, i) { | ||
653 | bufl->bufers[bufs].addr = dma_map_single(dev, | ||
654 | sg_virt(sg), | ||
655 | sg->length, | ||
656 | DMA_BIDIRECTIONAL); | ||
657 | bufl->bufers[bufs].len = sg->length; | ||
658 | if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) | ||
659 | goto err; | ||
660 | bufs++; | ||
661 | } | ||
662 | bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen, | ||
663 | DMA_BIDIRECTIONAL); | ||
664 | bufl->bufers[bufs].len = ivlen; | ||
665 | if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) | ||
666 | goto err; | ||
667 | bufs++; | ||
668 | |||
669 | for_each_sg(sgl, sg, n, i) { | ||
670 | int y = i + bufs; | ||
671 | |||
672 | bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg), | ||
673 | sg->length, | ||
674 | DMA_BIDIRECTIONAL); | ||
675 | bufl->bufers[y].len = sg->length; | ||
676 | if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) | ||
677 | goto err; | ||
678 | } | ||
679 | bufl->num_bufs = n + bufs; | ||
680 | qat_req->buf.bl = bufl; | ||
681 | qat_req->buf.blp = blp; | ||
682 | qat_req->buf.sz = sz; | ||
683 | /* Handle out of place operation */ | ||
684 | if (sgl != sglout) { | ||
685 | struct qat_alg_buf *bufers; | ||
686 | |||
687 | buflout = kmalloc_node(sz, GFP_ATOMIC, | ||
688 | inst->accel_dev->numa_node); | ||
689 | if (unlikely(!buflout)) | ||
690 | goto err; | ||
691 | bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE); | ||
692 | if (unlikely(dma_mapping_error(dev, bloutp))) | ||
693 | goto err; | ||
694 | bufers = buflout->bufers; | ||
695 | /* For out-of-place operation, DMA map only the data | ||
696 | * and reuse the assoc and iv mappings */ | ||
697 | for (i = 0; i < bufs; i++) { | ||
698 | bufers[i].len = bufl->bufers[i].len; | ||
699 | bufers[i].addr = bufl->bufers[i].addr; | ||
700 | } | ||
701 | for_each_sg(sglout, sg, n, i) { | ||
702 | int y = i + bufs; | ||
703 | |||
704 | bufers[y].addr = dma_map_single(dev, sg_virt(sg), | ||
705 | sg->length, | ||
706 | DMA_BIDIRECTIONAL); | ||
707 | buflout->bufers[y].len = sg->length; | ||
708 | if (unlikely(dma_mapping_error(dev, bufers[y].addr))) | ||
709 | goto err; | ||
710 | } | ||
711 | buflout->num_bufs = n + bufs; | ||
712 | buflout->num_mapped_bufs = n; | ||
713 | qat_req->buf.blout = buflout; | ||
714 | qat_req->buf.bloutp = bloutp; | ||
715 | } else { | ||
716 | /* Otherwise set the src and dst to the same address */ | ||
717 | qat_req->buf.bloutp = qat_req->buf.blp; | ||
718 | } | ||
719 | return 0; | ||
720 | err: | ||
721 | dev_err(dev, "Failed to map buf for dma\n"); | ||
722 | for_each_sg(sgl, sg, n + bufs, i) { | ||
723 | if (!dma_mapping_error(dev, bufl->bufers[i].addr)) { | ||
724 | dma_unmap_single(dev, bufl->bufers[i].addr, | ||
725 | bufl->bufers[i].len, | ||
726 | DMA_BIDIRECTIONAL); | ||
727 | } | ||
728 | } | ||
729 | if (!dma_mapping_error(dev, blp)) | ||
730 | dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); | ||
731 | kfree(bufl); | ||
732 | if (sgl != sglout && buflout) { | ||
733 | for_each_sg(sglout, sg, n, i) { | ||
734 | int y = i + bufs; | ||
735 | |||
736 | if (!dma_mapping_error(dev, buflout->bufers[y].addr)) | ||
737 | dma_unmap_single(dev, buflout->bufers[y].addr, | ||
738 | buflout->bufers[y].len, | ||
739 | DMA_BIDIRECTIONAL); | ||
740 | } | ||
741 | if (!dma_mapping_error(dev, bloutp)) | ||
742 | dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE); | ||
743 | kfree(buflout); | ||
744 | } | ||
745 | return -ENOMEM; | ||
746 | } | ||
747 | |||
748 | void qat_alg_callback(void *resp) | ||
749 | { | ||
750 | struct icp_qat_fw_la_resp *qat_resp = resp; | ||
751 | struct qat_crypto_request *qat_req = | ||
752 | (void *)(__force long)qat_resp->opaque_data; | ||
753 | struct qat_alg_session_ctx *ctx = qat_req->ctx; | ||
754 | struct qat_crypto_instance *inst = ctx->inst; | ||
755 | struct aead_request *areq = qat_req->areq; | ||
756 | uint8_t stat_field = qat_resp->comn_resp.comn_status; | ||
757 | int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field); | ||
758 | |||
759 | qat_alg_free_bufl(inst, qat_req); | ||
760 | if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) | ||
761 | res = -EBADMSG; | ||
762 | areq->base.complete(&areq->base, res); | ||
763 | } | ||
764 | |||
765 | static int qat_alg_dec(struct aead_request *areq) | ||
766 | { | ||
767 | struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq); | ||
768 | struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); | ||
769 | struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); | ||
770 | struct qat_crypto_request *qat_req = aead_request_ctx(areq); | ||
771 | struct icp_qat_fw_la_cipher_req_params *cipher_param; | ||
772 | struct icp_qat_fw_la_auth_req_params *auth_param; | ||
773 | struct icp_qat_fw_la_bulk_req *msg; | ||
774 | int digst_size = crypto_aead_crt(aead_tfm)->authsize; | ||
775 | int ret, ctr = 0; | ||
776 | |||
777 | ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst, | ||
778 | areq->iv, AES_BLOCK_SIZE, qat_req); | ||
779 | if (unlikely(ret)) | ||
780 | return ret; | ||
781 | |||
782 | msg = &qat_req->req; | ||
783 | *msg = ctx->dec_fw_req_tmpl; | ||
784 | qat_req->ctx = ctx; | ||
785 | qat_req->areq = areq; | ||
786 | qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; | ||
787 | qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; | ||
788 | qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; | ||
789 | cipher_param = (void *)&qat_req->req.serv_specif_rqpars; | ||
790 | cipher_param->cipher_length = areq->cryptlen - digst_size; | ||
791 | cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE; | ||
792 | memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE); | ||
793 | auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); | ||
794 | auth_param->auth_off = 0; | ||
795 | auth_param->auth_len = areq->assoclen + | ||
796 | cipher_param->cipher_length + AES_BLOCK_SIZE; | ||
797 | do { | ||
798 | ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); | ||
799 | } while (ret == -EAGAIN && ctr++ < 10); | ||
800 | |||
801 | if (ret == -EAGAIN) { | ||
802 | qat_alg_free_bufl(ctx->inst, qat_req); | ||
803 | return -EBUSY; | ||
804 | } | ||
805 | return -EINPROGRESS; | ||
806 | } | ||
807 | |||
808 | static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv, | ||
809 | int enc_iv) | ||
810 | { | ||
811 | struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq); | ||
812 | struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); | ||
813 | struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); | ||
814 | struct qat_crypto_request *qat_req = aead_request_ctx(areq); | ||
815 | struct icp_qat_fw_la_cipher_req_params *cipher_param; | ||
816 | struct icp_qat_fw_la_auth_req_params *auth_param; | ||
817 | struct icp_qat_fw_la_bulk_req *msg; | ||
818 | int ret, ctr = 0; | ||
819 | |||
820 | ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst, | ||
821 | iv, AES_BLOCK_SIZE, qat_req); | ||
822 | if (unlikely(ret)) | ||
823 | return ret; | ||
824 | |||
825 | msg = &qat_req->req; | ||
826 | *msg = ctx->enc_fw_req_tmpl; | ||
827 | qat_req->ctx = ctx; | ||
828 | qat_req->areq = areq; | ||
829 | qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; | ||
830 | qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; | ||
831 | qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; | ||
832 | cipher_param = (void *)&qat_req->req.serv_specif_rqpars; | ||
833 | auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); | ||
834 | |||
835 | if (enc_iv) { | ||
836 | cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE; | ||
837 | cipher_param->cipher_offset = areq->assoclen; | ||
838 | } else { | ||
839 | memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE); | ||
840 | cipher_param->cipher_length = areq->cryptlen; | ||
841 | cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE; | ||
842 | } | ||
843 | auth_param->auth_off = 0; | ||
844 | auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE; | ||
845 | |||
846 | do { | ||
847 | ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); | ||
848 | } while (ret == -EAGAIN && ctr++ < 10); | ||
849 | |||
850 | if (ret == -EAGAIN) { | ||
851 | qat_alg_free_bufl(ctx->inst, qat_req); | ||
852 | return -EBUSY; | ||
853 | } | ||
854 | return -EINPROGRESS; | ||
855 | } | ||
856 | |||
857 | static int qat_alg_enc(struct aead_request *areq) | ||
858 | { | ||
859 | return qat_alg_enc_internal(areq, areq->iv, 0); | ||
860 | } | ||
861 | |||
862 | static int qat_alg_genivenc(struct aead_givcrypt_request *req) | ||
863 | { | ||
864 | struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq); | ||
865 | struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); | ||
866 | struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); | ||
867 | __be64 seq; | ||
868 | |||
869 | memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE); | ||
870 | seq = cpu_to_be64(req->seq); | ||
871 | memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t), | ||
872 | &seq, sizeof(uint64_t)); | ||
873 | return qat_alg_enc_internal(&req->areq, req->giv, 1); | ||
874 | } | ||
875 | |||
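The generated IV is simply the per-context random salt with its last eight bytes overwritten by the big-endian sequence number, so successive requests under the same key never repeat an IV. For example, with hypothetical salt bytes 00 01 .. 0f and seq = 5:

	/* giv = 00 01 02 03 04 05 06 07 00 00 00 00 00 00 00 05 */
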
876 | static int qat_alg_init(struct crypto_tfm *tfm, | ||
877 | enum icp_qat_hw_auth_algo hash, const char *hash_name) | ||
878 | { | ||
879 | struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); | ||
880 | |||
881 | memset(ctx, 0, sizeof(*ctx)); | ||
882 | ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); | ||
883 | if (IS_ERR(ctx->hash_tfm)) | ||
884 | return -EFAULT; | ||
885 | spin_lock_init(&ctx->lock); | ||
886 | ctx->qat_hash_alg = hash; | ||
887 | tfm->crt_aead.reqsize = sizeof(struct aead_request) + | ||
888 | sizeof(struct qat_crypto_request); | ||
889 | ctx->tfm = tfm; | ||
890 | return 0; | ||
891 | } | ||
892 | |||
893 | static int qat_alg_sha1_init(struct crypto_tfm *tfm) | ||
894 | { | ||
895 | return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1"); | ||
896 | } | ||
897 | |||
898 | static int qat_alg_sha256_init(struct crypto_tfm *tfm) | ||
899 | { | ||
900 | return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256"); | ||
901 | } | ||
902 | |||
903 | static int qat_alg_sha512_init(struct crypto_tfm *tfm) | ||
904 | { | ||
905 | return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512"); | ||
906 | } | ||
907 | |||
908 | static void qat_alg_exit(struct crypto_tfm *tfm) | ||
909 | { | ||
910 | struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); | ||
911 | struct qat_crypto_instance *inst = ctx->inst; | ||
912 | struct device *dev; | ||
913 | |||
914 | if (!IS_ERR(ctx->hash_tfm)) | ||
915 | crypto_free_shash(ctx->hash_tfm); | ||
916 | |||
917 | if (!inst) | ||
918 | return; | ||
919 | |||
920 | dev = &GET_DEV(inst->accel_dev); | ||
921 | if (ctx->enc_cd) | ||
922 | dma_free_coherent(dev, sizeof(struct qat_alg_cd), | ||
923 | ctx->enc_cd, ctx->enc_cd_paddr); | ||
924 | if (ctx->dec_cd) | ||
925 | dma_free_coherent(dev, sizeof(struct qat_alg_cd), | ||
926 | ctx->dec_cd, ctx->dec_cd_paddr); | ||
927 | if (ctx->auth_hw_state_enc) | ||
928 | dma_free_coherent(dev, sizeof(struct qat_auth_state), | ||
929 | ctx->auth_hw_state_enc, | ||
930 | ctx->auth_state_enc_paddr); | ||
931 | |||
932 | if (ctx->auth_hw_state_dec) | ||
933 | dma_free_coherent(dev, sizeof(struct qat_auth_state), | ||
934 | ctx->auth_hw_state_dec, | ||
935 | ctx->auth_state_dec_paddr); | ||
936 | |||
937 | qat_crypto_put_instance(inst); | ||
938 | } | ||
939 | |||
940 | static struct crypto_alg qat_algs[] = { { | ||
941 | .cra_name = "authenc(hmac(sha1),cbc(aes))", | ||
942 | .cra_driver_name = "qat_aes_cbc_hmac_sha1", | ||
943 | .cra_priority = 4001, | ||
944 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
945 | .cra_blocksize = AES_BLOCK_SIZE, | ||
946 | .cra_ctxsize = sizeof(struct qat_alg_session_ctx), | ||
947 | .cra_alignmask = 0, | ||
948 | .cra_type = &crypto_aead_type, | ||
949 | .cra_module = THIS_MODULE, | ||
950 | .cra_init = qat_alg_sha1_init, | ||
951 | .cra_exit = qat_alg_exit, | ||
952 | .cra_u = { | ||
953 | .aead = { | ||
954 | .setkey = qat_alg_setkey, | ||
955 | .decrypt = qat_alg_dec, | ||
956 | .encrypt = qat_alg_enc, | ||
957 | .givencrypt = qat_alg_genivenc, | ||
958 | .ivsize = AES_BLOCK_SIZE, | ||
959 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
960 | }, | ||
961 | }, | ||
962 | }, { | ||
963 | .cra_name = "authenc(hmac(sha256),cbc(aes))", | ||
964 | .cra_driver_name = "qat_aes_cbc_hmac_sha256", | ||
965 | .cra_priority = 4001, | ||
966 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
967 | .cra_blocksize = AES_BLOCK_SIZE, | ||
968 | .cra_ctxsize = sizeof(struct qat_alg_session_ctx), | ||
969 | .cra_alignmask = 0, | ||
970 | .cra_type = &crypto_aead_type, | ||
971 | .cra_module = THIS_MODULE, | ||
972 | .cra_init = qat_alg_sha256_init, | ||
973 | .cra_exit = qat_alg_exit, | ||
974 | .cra_u = { | ||
975 | .aead = { | ||
976 | .setkey = qat_alg_setkey, | ||
977 | .decrypt = qat_alg_dec, | ||
978 | .encrypt = qat_alg_enc, | ||
979 | .givencrypt = qat_alg_genivenc, | ||
980 | .ivsize = AES_BLOCK_SIZE, | ||
981 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
982 | }, | ||
983 | }, | ||
984 | }, { | ||
985 | .cra_name = "authenc(hmac(sha512),cbc(aes))", | ||
986 | .cra_driver_name = "qat_aes_cbc_hmac_sha512", | ||
987 | .cra_priority = 4001, | ||
988 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
989 | .cra_blocksize = AES_BLOCK_SIZE, | ||
990 | .cra_ctxsize = sizeof(struct qat_alg_session_ctx), | ||
991 | .cra_alignmask = 0, | ||
992 | .cra_type = &crypto_aead_type, | ||
993 | .cra_module = THIS_MODULE, | ||
994 | .cra_init = qat_alg_sha512_init, | ||
995 | .cra_exit = qat_alg_exit, | ||
996 | .cra_u = { | ||
997 | .aead = { | ||
998 | .setkey = qat_alg_setkey, | ||
999 | .decrypt = qat_alg_dec, | ||
1000 | .encrypt = qat_alg_enc, | ||
1001 | .givencrypt = qat_alg_genivenc, | ||
1002 | .ivsize = AES_BLOCK_SIZE, | ||
1003 | .maxauthsize = SHA512_DIGEST_SIZE, | ||
1004 | }, | ||
1005 | }, | ||
1006 | } }; | ||
1007 | |||
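Once registered, the three AEADs above are reachable through the normal kernel crypto API by cra_name; the 4001 priority makes them preferred over the generic authenc() software template. A minimal allocation sketch (error handling mostly elided):

	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
	/* crypto_aead_setkey() then takes the packed authenc key blob. */
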
1008 | int qat_algs_register(void) | ||
1009 | { | ||
1010 | if (atomic_add_return(1, &active_dev) == 1) { | ||
1011 | int i; | ||
1012 | |||
1013 | for (i = 0; i < ARRAY_SIZE(qat_algs); i++) | ||
1014 | qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD | | ||
1015 | CRYPTO_ALG_ASYNC; | ||
1016 | return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); | ||
1017 | } | ||
1018 | return 0; | ||
1019 | } | ||
1020 | |||
1021 | int qat_algs_unregister(void) | ||
1022 | { | ||
1023 | if (atomic_sub_return(1, &active_dev) == 0) | ||
1024 | return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); | ||
1025 | return 0; | ||
1026 | } | ||
1027 | |||
1028 | int qat_algs_init(void) | ||
1029 | { | ||
1030 | atomic_set(&active_dev, 0); | ||
1031 | crypto_get_default_rng(); | ||
1032 | return 0; | ||
1033 | } | ||
1034 | |||
1035 | void qat_algs_exit(void) | ||
1036 | { | ||
1037 | crypto_put_default_rng(); | ||
1038 | } | ||
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c new file mode 100644 index 000000000000..0d59bcb50de1 --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_crypto.c | |||
@@ -0,0 +1,284 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/module.h> | ||
48 | #include <linux/slab.h> | ||
49 | #include "adf_accel_devices.h" | ||
50 | #include "adf_common_drv.h" | ||
51 | #include "adf_transport.h" | ||
52 | #include "adf_cfg.h" | ||
53 | #include "adf_cfg_strings.h" | ||
54 | #include "qat_crypto.h" | ||
55 | #include "icp_qat_fw.h" | ||
56 | |||
57 | #define SEC ADF_KERNEL_SEC | ||
58 | |||
59 | static struct service_hndl qat_crypto; | ||
60 | |||
61 | void qat_crypto_put_instance(struct qat_crypto_instance *inst) | ||
62 | { | ||
63 | if (atomic_sub_return(1, &inst->refctr) == 0) | ||
64 | adf_dev_put(inst->accel_dev); | ||
65 | } | ||
66 | |||
67 | static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev) | ||
68 | { | ||
69 | struct qat_crypto_instance *inst; | ||
70 | struct list_head *list_ptr, *tmp; | ||
71 | int i; | ||
72 | |||
73 | list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) { | ||
74 | inst = list_entry(list_ptr, struct qat_crypto_instance, list); | ||
75 | |||
76 | for (i = 0; i < atomic_read(&inst->refctr); i++) | ||
77 | qat_crypto_put_instance(inst); | ||
78 | |||
79 | if (inst->sym_tx) | ||
80 | adf_remove_ring(inst->sym_tx); | ||
81 | |||
82 | if (inst->sym_rx) | ||
83 | adf_remove_ring(inst->sym_rx); | ||
84 | |||
85 | if (inst->pke_tx) | ||
86 | adf_remove_ring(inst->pke_tx); | ||
87 | |||
88 | if (inst->pke_rx) | ||
89 | adf_remove_ring(inst->pke_rx); | ||
90 | |||
91 | if (inst->rnd_tx) | ||
92 | adf_remove_ring(inst->rnd_tx); | ||
93 | |||
94 | if (inst->rnd_rx) | ||
95 | adf_remove_ring(inst->rnd_rx); | ||
96 | |||
97 | list_del(list_ptr); | ||
98 | kfree(inst); | ||
99 | } | ||
100 | return 0; | ||
101 | } | ||
102 | |||
103 | struct qat_crypto_instance *qat_crypto_get_instance_node(int node) | ||
104 | { | ||
105 | struct adf_accel_dev *accel_dev = NULL; | ||
106 | struct qat_crypto_instance *inst_best = NULL; | ||
107 | struct list_head *itr; | ||
108 | unsigned long best = ~0; | ||
109 | |||
110 | list_for_each(itr, adf_devmgr_get_head()) { | ||
111 | accel_dev = list_entry(itr, struct adf_accel_dev, list); | ||
112 | if (accel_dev->numa_node == node && adf_dev_started(accel_dev)) | ||
113 | break; | ||
114 | accel_dev = NULL; | ||
115 | } | ||
116 | if (!accel_dev) { | ||
117 | pr_err("QAT: Could not find device on give node\n"); | ||
118 | accel_dev = adf_devmgr_get_first(); | ||
119 | } | ||
120 | if (!accel_dev || !adf_dev_started(accel_dev)) | ||
121 | return NULL; | ||
122 | |||
123 | list_for_each(itr, &accel_dev->crypto_list) { | ||
124 | struct qat_crypto_instance *inst; | ||
125 | unsigned long cur; | ||
126 | |||
127 | inst = list_entry(itr, struct qat_crypto_instance, list); | ||
128 | cur = atomic_read(&inst->refctr); | ||
129 | if (best > cur) { | ||
130 | inst_best = inst; | ||
131 | best = cur; | ||
132 | } | ||
133 | } | ||
134 | if (inst_best) { | ||
135 | if (atomic_add_return(1, &inst_best->refctr) == 1) { | ||
136 | if (adf_dev_get(accel_dev)) { | ||
137 | atomic_dec(&inst_best->refctr); | ||
138 | pr_err("QAT: Could increment dev refctr\n"); | ||
139 | return NULL; | ||
140 | } | ||
141 | } | ||
142 | } | ||
143 | return inst_best; | ||
144 | } | ||
145 | |||
146 | static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) | ||
147 | { | ||
148 | int i; | ||
149 | unsigned long bank; | ||
150 | unsigned long num_inst, num_msg_sym, num_msg_asym; | ||
151 | int msg_size; | ||
152 | struct qat_crypto_instance *inst; | ||
153 | char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; | ||
154 | char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; | ||
155 | |||
156 | INIT_LIST_HEAD(&accel_dev->crypto_list); | ||
157 | strlcpy(key, ADF_NUM_CY, sizeof(key)); | ||
158 | |||
159 | if (adf_cfg_get_param_value(accel_dev, SEC, key, val)) | ||
160 | return -EFAULT; | ||
161 | |||
162 | if (kstrtoul(val, 0, &num_inst)) | ||
163 | return -EFAULT; | ||
164 | |||
165 | for (i = 0; i < num_inst; i++) { | ||
166 | inst = kzalloc_node(sizeof(*inst), GFP_KERNEL, | ||
167 | accel_dev->numa_node); | ||
168 | if (!inst) | ||
169 | goto err; | ||
170 | |||
171 | list_add_tail(&inst->list, &accel_dev->crypto_list); | ||
172 | inst->id = i; | ||
173 | atomic_set(&inst->refctr, 0); | ||
174 | inst->accel_dev = accel_dev; | ||
175 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i); | ||
176 | if (adf_cfg_get_param_value(accel_dev, SEC, key, val)) | ||
177 | goto err; | ||
178 | |||
179 | if (kstrtoul(val, 10, &bank)) | ||
180 | goto err; | ||
181 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); | ||
182 | if (adf_cfg_get_param_value(accel_dev, SEC, key, val)) | ||
183 | goto err; | ||
184 | |||
185 | if (kstrtoul(val, 10, &num_msg_sym)) | ||
186 | goto err; | ||
187 | num_msg_sym = num_msg_sym >> 1; | ||
188 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); | ||
189 | if (adf_cfg_get_param_value(accel_dev, SEC, key, val)) | ||
190 | goto err; | ||
191 | |||
192 | if (kstrtoul(val, 10, &num_msg_asym)) | ||
193 | goto err; | ||
194 | num_msg_asym = num_msg_asym >> 1; | ||
195 | |||
196 | msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ; | ||
197 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); | ||
198 | if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym, | ||
199 | msg_size, key, NULL, 0, &inst->sym_tx)) | ||
200 | goto err; | ||
201 | |||
202 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i); | ||
203 | if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, | ||
204 | msg_size, key, NULL, 0, &inst->rnd_tx)) | ||
205 | goto err; | ||
206 | |||
207 | msg_size = msg_size >> 1; | ||
208 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); | ||
209 | if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, | ||
210 | msg_size, key, NULL, 0, &inst->pke_tx)) | ||
211 | goto err; | ||
212 | |||
213 | msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ; | ||
214 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); | ||
215 | if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym, | ||
216 | msg_size, key, qat_alg_callback, 0, | ||
217 | &inst->sym_rx)) | ||
218 | goto err; | ||
219 | |||
220 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i); | ||
221 | if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, | ||
222 | msg_size, key, qat_alg_callback, 0, | ||
223 | &inst->rnd_rx)) | ||
224 | goto err; | ||
225 | |||
226 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); | ||
227 | if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, | ||
228 | msg_size, key, qat_alg_callback, 0, | ||
229 | &inst->pke_rx)) | ||
230 | goto err; | ||
231 | } | ||
232 | return 0; | ||
233 | err: | ||
234 | qat_crypto_free_instances(accel_dev); | ||
235 | return -ENOMEM; | ||
236 | } | ||
237 | |||
238 | static int qat_crypto_init(struct adf_accel_dev *accel_dev) | ||
239 | { | ||
240 | if (qat_crypto_create_instances(accel_dev)) | ||
241 | return -EFAULT; | ||
242 | |||
243 | return 0; | ||
244 | } | ||
245 | |||
246 | static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev) | ||
247 | { | ||
248 | return qat_crypto_free_instances(accel_dev); | ||
249 | } | ||
250 | |||
251 | static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev, | ||
252 | enum adf_event event) | ||
253 | { | ||
254 | int ret; | ||
255 | |||
256 | switch (event) { | ||
257 | case ADF_EVENT_INIT: | ||
258 | ret = qat_crypto_init(accel_dev); | ||
259 | break; | ||
260 | case ADF_EVENT_SHUTDOWN: | ||
261 | ret = qat_crypto_shutdown(accel_dev); | ||
262 | break; | ||
263 | case ADF_EVENT_RESTARTING: | ||
264 | case ADF_EVENT_RESTARTED: | ||
265 | case ADF_EVENT_START: | ||
266 | case ADF_EVENT_STOP: | ||
267 | default: | ||
268 | ret = 0; | ||
269 | } | ||
270 | return ret; | ||
271 | } | ||
272 | |||
273 | int qat_crypto_register(void) | ||
274 | { | ||
275 | memset(&qat_crypto, 0, sizeof(qat_crypto)); | ||
276 | qat_crypto.event_hld = qat_crypto_event_handler; | ||
277 | qat_crypto.name = "qat_crypto"; | ||
278 | return adf_service_register(&qat_crypto); | ||
279 | } | ||
280 | |||
281 | int qat_crypto_unregister(void) | ||
282 | { | ||
283 | return adf_service_unregister(&qat_crypto); | ||
284 | } | ||
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h new file mode 100644 index 000000000000..ab8468d11ddb --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_crypto.h | |||
@@ -0,0 +1,83 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef _QAT_CRYPTO_INSTANCE_H_ | ||
48 | #define _QAT_CRYPTO_INSTANCE_H_ | ||
49 | |||
50 | #include <linux/list.h> | ||
51 | #include <linux/slab.h> | ||
52 | #include "adf_accel_devices.h" | ||
53 | #include "icp_qat_fw_la.h" | ||
54 | |||
55 | struct qat_crypto_instance { | ||
56 | struct adf_etr_ring_data *sym_tx; | ||
57 | struct adf_etr_ring_data *sym_rx; | ||
58 | struct adf_etr_ring_data *pke_tx; | ||
59 | struct adf_etr_ring_data *pke_rx; | ||
60 | struct adf_etr_ring_data *rnd_tx; | ||
61 | struct adf_etr_ring_data *rnd_rx; | ||
62 | struct adf_accel_dev *accel_dev; | ||
63 | struct list_head list; | ||
64 | unsigned long state; | ||
65 | int id; | ||
66 | atomic_t refctr; | ||
67 | }; | ||
68 | |||
69 | struct qat_crypto_request_buffs { | ||
70 | struct qat_alg_buf_list *bl; | ||
71 | dma_addr_t blp; | ||
72 | struct qat_alg_buf_list *blout; | ||
73 | dma_addr_t bloutp; | ||
74 | size_t sz; | ||
75 | }; | ||
76 | |||
77 | struct qat_crypto_request { | ||
78 | struct icp_qat_fw_la_bulk_req req; | ||
79 | struct qat_alg_session_ctx *ctx; | ||
80 | struct aead_request *areq; | ||
81 | struct qat_crypto_request_buffs buf; | ||
82 | }; | ||
83 | #endif | ||
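
Because qat_alg_init() sets the AEAD reqsize to sizeof(struct aead_request) + sizeof(struct qat_crypto_request), this per-request state rides in the tail of every aead_request and needs no separate allocation; the request paths above recover it with:

	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
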
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c new file mode 100644 index 000000000000..9b8a31521ff3 --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_hal.c | |||
@@ -0,0 +1,1393 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/slab.h> | ||
48 | |||
49 | #include "adf_accel_devices.h" | ||
50 | #include "adf_common_drv.h" | ||
51 | #include "icp_qat_hal.h" | ||
52 | #include "icp_qat_uclo.h" | ||
53 | |||
54 | #define BAD_REGADDR 0xffff | ||
55 | #define MAX_RETRY_TIMES 10000 | ||
56 | #define INIT_CTX_ARB_VALUE 0x0 | ||
57 | #define INIT_CTX_ENABLE_VALUE 0x0 | ||
58 | #define INIT_PC_VALUE 0x0 | ||
59 | #define INIT_WAKEUP_EVENTS_VALUE 0x1 | ||
60 | #define INIT_SIG_EVENTS_VALUE 0x1 | ||
61 | #define INIT_CCENABLE_VALUE 0x2000 | ||
62 | #define RST_CSR_QAT_LSB 20 | ||
63 | #define RST_CSR_AE_LSB 0 | ||
64 | #define MC_TIMESTAMP_ENABLE (0x1 << 7) | ||
65 | |||
66 | #define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \ | ||
67 | (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \ | ||
68 | (~(1 << CE_REG_PAR_ERR_BITPOS))) | ||
69 | #define INSERT_IMMED_GPRA_CONST(inst, const_val) \ | ||
70 | (inst = ((inst & 0xFFFF00C03FFull) | \ | ||
71 | ((((const_val) << 12) & 0x0FF00000ull) | \ | ||
72 | (((const_val) << 10) & 0x0003FC00ull)))) | ||
73 | #define INSERT_IMMED_GPRB_CONST(inst, const_val) \ | ||
74 | (inst = ((inst & 0xFFFF00FFF00ull) | \ | ||
75 | ((((const_val) << 12) & 0x0FF00000ull) | \ | ||
76 | (((const_val) << 0) & 0x000000FFull)))) | ||
77 | |||
78 | #define AE(handle, ae) handle->hal_handle->aes[ae] | ||
79 | |||
80 | static const uint64_t inst_4b[] = { | ||
81 | 0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull, | ||
82 | 0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, | ||
83 | 0x0A021000000ull | ||
84 | }; | ||
85 | |||
86 | static const uint64_t inst[] = { | ||
87 | 0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull, | ||
88 | 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, | ||
89 | 0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull, | ||
90 | 0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, | ||
91 | 0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull, | ||
92 | 0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull, | ||
93 | 0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull, | ||
94 | 0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull, | ||
95 | 0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull, | ||
96 | 0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull, | ||
97 | 0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull, | ||
98 | 0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull, | ||
99 | 0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull, | ||
100 | 0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull, | ||
101 | 0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull, | ||
102 | 0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull, | ||
103 | 0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull, | ||
104 | 0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull, | ||
105 | 0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull, | ||
106 | 0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull, | ||
107 | 0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull, | ||
108 | 0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull, | ||
109 | }; | ||
110 | |||
111 | void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle, | ||
112 | unsigned char ae, unsigned int ctx_mask) | ||
113 | { | ||
114 | AE(handle, ae).live_ctx_mask = ctx_mask; | ||
115 | } | ||
116 | |||
117 | #define CSR_RETRY_TIMES 500 | ||
118 | static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle, | ||
119 | unsigned char ae, unsigned int csr, | ||
120 | unsigned int *value) | ||
121 | { | ||
122 | unsigned int iterations = CSR_RETRY_TIMES; | ||
123 | |||
124 | do { | ||
125 | *value = GET_AE_CSR(handle, ae, csr); | ||
126 | if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS)) | ||
127 | return 0; | ||
128 | } while (iterations--); | ||
129 | |||
130 | pr_err("QAT: Read CSR timeout\n"); | ||
131 | return -EFAULT; | ||
132 | } | ||
133 | |||
134 | static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle, | ||
135 | unsigned char ae, unsigned int csr, | ||
136 | unsigned int value) | ||
137 | { | ||
138 | unsigned int iterations = CSR_RETRY_TIMES; | ||
139 | |||
140 | do { | ||
141 | SET_AE_CSR(handle, ae, csr, value); | ||
142 | if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS)) | ||
143 | return 0; | ||
144 | } while (iterations--); | ||
145 | |||
146 | pr_err("QAT: Write CSR Timeout\n"); | ||
147 | return -EFAULT; | ||
148 | } | ||
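Both CSR accessors above follow the same bounded-retry discipline: issue the access, poll LOCAL_CSR_STATUS, and give up after CSR_RETRY_TIMES attempts. Below is a minimal user-space sketch of that pattern; the hw_read/hw_busy register model is a stand-in for GET_AE_CSR and the LCS_STATUS check, not the real hardware interface.

	#include <stdio.h>

	#define CSR_RETRY_TIMES 500

	/* Toy register model standing in for GET_AE_CSR/LOCAL_CSR_STATUS. */
	static unsigned int fake_csr = 0xabcd;
	static int busy_left = 3;	/* pretend the bus stays busy 3 times */

	static unsigned int hw_read(unsigned int csr) { (void)csr; return fake_csr; }
	static int hw_busy(void) { return busy_left-- > 0; }

	/* Bounded-retry read: 0 on success, -1 on timeout. */
	static int rd_csr_retry(unsigned int csr, unsigned int *value)
	{
		int iterations = CSR_RETRY_TIMES;

		do {
			*value = hw_read(csr);
			if (!hw_busy())
				return 0;	/* access latched, value valid */
		} while (iterations--);

		return -1;			/* mirrors the -EFAULT path above */
	}

	int main(void)
	{
		unsigned int v;
		int rc = rd_csr_retry(0, &v);

		printf("rc=%d value=0x%x\n", rc, v);
		return 0;
	}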
149 | |||
150 | static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle, | ||
151 | unsigned char ae, unsigned char ctx, | ||
152 | unsigned int *events) | ||
153 | { | ||
154 | unsigned int cur_ctx; | ||
155 | |||
156 | qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); | ||
157 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); | ||
158 | qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events); | ||
159 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); | ||
160 | } | ||
161 | |||
162 | static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle, | ||
163 | unsigned char ae, unsigned int cycles, | ||
164 | int chk_inactive) | ||
165 | { | ||
166 | unsigned int base_cnt = 0, cur_cnt = 0; | ||
167 | unsigned int csr = (1 << ACS_ABO_BITPOS); | ||
168 | int times = MAX_RETRY_TIMES; | ||
169 | int elapsed_cycles = 0; | ||
170 | |||
171 | qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt); | ||
172 | base_cnt &= 0xffff; | ||
173 | while ((int)cycles > elapsed_cycles && times--) { | ||
174 | if (chk_inactive) | ||
175 | qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr); | ||
176 | |||
177 | qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt); | ||
178 | cur_cnt &= 0xffff; | ||
179 | elapsed_cycles = cur_cnt - base_cnt; | ||
180 | |||
181 | if (elapsed_cycles < 0) | ||
182 | elapsed_cycles += 0x10000; | ||
183 | |||
184 | /* ensure at least 8 cycles have elapsed in wait_cycles */ | ||
185 | if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS))) | ||
186 | return 0; | ||
187 | } | ||
188 | if (times < 0) { | ||
189 | pr_err("QAT: wait_num_cycles timeout\n"); | ||
190 | return -EFAULT; | ||
191 | } | ||
192 | return 0; | ||
193 | } | ||
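PROFILE_COUNT is a free-running 16-bit counter, so the elapsed-cycle arithmetic above has to tolerate one wraparound between the base and current samples. A standalone sketch of just that computation:

	#include <assert.h>

	/* Wrap-safe delta between two 16-bit PROFILE_COUNT samples. */
	static int elapsed16(unsigned int base_cnt, unsigned int cur_cnt)
	{
		int d = (int)(cur_cnt & 0xffff) - (int)(base_cnt & 0xffff);

		return d < 0 ? d + 0x10000 : d;
	}

	int main(void)
	{
		assert(elapsed16(0x0010, 0x0018) == 8);	/* no wrap */
		assert(elapsed16(0xfffc, 0x0004) == 8);	/* counter wrapped */
		return 0;
	}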
194 | |||
195 | #define CLR_BIT(wrd, bit) ((wrd) & ~(1 << (bit))) | ||
196 | #define SET_BIT(wrd, bit) ((wrd) | (1 << (bit))) | ||
197 | |||
198 | int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle, | ||
199 | unsigned char ae, unsigned char mode) | ||
200 | { | ||
201 | unsigned int csr, new_csr; | ||
202 | |||
203 | if ((mode != 4) && (mode != 8)) { | ||
204 | pr_err("QAT: bad ctx mode=%d\n", mode); | ||
205 | return -EINVAL; | ||
206 | } | ||
207 | |||
208 | /* Sets the acceleration engine context mode to either four or eight */ | ||
209 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); | ||
210 | csr &= IGNORE_W1C_MASK; | ||
211 | new_csr = (mode == 4) ? | ||
212 | SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) : | ||
213 | CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS); | ||
214 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr); | ||
215 | return 0; | ||
216 | } | ||
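CTX_ENABLES carries write-1-to-clear error latches (breakpoint, control-store parity, register parity), which is why every read-modify-write above first applies IGNORE_W1C_MASK: writing a latched bit back as 1 would silently acknowledge it. A small illustration of that discipline, with made-up bit positions rather than the driver's real ones:

	#include <assert.h>

	/* Illustrative W1C bit positions, not the driver's real ones. */
	#define BP_BIT   (1u << 27)
	#define PAR_BIT  (1u << 25)
	#define W1C_MASK (~(BP_BIT | PAR_BIT))

	/* Set a mode bit without acknowledging any W1C status latch. */
	static unsigned int set_mode(unsigned int csr, unsigned int mode_bit)
	{
		return (csr & W1C_MASK) | mode_bit;
	}

	int main(void)
	{
		unsigned int csr = BP_BIT | 0x1;	/* breakpoint latched */
		unsigned int out = set_mode(csr, 1u << 4);

		assert(!(out & BP_BIT));	/* we write 0, leaving the latch alone */
		assert(out & (1u << 4));
		return 0;
	}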
217 | |||
218 | int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle, | ||
219 | unsigned char ae, unsigned char mode) | ||
220 | { | ||
221 | unsigned int csr, new_csr; | ||
222 | |||
223 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); | ||
224 | csr &= IGNORE_W1C_MASK; | ||
225 | |||
226 | new_csr = (mode) ? | ||
227 | SET_BIT(csr, CE_NN_MODE_BITPOS) : | ||
228 | CLR_BIT(csr, CE_NN_MODE_BITPOS); | ||
229 | |||
230 | if (new_csr != csr) | ||
231 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr); | ||
232 | |||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle, | ||
237 | unsigned char ae, enum icp_qat_uof_regtype lm_type, | ||
238 | unsigned char mode) | ||
239 | { | ||
240 | unsigned int csr, new_csr; | ||
241 | |||
242 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); | ||
243 | csr &= IGNORE_W1C_MASK; | ||
244 | switch (lm_type) { | ||
245 | case ICP_LMEM0: | ||
246 | new_csr = (mode) ? | ||
247 | SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) : | ||
248 | CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS); | ||
249 | break; | ||
250 | case ICP_LMEM1: | ||
251 | new_csr = (mode) ? | ||
252 | SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) : | ||
253 | CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS); | ||
254 | break; | ||
255 | default: | ||
256 | pr_err("QAT: lmType = 0x%x\n", lm_type); | ||
257 | return -EINVAL; | ||
258 | } | ||
259 | |||
260 | if (new_csr != csr) | ||
261 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr); | ||
262 | return 0; | ||
263 | } | ||
264 | |||
265 | static unsigned short qat_hal_get_reg_addr(unsigned int type, | ||
266 | unsigned short reg_num) | ||
267 | { | ||
268 | unsigned short reg_addr; | ||
269 | |||
270 | switch (type) { | ||
271 | case ICP_GPA_ABS: | ||
272 | case ICP_GPB_ABS: | ||
273 | reg_addr = 0x80 | (reg_num & 0x7f); | ||
274 | break; | ||
275 | case ICP_GPA_REL: | ||
276 | case ICP_GPB_REL: | ||
277 | reg_addr = reg_num & 0x1f; | ||
278 | break; | ||
279 | case ICP_SR_RD_REL: | ||
280 | case ICP_SR_WR_REL: | ||
281 | case ICP_SR_REL: | ||
282 | reg_addr = 0x180 | (reg_num & 0x1f); | ||
283 | break; | ||
284 | case ICP_SR_ABS: | ||
285 | reg_addr = 0x140 | ((reg_num & 0x3) << 1); | ||
286 | break; | ||
287 | case ICP_DR_RD_REL: | ||
288 | case ICP_DR_WR_REL: | ||
289 | case ICP_DR_REL: | ||
290 | reg_addr = 0x1c0 | (reg_num & 0x1f); | ||
291 | break; | ||
292 | case ICP_DR_ABS: | ||
293 | reg_addr = 0x100 | ((reg_num & 0x3) << 1); | ||
294 | break; | ||
295 | case ICP_NEIGH_REL: | ||
296 | reg_addr = 0x280 | (reg_num & 0x1f); | ||
297 | break; | ||
298 | case ICP_LMEM0: | ||
299 | reg_addr = 0x200; | ||
300 | break; | ||
301 | case ICP_LMEM1: | ||
302 | reg_addr = 0x220; | ||
303 | break; | ||
304 | case ICP_NO_DEST: | ||
305 | reg_addr = 0x300 | (reg_num & 0xff); | ||
306 | break; | ||
307 | default: | ||
308 | reg_addr = BAD_REGADDR; | ||
309 | break; | ||
310 | } | ||
311 | return reg_addr; | ||
312 | } | ||
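Each register bank gets a distinct base in the encoded address space (0x80 for absolute GPRs, 0x180 for SRAM transfer registers, 0x300 for immediate constants, and so on), with the register index folded into the low bits. A hedged standalone check of a subset of those mappings; the enum here is an illustrative stand-in, not the driver's icp_qat_uof_regtype:

	#include <assert.h>

	enum regtype { GPA_ABS, SR_REL, NO_DEST };	/* illustrative subset */

	static unsigned short reg_addr(enum regtype t, unsigned short n)
	{
		switch (t) {
		case GPA_ABS: return 0x80  | (n & 0x7f);	/* absolute GPRs */
		case SR_REL:  return 0x180 | (n & 0x1f);	/* relative SRAM xfer */
		case NO_DEST: return 0x300 | (n & 0xff);	/* immediate consts */
		}
		return 0xffff;					/* BAD_REGADDR */
	}

	int main(void)
	{
		assert(reg_addr(GPA_ABS, 5) == 0x85);
		assert(reg_addr(NO_DEST, 0x34) == 0x334);
		return 0;
	}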
313 | |||
314 | void qat_hal_reset(struct icp_qat_fw_loader_handle *handle) | ||
315 | { | ||
316 | unsigned int ae_reset_csr; | ||
317 | |||
318 | ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET); | ||
319 | ae_reset_csr |= handle->hal_handle->ae_mask << RST_CSR_AE_LSB; | ||
320 | ae_reset_csr |= handle->hal_handle->slice_mask << RST_CSR_QAT_LSB; | ||
321 | SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr); | ||
322 | } | ||
323 | |||
324 | static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle, | ||
325 | unsigned char ae, unsigned int ctx_mask, | ||
326 | unsigned int ae_csr, unsigned int csr_val) | ||
327 | { | ||
328 | unsigned int ctx, cur_ctx; | ||
329 | |||
330 | qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); | ||
331 | |||
332 | for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { | ||
333 | if (!(ctx_mask & (1 << ctx))) | ||
334 | continue; | ||
335 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); | ||
336 | qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val); | ||
337 | } | ||
338 | |||
339 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); | ||
340 | } | ||
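All of the indirect-CSR helpers share one shape: save CSR_CTX_POINTER, visit every context selected by the mask, then restore the pointer so the caller's view is unchanged. A schematic of the mask walk, assuming eight contexts per AE as ICP_QAT_UCLO_MAX_CTX suggests:

	#include <stdio.h>

	#define MAX_CTX 8	/* assumed value of ICP_QAT_UCLO_MAX_CTX */

	/* Visit every context selected by the mask, lowest bit first. */
	static void for_each_ctx(unsigned int ctx_mask, void (*visit)(unsigned int))
	{
		unsigned int ctx;

		for (ctx = 0; ctx < MAX_CTX; ctx++) {
			if (!(ctx_mask & (1u << ctx)))
				continue;
			visit(ctx);
		}
	}

	static void show(unsigned int ctx) { printf("ctx %u\n", ctx); }

	int main(void)
	{
		for_each_ctx(0x55, show);	/* 0,2,4,6: the 4-ctx-mode pattern */
		return 0;
	}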
341 | |||
342 | static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle, | ||
343 | unsigned char ae, unsigned char ctx, | ||
344 | unsigned int ae_csr, unsigned int *csr_val) | ||
345 | { | ||
346 | unsigned int cur_ctx; | ||
347 | |||
348 | qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); | ||
349 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); | ||
350 | qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val); | ||
351 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); | ||
352 | } | ||
353 | |||
354 | static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle, | ||
355 | unsigned char ae, unsigned int ctx_mask, | ||
356 | unsigned int events) | ||
357 | { | ||
358 | unsigned int ctx, cur_ctx; | ||
359 | |||
360 | qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); | ||
361 | for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { | ||
362 | if (!(ctx_mask & (1 << ctx))) | ||
363 | continue; | ||
364 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); | ||
365 | qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events); | ||
366 | } | ||
367 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); | ||
368 | } | ||
369 | |||
370 | static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle, | ||
371 | unsigned char ae, unsigned int ctx_mask, | ||
372 | unsigned int events) | ||
373 | { | ||
374 | unsigned int ctx, cur_ctx; | ||
375 | |||
376 | qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); | ||
377 | for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { | ||
378 | if (!(ctx_mask & (1 << ctx))) | ||
379 | continue; | ||
380 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); | ||
381 | qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, | ||
382 | events); | ||
383 | } | ||
384 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); | ||
385 | } | ||
386 | |||
387 | static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle) | ||
388 | { | ||
389 | unsigned int base_cnt, cur_cnt; | ||
390 | unsigned char ae; | ||
391 | unsigned int times = MAX_RETRY_TIMES; | ||
392 | |||
393 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
394 | if (!(handle->hal_handle->ae_mask & (1 << ae))) | ||
395 | continue; | ||
396 | |||
397 | qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, | ||
398 | (unsigned int *)&base_cnt); | ||
399 | base_cnt &= 0xffff; | ||
400 | |||
401 | do { | ||
402 | qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, | ||
403 | (unsigned int *)&cur_cnt); | ||
404 | cur_cnt &= 0xffff; | ||
405 | } while (times-- && (cur_cnt == base_cnt)); | ||
406 | |||
407 | if (cur_cnt == base_cnt) { | ||
408 | pr_err("QAT: AE%d is inactive\n", ae); | ||
409 | return -EFAULT; | ||
410 | } | ||
411 | } | ||
412 | |||
413 | return 0; | ||
414 | } | ||
415 | |||
416 | static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle) | ||
417 | { | ||
418 | unsigned int misc_ctl; | ||
419 | unsigned char ae; | ||
420 | |||
421 | /* stop the timestamp timers */ | ||
422 | misc_ctl = GET_GLB_CSR(handle, MISC_CONTROL); | ||
423 | if (misc_ctl & MC_TIMESTAMP_ENABLE) | ||
424 | SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl & | ||
425 | (~MC_TIMESTAMP_ENABLE)); | ||
426 | |||
427 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
428 | if (!(handle->hal_handle->ae_mask & (1 << ae))) | ||
429 | continue; | ||
430 | qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0); | ||
431 | qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0); | ||
432 | } | ||
433 | /* start timestamp timers */ | ||
434 | SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl | MC_TIMESTAMP_ENABLE); | ||
435 | } | ||
436 | |||
437 | #define ESRAM_AUTO_TINIT (1<<2) | ||
438 | #define ESRAM_AUTO_TINIT_DONE (1<<3) | ||
439 | #define ESRAM_AUTO_INIT_USED_CYCLES (1640) | ||
440 | #define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C | ||
441 | static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle) | ||
442 | { | ||
443 | void __iomem *csr_addr = handle->hal_ep_csr_addr_v + | ||
444 | ESRAM_AUTO_INIT_CSR_OFFSET; | ||
445 | unsigned int csr_val, times = 30; | ||
446 | |||
447 | csr_val = ADF_CSR_RD(csr_addr, 0); | ||
448 | if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE)) | ||
449 | return 0; | ||
450 | |||
451 | csr_val = ADF_CSR_RD(csr_addr, 0); | ||
452 | csr_val |= ESRAM_AUTO_TINIT; | ||
453 | ADF_CSR_WR(csr_addr, 0, csr_val); | ||
454 | |||
455 | do { | ||
456 | qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0); | ||
457 | csr_val = ADF_CSR_RD(csr_addr, 0); | ||
458 | } while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--); | ||
459 | if (!(csr_val & ESRAM_AUTO_TINIT_DONE)) { | ||
460 | pr_err("QAT: failed to init eSRAM\n"); | ||
461 | return -EFAULT; | ||
462 | } | ||
463 | return 0; | ||
464 | } | ||
465 | |||
466 | #define SHRAM_INIT_CYCLES 2060 | ||
467 | int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle) | ||
468 | { | ||
469 | unsigned int ae_reset_csr; | ||
470 | unsigned char ae; | ||
471 | unsigned int clk_csr; | ||
472 | unsigned int times = 100; | ||
473 | unsigned int csr; | ||
474 | |||
475 | /* write to the reset csr */ | ||
476 | ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET); | ||
477 | ae_reset_csr &= ~(handle->hal_handle->ae_mask << RST_CSR_AE_LSB); | ||
478 | ae_reset_csr &= ~(handle->hal_handle->slice_mask << RST_CSR_QAT_LSB); | ||
479 | do { | ||
480 | SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr); | ||
481 | if (!(times--)) | ||
482 | goto out_err; | ||
483 | csr = GET_GLB_CSR(handle, ICP_RESET); | ||
484 | } while ((handle->hal_handle->ae_mask | | ||
485 | (handle->hal_handle->slice_mask << RST_CSR_QAT_LSB)) & csr); | ||
486 | /* enable clock */ | ||
487 | clk_csr = GET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE); | ||
488 | clk_csr |= handle->hal_handle->ae_mask << 0; | ||
489 | clk_csr |= handle->hal_handle->slice_mask << 20; | ||
490 | SET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE, clk_csr); | ||
491 | if (qat_hal_check_ae_alive(handle)) | ||
492 | goto out_err; | ||
493 | |||
494 | /* Set undefined power-up/reset states to reasonable default values */ | ||
495 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
496 | if (!(handle->hal_handle->ae_mask & (1 << ae))) | ||
497 | continue; | ||
498 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, | ||
499 | INIT_CTX_ENABLE_VALUE); | ||
500 | qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX, | ||
501 | CTX_STS_INDIRECT, | ||
502 | handle->hal_handle->upc_mask & | ||
503 | INIT_PC_VALUE); | ||
504 | qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE); | ||
505 | qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE); | ||
506 | qat_hal_put_wakeup_event(handle, ae, | ||
507 | ICP_QAT_UCLO_AE_ALL_CTX, | ||
508 | INIT_WAKEUP_EVENTS_VALUE); | ||
509 | qat_hal_put_sig_event(handle, ae, | ||
510 | ICP_QAT_UCLO_AE_ALL_CTX, | ||
511 | INIT_SIG_EVENTS_VALUE); | ||
512 | } | ||
513 | if (qat_hal_init_esram(handle)) | ||
514 | goto out_err; | ||
515 | if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0)) | ||
516 | goto out_err; | ||
517 | qat_hal_reset_timestamp(handle); | ||
518 | |||
519 | return 0; | ||
520 | out_err: | ||
521 | pr_err("QAT: failed to get device out of reset\n"); | ||
522 | return -EFAULT; | ||
523 | } | ||
524 | |||
525 | static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle, | ||
526 | unsigned char ae, unsigned int ctx_mask) | ||
527 | { | ||
528 | unsigned int ctx; | ||
529 | |||
530 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx); | ||
531 | ctx &= IGNORE_W1C_MASK & | ||
532 | (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS)); | ||
533 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); | ||
534 | } | ||
535 | |||
536 | static uint64_t qat_hal_parity_64bit(uint64_t word) | ||
537 | { | ||
538 | word ^= word >> 1; | ||
539 | word ^= word >> 2; | ||
540 | word ^= word >> 4; | ||
541 | word ^= word >> 8; | ||
542 | word ^= word >> 16; | ||
543 | word ^= word >> 32; | ||
544 | return word & 1; | ||
545 | } | ||
546 | |||
547 | static uint64_t qat_hal_set_uword_ecc(uint64_t uword) | ||
548 | { | ||
549 | uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL, | ||
550 | bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL, | ||
551 | bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL, | ||
552 | bit6_mask = 0xdaf69a46910ULL; | ||
553 | |||
554 | /* clear the ecc bits */ | ||
555 | uword &= ~(0x7fULL << 0x2C); | ||
556 | uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C; | ||
557 | uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D; | ||
558 | uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E; | ||
559 | uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F; | ||
560 | uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30; | ||
561 | uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31; | ||
562 | uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32; | ||
563 | return uword; | ||
564 | } | ||
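The parity helper XOR-folds the word onto itself so that bit 0 ends up holding the parity of all 64 bits; qat_hal_set_uword_ecc then computes seven such parities over fixed bit masks and deposits them in bits 44-50 of the microword. A minimal check of the fold itself:

	#include <assert.h>
	#include <stdint.h>

	/* XOR-fold: after the cascade, bit 0 is the parity of the word. */
	static uint64_t parity64(uint64_t w)
	{
		w ^= w >> 1;  w ^= w >> 2;  w ^= w >> 4;
		w ^= w >> 8;  w ^= w >> 16; w ^= w >> 32;
		return w & 1;
	}

	int main(void)
	{
		assert(parity64(0) == 0);
		assert(parity64(1) == 1);
		assert(parity64(0x8000000000000001ULL) == 0);	/* two bits set */
		assert(parity64(0x7) == 1);			/* three bits set */
		return 0;
	}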
565 | |||
566 | void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, | ||
567 | unsigned char ae, unsigned int uaddr, | ||
568 | unsigned int words_num, uint64_t *uword) | ||
569 | { | ||
570 | unsigned int ustore_addr; | ||
571 | unsigned int i; | ||
572 | |||
573 | qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); | ||
574 | uaddr |= UA_ECS; | ||
575 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); | ||
576 | for (i = 0; i < words_num; i++) { | ||
577 | unsigned int uwrd_lo, uwrd_hi; | ||
578 | uint64_t tmp; | ||
579 | |||
580 | tmp = qat_hal_set_uword_ecc(uword[i]); | ||
581 | uwrd_lo = (unsigned int)(tmp & 0xffffffff); | ||
582 | uwrd_hi = (unsigned int)(tmp >> 0x20); | ||
583 | qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo); | ||
584 | qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi); | ||
585 | } | ||
586 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); | ||
587 | } | ||
588 | |||
589 | static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle, | ||
590 | unsigned char ae, unsigned int ctx_mask) | ||
591 | { | ||
592 | unsigned int ctx; | ||
593 | |||
594 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx); | ||
595 | ctx &= IGNORE_W1C_MASK; | ||
596 | ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF; | ||
597 | ctx |= (ctx_mask << CE_ENABLE_BITPOS); | ||
598 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); | ||
599 | } | ||
600 | |||
601 | static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) | ||
602 | { | ||
603 | unsigned char ae; | ||
604 | unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX; | ||
605 | int times = MAX_RETRY_TIMES; | ||
606 | unsigned int csr_val = 0; | ||
607 | unsigned short reg; | ||
608 | unsigned int savctx = 0; | ||
609 | int ret = 0; | ||
610 | |||
611 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
612 | if (!(handle->hal_handle->ae_mask & (1 << ae))) | ||
613 | continue; | ||
614 | for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) { | ||
615 | qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS, | ||
616 | reg, 0); | ||
617 | qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS, | ||
618 | reg, 0); | ||
619 | } | ||
620 | qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); | ||
621 | csr_val &= ~(1 << MMC_SHARE_CS_BITPOS); | ||
622 | qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val); | ||
623 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val); | ||
624 | csr_val &= IGNORE_W1C_MASK; | ||
625 | csr_val |= CE_NN_MODE; | ||
626 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val); | ||
627 | qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst), | ||
628 | (uint64_t *)inst); | ||
629 | qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, | ||
630 | handle->hal_handle->upc_mask & | ||
631 | INIT_PC_VALUE); | ||
632 | qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); | ||
633 | qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0); | ||
634 | qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY); | ||
635 | qat_hal_wr_indr_csr(handle, ae, ctx_mask, | ||
636 | CTX_SIG_EVENTS_INDIRECT, 0); | ||
637 | qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0); | ||
638 | qat_hal_enable_ctx(handle, ae, ctx_mask); | ||
639 | } | ||
640 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
641 | if (!(handle->hal_handle->ae_mask & (1 << ae))) | ||
642 | continue; | ||
643 | /* wait for AE to finish */ | ||
644 | do { | ||
645 | ret = qat_hal_wait_cycles(handle, ae, 20, 1); | ||
646 | } while (ret && times--); | ||
647 | |||
648 | if (ret) { | ||
649 | pr_err("QAT: clear GPR of AE %d failed\n", ae); | ||
650 | return -EINVAL; | ||
651 | } | ||
652 | qat_hal_disable_ctx(handle, ae, ctx_mask); | ||
653 | qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, | ||
654 | savctx & ACS_ACNO); | ||
655 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, | ||
656 | INIT_CTX_ENABLE_VALUE); | ||
657 | qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, | ||
658 | handle->hal_handle->upc_mask & | ||
659 | INIT_PC_VALUE); | ||
660 | qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE); | ||
661 | qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE); | ||
662 | qat_hal_put_wakeup_event(handle, ae, ctx_mask, | ||
663 | INIT_WAKEUP_EVENTS_VALUE); | ||
664 | qat_hal_put_sig_event(handle, ae, ctx_mask, | ||
665 | INIT_SIG_EVENTS_VALUE); | ||
666 | } | ||
667 | return 0; | ||
668 | } | ||
669 | |||
670 | #define ICP_DH895XCC_AE_OFFSET 0x20000 | ||
671 | #define ICP_DH895XCC_CAP_OFFSET (ICP_DH895XCC_AE_OFFSET + 0x10000) | ||
672 | #define LOCAL_TO_XFER_REG_OFFSET 0x800 | ||
673 | #define ICP_DH895XCC_EP_OFFSET 0x3a000 | ||
674 | #define ICP_DH895XCC_PMISC_BAR 1 | ||
675 | int qat_hal_init(struct adf_accel_dev *accel_dev) | ||
676 | { | ||
677 | unsigned char ae; | ||
678 | unsigned int max_en_ae_id = 0; | ||
679 | struct icp_qat_fw_loader_handle *handle; | ||
680 | struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; | ||
681 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
682 | struct adf_bar *bar = &pci_info->pci_bars[ADF_DH895XCC_PMISC_BAR]; | ||
683 | |||
684 | handle = kzalloc(sizeof(*handle), GFP_KERNEL); | ||
685 | if (!handle) | ||
686 | return -ENOMEM; | ||
687 | |||
688 | handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr + | ||
689 | ICP_DH895XCC_CAP_OFFSET; | ||
690 | handle->hal_cap_ae_xfer_csr_addr_v = bar->virt_addr + | ||
691 | ICP_DH895XCC_AE_OFFSET; | ||
692 | handle->hal_ep_csr_addr_v = bar->virt_addr + ICP_DH895XCC_EP_OFFSET; | ||
693 | handle->hal_cap_ae_local_csr_addr_v = | ||
694 | handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET; | ||
695 | |||
696 | handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL); | ||
697 | if (!handle->hal_handle) | ||
698 | goto out_hal_handle; | ||
699 | handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid; | ||
700 | handle->hal_handle->ae_mask = hw_data->ae_mask; | ||
701 | handle->hal_handle->slice_mask = hw_data->accel_mask; | ||
702 | /* create AE objects */ | ||
703 | handle->hal_handle->upc_mask = 0x1ffff; | ||
704 | handle->hal_handle->max_ustore = 0x4000; | ||
705 | for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) { | ||
706 | if (!(hw_data->ae_mask & (1 << ae))) | ||
707 | continue; | ||
708 | handle->hal_handle->aes[ae].free_addr = 0; | ||
709 | handle->hal_handle->aes[ae].free_size = | ||
710 | handle->hal_handle->max_ustore; | ||
711 | handle->hal_handle->aes[ae].ustore_size = | ||
712 | handle->hal_handle->max_ustore; | ||
713 | handle->hal_handle->aes[ae].live_ctx_mask = | ||
714 | ICP_QAT_UCLO_AE_ALL_CTX; | ||
715 | max_en_ae_id = ae; | ||
716 | } | ||
717 | handle->hal_handle->ae_max_num = max_en_ae_id + 1; | ||
718 | /* take all AEs out of reset */ | ||
719 | if (qat_hal_clr_reset(handle)) { | ||
720 | pr_err("QAT: qat_hal_clr_reset error\n"); | ||
721 | goto out_err; | ||
722 | } | ||
723 | if (qat_hal_clear_gpr(handle)) | ||
724 | goto out_err; | ||
725 | /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */ | ||
726 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
727 | unsigned int csr_val = 0; | ||
728 | |||
729 | if (!(hw_data->ae_mask & (1 << ae))) | ||
730 | continue; | ||
731 | qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val); | ||
732 | csr_val |= 0x1; | ||
733 | qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val); | ||
734 | } | ||
735 | accel_dev->fw_loader->fw_loader = handle; | ||
736 | return 0; | ||
737 | |||
738 | out_err: | ||
739 | kfree(handle->hal_handle); | ||
740 | out_hal_handle: | ||
741 | kfree(handle); | ||
742 | return -EFAULT; | ||
743 | } | ||
744 | |||
745 | void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle) | ||
746 | { | ||
747 | if (!handle) | ||
748 | return; | ||
749 | kfree(handle->hal_handle); | ||
750 | kfree(handle); | ||
751 | } | ||
752 | |||
753 | void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae, | ||
754 | unsigned int ctx_mask) | ||
755 | { | ||
756 | qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) & | ||
757 | ICP_QAT_UCLO_AE_ALL_CTX, 0x10000); | ||
758 | qat_hal_enable_ctx(handle, ae, ctx_mask); | ||
759 | } | ||
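Note the complement in qat_hal_start: the contexts named in ctx_mask are enabled, while the remaining contexts receive a wakeup-event value that keeps them parked. A one-line check of the mask arithmetic, assuming ICP_QAT_UCLO_AE_ALL_CTX is 0xff for eight contexts:

	#include <assert.h>

	#define ALL_CTX 0xff	/* assumed value of ICP_QAT_UCLO_AE_ALL_CTX */

	int main(void)
	{
		unsigned int ctx_mask = 0x0f;			/* start contexts 0-3 */
		unsigned int parked = (~ctx_mask) & ALL_CTX;

		assert(parked == 0xf0);				/* 4-7 stay parked */
		return 0;
	}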
760 | |||
761 | void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae, | ||
762 | unsigned int ctx_mask) | ||
763 | { | ||
764 | qat_hal_disable_ctx(handle, ae, ctx_mask); | ||
765 | } | ||
766 | |||
767 | void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, | ||
768 | unsigned char ae, unsigned int ctx_mask, unsigned int upc) | ||
769 | { | ||
770 | qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, | ||
771 | handle->hal_handle->upc_mask & upc); | ||
772 | } | ||
773 | |||
774 | static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle, | ||
775 | unsigned char ae, unsigned int uaddr, | ||
776 | unsigned int words_num, uint64_t *uword) | ||
777 | { | ||
778 | unsigned int i, uwrd_lo, uwrd_hi; | ||
779 | unsigned int ustore_addr, misc_control; | ||
780 | |||
781 | qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control); | ||
782 | qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, | ||
783 | misc_control & 0xfffffffb); | ||
784 | qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); | ||
785 | uaddr |= UA_ECS; | ||
786 | for (i = 0; i < words_num; i++) { | ||
787 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); | ||
788 | uaddr++; | ||
789 | qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo); | ||
790 | qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi); | ||
791 | uword[i] = uwrd_hi; | ||
792 | uword[i] = (uword[i] << 0x20) | uwrd_lo; | ||
793 | } | ||
794 | qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control); | ||
795 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); | ||
796 | } | ||
797 | |||
798 | void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, | ||
799 | unsigned char ae, unsigned int uaddr, | ||
800 | unsigned int words_num, unsigned int *data) | ||
801 | { | ||
802 | unsigned int i, ustore_addr; | ||
803 | |||
804 | qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); | ||
805 | uaddr |= UA_ECS; | ||
806 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); | ||
807 | for (i = 0; i < words_num; i++) { | ||
808 | unsigned int uwrd_lo, uwrd_hi, tmp; | ||
809 | |||
810 | uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) | | ||
811 | ((data[i] & 0xff00) << 2) | | ||
812 | (0x3 << 8) | (data[i] & 0xff); | ||
813 | uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28); | ||
814 | uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8; | ||
815 | tmp = ((data[i] >> 0x10) & 0xffff); | ||
816 | uwrd_hi |= (hweight32(tmp) & 0x1) << 9; | ||
817 | qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo); | ||
818 | qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi); | ||
819 | } | ||
820 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); | ||
821 | } | ||
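qat_hal_wr_umem scatters each 32-bit data word across the 64-bit ustore word and appends one parity bit per 16-bit half (hweight32 is the kernel's population count). A hedged user-space rendition of the packing, using GCC's __builtin_popcount in place of hweight32:

	#include <stdint.h>
	#include <stdio.h>

	/* Pack one 32-bit word the way qat_hal_wr_umem does (sketch). */
	static void pack_umem_word(uint32_t d, uint32_t *lo, uint32_t *hi)
	{
		*lo = ((d & 0x0fff0000) << 4) | (0x3 << 18) |
		      ((d & 0xff00) << 2) | (0x3 << 8) | (d & 0xff);
		*hi = (0xf << 4) | ((d & 0xf0000000) >> 28);
		*hi |= (uint32_t)(__builtin_popcount(d & 0xffff) & 1) << 8;
		*hi |= (uint32_t)(__builtin_popcount((d >> 16) & 0xffff) & 1) << 9;
	}

	int main(void)
	{
		uint32_t lo, hi;

		pack_umem_word(0xdeadbeef, &lo, &hi);
		printf("lo=0x%08x hi=0x%08x\n", lo, hi);
		return 0;
	}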
822 | |||
823 | #define MAX_EXEC_INST 100 | ||
824 | static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle, | ||
825 | unsigned char ae, unsigned char ctx, | ||
826 | uint64_t *micro_inst, unsigned int inst_num, | ||
827 | int code_off, unsigned int max_cycle, | ||
828 | unsigned int *endpc) | ||
829 | { | ||
830 | uint64_t savuwords[MAX_EXEC_INST]; | ||
831 | unsigned int ind_lm_addr0, ind_lm_addr1; | ||
832 | unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1; | ||
833 | unsigned int ind_cnt_sig; | ||
834 | unsigned int ind_sig, act_sig; | ||
835 | unsigned int csr_val = 0, newcsr_val; | ||
836 | unsigned int savctx; | ||
837 | unsigned int savcc, wakeup_events, savpc; | ||
838 | unsigned int ctxarb_ctl, ctx_enables; | ||
839 | |||
840 | if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) { | ||
841 | pr_err("QAT: invalid instruction num %d\n", inst_num); | ||
842 | return -EINVAL; | ||
843 | } | ||
844 | /* save current context */ | ||
845 | qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0); | ||
846 | qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1); | ||
847 | qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX, | ||
848 | &ind_lm_addr_byte0); | ||
849 | qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX, | ||
850 | &ind_lm_addr_byte1); | ||
851 | if (inst_num <= MAX_EXEC_INST) | ||
852 | qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords); | ||
853 | qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events); | ||
854 | qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc); | ||
855 | savpc &= handle->hal_handle->upc_mask; | ||
856 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); | ||
857 | ctx_enables &= IGNORE_W1C_MASK; | ||
858 | qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc); | ||
859 | qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); | ||
860 | qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl); | ||
861 | qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT, | ||
862 | &ind_cnt_sig); | ||
863 | qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig); | ||
864 | qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig); | ||
865 | /* execute the microcode */ | ||
866 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); | ||
867 | qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst); | ||
868 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0); | ||
869 | qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO); | ||
870 | if (code_off) | ||
871 | qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff); | ||
872 | qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY); | ||
873 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0); | ||
874 | qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0); | ||
875 | qat_hal_enable_ctx(handle, ae, (1 << ctx)); | ||
876 | /* wait for the microcode to finish */ | ||
877 | if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0) | ||
878 | return -EFAULT; | ||
879 | if (endpc) { | ||
880 | unsigned int ctx_status; | ||
881 | |||
882 | qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, | ||
883 | &ctx_status); | ||
884 | *endpc = ctx_status & handle->hal_handle->upc_mask; | ||
885 | } | ||
886 | /* restore the saved context */ | ||
887 | qat_hal_disable_ctx(handle, ae, (1 << ctx)); | ||
888 | if (inst_num <= MAX_EXEC_INST) | ||
889 | qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords); | ||
890 | qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events); | ||
891 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, | ||
892 | handle->hal_handle->upc_mask & savpc); | ||
893 | qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); | ||
894 | newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS); | ||
895 | qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val); | ||
896 | qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc); | ||
897 | qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO); | ||
898 | qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl); | ||
899 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), | ||
900 | LM_ADDR_0_INDIRECT, ind_lm_addr0); | ||
901 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), | ||
902 | LM_ADDR_1_INDIRECT, ind_lm_addr1); | ||
903 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), | ||
904 | INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0); | ||
905 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), | ||
906 | INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1); | ||
907 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), | ||
908 | FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig); | ||
909 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), | ||
910 | CTX_SIG_EVENTS_INDIRECT, ind_sig); | ||
911 | qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig); | ||
912 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); | ||
913 | |||
914 | return 0; | ||
915 | } | ||
916 | |||
917 | static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle, | ||
918 | unsigned char ae, unsigned char ctx, | ||
919 | enum icp_qat_uof_regtype reg_type, | ||
920 | unsigned short reg_num, unsigned int *data) | ||
921 | { | ||
922 | unsigned int savctx, uaddr, uwrd_lo, uwrd_hi; | ||
923 | unsigned int ctxarb_cntl, ustore_addr, ctx_enables; | ||
924 | unsigned short reg_addr; | ||
925 | int status = 0; | ||
926 | uint64_t insts, savuword; | ||
927 | |||
928 | reg_addr = qat_hal_get_reg_addr(reg_type, reg_num); | ||
929 | if (reg_addr == BAD_REGADDR) { | ||
930 | pr_err("QAT: bad regaddr=0x%x\n", reg_addr); | ||
931 | return -EINVAL; | ||
932 | } | ||
933 | switch (reg_type) { | ||
934 | case ICP_GPA_REL: | ||
935 | insts = 0xA070000000ull | (reg_addr & 0x3ff); | ||
936 | break; | ||
937 | default: | ||
938 | insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10); | ||
939 | break; | ||
940 | } | ||
941 | qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); | ||
942 | qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl); | ||
943 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); | ||
944 | ctx_enables &= IGNORE_W1C_MASK; | ||
945 | if (ctx != (savctx & ACS_ACNO)) | ||
946 | qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, | ||
947 | ctx & ACS_ACNO); | ||
948 | qat_hal_get_uwords(handle, ae, 0, 1, &savuword); | ||
949 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); | ||
950 | qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); | ||
951 | uaddr = UA_ECS; | ||
952 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); | ||
953 | insts = qat_hal_set_uword_ecc(insts); | ||
954 | uwrd_lo = (unsigned int)(insts & 0xffffffff); | ||
955 | uwrd_hi = (unsigned int)(insts >> 0x20); | ||
956 | qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo); | ||
957 | qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi); | ||
958 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); | ||
959 | /* delay for at least 8 cycles */ | ||
960 | qat_hal_wait_cycles(handle, ae, 0x8, 0); | ||
961 | /* | ||
962 | * read ALU output | ||
963 | * the instruction should have been executed | ||
964 | * prior to clearing the ECS in qat_hal_wr_uwords | ||
965 | */ | ||
966 | qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data); | ||
967 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); | ||
968 | qat_hal_wr_uwords(handle, ae, 0, 1, &savuword); | ||
969 | if (ctx != (savctx & ACS_ACNO)) | ||
970 | qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, | ||
971 | savctx & ACS_ACNO); | ||
972 | qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl); | ||
973 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); | ||
974 | |||
975 | return status; | ||
976 | } | ||
977 | |||
978 | static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle, | ||
979 | unsigned char ae, unsigned char ctx, | ||
980 | enum icp_qat_uof_regtype reg_type, | ||
981 | unsigned short reg_num, unsigned int data) | ||
982 | { | ||
983 | unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo; | ||
984 | uint64_t insts[] = { | ||
985 | 0x0F440000000ull, | ||
986 | 0x0F040000000ull, | ||
987 | 0x0F0000C0300ull, | ||
988 | 0x0E000010000ull | ||
989 | }; | ||
990 | const int num_inst = ARRAY_SIZE(insts), code_off = 1; | ||
991 | const int imm_w1 = 0, imm_w0 = 1; | ||
992 | |||
993 | dest_addr = qat_hal_get_reg_addr(reg_type, reg_num); | ||
994 | if (dest_addr == BAD_REGADDR) { | ||
995 | pr_err("QAT: bad destAddr=0x%x\n", dest_addr); | ||
996 | return -EINVAL; | ||
997 | } | ||
998 | |||
999 | data16lo = 0xffff & data; | ||
1000 | data16hi = 0xffff & (data >> 0x10); | ||
1001 | src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short) | ||
1002 | (0xff & data16hi)); | ||
1003 | src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short) | ||
1004 | (0xff & data16lo)); | ||
1005 | switch (reg_type) { | ||
1006 | case ICP_GPA_REL: | ||
1007 | insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) | | ||
1008 | ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff); | ||
1009 | insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) | | ||
1010 | ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff); | ||
1011 | break; | ||
1012 | default: | ||
1013 | insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) | | ||
1014 | ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff); | ||
1015 | |||
1016 | insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) | | ||
1017 | ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff); | ||
1018 | break; | ||
1019 | } | ||
1020 | |||
1021 | return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst, | ||
1022 | code_off, num_inst * 0x5, NULL); | ||
1023 | } | ||
1024 | |||
1025 | int qat_hal_get_ins_num(void) | ||
1026 | { | ||
1027 | return ARRAY_SIZE(inst_4b); | ||
1028 | } | ||
1029 | |||
1030 | static int qat_hal_concat_micro_code(uint64_t *micro_inst, | ||
1031 | unsigned int inst_num, unsigned int size, | ||
1032 | unsigned int addr, unsigned int *value) | ||
1033 | { | ||
1034 | int i, val_indx; | ||
1035 | unsigned int cur_value; | ||
1036 | const uint64_t *inst_arr; | ||
1037 | int fixup_offset; | ||
1038 | int usize = 0; | ||
1039 | int orig_num; | ||
1040 | |||
1041 | orig_num = inst_num; | ||
1042 | val_indx = 0; | ||
1043 | cur_value = value[val_indx++]; | ||
1044 | inst_arr = inst_4b; | ||
1045 | usize = ARRAY_SIZE(inst_4b); | ||
1046 | fixup_offset = inst_num; | ||
1047 | for (i = 0; i < usize; i++) | ||
1048 | micro_inst[inst_num++] = inst_arr[i]; | ||
1049 | INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr)); | ||
1050 | fixup_offset++; | ||
1051 | INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0); | ||
1052 | fixup_offset++; | ||
1053 | INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0)); | ||
1054 | fixup_offset++; | ||
1055 | INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10)); | ||
1056 | |||
1057 | return inst_num - orig_num; | ||
1058 | } | ||
1059 | |||
1060 | static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle, | ||
1061 | unsigned char ae, unsigned char ctx, | ||
1062 | int *pfirst_exec, uint64_t *micro_inst, | ||
1063 | unsigned int inst_num) | ||
1064 | { | ||
1065 | int stat = 0; | ||
1066 | unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0; | ||
1067 | unsigned int gprb0 = 0, gprb1 = 0; | ||
1068 | |||
1069 | if (*pfirst_exec) { | ||
1070 | qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0); | ||
1071 | qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1); | ||
1072 | qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2); | ||
1073 | qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0); | ||
1074 | qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1); | ||
1075 | *pfirst_exec = 0; | ||
1076 | } | ||
1077 | stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1, | ||
1078 | inst_num * 0x5, NULL); | ||
1079 | if (stat != 0) | ||
1080 | return -EFAULT; | ||
1081 | qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0); | ||
1082 | qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1); | ||
1083 | qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2); | ||
1084 | qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0); | ||
1085 | qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1); | ||
1086 | |||
1087 | return 0; | ||
1088 | } | ||
1089 | |||
1090 | int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle, | ||
1091 | unsigned char ae, | ||
1092 | struct icp_qat_uof_batch_init *lm_init_header) | ||
1093 | { | ||
1094 | struct icp_qat_uof_batch_init *plm_init; | ||
1095 | uint64_t *micro_inst_arry; | ||
1096 | int micro_inst_num; | ||
1097 | int alloc_inst_size; | ||
1098 | int first_exec = 1; | ||
1099 | int stat = 0; | ||
1100 | |||
1101 | plm_init = lm_init_header->next; | ||
1102 | alloc_inst_size = lm_init_header->size; | ||
1103 | if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore) | ||
1104 | alloc_inst_size = handle->hal_handle->max_ustore; | ||
1105 | micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(uint64_t), | ||
1106 | GFP_KERNEL); | ||
1107 | if (!micro_inst_arry) | ||
1108 | return -ENOMEM; | ||
1109 | micro_inst_num = 0; | ||
1110 | while (plm_init) { | ||
1111 | unsigned int addr, *value, size; | ||
1112 | |||
1113 | ae = plm_init->ae; | ||
1114 | addr = plm_init->addr; | ||
1115 | value = plm_init->value; | ||
1116 | size = plm_init->size; | ||
1117 | micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry, | ||
1118 | micro_inst_num, | ||
1119 | size, addr, value); | ||
1120 | plm_init = plm_init->next; | ||
1121 | } | ||
1122 | /* execute the microcode */ | ||
1123 | if (micro_inst_arry && (micro_inst_num > 0)) { | ||
1124 | micro_inst_arry[micro_inst_num++] = 0x0E000010000ull; | ||
1125 | stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec, | ||
1126 | micro_inst_arry, | ||
1127 | micro_inst_num); | ||
1128 | } | ||
1129 | kfree(micro_inst_arry); | ||
1130 | return stat; | ||
1131 | } | ||
1132 | |||
1133 | static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle, | ||
1134 | unsigned char ae, unsigned char ctx, | ||
1135 | enum icp_qat_uof_regtype reg_type, | ||
1136 | unsigned short reg_num, unsigned int val) | ||
1137 | { | ||
1138 | int status = 0; | ||
1139 | unsigned int reg_addr; | ||
1140 | unsigned int ctx_enables; | ||
1141 | unsigned short mask; | ||
1142 | unsigned short dr_offset = 0x10; | ||
1143 | |||
1144 | status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); | ||
1145 | if (CE_INUSE_CONTEXTS & ctx_enables) { | ||
1146 | if (ctx & 0x1) { | ||
1147 | pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx); | ||
1148 | return -EINVAL; | ||
1149 | } | ||
1150 | mask = 0x1f; | ||
1151 | dr_offset = 0x20; | ||
1152 | } else { | ||
1153 | mask = 0x0f; | ||
1154 | } | ||
1155 | if (reg_num & ~mask) | ||
1156 | return -EINVAL; | ||
1157 | reg_addr = reg_num + (ctx << 0x5); | ||
1158 | switch (reg_type) { | ||
1159 | case ICP_SR_RD_REL: | ||
1160 | case ICP_SR_REL: | ||
1161 | SET_AE_XFER(handle, ae, reg_addr, val); | ||
1162 | break; | ||
1163 | case ICP_DR_RD_REL: | ||
1164 | case ICP_DR_REL: | ||
1165 | SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val); | ||
1166 | break; | ||
1167 | default: | ||
1168 | status = -EINVAL; | ||
1169 | break; | ||
1170 | } | ||
1171 | return status; | ||
1172 | } | ||
1173 | |||
1174 | static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle, | ||
1175 | unsigned char ae, unsigned char ctx, | ||
1176 | enum icp_qat_uof_regtype reg_type, | ||
1177 | unsigned short reg_num, unsigned int data) | ||
1178 | { | ||
1179 | unsigned int gprval, ctx_enables; | ||
1180 | unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi, | ||
1181 | data16low; | ||
1182 | unsigned short reg_mask; | ||
1183 | int status = 0; | ||
1184 | uint64_t micro_inst[] = { | ||
1185 | 0x0F440000000ull, | ||
1186 | 0x0F040000000ull, | ||
1187 | 0x0A000000000ull, | ||
1188 | 0x0F0000C0300ull, | ||
1189 | 0x0E000010000ull | ||
1190 | }; | ||
1191 | const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1; | ||
1192 | const unsigned short gprnum = 0, dly = num_inst * 0x5; | ||
1193 | |||
1194 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); | ||
1195 | if (CE_INUSE_CONTEXTS & ctx_enables) { | ||
1196 | if (ctx & 0x1) { | ||
1197 | pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx); | ||
1198 | return -EINVAL; | ||
1199 | } | ||
1200 | reg_mask = (unsigned short)~0x1f; | ||
1201 | } else { | ||
1202 | reg_mask = (unsigned short)~0xf; | ||
1203 | } | ||
1204 | if (reg_num & reg_mask) | ||
1205 | return -EINVAL; | ||
1206 | xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num); | ||
1207 | if (xfr_addr == BAD_REGADDR) { | ||
1208 | pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr); | ||
1209 | return -EINVAL; | ||
1210 | } | ||
1211 | qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval); | ||
1212 | gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum); | ||
1213 | data16low = 0xffff & data; | ||
1214 | data16hi = 0xffff & (data >> 0x10); | ||
1215 | src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, | ||
1216 | (unsigned short)(0xff & data16hi)); | ||
1217 | src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, | ||
1218 | (unsigned short)(0xff & data16low)); | ||
1219 | micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) | | ||
1220 | ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff); | ||
1221 | micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) | | ||
1222 | ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff); | ||
1223 | micro_inst[0x2] = micro_inst[0x2] | | ||
1224 | ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10); | ||
1225 | status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst, | ||
1226 | code_off, dly, NULL); | ||
1227 | qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval); | ||
1228 | return status; | ||
1229 | } | ||
1230 | |||
1231 | static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle, | ||
1232 | unsigned char ae, unsigned char ctx, | ||
1233 | unsigned short nn, unsigned int val) | ||
1234 | { | ||
1235 | unsigned int ctx_enables; | ||
1236 | int stat = 0; | ||
1237 | |||
1238 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); | ||
1239 | ctx_enables &= IGNORE_W1C_MASK; | ||
1240 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE); | ||
1241 | |||
1242 | stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val); | ||
1243 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); | ||
1244 | return stat; | ||
1245 | } | ||
1246 | |||
1247 | static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle | ||
1248 | *handle, unsigned char ae, | ||
1249 | unsigned short absreg_num, | ||
1250 | unsigned short *relreg, | ||
1251 | unsigned char *ctx) | ||
1252 | { | ||
1253 | unsigned int ctx_enables; | ||
1254 | |||
1255 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); | ||
1256 | if (ctx_enables & CE_INUSE_CONTEXTS) { | ||
1257 | /* 4-ctx mode */ | ||
1258 | *relreg = absreg_num & 0x1F; | ||
1259 | *ctx = (absreg_num >> 0x4) & 0x6; | ||
1260 | } else { | ||
1261 | /* 8-ctx mode */ | ||
1262 | *relreg = absreg_num & 0x0F; | ||
1263 | *ctx = (absreg_num >> 0x4) & 0x7; | ||
1264 | } | ||
1265 | return 0; | ||
1266 | } | ||
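In 4-context mode only the even contexts exist, so an absolute register number decodes to a 5-bit relative index and an even context; in 8-context mode it decodes to a 4-bit index and any of contexts 0-7. A tiny standalone check of that arithmetic:

	#include <assert.h>

	static void abs_to_rel(int four_ctx, unsigned short absreg,
			       unsigned short *rel, unsigned char *ctx)
	{
		if (four_ctx) {
			*rel = absreg & 0x1f;
			*ctx = (absreg >> 4) & 0x6;	/* even contexts only */
		} else {
			*rel = absreg & 0x0f;
			*ctx = (absreg >> 4) & 0x7;
		}
	}

	int main(void)
	{
		unsigned short rel;
		unsigned char ctx;

		abs_to_rel(0, 0x35, &rel, &ctx);	/* 8-ctx: reg 5, ctx 3 */
		assert(rel == 0x5 && ctx == 0x3);
		abs_to_rel(1, 0x35, &rel, &ctx);	/* 4-ctx: reg 21, ctx 2 */
		assert(rel == 0x15 && ctx == 0x2);
		return 0;
	}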
1267 | |||
1268 | int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle, | ||
1269 | unsigned char ae, unsigned char ctx_mask, | ||
1270 | enum icp_qat_uof_regtype reg_type, | ||
1271 | unsigned short reg_num, unsigned int regdata) | ||
1272 | { | ||
1273 | int stat = 0; | ||
1274 | unsigned short reg; | ||
1275 | unsigned char ctx = 0; | ||
1276 | enum icp_qat_uof_regtype type; | ||
1277 | |||
1278 | if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG) | ||
1279 | return -EINVAL; | ||
1280 | |||
1281 | do { | ||
1282 | if (ctx_mask == 0) { | ||
1283 | qat_hal_convert_abs_to_rel(handle, ae, reg_num, ®, | ||
1284 | &ctx); | ||
1285 | type = reg_type - 1; | ||
1286 | } else { | ||
1287 | reg = reg_num; | ||
1288 | type = reg_type; | ||
1289 | if (!test_bit(ctx, (unsigned long *)&ctx_mask)) | ||
1290 | continue; | ||
1291 | } | ||
1292 | stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata); | ||
1293 | if (stat) { | ||
1294 | pr_err("QAT: write gpr fail\n"); | ||
1295 | return -EINVAL; | ||
1296 | } | ||
1297 | } while (ctx_mask && (++ctx < ICP_QAT_UCLO_MAX_CTX)); | ||
1298 | |||
1299 | return 0; | ||
1300 | } | ||
1301 | |||
1302 | int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle, | ||
1303 | unsigned char ae, unsigned char ctx_mask, | ||
1304 | enum icp_qat_uof_regtype reg_type, | ||
1305 | unsigned short reg_num, unsigned int regdata) | ||
1306 | { | ||
1307 | int stat = 0; | ||
1308 | unsigned short reg; | ||
1309 | unsigned char ctx = 0; | ||
1310 | enum icp_qat_uof_regtype type; | ||
1311 | |||
1312 | if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG) | ||
1313 | return -EINVAL; | ||
1314 | |||
1315 | do { | ||
1316 | if (ctx_mask == 0) { | ||
1317 | qat_hal_convert_abs_to_rel(handle, ae, reg_num, ®, | ||
1318 | &ctx); | ||
1319 | type = reg_type - 3; | ||
1320 | } else { | ||
1321 | reg = reg_num; | ||
1322 | type = reg_type; | ||
1323 | if (!test_bit(ctx, (unsigned long *)&ctx_mask)) | ||
1324 | continue; | ||
1325 | } | ||
1326 | stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg, | ||
1327 | regdata); | ||
1328 | if (stat) { | ||
1329 | pr_err("QAT: write wr xfer fail\n"); | ||
1330 | return -EINVAL; | ||
1331 | } | ||
1332 | } while (ctx_mask && (++ctx < ICP_QAT_UCLO_MAX_CTX)); | ||
1333 | |||
1334 | return 0; | ||
1335 | } | ||
1336 | |||
1337 | int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle, | ||
1338 | unsigned char ae, unsigned char ctx_mask, | ||
1339 | enum icp_qat_uof_regtype reg_type, | ||
1340 | unsigned short reg_num, unsigned int regdata) | ||
1341 | { | ||
1342 | int stat = 0; | ||
1343 | unsigned short reg; | ||
1344 | unsigned char ctx = 0; | ||
1345 | enum icp_qat_uof_regtype type; | ||
1346 | |||
1347 | if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG) | ||
1348 | return -EINVAL; | ||
1349 | |||
1350 | do { | ||
1351 | if (ctx_mask == 0) { | ||
1352 | qat_hal_convert_abs_to_rel(handle, ae, reg_num, ®, | ||
1353 | &ctx); | ||
1354 | type = reg_type - 3; | ||
1355 | } else { | ||
1356 | reg = reg_num; | ||
1357 | type = reg_type; | ||
1358 | if (!test_bit(ctx, (unsigned long *)&ctx_mask)) | ||
1359 | continue; | ||
1360 | } | ||
1361 | stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg, | ||
1362 | regdata); | ||
1363 | if (stat) { | ||
1364 | pr_err("QAT: write rd xfer fail\n"); | ||
1365 | return -EINVAL; | ||
1366 | } | ||
1367 | } while (ctx_mask && (++ctx < ICP_QAT_UCLO_MAX_CTX)); | ||
1368 | |||
1369 | return 0; | ||
1370 | } | ||
1371 | |||
1372 | int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle, | ||
1373 | unsigned char ae, unsigned char ctx_mask, | ||
1374 | unsigned short reg_num, unsigned int regdata) | ||
1375 | { | ||
1376 | int stat = 0; | ||
1377 | unsigned char ctx; | ||
1378 | |||
1379 | if (ctx_mask == 0) | ||
1380 | return -EINVAL; | ||
1381 | |||
1382 | for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { | ||
1383 | if (!test_bit(ctx, (unsigned long *)&ctx_mask)) | ||
1384 | continue; | ||
1385 | stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata); | ||
1386 | if (stat) { | ||
1387 | pr_err("QAT: write neigh error\n"); | ||
1388 | return -EINVAL; | ||
1389 | } | ||
1390 | } | ||
1391 | |||
1392 | return 0; | ||
1393 | } | ||
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c new file mode 100644 index 000000000000..1e27f9f7fddf --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_uclo.c | |||
@@ -0,0 +1,1181 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/slab.h> | ||
48 | #include <linux/ctype.h> | ||
49 | #include <linux/kernel.h> | ||
50 | |||
51 | #include "adf_accel_devices.h" | ||
52 | #include "adf_common_drv.h" | ||
53 | #include "icp_qat_uclo.h" | ||
54 | #include "icp_qat_hal.h" | ||
55 | #include "icp_qat_fw_loader_handle.h" | ||
56 | |||
57 | #define UWORD_CPYBUF_SIZE 1024 | ||
58 | #define INVLD_UWORD 0xffffffffffull | ||
59 | #define PID_MINOR_REV 0xf | ||
60 | #define PID_MAJOR_REV (0xf << 4) | ||
61 | |||
62 | static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle, | ||
63 | unsigned int ae, unsigned int image_num) | ||
64 | { | ||
65 | struct icp_qat_uclo_aedata *ae_data; | ||
66 | struct icp_qat_uclo_encapme *encap_image; | ||
67 | struct icp_qat_uclo_page *page = NULL; | ||
68 | struct icp_qat_uclo_aeslice *ae_slice = NULL; | ||
69 | |||
70 | ae_data = &obj_handle->ae_data[ae]; | ||
71 | encap_image = &obj_handle->ae_uimage[image_num]; | ||
72 | ae_slice = &ae_data->ae_slices[ae_data->slice_num]; | ||
73 | ae_slice->encap_image = encap_image; | ||
74 | |||
75 | if (encap_image->img_ptr) { | ||
76 | ae_slice->ctx_mask_assigned = | ||
77 | encap_image->img_ptr->ctx_assigned; | ||
78 | ae_data->eff_ustore_size = obj_handle->ustore_phy_size; | ||
79 | } else { | ||
80 | ae_slice->ctx_mask_assigned = 0; | ||
81 | } | ||
82 | ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL); | ||
83 | if (!ae_slice->region) | ||
84 | return -ENOMEM; | ||
85 | ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL); | ||
86 | if (!ae_slice->page) | ||
87 | goto out_err; | ||
88 | page = ae_slice->page; | ||
89 | page->encap_page = encap_image->page; | ||
90 | ae_slice->page->region = ae_slice->region; | ||
91 | ae_data->slice_num++; | ||
92 | return 0; | ||
93 | out_err: | ||
94 | kfree(ae_slice->region); | ||
95 | ae_slice->region = NULL; | ||
96 | return -ENOMEM; | ||
97 | } | ||
98 | |||
99 | static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data) | ||
100 | { | ||
101 | unsigned int i; | ||
102 | |||
103 | if (!ae_data) { | ||
104 | pr_err("QAT: bad argument, ae_data is NULL\n "); | ||
105 | return -EINVAL; | ||
106 | } | ||
107 | |||
108 | for (i = 0; i < ae_data->slice_num; i++) { | ||
109 | kfree(ae_data->ae_slices[i].region); | ||
110 | ae_data->ae_slices[i].region = NULL; | ||
111 | kfree(ae_data->ae_slices[i].page); | ||
112 | ae_data->ae_slices[i].page = NULL; | ||
113 | } | ||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table, | ||
118 | unsigned int str_offset) | ||
119 | { | ||
120 | if ((!str_table->table_len) || (str_offset > str_table->table_len)) | ||
121 | return NULL; | ||
122 | return (char *)(((unsigned long)(str_table->strings)) + str_offset); | ||
123 | } | ||
124 | |||
125 | static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr) | ||
126 | { | ||
127 | int maj = hdr->maj_ver & 0xff; | ||
128 | int min = hdr->min_ver & 0xff; | ||
129 | |||
130 | if (hdr->file_id != ICP_QAT_UOF_FID) { | ||
131 | pr_err("QAT: Invalid header 0x%x\n", hdr->file_id); | ||
132 | return -EINVAL; | ||
133 | } | ||
134 | if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) { | ||
135 | pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n", | ||
136 | maj, min); | ||
137 | return -EINVAL; | ||
138 | } | ||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle, | ||
143 | unsigned int addr, unsigned int *val, | ||
144 | unsigned int num_in_bytes) | ||
145 | { | ||
146 | unsigned int outval; | ||
147 | unsigned char *ptr = (unsigned char *)val; | ||
148 | |||
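| /* callers pass num_in_bytes as a multiple of four */ | ||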
149 | while (num_in_bytes) { | ||
150 | memcpy(&outval, ptr, 4); | ||
151 | SRAM_WRITE(handle, addr, outval); | ||
152 | num_in_bytes -= 4; | ||
153 | ptr += 4; | ||
154 | addr += 4; | ||
155 | } | ||
156 | } | ||
157 | |||
158 | static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle, | ||
159 | unsigned char ae, unsigned int addr, | ||
160 | unsigned int *val, | ||
161 | unsigned int num_in_bytes) | ||
162 | { | ||
163 | unsigned int outval; | ||
164 | unsigned char *ptr = (unsigned char *)val; | ||
165 | |||
166 | addr >>= 0x2; /* convert to uword address */ | ||
167 | |||
168 | while (num_in_bytes) { | ||
169 | memcpy(&outval, ptr, 4); | ||
170 | qat_hal_wr_umem(handle, ae, addr++, 1, &outval); | ||
171 | num_in_bytes -= 4; | ||
172 | ptr += 4; | ||
173 | } | ||
174 | } | ||
175 | |||
176 | static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle, | ||
177 | unsigned char ae, | ||
178 | struct icp_qat_uof_batch_init | ||
179 | *umem_init_header) | ||
180 | { | ||
181 | struct icp_qat_uof_batch_init *umem_init; | ||
182 | |||
183 | if (!umem_init_header) | ||
184 | return; | ||
185 | umem_init = umem_init_header->next; | ||
186 | while (umem_init) { | ||
187 | unsigned int addr, *value, size; | ||
188 | |||
189 | ae = umem_init->ae; | ||
190 | addr = umem_init->addr; | ||
191 | value = umem_init->value; | ||
192 | size = umem_init->size; | ||
193 | qat_uclo_wr_umem_by_words(handle, ae, addr, value, size); | ||
194 | umem_init = umem_init->next; | ||
195 | } | ||
196 | } | ||
197 | |||
198 | static void | ||
199 | qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle, | ||
200 | struct icp_qat_uof_batch_init **base) | ||
201 | { | ||
202 | struct icp_qat_uof_batch_init *umem_init; | ||
203 | |||
204 | umem_init = *base; | ||
205 | while (umem_init) { | ||
206 | struct icp_qat_uof_batch_init *pre; | ||
207 | |||
208 | pre = umem_init; | ||
209 | umem_init = umem_init->next; | ||
210 | kfree(pre); | ||
211 | } | ||
212 | *base = NULL; | ||
213 | } | ||
214 | |||
215 | static int qat_uclo_parse_num(char *str, unsigned int *num) | ||
216 | { | ||
217 | char buf[16] = {0}; | ||
218 | unsigned long ae = 0; | ||
219 | int i; | ||
220 | |||
221 | strncpy(buf, str, 15); | ||
222 | for (i = 0; i < 16; i++) { | ||
223 | if (!isdigit(buf[i])) { | ||
224 | buf[i] = '\0'; | ||
225 | break; | ||
226 | } | ||
227 | } | ||
228 | if (kstrtoul(buf, 10, &ae)) | ||
229 | return -EFAULT; | ||
230 | |||
231 | *num = (unsigned int)ae; | ||
232 | return 0; | ||
233 | } | ||
234 | |||
235 | static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle, | ||
236 | struct icp_qat_uof_initmem *init_mem, | ||
237 | unsigned int size_range, unsigned int *ae) | ||
238 | { | ||
239 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
240 | char *str; | ||
241 | |||
242 | if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) { | ||
243 | pr_err("QAT: initmem is out of range\n"); | ||
244 | return -EINVAL; | ||
245 | } | ||
246 | if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) { | ||
247 | pr_err("QAT: bad memory scope for init_mem\n"); | ||
248 | return -EINVAL; | ||
249 | } | ||
250 | str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name); | ||
251 | if (!str) { | ||
252 | pr_err("QAT: AE name assigned in UOF init table is NULL\n"); | ||
253 | return -EINVAL; | ||
254 | } | ||
255 | if (qat_uclo_parse_num(str, ae)) { | ||
256 | pr_err("QAT: failed to parse AE number\n"); | ||
257 | return -EINVAL; | ||
258 | } | ||
259 | if (*ae >= ICP_QAT_UCLO_MAX_AE) { | ||
260 | pr_err("QAT: AE %u out of range\n", *ae); | ||
261 | return -EINVAL; | ||
262 | } | ||
263 | return 0; | ||
264 | } | ||
265 | |||
266 | static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle | ||
267 | *handle, struct icp_qat_uof_initmem | ||
268 | *init_mem, unsigned int ae, | ||
269 | struct icp_qat_uof_batch_init | ||
270 | **init_tab_base) | ||
271 | { | ||
272 | struct icp_qat_uof_batch_init *init_header, *tail; | ||
273 | struct icp_qat_uof_batch_init *mem_init, *tail_old; | ||
274 | struct icp_qat_uof_memvar_attr *mem_val_attr; | ||
275 | unsigned int i, flag = 0; | ||
276 | |||
277 | mem_val_attr = | ||
278 | (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem + | ||
279 | sizeof(struct icp_qat_uof_initmem)); | ||
280 | |||
281 | init_header = *init_tab_base; | ||
282 | if (!init_header) { | ||
283 | init_header = kzalloc(sizeof(*init_header), GFP_KERNEL); | ||
284 | if (!init_header) | ||
285 | return -ENOMEM; | ||
286 | init_header->size = 1; | ||
287 | *init_tab_base = init_header; | ||
288 | flag = 1; | ||
289 | } | ||
290 | tail_old = init_header; | ||
291 | while (tail_old->next) | ||
292 | tail_old = tail_old->next; | ||
293 | tail = tail_old; | ||
294 | for (i = 0; i < init_mem->val_attr_num; i++) { | ||
295 | mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL); | ||
296 | if (!mem_init) | ||
297 | goto out_err; | ||
298 | mem_init->ae = ae; | ||
299 | mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte; | ||
300 | mem_init->value = &mem_val_attr->value; | ||
301 | mem_init->size = 4; | ||
302 | mem_init->next = NULL; | ||
303 | tail->next = mem_init; | ||
304 | tail = mem_init; | ||
305 | init_header->size += qat_hal_get_ins_num(); | ||
306 | mem_val_attr++; | ||
307 | } | ||
308 | return 0; | ||
309 | out_err: | ||
310 | while (tail_old) { | ||
311 | mem_init = tail_old->next; | ||
312 | kfree(tail_old); | ||
313 | tail_old = mem_init; | ||
314 | } | ||
315 | if (flag) /* the loop above already freed the list head we allocated */ | ||
316 | *init_tab_base = NULL; | ||
317 | return -ENOMEM; | ||
318 | } | ||
319 | |||
320 | static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle, | ||
321 | struct icp_qat_uof_initmem *init_mem) | ||
322 | { | ||
323 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
324 | unsigned int ae; | ||
325 | |||
326 | if (qat_uclo_fetch_initmem_ae(handle, init_mem, | ||
327 | ICP_QAT_UCLO_MAX_LMEM_REG, &ae)) | ||
328 | return -EINVAL; | ||
329 | if (qat_uclo_create_batch_init_list(handle, init_mem, ae, | ||
330 | &obj_handle->lm_init_tab[ae])) | ||
331 | return -EINVAL; | ||
332 | return 0; | ||
333 | } | ||
334 | |||
335 | static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle, | ||
336 | struct icp_qat_uof_initmem *init_mem) | ||
337 | { | ||
338 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
339 | unsigned int ae, ustore_size, uaddr, i; | ||
340 | |||
341 | ustore_size = obj_handle->ustore_phy_size; | ||
342 | if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae)) | ||
343 | return -EINVAL; | ||
344 | if (qat_uclo_create_batch_init_list(handle, init_mem, ae, | ||
345 | &obj_handle->umem_init_tab[ae])) | ||
346 | return -EINVAL; | ||
347 | /* set the highest ustore address referenced */ | ||
348 | uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2; | ||
349 | for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) { | ||
350 | if (obj_handle->ae_data[ae].ae_slices[i]. | ||
351 | encap_image->uwords_num < uaddr) | ||
352 | obj_handle->ae_data[ae].ae_slices[i]. | ||
353 | encap_image->uwords_num = uaddr; | ||
354 | } | ||
355 | return 0; | ||
356 | } | ||
357 | |||
358 | #define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000 | ||
359 | static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle, | ||
360 | struct icp_qat_uof_initmem *init_mem) | ||
361 | { | ||
362 | unsigned int i; | ||
363 | struct icp_qat_uof_memvar_attr *mem_val_attr; | ||
364 | |||
365 | mem_val_attr = | ||
366 | (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem + | ||
367 | sizeof(struct icp_qat_uof_initmem)); | ||
368 | |||
369 | switch (init_mem->region) { | ||
370 | case ICP_QAT_UOF_SRAM_REGION: | ||
371 | if ((init_mem->addr + init_mem->num_in_bytes) > | ||
372 | ICP_DH895XCC_PESRAM_BAR_SIZE) { | ||
373 | pr_err("QAT: initmem on SRAM is out of range\n"); | ||
374 | return -EINVAL; | ||
375 | } | ||
376 | for (i = 0; i < init_mem->val_attr_num; i++) { | ||
377 | qat_uclo_wr_sram_by_words(handle, | ||
378 | init_mem->addr + | ||
379 | mem_val_attr->offset_in_byte, | ||
380 | &mem_val_attr->value, 4); | ||
381 | mem_val_attr++; | ||
382 | } | ||
383 | break; | ||
384 | case ICP_QAT_UOF_LMEM_REGION: | ||
385 | if (qat_uclo_init_lmem_seg(handle, init_mem)) | ||
386 | return -EINVAL; | ||
387 | break; | ||
388 | case ICP_QAT_UOF_UMEM_REGION: | ||
389 | if (qat_uclo_init_umem_seg(handle, init_mem)) | ||
390 | return -EINVAL; | ||
391 | break; | ||
392 | default: | ||
393 | pr_err("QAT: initmem region error. region type=0x%x\n", | ||
394 | init_mem->region); | ||
395 | return -EINVAL; | ||
396 | } | ||
397 | return 0; | ||
398 | } | ||
399 | |||
400 | static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle, | ||
401 | struct icp_qat_uclo_encapme *image) | ||
402 | { | ||
403 | unsigned int i; | ||
404 | struct icp_qat_uclo_encap_page *page; | ||
405 | struct icp_qat_uof_image *uof_image; | ||
406 | unsigned char ae; | ||
407 | unsigned int ustore_size; | ||
408 | unsigned int patt_pos; | ||
409 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
410 | uint64_t *fill_data; | ||
411 | |||
412 | uof_image = image->img_ptr; | ||
413 | fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t), | ||
414 | GFP_KERNEL); | ||
415 | if (!fill_data) | ||
416 | return -ENOMEM; | ||
417 | for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++) | ||
418 | memcpy(&fill_data[i], &uof_image->fill_pattern, | ||
419 | sizeof(uint64_t)); | ||
420 | page = image->page; | ||
421 | |||
422 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
423 | if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned)) | ||
424 | continue; | ||
425 | ustore_size = obj_handle->ae_data[ae].eff_ustore_size; | ||
426 | patt_pos = page->beg_addr_p + page->micro_words_num; | ||
427 | |||
428 | qat_hal_wr_uwords(handle, (unsigned char)ae, 0, | ||
429 | page->beg_addr_p, &fill_data[0]); | ||
430 | qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos, | ||
431 | ustore_size - patt_pos + 1, | ||
432 | &fill_data[page->beg_addr_p]); | ||
433 | } | ||
434 | kfree(fill_data); | ||
435 | return 0; | ||
436 | } | ||
437 | |||
438 | static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle) | ||
439 | { | ||
440 | int i, ae; | ||
441 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
442 | struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem; | ||
443 | |||
444 | for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) { | ||
445 | if (initmem->num_in_bytes) { | ||
446 | if (qat_uclo_init_ae_memory(handle, initmem)) | ||
447 | return -EINVAL; | ||
448 | } | ||
449 | initmem = (struct icp_qat_uof_initmem *)((unsigned long)( | ||
450 | (unsigned long)initmem + | ||
451 | sizeof(struct icp_qat_uof_initmem)) + | ||
452 | (sizeof(struct icp_qat_uof_memvar_attr) * | ||
453 | initmem->val_attr_num)); | ||
454 | } | ||
455 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
456 | if (qat_hal_batch_wr_lm(handle, ae, | ||
457 | obj_handle->lm_init_tab[ae])) { | ||
458 | pr_err("QAT: failed to batch init lmem for AE %d\n", ae); | ||
459 | return -EINVAL; | ||
460 | } | ||
461 | qat_uclo_cleanup_batch_init_list(handle, | ||
462 | &obj_handle->lm_init_tab[ae]); | ||
463 | qat_uclo_batch_wr_umem(handle, ae, | ||
464 | obj_handle->umem_init_tab[ae]); | ||
465 | qat_uclo_cleanup_batch_init_list(handle, | ||
466 | &obj_handle-> | ||
467 | umem_init_tab[ae]); | ||
468 | } | ||
469 | return 0; | ||
470 | } | ||
471 | |||
472 | static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr, | ||
473 | char *chunk_id, void *cur) | ||
474 | { | ||
475 | int i; | ||
476 | struct icp_qat_uof_chunkhdr *chunk_hdr = | ||
477 | (struct icp_qat_uof_chunkhdr *) | ||
478 | ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr)); | ||
479 | |||
480 | for (i = 0; i < obj_hdr->num_chunks; i++) { | ||
481 | if ((cur < (void *)&chunk_hdr[i]) && | ||
482 | !strncmp(chunk_hdr[i].chunk_id, chunk_id, | ||
483 | ICP_QAT_UOF_OBJID_LEN)) { | ||
484 | return &chunk_hdr[i]; | ||
485 | } | ||
486 | } | ||
487 | return NULL; | ||
488 | } | ||
489 | |||
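| /* one byte step of a CRC-16 CCITT (polynomial 0x1021) checksum */ | ||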
490 | static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch) | ||
491 | { | ||
492 | int i; | ||
493 | unsigned int topbit = 1 << 0xF; | ||
494 | unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch); | ||
495 | |||
496 | reg ^= inbyte << 0x8; | ||
497 | for (i = 0; i < 0x8; i++) { | ||
498 | if (reg & topbit) | ||
499 | reg = (reg << 1) ^ 0x1021; | ||
500 | else | ||
501 | reg <<= 1; | ||
502 | } | ||
503 | return reg & 0xFFFF; | ||
504 | } | ||
505 | |||
506 | static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num) | ||
507 | { | ||
508 | unsigned int chksum = 0; | ||
509 | |||
510 | if (ptr) | ||
511 | while (num--) | ||
512 | chksum = qat_uclo_calc_checksum(chksum, *ptr++); | ||
513 | return chksum; | ||
514 | } | ||
515 | |||
516 | static struct icp_qat_uclo_objhdr * | ||
517 | qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr, | ||
518 | char *chunk_id) | ||
519 | { | ||
520 | struct icp_qat_uof_filechunkhdr *file_chunk; | ||
521 | struct icp_qat_uclo_objhdr *obj_hdr; | ||
522 | char *chunk; | ||
523 | int i; | ||
524 | |||
525 | file_chunk = (struct icp_qat_uof_filechunkhdr *) | ||
526 | (buf + sizeof(struct icp_qat_uof_filehdr)); | ||
527 | for (i = 0; i < file_hdr->num_chunks; i++) { | ||
528 | if (!strncmp(file_chunk->chunk_id, chunk_id, | ||
529 | ICP_QAT_UOF_OBJID_LEN)) { | ||
530 | chunk = buf + file_chunk->offset; | ||
531 | if (file_chunk->checksum != qat_uclo_calc_str_checksum( | ||
532 | chunk, file_chunk->size)) | ||
533 | break; | ||
534 | obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL); | ||
535 | if (!obj_hdr) | ||
536 | break; | ||
537 | obj_hdr->file_buff = chunk; | ||
538 | obj_hdr->checksum = file_chunk->checksum; | ||
539 | obj_hdr->size = file_chunk->size; | ||
540 | return obj_hdr; | ||
541 | } | ||
542 | file_chunk++; | ||
543 | } | ||
544 | return NULL; | ||
545 | } | ||
546 | |||
547 | static int | ||
548 | qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj, | ||
549 | struct icp_qat_uof_image *image) | ||
550 | { | ||
551 | struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab; | ||
552 | struct icp_qat_uof_objtable *neigh_reg_tab; | ||
553 | struct icp_qat_uof_code_page *code_page; | ||
554 | |||
555 | code_page = (struct icp_qat_uof_code_page *) | ||
556 | ((char *)image + sizeof(struct icp_qat_uof_image)); | ||
557 | uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + | ||
558 | code_page->uc_var_tab_offset); | ||
559 | imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + | ||
560 | code_page->imp_var_tab_offset); | ||
561 | imp_expr_tab = (struct icp_qat_uof_objtable *) | ||
562 | (encap_uof_obj->beg_uof + | ||
563 | code_page->imp_expr_tab_offset); | ||
564 | if (uc_var_tab->entry_num || imp_var_tab->entry_num || | ||
565 | imp_expr_tab->entry_num) { | ||
566 | pr_err("QAT: UOF can't contain imported variables\n"); | ||
567 | return -EINVAL; | ||
568 | } | ||
569 | neigh_reg_tab = (struct icp_qat_uof_objtable *) | ||
570 | (encap_uof_obj->beg_uof + | ||
571 | code_page->neigh_reg_tab_offset); | ||
572 | if (neigh_reg_tab->entry_num) { | ||
573 | pr_err("QAT: UOF can't contain shared control store feature\n"); | ||
574 | return -EINVAL; | ||
575 | } | ||
576 | if (image->numpages > 1) { | ||
577 | pr_err("QAT: UOF can't contain multiple pages\n"); | ||
578 | return -EINVAL; | ||
579 | } | ||
580 | if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) { | ||
581 | pr_err("QAT: UOF can't use shared control store feature\n"); | ||
582 | return -EFAULT; | ||
583 | } | ||
584 | if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) { | ||
585 | pr_err("QAT: UOF can't use reloadable feature\n"); | ||
586 | return -EFAULT; | ||
587 | } | ||
588 | return 0; | ||
589 | } | ||
590 | |||
591 | static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj | ||
592 | *encap_uof_obj, | ||
593 | struct icp_qat_uof_image *img, | ||
594 | struct icp_qat_uclo_encap_page *page) | ||
595 | { | ||
596 | struct icp_qat_uof_code_page *code_page; | ||
597 | struct icp_qat_uof_code_area *code_area; | ||
598 | struct icp_qat_uof_objtable *uword_block_tab; | ||
599 | struct icp_qat_uof_uword_block *uwblock; | ||
600 | int i; | ||
601 | |||
602 | code_page = (struct icp_qat_uof_code_page *) | ||
603 | ((char *)img + sizeof(struct icp_qat_uof_image)); | ||
604 | page->def_page = code_page->def_page; | ||
605 | page->page_region = code_page->page_region; | ||
606 | page->beg_addr_v = code_page->beg_addr_v; | ||
607 | page->beg_addr_p = code_page->beg_addr_p; | ||
608 | code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof + | ||
609 | code_page->code_area_offset); | ||
610 | page->micro_words_num = code_area->micro_words_num; | ||
611 | uword_block_tab = (struct icp_qat_uof_objtable *) | ||
612 | (encap_uof_obj->beg_uof + | ||
613 | code_area->uword_block_tab); | ||
614 | page->uwblock_num = uword_block_tab->entry_num; | ||
615 | uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab + | ||
616 | sizeof(struct icp_qat_uof_objtable)); | ||
617 | page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock; | ||
618 | for (i = 0; i < uword_block_tab->entry_num; i++) | ||
619 | page->uwblock[i].micro_words = | ||
620 | (unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset; | ||
621 | } | ||
622 | |||
623 | static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle, | ||
624 | struct icp_qat_uclo_encapme *ae_uimage, | ||
625 | int max_image) | ||
626 | { | ||
627 | int i, j; | ||
628 | struct icp_qat_uof_chunkhdr *chunk_hdr = NULL; | ||
629 | struct icp_qat_uof_image *image; | ||
630 | struct icp_qat_uof_objtable *ae_regtab; | ||
631 | struct icp_qat_uof_objtable *init_reg_sym_tab; | ||
632 | struct icp_qat_uof_objtable *sbreak_tab; | ||
633 | struct icp_qat_uof_encap_obj *encap_uof_obj = | ||
634 | &obj_handle->encap_uof_obj; | ||
635 | |||
636 | for (j = 0; j < max_image; j++) { | ||
637 | chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr, | ||
638 | ICP_QAT_UOF_IMAG, chunk_hdr); | ||
639 | if (!chunk_hdr) | ||
640 | break; | ||
641 | image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof + | ||
642 | chunk_hdr->offset); | ||
643 | ae_regtab = (struct icp_qat_uof_objtable *) | ||
644 | (image->reg_tab_offset + | ||
645 | obj_handle->obj_hdr->file_buff); | ||
646 | ae_uimage[j].ae_reg_num = ae_regtab->entry_num; | ||
647 | ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *) | ||
648 | (((char *)ae_regtab) + | ||
649 | sizeof(struct icp_qat_uof_objtable)); | ||
650 | init_reg_sym_tab = (struct icp_qat_uof_objtable *) | ||
651 | (image->init_reg_sym_tab + | ||
652 | obj_handle->obj_hdr->file_buff); | ||
653 | ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num; | ||
654 | ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *) | ||
655 | (((char *)init_reg_sym_tab) + | ||
656 | sizeof(struct icp_qat_uof_objtable)); | ||
657 | sbreak_tab = (struct icp_qat_uof_objtable *) | ||
658 | (image->sbreak_tab + obj_handle->obj_hdr->file_buff); | ||
659 | ae_uimage[j].sbreak_num = sbreak_tab->entry_num; | ||
660 | ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *) | ||
661 | (((char *)sbreak_tab) + | ||
662 | sizeof(struct icp_qat_uof_objtable)); | ||
663 | ae_uimage[j].img_ptr = image; | ||
664 | if (qat_uclo_check_image_compat(encap_uof_obj, image)) | ||
665 | goto out_err; | ||
666 | ae_uimage[j].page = | ||
667 | kzalloc(sizeof(struct icp_qat_uclo_encap_page), | ||
668 | GFP_KERNEL); | ||
669 | if (!ae_uimage[j].page) | ||
670 | goto out_err; | ||
671 | qat_uclo_map_image_page(encap_uof_obj, image, | ||
672 | ae_uimage[j].page); | ||
673 | } | ||
674 | return j; | ||
675 | out_err: | ||
676 | for (i = 0; i < j; i++) | ||
677 | kfree(ae_uimage[i].page); | ||
678 | return 0; | ||
679 | } | ||
680 | |||
681 | static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae) | ||
682 | { | ||
683 | int i, ae; | ||
684 | int mflag = 0; | ||
685 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
686 | |||
687 | for (ae = 0; ae < max_ae; ae++) { | ||
688 | if (!test_bit(ae, | ||
689 | (unsigned long *)&handle->hal_handle->ae_mask)) | ||
690 | continue; | ||
691 | for (i = 0; i < obj_handle->uimage_num; i++) { | ||
692 | if (!test_bit(ae, (unsigned long *) | ||
693 | &obj_handle->ae_uimage[i].img_ptr->ae_assigned)) | ||
694 | continue; | ||
695 | mflag = 1; | ||
696 | if (qat_uclo_init_ae_data(obj_handle, ae, i)) | ||
697 | return -EINVAL; | ||
698 | } | ||
699 | } | ||
700 | if (!mflag) { | ||
701 | pr_err("QAT: uimage is assigned to an AE that is not enabled\n"); | ||
702 | return -EINVAL; | ||
703 | } | ||
704 | return 0; | ||
705 | } | ||
706 | |||
707 | static struct icp_qat_uof_strtable * | ||
708 | qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr, | ||
709 | char *tab_name, struct icp_qat_uof_strtable *str_table) | ||
710 | { | ||
711 | struct icp_qat_uof_chunkhdr *chunk_hdr; | ||
712 | |||
713 | chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *) | ||
714 | obj_hdr->file_buff, tab_name, NULL); | ||
715 | if (chunk_hdr) { | ||
716 | int hdr_size; | ||
717 | |||
718 | memcpy(&str_table->table_len, obj_hdr->file_buff + | ||
719 | chunk_hdr->offset, sizeof(str_table->table_len)); | ||
720 | hdr_size = (char *)&str_table->strings - (char *)str_table; | ||
721 | str_table->strings = (unsigned long)obj_hdr->file_buff + | ||
722 | chunk_hdr->offset + hdr_size; | ||
723 | return str_table; | ||
724 | } | ||
725 | return NULL; | ||
726 | } | ||
727 | |||
728 | static void | ||
729 | qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj, | ||
730 | struct icp_qat_uclo_init_mem_table *init_mem_tab) | ||
731 | { | ||
732 | struct icp_qat_uof_chunkhdr *chunk_hdr; | ||
733 | |||
734 | chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr, | ||
735 | ICP_QAT_UOF_IMEM, NULL); | ||
736 | if (chunk_hdr) { | ||
737 | memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof + | ||
738 | chunk_hdr->offset, sizeof(unsigned int)); | ||
739 | init_mem_tab->init_mem = (struct icp_qat_uof_initmem *) | ||
740 | (encap_uof_obj->beg_uof + chunk_hdr->offset + | ||
741 | sizeof(unsigned int)); | ||
742 | } | ||
743 | } | ||
744 | |||
745 | static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) | ||
746 | { | ||
747 | unsigned int maj_ver, prod_type = obj_handle->prod_type; | ||
748 | |||
749 | if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) { | ||
750 | pr_err("QAT: UOF type 0x%x doesn't match the platform 0x%x\n", | ||
751 | obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type); | ||
752 | return -EINVAL; | ||
753 | } | ||
754 | maj_ver = obj_handle->prod_rev & 0xff; | ||
755 | if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) || | ||
756 | (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) { | ||
757 | pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver); | ||
758 | return -EINVAL; | ||
759 | } | ||
760 | return 0; | ||
761 | } | ||
762 | |||
763 | static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, | ||
764 | unsigned char ae, unsigned char ctx_mask, | ||
765 | enum icp_qat_uof_regtype reg_type, | ||
766 | unsigned short reg_addr, unsigned int value) | ||
767 | { | ||
768 | switch (reg_type) { | ||
769 | case ICP_GPA_ABS: | ||
770 | case ICP_GPB_ABS: | ||
771 | ctx_mask = 0; | ||
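| /* fall through - ABS registers use the REL path with ctx_mask cleared */ | ||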
772 | case ICP_GPA_REL: | ||
773 | case ICP_GPB_REL: | ||
774 | return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type, | ||
775 | reg_addr, value); | ||
776 | case ICP_SR_ABS: | ||
777 | case ICP_DR_ABS: | ||
778 | case ICP_SR_RD_ABS: | ||
779 | case ICP_DR_RD_ABS: | ||
780 | ctx_mask = 0; | ||
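| /* fall through */ | ||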
781 | case ICP_SR_REL: | ||
782 | case ICP_DR_REL: | ||
783 | case ICP_SR_RD_REL: | ||
784 | case ICP_DR_RD_REL: | ||
785 | return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type, | ||
786 | reg_addr, value); | ||
787 | case ICP_SR_WR_ABS: | ||
788 | case ICP_DR_WR_ABS: | ||
789 | ctx_mask = 0; | ||
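| /* fall through */ | ||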
790 | case ICP_SR_WR_REL: | ||
791 | case ICP_DR_WR_REL: | ||
792 | return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type, | ||
793 | reg_addr, value); | ||
794 | case ICP_NEIGH_REL: | ||
795 | return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value); | ||
796 | default: | ||
797 | pr_err("QAT: UOF uses unsupported reg type 0x%x\n", reg_type); | ||
798 | return -EFAULT; | ||
799 | } | ||
800 | return 0; | ||
801 | } | ||
802 | |||
803 | static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle, | ||
804 | unsigned int ae, | ||
805 | struct icp_qat_uclo_encapme *encap_ae) | ||
806 | { | ||
807 | unsigned int i; | ||
808 | unsigned char ctx_mask; | ||
809 | struct icp_qat_uof_init_regsym *init_regsym; | ||
810 | |||
811 | if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) == | ||
812 | ICP_QAT_UCLO_MAX_CTX) | ||
813 | ctx_mask = 0xff; | ||
814 | else | ||
815 | ctx_mask = 0x55; | ||
816 | |||
817 | for (i = 0; i < encap_ae->init_regsym_num; i++) { | ||
818 | unsigned int exp_res; | ||
819 | |||
820 | init_regsym = &encap_ae->init_regsym[i]; | ||
821 | exp_res = init_regsym->value; | ||
822 | switch (init_regsym->init_type) { | ||
823 | case ICP_QAT_UOF_INIT_REG: | ||
824 | qat_uclo_init_reg(handle, ae, ctx_mask, | ||
825 | (enum icp_qat_uof_regtype) | ||
826 | init_regsym->reg_type, | ||
827 | (unsigned short)init_regsym->reg_addr, | ||
828 | exp_res); | ||
829 | break; | ||
830 | case ICP_QAT_UOF_INIT_REG_CTX: | ||
831 | /* check if ctx is appropriate for the ctxMode */ | ||
832 | if (!((1 << init_regsym->ctx) & ctx_mask)) { | ||
833 | pr_err("QAT: invalid ctx num = 0x%x\n", | ||
834 | init_regsym->ctx); | ||
835 | return -EINVAL; | ||
836 | } | ||
837 | qat_uclo_init_reg(handle, ae, | ||
838 | (unsigned char) | ||
839 | (1 << init_regsym->ctx), | ||
840 | (enum icp_qat_uof_regtype) | ||
841 | init_regsym->reg_type, | ||
842 | (unsigned short)init_regsym->reg_addr, | ||
843 | exp_res); | ||
844 | break; | ||
845 | case ICP_QAT_UOF_INIT_EXPR: | ||
846 | pr_err("QAT: INIT_EXPR feature not supported\n"); | ||
847 | return -EINVAL; | ||
848 | case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP: | ||
849 | pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n"); | ||
850 | return -EINVAL; | ||
851 | default: | ||
852 | break; | ||
853 | } | ||
854 | } | ||
855 | return 0; | ||
856 | } | ||
857 | |||
858 | static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle) | ||
859 | { | ||
860 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
861 | unsigned int s, ae; | ||
862 | |||
863 | if (obj_handle->global_inited) | ||
864 | return 0; | ||
865 | if (obj_handle->init_mem_tab.entry_num) { | ||
866 | if (qat_uclo_init_memory(handle)) { | ||
867 | pr_err("QAT: memory initialization failed\n"); | ||
868 | return -EINVAL; | ||
869 | } | ||
870 | } | ||
871 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
872 | for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) { | ||
873 | if (!obj_handle->ae_data[ae].ae_slices[s].encap_image) | ||
874 | continue; | ||
875 | if (qat_uclo_init_reg_sym(handle, ae, | ||
876 | obj_handle->ae_data[ae]. | ||
877 | ae_slices[s].encap_image)) | ||
878 | return -EINVAL; | ||
879 | } | ||
880 | } | ||
881 | obj_handle->global_inited = 1; | ||
882 | return 0; | ||
883 | } | ||
884 | |||
885 | static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle) | ||
886 | { | ||
887 | unsigned char ae, nn_mode, s; | ||
888 | struct icp_qat_uof_image *uof_image; | ||
889 | struct icp_qat_uclo_aedata *ae_data; | ||
890 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
891 | |||
892 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
893 | if (!test_bit(ae, | ||
894 | (unsigned long *)&handle->hal_handle->ae_mask)) | ||
895 | continue; | ||
896 | ae_data = &obj_handle->ae_data[ae]; | ||
897 | for (s = 0; s < min_t(unsigned int, ae_data->slice_num, | ||
898 | ICP_QAT_UCLO_MAX_CTX); s++) { | ||
899 | if (!obj_handle->ae_data[ae].ae_slices[s].encap_image) | ||
900 | continue; | ||
901 | uof_image = ae_data->ae_slices[s].encap_image->img_ptr; | ||
902 | if (qat_hal_set_ae_ctx_mode(handle, ae, | ||
903 | (char)ICP_QAT_CTX_MODE | ||
904 | (uof_image->ae_mode))) { | ||
905 | pr_err("QAT: qat_hal_set_ae_ctx_mode error\n"); | ||
906 | return -EFAULT; | ||
907 | } | ||
908 | nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode); | ||
909 | if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) { | ||
910 | pr_err("QAT: qat_hal_set_ae_nn_mode error\n"); | ||
911 | return -EFAULT; | ||
912 | } | ||
913 | if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, | ||
914 | (char)ICP_QAT_LOC_MEM0_MODE | ||
915 | (uof_image->ae_mode))) { | ||
916 | pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n"); | ||
917 | return -EFAULT; | ||
918 | } | ||
919 | if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, | ||
920 | (char)ICP_QAT_LOC_MEM1_MODE | ||
921 | (uof_image->ae_mode))) { | ||
922 | pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n"); | ||
923 | return -EFAULT; | ||
924 | } | ||
925 | } | ||
926 | } | ||
927 | return 0; | ||
928 | } | ||
929 | |||
930 | static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle) | ||
931 | { | ||
932 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
933 | struct icp_qat_uclo_encapme *image; | ||
934 | int a; | ||
935 | |||
936 | for (a = 0; a < obj_handle->uimage_num; a++) { | ||
937 | image = &obj_handle->ae_uimage[a]; | ||
938 | image->uwords_num = image->page->beg_addr_p + | ||
939 | image->page->micro_words_num; | ||
940 | } | ||
941 | } | ||
942 | |||
943 | static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) | ||
944 | { | ||
945 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
946 | unsigned int ae; | ||
947 | |||
948 | obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t), | ||
949 | GFP_KERNEL); | ||
950 | if (!obj_handle->uword_buf) | ||
951 | return -ENOMEM; | ||
952 | obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff; | ||
953 | obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *) | ||
954 | obj_handle->obj_hdr->file_buff; | ||
955 | obj_handle->uword_in_bytes = 6; | ||
956 | obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE; | ||
957 | obj_handle->prod_rev = PID_MAJOR_REV | | ||
958 | (PID_MINOR_REV & handle->hal_handle->revision_id); | ||
959 | if (qat_uclo_check_uof_compat(obj_handle)) { | ||
960 | pr_err("QAT: UOF incompatible\n"); | ||
961 | return -EINVAL; | ||
962 | } | ||
963 | obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE; | ||
964 | if (!obj_handle->obj_hdr->file_buff || | ||
965 | !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT, | ||
966 | &obj_handle->str_table)) { | ||
967 | pr_err("QAT: UOF doesn't contain valid images\n"); | ||
968 | goto out_err; | ||
969 | } | ||
970 | obj_handle->uimage_num = | ||
971 | qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage, | ||
972 | ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX); | ||
973 | if (!obj_handle->uimage_num) | ||
974 | goto out_err; | ||
975 | if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) { | ||
976 | pr_err("QAT: Bad object\n"); | ||
977 | goto out_check_uof_aemask_err; | ||
978 | } | ||
979 | qat_uclo_init_uword_num(handle); | ||
980 | qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj, | ||
981 | &obj_handle->init_mem_tab); | ||
982 | if (qat_uclo_set_ae_mode(handle)) | ||
983 | goto out_check_uof_aemask_err; | ||
984 | return 0; | ||
985 | out_check_uof_aemask_err: | ||
986 | for (ae = 0; ae < obj_handle->uimage_num; ae++) | ||
987 | kfree(obj_handle->ae_uimage[ae].page); | ||
988 | out_err: | ||
989 | kfree(obj_handle->uword_buf); | ||
990 | return -EFAULT; | ||
991 | } | ||
992 | |||
993 | int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, | ||
994 | void *addr_ptr, int mem_size) | ||
995 | { | ||
996 | struct icp_qat_uof_filehdr *filehdr; | ||
997 | struct icp_qat_uclo_objhandle *objhdl; | ||
998 | |||
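| /* every AE index must fit in the ae_mask bitmap */ | ||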
999 | BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >= | ||
1000 | (sizeof(handle->hal_handle->ae_mask) * 8)); | ||
1001 | |||
1002 | if (!handle || !addr_ptr || mem_size < 24) | ||
1003 | return -EINVAL; | ||
1004 | objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL); | ||
1005 | if (!objhdl) | ||
1006 | return -ENOMEM; | ||
1007 | objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL); | ||
1008 | if (!objhdl->obj_buf) | ||
1009 | goto out_objbuf_err; | ||
1010 | filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf; | ||
1011 | if (qat_uclo_check_format(filehdr)) | ||
1012 | goto out_objhdr_err; | ||
1013 | objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr, | ||
1014 | ICP_QAT_UOF_OBJS); | ||
1015 | if (!objhdl->obj_hdr) { | ||
1016 | pr_err("QAT: object file chunk is null\n"); | ||
1017 | goto out_objhdr_err; | ||
1018 | } | ||
1019 | handle->obj_handle = objhdl; | ||
1020 | if (qat_uclo_parse_uof_obj(handle)) | ||
1021 | goto out_overlay_obj_err; | ||
1022 | return 0; | ||
1023 | |||
1024 | out_overlay_obj_err: | ||
1025 | handle->obj_handle = NULL; | ||
1026 | kfree(objhdl->obj_hdr); | ||
1027 | out_objhdr_err: | ||
1028 | kfree(objhdl->obj_buf); | ||
1029 | out_objbuf_err: | ||
1030 | kfree(objhdl); | ||
1031 | return -ENOMEM; | ||
1032 | } | ||
1033 | |||
1034 | void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle) | ||
1035 | { | ||
1036 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
1037 | unsigned int a; | ||
1038 | |||
1039 | if (!obj_handle) | ||
1040 | return; | ||
1041 | |||
1042 | kfree(obj_handle->uword_buf); | ||
1043 | for (a = 0; a < obj_handle->uimage_num; a++) | ||
1044 | kfree(obj_handle->ae_uimage[a].page); | ||
1045 | |||
1046 | for (a = 0; a < handle->hal_handle->ae_max_num; a++) | ||
1047 | qat_uclo_free_ae_data(&obj_handle->ae_data[a]); | ||
1048 | |||
1049 | kfree(obj_handle->obj_hdr); | ||
1050 | kfree(obj_handle->obj_buf); | ||
1051 | kfree(obj_handle); | ||
1052 | handle->obj_handle = NULL; | ||
1053 | } | ||
1054 | |||
1055 | static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle, | ||
1056 | struct icp_qat_uclo_encap_page *encap_page, | ||
1057 | uint64_t *uword, unsigned int addr_p, | ||
1058 | unsigned int raddr, uint64_t fill) | ||
1059 | { | ||
1060 | uint64_t uwrd = 0; | ||
1061 | unsigned int i; | ||
1062 | |||
1063 | if (!encap_page) { | ||
1064 | *uword = fill; | ||
1065 | return; | ||
1066 | } | ||
1067 | for (i = 0; i < encap_page->uwblock_num; i++) { | ||
1068 | if (raddr >= encap_page->uwblock[i].start_addr && | ||
1069 | raddr <= encap_page->uwblock[i].start_addr + | ||
1070 | encap_page->uwblock[i].words_num - 1) { | ||
1071 | raddr -= encap_page->uwblock[i].start_addr; | ||
1072 | raddr *= obj_handle->uword_in_bytes; | ||
1073 | memcpy(&uwrd, (void *)(((unsigned long) | ||
1074 | encap_page->uwblock[i].micro_words) + raddr), | ||
1075 | obj_handle->uword_in_bytes); | ||
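| /* keep the low 44 bits of the uword, with bit 42 cleared */ | ||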
1076 | uwrd = uwrd & 0xbffffffffffull; | ||
1077 | } | ||
1078 | } | ||
1079 | *uword = uwrd; | ||
1080 | if (*uword == INVLD_UWORD) | ||
1081 | *uword = fill; | ||
1082 | } | ||
1083 | |||
1084 | static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle, | ||
1085 | struct icp_qat_uclo_encap_page | ||
1086 | *encap_page, unsigned int ae) | ||
1087 | { | ||
1088 | unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen; | ||
1089 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
1090 | uint64_t fill_pat; | ||
1091 | |||
1092 | /* load the page starting at appropriate ustore address */ | ||
1093 | /* get fill-pattern from an image -- they are all the same */ | ||
1094 | memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern, | ||
1095 | sizeof(uint64_t)); | ||
1096 | uw_physical_addr = encap_page->beg_addr_p; | ||
1097 | uw_relative_addr = 0; | ||
1098 | words_num = encap_page->micro_words_num; | ||
1099 | while (words_num) { | ||
1100 | if (words_num < UWORD_CPYBUF_SIZE) | ||
1101 | cpylen = words_num; | ||
1102 | else | ||
1103 | cpylen = UWORD_CPYBUF_SIZE; | ||
1104 | |||
1105 | /* load the buffer */ | ||
1106 | for (i = 0; i < cpylen; i++) | ||
1107 | qat_uclo_fill_uwords(obj_handle, encap_page, | ||
1108 | &obj_handle->uword_buf[i], | ||
1109 | uw_physical_addr + i, | ||
1110 | uw_relative_addr + i, fill_pat); | ||
1111 | |||
1112 | /* copy the buffer to ustore */ | ||
1113 | qat_hal_wr_uwords(handle, (unsigned char)ae, | ||
1114 | uw_physical_addr, cpylen, | ||
1115 | obj_handle->uword_buf); | ||
1116 | |||
1117 | uw_physical_addr += cpylen; | ||
1118 | uw_relative_addr += cpylen; | ||
1119 | words_num -= cpylen; | ||
1120 | } | ||
1121 | } | ||
1122 | |||
1123 | static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle, | ||
1124 | struct icp_qat_uof_image *image) | ||
1125 | { | ||
1126 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
1127 | unsigned int ctx_mask, s; | ||
1128 | struct icp_qat_uclo_page *page; | ||
1129 | unsigned char ae; | ||
1130 | int ctx; | ||
1131 | |||
1132 | if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX) | ||
1133 | ctx_mask = 0xff; | ||
1134 | else | ||
1135 | ctx_mask = 0x55; | ||
1136 | /* load the default page and set assigned CTX PC | ||
1137 | * to the entrypoint address */ | ||
1138 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
1139 | if (!test_bit(ae, (unsigned long *)&image->ae_assigned)) | ||
1140 | continue; | ||
1141 | /* find the slice to which this image is assigned */ | ||
1142 | for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) { | ||
1143 | if (image->ctx_assigned & obj_handle->ae_data[ae]. | ||
1144 | ae_slices[s].ctx_mask_assigned) | ||
1145 | break; | ||
1146 | } | ||
1147 | if (s >= obj_handle->ae_data[ae].slice_num) | ||
1148 | continue; | ||
1149 | page = obj_handle->ae_data[ae].ae_slices[s].page; | ||
1150 | if (!page->encap_page->def_page) | ||
1151 | continue; | ||
1152 | qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae); | ||
1153 | |||
1154 | page = obj_handle->ae_data[ae].ae_slices[s].page; | ||
1155 | for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) | ||
1156 | obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] = | ||
1157 | (ctx_mask & (1 << ctx)) ? page : NULL; | ||
1158 | qat_hal_set_live_ctx(handle, (unsigned char)ae, | ||
1159 | image->ctx_assigned); | ||
1160 | qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned, | ||
1161 | image->entry_address); | ||
1162 | } | ||
1163 | } | ||
1164 | |||
1165 | int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) | ||
1166 | { | ||
1167 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
1168 | unsigned int i; | ||
1169 | |||
1170 | if (qat_uclo_init_globals(handle)) | ||
1171 | return -EINVAL; | ||
1172 | for (i = 0; i < obj_handle->uimage_num; i++) { | ||
1173 | if (!obj_handle->ae_uimage[i].img_ptr) | ||
1174 | return -EINVAL; | ||
1175 | if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i])) | ||
1176 | return -EINVAL; | ||
1177 | qat_uclo_wr_uimage_page(handle, | ||
1178 | obj_handle->ae_uimage[i].img_ptr); | ||
1179 | } | ||
1180 | return 0; | ||
1181 | } | ||
diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile new file mode 100644 index 000000000000..25171c557043 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/Makefile | |||
@@ -0,0 +1,8 @@ | |||
1 | ccflags-y := -I$(src)/../qat_common | ||
2 | obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o | ||
3 | qat_dh895xcc-objs := adf_drv.o \ | ||
4 | adf_isr.o \ | ||
5 | adf_dh895xcc_hw_data.o \ | ||
6 | adf_hw_arbiter.o \ | ||
7 | qat_admin.o \ | ||
8 | adf_admin.o | ||
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c new file mode 100644 index 000000000000..978d6c56639d --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c | |||
@@ -0,0 +1,144 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/types.h> | ||
48 | #include <linux/mutex.h> | ||
49 | #include <linux/slab.h> | ||
50 | #include <linux/delay.h> | ||
51 | #include <linux/pci.h> | ||
52 | #include <linux/dma-mapping.h> | ||
53 | #include <adf_accel_devices.h> | ||
54 | #include "adf_drv.h" | ||
55 | #include "adf_dh895xcc_hw_data.h" | ||
56 | |||
57 | #define ADF_ADMINMSG_LEN 32 | ||
58 | |||
59 | struct adf_admin_comms { | ||
60 | dma_addr_t phy_addr; | ||
61 | void *virt_addr; | ||
62 | void __iomem *mailbox_addr; | ||
63 | struct mutex lock; /* protects adf_admin_comms struct */ | ||
64 | }; | ||
65 | |||
66 | int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, | ||
67 | uint32_t ae, void *in, void *out) | ||
68 | { | ||
69 | struct adf_admin_comms *admin = accel_dev->admin; | ||
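| /* each AE owns a request/response pair of ADF_ADMINMSG_LEN slots */ | ||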
70 | int offset = ae * ADF_ADMINMSG_LEN * 2; | ||
71 | void __iomem *mailbox = admin->mailbox_addr; | ||
72 | int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE; | ||
73 | int times, received; | ||
74 | |||
75 | mutex_lock(&admin->lock); | ||
76 | |||
77 | if (ADF_CSR_RD(mailbox, mb_offset) == 1) { | ||
78 | mutex_unlock(&admin->lock); | ||
79 | return -EAGAIN; | ||
80 | } | ||
81 | |||
82 | memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN); | ||
83 | ADF_CSR_WR(mailbox, mb_offset, 1); | ||
84 | received = 0; | ||
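| /* poll for up to one second (50 * 20 ms) until the firmware clears the mailbox */ | ||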
85 | for (times = 0; times < 50; times++) { | ||
86 | msleep(20); | ||
87 | if (ADF_CSR_RD(mailbox, mb_offset) == 0) { | ||
88 | received = 1; | ||
89 | break; | ||
90 | } | ||
91 | } | ||
92 | if (received) | ||
93 | memcpy(out, admin->virt_addr + offset + | ||
94 | ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN); | ||
95 | else | ||
96 | pr_err("QAT: Failed to send admin msg to accelerator\n"); | ||
97 | |||
98 | mutex_unlock(&admin->lock); | ||
99 | return received ? 0 : -EFAULT; | ||
100 | } | ||
101 | |||
102 | int adf_init_admin_comms(struct adf_accel_dev *accel_dev) | ||
103 | { | ||
104 | struct adf_admin_comms *admin; | ||
105 | struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR]; | ||
106 | void __iomem *csr = pmisc->virt_addr; | ||
107 | void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET; | ||
108 | uint64_t reg_val; | ||
109 | |||
110 | admin = kzalloc_node(sizeof(*admin), GFP_KERNEL, | ||
111 | accel_dev->numa_node); | ||
112 | if (!admin) | ||
113 | return -ENOMEM; | ||
114 | admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, | ||
115 | &admin->phy_addr, GFP_KERNEL); | ||
116 | if (!admin->virt_addr) { | ||
117 | dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n"); | ||
118 | kfree(admin); | ||
119 | return -ENOMEM; | ||
120 | } | ||
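| /* program the 64-bit DMA address of the message buffer into the admin CSRs */ | ||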
121 | reg_val = (uint64_t)admin->phy_addr; | ||
122 | ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32); | ||
123 | ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val); | ||
124 | mutex_init(&admin->lock); | ||
125 | admin->mailbox_addr = mailbox; | ||
126 | accel_dev->admin = admin; | ||
127 | return 0; | ||
128 | } | ||
129 | |||
130 | void adf_exit_admin_comms(struct adf_accel_dev *accel_dev) | ||
131 | { | ||
132 | struct adf_admin_comms *admin = accel_dev->admin; | ||
133 | |||
134 | if (!admin) | ||
135 | return; | ||
136 | |||
137 | if (admin->virt_addr) | ||
138 | dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, | ||
139 | admin->virt_addr, admin->phy_addr); | ||
140 | |||
141 | mutex_destroy(&admin->lock); | ||
142 | kfree(admin); | ||
143 | accel_dev->admin = NULL; | ||
144 | } | ||
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c new file mode 100644 index 000000000000..ef05825cc651 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | |||
@@ -0,0 +1,214 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <adf_accel_devices.h> | ||
48 | #include "adf_dh895xcc_hw_data.h" | ||
49 | #include "adf_drv.h" | ||
50 | |||
51 | /* Worker thread to service arbiter mappings based on dev SKUs */ | ||
52 | static const uint32_t thrd_to_arb_map_sku4[] = { | ||
53 | 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666, | ||
54 | 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222, | ||
55 | 0x00000000, 0x00000000, 0x00000000, 0x00000000 | ||
56 | }; | ||
57 | |||
58 | static const uint32_t thrd_to_arb_map_sku6[] = { | ||
59 | 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666, | ||
60 | 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222, | ||
61 | 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222 | ||
62 | }; | ||
63 | |||
64 | static struct adf_hw_device_class dh895xcc_class = { | ||
65 | .name = ADF_DH895XCC_DEVICE_NAME, | ||
66 | .type = DEV_DH895XCC, | ||
67 | .instances = 0 | ||
68 | }; | ||
69 | |||
70 | static uint32_t get_accel_mask(uint32_t fuse) | ||
71 | { | ||
72 | return (~fuse) >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET & | ||
73 | ADF_DH895XCC_ACCELERATORS_MASK; | ||
74 | } | ||
75 | |||
76 | static uint32_t get_ae_mask(uint32_t fuse) | ||
77 | { | ||
78 | return (~fuse) & ADF_DH895XCC_ACCELENGINES_MASK; | ||
79 | } | ||
80 | |||
81 | static uint32_t get_num_accels(struct adf_hw_device_data *self) | ||
82 | { | ||
83 | uint32_t i, ctr = 0; | ||
84 | |||
85 | if (!self || !self->accel_mask) | ||
86 | return 0; | ||
87 | |||
88 | for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) { | ||
89 | if (self->accel_mask & (1 << i)) | ||
90 | ctr++; | ||
91 | } | ||
92 | return ctr; | ||
93 | } | ||
94 | |||
95 | static uint32_t get_num_aes(struct adf_hw_device_data *self) | ||
96 | { | ||
97 | uint32_t i, ctr = 0; | ||
98 | |||
99 | if (!self || !self->ae_mask) | ||
100 | return 0; | ||
101 | |||
102 | for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) { | ||
103 | if (self->ae_mask & (1 << i)) | ||
104 | ctr++; | ||
105 | } | ||
106 | return ctr; | ||
107 | } | ||
108 | |||
109 | static uint32_t get_misc_bar_id(struct adf_hw_device_data *self) | ||
110 | { | ||
111 | return ADF_DH895XCC_PMISC_BAR; | ||
112 | } | ||
113 | |||
114 | static uint32_t get_etr_bar_id(struct adf_hw_device_data *self) | ||
115 | { | ||
116 | return ADF_DH895XCC_ETR_BAR; | ||
117 | } | ||
118 | |||
119 | static enum dev_sku_info get_sku(struct adf_hw_device_data *self) | ||
120 | { | ||
121 | int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK) | ||
122 | >> ADF_DH895XCC_FUSECTL_SKU_SHIFT; | ||
123 | |||
124 | switch (sku) { | ||
125 | case ADF_DH895XCC_FUSECTL_SKU_1: | ||
126 | return DEV_SKU_1; | ||
127 | case ADF_DH895XCC_FUSECTL_SKU_2: | ||
128 | return DEV_SKU_2; | ||
129 | case ADF_DH895XCC_FUSECTL_SKU_3: | ||
130 | return DEV_SKU_3; | ||
131 | case ADF_DH895XCC_FUSECTL_SKU_4: | ||
132 | return DEV_SKU_4; | ||
133 | default: | ||
134 | return DEV_SKU_UNKNOWN; | ||
135 | } | ||
136 | return DEV_SKU_UNKNOWN; | ||
137 | } | ||
138 | |||
139 | void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, | ||
140 | uint32_t const **arb_map_config) | ||
141 | { | ||
142 | switch (accel_dev->accel_pci_dev.sku) { | ||
143 | case DEV_SKU_1: | ||
144 | *arb_map_config = thrd_to_arb_map_sku4; | ||
145 | break; | ||
146 | |||
147 | case DEV_SKU_2: | ||
148 | case DEV_SKU_4: | ||
149 | *arb_map_config = thrd_to_arb_map_sku6; | ||
150 | break; | ||
151 | default: | ||
152 | pr_err("QAT: The configuration doesn't match any SKU\n"); | ||
153 | *arb_map_config = NULL; | ||
154 | } | ||
155 | } | ||
156 | |||
157 | static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) | ||
158 | { | ||
159 | struct adf_hw_device_data *hw_device = accel_dev->hw_device; | ||
160 | struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR]; | ||
161 | void __iomem *csr = misc_bar->virt_addr; | ||
162 | unsigned int val, i; | ||
163 | |||
164 | /* Enable Accel Engine error detection & correction */ | ||
165 | for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { | ||
166 | val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i)); | ||
167 | val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR; | ||
168 | ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val); | ||
169 | val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i)); | ||
170 | val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR; | ||
171 | ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val); | ||
172 | } | ||
173 | |||
174 | /* Enable shared memory error detection & correction */ | ||
175 | for (i = 0; i < hw_device->get_num_accels(hw_device); i++) { | ||
176 | val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i)); | ||
177 | val |= ADF_DH895XCC_ERRSSMSH_EN; | ||
178 | ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val); | ||
179 | val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i)); | ||
180 | val |= ADF_DH895XCC_ERRSSMSH_EN; | ||
181 | ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val); | ||
182 | } | ||
183 | } | ||
184 | |||
185 | void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) | ||
186 | { | ||
187 | hw_data->dev_class = &dh895xcc_class; | ||
188 | hw_data->instance_id = dh895xcc_class.instances++; | ||
189 | hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS; | ||
190 | hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS; | ||
191 | hw_data->pci_dev_id = ADF_DH895XCC_PCI_DEVICE_ID; | ||
192 | hw_data->num_logical_accel = 1; | ||
193 | hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES; | ||
194 | hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET; | ||
195 | hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK; | ||
196 | hw_data->alloc_irq = adf_isr_resource_alloc; | ||
197 | hw_data->free_irq = adf_isr_resource_free; | ||
198 | hw_data->enable_error_correction = adf_enable_error_correction; | ||
199 | hw_data->hw_arb_ring_enable = adf_update_ring_arb_enable; | ||
200 | hw_data->hw_arb_ring_disable = adf_update_ring_arb_enable; | ||
201 | hw_data->get_accel_mask = get_accel_mask; | ||
202 | hw_data->get_ae_mask = get_ae_mask; | ||
203 | hw_data->get_num_accels = get_num_accels; | ||
204 | hw_data->get_num_aes = get_num_aes; | ||
205 | hw_data->get_etr_bar_id = get_etr_bar_id; | ||
206 | hw_data->get_misc_bar_id = get_misc_bar_id; | ||
207 | hw_data->get_sku = get_sku; | ||
208 | hw_data->fw_name = ADF_DH895XCC_FW; | ||
209 | } | ||
210 | |||
211 | void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) | ||
212 | { | ||
213 | hw_data->dev_class->instances--; | ||
214 | } | ||
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h new file mode 100644 index 000000000000..b707f292b377 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_DH895x_HW_DATA_H_ | ||
48 | #define ADF_DH895x_HW_DATA_H_ | ||
49 | |||
50 | /* PCIe configuration space */ | ||
51 | #define ADF_DH895XCC_RX_RINGS_OFFSET 8 | ||
52 | #define ADF_DH895XCC_TX_RINGS_MASK 0xFF | ||
53 | #define ADF_DH895XCC_FUSECTL_OFFSET 0x40 | ||
54 | #define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000 | ||
55 | #define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20 | ||
56 | #define ADF_DH895XCC_FUSECTL_SKU_1 0x0 | ||
57 | #define ADF_DH895XCC_FUSECTL_SKU_2 0x1 | ||
58 | #define ADF_DH895XCC_FUSECTL_SKU_3 0x2 | ||
59 | #define ADF_DH895XCC_FUSECTL_SKU_4 0x3 | ||
60 | #define ADF_DH895XCC_MAX_ACCELERATORS 6 | ||
61 | #define ADF_DH895XCC_MAX_ACCELENGINES 12 | ||
62 | #define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13 | ||
63 | #define ADF_DH895XCC_ACCELERATORS_MASK 0x3F | ||
64 | #define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF | ||
65 | #define ADF_DH895XCC_LEGFUSE_OFFSET 0x4C | ||
66 | #define ADF_DH895XCC_ETR_MAX_BANKS 32 | ||
67 | #define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) | ||
68 | #define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) | ||
69 | #define ADF_DH895XCC_SMIA0_MASK 0xFFFF | ||
70 | #define ADF_DH895XCC_SMIA1_MASK 0x1 | ||
71 | /* Error detection and correction */ | ||
72 | #define ADF_DH895XCC_AE_CTX_ENABLES(i) ((i) * 0x1000 + 0x20818) | ||
73 | #define ADF_DH895XCC_AE_MISC_CONTROL(i) ((i) * 0x1000 + 0x20960) | ||
74 | #define ADF_DH895XCC_ENABLE_AE_ECC_ERR (1 << 28) | ||
75 | #define ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR (1 << 24 | 1 << 12) | ||
76 | #define ADF_DH895XCC_UERRSSMSH(i) ((i) * 0x4000 + 0x18) | ||
77 | #define ADF_DH895XCC_CERRSSMSH(i) ((i) * 0x4000 + 0x10) | ||
78 | #define ADF_DH895XCC_ERRSSMSH_EN (1 << 3) | ||
79 | |||
80 | /* Admin Messages Registers */ | ||
81 | #define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574) | ||
82 | #define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578) | ||
83 | #define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970 | ||
84 | #define ADF_DH895XCC_MAILBOX_STRIDE 0x1000 | ||
85 | #define ADF_DH895XCC_FW "qat_895xcc.bin" | ||
86 | #endif | ||
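Note: the FUSECTL constants above place the SKU in bits 21:20 of the fuse word that adf_probe() reads from PCI config space at ADF_DH895XCC_FUSECTL_OFFSET. A sketch of the decode, assuming a hypothetical helper name (the driver's get_sku() callback performs the equivalent extraction):

	/* Hypothetical helper: extract the 2-bit SKU field from the fuses. */
	static uint32_t dh895xcc_fuses_to_sku(uint32_t fuses)
	{
		return (fuses & ADF_DH895XCC_FUSECTL_SKU_MASK) >>
			ADF_DH895XCC_FUSECTL_SKU_SHIFT; /* 0x0..0x3 -> SKU_1..SKU_4 */
	}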
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c new file mode 100644 index 000000000000..0d0435a41be9 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c | |||
@@ -0,0 +1,449 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/kernel.h> | ||
48 | #include <linux/module.h> | ||
49 | #include <linux/pci.h> | ||
50 | #include <linux/init.h> | ||
51 | #include <linux/types.h> | ||
52 | #include <linux/fs.h> | ||
53 | #include <linux/slab.h> | ||
54 | #include <linux/errno.h> | ||
55 | #include <linux/device.h> | ||
56 | #include <linux/dma-mapping.h> | ||
57 | #include <linux/platform_device.h> | ||
58 | #include <linux/workqueue.h> | ||
59 | #include <linux/io.h> | ||
60 | #include <adf_accel_devices.h> | ||
61 | #include <adf_common_drv.h> | ||
62 | #include <adf_cfg.h> | ||
63 | #include <adf_transport_access_macros.h> | ||
64 | #include "adf_dh895xcc_hw_data.h" | ||
65 | #include "adf_drv.h" | ||
66 | |||
67 | static const char adf_driver_name[] = ADF_DH895XCC_DEVICE_NAME; | ||
68 | |||
69 | #define ADF_SYSTEM_DEVICE(device_id) \ | ||
70 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} | ||
71 | |||
72 | static const struct pci_device_id adf_pci_tbl[] = { | ||
73 | ADF_SYSTEM_DEVICE(ADF_DH895XCC_PCI_DEVICE_ID), | ||
74 | {0,} | ||
75 | }; | ||
76 | MODULE_DEVICE_TABLE(pci, adf_pci_tbl); | ||
77 | |||
78 | static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); | ||
79 | static void adf_remove(struct pci_dev *dev); | ||
80 | |||
81 | static struct pci_driver adf_driver = { | ||
82 | .id_table = adf_pci_tbl, | ||
83 | .name = adf_driver_name, | ||
84 | .probe = adf_probe, | ||
85 | .remove = adf_remove | ||
86 | }; | ||
87 | |||
88 | static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) | ||
89 | { | ||
90 | struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; | ||
91 | int i; | ||
92 | |||
93 | adf_exit_admin_comms(accel_dev); | ||
94 | adf_exit_arb(accel_dev); | ||
95 | adf_cleanup_etr_data(accel_dev); | ||
96 | |||
97 | for (i = 0; i < ADF_PCI_MAX_BARS; i++) { | ||
98 | struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; | ||
99 | |||
100 | if (bar->virt_addr) | ||
101 | pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr); | ||
102 | } | ||
103 | |||
104 | if (accel_dev->hw_device) { | ||
105 | switch (accel_dev->hw_device->pci_dev_id) { | ||
106 | case ADF_DH895XCC_PCI_DEVICE_ID: | ||
107 | adf_clean_hw_data_dh895xcc(accel_dev->hw_device); | ||
108 | break; | ||
109 | default: | ||
110 | break; | ||
111 | } | ||
112 | kfree(accel_dev->hw_device); | ||
113 | } | ||
114 | adf_cfg_dev_remove(accel_dev); | ||
115 | debugfs_remove(accel_dev->debugfs_dir); | ||
116 | adf_devmgr_rm_dev(accel_dev); | ||
117 | pci_release_regions(accel_pci_dev->pci_dev); | ||
118 | pci_disable_device(accel_pci_dev->pci_dev); | ||
119 | kfree(accel_dev); | ||
120 | } | ||
121 | |||
122 | static uint8_t adf_get_dev_node_id(struct pci_dev *pdev) | ||
123 | { | ||
124 | unsigned int bus_per_cpu = 0; | ||
125 | struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1); | ||
126 | |||
127 | if (!c->phys_proc_id) | ||
128 | return 0; | ||
129 | |||
130 | bus_per_cpu = 256 / (c->phys_proc_id + 1); | ||
131 | |||
132 | if (bus_per_cpu != 0) | ||
133 | return pdev->bus->number / bus_per_cpu; | ||
134 | return 0; | ||
135 | } | ||
136 | |||
137 | static int qat_dev_start(struct adf_accel_dev *accel_dev) | ||
138 | { | ||
139 | int cpus = num_online_cpus(); | ||
140 | int banks = GET_MAX_BANKS(accel_dev); | ||
141 | int instances = min(cpus, banks); | ||
142 | char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; | ||
143 | int i; | ||
144 | unsigned long val; | ||
145 | |||
146 | if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC)) | ||
147 | goto err; | ||
148 | if (adf_cfg_section_add(accel_dev, "Accelerator0")) | ||
149 | goto err; | ||
150 | for (i = 0; i < instances; i++) { | ||
151 | val = i; | ||
152 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i); | ||
153 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
154 | key, (void *)&val, ADF_DEC)) | ||
155 | goto err; | ||
156 | |||
157 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, | ||
158 | i); | ||
159 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
160 | key, (void *)&val, ADF_DEC)) | ||
161 | goto err; | ||
162 | |||
163 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); | ||
164 | val = 128; | ||
165 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
166 | key, (void *)&val, ADF_DEC)) | ||
167 | goto err; | ||
168 | |||
169 | val = 512; | ||
170 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); | ||
171 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
172 | key, (void *)&val, ADF_DEC)) | ||
173 | goto err; | ||
174 | |||
175 | val = 0; | ||
176 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); | ||
177 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
178 | key, (void *)&val, ADF_DEC)) | ||
179 | goto err; | ||
180 | |||
181 | val = 2; | ||
182 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); | ||
183 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
184 | key, (void *)&val, ADF_DEC)) | ||
185 | goto err; | ||
186 | |||
187 | val = 4; | ||
188 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i); | ||
189 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
190 | key, (void *)&val, ADF_DEC)) | ||
191 | goto err; | ||
192 | |||
193 | val = 8; | ||
194 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); | ||
195 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
196 | key, (void *)&val, ADF_DEC)) | ||
197 | goto err; | ||
198 | |||
199 | val = 10; | ||
200 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); | ||
201 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
202 | key, (void *)&val, ADF_DEC)) | ||
203 | goto err; | ||
204 | |||
205 | val = 12; | ||
206 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i); | ||
207 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
208 | key, (void *)&val, ADF_DEC)) | ||
209 | goto err; | ||
210 | |||
211 | val = ADF_COALESCING_DEF_TIME; | ||
212 | snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); | ||
213 | if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0", | ||
214 | key, (void *)&val, ADF_DEC)) | ||
215 | goto err; | ||
216 | } | ||
217 | |||
218 | val = i; | ||
219 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
220 | ADF_NUM_CY, (void *)&val, ADF_DEC)) | ||
221 | goto err; | ||
222 | |||
223 | set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); | ||
224 | return adf_dev_start(accel_dev); | ||
225 | err: | ||
226 | dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n"); | ||
227 | return -EINVAL; | ||
228 | } | ||
229 | |||
230 | static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
231 | { | ||
232 | struct adf_accel_dev *accel_dev; | ||
233 | struct adf_accel_pci *accel_pci_dev; | ||
234 | struct adf_hw_device_data *hw_data; | ||
235 | void __iomem *pmisc_bar_addr = NULL; | ||
236 | char name[ADF_DEVICE_NAME_LENGTH]; | ||
237 | unsigned int i, bar_nr; | ||
238 | uint8_t node; | ||
239 | int ret = -ENODEV; | ||
240 | |||
241 | switch (ent->device) { | ||
242 | case ADF_DH895XCC_PCI_DEVICE_ID: | ||
243 | break; | ||
244 | default: | ||
245 | dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device); | ||
246 | return -ENODEV; | ||
247 | } | ||
248 | |||
249 | node = adf_get_dev_node_id(pdev); | ||
250 | accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node); | ||
251 | if (!accel_dev) | ||
252 | return -ENOMEM; | ||
253 | |||
254 | accel_dev->numa_node = node; | ||
255 | INIT_LIST_HEAD(&accel_dev->crypto_list); | ||
256 | |||
257 | /* Add accel device to accel table. | ||
258 | * This should be called before adf_cleanup_accel is called */ | ||
259 | if (adf_devmgr_add_dev(accel_dev)) { | ||
260 | dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); | ||
261 | kfree(accel_dev); | ||
262 | return -EFAULT; | ||
263 | } | ||
264 | |||
265 | accel_dev->owner = THIS_MODULE; | ||
266 | /* Allocate and configure device configuration structure */ | ||
267 | hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node); | ||
268 | if (!hw_data) { | ||
269 | ret = -ENOMEM; | ||
270 | goto out_err; | ||
271 | } | ||
272 | |||
273 | accel_dev->hw_device = hw_data; | ||
274 | switch (ent->device) { | ||
275 | case ADF_DH895XCC_PCI_DEVICE_ID: | ||
276 | adf_init_hw_data_dh895xcc(accel_dev->hw_device); | ||
277 | break; | ||
278 | default: | ||
279 | goto out_err; | ||
280 | } | ||
281 | accel_pci_dev = &accel_dev->accel_pci_dev; | ||
282 | pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); | ||
283 | pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET, | ||
284 | &hw_data->fuses); | ||
285 | |||
286 | /* Get Accelerators and Accelerators Engines masks */ | ||
287 | hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); | ||
288 | hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); | ||
289 | accel_pci_dev->sku = hw_data->get_sku(hw_data); | ||
290 | accel_pci_dev->pci_dev = pdev; | ||
291 | /* If the device has no acceleration engines then ignore it. */ | ||
292 | if (!hw_data->accel_mask || !hw_data->ae_mask || | ||
293 | ((~hw_data->ae_mask) & 0x01)) { | ||
294 | dev_err(&pdev->dev, "No acceleration units found"); | ||
295 | ret = -EFAULT; | ||
296 | goto out_err; | ||
297 | } | ||
298 | |||
299 | /* Create dev top level debugfs entry */ | ||
300 | snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX, | ||
301 | hw_data->dev_class->name, hw_data->instance_id); | ||
302 | accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); | ||
303 | if (!accel_dev->debugfs_dir) { | ||
304 | dev_err(&pdev->dev, "Could not create debugfs dir\n"); | ||
305 | ret = -EINVAL; | ||
306 | goto out_err; | ||
307 | } | ||
308 | |||
309 | /* Create device configuration table */ | ||
310 | ret = adf_cfg_dev_add(accel_dev); | ||
311 | if (ret) | ||
312 | goto out_err; | ||
313 | |||
314 | /* enable PCI device */ | ||
315 | if (pci_enable_device(pdev)) { | ||
316 | ret = -EFAULT; | ||
317 | goto out_err; | ||
318 | } | ||
319 | |||
320 | /* set dma identifier */ | ||
321 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
322 | if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { | ||
323 | dev_err(&pdev->dev, "No usable DMA configuration\n"); | ||
324 | ret = -EFAULT; | ||
325 | goto out_err; | ||
326 | } else { | ||
327 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
328 | } | ||
329 | |||
330 | } else { | ||
331 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
332 | } | ||
333 | |||
334 | if (pci_request_regions(pdev, adf_driver_name)) { | ||
335 | ret = -EFAULT; | ||
336 | goto out_err; | ||
337 | } | ||
338 | |||
339 | /* Read accelerator capabilities mask */ | ||
340 | pci_read_config_dword(pdev, ADF_DH895XCC_LEGFUSE_OFFSET, | ||
341 | &hw_data->accel_capabilities_mask); | ||
342 | |||
343 | /* Find and map all the device's BARS */ | ||
344 | for (i = 0; i < ADF_PCI_MAX_BARS; i++) { | ||
345 | struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; | ||
346 | |||
347 | bar_nr = i * 2; | ||
348 | bar->base_addr = pci_resource_start(pdev, bar_nr); | ||
349 | if (!bar->base_addr) | ||
350 | break; | ||
351 | bar->size = pci_resource_len(pdev, bar_nr); | ||
352 | bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); | ||
353 | if (!bar->virt_addr) { | ||
354 | dev_err(&pdev->dev, "Failed to map BAR %d\n", i); | ||
355 | ret = -EFAULT; | ||
356 | goto out_err; | ||
357 | } | ||
358 | if (i == ADF_DH895XCC_PMISC_BAR) | ||
359 | pmisc_bar_addr = bar->virt_addr; | ||
360 | } | ||
361 | pci_set_master(pdev); | ||
362 | |||
363 | if (adf_enable_aer(accel_dev, &adf_driver)) { | ||
364 | dev_err(&pdev->dev, "Failed to enable aer\n"); | ||
365 | ret = -EFAULT; | ||
366 | goto out_err; | ||
367 | } | ||
368 | |||
369 | if (adf_init_etr_data(accel_dev)) { | ||
370 | dev_err(&pdev->dev, "Failed initialize etr\n"); | ||
371 | ret = -EFAULT; | ||
372 | goto out_err; | ||
373 | } | ||
374 | |||
375 | if (adf_init_admin_comms(accel_dev)) { | ||
376 | dev_err(&pdev->dev, "Failed to initialize admin comms\n"); | ||
377 | ret = -EFAULT; | ||
378 | goto out_err; | ||
379 | } | ||
380 | |||
381 | if (adf_init_arb(accel_dev)) { | ||
382 | dev_err(&pdev->dev, "Failed to initialize hw arbiter\n"); | ||
383 | ret = -EFAULT; | ||
384 | goto out_err; | ||
385 | } | ||
386 | if (pci_save_state(pdev)) { | ||
387 | dev_err(&pdev->dev, "Failed to save pci state\n"); | ||
388 | ret = -ENOMEM; | ||
389 | goto out_err; | ||
390 | } | ||
391 | |||
392 | /* Enable bundle and misc interrupts */ | ||
393 | ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET, | ||
394 | ADF_DH895XCC_SMIA0_MASK); | ||
395 | ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET, | ||
396 | ADF_DH895XCC_SMIA1_MASK); | ||
397 | |||
398 | ret = qat_dev_start(accel_dev); | ||
399 | if (ret) { | ||
400 | adf_dev_stop(accel_dev); | ||
401 | goto out_err; | ||
402 | } | ||
403 | |||
404 | return 0; | ||
405 | out_err: | ||
406 | adf_cleanup_accel(accel_dev); | ||
407 | return ret; | ||
408 | } | ||
409 | |||
410 | static void adf_remove(struct pci_dev *pdev) | ||
411 | { | ||
412 | struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); | ||
413 | |||
414 | if (!accel_dev) { | ||
415 | pr_err("QAT: Driver removal failed\n"); | ||
416 | return; | ||
417 | } | ||
418 | if (adf_dev_stop(accel_dev)) | ||
419 | dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); | ||
420 | adf_disable_aer(accel_dev); | ||
421 | adf_cleanup_accel(accel_dev); | ||
422 | } | ||
423 | |||
424 | static int __init adfdrv_init(void) | ||
425 | { | ||
426 | request_module("intel_qat"); | ||
427 | if (qat_admin_register()) | ||
428 | return -EFAULT; | ||
429 | if (pci_register_driver(&adf_driver)) { | ||
430 | pr_err("QAT: Driver initialization failed\n"); | ||
431 | qat_admin_unregister(); | ||
432 | return -EFAULT; | ||
433 | } | ||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | static void __exit adfdrv_release(void) | ||
438 | { | ||
439 | pci_unregister_driver(&adf_driver); | ||
440 | qat_admin_unregister(); | ||
441 | } | ||
442 | |||
443 | module_init(adfdrv_init); | ||
444 | module_exit(adfdrv_release); | ||
445 | |||
446 | MODULE_LICENSE("Dual BSD/GPL"); | ||
447 | MODULE_AUTHOR("Intel"); | ||
448 | MODULE_FIRMWARE("qat_895xcc.bin"); | ||
449 | MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); | ||
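Note: adf_probe() tries a 64-bit DMA mask and falls back to 32-bit, keeping the streaming and coherent masks in step. For reference, an essentially equivalent formulation using the combined helper that has been in mainline since v3.13; this is a sketch, not code from this driver, and the function name is hypothetical:

	/* Needs <linux/pci.h> and <linux/dma-mapping.h>. */
	static int example_set_dma_masks(struct pci_dev *pdev)
	{
		/* Try 64-bit streaming + coherent masks, then 32-bit. */
		if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
			dev_err(&pdev->dev, "No usable DMA configuration\n");
			return -EFAULT;
		}
		return 0;
	}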
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h new file mode 100644 index 000000000000..a2fbb6ce75cd --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h | |||
@@ -0,0 +1,67 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_DH895x_DRV_H_ | ||
48 | #define ADF_DH895x_DRV_H_ | ||
49 | #include <adf_accel_devices.h> | ||
50 | #include <adf_transport.h> | ||
51 | |||
52 | void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); | ||
53 | void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); | ||
54 | int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev); | ||
55 | void adf_isr_resource_free(struct adf_accel_dev *accel_dev); | ||
56 | void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring); | ||
57 | void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, | ||
58 | uint32_t const **arb_map_config); | ||
59 | int adf_init_admin_comms(struct adf_accel_dev *accel_dev); | ||
60 | void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); | ||
61 | int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, | ||
62 | uint32_t ae, void *in, void *out); | ||
63 | int qat_admin_register(void); | ||
64 | int qat_admin_unregister(void); | ||
65 | int adf_init_arb(struct adf_accel_dev *accel_dev); | ||
66 | void adf_exit_arb(struct adf_accel_dev *accel_dev); | ||
67 | #endif | ||
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c b/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c new file mode 100644 index 000000000000..1864bdb36f8f --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c | |||
@@ -0,0 +1,159 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <adf_accel_devices.h> | ||
48 | #include <adf_transport_internal.h> | ||
49 | #include "adf_drv.h" | ||
50 | |||
51 | #define ADF_ARB_NUM 4 | ||
52 | #define ADF_ARB_REQ_RING_NUM 8 | ||
53 | #define ADF_ARB_REG_SIZE 0x4 | ||
54 | #define ADF_ARB_WTR_SIZE 0x20 | ||
55 | #define ADF_ARB_OFFSET 0x30000 | ||
56 | #define ADF_ARB_REG_SLOT 0x1000 | ||
57 | #define ADF_ARB_WTR_OFFSET 0x010 | ||
58 | #define ADF_ARB_RO_EN_OFFSET 0x090 | ||
59 | #define ADF_ARB_WQCFG_OFFSET 0x100 | ||
60 | #define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180 | ||
61 | #define ADF_ARB_WRK_2_SER_MAP 10 | ||
62 | #define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C | ||
63 | |||
64 | #define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \ | ||
65 | ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ | ||
66 | (ADF_ARB_REG_SLOT * (index)), value) | ||
67 | |||
68 | #define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \ | ||
69 | ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ | ||
70 | ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * (index)), value) | ||
71 | |||
72 | #define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \ | ||
73 | ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ | ||
74 | ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * (arb)) + \ | ||
75 | (ADF_ARB_REG_SIZE * (index)), value) | ||
76 | |||
77 | #define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \ | ||
78 | ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \ | ||
79 | (ADF_ARB_REG_SIZE * (index)), value) | ||
80 | |||
81 | #define WRITE_CSR_ARB_WRK_2_SER_MAP(csr_addr, index, value) \ | ||
82 | ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ | ||
83 | ADF_ARB_WRK_2_SER_MAP_OFFSET) + \ | ||
84 | (ADF_ARB_REG_SIZE * (index)), value) | ||
85 | |||
86 | #define WRITE_CSR_ARB_WQCFG(csr_addr, index, value) \ | ||
87 | ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ | ||
88 | ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * (index)), value) | ||
89 | |||
90 | int adf_init_arb(struct adf_accel_dev *accel_dev) | ||
91 | { | ||
92 | void __iomem *csr = accel_dev->transport->banks[0].csr_addr; | ||
93 | uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1; | ||
94 | uint32_t arb, i; | ||
95 | const uint32_t *thd_2_arb_cfg; | ||
96 | |||
97 | /* Service arb configured for 32-byte responses and | ||
98 | * ring flow control check enabled. */ | ||
99 | for (arb = 0; arb < ADF_ARB_NUM; arb++) | ||
100 | WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg); | ||
101 | |||
102 | /* Setup service weighting */ | ||
103 | for (arb = 0; arb < ADF_ARB_NUM; arb++) | ||
104 | for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++) | ||
105 | WRITE_CSR_ARB_WEIGHT(csr, arb, i, 0xFFFFFFFF); | ||
106 | |||
107 | /* Setup ring response ordering */ | ||
108 | for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++) | ||
109 | WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF); | ||
110 | |||
111 | /* Setup worker queue registers */ | ||
112 | for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) | ||
113 | WRITE_CSR_ARB_WQCFG(csr, i, i); | ||
114 | |||
115 | /* Map worker threads to service arbiters */ | ||
116 | adf_get_arbiter_mapping(accel_dev, &thd_2_arb_cfg); | ||
117 | |||
118 | if (!thd_2_arb_cfg) | ||
119 | return -EFAULT; | ||
120 | |||
121 | for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) | ||
122 | WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i)); | ||
123 | |||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring) | ||
128 | { | ||
129 | WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr, | ||
130 | ring->bank->bank_number, | ||
131 | ring->bank->ring_mask & 0xFF); | ||
132 | } | ||
133 | |||
134 | void adf_exit_arb(struct adf_accel_dev *accel_dev) | ||
135 | { | ||
136 | void __iomem *csr; | ||
137 | unsigned int i; | ||
138 | |||
139 | if (!accel_dev->transport) | ||
140 | return; | ||
141 | |||
142 | csr = accel_dev->transport->banks[0].csr_addr; | ||
143 | |||
144 | /* Reset arbiter configuration */ | ||
145 | for (i = 0; i < ADF_ARB_NUM; i++) | ||
146 | WRITE_CSR_ARB_SARCONFIG(csr, i, 0); | ||
147 | |||
148 | /* Shutdown work queue */ | ||
149 | for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) | ||
150 | WRITE_CSR_ARB_WQCFG(csr, i, 0); | ||
151 | |||
152 | /* Unmap worker threads from service arbiters */ | ||
153 | for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) | ||
154 | WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0); | ||
155 | |||
156 | /* Disable arbitration on all rings */ | ||
157 | for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) | ||
158 | WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0); | ||
159 | } | ||
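Note: the WRITE_CSR_* macros at the top of this file flatten a small register map into offset arithmetic. Two worked offsets, computed from the #defines above:

	/* WRITE_CSR_ARB_WRK_2_SER_MAP(csr, 3, v):
	 *   0x30000 (ADF_ARB_OFFSET) + 0x180 (WRK_2_SER_MAP_OFFSET)
	 *   + 0x4 (REG_SIZE) * 3                          = 0x3018c
	 * WRITE_CSR_ARB_RINGSRVARBEN(csr, 2, v):
	 *   0x19c (RINGSRVARBEN_OFFSET) + 0x1000 (REG_SLOT) * 2 = 0x219c
	 * i.e. arbiter registers are packed at 4-byte stride inside the
	 * 0x30000 block, while the per-bank arbitration enable lives in
	 * each bank's own 0x1000 register slot. */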
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c new file mode 100644 index 000000000000..d4172dedf775 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c | |||
@@ -0,0 +1,266 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/kernel.h> | ||
48 | #include <linux/init.h> | ||
49 | #include <linux/types.h> | ||
50 | #include <linux/pci.h> | ||
51 | #include <linux/slab.h> | ||
52 | #include <linux/errno.h> | ||
53 | #include <linux/interrupt.h> | ||
54 | #include <adf_accel_devices.h> | ||
55 | #include <adf_common_drv.h> | ||
56 | #include <adf_cfg.h> | ||
57 | #include <adf_cfg_strings.h> | ||
58 | #include <adf_cfg_common.h> | ||
59 | #include <adf_transport_access_macros.h> | ||
60 | #include <adf_transport_internal.h> | ||
61 | #include "adf_drv.h" | ||
62 | |||
63 | static int adf_enable_msix(struct adf_accel_dev *accel_dev) | ||
64 | { | ||
65 | struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; | ||
66 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
67 | uint32_t msix_num_entries = hw_data->num_banks + 1; | ||
68 | int i; | ||
69 | |||
70 | for (i = 0; i < msix_num_entries; i++) | ||
71 | pci_dev_info->msix_entries.entries[i].entry = i; | ||
72 | |||
73 | if (pci_enable_msix(pci_dev_info->pci_dev, | ||
74 | pci_dev_info->msix_entries.entries, | ||
75 | msix_num_entries)) { | ||
76 | pr_err("QAT: Failed to enable MSIX IRQ\n"); | ||
77 | return -EFAULT; | ||
78 | } | ||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | static void adf_disable_msix(struct adf_accel_pci *pci_dev_info) | ||
83 | { | ||
84 | pci_disable_msix(pci_dev_info->pci_dev); | ||
85 | } | ||
86 | |||
87 | static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr) | ||
88 | { | ||
89 | struct adf_etr_bank_data *bank = bank_ptr; | ||
90 | |||
91 | WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0); | ||
92 | tasklet_hi_schedule(&bank->resp_hanlder); | ||
93 | return IRQ_HANDLED; | ||
94 | } | ||
95 | |||
96 | static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) | ||
97 | { | ||
98 | struct adf_accel_dev *accel_dev = dev_ptr; | ||
99 | |||
100 | pr_info("QAT: qat_dev%d spurious AE interrupt\n", accel_dev->accel_id); | ||
101 | return IRQ_HANDLED; | ||
102 | } | ||
103 | |||
104 | static int adf_request_irqs(struct adf_accel_dev *accel_dev) | ||
105 | { | ||
106 | struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; | ||
107 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
108 | struct msix_entry *msixe = pci_dev_info->msix_entries.entries; | ||
109 | struct adf_etr_data *etr_data = accel_dev->transport; | ||
110 | int ret, i; | ||
111 | char *name; | ||
112 | |||
113 | /* Request msix irq for all banks */ | ||
114 | for (i = 0; i < hw_data->num_banks; i++) { | ||
115 | struct adf_etr_bank_data *bank = &etr_data->banks[i]; | ||
116 | unsigned int cpu, cpus = num_online_cpus(); | ||
117 | |||
118 | name = *(pci_dev_info->msix_entries.names + i); | ||
119 | snprintf(name, ADF_MAX_MSIX_VECTOR_NAME, | ||
120 | "qat%d-bundle%d", accel_dev->accel_id, i); | ||
121 | ret = request_irq(msixe[i].vector, | ||
122 | adf_msix_isr_bundle, 0, name, bank); | ||
123 | if (ret) { | ||
124 | pr_err("QAT: failed to enable irq %d for %s\n", | ||
125 | msixe[i].vector, name); | ||
126 | return ret; | ||
127 | } | ||
128 | |||
129 | cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus; | ||
130 | irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu)); | ||
131 | } | ||
132 | |||
133 | /* Request msix irq for AE */ | ||
134 | name = *(pci_dev_info->msix_entries.names + i); | ||
135 | snprintf(name, ADF_MAX_MSIX_VECTOR_NAME, | ||
136 | "qat%d-ae-cluster", accel_dev->accel_id); | ||
137 | ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev); | ||
138 | if (ret) { | ||
139 | pr_err("QAT: failed to enable irq %d, for %s\n", | ||
140 | msixe[i].vector, name); | ||
141 | return ret; | ||
142 | } | ||
143 | return ret; | ||
144 | } | ||
145 | |||
146 | static void adf_free_irqs(struct adf_accel_dev *accel_dev) | ||
147 | { | ||
148 | struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; | ||
149 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
150 | struct msix_entry *msixe = pci_dev_info->msix_entries.entries; | ||
151 | struct adf_etr_data *etr_data = accel_dev->transport; | ||
152 | int i; | ||
153 | |||
154 | for (i = 0; i < hw_data->num_banks; i++) { | ||
155 | irq_set_affinity_hint(msixe[i].vector, NULL); | ||
156 | free_irq(msixe[i].vector, &etr_data->banks[i]); | ||
157 | } | ||
158 | irq_set_affinity_hint(msixe[i].vector, NULL); | ||
159 | free_irq(msixe[i].vector, accel_dev); | ||
160 | } | ||
161 | |||
162 | static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev) | ||
163 | { | ||
164 | int i; | ||
165 | char **names; | ||
166 | struct msix_entry *entries; | ||
167 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
168 | uint32_t msix_num_entries = hw_data->num_banks + 1; | ||
169 | |||
170 | entries = kzalloc_node(msix_num_entries * sizeof(*entries), | ||
171 | GFP_KERNEL, accel_dev->numa_node); | ||
172 | if (!entries) | ||
173 | return -ENOMEM; | ||
174 | |||
175 | names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL); | ||
176 | if (!names) { | ||
177 | kfree(entries); | ||
178 | return -ENOMEM; | ||
179 | } | ||
180 | for (i = 0; i < msix_num_entries; i++) { | ||
181 | *(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL); | ||
182 | if (!(*(names + i))) | ||
183 | goto err; | ||
184 | } | ||
185 | accel_dev->accel_pci_dev.msix_entries.entries = entries; | ||
186 | accel_dev->accel_pci_dev.msix_entries.names = names; | ||
187 | return 0; | ||
188 | err: | ||
189 | for (i = 0; i < msix_num_entries; i++) { | ||
190 | if (*(names + i)) | ||
191 | kfree(*(names + i)); | ||
192 | } | ||
193 | kfree(entries); | ||
194 | kfree(names); | ||
195 | return -ENOMEM; | ||
196 | } | ||
197 | |||
198 | static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev) | ||
199 | { | ||
200 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
201 | uint32_t msix_num_entries = hw_data->num_banks + 1; | ||
202 | char **names = accel_dev->accel_pci_dev.msix_entries.names; | ||
203 | int i; | ||
204 | |||
205 | kfree(accel_dev->accel_pci_dev.msix_entries.entries); | ||
206 | for (i = 0; i < msix_num_entries; i++) { | ||
207 | if (*(names + i)) | ||
208 | kfree(*(names + i)); | ||
209 | } | ||
210 | kfree(names); | ||
211 | } | ||
212 | |||
213 | static int adf_setup_bh(struct adf_accel_dev *accel_dev) | ||
214 | { | ||
215 | struct adf_etr_data *priv_data = accel_dev->transport; | ||
216 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
217 | int i; | ||
218 | |||
219 | for (i = 0; i < hw_data->num_banks; i++) | ||
220 | tasklet_init(&priv_data->banks[i].resp_hanlder, | ||
221 | adf_response_handler, | ||
222 | (unsigned long)&priv_data->banks[i]); | ||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | static void adf_cleanup_bh(struct adf_accel_dev *accel_dev) | ||
227 | { | ||
228 | struct adf_etr_data *priv_data = accel_dev->transport; | ||
229 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
230 | int i; | ||
231 | |||
232 | for (i = 0; i < hw_data->num_banks; i++) { | ||
233 | tasklet_disable(&priv_data->banks[i].resp_hanlder); | ||
234 | tasklet_kill(&priv_data->banks[i].resp_hanlder); | ||
235 | } | ||
236 | } | ||
237 | |||
238 | void adf_isr_resource_free(struct adf_accel_dev *accel_dev) | ||
239 | { | ||
240 | adf_free_irqs(accel_dev); | ||
241 | adf_cleanup_bh(accel_dev); | ||
242 | adf_disable_msix(&accel_dev->accel_pci_dev); | ||
243 | adf_isr_free_msix_entry_table(accel_dev); | ||
244 | } | ||
245 | |||
246 | int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev) | ||
247 | { | ||
248 | int ret; | ||
249 | |||
250 | ret = adf_isr_alloc_msix_entry_table(accel_dev); | ||
251 | if (ret) | ||
252 | return ret; | ||
253 | if (adf_enable_msix(accel_dev)) | ||
254 | goto err_out; | ||
255 | |||
256 | if (adf_setup_bh(accel_dev)) | ||
257 | goto err_out; | ||
258 | |||
259 | if (adf_request_irqs(accel_dev)) | ||
260 | goto err_out; | ||
261 | |||
262 | return 0; | ||
263 | err_out: | ||
264 | adf_isr_resource_free(accel_dev); | ||
265 | return -EFAULT; | ||
266 | } | ||
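Note: adf_isr_resource_alloc() allocates num_banks + 1 MSI-X entries; the layout it establishes is worth stating explicitly:

	/* MSI-X vector layout set up above:
	 *   entries 0 .. num_banks-1  "qat%d-bundle%d"    one per ring bank,
	 *                             affinity spread round-robin over online
	 *                             CPUs, serviced by the bank's response
	 *                             tasklet
	 *   entry   num_banks         "qat%d-ae-cluster"  AE cluster IRQ,
	 *                             currently only logged as spurious by
	 *                             adf_msix_isr_ae()
	 */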
diff --git a/drivers/crypto/qat/qat_dh895xcc/qat_admin.c b/drivers/crypto/qat/qat_dh895xcc/qat_admin.c new file mode 100644 index 000000000000..55b7a8e48bad --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/qat_admin.c | |||
@@ -0,0 +1,107 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <icp_qat_fw_init_admin.h> | ||
48 | #include <adf_accel_devices.h> | ||
49 | #include <adf_common_drv.h> | ||
50 | #include "adf_drv.h" | ||
51 | |||
52 | static struct service_hndl qat_admin; | ||
53 | |||
54 | static int qat_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd) | ||
55 | { | ||
56 | struct adf_hw_device_data *hw_device = accel_dev->hw_device; | ||
57 | struct icp_qat_fw_init_admin_req req; | ||
58 | struct icp_qat_fw_init_admin_resp resp; | ||
59 | int i; | ||
60 | |||
61 | memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req)); | ||
62 | req.init_admin_cmd_id = cmd; | ||
63 | for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { | ||
64 | memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp)); | ||
65 | if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) || | ||
66 | resp.init_resp_hdr.status) | ||
67 | return -EFAULT; | ||
68 | } | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static int qat_admin_start(struct adf_accel_dev *accel_dev) | ||
73 | { | ||
74 | return qat_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME); | ||
75 | } | ||
76 | |||
77 | static int qat_admin_event_handler(struct adf_accel_dev *accel_dev, | ||
78 | enum adf_event event) | ||
79 | { | ||
80 | int ret; | ||
81 | |||
82 | switch (event) { | ||
83 | case ADF_EVENT_START: | ||
84 | ret = qat_admin_start(accel_dev); | ||
85 | break; | ||
86 | case ADF_EVENT_STOP: | ||
87 | case ADF_EVENT_INIT: | ||
88 | case ADF_EVENT_SHUTDOWN: | ||
89 | default: | ||
90 | ret = 0; | ||
91 | } | ||
92 | return ret; | ||
93 | } | ||
94 | |||
95 | int qat_admin_register(void) | ||
96 | { | ||
97 | memset(&qat_admin, 0, sizeof(struct service_hndl)); | ||
98 | qat_admin.event_hld = qat_admin_event_handler; | ||
99 | qat_admin.name = "qat_admin"; | ||
100 | qat_admin.admin = 1; | ||
101 | return adf_service_register(&qat_admin); | ||
102 | } | ||
103 | |||
104 | int qat_admin_unregister(void) | ||
105 | { | ||
106 | return adf_service_unregister(&qat_admin); | ||
107 | } | ||
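Note: qat_admin is one instance of the ADF service-handle pattern: a named handler that receives lifecycle events and, on ADF_EVENT_START, synchronously broadcasts ICP_QAT_FW_INIT_ME to every acceleration engine. A sketch of another service wired up the same way; the names here are entirely hypothetical, only adf_service_register()/adf_service_unregister() and the types are real:

	static int example_event_handler(struct adf_accel_dev *accel_dev,
					 enum adf_event event)
	{
		return 0;	/* this hypothetical service ignores all events */
	}

	static struct service_hndl example_service = {
		.event_hld = example_event_handler,
		.name = "example_service",
	};

	/* Hooked into the same notification chain as qat_admin via
	 * adf_service_register(&example_service); */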
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile new file mode 100644 index 000000000000..348dc3173afa --- /dev/null +++ b/drivers/crypto/qce/Makefile | |||
@@ -0,0 +1,6 @@ | |||
1 | obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o | ||
2 | qcrypto-objs := core.o \ | ||
3 | common.o \ | ||
4 | dma.o \ | ||
5 | sha.o \ | ||
6 | ablkcipher.o | ||
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c new file mode 100644 index 000000000000..ad592de475a4 --- /dev/null +++ b/drivers/crypto/qce/ablkcipher.c | |||
@@ -0,0 +1,431 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #include <linux/device.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/types.h> | ||
17 | #include <crypto/aes.h> | ||
18 | #include <crypto/algapi.h> | ||
19 | #include <crypto/des.h> | ||
20 | |||
21 | #include "cipher.h" | ||
22 | |||
23 | static LIST_HEAD(ablkcipher_algs); | ||
24 | |||
25 | static void qce_ablkcipher_done(void *data) | ||
26 | { | ||
27 | struct crypto_async_request *async_req = data; | ||
28 | struct ablkcipher_request *req = ablkcipher_request_cast(async_req); | ||
29 | struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); | ||
30 | struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); | ||
31 | struct qce_device *qce = tmpl->qce; | ||
32 | enum dma_data_direction dir_src, dir_dst; | ||
33 | u32 status; | ||
34 | int error; | ||
35 | bool diff_dst; | ||
36 | |||
37 | diff_dst = (req->src != req->dst); | ||
38 | dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; | ||
39 | dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; | ||
40 | |||
41 | error = qce_dma_terminate_all(&qce->dma); | ||
42 | if (error) | ||
43 | dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n", | ||
44 | error); | ||
45 | |||
46 | if (diff_dst) | ||
47 | qce_unmapsg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src, | ||
48 | rctx->src_chained); | ||
49 | qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst, | ||
50 | rctx->dst_chained); | ||
51 | |||
52 | sg_free_table(&rctx->dst_tbl); | ||
53 | |||
54 | error = qce_check_status(qce, &status); | ||
55 | if (error < 0) | ||
56 | dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status); | ||
57 | |||
58 | qce->async_req_done(tmpl->qce, error); | ||
59 | } | ||
60 | |||
61 | static int | ||
62 | qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req) | ||
63 | { | ||
64 | struct ablkcipher_request *req = ablkcipher_request_cast(async_req); | ||
65 | struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); | ||
66 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); | ||
67 | struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); | ||
68 | struct qce_device *qce = tmpl->qce; | ||
69 | enum dma_data_direction dir_src, dir_dst; | ||
70 | struct scatterlist *sg; | ||
71 | bool diff_dst; | ||
72 | gfp_t gfp; | ||
73 | int ret; | ||
74 | |||
75 | rctx->iv = req->info; | ||
76 | rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher); | ||
77 | rctx->cryptlen = req->nbytes; | ||
78 | |||
79 | diff_dst = (req->src != req->dst); | ||
80 | dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; | ||
81 | dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; | ||
82 | |||
83 | rctx->src_nents = qce_countsg(req->src, req->nbytes, | ||
84 | &rctx->src_chained); | ||
85 | if (diff_dst) { | ||
86 | rctx->dst_nents = qce_countsg(req->dst, req->nbytes, | ||
87 | &rctx->dst_chained); | ||
88 | } else { | ||
89 | rctx->dst_nents = rctx->src_nents; | ||
90 | rctx->dst_chained = rctx->src_chained; | ||
91 | } | ||
92 | |||
93 | rctx->dst_nents += 1; | ||
94 | |||
95 | gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | ||
96 | GFP_KERNEL : GFP_ATOMIC; | ||
97 | |||
98 | ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp); | ||
99 | if (ret) | ||
100 | return ret; | ||
101 | |||
102 | sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); | ||
103 | |||
104 | sg = qce_sgtable_add(&rctx->dst_tbl, req->dst); | ||
105 | if (IS_ERR(sg)) { | ||
106 | ret = PTR_ERR(sg); | ||
107 | goto error_free; | ||
108 | } | ||
109 | |||
110 | sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg); | ||
111 | if (IS_ERR(sg)) { | ||
112 | ret = PTR_ERR(sg); | ||
113 | goto error_free; | ||
114 | } | ||
115 | |||
116 | sg_mark_end(sg); | ||
117 | rctx->dst_sg = rctx->dst_tbl.sgl; | ||
118 | |||
119 | ret = qce_mapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst, | ||
120 | rctx->dst_chained); | ||
121 | if (ret < 0) | ||
122 | goto error_free; | ||
123 | |||
124 | if (diff_dst) { | ||
125 | ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src, | ||
126 | rctx->src_chained); | ||
127 | if (ret < 0) | ||
128 | goto error_unmap_dst; | ||
129 | rctx->src_sg = req->src; | ||
130 | } else { | ||
131 | rctx->src_sg = rctx->dst_sg; | ||
132 | } | ||
133 | |||
134 | ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents, | ||
135 | rctx->dst_sg, rctx->dst_nents, | ||
136 | qce_ablkcipher_done, async_req); | ||
137 | if (ret) | ||
138 | goto error_unmap_src; | ||
139 | |||
140 | qce_dma_issue_pending(&qce->dma); | ||
141 | |||
142 | ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0); | ||
143 | if (ret) | ||
144 | goto error_terminate; | ||
145 | |||
146 | return 0; | ||
147 | |||
148 | error_terminate: | ||
149 | qce_dma_terminate_all(&qce->dma); | ||
150 | error_unmap_src: | ||
151 | if (diff_dst) | ||
152 | qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src, | ||
153 | rctx->src_chained); | ||
154 | error_unmap_dst: | ||
155 | qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst, | ||
156 | rctx->dst_chained); | ||
157 | error_free: | ||
158 | sg_free_table(&rctx->dst_tbl); | ||
159 | return ret; | ||
160 | } | ||
161 | |||
162 | static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key, | ||
163 | unsigned int keylen) | ||
164 | { | ||
165 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk); | ||
166 | struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
167 | unsigned long flags = to_cipher_tmpl(tfm)->alg_flags; | ||
168 | int ret; | ||
169 | |||
170 | if (!key || !keylen) | ||
171 | return -EINVAL; | ||
172 | |||
173 | if (IS_AES(flags)) { | ||
174 | switch (keylen) { | ||
175 | case AES_KEYSIZE_128: | ||
176 | case AES_KEYSIZE_256: | ||
177 | break; | ||
178 | default: | ||
179 | goto fallback; | ||
180 | } | ||
181 | } else if (IS_DES(flags)) { | ||
182 | u32 tmp[DES_EXPKEY_WORDS]; | ||
183 | |||
184 | ret = des_ekey(tmp, key); | ||
185 | if (!ret && crypto_ablkcipher_get_flags(ablk) & | ||
186 | CRYPTO_TFM_REQ_WEAK_KEY) | ||
187 | goto weakkey; | ||
188 | } | ||
189 | |||
190 | ctx->enc_keylen = keylen; | ||
191 | memcpy(ctx->enc_key, key, keylen); | ||
192 | return 0; | ||
193 | fallback: | ||
194 | ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen); | ||
195 | if (!ret) | ||
196 | ctx->enc_keylen = keylen; | ||
197 | return ret; | ||
198 | weakkey: | ||
199 | crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY); | ||
200 | return -EINVAL; | ||
201 | } | ||
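Note: the setkey path above only programs the engine key for AES-128 and AES-256; every other AES key length is routed to the software fallback tfm allocated in qce_ablkcipher_init() below, which suggests the engine lacks AES-192 support. In summary:

	/* Key handling implemented by qce_ablkcipher_setkey() (sketch):
	 *   AES-128 / AES-256 -> cache the key in ctx->enc_key for the engine
	 *   other AES sizes   -> crypto_ablkcipher_setkey(ctx->fallback, ...)
	 *                        and encrypt/decrypt via the async fallback
	 *   DES               -> reject weak keys if CRYPTO_TFM_REQ_WEAK_KEY
	 *   everything else   -> cache the key as-is
	 */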
202 | |||
203 | static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt) | ||
204 | { | ||
205 | struct crypto_tfm *tfm = | ||
206 | crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); | ||
207 | struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
208 | struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); | ||
209 | struct qce_alg_template *tmpl = to_cipher_tmpl(tfm); | ||
210 | int ret; | ||
211 | |||
212 | rctx->flags = tmpl->alg_flags; | ||
213 | rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT; | ||
214 | |||
215 | if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 && | ||
216 | ctx->enc_keylen != AES_KEYSIZE_256) { | ||
217 | ablkcipher_request_set_tfm(req, ctx->fallback); | ||
218 | ret = encrypt ? crypto_ablkcipher_encrypt(req) : | ||
219 | crypto_ablkcipher_decrypt(req); | ||
220 | ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); | ||
221 | return ret; | ||
222 | } | ||
223 | |||
224 | return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base); | ||
225 | } | ||
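One detail of the fallback dispatch above is easy to miss: the request's tfm is temporarily repointed at the software fallback so the generic crypto_ablkcipher_encrypt()/decrypt() calls route to it, then restored before returning, so later requests and completion handling still see the hardware tfm. This is what makes AES key sizes the engine cannot handle (anything other than 128 and 256 bits, e.g. 192-bit keys) work transparently.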
226 | |||
227 | static int qce_ablkcipher_encrypt(struct ablkcipher_request *req) | ||
228 | { | ||
229 | return qce_ablkcipher_crypt(req, 1); | ||
230 | } | ||
231 | |||
232 | static int qce_ablkcipher_decrypt(struct ablkcipher_request *req) | ||
233 | { | ||
234 | return qce_ablkcipher_crypt(req, 0); | ||
235 | } | ||
236 | |||
237 | static int qce_ablkcipher_init(struct crypto_tfm *tfm) | ||
238 | { | ||
239 | struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
240 | |||
241 | memset(ctx, 0, sizeof(*ctx)); | ||
242 | tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx); | ||
243 | |||
244 | ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm), | ||
245 | CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
246 | CRYPTO_ALG_ASYNC | | ||
247 | CRYPTO_ALG_NEED_FALLBACK); | ||
248 | if (IS_ERR(ctx->fallback)) | ||
249 | return PTR_ERR(ctx->fallback); | ||
250 | |||
251 | return 0; | ||
252 | } | ||
253 | |||
254 | static void qce_ablkcipher_exit(struct crypto_tfm *tfm) | ||
255 | { | ||
256 | struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
257 | |||
258 | crypto_free_ablkcipher(ctx->fallback); | ||
259 | } | ||
260 | |||
261 | struct qce_ablkcipher_def { | ||
262 | unsigned long flags; | ||
263 | const char *name; | ||
264 | const char *drv_name; | ||
265 | unsigned int blocksize; | ||
266 | unsigned int ivsize; | ||
267 | unsigned int min_keysize; | ||
268 | unsigned int max_keysize; | ||
269 | }; | ||
270 | |||
271 | static const struct qce_ablkcipher_def ablkcipher_def[] = { | ||
272 | { | ||
273 | .flags = QCE_ALG_AES | QCE_MODE_ECB, | ||
274 | .name = "ecb(aes)", | ||
275 | .drv_name = "ecb-aes-qce", | ||
276 | .blocksize = AES_BLOCK_SIZE, | ||
277 | .ivsize = AES_BLOCK_SIZE, | ||
278 | .min_keysize = AES_MIN_KEY_SIZE, | ||
279 | .max_keysize = AES_MAX_KEY_SIZE, | ||
280 | }, | ||
281 | { | ||
282 | .flags = QCE_ALG_AES | QCE_MODE_CBC, | ||
283 | .name = "cbc(aes)", | ||
284 | .drv_name = "cbc-aes-qce", | ||
285 | .blocksize = AES_BLOCK_SIZE, | ||
286 | .ivsize = AES_BLOCK_SIZE, | ||
287 | .min_keysize = AES_MIN_KEY_SIZE, | ||
288 | .max_keysize = AES_MAX_KEY_SIZE, | ||
289 | }, | ||
290 | { | ||
291 | .flags = QCE_ALG_AES | QCE_MODE_CTR, | ||
292 | .name = "ctr(aes)", | ||
293 | .drv_name = "ctr-aes-qce", | ||
294 | .blocksize = AES_BLOCK_SIZE, | ||
295 | .ivsize = AES_BLOCK_SIZE, | ||
296 | .min_keysize = AES_MIN_KEY_SIZE, | ||
297 | .max_keysize = AES_MAX_KEY_SIZE, | ||
298 | }, | ||
299 | { | ||
300 | .flags = QCE_ALG_AES | QCE_MODE_XTS, | ||
301 | .name = "xts(aes)", | ||
302 | .drv_name = "xts-aes-qce", | ||
303 | .blocksize = AES_BLOCK_SIZE, | ||
304 | .ivsize = AES_BLOCK_SIZE, | ||
305 | .min_keysize = AES_MIN_KEY_SIZE, | ||
306 | .max_keysize = AES_MAX_KEY_SIZE, | ||
307 | }, | ||
308 | { | ||
309 | .flags = QCE_ALG_DES | QCE_MODE_ECB, | ||
310 | .name = "ecb(des)", | ||
311 | .drv_name = "ecb-des-qce", | ||
312 | .blocksize = DES_BLOCK_SIZE, | ||
313 | .ivsize = 0, | ||
314 | .min_keysize = DES_KEY_SIZE, | ||
315 | .max_keysize = DES_KEY_SIZE, | ||
316 | }, | ||
317 | { | ||
318 | .flags = QCE_ALG_DES | QCE_MODE_CBC, | ||
319 | .name = "cbc(des)", | ||
320 | .drv_name = "cbc-des-qce", | ||
321 | .blocksize = DES_BLOCK_SIZE, | ||
322 | .ivsize = DES_BLOCK_SIZE, | ||
323 | .min_keysize = DES_KEY_SIZE, | ||
324 | .max_keysize = DES_KEY_SIZE, | ||
325 | }, | ||
326 | { | ||
327 | .flags = QCE_ALG_3DES | QCE_MODE_ECB, | ||
328 | .name = "ecb(des3_ede)", | ||
329 | .drv_name = "ecb-3des-qce", | ||
330 | .blocksize = DES3_EDE_BLOCK_SIZE, | ||
331 | .ivsize = 0, | ||
332 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
333 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
334 | }, | ||
335 | { | ||
336 | .flags = QCE_ALG_3DES | QCE_MODE_CBC, | ||
337 | .name = "cbc(des3_ede)", | ||
338 | .drv_name = "cbc-3des-qce", | ||
339 | .blocksize = DES3_EDE_BLOCK_SIZE, | ||
340 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
341 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
342 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
343 | }, | ||
344 | }; | ||
345 | |||
346 | static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def, | ||
347 | struct qce_device *qce) | ||
348 | { | ||
349 | struct qce_alg_template *tmpl; | ||
350 | struct crypto_alg *alg; | ||
351 | int ret; | ||
352 | |||
353 | tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL); | ||
354 | if (!tmpl) | ||
355 | return -ENOMEM; | ||
356 | |||
357 | alg = &tmpl->alg.crypto; | ||
358 | |||
359 | snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); | ||
360 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | ||
361 | def->drv_name); | ||
362 | |||
363 | alg->cra_blocksize = def->blocksize; | ||
364 | alg->cra_ablkcipher.ivsize = def->ivsize; | ||
365 | alg->cra_ablkcipher.min_keysize = def->min_keysize; | ||
366 | alg->cra_ablkcipher.max_keysize = def->max_keysize; | ||
367 | alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey; | ||
368 | alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt; | ||
369 | alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt; | ||
370 | |||
371 | alg->cra_priority = 300; | ||
372 | alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC | | ||
373 | CRYPTO_ALG_NEED_FALLBACK; | ||
374 | alg->cra_ctxsize = sizeof(struct qce_cipher_ctx); | ||
375 | alg->cra_alignmask = 0; | ||
376 | alg->cra_type = &crypto_ablkcipher_type; | ||
377 | alg->cra_module = THIS_MODULE; | ||
378 | alg->cra_init = qce_ablkcipher_init; | ||
379 | alg->cra_exit = qce_ablkcipher_exit; | ||
380 | INIT_LIST_HEAD(&alg->cra_list); | ||
381 | |||
382 | INIT_LIST_HEAD(&tmpl->entry); | ||
383 | tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER; | ||
384 | tmpl->alg_flags = def->flags; | ||
385 | tmpl->qce = qce; | ||
386 | |||
387 | ret = crypto_register_alg(alg); | ||
388 | if (ret) { | ||
389 | kfree(tmpl); | ||
390 | dev_err(qce->dev, "%s registration failed\n", alg->cra_name); | ||
391 | return ret; | ||
392 | } | ||
393 | |||
394 | list_add_tail(&tmpl->entry, &ablkcipher_algs); | ||
395 | dev_dbg(qce->dev, "%s is registered\n", alg->cra_name); | ||
396 | return 0; | ||
397 | } | ||
398 | |||
399 | static void qce_ablkcipher_unregister(struct qce_device *qce) | ||
400 | { | ||
401 | struct qce_alg_template *tmpl, *n; | ||
402 | |||
403 | list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) { | ||
404 | crypto_unregister_alg(&tmpl->alg.crypto); | ||
405 | list_del(&tmpl->entry); | ||
406 | kfree(tmpl); | ||
407 | } | ||
408 | } | ||
409 | |||
410 | static int qce_ablkcipher_register(struct qce_device *qce) | ||
411 | { | ||
412 | int ret, i; | ||
413 | |||
414 | for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) { | ||
415 | ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce); | ||
416 | if (ret) | ||
417 | goto err; | ||
418 | } | ||
419 | |||
420 | return 0; | ||
421 | err: | ||
422 | qce_ablkcipher_unregister(qce); | ||
423 | return ret; | ||
424 | } | ||
425 | |||
426 | const struct qce_algo_ops ablkcipher_ops = { | ||
427 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
428 | .register_algs = qce_ablkcipher_register, | ||
429 | .unregister_algs = qce_ablkcipher_unregister, | ||
430 | .async_req_handle = qce_ablkcipher_async_req_handle, | ||
431 | }; | ||
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h new file mode 100644 index 000000000000..d5757cfcda2d --- /dev/null +++ b/drivers/crypto/qce/cipher.h | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #ifndef _CIPHER_H_ | ||
15 | #define _CIPHER_H_ | ||
16 | |||
17 | #include "common.h" | ||
18 | #include "core.h" | ||
19 | |||
20 | #define QCE_MAX_KEY_SIZE 64 | ||
21 | |||
22 | struct qce_cipher_ctx { | ||
23 | u8 enc_key[QCE_MAX_KEY_SIZE]; | ||
24 | unsigned int enc_keylen; | ||
25 | struct crypto_ablkcipher *fallback; | ||
26 | }; | ||
27 | |||
28 | /** | ||
29 | * struct qce_cipher_reqctx - holds private cipher objects per request | ||
30 | * @flags: operation flags | ||
31 | * @iv: pointer to the IV | ||
32 | * @ivsize: IV size | ||
33 | * @src_nents: source entries | ||
34 | * @dst_nents: destination entries | ||
35 | * @src_chained: is source chained | ||
36 | * @dst_chained: is destination chained | ||
37 | * @result_sg: scatterlist used for result buffer | ||
38 | * @dst_tbl: destination sg table | ||
39 | * @dst_sg: destination sg pointer table beginning | ||
40 | * @src_tbl: source sg table | ||
41 | * @src_sg: source sg pointer table beginning | ||
42 | * @cryptlen: crypto length | ||
43 | */ | ||
44 | struct qce_cipher_reqctx { | ||
45 | unsigned long flags; | ||
46 | u8 *iv; | ||
47 | unsigned int ivsize; | ||
48 | int src_nents; | ||
49 | int dst_nents; | ||
50 | bool src_chained; | ||
51 | bool dst_chained; | ||
52 | struct scatterlist result_sg; | ||
53 | struct sg_table dst_tbl; | ||
54 | struct scatterlist *dst_sg; | ||
55 | struct sg_table src_tbl; | ||
56 | struct scatterlist *src_sg; | ||
57 | unsigned int cryptlen; | ||
58 | }; | ||
59 | |||
60 | static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm) | ||
61 | { | ||
62 | struct crypto_alg *alg = tfm->__crt_alg; | ||
63 | return container_of(alg, struct qce_alg_template, alg.crypto); | ||
64 | } | ||
65 | |||
66 | extern const struct qce_algo_ops ablkcipher_ops; | ||
67 | |||
68 | #endif /* _CIPHER_H_ */ | ||
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c new file mode 100644 index 000000000000..1fb5fde7fc03 --- /dev/null +++ b/drivers/crypto/qce/common.c | |||
@@ -0,0 +1,438 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #include <linux/err.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/types.h> | ||
17 | #include <crypto/scatterwalk.h> | ||
18 | #include <crypto/sha.h> | ||
19 | |||
20 | #include "cipher.h" | ||
21 | #include "common.h" | ||
22 | #include "core.h" | ||
23 | #include "regs-v5.h" | ||
24 | #include "sha.h" | ||
25 | |||
26 | #define QCE_SECTOR_SIZE 512 | ||
27 | |||
28 | static inline u32 qce_read(struct qce_device *qce, u32 offset) | ||
29 | { | ||
30 | return readl(qce->base + offset); | ||
31 | } | ||
32 | |||
33 | static inline void qce_write(struct qce_device *qce, u32 offset, u32 val) | ||
34 | { | ||
35 | writel(val, qce->base + offset); | ||
36 | } | ||
37 | |||
38 | static inline void qce_write_array(struct qce_device *qce, u32 offset, | ||
39 | const u32 *val, unsigned int len) | ||
40 | { | ||
41 | int i; | ||
42 | |||
43 | for (i = 0; i < len; i++) | ||
44 | qce_write(qce, offset + i * sizeof(u32), val[i]); | ||
45 | } | ||
46 | |||
47 | static inline void | ||
48 | qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len) | ||
49 | { | ||
50 | int i; | ||
51 | |||
52 | for (i = 0; i < len; i++) | ||
53 | qce_write(qce, offset + i * sizeof(u32), 0); | ||
54 | } | ||
55 | |||
56 | static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size) | ||
57 | { | ||
58 | u32 cfg = 0; | ||
59 | |||
60 | if (IS_AES(flags)) { | ||
61 | if (aes_key_size == AES_KEYSIZE_128) | ||
62 | cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT; | ||
63 | else if (aes_key_size == AES_KEYSIZE_256) | ||
64 | cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT; | ||
65 | } | ||
66 | |||
67 | if (IS_AES(flags)) | ||
68 | cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT; | ||
69 | else if (IS_DES(flags) || IS_3DES(flags)) | ||
70 | cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT; | ||
71 | |||
72 | if (IS_DES(flags)) | ||
73 | cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT; | ||
74 | |||
75 | if (IS_3DES(flags)) | ||
76 | cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT; | ||
77 | |||
78 | switch (flags & QCE_MODE_MASK) { | ||
79 | case QCE_MODE_ECB: | ||
80 | cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT; | ||
81 | break; | ||
82 | case QCE_MODE_CBC: | ||
83 | cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT; | ||
84 | break; | ||
85 | case QCE_MODE_CTR: | ||
86 | cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT; | ||
87 | break; | ||
88 | case QCE_MODE_XTS: | ||
89 | cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT; | ||
90 | break; | ||
91 | case QCE_MODE_CCM: | ||
92 | cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT; | ||
93 | cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT; | ||
94 | break; | ||
95 | default: | ||
96 | return ~0; | ||
97 | } | ||
98 | |||
99 | return cfg; | ||
100 | } | ||
101 | |||
102 | static u32 qce_auth_cfg(unsigned long flags, u32 key_size) | ||
103 | { | ||
104 | u32 cfg = 0; | ||
105 | |||
106 | if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags))) | ||
107 | cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT; | ||
108 | else | ||
109 | cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT; | ||
110 | |||
111 | if (IS_CCM(flags) || IS_CMAC(flags)) { | ||
112 | if (key_size == AES_KEYSIZE_128) | ||
113 | cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT; | ||
114 | else if (key_size == AES_KEYSIZE_256) | ||
115 | cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT; | ||
116 | } | ||
117 | |||
118 | if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) | ||
119 | cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT; | ||
120 | else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) | ||
121 | cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT; | ||
122 | else if (IS_CMAC(flags)) | ||
123 | cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT; | ||
124 | |||
125 | if (IS_SHA1(flags) || IS_SHA256(flags)) | ||
126 | cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT; | ||
127 | else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) || | ||
128 | IS_CBC(flags) || IS_CTR(flags)) | ||
129 | cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT; | ||
130 | else if (IS_AES(flags) && IS_CCM(flags)) | ||
131 | cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT; | ||
132 | else if (IS_AES(flags) && IS_CMAC(flags)) | ||
133 | cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT; | ||
134 | |||
135 | if (IS_SHA(flags) || IS_SHA_HMAC(flags)) | ||
136 | cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT; | ||
137 | |||
138 | if (IS_CCM(flags)) | ||
139 | cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT; | ||
140 | |||
141 | if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) || | ||
142 | IS_CMAC(flags)) | ||
143 | cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT); | ||
144 | |||
145 | return cfg; | ||
146 | } | ||
147 | |||
148 | static u32 qce_config_reg(struct qce_device *qce, int little) | ||
149 | { | ||
150 | u32 beats = (qce->burst_size >> 3) - 1; | ||
151 | u32 pipe_pair = qce->pipe_pair_id; | ||
152 | u32 config; | ||
153 | |||
154 | config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK; | ||
155 | config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) | | ||
156 | BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT); | ||
157 | config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK; | ||
158 | config &= ~BIT(HIGH_SPD_EN_N_SHIFT); | ||
159 | |||
160 | if (little) | ||
161 | config |= BIT(LITTLE_ENDIAN_MODE_SHIFT); | ||
162 | |||
163 | return config; | ||
164 | } | ||
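As a worked example of the beats arithmetic: with the v5 BAM burst size of 64 bytes (QCE_BAM_BURST_SIZE), beats = (64 >> 3) - 1 = 7, i.e. REQ_SIZE_ENUM_8_BEAT, since the request size field is programmed as the number of 8-byte beats per burst minus one.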
165 | |||
166 | void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len) | ||
167 | { | ||
168 | __be32 *d = dst; | ||
169 | const u8 *s = src; | ||
170 | unsigned int n; | ||
171 | |||
172 | n = len / sizeof(u32); | ||
173 | for (; n > 0; n--) { | ||
174 | *d = cpu_to_be32p((const __u32 *) s); | ||
175 | s += sizeof(__u32); | ||
176 | d++; | ||
177 | } | ||
178 | } | ||
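On a little-endian host this byte swaps each 4-byte group, so the first byte of the source stream lands in the most significant byte of each 32-bit word; when such a word is later written to a key or IV register via qce_write_array(), the engine sees the byte stream MSB-first. For example, key bytes {0x01, 0x02, 0x03, 0x04} are programmed as the register value 0x01020304.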
179 | |||
180 | static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize) | ||
181 | { | ||
182 | u8 swap[QCE_AES_IV_LENGTH]; | ||
183 | u32 i, j; | ||
184 | |||
185 | if (ivsize > QCE_AES_IV_LENGTH) | ||
186 | return; | ||
187 | |||
188 | memset(swap, 0, QCE_AES_IV_LENGTH); | ||
189 | |||
190 | for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1; | ||
191 | i < QCE_AES_IV_LENGTH; i++, j--) | ||
192 | swap[i] = src[j]; | ||
193 | |||
194 | qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH); | ||
195 | } | ||
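For example, an 8-byte IV {b0, b1, ..., b7} leaves the low half of swap zeroed and stores the IV reversed in the high half, swap = {0, ..., 0, b7, b6, ..., b0}, before the usual per-word big-endian conversion; the XTS tweak registers evidently expect the IV right-aligned and byte-reversed.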
196 | |||
197 | static void qce_xtskey(struct qce_device *qce, const u8 *enckey, | ||
198 | unsigned int enckeylen, unsigned int cryptlen) | ||
199 | { | ||
200 | u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0}; | ||
201 | unsigned int xtsklen = enckeylen / (2 * sizeof(u32)); | ||
202 | unsigned int xtsdusize; | ||
203 | |||
204 | qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2, | ||
205 | enckeylen / 2); | ||
206 | qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen); | ||
207 | |||
208 | /* XTS data unit size is 512 bytes, or cryptlen if smaller */ | ||
209 | xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen); | ||
210 | qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize); | ||
211 | } | ||
212 | |||
213 | static void qce_setup_config(struct qce_device *qce) | ||
214 | { | ||
215 | u32 config; | ||
216 | |||
217 | /* get config for big endian mode */ | ||
218 | config = qce_config_reg(qce, 0); | ||
219 | |||
220 | /* clear status */ | ||
221 | qce_write(qce, REG_STATUS, 0); | ||
222 | qce_write(qce, REG_CONFIG, config); | ||
223 | } | ||
224 | |||
225 | static inline void qce_crypto_go(struct qce_device *qce) | ||
226 | { | ||
227 | qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT)); | ||
228 | } | ||
229 | |||
230 | static int qce_setup_regs_ahash(struct crypto_async_request *async_req, | ||
231 | u32 totallen, u32 offset) | ||
232 | { | ||
233 | struct ahash_request *req = ahash_request_cast(async_req); | ||
234 | struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm); | ||
235 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
236 | struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm); | ||
237 | struct qce_device *qce = tmpl->qce; | ||
238 | unsigned int digestsize = crypto_ahash_digestsize(ahash); | ||
239 | unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm); | ||
240 | __be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0}; | ||
241 | __be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0}; | ||
242 | u32 auth_cfg = 0, config; | ||
243 | unsigned int iv_words; | ||
244 | |||
245 | /* if not the last block, the size must be a multiple of the block size */ | ||
246 | if (!rctx->last_blk && req->nbytes % blocksize) | ||
247 | return -EINVAL; | ||
248 | |||
249 | qce_setup_config(qce); | ||
250 | |||
251 | if (IS_CMAC(rctx->flags)) { | ||
252 | qce_write(qce, REG_AUTH_SEG_CFG, 0); | ||
253 | qce_write(qce, REG_ENCR_SEG_CFG, 0); | ||
254 | qce_write(qce, REG_ENCR_SEG_SIZE, 0); | ||
255 | qce_clear_array(qce, REG_AUTH_IV0, 16); | ||
256 | qce_clear_array(qce, REG_AUTH_KEY0, 16); | ||
257 | qce_clear_array(qce, REG_AUTH_BYTECNT0, 4); | ||
258 | |||
259 | auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen); | ||
260 | } | ||
261 | |||
262 | if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) { | ||
263 | u32 authkey_words = rctx->authklen / sizeof(u32); | ||
264 | |||
265 | qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen); | ||
266 | qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey, | ||
267 | authkey_words); | ||
268 | } | ||
269 | |||
270 | if (IS_CMAC(rctx->flags)) | ||
271 | goto go_proc; | ||
272 | |||
273 | if (rctx->first_blk) | ||
274 | memcpy(auth, rctx->digest, digestsize); | ||
275 | else | ||
276 | qce_cpu_to_be32p_array(auth, rctx->digest, digestsize); | ||
277 | |||
278 | iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8; | ||
279 | qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words); | ||
280 | |||
281 | if (rctx->first_blk) | ||
282 | qce_clear_array(qce, REG_AUTH_BYTECNT0, 4); | ||
283 | else | ||
284 | qce_write_array(qce, REG_AUTH_BYTECNT0, | ||
285 | (u32 *)rctx->byte_count, 2); | ||
286 | |||
287 | auth_cfg = qce_auth_cfg(rctx->flags, 0); | ||
288 | |||
289 | if (rctx->last_blk) | ||
290 | auth_cfg |= BIT(AUTH_LAST_SHIFT); | ||
291 | else | ||
292 | auth_cfg &= ~BIT(AUTH_LAST_SHIFT); | ||
293 | |||
294 | if (rctx->first_blk) | ||
295 | auth_cfg |= BIT(AUTH_FIRST_SHIFT); | ||
296 | else | ||
297 | auth_cfg &= ~BIT(AUTH_FIRST_SHIFT); | ||
298 | |||
299 | go_proc: | ||
300 | qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg); | ||
301 | qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes); | ||
302 | qce_write(qce, REG_AUTH_SEG_START, 0); | ||
303 | qce_write(qce, REG_ENCR_SEG_CFG, 0); | ||
304 | qce_write(qce, REG_SEG_SIZE, req->nbytes); | ||
305 | |||
306 | /* get config for little endian mode */ | ||
307 | config = qce_config_reg(qce, 1); | ||
308 | qce_write(qce, REG_CONFIG, config); | ||
309 | |||
310 | qce_crypto_go(qce); | ||
311 | |||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req, | ||
316 | u32 totallen, u32 offset) | ||
317 | { | ||
318 | struct ablkcipher_request *req = ablkcipher_request_cast(async_req); | ||
319 | struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); | ||
320 | struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm); | ||
321 | struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); | ||
322 | struct qce_device *qce = tmpl->qce; | ||
323 | __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0}; | ||
324 | __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0}; | ||
325 | unsigned int enckey_words, enciv_words; | ||
326 | unsigned int keylen; | ||
327 | u32 encr_cfg = 0, auth_cfg = 0, config; | ||
328 | unsigned int ivsize = rctx->ivsize; | ||
329 | unsigned long flags = rctx->flags; | ||
330 | |||
331 | qce_setup_config(qce); | ||
332 | |||
333 | if (IS_XTS(flags)) | ||
334 | keylen = ctx->enc_keylen / 2; | ||
335 | else | ||
336 | keylen = ctx->enc_keylen; | ||
337 | |||
338 | qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen); | ||
339 | enckey_words = keylen / sizeof(u32); | ||
340 | |||
341 | qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg); | ||
342 | |||
343 | encr_cfg = qce_encr_cfg(flags, keylen); | ||
344 | |||
345 | if (IS_DES(flags)) { | ||
346 | enciv_words = 2; | ||
347 | enckey_words = 2; | ||
348 | } else if (IS_3DES(flags)) { | ||
349 | enciv_words = 2; | ||
350 | enckey_words = 6; | ||
351 | } else if (IS_AES(flags)) { | ||
352 | if (IS_XTS(flags)) | ||
353 | qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen, | ||
354 | rctx->cryptlen); | ||
355 | enciv_words = 4; | ||
356 | } else { | ||
357 | return -EINVAL; | ||
358 | } | ||
359 | |||
360 | qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words); | ||
361 | |||
362 | if (!IS_ECB(flags)) { | ||
363 | if (IS_XTS(flags)) | ||
364 | qce_xts_swapiv(enciv, rctx->iv, ivsize); | ||
365 | else | ||
366 | qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize); | ||
367 | |||
368 | qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words); | ||
369 | } | ||
370 | |||
371 | if (IS_ENCRYPT(flags)) | ||
372 | encr_cfg |= BIT(ENCODE_SHIFT); | ||
373 | |||
374 | qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg); | ||
375 | qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen); | ||
376 | qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff); | ||
377 | |||
378 | if (IS_CTR(flags)) { | ||
379 | qce_write(qce, REG_CNTR_MASK, ~0); | ||
380 | qce_write(qce, REG_CNTR_MASK0, ~0); | ||
381 | qce_write(qce, REG_CNTR_MASK1, ~0); | ||
382 | qce_write(qce, REG_CNTR_MASK2, ~0); | ||
383 | } | ||
384 | |||
385 | qce_write(qce, REG_SEG_SIZE, totallen); | ||
386 | |||
387 | /* get config for little endian mode */ | ||
388 | config = qce_config_reg(qce, 1); | ||
389 | qce_write(qce, REG_CONFIG, config); | ||
390 | |||
391 | qce_crypto_go(qce); | ||
392 | |||
393 | return 0; | ||
394 | } | ||
395 | |||
396 | int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, | ||
397 | u32 offset) | ||
398 | { | ||
399 | switch (type) { | ||
400 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | ||
401 | return qce_setup_regs_ablkcipher(async_req, totallen, offset); | ||
402 | case CRYPTO_ALG_TYPE_AHASH: | ||
403 | return qce_setup_regs_ahash(async_req, totallen, offset); | ||
404 | default: | ||
405 | return -EINVAL; | ||
406 | } | ||
407 | } | ||
408 | |||
409 | #define STATUS_ERRORS \ | ||
410 | (BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT)) | ||
411 | |||
412 | int qce_check_status(struct qce_device *qce, u32 *status) | ||
413 | { | ||
414 | int ret = 0; | ||
415 | |||
416 | *status = qce_read(qce, REG_STATUS); | ||
417 | |||
418 | /* | ||
419 | * Don't use result dump status. The operation may not be complete. | ||
420 | * Instead, use the status we just read from the device. If the | ||
421 | * result_status from the result dump ever needs to be used, it must be | ||
422 | * byte swapped, since we set the device to little endian. | ||
423 | */ | ||
424 | if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT))) | ||
425 | ret = -ENXIO; | ||
426 | |||
427 | return ret; | ||
428 | } | ||
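To make the calling convention concrete, here is a minimal sketch (not part of this patch; the function name and error print are illustrative assumptions) of how a completion path can consume qce_check_status() together with the async_req_done hook installed by core.c:

    /* hedged sketch: the shape of a request-completion handler */
    static void example_req_done(struct qce_device *qce)
    {
            u32 status;
            int error;

            error = qce_check_status(qce, &status);
            if (error)
                    dev_err(qce->dev, "request failed, status 0x%08x\n", status);

            /* hand back to the core: it completes the request from its
             * tasklet and then dequeues the next one */
            qce->async_req_done(qce, error);
    }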
429 | |||
430 | void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step) | ||
431 | { | ||
432 | u32 val; | ||
433 | |||
434 | val = qce_read(qce, REG_VERSION); | ||
435 | *major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT; | ||
436 | *minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT; | ||
437 | *step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT; | ||
438 | } | ||
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h new file mode 100644 index 000000000000..a4addd4f7d6c --- /dev/null +++ b/drivers/crypto/qce/common.h | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #ifndef _COMMON_H_ | ||
15 | #define _COMMON_H_ | ||
16 | |||
17 | #include <linux/crypto.h> | ||
18 | #include <linux/types.h> | ||
19 | #include <crypto/aes.h> | ||
20 | #include <crypto/hash.h> | ||
21 | |||
22 | /* key size in bytes */ | ||
23 | #define QCE_SHA_HMAC_KEY_SIZE 64 | ||
24 | #define QCE_MAX_CIPHER_KEY_SIZE AES_KEYSIZE_256 | ||
25 | |||
26 | /* IV length in bytes */ | ||
27 | #define QCE_AES_IV_LENGTH AES_BLOCK_SIZE | ||
28 | /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ | ||
29 | #define QCE_MAX_IV_SIZE AES_BLOCK_SIZE | ||
30 | |||
31 | /* maximum nonce bytes */ | ||
32 | #define QCE_MAX_NONCE 16 | ||
33 | #define QCE_MAX_NONCE_WORDS (QCE_MAX_NONCE / sizeof(u32)) | ||
34 | |||
35 | /* burst size alignment requirement */ | ||
36 | #define QCE_MAX_ALIGN_SIZE 64 | ||
37 | |||
38 | /* cipher algorithms */ | ||
39 | #define QCE_ALG_DES BIT(0) | ||
40 | #define QCE_ALG_3DES BIT(1) | ||
41 | #define QCE_ALG_AES BIT(2) | ||
42 | |||
43 | /* hash and hmac algorithms */ | ||
44 | #define QCE_HASH_SHA1 BIT(3) | ||
45 | #define QCE_HASH_SHA256 BIT(4) | ||
46 | #define QCE_HASH_SHA1_HMAC BIT(5) | ||
47 | #define QCE_HASH_SHA256_HMAC BIT(6) | ||
48 | #define QCE_HASH_AES_CMAC BIT(7) | ||
49 | |||
50 | /* cipher modes */ | ||
51 | #define QCE_MODE_CBC BIT(8) | ||
52 | #define QCE_MODE_ECB BIT(9) | ||
53 | #define QCE_MODE_CTR BIT(10) | ||
54 | #define QCE_MODE_XTS BIT(11) | ||
55 | #define QCE_MODE_CCM BIT(12) | ||
56 | #define QCE_MODE_MASK GENMASK(12, 8) | ||
57 | |||
58 | /* cipher encryption/decryption operations */ | ||
59 | #define QCE_ENCRYPT BIT(13) | ||
60 | #define QCE_DECRYPT BIT(14) | ||
61 | |||
62 | #define IS_DES(flags) (flags & QCE_ALG_DES) | ||
63 | #define IS_3DES(flags) (flags & QCE_ALG_3DES) | ||
64 | #define IS_AES(flags) (flags & QCE_ALG_AES) | ||
65 | |||
66 | #define IS_SHA1(flags) (flags & QCE_HASH_SHA1) | ||
67 | #define IS_SHA256(flags) (flags & QCE_HASH_SHA256) | ||
68 | #define IS_SHA1_HMAC(flags) (flags & QCE_HASH_SHA1_HMAC) | ||
69 | #define IS_SHA256_HMAC(flags) (flags & QCE_HASH_SHA256_HMAC) | ||
70 | #define IS_CMAC(flags) (flags & QCE_HASH_AES_CMAC) | ||
71 | #define IS_SHA(flags) (IS_SHA1(flags) || IS_SHA256(flags)) | ||
72 | #define IS_SHA_HMAC(flags) \ | ||
73 | (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags)) | ||
74 | |||
75 | #define IS_CBC(mode) (mode & QCE_MODE_CBC) | ||
76 | #define IS_ECB(mode) (mode & QCE_MODE_ECB) | ||
77 | #define IS_CTR(mode) (mode & QCE_MODE_CTR) | ||
78 | #define IS_XTS(mode) (mode & QCE_MODE_XTS) | ||
79 | #define IS_CCM(mode) (mode & QCE_MODE_CCM) | ||
80 | |||
81 | #define IS_ENCRYPT(dir) (dir & QCE_ENCRYPT) | ||
82 | #define IS_DECRYPT(dir) (dir & QCE_DECRYPT) | ||
83 | |||
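These bits compose by plain OR: a template or request flag word carries one algorithm bit, at most one mode bit (extractable with QCE_MODE_MASK) and a direction bit, and the IS_*() predicates simply test the corresponding bit. A small illustration, using the values defined above:

    unsigned long flags = QCE_ALG_AES | QCE_MODE_CBC | QCE_ENCRYPT;

    IS_AES(flags);     /* non-zero: BIT(2) is set */
    IS_CBC(flags);     /* non-zero: BIT(8) is set */
    IS_ENCRYPT(flags); /* non-zero: BIT(13) is set */
    IS_XTS(flags);     /* zero:     BIT(11) is clear */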
84 | struct qce_alg_template { | ||
85 | struct list_head entry; | ||
86 | u32 crypto_alg_type; | ||
87 | unsigned long alg_flags; | ||
88 | const u32 *std_iv; | ||
89 | union { | ||
90 | struct crypto_alg crypto; | ||
91 | struct ahash_alg ahash; | ||
92 | } alg; | ||
93 | struct qce_device *qce; | ||
94 | }; | ||
95 | |||
96 | void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len); | ||
97 | int qce_check_status(struct qce_device *qce, u32 *status); | ||
98 | void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step); | ||
99 | int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, | ||
100 | u32 offset); | ||
101 | |||
102 | #endif /* _COMMON_H_ */ | ||
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c new file mode 100644 index 000000000000..33ae3545dc48 --- /dev/null +++ b/drivers/crypto/qce/core.c | |||
@@ -0,0 +1,286 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #include <linux/clk.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | #include <linux/types.h> | ||
20 | #include <crypto/algapi.h> | ||
21 | #include <crypto/internal/hash.h> | ||
22 | #include <crypto/sha.h> | ||
23 | |||
24 | #include "core.h" | ||
25 | #include "cipher.h" | ||
26 | #include "sha.h" | ||
27 | |||
28 | #define QCE_MAJOR_VERSION5 0x05 | ||
29 | #define QCE_QUEUE_LENGTH 1 | ||
30 | |||
31 | static const struct qce_algo_ops *qce_ops[] = { | ||
32 | &ablkcipher_ops, | ||
33 | &ahash_ops, | ||
34 | }; | ||
35 | |||
36 | static void qce_unregister_algs(struct qce_device *qce) | ||
37 | { | ||
38 | const struct qce_algo_ops *ops; | ||
39 | int i; | ||
40 | |||
41 | for (i = 0; i < ARRAY_SIZE(qce_ops); i++) { | ||
42 | ops = qce_ops[i]; | ||
43 | ops->unregister_algs(qce); | ||
44 | } | ||
45 | } | ||
46 | |||
47 | static int qce_register_algs(struct qce_device *qce) | ||
48 | { | ||
49 | const struct qce_algo_ops *ops; | ||
50 | int i, ret = -ENODEV; | ||
51 | |||
52 | for (i = 0; i < ARRAY_SIZE(qce_ops); i++) { | ||
53 | ops = qce_ops[i]; | ||
54 | ret = ops->register_algs(qce); | ||
55 | if (ret) | ||
56 | break; | ||
57 | } | ||
58 | |||
59 | return ret; | ||
60 | } | ||
61 | |||
62 | static int qce_handle_request(struct crypto_async_request *async_req) | ||
63 | { | ||
64 | int ret = -EINVAL, i; | ||
65 | const struct qce_algo_ops *ops; | ||
66 | u32 type = crypto_tfm_alg_type(async_req->tfm); | ||
67 | |||
68 | for (i = 0; i < ARRAY_SIZE(qce_ops); i++) { | ||
69 | ops = qce_ops[i]; | ||
70 | if (type != ops->type) | ||
71 | continue; | ||
72 | ret = ops->async_req_handle(async_req); | ||
73 | break; | ||
74 | } | ||
75 | |||
76 | return ret; | ||
77 | } | ||
78 | |||
79 | static int qce_handle_queue(struct qce_device *qce, | ||
80 | struct crypto_async_request *req) | ||
81 | { | ||
82 | struct crypto_async_request *async_req, *backlog; | ||
83 | unsigned long flags; | ||
84 | int ret = 0, err; | ||
85 | |||
86 | spin_lock_irqsave(&qce->lock, flags); | ||
87 | |||
88 | if (req) | ||
89 | ret = crypto_enqueue_request(&qce->queue, req); | ||
90 | |||
91 | /* busy, do not dequeue request */ | ||
92 | if (qce->req) { | ||
93 | spin_unlock_irqrestore(&qce->lock, flags); | ||
94 | return ret; | ||
95 | } | ||
96 | |||
97 | backlog = crypto_get_backlog(&qce->queue); | ||
98 | async_req = crypto_dequeue_request(&qce->queue); | ||
99 | if (async_req) | ||
100 | qce->req = async_req; | ||
101 | |||
102 | spin_unlock_irqrestore(&qce->lock, flags); | ||
103 | |||
104 | if (!async_req) | ||
105 | return ret; | ||
106 | |||
107 | if (backlog) { | ||
108 | spin_lock_bh(&qce->lock); | ||
109 | backlog->complete(backlog, -EINPROGRESS); | ||
110 | spin_unlock_bh(&qce->lock); | ||
111 | } | ||
112 | |||
113 | err = qce_handle_request(async_req); | ||
114 | if (err) { | ||
115 | qce->result = err; | ||
116 | tasklet_schedule(&qce->done_tasklet); | ||
117 | } | ||
118 | |||
119 | return ret; | ||
120 | } | ||
121 | |||
122 | static void qce_tasklet_req_done(unsigned long data) | ||
123 | { | ||
124 | struct qce_device *qce = (struct qce_device *)data; | ||
125 | struct crypto_async_request *req; | ||
126 | unsigned long flags; | ||
127 | |||
128 | spin_lock_irqsave(&qce->lock, flags); | ||
129 | req = qce->req; | ||
130 | qce->req = NULL; | ||
131 | spin_unlock_irqrestore(&qce->lock, flags); | ||
132 | |||
133 | if (req) | ||
134 | req->complete(req, qce->result); | ||
135 | |||
136 | qce_handle_queue(qce, NULL); | ||
137 | } | ||
138 | |||
139 | static int qce_async_request_enqueue(struct qce_device *qce, | ||
140 | struct crypto_async_request *req) | ||
141 | { | ||
142 | return qce_handle_queue(qce, req); | ||
143 | } | ||
144 | |||
145 | static void qce_async_request_done(struct qce_device *qce, int ret) | ||
146 | { | ||
147 | qce->result = ret; | ||
148 | tasklet_schedule(&qce->done_tasklet); | ||
149 | } | ||
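Together these two helpers define the lifecycle every algorithm implementation sees: submit with async_req_enqueue(), and when the hardware (or an error path) finishes, report through async_req_done(), which schedules the done tasklet to complete the request and restart the queue. A minimal sketch of a per-algorithm DMA completion callback wired this way (the name is illustrative; the real callbacks, such as qce_ablkcipher_done passed to qce_dma_prep_sgs() above, follow this pattern):

    /* hedged sketch: shape of an algorithm's DMA-done callback */
    static void example_dma_done(void *data)
    {
            struct crypto_async_request *async_req = data;
            struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
            struct qce_device *qce = tmpl->qce;
            u32 status;
            int error;

            error = qce_check_status(qce, &status);
            qce->async_req_done(qce, error);
    }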
150 | |||
151 | static int qce_check_version(struct qce_device *qce) | ||
152 | { | ||
153 | u32 major, minor, step; | ||
154 | |||
155 | qce_get_version(qce, &major, &minor, &step); | ||
156 | |||
157 | /* | ||
158 | * the driver does not support v5 with minor 0 because that revision | ||
159 | * has special alignment requirements. | ||
160 | */ | ||
161 | if (major != QCE_MAJOR_VERSION5 || minor == 0) | ||
162 | return -ENODEV; | ||
163 | |||
164 | qce->burst_size = QCE_BAM_BURST_SIZE; | ||
165 | qce->pipe_pair_id = 1; | ||
166 | |||
167 | dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n", | ||
168 | major, minor, step); | ||
169 | |||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | static int qce_crypto_probe(struct platform_device *pdev) | ||
174 | { | ||
175 | struct device *dev = &pdev->dev; | ||
176 | struct qce_device *qce; | ||
177 | struct resource *res; | ||
178 | int ret; | ||
179 | |||
180 | qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL); | ||
181 | if (!qce) | ||
182 | return -ENOMEM; | ||
183 | |||
184 | qce->dev = dev; | ||
185 | platform_set_drvdata(pdev, qce); | ||
186 | |||
187 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
188 | qce->base = devm_ioremap_resource(&pdev->dev, res); | ||
189 | if (IS_ERR(qce->base)) | ||
190 | return PTR_ERR(qce->base); | ||
191 | |||
192 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); | ||
193 | if (ret < 0) | ||
194 | return ret; | ||
195 | |||
196 | qce->core = devm_clk_get(qce->dev, "core"); | ||
197 | if (IS_ERR(qce->core)) | ||
198 | return PTR_ERR(qce->core); | ||
199 | |||
200 | qce->iface = devm_clk_get(qce->dev, "iface"); | ||
201 | if (IS_ERR(qce->iface)) | ||
202 | return PTR_ERR(qce->iface); | ||
203 | |||
204 | qce->bus = devm_clk_get(qce->dev, "bus"); | ||
205 | if (IS_ERR(qce->bus)) | ||
206 | return PTR_ERR(qce->bus); | ||
207 | |||
208 | ret = clk_prepare_enable(qce->core); | ||
209 | if (ret) | ||
210 | return ret; | ||
211 | |||
212 | ret = clk_prepare_enable(qce->iface); | ||
213 | if (ret) | ||
214 | goto err_clks_core; | ||
215 | |||
216 | ret = clk_prepare_enable(qce->bus); | ||
217 | if (ret) | ||
218 | goto err_clks_iface; | ||
219 | |||
220 | ret = qce_dma_request(qce->dev, &qce->dma); | ||
221 | if (ret) | ||
222 | goto err_clks; | ||
223 | |||
224 | ret = qce_check_version(qce); | ||
225 | if (ret) | ||
226 | goto err_dma; | ||
227 | |||
228 | spin_lock_init(&qce->lock); | ||
229 | tasklet_init(&qce->done_tasklet, qce_tasklet_req_done, | ||
230 | (unsigned long)qce); | ||
231 | crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH); | ||
232 | |||
233 | qce->async_req_enqueue = qce_async_request_enqueue; | ||
234 | qce->async_req_done = qce_async_request_done; | ||
235 | |||
236 | ret = qce_register_algs(qce); | ||
237 | if (ret) | ||
238 | goto err_dma; | ||
239 | |||
240 | return 0; | ||
241 | |||
242 | err_dma: | ||
243 | qce_dma_release(&qce->dma); | ||
244 | err_clks: | ||
245 | clk_disable_unprepare(qce->bus); | ||
246 | err_clks_iface: | ||
247 | clk_disable_unprepare(qce->iface); | ||
248 | err_clks_core: | ||
249 | clk_disable_unprepare(qce->core); | ||
250 | return ret; | ||
251 | } | ||
252 | |||
253 | static int qce_crypto_remove(struct platform_device *pdev) | ||
254 | { | ||
255 | struct qce_device *qce = platform_get_drvdata(pdev); | ||
256 | |||
257 | tasklet_kill(&qce->done_tasklet); | ||
258 | qce_unregister_algs(qce); | ||
259 | qce_dma_release(&qce->dma); | ||
260 | clk_disable_unprepare(qce->bus); | ||
261 | clk_disable_unprepare(qce->iface); | ||
262 | clk_disable_unprepare(qce->core); | ||
263 | return 0; | ||
264 | } | ||
265 | |||
266 | static const struct of_device_id qce_crypto_of_match[] = { | ||
267 | { .compatible = "qcom,crypto-v5.1", }, | ||
268 | {} | ||
269 | }; | ||
270 | MODULE_DEVICE_TABLE(of, qce_crypto_of_match); | ||
271 | |||
272 | static struct platform_driver qce_crypto_driver = { | ||
273 | .probe = qce_crypto_probe, | ||
274 | .remove = qce_crypto_remove, | ||
275 | .driver = { | ||
276 | .owner = THIS_MODULE, | ||
277 | .name = KBUILD_MODNAME, | ||
278 | .of_match_table = qce_crypto_of_match, | ||
279 | }, | ||
280 | }; | ||
281 | module_platform_driver(qce_crypto_driver); | ||
282 | |||
283 | MODULE_LICENSE("GPL v2"); | ||
284 | MODULE_DESCRIPTION("Qualcomm crypto engine driver"); | ||
285 | MODULE_ALIAS("platform:" KBUILD_MODNAME); | ||
286 | MODULE_AUTHOR("The Linux Foundation"); | ||
diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h new file mode 100644 index 000000000000..549965d4d91f --- /dev/null +++ b/drivers/crypto/qce/core.h | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #ifndef _CORE_H_ | ||
15 | #define _CORE_H_ | ||
16 | |||
17 | #include "dma.h" | ||
18 | |||
19 | /** | ||
20 | * struct qce_device - crypto engine device structure | ||
21 | * @queue: crypto request queue | ||
22 | * @lock: the lock protecting queue and req | ||
23 | * @done_tasklet: done tasklet object | ||
24 | * @req: current active request | ||
25 | * @result: result of current transform | ||
26 | * @base: virtual IO base | ||
27 | * @dev: pointer to device structure | ||
28 | * @core: core device clock | ||
29 | * @iface: interface clock | ||
30 | * @bus: bus clock | ||
31 | * @dma: pointer to dma data | ||
32 | * @burst_size: the crypto burst size | ||
33 | * @pipe_pair_id: which pipe pair id the device is using | ||
34 | * @async_req_enqueue: invoked by every algorithm to enqueue a request | ||
35 | * @async_req_done: invoked by every algorithm to finish its request | ||
36 | */ | ||
37 | struct qce_device { | ||
38 | struct crypto_queue queue; | ||
39 | spinlock_t lock; | ||
40 | struct tasklet_struct done_tasklet; | ||
41 | struct crypto_async_request *req; | ||
42 | int result; | ||
43 | void __iomem *base; | ||
44 | struct device *dev; | ||
45 | struct clk *core, *iface, *bus; | ||
46 | struct qce_dma_data dma; | ||
47 | int burst_size; | ||
48 | unsigned int pipe_pair_id; | ||
49 | int (*async_req_enqueue)(struct qce_device *qce, | ||
50 | struct crypto_async_request *req); | ||
51 | void (*async_req_done)(struct qce_device *qce, int ret); | ||
52 | }; | ||
53 | |||
54 | /** | ||
55 | * struct qce_algo_ops - algorithm operations per crypto type | ||
56 | * @type: should be CRYPTO_ALG_TYPE_XXX | ||
57 | * @register_algs: invoked by core to register the algorithms | ||
58 | * @unregister_algs: invoked by core to unregister the algorithms | ||
59 | * @async_req_handle: invoked by core to handle enqueued request | ||
60 | */ | ||
61 | struct qce_algo_ops { | ||
62 | u32 type; | ||
63 | int (*register_algs)(struct qce_device *qce); | ||
64 | void (*unregister_algs)(struct qce_device *qce); | ||
65 | int (*async_req_handle)(struct crypto_async_request *async_req); | ||
66 | }; | ||
67 | |||
68 | #endif /* _CORE_H_ */ | ||
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c new file mode 100644 index 000000000000..0fb21e13f247 --- /dev/null +++ b/drivers/crypto/qce/dma.c | |||
@@ -0,0 +1,186 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #include <linux/dmaengine.h> | ||
15 | #include <crypto/scatterwalk.h> | ||
16 | |||
17 | #include "dma.h" | ||
18 | |||
19 | int qce_dma_request(struct device *dev, struct qce_dma_data *dma) | ||
20 | { | ||
21 | int ret; | ||
22 | |||
23 | dma->txchan = dma_request_slave_channel_reason(dev, "tx"); | ||
24 | if (IS_ERR(dma->txchan)) | ||
25 | return PTR_ERR(dma->txchan); | ||
26 | |||
27 | dma->rxchan = dma_request_slave_channel_reason(dev, "rx"); | ||
28 | if (IS_ERR(dma->rxchan)) { | ||
29 | ret = PTR_ERR(dma->rxchan); | ||
30 | goto error_rx; | ||
31 | } | ||
32 | |||
33 | dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ, | ||
34 | GFP_KERNEL); | ||
35 | if (!dma->result_buf) { | ||
36 | ret = -ENOMEM; | ||
37 | goto error_nomem; | ||
38 | } | ||
39 | |||
40 | dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ; | ||
41 | |||
42 | return 0; | ||
43 | error_nomem: | ||
44 | dma_release_channel(dma->rxchan); | ||
45 | error_rx: | ||
46 | dma_release_channel(dma->txchan); | ||
47 | return ret; | ||
48 | } | ||
49 | |||
50 | void qce_dma_release(struct qce_dma_data *dma) | ||
51 | { | ||
52 | dma_release_channel(dma->txchan); | ||
53 | dma_release_channel(dma->rxchan); | ||
54 | kfree(dma->result_buf); | ||
55 | } | ||
56 | |||
57 | int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents, | ||
58 | enum dma_data_direction dir, bool chained) | ||
59 | { | ||
60 | int err; | ||
61 | |||
62 | if (chained) { | ||
63 | while (sg) { | ||
64 | err = dma_map_sg(dev, sg, 1, dir); | ||
65 | if (!err) | ||
66 | return -EFAULT; | ||
67 | sg = scatterwalk_sg_next(sg); | ||
68 | } | ||
69 | } else { | ||
70 | err = dma_map_sg(dev, sg, nents, dir); | ||
71 | if (!err) | ||
72 | return -EFAULT; | ||
73 | } | ||
74 | |||
75 | return nents; | ||
76 | } | ||
77 | |||
78 | void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents, | ||
79 | enum dma_data_direction dir, bool chained) | ||
80 | { | ||
81 | if (chained) | ||
82 | while (sg) { | ||
83 | dma_unmap_sg(dev, sg, 1, dir); | ||
84 | sg = scatterwalk_sg_next(sg); | ||
85 | } | ||
86 | else | ||
87 | dma_unmap_sg(dev, sg, nents, dir); | ||
88 | } | ||
89 | |||
90 | int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained) | ||
91 | { | ||
92 | struct scatterlist *sg = sglist; | ||
93 | int nents = 0; | ||
94 | |||
95 | if (chained) | ||
96 | *chained = false; | ||
97 | |||
98 | while (nbytes > 0 && sg) { | ||
99 | nents++; | ||
100 | nbytes -= sg->length; | ||
101 | if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained) | ||
102 | *chained = true; | ||
103 | sg = scatterwalk_sg_next(sg); | ||
104 | } | ||
105 | |||
106 | return nents; | ||
107 | } | ||
108 | |||
109 | struct scatterlist * | ||
110 | qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl) | ||
111 | { | ||
112 | struct scatterlist *sg = sgt->sgl, *sg_last = NULL; | ||
113 | |||
114 | while (sg) { | ||
115 | if (!sg_page(sg)) | ||
116 | break; | ||
117 | sg = sg_next(sg); | ||
118 | } | ||
119 | |||
120 | if (!sg) | ||
121 | return ERR_PTR(-EINVAL); | ||
122 | |||
123 | while (new_sgl && sg) { | ||
124 | sg_set_page(sg, sg_page(new_sgl), new_sgl->length, | ||
125 | new_sgl->offset); | ||
126 | sg_last = sg; | ||
127 | sg = sg_next(sg); | ||
128 | new_sgl = sg_next(new_sgl); | ||
129 | } | ||
130 | |||
131 | return sg_last; | ||
132 | } | ||
133 | |||
134 | static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg, | ||
135 | int nents, unsigned long flags, | ||
136 | enum dma_transfer_direction dir, | ||
137 | dma_async_tx_callback cb, void *cb_param) | ||
138 | { | ||
139 | struct dma_async_tx_descriptor *desc; | ||
140 | dma_cookie_t cookie; | ||
141 | |||
142 | if (!sg || !nents) | ||
143 | return -EINVAL; | ||
144 | |||
145 | desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags); | ||
146 | if (!desc) | ||
147 | return -EINVAL; | ||
148 | |||
149 | desc->callback = cb; | ||
150 | desc->callback_param = cb_param; | ||
151 | cookie = dmaengine_submit(desc); | ||
152 | |||
153 | return dma_submit_error(cookie); | ||
154 | } | ||
155 | |||
156 | int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg, | ||
157 | int rx_nents, struct scatterlist *tx_sg, int tx_nents, | ||
158 | dma_async_tx_callback cb, void *cb_param) | ||
159 | { | ||
160 | struct dma_chan *rxchan = dma->rxchan; | ||
161 | struct dma_chan *txchan = dma->txchan; | ||
162 | unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK; | ||
163 | int ret; | ||
164 | |||
165 | ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV, | ||
166 | NULL, NULL); | ||
167 | if (ret) | ||
168 | return ret; | ||
169 | |||
170 | return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM, | ||
171 | cb, cb_param); | ||
172 | } | ||
173 | |||
174 | void qce_dma_issue_pending(struct qce_dma_data *dma) | ||
175 | { | ||
176 | dma_async_issue_pending(dma->rxchan); | ||
177 | dma_async_issue_pending(dma->txchan); | ||
178 | } | ||
179 | |||
180 | int qce_dma_terminate_all(struct qce_dma_data *dma) | ||
181 | { | ||
182 | int ret; | ||
183 | |||
184 | ret = dmaengine_terminate_all(dma->rxchan); | ||
185 | return ret ?: dmaengine_terminate_all(dma->txchan); | ||
186 | } | ||
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h new file mode 100644 index 000000000000..805e378d59e9 --- /dev/null +++ b/drivers/crypto/qce/dma.h | |||
@@ -0,0 +1,58 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #ifndef _DMA_H_ | ||
15 | #define _DMA_H_ | ||
16 | |||
17 | /* maximum data transfer block size between BAM and CE */ | ||
18 | #define QCE_BAM_BURST_SIZE 64 | ||
19 | |||
20 | #define QCE_AUTHIV_REGS_CNT 16 | ||
21 | #define QCE_AUTH_BYTECOUNT_REGS_CNT 4 | ||
22 | #define QCE_CNTRIV_REGS_CNT 4 | ||
23 | |||
24 | struct qce_result_dump { | ||
25 | u32 auth_iv[QCE_AUTHIV_REGS_CNT]; | ||
26 | u32 auth_byte_count[QCE_AUTH_BYTECOUNT_REGS_CNT]; | ||
27 | u32 encr_cntr_iv[QCE_CNTRIV_REGS_CNT]; | ||
28 | u32 status; | ||
29 | u32 status2; | ||
30 | }; | ||
31 | |||
32 | #define QCE_IGNORE_BUF_SZ (2 * QCE_BAM_BURST_SIZE) | ||
33 | #define QCE_RESULT_BUF_SZ \ | ||
34 | ALIGN(sizeof(struct qce_result_dump), QCE_BAM_BURST_SIZE) | ||
35 | |||
36 | struct qce_dma_data { | ||
37 | struct dma_chan *txchan; | ||
38 | struct dma_chan *rxchan; | ||
39 | struct qce_result_dump *result_buf; | ||
40 | void *ignore_buf; | ||
41 | }; | ||
42 | |||
43 | int qce_dma_request(struct device *dev, struct qce_dma_data *dma); | ||
44 | void qce_dma_release(struct qce_dma_data *dma); | ||
45 | int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in, | ||
46 | int in_ents, struct scatterlist *sg_out, int out_ents, | ||
47 | dma_async_tx_callback cb, void *cb_param); | ||
48 | void qce_dma_issue_pending(struct qce_dma_data *dma); | ||
49 | int qce_dma_terminate_all(struct qce_dma_data *dma); | ||
50 | int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained); | ||
51 | void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents, | ||
52 | enum dma_data_direction dir, bool chained); | ||
53 | int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents, | ||
54 | enum dma_data_direction dir, bool chained); | ||
55 | struct scatterlist * | ||
56 | qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add); | ||
57 | |||
58 | #endif /* _DMA_H_ */ | ||
diff --git a/drivers/crypto/qce/regs-v5.h b/drivers/crypto/qce/regs-v5.h new file mode 100644 index 000000000000..f0e19e35664a --- /dev/null +++ b/drivers/crypto/qce/regs-v5.h | |||
@@ -0,0 +1,334 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #ifndef _REGS_V5_H_ | ||
15 | #define _REGS_V5_H_ | ||
16 | |||
17 | #include <linux/bitops.h> | ||
18 | |||
19 | #define REG_VERSION 0x000 | ||
20 | #define REG_STATUS 0x100 | ||
21 | #define REG_STATUS2 0x104 | ||
22 | #define REG_ENGINES_AVAIL 0x108 | ||
23 | #define REG_FIFO_SIZES 0x10c | ||
24 | #define REG_SEG_SIZE 0x110 | ||
25 | #define REG_GOPROC 0x120 | ||
26 | #define REG_ENCR_SEG_CFG 0x200 | ||
27 | #define REG_ENCR_SEG_SIZE 0x204 | ||
28 | #define REG_ENCR_SEG_START 0x208 | ||
29 | #define REG_CNTR0_IV0 0x20c | ||
30 | #define REG_CNTR1_IV1 0x210 | ||
31 | #define REG_CNTR2_IV2 0x214 | ||
32 | #define REG_CNTR3_IV3 0x218 | ||
33 | #define REG_CNTR_MASK 0x21C | ||
34 | #define REG_ENCR_CCM_INT_CNTR0 0x220 | ||
35 | #define REG_ENCR_CCM_INT_CNTR1 0x224 | ||
36 | #define REG_ENCR_CCM_INT_CNTR2 0x228 | ||
37 | #define REG_ENCR_CCM_INT_CNTR3 0x22c | ||
38 | #define REG_ENCR_XTS_DU_SIZE 0x230 | ||
39 | #define REG_CNTR_MASK2 0x234 | ||
40 | #define REG_CNTR_MASK1 0x238 | ||
41 | #define REG_CNTR_MASK0 0x23c | ||
42 | #define REG_AUTH_SEG_CFG 0x300 | ||
43 | #define REG_AUTH_SEG_SIZE 0x304 | ||
44 | #define REG_AUTH_SEG_START 0x308 | ||
45 | #define REG_AUTH_IV0 0x310 | ||
46 | #define REG_AUTH_IV1 0x314 | ||
47 | #define REG_AUTH_IV2 0x318 | ||
48 | #define REG_AUTH_IV3 0x31c | ||
49 | #define REG_AUTH_IV4 0x320 | ||
50 | #define REG_AUTH_IV5 0x324 | ||
51 | #define REG_AUTH_IV6 0x328 | ||
52 | #define REG_AUTH_IV7 0x32c | ||
53 | #define REG_AUTH_IV8 0x330 | ||
54 | #define REG_AUTH_IV9 0x334 | ||
55 | #define REG_AUTH_IV10 0x338 | ||
56 | #define REG_AUTH_IV11 0x33c | ||
57 | #define REG_AUTH_IV12 0x340 | ||
58 | #define REG_AUTH_IV13 0x344 | ||
59 | #define REG_AUTH_IV14 0x348 | ||
60 | #define REG_AUTH_IV15 0x34c | ||
61 | #define REG_AUTH_INFO_NONCE0 0x350 | ||
62 | #define REG_AUTH_INFO_NONCE1 0x354 | ||
63 | #define REG_AUTH_INFO_NONCE2 0x358 | ||
64 | #define REG_AUTH_INFO_NONCE3 0x35c | ||
65 | #define REG_AUTH_BYTECNT0 0x390 | ||
66 | #define REG_AUTH_BYTECNT1 0x394 | ||
67 | #define REG_AUTH_BYTECNT2 0x398 | ||
68 | #define REG_AUTH_BYTECNT3 0x39c | ||
69 | #define REG_AUTH_EXP_MAC0 0x3a0 | ||
70 | #define REG_AUTH_EXP_MAC1 0x3a4 | ||
71 | #define REG_AUTH_EXP_MAC2 0x3a8 | ||
72 | #define REG_AUTH_EXP_MAC3 0x3ac | ||
73 | #define REG_AUTH_EXP_MAC4 0x3b0 | ||
74 | #define REG_AUTH_EXP_MAC5 0x3b4 | ||
75 | #define REG_AUTH_EXP_MAC6 0x3b8 | ||
76 | #define REG_AUTH_EXP_MAC7 0x3bc | ||
77 | #define REG_CONFIG 0x400 | ||
78 | #define REG_GOPROC_QC_KEY 0x1000 | ||
79 | #define REG_GOPROC_OEM_KEY 0x2000 | ||
80 | #define REG_ENCR_KEY0 0x3000 | ||
81 | #define REG_ENCR_KEY1 0x3004 | ||
82 | #define REG_ENCR_KEY2 0x3008 | ||
83 | #define REG_ENCR_KEY3 0x300c | ||
84 | #define REG_ENCR_KEY4 0x3010 | ||
85 | #define REG_ENCR_KEY5 0x3014 | ||
86 | #define REG_ENCR_KEY6 0x3018 | ||
87 | #define REG_ENCR_KEY7 0x301c | ||
88 | #define REG_ENCR_XTS_KEY0 0x3020 | ||
89 | #define REG_ENCR_XTS_KEY1 0x3024 | ||
90 | #define REG_ENCR_XTS_KEY2 0x3028 | ||
91 | #define REG_ENCR_XTS_KEY3 0x302c | ||
92 | #define REG_ENCR_XTS_KEY4 0x3030 | ||
93 | #define REG_ENCR_XTS_KEY5 0x3034 | ||
94 | #define REG_ENCR_XTS_KEY6 0x3038 | ||
95 | #define REG_ENCR_XTS_KEY7 0x303c | ||
96 | #define REG_AUTH_KEY0 0x3040 | ||
97 | #define REG_AUTH_KEY1 0x3044 | ||
98 | #define REG_AUTH_KEY2 0x3048 | ||
99 | #define REG_AUTH_KEY3 0x304c | ||
100 | #define REG_AUTH_KEY4 0x3050 | ||
101 | #define REG_AUTH_KEY5 0x3054 | ||
102 | #define REG_AUTH_KEY6 0x3058 | ||
103 | #define REG_AUTH_KEY7 0x305c | ||
104 | #define REG_AUTH_KEY8 0x3060 | ||
105 | #define REG_AUTH_KEY9 0x3064 | ||
106 | #define REG_AUTH_KEY10 0x3068 | ||
107 | #define REG_AUTH_KEY11 0x306c | ||
108 | #define REG_AUTH_KEY12 0x3070 | ||
109 | #define REG_AUTH_KEY13 0x3074 | ||
110 | #define REG_AUTH_KEY14 0x3078 | ||
111 | #define REG_AUTH_KEY15 0x307c | ||
112 | |||
113 | /* Register bits - REG_VERSION */ | ||
114 | #define CORE_STEP_REV_SHIFT 0 | ||
115 | #define CORE_STEP_REV_MASK GENMASK(15, 0) | ||
116 | #define CORE_MINOR_REV_SHIFT 16 | ||
117 | #define CORE_MINOR_REV_MASK GENMASK(23, 16) | ||
118 | #define CORE_MAJOR_REV_SHIFT 24 | ||
119 | #define CORE_MAJOR_REV_MASK GENMASK(31, 24) | ||
120 | |||
121 | /* Register bits - REG_STATUS */ | ||
122 | #define MAC_FAILED_SHIFT 31 | ||
123 | #define DOUT_SIZE_AVAIL_SHIFT 26 | ||
124 | #define DOUT_SIZE_AVAIL_MASK GENMASK(30, 26) | ||
125 | #define DIN_SIZE_AVAIL_SHIFT 21 | ||
126 | #define DIN_SIZE_AVAIL_MASK GENMASK(25, 21) | ||
127 | #define HSD_ERR_SHIFT 20 | ||
128 | #define ACCESS_VIOL_SHIFT 19 | ||
129 | #define PIPE_ACTIVE_ERR_SHIFT 18 | ||
130 | #define CFG_CHNG_ERR_SHIFT 17 | ||
131 | #define DOUT_ERR_SHIFT 16 | ||
132 | #define DIN_ERR_SHIFT 15 | ||
133 | #define AXI_ERR_SHIFT 14 | ||
134 | #define CRYPTO_STATE_SHIFT 10 | ||
135 | #define CRYPTO_STATE_MASK GENMASK(13, 10) | ||
136 | #define ENCR_BUSY_SHIFT 9 | ||
137 | #define AUTH_BUSY_SHIFT 8 | ||
138 | #define DOUT_INTR_SHIFT 7 | ||
139 | #define DIN_INTR_SHIFT 6 | ||
140 | #define OP_DONE_INTR_SHIFT 5 | ||
141 | #define ERR_INTR_SHIFT 4 | ||
142 | #define DOUT_RDY_SHIFT 3 | ||
143 | #define DIN_RDY_SHIFT 2 | ||
144 | #define OPERATION_DONE_SHIFT 1 | ||
145 | #define SW_ERR_SHIFT 0 | ||
146 | |||
147 | /* Register bits - REG_STATUS2 */ | ||
148 | #define AXI_EXTRA_SHIFT 1 | ||
149 | #define LOCKED_SHIFT 2 | ||
150 | |||
151 | /* Register bits - REG_CONFIG */ | ||
152 | #define REQ_SIZE_SHIFT 17 | ||
153 | #define REQ_SIZE_MASK GENMASK(20, 17) | ||
154 | #define REQ_SIZE_ENUM_1_BEAT 0 | ||
155 | #define REQ_SIZE_ENUM_2_BEAT 1 | ||
156 | #define REQ_SIZE_ENUM_3_BEAT 2 | ||
157 | #define REQ_SIZE_ENUM_4_BEAT 3 | ||
158 | #define REQ_SIZE_ENUM_5_BEAT 4 | ||
159 | #define REQ_SIZE_ENUM_6_BEAT 5 | ||
160 | #define REQ_SIZE_ENUM_7_BEAT 6 | ||
161 | #define REQ_SIZE_ENUM_8_BEAT 7 | ||
162 | #define REQ_SIZE_ENUM_9_BEAT 8 | ||
163 | #define REQ_SIZE_ENUM_10_BEAT 9 | ||
164 | #define REQ_SIZE_ENUM_11_BEAT 10 | ||
165 | #define REQ_SIZE_ENUM_12_BEAT 11 | ||
166 | #define REQ_SIZE_ENUM_13_BEAT 12 | ||
167 | #define REQ_SIZE_ENUM_14_BEAT 13 | ||
168 | #define REQ_SIZE_ENUM_15_BEAT 14 | ||
169 | #define REQ_SIZE_ENUM_16_BEAT 15 | ||
170 | |||
171 | #define MAX_QUEUED_REQ_SHIFT 14 | ||
172 | #define MAX_QUEUED_REQ_MASK GENMASK(24, 16) | ||
173 | #define ENUM_1_QUEUED_REQS 0 | ||
174 | #define ENUM_2_QUEUED_REQS 1 | ||
175 | #define ENUM_3_QUEUED_REQS 2 | ||
176 | |||
177 | #define IRQ_ENABLES_SHIFT 10 | ||
178 | #define IRQ_ENABLES_MASK GENMASK(13, 10) | ||
179 | |||
180 | #define LITTLE_ENDIAN_MODE_SHIFT 9 | ||
181 | #define PIPE_SET_SELECT_SHIFT 5 | ||
182 | #define PIPE_SET_SELECT_MASK GENMASK(8, 5) | ||
183 | |||
184 | #define HIGH_SPD_EN_N_SHIFT 4 | ||
185 | #define MASK_DOUT_INTR_SHIFT 3 | ||
186 | #define MASK_DIN_INTR_SHIFT 2 | ||
187 | #define MASK_OP_DONE_INTR_SHIFT 1 | ||
188 | #define MASK_ERR_INTR_SHIFT 0 | ||
189 | |||
190 | /* Register bits - REG_AUTH_SEG_CFG */ | ||
191 | #define COMP_EXP_MAC_SHIFT 24 | ||
192 | #define COMP_EXP_MAC_DISABLED 0 | ||
193 | #define COMP_EXP_MAC_ENABLED 1 | ||
194 | |||
195 | #define F9_DIRECTION_SHIFT 23 | ||
196 | #define F9_DIRECTION_UPLINK 0 | ||
197 | #define F9_DIRECTION_DOWNLINK 1 | ||
198 | |||
199 | #define AUTH_NONCE_NUM_WORDS_SHIFT 20 | ||
200 | #define AUTH_NONCE_NUM_WORDS_MASK GENMASK(22, 20) | ||
201 | |||
202 | #define USE_PIPE_KEY_AUTH_SHIFT 19 | ||
203 | #define USE_HW_KEY_AUTH_SHIFT 18 | ||
204 | #define AUTH_FIRST_SHIFT 17 | ||
205 | #define AUTH_LAST_SHIFT 16 | ||
206 | |||
207 | #define AUTH_POS_SHIFT 14 | ||
208 | #define AUTH_POS_MASK GENMASK(15, 14) | ||
209 | #define AUTH_POS_BEFORE 0 | ||
210 | #define AUTH_POS_AFTER 1 | ||
211 | |||
212 | #define AUTH_SIZE_SHIFT 9 | ||
213 | #define AUTH_SIZE_MASK GENMASK(13, 9) | ||
214 | #define AUTH_SIZE_SHA1 0 | ||
215 | #define AUTH_SIZE_SHA256 1 | ||
216 | #define AUTH_SIZE_ENUM_1_BYTES 0 | ||
217 | #define AUTH_SIZE_ENUM_2_BYTES 1 | ||
218 | #define AUTH_SIZE_ENUM_3_BYTES 2 | ||
219 | #define AUTH_SIZE_ENUM_4_BYTES 3 | ||
220 | #define AUTH_SIZE_ENUM_5_BYTES 4 | ||
221 | #define AUTH_SIZE_ENUM_6_BYTES 5 | ||
222 | #define AUTH_SIZE_ENUM_7_BYTES 6 | ||
223 | #define AUTH_SIZE_ENUM_8_BYTES 7 | ||
224 | #define AUTH_SIZE_ENUM_9_BYTES 8 | ||
225 | #define AUTH_SIZE_ENUM_10_BYTES 9 | ||
226 | #define AUTH_SIZE_ENUM_11_BYTES 10 | ||
227 | #define AUTH_SIZE_ENUM_12_BYTES 11 | ||
228 | #define AUTH_SIZE_ENUM_13_BYTES 12 | ||
229 | #define AUTH_SIZE_ENUM_14_BYTES 13 | ||
230 | #define AUTH_SIZE_ENUM_15_BYTES 14 | ||
231 | #define AUTH_SIZE_ENUM_16_BYTES 15 | ||
232 | |||
233 | #define AUTH_MODE_SHIFT 6 | ||
234 | #define AUTH_MODE_MASK GENMASK(8, 6) | ||
235 | #define AUTH_MODE_HASH 0 | ||
236 | #define AUTH_MODE_HMAC 1 | ||
237 | #define AUTH_MODE_CCM 0 | ||
238 | #define AUTH_MODE_CMAC 1 | ||
239 | |||
240 | #define AUTH_KEY_SIZE_SHIFT 3 | ||
241 | #define AUTH_KEY_SIZE_MASK GENMASK(5, 3) | ||
242 | #define AUTH_KEY_SZ_AES128 0 | ||
243 | #define AUTH_KEY_SZ_AES256 2 | ||
244 | |||
245 | #define AUTH_ALG_SHIFT 0 | ||
246 | #define AUTH_ALG_MASK GENMASK(2, 0) | ||
247 | #define AUTH_ALG_NONE 0 | ||
248 | #define AUTH_ALG_SHA 1 | ||
249 | #define AUTH_ALG_AES 2 | ||
250 | #define AUTH_ALG_KASUMI 3 | ||
251 | #define AUTH_ALG_SNOW3G 4 | ||
252 | #define AUTH_ALG_ZUC 5 | ||
253 | |||
254 | /* Register bits - REG_ENCR_XTS_DU_SIZE */ | ||
255 | #define ENCR_XTS_DU_SIZE_SHIFT 0 | ||
256 | #define ENCR_XTS_DU_SIZE_MASK GENMASK(19, 0) | ||
257 | |||
258 | /* Register bits - REG_ENCR_SEG_CFG */ | ||
259 | #define F8_KEYSTREAM_ENABLE_SHIFT 17 | ||
260 | #define F8_KEYSTREAM_DISABLED 0 | ||
261 | #define F8_KEYSTREAM_ENABLED 1 | ||
262 | |||
263 | #define F8_DIRECTION_SHIFT 16 | ||
264 | #define F8_DIRECTION_UPLINK 0 | ||
265 | #define F8_DIRECTION_DOWNLINK 1 | ||
266 | |||
267 | #define USE_PIPE_KEY_ENCR_SHIFT 15 | ||
268 | #define USE_PIPE_KEY_ENCR_ENABLED 1 | ||
269 | #define USE_KEY_REGISTERS 0 | ||
270 | |||
271 | #define USE_HW_KEY_ENCR_SHIFT 14 | ||
272 | #define USE_KEY_REG 0 | ||
273 | #define USE_HW_KEY 1 | ||
274 | |||
275 | #define LAST_CCM_SHIFT 13 | ||
276 | #define LAST_CCM_XFR 1 | ||
277 | #define INTERM_CCM_XFR 0 | ||
278 | |||
279 | #define CNTR_ALG_SHIFT 11 | ||
280 | #define CNTR_ALG_MASK GENMASK(12, 11) | ||
281 | #define CNTR_ALG_NIST 0 | ||
282 | |||
283 | #define ENCODE_SHIFT 10 | ||
284 | |||
285 | #define ENCR_MODE_SHIFT 6 | ||
286 | #define ENCR_MODE_MASK GENMASK(9, 6) | ||
287 | #define ENCR_MODE_ECB 0 | ||
288 | #define ENCR_MODE_CBC 1 | ||
289 | #define ENCR_MODE_CTR 2 | ||
290 | #define ENCR_MODE_XTS 3 | ||
291 | #define ENCR_MODE_CCM 4 | ||
292 | |||
293 | #define ENCR_KEY_SZ_SHIFT 3 | ||
294 | #define ENCR_KEY_SZ_MASK GENMASK(5, 3) | ||
295 | #define ENCR_KEY_SZ_DES 0 | ||
296 | #define ENCR_KEY_SZ_3DES 1 | ||
297 | #define ENCR_KEY_SZ_AES128 0 | ||
298 | #define ENCR_KEY_SZ_AES256 2 | ||
299 | |||
300 | #define ENCR_ALG_SHIFT 0 | ||
301 | #define ENCR_ALG_MASK GENMASK(2, 0) | ||
302 | #define ENCR_ALG_NONE 0 | ||
303 | #define ENCR_ALG_DES 1 | ||
304 | #define ENCR_ALG_AES 2 | ||
305 | #define ENCR_ALG_KASUMI 4 | ||
306 | #define ENCR_ALG_SNOW_3G 5 | ||
307 | #define ENCR_ALG_ZUC 6 | ||
308 | |||
309 | /* Register bits - REG_GOPROC */ | ||
310 | #define GO_SHIFT 0 | ||
311 | #define CLR_CNTXT_SHIFT 1 | ||
312 | #define RESULTS_DUMP_SHIFT 2 | ||
313 | |||
314 | /* Register bits - REG_ENGINES_AVAIL */ | ||
315 | #define ENCR_AES_SEL_SHIFT 0 | ||
316 | #define DES_SEL_SHIFT 1 | ||
317 | #define ENCR_SNOW3G_SEL_SHIFT 2 | ||
318 | #define ENCR_KASUMI_SEL_SHIFT 3 | ||
319 | #define SHA_SEL_SHIFT 4 | ||
320 | #define SHA512_SEL_SHIFT 5 | ||
321 | #define AUTH_AES_SEL_SHIFT 6 | ||
322 | #define AUTH_SNOW3G_SEL_SHIFT 7 | ||
323 | #define AUTH_KASUMI_SEL_SHIFT 8 | ||
324 | #define BAM_PIPE_SETS_SHIFT 9 | ||
325 | #define BAM_PIPE_SETS_MASK GENMASK(12, 9) | ||
326 | #define AXI_WR_BEATS_SHIFT 13 | ||
327 | #define AXI_WR_BEATS_MASK GENMASK(18, 13) | ||
328 | #define AXI_RD_BEATS_SHIFT 19 | ||
329 | #define AXI_RD_BEATS_MASK GENMASK(24, 19) | ||
330 | #define ENCR_ZUC_SEL_SHIFT 26 | ||
331 | #define AUTH_ZUC_SEL_SHIFT 27 | ||
332 | #define ZUC_ENABLE_SHIFT 28 | ||
333 | |||
334 | #endif /* _REGS_V5_H_ */ | ||
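
The SHIFT/MASK pairs above follow the usual pattern of composing a control word one field at a time. As a minimal sketch of how REG_ENCR_SEG_CFG could be assembled for AES-128 in CBC mode (the helper name is illustrative, and treating the ENCODE bit as the encrypt direction is an assumption, not something this header states):

    static u32 encr_seg_cfg_aes128_cbc_enc(void)
    {
            u32 cfg = 0;

            cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;          /* bits 2:0 */
            cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT; /* bits 5:3 */
            cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;        /* bits 9:6 */
            cfg |= 1 << ENCODE_SHIFT;                       /* assumed: encrypt */

            return cfg;
    }
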
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c new file mode 100644 index 000000000000..f3385934eed2 --- /dev/null +++ b/drivers/crypto/qce/sha.c | |||
@@ -0,0 +1,588 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #include <linux/device.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <crypto/internal/hash.h> | ||
17 | |||
18 | #include "common.h" | ||
19 | #include "core.h" | ||
20 | #include "sha.h" | ||
21 | |||
22 | /* crypto hw padding constant for the first operation */ | ||
23 | #define SHA_PADDING 64 | ||
24 | #define SHA_PADDING_MASK (SHA_PADDING - 1) | ||
25 | |||
26 | static LIST_HEAD(ahash_algs); | ||
27 | |||
28 | static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = { | ||
29 | SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0 | ||
30 | }; | ||
31 | |||
32 | static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = { | ||
33 | SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, | ||
34 | SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 | ||
35 | }; | ||
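
Both tables are sized to SHA256_DIGEST_SIZE words even though SHA-1 needs only five; the SHA-1 initial values are zero-padded, presumably so that either table can be loaded unchanged into the same run of REG_AUTH_IVn registers.
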
36 | |||
37 | static void qce_ahash_done(void *data) | ||
38 | { | ||
39 | struct crypto_async_request *async_req = data; | ||
40 | struct ahash_request *req = ahash_request_cast(async_req); | ||
41 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
42 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
43 | struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm); | ||
44 | struct qce_device *qce = tmpl->qce; | ||
45 | struct qce_result_dump *result = qce->dma.result_buf; | ||
46 | unsigned int digestsize = crypto_ahash_digestsize(ahash); | ||
47 | int error; | ||
48 | u32 status; | ||
49 | |||
50 | error = qce_dma_terminate_all(&qce->dma); | ||
51 | if (error) | ||
52 | dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error); | ||
53 | |||
54 | qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, | ||
55 | rctx->src_chained); | ||
56 | qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0); | ||
57 | |||
58 | memcpy(rctx->digest, result->auth_iv, digestsize); | ||
59 | if (req->result) | ||
60 | memcpy(req->result, result->auth_iv, digestsize); | ||
61 | |||
62 | rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]); | ||
63 | rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]); | ||
64 | |||
65 | error = qce_check_status(qce, &status); | ||
66 | if (error < 0) | ||
67 | dev_dbg(qce->dev, "ahash operation error (%x)\n", status); | ||
68 | |||
69 | req->src = rctx->src_orig; | ||
70 | req->nbytes = rctx->nbytes_orig; | ||
71 | rctx->last_blk = false; | ||
72 | rctx->first_blk = false; | ||
73 | |||
74 | qce->async_req_done(tmpl->qce, error); | ||
75 | } | ||
76 | |||
77 | static int qce_ahash_async_req_handle(struct crypto_async_request *async_req) | ||
78 | { | ||
79 | struct ahash_request *req = ahash_request_cast(async_req); | ||
80 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
81 | struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm); | ||
82 | struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm); | ||
83 | struct qce_device *qce = tmpl->qce; | ||
84 | unsigned long flags = rctx->flags; | ||
85 | int ret; | ||
86 | |||
87 | if (IS_SHA_HMAC(flags)) { | ||
88 | rctx->authkey = ctx->authkey; | ||
89 | rctx->authklen = QCE_SHA_HMAC_KEY_SIZE; | ||
90 | } else if (IS_CMAC(flags)) { | ||
91 | rctx->authkey = ctx->authkey; | ||
92 | rctx->authklen = AES_KEYSIZE_128; | ||
93 | } | ||
94 | |||
95 | rctx->src_nents = qce_countsg(req->src, req->nbytes, | ||
96 | &rctx->src_chained); | ||
97 | ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, | ||
98 | rctx->src_chained); | ||
99 | if (ret < 0) | ||
100 | return ret; | ||
101 | |||
102 | sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); | ||
103 | |||
104 | ret = qce_mapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0); | ||
105 | if (ret < 0) | ||
106 | goto error_unmap_src; | ||
107 | |||
108 | ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents, | ||
109 | &rctx->result_sg, 1, qce_ahash_done, async_req); | ||
110 | if (ret) | ||
111 | goto error_unmap_dst; | ||
112 | |||
113 | qce_dma_issue_pending(&qce->dma); | ||
114 | |||
115 | ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0); | ||
116 | if (ret) | ||
117 | goto error_terminate; | ||
118 | |||
119 | return 0; | ||
120 | |||
121 | error_terminate: | ||
122 | qce_dma_terminate_all(&qce->dma); | ||
123 | error_unmap_dst: | ||
124 | qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0); | ||
125 | error_unmap_src: | ||
126 | qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, | ||
127 | rctx->src_chained); | ||
128 | return ret; | ||
129 | } | ||
130 | |||
131 | static int qce_ahash_init(struct ahash_request *req) | ||
132 | { | ||
133 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
134 | struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); | ||
135 | const u32 *std_iv = tmpl->std_iv; | ||
136 | |||
137 | memset(rctx, 0, sizeof(*rctx)); | ||
138 | rctx->first_blk = true; | ||
139 | rctx->last_blk = false; | ||
140 | rctx->flags = tmpl->alg_flags; | ||
141 | memcpy(rctx->digest, std_iv, sizeof(rctx->digest)); | ||
142 | |||
143 | return 0; | ||
144 | } | ||
145 | |||
146 | static int qce_ahash_export(struct ahash_request *req, void *out) | ||
147 | { | ||
148 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
149 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
150 | unsigned long flags = rctx->flags; | ||
151 | unsigned int digestsize = crypto_ahash_digestsize(ahash); | ||
152 | unsigned int blocksize = | ||
153 | crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash)); | ||
154 | |||
155 | if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) { | ||
156 | struct sha1_state *out_state = out; | ||
157 | |||
158 | out_state->count = rctx->count; | ||
159 | qce_cpu_to_be32p_array((__be32 *)out_state->state, | ||
160 | rctx->digest, digestsize); | ||
161 | memcpy(out_state->buffer, rctx->buf, blocksize); | ||
162 | } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) { | ||
163 | struct sha256_state *out_state = out; | ||
164 | |||
165 | out_state->count = rctx->count; | ||
166 | qce_cpu_to_be32p_array((__be32 *)out_state->state, | ||
167 | rctx->digest, digestsize); | ||
168 | memcpy(out_state->buf, rctx->buf, blocksize); | ||
169 | } else { | ||
170 | return -EINVAL; | ||
171 | } | ||
172 | |||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | static int qce_import_common(struct ahash_request *req, u64 in_count, | ||
177 | const u32 *state, const u8 *buffer, bool hmac) | ||
178 | { | ||
179 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
180 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
181 | unsigned int digestsize = crypto_ahash_digestsize(ahash); | ||
182 | unsigned int blocksize; | ||
183 | u64 count = in_count; | ||
184 | |||
185 | blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash)); | ||
186 | rctx->count = in_count; | ||
187 | memcpy(rctx->buf, buffer, blocksize); | ||
188 | |||
189 | if (in_count <= blocksize) { | ||
190 | rctx->first_blk = 1; | ||
191 | } else { | ||
192 | rctx->first_blk = 0; | ||
193 | /* | ||
194 | * For HMAC, the hardware inserts padding when the first-block | ||
195 | * flag is set, so the byte count must be incremented by 64 | ||
196 | * (SHA_PADDING) after the first block operation. | ||
197 | */ | ||
198 | if (hmac) | ||
199 | count += SHA_PADDING; | ||
200 | } | ||
201 | |||
202 | rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK); | ||
203 | rctx->byte_count[1] = (__force __be32)(count >> 32); | ||
204 | qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state, | ||
205 | digestsize); | ||
206 | rctx->buflen = (unsigned int)(in_count & (blocksize - 1)); | ||
207 | |||
208 | return 0; | ||
209 | } | ||
210 | |||
211 | static int qce_ahash_import(struct ahash_request *req, const void *in) | ||
212 | { | ||
213 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
214 | unsigned long flags = rctx->flags; | ||
215 | bool hmac = IS_SHA_HMAC(flags); | ||
216 | int ret = -EINVAL; | ||
217 | |||
218 | if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) { | ||
219 | const struct sha1_state *state = in; | ||
220 | |||
221 | ret = qce_import_common(req, state->count, state->state, | ||
222 | state->buffer, hmac); | ||
223 | } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) { | ||
224 | const struct sha256_state *state = in; | ||
225 | |||
226 | ret = qce_import_common(req, state->count, state->state, | ||
227 | state->buf, hmac); | ||
228 | } | ||
229 | |||
230 | return ret; | ||
231 | } | ||
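
qce_ahash_export() and qce_ahash_import() back the generic crypto_ahash_export()/crypto_ahash_import() entry points, letting a caller snapshot a partial hash and resume it later. A consumer-side sketch (synchronous, helper name illustrative; the buffer is sized for the larger sha256_state, which covers both algorithms here):

    static int snapshot_and_resume(struct ahash_request *req)
    {
            u8 state[sizeof(struct sha256_state)];  /* >= halg.statesize */
            int ret;

            ret = crypto_ahash_export(req, state);  /* ends in qce_ahash_export() */
            if (ret)
                    return ret;
            /* ... the request may be reused for other work here ... */
            return crypto_ahash_import(req, state); /* qce_ahash_import() */
    }
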
232 | |||
233 | static int qce_ahash_update(struct ahash_request *req) | ||
234 | { | ||
235 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
236 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
237 | struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); | ||
238 | struct qce_device *qce = tmpl->qce; | ||
239 | struct scatterlist *sg_last, *sg; | ||
240 | unsigned int total, len; | ||
241 | unsigned int hash_later; | ||
242 | unsigned int nbytes; | ||
243 | unsigned int blocksize; | ||
244 | |||
245 | blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | ||
246 | rctx->count += req->nbytes; | ||
247 | |||
248 | /* account for any bytes buffered by previous updates */ | ||
249 | total = req->nbytes + rctx->buflen; | ||
250 | |||
251 | if (total <= blocksize) { | ||
252 | scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src, | ||
253 | 0, req->nbytes, 0); | ||
254 | rctx->buflen += req->nbytes; | ||
255 | return 0; | ||
256 | } | ||
257 | |||
258 | /* save the original req structure fields */ | ||
259 | rctx->src_orig = req->src; | ||
260 | rctx->nbytes_orig = req->nbytes; | ||
261 | |||
262 | /* | ||
263 | * If data is left over from a previous update, copy it into the | ||
264 | * temporary buffer so it is combined with the current request bytes. | ||
265 | */ | ||
266 | if (rctx->buflen) | ||
267 | memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen); | ||
268 | |||
269 | /* calculate how many bytes will be hashed later */ | ||
270 | hash_later = total % blocksize; | ||
271 | if (hash_later) { | ||
272 | unsigned int src_offset = req->nbytes - hash_later; | ||
273 | scatterwalk_map_and_copy(rctx->buf, req->src, src_offset, | ||
274 | hash_later, 0); | ||
275 | } | ||
276 | |||
277 | /* here nbytes is a multiple of blocksize */ | ||
278 | nbytes = total - hash_later; | ||
279 | |||
280 | len = rctx->buflen; | ||
281 | sg = sg_last = req->src; | ||
282 | |||
283 | while (len < nbytes && sg) { | ||
284 | if (len + sg_dma_len(sg) > nbytes) | ||
285 | break; | ||
286 | len += sg_dma_len(sg); | ||
287 | sg_last = sg; | ||
288 | sg = scatterwalk_sg_next(sg); | ||
289 | } | ||
290 | |||
291 | if (!sg_last) | ||
292 | return -EINVAL; | ||
293 | |||
294 | sg_mark_end(sg_last); | ||
295 | |||
296 | if (rctx->buflen) { | ||
297 | sg_init_table(rctx->sg, 2); | ||
298 | sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen); | ||
299 | scatterwalk_sg_chain(rctx->sg, 2, req->src); | ||
300 | req->src = rctx->sg; | ||
301 | } | ||
302 | |||
303 | req->nbytes = nbytes; | ||
304 | rctx->buflen = hash_later; | ||
305 | |||
306 | return qce->async_req_enqueue(tmpl->qce, &req->base); | ||
307 | } | ||
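
To make the bookkeeping above concrete: with a 64-byte block size, buflen = 20 and a 100-byte update give total = 120 and hash_later = 120 % 64 = 56. The last 56 request bytes are copied into rctx->buf for a later update or final, while the 20 previously buffered bytes (staged in tmpbuf and chained ahead of req->src through rctx->sg) plus the first 44 request bytes, 64 in all, are submitted to the hardware.
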
308 | |||
309 | static int qce_ahash_final(struct ahash_request *req) | ||
310 | { | ||
311 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
312 | struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); | ||
313 | struct qce_device *qce = tmpl->qce; | ||
314 | |||
315 | if (!rctx->buflen) | ||
316 | return 0; | ||
317 | |||
318 | rctx->last_blk = true; | ||
319 | |||
320 | rctx->src_orig = req->src; | ||
321 | rctx->nbytes_orig = req->nbytes; | ||
322 | |||
323 | memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen); | ||
324 | sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen); | ||
325 | |||
326 | req->src = rctx->sg; | ||
327 | req->nbytes = rctx->buflen; | ||
328 | |||
329 | return qce->async_req_enqueue(tmpl->qce, &req->base); | ||
330 | } | ||
331 | |||
332 | static int qce_ahash_digest(struct ahash_request *req) | ||
333 | { | ||
334 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
335 | struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); | ||
336 | struct qce_device *qce = tmpl->qce; | ||
337 | int ret; | ||
338 | |||
339 | ret = qce_ahash_init(req); | ||
340 | if (ret) | ||
341 | return ret; | ||
342 | |||
343 | rctx->src_orig = req->src; | ||
344 | rctx->nbytes_orig = req->nbytes; | ||
345 | rctx->first_blk = true; | ||
346 | rctx->last_blk = true; | ||
347 | |||
348 | return qce->async_req_enqueue(tmpl->qce, &req->base); | ||
349 | } | ||
350 | |||
351 | struct qce_ahash_result { | ||
352 | struct completion completion; | ||
353 | int error; | ||
354 | }; | ||
355 | |||
356 | static void qce_digest_complete(struct crypto_async_request *req, int error) | ||
357 | { | ||
358 | struct qce_ahash_result *result = req->data; | ||
359 | |||
360 | if (error == -EINPROGRESS) | ||
361 | return; | ||
362 | |||
363 | result->error = error; | ||
364 | complete(&result->completion); | ||
365 | } | ||
366 | |||
367 | static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, | ||
368 | unsigned int keylen) | ||
369 | { | ||
370 | unsigned int digestsize = crypto_ahash_digestsize(tfm); | ||
371 | struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base); | ||
372 | struct qce_ahash_result result; | ||
373 | struct ahash_request *req; | ||
374 | struct scatterlist sg; | ||
375 | unsigned int blocksize; | ||
376 | struct crypto_ahash *ahash_tfm; | ||
377 | u8 *buf; | ||
378 | int ret; | ||
379 | const char *alg_name; | ||
380 | |||
381 | blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | ||
382 | memset(ctx->authkey, 0, sizeof(ctx->authkey)); | ||
383 | |||
384 | if (keylen <= blocksize) { | ||
385 | memcpy(ctx->authkey, key, keylen); | ||
386 | return 0; | ||
387 | } | ||
388 | |||
389 | if (digestsize == SHA1_DIGEST_SIZE) | ||
390 | alg_name = "sha1-qce"; | ||
391 | else if (digestsize == SHA256_DIGEST_SIZE) | ||
392 | alg_name = "sha256-qce"; | ||
393 | else | ||
394 | return -EINVAL; | ||
395 | |||
396 | ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH, | ||
397 | CRYPTO_ALG_TYPE_AHASH_MASK); | ||
398 | if (IS_ERR(ahash_tfm)) | ||
399 | return PTR_ERR(ahash_tfm); | ||
400 | |||
401 | req = ahash_request_alloc(ahash_tfm, GFP_KERNEL); | ||
402 | if (!req) { | ||
403 | ret = -ENOMEM; | ||
404 | goto err_free_ahash; | ||
405 | } | ||
406 | |||
407 | init_completion(&result.completion); | ||
408 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
409 | qce_digest_complete, &result); | ||
410 | crypto_ahash_clear_flags(ahash_tfm, ~0); | ||
411 | |||
412 | buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL); | ||
413 | if (!buf) { | ||
414 | ret = -ENOMEM; | ||
415 | goto err_free_req; | ||
416 | } | ||
417 | |||
418 | memcpy(buf, key, keylen); | ||
419 | sg_init_one(&sg, buf, keylen); | ||
420 | ahash_request_set_crypt(req, &sg, ctx->authkey, keylen); | ||
421 | |||
422 | ret = crypto_ahash_digest(req); | ||
423 | if (ret == -EINPROGRESS || ret == -EBUSY) { | ||
424 | ret = wait_for_completion_interruptible(&result.completion); | ||
425 | if (!ret) | ||
426 | ret = result.error; | ||
427 | } | ||
428 | |||
429 | if (ret) | ||
430 | crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
431 | |||
432 | kfree(buf); | ||
433 | err_free_req: | ||
434 | ahash_request_free(req); | ||
435 | err_free_ahash: | ||
436 | crypto_free_ahash(ahash_tfm); | ||
437 | return ret; | ||
438 | } | ||
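
As usual for HMAC (RFC 2104), a key longer than the block size is first digested, here by dispatching to the driver's own sha1-qce or sha256-qce implementation; shorter keys are used verbatim and zero-padded. From a consumer's point of view all of this hides behind crypto_ahash_setkey(); a sketch (helper name illustrative, and whether "hmac(sha256)" actually resolves to hmac-sha256-qce depends on the priorities of the implementations registered on the system):

    static struct crypto_ahash *alloc_hmac_sha256(const u8 *key,
                                                  unsigned int keylen)
    {
            struct crypto_ahash *tfm;
            int ret;

            tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
            if (IS_ERR(tfm))
                    return tfm;

            ret = crypto_ahash_setkey(tfm, key, keylen);
            if (ret) {
                    crypto_free_ahash(tfm);
                    return ERR_PTR(ret);
            }
            return tfm;
    }
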
439 | |||
440 | static int qce_ahash_cra_init(struct crypto_tfm *tfm) | ||
441 | { | ||
442 | struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); | ||
443 | struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm); | ||
444 | |||
445 | crypto_ahash_set_reqsize(ahash, sizeof(struct qce_sha_reqctx)); | ||
446 | memset(ctx, 0, sizeof(*ctx)); | ||
447 | return 0; | ||
448 | } | ||
449 | |||
450 | struct qce_ahash_def { | ||
451 | unsigned long flags; | ||
452 | const char *name; | ||
453 | const char *drv_name; | ||
454 | unsigned int digestsize; | ||
455 | unsigned int blocksize; | ||
456 | unsigned int statesize; | ||
457 | const u32 *std_iv; | ||
458 | }; | ||
459 | |||
460 | static const struct qce_ahash_def ahash_def[] = { | ||
461 | { | ||
462 | .flags = QCE_HASH_SHA1, | ||
463 | .name = "sha1", | ||
464 | .drv_name = "sha1-qce", | ||
465 | .digestsize = SHA1_DIGEST_SIZE, | ||
466 | .blocksize = SHA1_BLOCK_SIZE, | ||
467 | .statesize = sizeof(struct sha1_state), | ||
468 | .std_iv = std_iv_sha1, | ||
469 | }, | ||
470 | { | ||
471 | .flags = QCE_HASH_SHA256, | ||
472 | .name = "sha256", | ||
473 | .drv_name = "sha256-qce", | ||
474 | .digestsize = SHA256_DIGEST_SIZE, | ||
475 | .blocksize = SHA256_BLOCK_SIZE, | ||
476 | .statesize = sizeof(struct sha256_state), | ||
477 | .std_iv = std_iv_sha256, | ||
478 | }, | ||
479 | { | ||
480 | .flags = QCE_HASH_SHA1_HMAC, | ||
481 | .name = "hmac(sha1)", | ||
482 | .drv_name = "hmac-sha1-qce", | ||
483 | .digestsize = SHA1_DIGEST_SIZE, | ||
484 | .blocksize = SHA1_BLOCK_SIZE, | ||
485 | .statesize = sizeof(struct sha1_state), | ||
486 | .std_iv = std_iv_sha1, | ||
487 | }, | ||
488 | { | ||
489 | .flags = QCE_HASH_SHA256_HMAC, | ||
490 | .name = "hmac(sha256)", | ||
491 | .drv_name = "hmac-sha256-qce", | ||
492 | .digestsize = SHA256_DIGEST_SIZE, | ||
493 | .blocksize = SHA256_BLOCK_SIZE, | ||
494 | .statesize = sizeof(struct sha256_state), | ||
495 | .std_iv = std_iv_sha256, | ||
496 | }, | ||
497 | }; | ||
498 | |||
499 | static int qce_ahash_register_one(const struct qce_ahash_def *def, | ||
500 | struct qce_device *qce) | ||
501 | { | ||
502 | struct qce_alg_template *tmpl; | ||
503 | struct ahash_alg *alg; | ||
504 | struct crypto_alg *base; | ||
505 | int ret; | ||
506 | |||
507 | tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL); | ||
508 | if (!tmpl) | ||
509 | return -ENOMEM; | ||
510 | |||
511 | tmpl->std_iv = def->std_iv; | ||
512 | |||
513 | alg = &tmpl->alg.ahash; | ||
514 | alg->init = qce_ahash_init; | ||
515 | alg->update = qce_ahash_update; | ||
516 | alg->final = qce_ahash_final; | ||
517 | alg->digest = qce_ahash_digest; | ||
518 | alg->export = qce_ahash_export; | ||
519 | alg->import = qce_ahash_import; | ||
520 | if (IS_SHA_HMAC(def->flags)) | ||
521 | alg->setkey = qce_ahash_hmac_setkey; | ||
522 | alg->halg.digestsize = def->digestsize; | ||
523 | alg->halg.statesize = def->statesize; | ||
524 | |||
525 | base = &alg->halg.base; | ||
526 | base->cra_blocksize = def->blocksize; | ||
527 | base->cra_priority = 300; | ||
528 | base->cra_flags = CRYPTO_ALG_ASYNC; | ||
529 | base->cra_ctxsize = sizeof(struct qce_sha_ctx); | ||
530 | base->cra_alignmask = 0; | ||
531 | base->cra_module = THIS_MODULE; | ||
532 | base->cra_init = qce_ahash_cra_init; | ||
533 | INIT_LIST_HEAD(&base->cra_list); | ||
534 | |||
535 | snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); | ||
536 | snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | ||
537 | def->drv_name); | ||
538 | |||
539 | INIT_LIST_HEAD(&tmpl->entry); | ||
540 | tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH; | ||
541 | tmpl->alg_flags = def->flags; | ||
542 | tmpl->qce = qce; | ||
543 | |||
544 | ret = crypto_register_ahash(alg); | ||
545 | if (ret) { | ||
546 | kfree(tmpl); | ||
547 | dev_err(qce->dev, "%s registration failed\n", base->cra_name); | ||
548 | return ret; | ||
549 | } | ||
550 | |||
551 | list_add_tail(&tmpl->entry, &ahash_algs); | ||
552 | dev_dbg(qce->dev, "%s is registered\n", base->cra_name); | ||
553 | return 0; | ||
554 | } | ||
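
A cra_priority of 300 means lookups by the bare names ("sha1", "hmac(sha256)", ...) will prefer these hardware implementations over lower-priority software ones once registration succeeds; a caller that specifically wants another implementation can still request it by driver name.
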
555 | |||
556 | static void qce_ahash_unregister(struct qce_device *qce) | ||
557 | { | ||
558 | struct qce_alg_template *tmpl, *n; | ||
559 | |||
560 | list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) { | ||
561 | crypto_unregister_ahash(&tmpl->alg.ahash); | ||
562 | list_del(&tmpl->entry); | ||
563 | kfree(tmpl); | ||
564 | } | ||
565 | } | ||
566 | |||
567 | static int qce_ahash_register(struct qce_device *qce) | ||
568 | { | ||
569 | int ret, i; | ||
570 | |||
571 | for (i = 0; i < ARRAY_SIZE(ahash_def); i++) { | ||
572 | ret = qce_ahash_register_one(&ahash_def[i], qce); | ||
573 | if (ret) | ||
574 | goto err; | ||
575 | } | ||
576 | |||
577 | return 0; | ||
578 | err: | ||
579 | qce_ahash_unregister(qce); | ||
580 | return ret; | ||
581 | } | ||
582 | |||
583 | const struct qce_algo_ops ahash_ops = { | ||
584 | .type = CRYPTO_ALG_TYPE_AHASH, | ||
585 | .register_algs = qce_ahash_register, | ||
586 | .unregister_algs = qce_ahash_unregister, | ||
587 | .async_req_handle = qce_ahash_async_req_handle, | ||
588 | }; | ||
diff --git a/drivers/crypto/qce/sha.h b/drivers/crypto/qce/sha.h new file mode 100644 index 000000000000..286f0d5397f3 --- /dev/null +++ b/drivers/crypto/qce/sha.h | |||
@@ -0,0 +1,81 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #ifndef _SHA_H_ | ||
15 | #define _SHA_H_ | ||
16 | |||
17 | #include <crypto/scatterwalk.h> | ||
18 | #include <crypto/sha.h> | ||
19 | |||
20 | #include "common.h" | ||
21 | #include "core.h" | ||
22 | |||
23 | #define QCE_SHA_MAX_BLOCKSIZE SHA256_BLOCK_SIZE | ||
24 | #define QCE_SHA_MAX_DIGESTSIZE SHA256_DIGEST_SIZE | ||
25 | |||
26 | struct qce_sha_ctx { | ||
27 | u8 authkey[QCE_SHA_MAX_BLOCKSIZE]; | ||
28 | }; | ||
29 | |||
30 | /** | ||
31 | * struct qce_sha_reqctx - holds private ahash objects per request | ||
32 | * @buf: carry buffer used during update, import and export | ||
33 | * @tmpbuf: staging buffer for previously buffered bytes chained for DMA | ||
34 | * @digest: calculated digest buffer | ||
35 | * @buflen: number of valid bytes in @buf | ||
36 | * @flags: operation flags | ||
37 | * @src_orig: original request sg list | ||
38 | * @nbytes_orig: original request number of bytes | ||
39 | * @src_chained: is source scatterlist chained | ||
40 | * @src_nents: source number of entries | ||
41 | * @byte_count: byte count | ||
42 | * @count: save count in states during update, import and export | ||
43 | * @first_blk: is it the first block | ||
44 | * @last_blk: is it the last block | ||
45 | * @sg: used to chain sg lists | ||
46 | * @authkey: pointer to auth key in sha ctx | ||
47 | * @authklen: auth key length | ||
48 | * @result_sg: scatterlist used for result buffer | ||
49 | */ | ||
50 | struct qce_sha_reqctx { | ||
51 | u8 buf[QCE_SHA_MAX_BLOCKSIZE]; | ||
52 | u8 tmpbuf[QCE_SHA_MAX_BLOCKSIZE]; | ||
53 | u8 digest[QCE_SHA_MAX_DIGESTSIZE]; | ||
54 | unsigned int buflen; | ||
55 | unsigned long flags; | ||
56 | struct scatterlist *src_orig; | ||
57 | unsigned int nbytes_orig; | ||
58 | bool src_chained; | ||
59 | int src_nents; | ||
60 | __be32 byte_count[2]; | ||
61 | u64 count; | ||
62 | bool first_blk; | ||
63 | bool last_blk; | ||
64 | struct scatterlist sg[2]; | ||
65 | u8 *authkey; | ||
66 | unsigned int authklen; | ||
67 | struct scatterlist result_sg; | ||
68 | }; | ||
69 | |||
70 | static inline struct qce_alg_template *to_ahash_tmpl(struct crypto_tfm *tfm) | ||
71 | { | ||
72 | struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); | ||
73 | struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash), | ||
74 | struct ahash_alg, halg); | ||
75 | |||
76 | return container_of(alg, struct qce_alg_template, alg.ahash); | ||
77 | } | ||
78 | |||
79 | extern const struct qce_algo_ops ahash_ops; | ||
80 | |||
81 | #endif /* _SHA_H_ */ | ||
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index a999f537228f..92105f3dc8e0 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c | |||
@@ -190,7 +190,7 @@ static void add_session_id(struct cryp_ctx *ctx) | |||
190 | static irqreturn_t cryp_interrupt_handler(int irq, void *param) | 190 | static irqreturn_t cryp_interrupt_handler(int irq, void *param) |
191 | { | 191 | { |
192 | struct cryp_ctx *ctx; | 192 | struct cryp_ctx *ctx; |
193 | int i; | 193 | int count; |
194 | struct cryp_device_data *device_data; | 194 | struct cryp_device_data *device_data; |
195 | 195 | ||
196 | if (param == NULL) { | 196 | if (param == NULL) { |
@@ -215,12 +215,11 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param) | |||
215 | if (cryp_pending_irq_src(device_data, | 215 | if (cryp_pending_irq_src(device_data, |
216 | CRYP_IRQ_SRC_OUTPUT_FIFO)) { | 216 | CRYP_IRQ_SRC_OUTPUT_FIFO)) { |
217 | if (ctx->outlen / ctx->blocksize > 0) { | 217 | if (ctx->outlen / ctx->blocksize > 0) { |
218 | for (i = 0; i < ctx->blocksize / 4; i++) { | 218 | count = ctx->blocksize / 4; |
219 | *(ctx->outdata) = readl_relaxed( | 219 | |
220 | &device_data->base->dout); | 220 | readsl(&device_data->base->dout, ctx->outdata, count); |
221 | ctx->outdata += 4; | 221 | ctx->outdata += count; |
222 | ctx->outlen -= 4; | 222 | ctx->outlen -= count; |
223 | } | ||
224 | 223 | ||
225 | if (ctx->outlen == 0) { | 224 | if (ctx->outlen == 0) { |
226 | cryp_disable_irq_src(device_data, | 225 | cryp_disable_irq_src(device_data, |
@@ -230,12 +229,12 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param) | |||
230 | } else if (cryp_pending_irq_src(device_data, | 229 | } else if (cryp_pending_irq_src(device_data, |
231 | CRYP_IRQ_SRC_INPUT_FIFO)) { | 230 | CRYP_IRQ_SRC_INPUT_FIFO)) { |
232 | if (ctx->datalen / ctx->blocksize > 0) { | 231 | if (ctx->datalen / ctx->blocksize > 0) { |
233 | for (i = 0 ; i < ctx->blocksize / 4; i++) { | 232 | count = ctx->blocksize / 4; |
234 | writel_relaxed(ctx->indata, | 233 | |
235 | &device_data->base->din); | 234 | writesl(&device_data->base->din, ctx->indata, count); |
236 | ctx->indata += 4; | 235 | |
237 | ctx->datalen -= 4; | 236 | ctx->indata += count; |
238 | } | 237 | ctx->datalen -= count; |
239 | 238 | ||
240 | if (ctx->datalen == 0) | 239 | if (ctx->datalen == 0) |
241 | cryp_disable_irq_src(device_data, | 240 | cryp_disable_irq_src(device_data, |
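
The hunks above replace open-coded word-at-a-time FIFO loops with readsl()/writesl(), which move count 32-bit words between a buffer and a single, non-incrementing MMIO address using raw native-endian accesses. A simplified equivalent of the read side (illustrative helper, not the asm-generic/io.h definition):

    static void fifo_readsl_equiv(const volatile void __iomem *addr,
                                  u32 *buf, unsigned int count)
    {
            while (count--)
                    *buf++ = __raw_readl(addr);     /* addr never advances */
    }
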