path: root/drivers/crypto
author     Linus Torvalds <torvalds@linux-foundation.org>  2015-11-04 12:11:12 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-11-04 12:11:12 -0500
commit     ccc9d4a6d640cbde05d519edeb727881646cf71b (patch)
tree       736c90b100703501d5e3fa3eccc57a48f70bef14 /drivers/crypto
parent     66ef3493d4bb387f5a83915e33dc893102fd1b43 (diff)
parent     271817a3e92c0455bda5856d87eca244ad67d3a2 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "API:
   - Add support for cipher output IVs in testmgr
   - Add missing crypto_ahash_blocksize helper
   - Mark authenc and des ciphers as not allowed under FIPS.

  Algorithms:
   - Add CRC support to 842 compression
   - Add keywrap algorithm
   - A number of changes to the akcipher interface:
      + Separate functions for setting public/private keys.
      + Use SG lists.

  Drivers:
   - Add Intel SHA Extension optimised SHA1 and SHA256
   - Use dma_map_sg instead of custom functions in crypto drivers
   - Add support for STM32 RNG
   - Add support for ST RNG
   - Add Device Tree support to exynos RNG driver
   - Add support for mxs-dcp crypto device on MX6SL
   - Add xts(aes) support to caam
   - Add ctr(aes) and xts(aes) support to qat
   - A large set of fixes from Russell King for the marvell/cesa driver"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (115 commits)
  crypto: asymmetric_keys - Fix unaligned access in x509_get_sig_params()
  crypto: akcipher - Don't #include crypto/public_key.h as the contents aren't used
  hwrng: exynos - Add Device Tree support
  hwrng: exynos - Fix missing configuration after suspend to RAM
  hwrng: exynos - Add timeout for waiting on init done
  dt-bindings: rng: Describe Exynos4 PRNG bindings
  crypto: marvell/cesa - use __le32 for hardware descriptors
  crypto: marvell/cesa - fix missing cpu_to_le32() in mv_cesa_dma_add_op()
  crypto: marvell/cesa - use memcpy_fromio()/memcpy_toio()
  crypto: marvell/cesa - use gfp_t for gfp flags
  crypto: marvell/cesa - use dma_addr_t for cur_dma
  crypto: marvell/cesa - use readl_relaxed()/writel_relaxed()
  crypto: caam - fix indentation of close braces
  crypto: caam - only export the state we really need to export
  crypto: caam - fix non-block aligned hash calculation
  crypto: caam - avoid needlessly saving and restoring caam_hash_ctx
  crypto: caam - print errno code when hash registration fails
  crypto: marvell/cesa - fix memory leak
  crypto: marvell/cesa - fix first-fragment handling in mv_cesa_ahash_dma_last_req()
  crypto: marvell/cesa - rearrange handling for sw padded hashes
  ...
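One of the API items above is the akcipher rework: key handling is split into separate public/private setters and the data now travels through scatterlists. The sketch below shows roughly how a caller might use that interface; it is a hedged illustration, not code from this merge, and example_rsa_encrypt(), the key blob and the buffers are placeholders (the "rsa" implementation expects an ASN.1-encoded key).

#include <crypto/akcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_rsa_encrypt(const void *asn1_pub_key, unsigned int keylen,
			       void *in, unsigned int in_len,
			       void *out, unsigned int out_len)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	struct scatterlist src, dst;
	int err;

	/* ask for a synchronous implementation to keep the sketch simple */
	tfm = crypto_alloc_akcipher("rsa", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* new in this series: public and private keys have separate setters */
	err = crypto_akcipher_set_pub_key(tfm, asn1_pub_key, keylen);
	if (err)
		goto out_free_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* new in this series: input/output are scatterlists, not flat buffers */
	sg_init_one(&src, in, in_len);
	sg_init_one(&dst, out, out_len);
	akcipher_request_set_crypt(req, &src, &dst, in_len, out_len);

	err = crypto_akcipher_encrypt(req);

	akcipher_request_free(req);
out_free_tfm:
	crypto_free_akcipher(tfm);
	return err;
}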
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig | 5
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 23
-rw-r--r--  drivers/crypto/atmel-aes.c | 44
-rw-r--r--  drivers/crypto/atmel-sha.c | 33
-rw-r--r--  drivers/crypto/atmel-tdes.c | 35
-rw-r--r--  drivers/crypto/bfin_crc.c | 25
-rw-r--r--  drivers/crypto/caam/caamalg.c | 232
-rw-r--r--  drivers/crypto/caam/caamhash.c | 131
-rw-r--r--  drivers/crypto/caam/desc.h | 1
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h | 72
-rw-r--r--  drivers/crypto/ccp/Kconfig | 13
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 20
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-main.c | 6
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 13
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 108
-rw-r--r--  drivers/crypto/ccp/ccp-pci.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-platform.c | 6
-rw-r--r--  drivers/crypto/marvell/cesa.h | 55
-rw-r--r--  drivers/crypto/marvell/cipher.c | 13
-rw-r--r--  drivers/crypto/marvell/hash.c | 471
-rw-r--r--  drivers/crypto/marvell/tdma.c | 42
-rw-r--r--  drivers/crypto/n2_core.c | 2
-rw-r--r--  drivers/crypto/nx/nx-842-powernv.c | 4
-rw-r--r--  drivers/crypto/nx/nx-842-pseries.c | 8
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 1
-rw-r--r--  drivers/crypto/qat/qat_common/Makefile | 12
-rw-r--r--  drivers/crypto/qat/qat_common/adf_common_drv.h | 4
-rw-r--r--  drivers/crypto/qat/qat_common/adf_ctl_drv.c | 6
-rw-r--r--  drivers/crypto/qat/qat_common/adf_init.c | 8
-rw-r--r--  drivers/crypto/qat/qat_common/adf_sriov.c | 7
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 178
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c | 213
-rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.c | 79
-rw-r--r--  drivers/crypto/qat/qat_common/qat_hal.c | 5
-rw-r--r--  drivers/crypto/qat/qat_common/qat_rsakey.asn1 | 5
-rw-r--r--  drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1 | 11
-rw-r--r--  drivers/crypto/qat/qat_common/qat_rsapubkey.asn1 | 4
-rw-r--r--  drivers/crypto/qce/ablkcipher.c | 30
-rw-r--r--  drivers/crypto/qce/cipher.h | 4
-rw-r--r--  drivers/crypto/qce/dma.c | 52
-rw-r--r--  drivers/crypto/qce/dma.h | 5
-rw-r--r--  drivers/crypto/qce/sha.c | 18
-rw-r--r--  drivers/crypto/qce/sha.h | 2
-rw-r--r--  drivers/crypto/sahara.c | 108
-rw-r--r--  drivers/crypto/talitos.c | 104
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 71
-rw-r--r--  drivers/crypto/ux500/hash/hash_core.c | 50
47 files changed, 1143 insertions, 1198 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index d234719065a5..2569e043317e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -420,7 +420,7 @@ config CRYPTO_DEV_CCP
420 bool "Support for AMD Cryptographic Coprocessor" 420 bool "Support for AMD Cryptographic Coprocessor"
421 depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM 421 depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM
422 help 422 help
423 The AMD Cryptographic Coprocessor provides hardware support 423 The AMD Cryptographic Coprocessor provides hardware offload support
424 for encryption, hashing and related operations. 424 for encryption, hashing and related operations.
425 425
426if CRYPTO_DEV_CCP 426if CRYPTO_DEV_CCP
@@ -429,7 +429,8 @@ endif
429 429
430config CRYPTO_DEV_MXS_DCP 430config CRYPTO_DEV_MXS_DCP
431 tristate "Support for Freescale MXS DCP" 431 tristate "Support for Freescale MXS DCP"
432 depends on ARCH_MXS 432 depends on (ARCH_MXS || ARCH_MXC)
433 select STMP_DEVICE
433 select CRYPTO_CBC 434 select CRYPTO_CBC
434 select CRYPTO_ECB 435 select CRYPTO_ECB
435 select CRYPTO_AES 436 select CRYPTO_AES
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 192a8fa325c1..58a630e55d5d 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -740,26 +740,6 @@ void crypto4xx_return_pd(struct crypto4xx_device *dev,
740 pd_uinfo->state = PD_ENTRY_FREE; 740 pd_uinfo->state = PD_ENTRY_FREE;
741} 741}
742 742
743/*
744 * derive number of elements in scatterlist
745 * Shamlessly copy from talitos.c
746 */
747static int get_sg_count(struct scatterlist *sg_list, int nbytes)
748{
749 struct scatterlist *sg = sg_list;
750 int sg_nents = 0;
751
752 while (nbytes) {
753 sg_nents++;
754 if (sg->length > nbytes)
755 break;
756 nbytes -= sg->length;
757 sg = sg_next(sg);
758 }
759
760 return sg_nents;
761}
762
763static u32 get_next_gd(u32 current) 743static u32 get_next_gd(u32 current)
764{ 744{
765 if (current != PPC4XX_LAST_GD) 745 if (current != PPC4XX_LAST_GD)
@@ -800,7 +780,7 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
800 u32 gd_idx = 0; 780 u32 gd_idx = 0;
801 781
802 /* figure how many gd is needed */ 782 /* figure how many gd is needed */
803 num_gd = get_sg_count(src, datalen); 783 num_gd = sg_nents_for_len(src, datalen);
804 if (num_gd == 1) 784 if (num_gd == 1)
805 num_gd = 0; 785 num_gd = 0;
806 786
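The hunk above drops the driver-local get_sg_count() in favour of the generic sg_nents_for_len() helper. As a rough illustration of why that is a drop-in replacement, the helper counts scatterlist entries until the requested number of bytes is covered, much like the removed loop did. The sketch below is simplified (it omits the helper's error return for a list shorter than len), so it is not the kernel's exact implementation:

#include <linux/scatterlist.h>

/* Simplified sketch of what sg_nents_for_len() computes (error path omitted). */
static int sg_nents_for_len_sketch(struct scatterlist *sg, u64 len)
{
	int nents = 0;
	u64 covered = 0;

	for (; sg; sg = sg_next(sg)) {
		nents++;
		covered += sg->length;
		if (covered >= len)
			break;
	}
	return nents;
}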
@@ -1284,6 +1264,7 @@ static const struct of_device_id crypto4xx_match[] = {
1284 { .compatible = "amcc,ppc4xx-crypto",}, 1264 { .compatible = "amcc,ppc4xx-crypto",},
1285 { }, 1265 { },
1286}; 1266};
1267MODULE_DEVICE_TABLE(of, crypto4xx_match);
1287 1268
1288static struct platform_driver crypto4xx_driver = { 1269static struct platform_driver crypto4xx_driver = {
1289 .driver = { 1270 .driver = {
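The other change in this file, adding MODULE_DEVICE_TABLE(of, crypto4xx_match), exports the device-tree compatible strings as module aliases so the module can be autoloaded. A minimal, self-contained sketch of that pattern, with a hypothetical "vendor,example-crypto" compatible and example_crypto_* names rather than the crypto4xx code:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_crypto_probe(struct platform_device *pdev)
{
	return 0;	/* resource setup would go here */
}

static const struct of_device_id example_crypto_of_match[] = {
	{ .compatible = "vendor,example-crypto" },
	{ }
};
/* Emits the alias table modprobe/udev use to autoload the module from DT. */
MODULE_DEVICE_TABLE(of, example_crypto_of_match);

static struct platform_driver example_crypto_driver = {
	.probe = example_crypto_probe,
	.driver = {
		.name = "example-crypto",
		.of_match_table = example_crypto_of_match,
	},
};
module_platform_driver(example_crypto_driver);

MODULE_LICENSE("GPL");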
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 0f9a9dc06a83..fb16d812c8f5 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -260,7 +260,11 @@ static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
260 260
261static int atmel_aes_hw_init(struct atmel_aes_dev *dd) 261static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
262{ 262{
263 clk_prepare_enable(dd->iclk); 263 int err;
264
265 err = clk_prepare_enable(dd->iclk);
266 if (err)
267 return err;
264 268
265 if (!(dd->flags & AES_FLAGS_INIT)) { 269 if (!(dd->flags & AES_FLAGS_INIT)) {
266 atmel_aes_write(dd, AES_CR, AES_CR_SWRST); 270 atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
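This hw_init() change, repeated for atmel-sha and atmel-tdes below, stops ignoring the return value of clk_prepare_enable(). The pattern in isolation, as a hedged sketch with only the clock passed in:

#include <linux/clk.h>

/* clk_prepare_enable() can fail, so propagate its error instead of ignoring it. */
static int example_hw_init(struct clk *iclk)
{
	int err = clk_prepare_enable(iclk);

	if (err)
		return err;

	/* ... program the peripheral; clk_disable_unprepare(iclk) once idle ... */
	return 0;
}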
@@ -1320,7 +1324,6 @@ static int atmel_aes_probe(struct platform_device *pdev)
1320 struct crypto_platform_data *pdata; 1324 struct crypto_platform_data *pdata;
1321 struct device *dev = &pdev->dev; 1325 struct device *dev = &pdev->dev;
1322 struct resource *aes_res; 1326 struct resource *aes_res;
1323 unsigned long aes_phys_size;
1324 int err; 1327 int err;
1325 1328
1326 pdata = pdev->dev.platform_data; 1329 pdata = pdev->dev.platform_data;
@@ -1337,7 +1340,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
1337 goto aes_dd_err; 1340 goto aes_dd_err;
1338 } 1341 }
1339 1342
1340 aes_dd = kzalloc(sizeof(struct atmel_aes_dev), GFP_KERNEL); 1343 aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
1341 if (aes_dd == NULL) { 1344 if (aes_dd == NULL) {
1342 dev_err(dev, "unable to alloc data struct.\n"); 1345 dev_err(dev, "unable to alloc data struct.\n");
1343 err = -ENOMEM; 1346 err = -ENOMEM;
@@ -1368,36 +1371,35 @@ static int atmel_aes_probe(struct platform_device *pdev)
1368 goto res_err; 1371 goto res_err;
1369 } 1372 }
1370 aes_dd->phys_base = aes_res->start; 1373 aes_dd->phys_base = aes_res->start;
1371 aes_phys_size = resource_size(aes_res);
1372 1374
1373 /* Get the IRQ */ 1375 /* Get the IRQ */
1374 aes_dd->irq = platform_get_irq(pdev, 0); 1376 aes_dd->irq = platform_get_irq(pdev, 0);
1375 if (aes_dd->irq < 0) { 1377 if (aes_dd->irq < 0) {
1376 dev_err(dev, "no IRQ resource info\n"); 1378 dev_err(dev, "no IRQ resource info\n");
1377 err = aes_dd->irq; 1379 err = aes_dd->irq;
1378 goto aes_irq_err; 1380 goto res_err;
1379 } 1381 }
1380 1382
1381 err = request_irq(aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes", 1383 err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
1382 aes_dd); 1384 IRQF_SHARED, "atmel-aes", aes_dd);
1383 if (err) { 1385 if (err) {
1384 dev_err(dev, "unable to request aes irq.\n"); 1386 dev_err(dev, "unable to request aes irq.\n");
1385 goto aes_irq_err; 1387 goto res_err;
1386 } 1388 }
1387 1389
1388 /* Initializing the clock */ 1390 /* Initializing the clock */
1389 aes_dd->iclk = clk_get(&pdev->dev, "aes_clk"); 1391 aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
1390 if (IS_ERR(aes_dd->iclk)) { 1392 if (IS_ERR(aes_dd->iclk)) {
1391 dev_err(dev, "clock initialization failed.\n"); 1393 dev_err(dev, "clock initialization failed.\n");
1392 err = PTR_ERR(aes_dd->iclk); 1394 err = PTR_ERR(aes_dd->iclk);
1393 goto clk_err; 1395 goto res_err;
1394 } 1396 }
1395 1397
1396 aes_dd->io_base = ioremap(aes_dd->phys_base, aes_phys_size); 1398 aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
1397 if (!aes_dd->io_base) { 1399 if (!aes_dd->io_base) {
1398 dev_err(dev, "can't ioremap\n"); 1400 dev_err(dev, "can't ioremap\n");
1399 err = -ENOMEM; 1401 err = -ENOMEM;
1400 goto aes_io_err; 1402 goto res_err;
1401 } 1403 }
1402 1404
1403 atmel_aes_hw_version_init(aes_dd); 1405 atmel_aes_hw_version_init(aes_dd);
@@ -1434,17 +1436,9 @@ err_algs:
1434err_aes_dma: 1436err_aes_dma:
1435 atmel_aes_buff_cleanup(aes_dd); 1437 atmel_aes_buff_cleanup(aes_dd);
1436err_aes_buff: 1438err_aes_buff:
1437 iounmap(aes_dd->io_base);
1438aes_io_err:
1439 clk_put(aes_dd->iclk);
1440clk_err:
1441 free_irq(aes_dd->irq, aes_dd);
1442aes_irq_err:
1443res_err: 1439res_err:
1444 tasklet_kill(&aes_dd->done_task); 1440 tasklet_kill(&aes_dd->done_task);
1445 tasklet_kill(&aes_dd->queue_task); 1441 tasklet_kill(&aes_dd->queue_task);
1446 kfree(aes_dd);
1447 aes_dd = NULL;
1448aes_dd_err: 1442aes_dd_err:
1449 dev_err(dev, "initialization failed.\n"); 1443 dev_err(dev, "initialization failed.\n");
1450 1444
@@ -1469,16 +1463,6 @@ static int atmel_aes_remove(struct platform_device *pdev)
1469 1463
1470 atmel_aes_dma_cleanup(aes_dd); 1464 atmel_aes_dma_cleanup(aes_dd);
1471 1465
1472 iounmap(aes_dd->io_base);
1473
1474 clk_put(aes_dd->iclk);
1475
1476 if (aes_dd->irq > 0)
1477 free_irq(aes_dd->irq, aes_dd);
1478
1479 kfree(aes_dd);
1480 aes_dd = NULL;
1481
1482 return 0; 1466 return 0;
1483} 1467}
1484 1468
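The atmel-aes rework above is a standard devm conversion: once the allocation, IRQ, clock and MMIO mapping are device-managed, the hand-rolled error labels collapse to a couple of targets and remove() no longer has to undo them. A condensed sketch of the resulting probe shape, with a hypothetical example_dev/example_irq and abbreviated names (note that devm_ioremap_resource() returns an ERR_PTR, unlike plain ioremap()):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

struct example_dev {
	void __iomem *io_base;
	struct clk *iclk;
};

static irqreturn_t example_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	struct example_dev *dd;
	struct resource *res;
	int irq, err;

	dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dd->io_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dd->io_base))
		return PTR_ERR(dd->io_base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	err = devm_request_irq(&pdev->dev, irq, example_irq, IRQF_SHARED,
			       dev_name(&pdev->dev), dd);
	if (err)
		return err;

	dd->iclk = devm_clk_get(&pdev->dev, "example_clk");
	if (IS_ERR(dd->iclk))
		return PTR_ERR(dd->iclk);

	platform_set_drvdata(pdev, dd);
	/* everything above is released automatically on probe failure or unbind */
	return 0;
}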
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 5b35433c5399..660d8c06540b 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -794,7 +794,11 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
794 794
795static int atmel_sha_hw_init(struct atmel_sha_dev *dd) 795static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
796{ 796{
797 clk_prepare_enable(dd->iclk); 797 int err;
798
799 err = clk_prepare_enable(dd->iclk);
800 if (err)
801 return err;
798 802
799 if (!(SHA_FLAGS_INIT & dd->flags)) { 803 if (!(SHA_FLAGS_INIT & dd->flags)) {
800 atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST); 804 atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
@@ -1345,11 +1349,9 @@ static int atmel_sha_probe(struct platform_device *pdev)
1345 struct crypto_platform_data *pdata; 1349 struct crypto_platform_data *pdata;
1346 struct device *dev = &pdev->dev; 1350 struct device *dev = &pdev->dev;
1347 struct resource *sha_res; 1351 struct resource *sha_res;
1348 unsigned long sha_phys_size;
1349 int err; 1352 int err;
1350 1353
1351 sha_dd = devm_kzalloc(&pdev->dev, sizeof(struct atmel_sha_dev), 1354 sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
1352 GFP_KERNEL);
1353 if (sha_dd == NULL) { 1355 if (sha_dd == NULL) {
1354 dev_err(dev, "unable to alloc data struct.\n"); 1356 dev_err(dev, "unable to alloc data struct.\n");
1355 err = -ENOMEM; 1357 err = -ENOMEM;
@@ -1378,7 +1380,6 @@ static int atmel_sha_probe(struct platform_device *pdev)
1378 goto res_err; 1380 goto res_err;
1379 } 1381 }
1380 sha_dd->phys_base = sha_res->start; 1382 sha_dd->phys_base = sha_res->start;
1381 sha_phys_size = resource_size(sha_res);
1382 1383
1383 /* Get the IRQ */ 1384 /* Get the IRQ */
1384 sha_dd->irq = platform_get_irq(pdev, 0); 1385 sha_dd->irq = platform_get_irq(pdev, 0);
@@ -1388,26 +1389,26 @@ static int atmel_sha_probe(struct platform_device *pdev)
1388 goto res_err; 1389 goto res_err;
1389 } 1390 }
1390 1391
1391 err = request_irq(sha_dd->irq, atmel_sha_irq, IRQF_SHARED, "atmel-sha", 1392 err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
1392 sha_dd); 1393 IRQF_SHARED, "atmel-sha", sha_dd);
1393 if (err) { 1394 if (err) {
1394 dev_err(dev, "unable to request sha irq.\n"); 1395 dev_err(dev, "unable to request sha irq.\n");
1395 goto res_err; 1396 goto res_err;
1396 } 1397 }
1397 1398
1398 /* Initializing the clock */ 1399 /* Initializing the clock */
1399 sha_dd->iclk = clk_get(&pdev->dev, "sha_clk"); 1400 sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
1400 if (IS_ERR(sha_dd->iclk)) { 1401 if (IS_ERR(sha_dd->iclk)) {
1401 dev_err(dev, "clock initialization failed.\n"); 1402 dev_err(dev, "clock initialization failed.\n");
1402 err = PTR_ERR(sha_dd->iclk); 1403 err = PTR_ERR(sha_dd->iclk);
1403 goto clk_err; 1404 goto res_err;
1404 } 1405 }
1405 1406
1406 sha_dd->io_base = ioremap(sha_dd->phys_base, sha_phys_size); 1407 sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
1407 if (!sha_dd->io_base) { 1408 if (!sha_dd->io_base) {
1408 dev_err(dev, "can't ioremap\n"); 1409 dev_err(dev, "can't ioremap\n");
1409 err = -ENOMEM; 1410 err = -ENOMEM;
1410 goto sha_io_err; 1411 goto res_err;
1411 } 1412 }
1412 1413
1413 atmel_sha_hw_version_init(sha_dd); 1414 atmel_sha_hw_version_init(sha_dd);
@@ -1421,12 +1422,12 @@ static int atmel_sha_probe(struct platform_device *pdev)
1421 if (IS_ERR(pdata)) { 1422 if (IS_ERR(pdata)) {
1422 dev_err(&pdev->dev, "platform data not available\n"); 1423 dev_err(&pdev->dev, "platform data not available\n");
1423 err = PTR_ERR(pdata); 1424 err = PTR_ERR(pdata);
1424 goto err_pdata; 1425 goto res_err;
1425 } 1426 }
1426 } 1427 }
1427 if (!pdata->dma_slave) { 1428 if (!pdata->dma_slave) {
1428 err = -ENXIO; 1429 err = -ENXIO;
1429 goto err_pdata; 1430 goto res_err;
1430 } 1431 }
1431 err = atmel_sha_dma_init(sha_dd, pdata); 1432 err = atmel_sha_dma_init(sha_dd, pdata);
1432 if (err) 1433 if (err)
@@ -1457,12 +1458,6 @@ err_algs:
1457 if (sha_dd->caps.has_dma) 1458 if (sha_dd->caps.has_dma)
1458 atmel_sha_dma_cleanup(sha_dd); 1459 atmel_sha_dma_cleanup(sha_dd);
1459err_sha_dma: 1460err_sha_dma:
1460err_pdata:
1461 iounmap(sha_dd->io_base);
1462sha_io_err:
1463 clk_put(sha_dd->iclk);
1464clk_err:
1465 free_irq(sha_dd->irq, sha_dd);
1466res_err: 1461res_err:
1467 tasklet_kill(&sha_dd->done_task); 1462 tasklet_kill(&sha_dd->done_task);
1468sha_dd_err: 1463sha_dd_err:
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index ca2999709eb4..2c7a628d0375 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -218,7 +218,11 @@ static struct atmel_tdes_dev *atmel_tdes_find_dev(struct atmel_tdes_ctx *ctx)
218 218
219static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd) 219static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
220{ 220{
221 clk_prepare_enable(dd->iclk); 221 int err;
222
223 err = clk_prepare_enable(dd->iclk);
224 if (err)
225 return err;
222 226
223 if (!(dd->flags & TDES_FLAGS_INIT)) { 227 if (!(dd->flags & TDES_FLAGS_INIT)) {
224 atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST); 228 atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
@@ -1355,7 +1359,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
1355 struct crypto_platform_data *pdata; 1359 struct crypto_platform_data *pdata;
1356 struct device *dev = &pdev->dev; 1360 struct device *dev = &pdev->dev;
1357 struct resource *tdes_res; 1361 struct resource *tdes_res;
1358 unsigned long tdes_phys_size;
1359 int err; 1362 int err;
1360 1363
1361 tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL); 1364 tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
@@ -1389,7 +1392,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
1389 goto res_err; 1392 goto res_err;
1390 } 1393 }
1391 tdes_dd->phys_base = tdes_res->start; 1394 tdes_dd->phys_base = tdes_res->start;
1392 tdes_phys_size = resource_size(tdes_res);
1393 1395
1394 /* Get the IRQ */ 1396 /* Get the IRQ */
1395 tdes_dd->irq = platform_get_irq(pdev, 0); 1397 tdes_dd->irq = platform_get_irq(pdev, 0);
@@ -1399,26 +1401,26 @@ static int atmel_tdes_probe(struct platform_device *pdev)
1399 goto res_err; 1401 goto res_err;
1400 } 1402 }
1401 1403
1402 err = request_irq(tdes_dd->irq, atmel_tdes_irq, IRQF_SHARED, 1404 err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
1403 "atmel-tdes", tdes_dd); 1405 IRQF_SHARED, "atmel-tdes", tdes_dd);
1404 if (err) { 1406 if (err) {
1405 dev_err(dev, "unable to request tdes irq.\n"); 1407 dev_err(dev, "unable to request tdes irq.\n");
1406 goto tdes_irq_err; 1408 goto res_err;
1407 } 1409 }
1408 1410
1409 /* Initializing the clock */ 1411 /* Initializing the clock */
1410 tdes_dd->iclk = clk_get(&pdev->dev, "tdes_clk"); 1412 tdes_dd->iclk = devm_clk_get(&pdev->dev, "tdes_clk");
1411 if (IS_ERR(tdes_dd->iclk)) { 1413 if (IS_ERR(tdes_dd->iclk)) {
1412 dev_err(dev, "clock initialization failed.\n"); 1414 dev_err(dev, "clock initialization failed.\n");
1413 err = PTR_ERR(tdes_dd->iclk); 1415 err = PTR_ERR(tdes_dd->iclk);
1414 goto clk_err; 1416 goto res_err;
1415 } 1417 }
1416 1418
1417 tdes_dd->io_base = ioremap(tdes_dd->phys_base, tdes_phys_size); 1419 tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
1418 if (!tdes_dd->io_base) { 1420 if (!tdes_dd->io_base) {
1419 dev_err(dev, "can't ioremap\n"); 1421 dev_err(dev, "can't ioremap\n");
1420 err = -ENOMEM; 1422 err = -ENOMEM;
1421 goto tdes_io_err; 1423 goto res_err;
1422 } 1424 }
1423 1425
1424 atmel_tdes_hw_version_init(tdes_dd); 1426 atmel_tdes_hw_version_init(tdes_dd);
@@ -1474,12 +1476,6 @@ err_tdes_dma:
1474err_pdata: 1476err_pdata:
1475 atmel_tdes_buff_cleanup(tdes_dd); 1477 atmel_tdes_buff_cleanup(tdes_dd);
1476err_tdes_buff: 1478err_tdes_buff:
1477 iounmap(tdes_dd->io_base);
1478tdes_io_err:
1479 clk_put(tdes_dd->iclk);
1480clk_err:
1481 free_irq(tdes_dd->irq, tdes_dd);
1482tdes_irq_err:
1483res_err: 1479res_err:
1484 tasklet_kill(&tdes_dd->done_task); 1480 tasklet_kill(&tdes_dd->done_task);
1485 tasklet_kill(&tdes_dd->queue_task); 1481 tasklet_kill(&tdes_dd->queue_task);
@@ -1510,13 +1506,6 @@ static int atmel_tdes_remove(struct platform_device *pdev)
1510 1506
1511 atmel_tdes_buff_cleanup(tdes_dd); 1507 atmel_tdes_buff_cleanup(tdes_dd);
1512 1508
1513 iounmap(tdes_dd->io_base);
1514
1515 clk_put(tdes_dd->iclk);
1516
1517 if (tdes_dd->irq >= 0)
1518 free_irq(tdes_dd->irq, tdes_dd);
1519
1520 return 0; 1509 return 0;
1521} 1510}
1522 1511
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index 2f0b3337505d..95b73968cf72 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -96,26 +96,6 @@ struct bfin_crypto_crc_ctx {
96 u32 key; 96 u32 key;
97}; 97};
98 98
99
100/*
101 * derive number of elements in scatterlist
102 */
103static int sg_count(struct scatterlist *sg_list)
104{
105 struct scatterlist *sg = sg_list;
106 int sg_nents = 1;
107
108 if (sg_list == NULL)
109 return 0;
110
111 while (!sg_is_last(sg)) {
112 sg_nents++;
113 sg = sg_next(sg);
114 }
115
116 return sg_nents;
117}
118
119/* 99/*
120 * get element in scatter list by given index 100 * get element in scatter list by given index
121 */ 101 */
@@ -160,7 +140,7 @@ static int bfin_crypto_crc_init(struct ahash_request *req)
160 } 140 }
161 spin_unlock_bh(&crc_list.lock); 141 spin_unlock_bh(&crc_list.lock);
162 142
163 if (sg_count(req->src) > CRC_MAX_DMA_DESC) { 143 if (sg_nents(req->src) > CRC_MAX_DMA_DESC) {
164 dev_dbg(ctx->crc->dev, "init: requested sg list is too big > %d\n", 144 dev_dbg(ctx->crc->dev, "init: requested sg list is too big > %d\n",
165 CRC_MAX_DMA_DESC); 145 CRC_MAX_DMA_DESC);
166 return -EINVAL; 146 return -EINVAL;
@@ -376,7 +356,8 @@ static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
376 ctx->sg = req->src; 356 ctx->sg = req->src;
377 357
378 /* Chop crc buffer size to multiple of 32 bit */ 358 /* Chop crc buffer size to multiple of 32 bit */
379 nsg = ctx->sg_nents = sg_count(ctx->sg); 359 nsg = sg_nents(ctx->sg);
360 ctx->sg_nents = nsg;
380 ctx->sg_buflen = ctx->buflast_len + req->nbytes; 361 ctx->sg_buflen = ctx->buflast_len + req->nbytes;
381 ctx->bufnext_len = ctx->sg_buflen % 4; 362 ctx->bufnext_len = ctx->sg_buflen % 4;
382 ctx->sg_buflen &= ~0x3; 363 ctx->sg_buflen &= ~0x3;
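Unlike the crypto4xx case, bfin_crc only needs the total number of entries in the list, so its local counter maps to sg_nents() rather than sg_nents_for_len(). Roughly, as a simplified sketch rather than the kernel's implementation:

#include <linux/scatterlist.h>

/* Roughly what sg_nents() does: walk the (possibly chained) list to its end. */
static int sg_nents_sketch(struct scatterlist *sg)
{
	int nents = 0;

	for (; sg; sg = sg_next(sg))
		nents++;
	return nents;
}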
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ba79d638f782..ea8189f4b021 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1705,14 +1705,131 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1705 return ret; 1705 return ret;
1706} 1706}
1707 1707
1708static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1709 const u8 *key, unsigned int keylen)
1710{
1711 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1712 struct device *jrdev = ctx->jrdev;
1713 u32 *key_jump_cmd, *desc;
1714 __be64 sector_size = cpu_to_be64(512);
1715
1716 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
1717 crypto_ablkcipher_set_flags(ablkcipher,
1718 CRYPTO_TFM_RES_BAD_KEY_LEN);
1719 dev_err(jrdev, "key size mismatch\n");
1720 return -EINVAL;
1721 }
1722
1723 memcpy(ctx->key, key, keylen);
1724 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
1725 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1726 dev_err(jrdev, "unable to map key i/o memory\n");
1727 return -ENOMEM;
1728 }
1729 ctx->enckeylen = keylen;
1730
1731 /* xts_ablkcipher_encrypt shared descriptor */
1732 desc = ctx->sh_desc_enc;
1733 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1734 /* Skip if already shared */
1735 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1736 JUMP_COND_SHRD);
1737
1738 /* Load class1 keys only */
1739 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1740 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1741
1742 /* Load sector size with index 40 bytes (0x28) */
1743 append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
1744 LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
1745 append_data(desc, (void *)&sector_size, 8);
1746
1747 set_jump_tgt_here(desc, key_jump_cmd);
1748
1749 /*
1750 * create sequence for loading the sector index
1751 * Upper 8B of IV - will be used as sector index
1752 * Lower 8B of IV - will be discarded
1753 */
1754 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
1755 LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
1756 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1757
1758 /* Load operation */
1759 append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
1760 OP_ALG_ENCRYPT);
1761
1762 /* Perform operation */
1763 ablkcipher_append_src_dst(desc);
1764
1765 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1766 DMA_TO_DEVICE);
1767 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1768 dev_err(jrdev, "unable to map shared descriptor\n");
1769 return -ENOMEM;
1770 }
1771#ifdef DEBUG
1772 print_hex_dump(KERN_ERR,
1773 "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
1774 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1775#endif
1776
1777 /* xts_ablkcipher_decrypt shared descriptor */
1778 desc = ctx->sh_desc_dec;
1779
1780 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1781 /* Skip if already shared */
1782 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1783 JUMP_COND_SHRD);
1784
1785 /* Load class1 key only */
1786 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1787 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1788
1789 /* Load sector size with index 40 bytes (0x28) */
1790 append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
1791 LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
1792 append_data(desc, (void *)&sector_size, 8);
1793
1794 set_jump_tgt_here(desc, key_jump_cmd);
1795
1796 /*
1797 * create sequence for loading the sector index
1798 * Upper 8B of IV - will be used as sector index
1799 * Lower 8B of IV - will be discarded
1800 */
1801 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
1802 LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
1803 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1804
1805 /* Load operation */
1806 append_dec_op1(desc, ctx->class1_alg_type);
1807
1808 /* Perform operation */
1809 ablkcipher_append_src_dst(desc);
1810
1811 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1812 DMA_TO_DEVICE);
1813 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1814 dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
1815 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
1816 dev_err(jrdev, "unable to map shared descriptor\n");
1817 return -ENOMEM;
1818 }
1819#ifdef DEBUG
1820 print_hex_dump(KERN_ERR,
1821 "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
1822 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1823#endif
1824
1825 return 0;
1826}
1827
1708/* 1828/*
1709 * aead_edesc - s/w-extended aead descriptor 1829 * aead_edesc - s/w-extended aead descriptor
1710 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist 1830 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
1711 * @assoc_chained: if source is chained
1712 * @src_nents: number of segments in input scatterlist 1831 * @src_nents: number of segments in input scatterlist
1713 * @src_chained: if source is chained
1714 * @dst_nents: number of segments in output scatterlist 1832 * @dst_nents: number of segments in output scatterlist
1715 * @dst_chained: if destination is chained
1716 * @iv_dma: dma address of iv for checking continuity and link table 1833 * @iv_dma: dma address of iv for checking continuity and link table
1717 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) 1834 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
1718 * @sec4_sg_bytes: length of dma mapped sec4_sg space 1835 * @sec4_sg_bytes: length of dma mapped sec4_sg space
@@ -1721,11 +1838,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1721 */ 1838 */
1722struct aead_edesc { 1839struct aead_edesc {
1723 int assoc_nents; 1840 int assoc_nents;
1724 bool assoc_chained;
1725 int src_nents; 1841 int src_nents;
1726 bool src_chained;
1727 int dst_nents; 1842 int dst_nents;
1728 bool dst_chained;
1729 dma_addr_t iv_dma; 1843 dma_addr_t iv_dma;
1730 int sec4_sg_bytes; 1844 int sec4_sg_bytes;
1731 dma_addr_t sec4_sg_dma; 1845 dma_addr_t sec4_sg_dma;
@@ -1736,9 +1850,7 @@ struct aead_edesc {
1736/* 1850/*
1737 * ablkcipher_edesc - s/w-extended ablkcipher descriptor 1851 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
1738 * @src_nents: number of segments in input scatterlist 1852 * @src_nents: number of segments in input scatterlist
1739 * @src_chained: if source is chained
1740 * @dst_nents: number of segments in output scatterlist 1853 * @dst_nents: number of segments in output scatterlist
1741 * @dst_chained: if destination is chained
1742 * @iv_dma: dma address of iv for checking continuity and link table 1854 * @iv_dma: dma address of iv for checking continuity and link table
1743 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) 1855 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
1744 * @sec4_sg_bytes: length of dma mapped sec4_sg space 1856 * @sec4_sg_bytes: length of dma mapped sec4_sg space
@@ -1747,9 +1859,7 @@ struct aead_edesc {
1747 */ 1859 */
1748struct ablkcipher_edesc { 1860struct ablkcipher_edesc {
1749 int src_nents; 1861 int src_nents;
1750 bool src_chained;
1751 int dst_nents; 1862 int dst_nents;
1752 bool dst_chained;
1753 dma_addr_t iv_dma; 1863 dma_addr_t iv_dma;
1754 int sec4_sg_bytes; 1864 int sec4_sg_bytes;
1755 dma_addr_t sec4_sg_dma; 1865 dma_addr_t sec4_sg_dma;
@@ -1759,18 +1869,15 @@ struct ablkcipher_edesc {
1759 1869
1760static void caam_unmap(struct device *dev, struct scatterlist *src, 1870static void caam_unmap(struct device *dev, struct scatterlist *src,
1761 struct scatterlist *dst, int src_nents, 1871 struct scatterlist *dst, int src_nents,
1762 bool src_chained, int dst_nents, bool dst_chained, 1872 int dst_nents,
1763 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma, 1873 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
1764 int sec4_sg_bytes) 1874 int sec4_sg_bytes)
1765{ 1875{
1766 if (dst != src) { 1876 if (dst != src) {
1767 dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE, 1877 dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
1768 src_chained); 1878 dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
1769 dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
1770 dst_chained);
1771 } else { 1879 } else {
1772 dma_unmap_sg_chained(dev, src, src_nents ? : 1, 1880 dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
1773 DMA_BIDIRECTIONAL, src_chained);
1774 } 1881 }
1775 1882
1776 if (iv_dma) 1883 if (iv_dma)
@@ -1785,8 +1892,7 @@ static void aead_unmap(struct device *dev,
1785 struct aead_request *req) 1892 struct aead_request *req)
1786{ 1893{
1787 caam_unmap(dev, req->src, req->dst, 1894 caam_unmap(dev, req->src, req->dst,
1788 edesc->src_nents, edesc->src_chained, edesc->dst_nents, 1895 edesc->src_nents, edesc->dst_nents, 0, 0,
1789 edesc->dst_chained, 0, 0,
1790 edesc->sec4_sg_dma, edesc->sec4_sg_bytes); 1896 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1791} 1897}
1792 1898
@@ -1798,8 +1904,8 @@ static void ablkcipher_unmap(struct device *dev,
1798 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1904 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1799 1905
1800 caam_unmap(dev, req->src, req->dst, 1906 caam_unmap(dev, req->src, req->dst,
1801 edesc->src_nents, edesc->src_chained, edesc->dst_nents, 1907 edesc->src_nents, edesc->dst_nents,
1802 edesc->dst_chained, edesc->iv_dma, ivsize, 1908 edesc->iv_dma, ivsize,
1803 edesc->sec4_sg_dma, edesc->sec4_sg_bytes); 1909 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1804} 1910}
1805 1911
@@ -2169,22 +2275,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
2169 struct aead_edesc *edesc; 2275 struct aead_edesc *edesc;
2170 int sgc; 2276 int sgc;
2171 bool all_contig = true; 2277 bool all_contig = true;
2172 bool src_chained = false, dst_chained = false;
2173 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; 2278 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
2174 unsigned int authsize = ctx->authsize; 2279 unsigned int authsize = ctx->authsize;
2175 2280
2176 if (unlikely(req->dst != req->src)) { 2281 if (unlikely(req->dst != req->src)) {
2177 src_nents = sg_count(req->src, req->assoclen + req->cryptlen, 2282 src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
2178 &src_chained);
2179 dst_nents = sg_count(req->dst, 2283 dst_nents = sg_count(req->dst,
2180 req->assoclen + req->cryptlen + 2284 req->assoclen + req->cryptlen +
2181 (encrypt ? authsize : (-authsize)), 2285 (encrypt ? authsize : (-authsize)));
2182 &dst_chained);
2183 } else { 2286 } else {
2184 src_nents = sg_count(req->src, 2287 src_nents = sg_count(req->src,
2185 req->assoclen + req->cryptlen + 2288 req->assoclen + req->cryptlen +
2186 (encrypt ? authsize : 0), 2289 (encrypt ? authsize : 0));
2187 &src_chained);
2188 } 2290 }
2189 2291
2190 /* Check if data are contiguous. */ 2292 /* Check if data are contiguous. */
@@ -2207,37 +2309,35 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
2207 } 2309 }
2208 2310
2209 if (likely(req->src == req->dst)) { 2311 if (likely(req->src == req->dst)) {
2210 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 2312 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2211 DMA_BIDIRECTIONAL, src_chained); 2313 DMA_BIDIRECTIONAL);
2212 if (unlikely(!sgc)) { 2314 if (unlikely(!sgc)) {
2213 dev_err(jrdev, "unable to map source\n"); 2315 dev_err(jrdev, "unable to map source\n");
2214 kfree(edesc); 2316 kfree(edesc);
2215 return ERR_PTR(-ENOMEM); 2317 return ERR_PTR(-ENOMEM);
2216 } 2318 }
2217 } else { 2319 } else {
2218 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 2320 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2219 DMA_TO_DEVICE, src_chained); 2321 DMA_TO_DEVICE);
2220 if (unlikely(!sgc)) { 2322 if (unlikely(!sgc)) {
2221 dev_err(jrdev, "unable to map source\n"); 2323 dev_err(jrdev, "unable to map source\n");
2222 kfree(edesc); 2324 kfree(edesc);
2223 return ERR_PTR(-ENOMEM); 2325 return ERR_PTR(-ENOMEM);
2224 } 2326 }
2225 2327
2226 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, 2328 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2227 DMA_FROM_DEVICE, dst_chained); 2329 DMA_FROM_DEVICE);
2228 if (unlikely(!sgc)) { 2330 if (unlikely(!sgc)) {
2229 dev_err(jrdev, "unable to map destination\n"); 2331 dev_err(jrdev, "unable to map destination\n");
2230 dma_unmap_sg_chained(jrdev, req->src, src_nents ? : 1, 2332 dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
2231 DMA_TO_DEVICE, src_chained); 2333 DMA_TO_DEVICE);
2232 kfree(edesc); 2334 kfree(edesc);
2233 return ERR_PTR(-ENOMEM); 2335 return ERR_PTR(-ENOMEM);
2234 } 2336 }
2235 } 2337 }
2236 2338
2237 edesc->src_nents = src_nents; 2339 edesc->src_nents = src_nents;
2238 edesc->src_chained = src_chained;
2239 edesc->dst_nents = dst_nents; 2340 edesc->dst_nents = dst_nents;
2240 edesc->dst_chained = dst_chained;
2241 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + 2341 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2242 desc_bytes; 2342 desc_bytes;
2243 *all_contig_ptr = all_contig; 2343 *all_contig_ptr = all_contig;
@@ -2467,22 +2567,21 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
2467 bool iv_contig = false; 2567 bool iv_contig = false;
2468 int sgc; 2568 int sgc;
2469 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 2569 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2470 bool src_chained = false, dst_chained = false;
2471 int sec4_sg_index; 2570 int sec4_sg_index;
2472 2571
2473 src_nents = sg_count(req->src, req->nbytes, &src_chained); 2572 src_nents = sg_count(req->src, req->nbytes);
2474 2573
2475 if (req->dst != req->src) 2574 if (req->dst != req->src)
2476 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained); 2575 dst_nents = sg_count(req->dst, req->nbytes);
2477 2576
2478 if (likely(req->src == req->dst)) { 2577 if (likely(req->src == req->dst)) {
2479 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 2578 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2480 DMA_BIDIRECTIONAL, src_chained); 2579 DMA_BIDIRECTIONAL);
2481 } else { 2580 } else {
2482 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 2581 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2483 DMA_TO_DEVICE, src_chained); 2582 DMA_TO_DEVICE);
2484 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, 2583 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2485 DMA_FROM_DEVICE, dst_chained); 2584 DMA_FROM_DEVICE);
2486 } 2585 }
2487 2586
2488 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); 2587 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
@@ -2511,9 +2610,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
2511 } 2610 }
2512 2611
2513 edesc->src_nents = src_nents; 2612 edesc->src_nents = src_nents;
2514 edesc->src_chained = src_chained;
2515 edesc->dst_nents = dst_nents; 2613 edesc->dst_nents = dst_nents;
2516 edesc->dst_chained = dst_chained;
2517 edesc->sec4_sg_bytes = sec4_sg_bytes; 2614 edesc->sec4_sg_bytes = sec4_sg_bytes;
2518 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + 2615 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2519 desc_bytes; 2616 desc_bytes;
@@ -2646,22 +2743,21 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
2646 bool iv_contig = false; 2743 bool iv_contig = false;
2647 int sgc; 2744 int sgc;
2648 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 2745 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2649 bool src_chained = false, dst_chained = false;
2650 int sec4_sg_index; 2746 int sec4_sg_index;
2651 2747
2652 src_nents = sg_count(req->src, req->nbytes, &src_chained); 2748 src_nents = sg_count(req->src, req->nbytes);
2653 2749
2654 if (unlikely(req->dst != req->src)) 2750 if (unlikely(req->dst != req->src))
2655 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained); 2751 dst_nents = sg_count(req->dst, req->nbytes);
2656 2752
2657 if (likely(req->src == req->dst)) { 2753 if (likely(req->src == req->dst)) {
2658 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 2754 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2659 DMA_BIDIRECTIONAL, src_chained); 2755 DMA_BIDIRECTIONAL);
2660 } else { 2756 } else {
2661 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 2757 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2662 DMA_TO_DEVICE, src_chained); 2758 DMA_TO_DEVICE);
2663 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, 2759 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2664 DMA_FROM_DEVICE, dst_chained); 2760 DMA_FROM_DEVICE);
2665 } 2761 }
2666 2762
2667 /* 2763 /*
@@ -2690,9 +2786,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
2690 } 2786 }
2691 2787
2692 edesc->src_nents = src_nents; 2788 edesc->src_nents = src_nents;
2693 edesc->src_chained = src_chained;
2694 edesc->dst_nents = dst_nents; 2789 edesc->dst_nents = dst_nents;
2695 edesc->dst_chained = dst_chained;
2696 edesc->sec4_sg_bytes = sec4_sg_bytes; 2790 edesc->sec4_sg_bytes = sec4_sg_bytes;
2697 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + 2791 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2698 desc_bytes; 2792 desc_bytes;
@@ -2871,7 +2965,23 @@ static struct caam_alg_template driver_algs[] = {
2871 .ivsize = CTR_RFC3686_IV_SIZE, 2965 .ivsize = CTR_RFC3686_IV_SIZE,
2872 }, 2966 },
2873 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, 2967 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
2874 } 2968 },
2969 {
2970 .name = "xts(aes)",
2971 .driver_name = "xts-aes-caam",
2972 .blocksize = AES_BLOCK_SIZE,
2973 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2974 .template_ablkcipher = {
2975 .setkey = xts_ablkcipher_setkey,
2976 .encrypt = ablkcipher_encrypt,
2977 .decrypt = ablkcipher_decrypt,
2978 .geniv = "eseqiv",
2979 .min_keysize = 2 * AES_MIN_KEY_SIZE,
2980 .max_keysize = 2 * AES_MAX_KEY_SIZE,
2981 .ivsize = AES_BLOCK_SIZE,
2982 },
2983 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
2984 },
2875}; 2985};
2876 2986
2877static struct caam_aead_alg driver_aeads[] = { 2987static struct caam_aead_alg driver_aeads[] = {
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 94433b9fc200..49106ea42887 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -134,6 +134,15 @@ struct caam_hash_state {
134 int current_buf; 134 int current_buf;
135}; 135};
136 136
137struct caam_export_state {
138 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
139 u8 caam_ctx[MAX_CTX_LEN];
140 int buflen;
141 int (*update)(struct ahash_request *req);
142 int (*final)(struct ahash_request *req);
143 int (*finup)(struct ahash_request *req);
144};
145
137/* Common job descriptor seq in/out ptr routines */ 146/* Common job descriptor seq in/out ptr routines */
138 147
139/* Map state->caam_ctx, and append seq_out_ptr command that points to it */ 148/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
@@ -181,10 +190,9 @@ static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
181/* Map req->src and put it in link table */ 190/* Map req->src and put it in link table */
182static inline void src_map_to_sec4_sg(struct device *jrdev, 191static inline void src_map_to_sec4_sg(struct device *jrdev,
183 struct scatterlist *src, int src_nents, 192 struct scatterlist *src, int src_nents,
184 struct sec4_sg_entry *sec4_sg, 193 struct sec4_sg_entry *sec4_sg)
185 bool chained)
186{ 194{
187 dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained); 195 dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
188 sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0); 196 sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
189} 197}
190 198
@@ -585,7 +593,6 @@ badkey:
585 * ahash_edesc - s/w-extended ahash descriptor 593 * ahash_edesc - s/w-extended ahash descriptor
586 * @dst_dma: physical mapped address of req->result 594 * @dst_dma: physical mapped address of req->result
587 * @sec4_sg_dma: physical mapped address of h/w link table 595 * @sec4_sg_dma: physical mapped address of h/w link table
588 * @chained: if source is chained
589 * @src_nents: number of segments in input scatterlist 596 * @src_nents: number of segments in input scatterlist
590 * @sec4_sg_bytes: length of dma mapped sec4_sg space 597 * @sec4_sg_bytes: length of dma mapped sec4_sg space
591 * @sec4_sg: pointer to h/w link table 598 * @sec4_sg: pointer to h/w link table
@@ -594,7 +601,6 @@ badkey:
594struct ahash_edesc { 601struct ahash_edesc {
595 dma_addr_t dst_dma; 602 dma_addr_t dst_dma;
596 dma_addr_t sec4_sg_dma; 603 dma_addr_t sec4_sg_dma;
597 bool chained;
598 int src_nents; 604 int src_nents;
599 int sec4_sg_bytes; 605 int sec4_sg_bytes;
600 struct sec4_sg_entry *sec4_sg; 606 struct sec4_sg_entry *sec4_sg;
@@ -606,8 +612,7 @@ static inline void ahash_unmap(struct device *dev,
606 struct ahash_request *req, int dst_len) 612 struct ahash_request *req, int dst_len)
607{ 613{
608 if (edesc->src_nents) 614 if (edesc->src_nents)
609 dma_unmap_sg_chained(dev, req->src, edesc->src_nents, 615 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
610 DMA_TO_DEVICE, edesc->chained);
611 if (edesc->dst_dma) 616 if (edesc->dst_dma)
612 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE); 617 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
613 618
@@ -788,7 +793,6 @@ static int ahash_update_ctx(struct ahash_request *req)
788 dma_addr_t ptr = ctx->sh_desc_update_dma; 793 dma_addr_t ptr = ctx->sh_desc_update_dma;
789 int src_nents, sec4_sg_bytes, sec4_sg_src_index; 794 int src_nents, sec4_sg_bytes, sec4_sg_src_index;
790 struct ahash_edesc *edesc; 795 struct ahash_edesc *edesc;
791 bool chained = false;
792 int ret = 0; 796 int ret = 0;
793 int sh_len; 797 int sh_len;
794 798
@@ -797,8 +801,8 @@ static int ahash_update_ctx(struct ahash_request *req)
797 to_hash = in_len - *next_buflen; 801 to_hash = in_len - *next_buflen;
798 802
799 if (to_hash) { 803 if (to_hash) {
800 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen), 804 src_nents = sg_nents_for_len(req->src,
801 &chained); 805 req->nbytes - (*next_buflen));
802 sec4_sg_src_index = 1 + (*buflen ? 1 : 0); 806 sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
803 sec4_sg_bytes = (sec4_sg_src_index + src_nents) * 807 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
804 sizeof(struct sec4_sg_entry); 808 sizeof(struct sec4_sg_entry);
@@ -816,7 +820,6 @@ static int ahash_update_ctx(struct ahash_request *req)
816 } 820 }
817 821
818 edesc->src_nents = src_nents; 822 edesc->src_nents = src_nents;
819 edesc->chained = chained;
820 edesc->sec4_sg_bytes = sec4_sg_bytes; 823 edesc->sec4_sg_bytes = sec4_sg_bytes;
821 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + 824 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
822 DESC_JOB_IO_LEN; 825 DESC_JOB_IO_LEN;
@@ -829,12 +832,11 @@ static int ahash_update_ctx(struct ahash_request *req)
829 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, 832 state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
830 edesc->sec4_sg + 1, 833 edesc->sec4_sg + 1,
831 buf, state->buf_dma, 834 buf, state->buf_dma,
832 *next_buflen, *buflen); 835 *buflen, last_buflen);
833 836
834 if (src_nents) { 837 if (src_nents) {
835 src_map_to_sec4_sg(jrdev, req->src, src_nents, 838 src_map_to_sec4_sg(jrdev, req->src, src_nents,
836 edesc->sec4_sg + sec4_sg_src_index, 839 edesc->sec4_sg + sec4_sg_src_index);
837 chained);
838 if (*next_buflen) 840 if (*next_buflen)
839 scatterwalk_map_and_copy(next_buf, req->src, 841 scatterwalk_map_and_copy(next_buf, req->src,
840 to_hash - *buflen, 842 to_hash - *buflen,
@@ -996,11 +998,10 @@ static int ahash_finup_ctx(struct ahash_request *req)
996 int src_nents; 998 int src_nents;
997 int digestsize = crypto_ahash_digestsize(ahash); 999 int digestsize = crypto_ahash_digestsize(ahash);
998 struct ahash_edesc *edesc; 1000 struct ahash_edesc *edesc;
999 bool chained = false;
1000 int ret = 0; 1001 int ret = 0;
1001 int sh_len; 1002 int sh_len;
1002 1003
1003 src_nents = __sg_count(req->src, req->nbytes, &chained); 1004 src_nents = sg_nents_for_len(req->src, req->nbytes);
1004 sec4_sg_src_index = 1 + (buflen ? 1 : 0); 1005 sec4_sg_src_index = 1 + (buflen ? 1 : 0);
1005 sec4_sg_bytes = (sec4_sg_src_index + src_nents) * 1006 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
1006 sizeof(struct sec4_sg_entry); 1007 sizeof(struct sec4_sg_entry);
@@ -1018,7 +1019,6 @@ static int ahash_finup_ctx(struct ahash_request *req)
1018 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); 1019 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1019 1020
1020 edesc->src_nents = src_nents; 1021 edesc->src_nents = src_nents;
1021 edesc->chained = chained;
1022 edesc->sec4_sg_bytes = sec4_sg_bytes; 1022 edesc->sec4_sg_bytes = sec4_sg_bytes;
1023 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + 1023 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1024 DESC_JOB_IO_LEN; 1024 DESC_JOB_IO_LEN;
@@ -1033,7 +1033,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
1033 last_buflen); 1033 last_buflen);
1034 1034
1035 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1035 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
1036 sec4_sg_src_index, chained); 1036 sec4_sg_src_index);
1037 1037
1038 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1038 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1039 sec4_sg_bytes, DMA_TO_DEVICE); 1039 sec4_sg_bytes, DMA_TO_DEVICE);
@@ -1081,14 +1081,12 @@ static int ahash_digest(struct ahash_request *req)
1081 int src_nents, sec4_sg_bytes; 1081 int src_nents, sec4_sg_bytes;
1082 dma_addr_t src_dma; 1082 dma_addr_t src_dma;
1083 struct ahash_edesc *edesc; 1083 struct ahash_edesc *edesc;
1084 bool chained = false;
1085 int ret = 0; 1084 int ret = 0;
1086 u32 options; 1085 u32 options;
1087 int sh_len; 1086 int sh_len;
1088 1087
1089 src_nents = sg_count(req->src, req->nbytes, &chained); 1088 src_nents = sg_count(req->src, req->nbytes);
1090 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE, 1089 dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
1091 chained);
1092 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); 1090 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1093 1091
1094 /* allocate space for base edesc and hw desc commands, link tables */ 1092 /* allocate space for base edesc and hw desc commands, link tables */
@@ -1102,7 +1100,6 @@ static int ahash_digest(struct ahash_request *req)
1102 DESC_JOB_IO_LEN; 1100 DESC_JOB_IO_LEN;
1103 edesc->sec4_sg_bytes = sec4_sg_bytes; 1101 edesc->sec4_sg_bytes = sec4_sg_bytes;
1104 edesc->src_nents = src_nents; 1102 edesc->src_nents = src_nents;
1105 edesc->chained = chained;
1106 1103
1107 sh_len = desc_len(sh_desc); 1104 sh_len = desc_len(sh_desc);
1108 desc = edesc->hw_desc; 1105 desc = edesc->hw_desc;
@@ -1228,7 +1225,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
1228 struct ahash_edesc *edesc; 1225 struct ahash_edesc *edesc;
1229 u32 *desc, *sh_desc = ctx->sh_desc_update_first; 1226 u32 *desc, *sh_desc = ctx->sh_desc_update_first;
1230 dma_addr_t ptr = ctx->sh_desc_update_first_dma; 1227 dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1231 bool chained = false;
1232 int ret = 0; 1228 int ret = 0;
1233 int sh_len; 1229 int sh_len;
1234 1230
@@ -1236,8 +1232,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
1236 to_hash = in_len - *next_buflen; 1232 to_hash = in_len - *next_buflen;
1237 1233
1238 if (to_hash) { 1234 if (to_hash) {
1239 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen), 1235 src_nents = sg_nents_for_len(req->src,
1240 &chained); 1236 req->nbytes - (*next_buflen));
1241 sec4_sg_bytes = (1 + src_nents) * 1237 sec4_sg_bytes = (1 + src_nents) *
1242 sizeof(struct sec4_sg_entry); 1238 sizeof(struct sec4_sg_entry);
1243 1239
@@ -1254,7 +1250,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
1254 } 1250 }
1255 1251
1256 edesc->src_nents = src_nents; 1252 edesc->src_nents = src_nents;
1257 edesc->chained = chained;
1258 edesc->sec4_sg_bytes = sec4_sg_bytes; 1253 edesc->sec4_sg_bytes = sec4_sg_bytes;
1259 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + 1254 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1260 DESC_JOB_IO_LEN; 1255 DESC_JOB_IO_LEN;
@@ -1263,7 +1258,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
1263 state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, 1258 state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
1264 buf, *buflen); 1259 buf, *buflen);
1265 src_map_to_sec4_sg(jrdev, req->src, src_nents, 1260 src_map_to_sec4_sg(jrdev, req->src, src_nents,
1266 edesc->sec4_sg + 1, chained); 1261 edesc->sec4_sg + 1);
1267 if (*next_buflen) { 1262 if (*next_buflen) {
1268 scatterwalk_map_and_copy(next_buf, req->src, 1263 scatterwalk_map_and_copy(next_buf, req->src,
1269 to_hash - *buflen, 1264 to_hash - *buflen,
@@ -1343,11 +1338,10 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
1343 int sec4_sg_bytes, sec4_sg_src_index, src_nents; 1338 int sec4_sg_bytes, sec4_sg_src_index, src_nents;
1344 int digestsize = crypto_ahash_digestsize(ahash); 1339 int digestsize = crypto_ahash_digestsize(ahash);
1345 struct ahash_edesc *edesc; 1340 struct ahash_edesc *edesc;
1346 bool chained = false;
1347 int sh_len; 1341 int sh_len;
1348 int ret = 0; 1342 int ret = 0;
1349 1343
1350 src_nents = __sg_count(req->src, req->nbytes, &chained); 1344 src_nents = sg_nents_for_len(req->src, req->nbytes);
1351 sec4_sg_src_index = 2; 1345 sec4_sg_src_index = 2;
1352 sec4_sg_bytes = (sec4_sg_src_index + src_nents) * 1346 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
1353 sizeof(struct sec4_sg_entry); 1347 sizeof(struct sec4_sg_entry);
@@ -1365,7 +1359,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
1365 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); 1359 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1366 1360
1367 edesc->src_nents = src_nents; 1361 edesc->src_nents = src_nents;
1368 edesc->chained = chained;
1369 edesc->sec4_sg_bytes = sec4_sg_bytes; 1362 edesc->sec4_sg_bytes = sec4_sg_bytes;
1370 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + 1363 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1371 DESC_JOB_IO_LEN; 1364 DESC_JOB_IO_LEN;
@@ -1374,8 +1367,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
1374 state->buf_dma, buflen, 1367 state->buf_dma, buflen,
1375 last_buflen); 1368 last_buflen);
1376 1369
1377 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1, 1370 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1);
1378 chained);
1379 1371
1380 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1372 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1381 sec4_sg_bytes, DMA_TO_DEVICE); 1373 sec4_sg_bytes, DMA_TO_DEVICE);
@@ -1429,7 +1421,6 @@ static int ahash_update_first(struct ahash_request *req)
1429 dma_addr_t src_dma; 1421 dma_addr_t src_dma;
1430 u32 options; 1422 u32 options;
1431 struct ahash_edesc *edesc; 1423 struct ahash_edesc *edesc;
1432 bool chained = false;
1433 int ret = 0; 1424 int ret = 0;
1434 int sh_len; 1425 int sh_len;
1435 1426
@@ -1438,10 +1429,8 @@ static int ahash_update_first(struct ahash_request *req)
1438 to_hash = req->nbytes - *next_buflen; 1429 to_hash = req->nbytes - *next_buflen;
1439 1430
1440 if (to_hash) { 1431 if (to_hash) {
1441 src_nents = sg_count(req->src, req->nbytes - (*next_buflen), 1432 src_nents = sg_count(req->src, req->nbytes - (*next_buflen));
1442 &chained); 1433 dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
1443 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1444 DMA_TO_DEVICE, chained);
1445 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); 1434 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1446 1435
1447 /* 1436 /*
@@ -1457,7 +1446,6 @@ static int ahash_update_first(struct ahash_request *req)
1457 } 1446 }
1458 1447
1459 edesc->src_nents = src_nents; 1448 edesc->src_nents = src_nents;
1460 edesc->chained = chained;
1461 edesc->sec4_sg_bytes = sec4_sg_bytes; 1449 edesc->sec4_sg_bytes = sec4_sg_bytes;
1462 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + 1450 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1463 DESC_JOB_IO_LEN; 1451 DESC_JOB_IO_LEN;
@@ -1574,25 +1562,42 @@ static int ahash_final(struct ahash_request *req)
1574 1562
1575static int ahash_export(struct ahash_request *req, void *out) 1563static int ahash_export(struct ahash_request *req, void *out)
1576{ 1564{
1577 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1578 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1579 struct caam_hash_state *state = ahash_request_ctx(req); 1565 struct caam_hash_state *state = ahash_request_ctx(req);
1566 struct caam_export_state *export = out;
1567 int len;
1568 u8 *buf;
1569
1570 if (state->current_buf) {
1571 buf = state->buf_1;
1572 len = state->buflen_1;
1573 } else {
1574 buf = state->buf_0;
1575 len = state->buflen_0;
1576 }
1577
1578 memcpy(export->buf, buf, len);
1579 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
1580 export->buflen = len;
1581 export->update = state->update;
1582 export->final = state->final;
1583 export->finup = state->finup;
1580 1584
1581 memcpy(out, ctx, sizeof(struct caam_hash_ctx));
1582 memcpy(out + sizeof(struct caam_hash_ctx), state,
1583 sizeof(struct caam_hash_state));
1584 return 0; 1585 return 0;
1585} 1586}
1586 1587
1587static int ahash_import(struct ahash_request *req, const void *in) 1588static int ahash_import(struct ahash_request *req, const void *in)
1588{ 1589{
1589 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1590 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1591 struct caam_hash_state *state = ahash_request_ctx(req); 1590 struct caam_hash_state *state = ahash_request_ctx(req);
1591 const struct caam_export_state *export = in;
1592
1593 memset(state, 0, sizeof(*state));
1594 memcpy(state->buf_0, export->buf, export->buflen);
1595 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
1596 state->buflen_0 = export->buflen;
1597 state->update = export->update;
1598 state->final = export->final;
1599 state->finup = export->finup;
1592 1600
1593 memcpy(ctx, in, sizeof(struct caam_hash_ctx));
1594 memcpy(state, in + sizeof(struct caam_hash_ctx),
1595 sizeof(struct caam_hash_state));
1596 return 0; 1601 return 0;
1597} 1602}
1598 1603
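The hunk above stops dumping the whole caam_hash_ctx plus caam_hash_state and instead copies only what a resumed request needs. Below is a minimal reconstruction of the export container implied by the field accesses in the diff (buf, caam_ctx, buflen, update, final, finup); the array bounds and the exact types of the update/final/finup members are assumptions, not taken from this page.

/*
 * Hypothetical sketch of struct caam_export_state, based only on the
 * fields referenced above.  CAAM_MAX_HASH_BLOCK_SIZE and MAX_CTX_LEN
 * are assumed bounds.
 */
struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];	/* unhashed tail of the message */
	u8 caam_ctx[MAX_CTX_LEN];		/* running digest held by CAAM */
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

Exporting then reduces to two memcpy()s plus the handler fields, as in the new ahash_export() above, instead of copying the full driver context out to the caller.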
@@ -1626,8 +1631,9 @@ static struct caam_hash_template driver_hash[] = {
1626 .setkey = ahash_setkey, 1631 .setkey = ahash_setkey,
1627 .halg = { 1632 .halg = {
1628 .digestsize = SHA1_DIGEST_SIZE, 1633 .digestsize = SHA1_DIGEST_SIZE,
1629 }, 1634 .statesize = sizeof(struct caam_export_state),
1630 }, 1635 },
1636 },
1631 .alg_type = OP_ALG_ALGSEL_SHA1, 1637 .alg_type = OP_ALG_ALGSEL_SHA1,
1632 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, 1638 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1633 }, { 1639 }, {
@@ -1647,8 +1653,9 @@ static struct caam_hash_template driver_hash[] = {
1647 .setkey = ahash_setkey, 1653 .setkey = ahash_setkey,
1648 .halg = { 1654 .halg = {
1649 .digestsize = SHA224_DIGEST_SIZE, 1655 .digestsize = SHA224_DIGEST_SIZE,
1650 }, 1656 .statesize = sizeof(struct caam_export_state),
1651 }, 1657 },
1658 },
1652 .alg_type = OP_ALG_ALGSEL_SHA224, 1659 .alg_type = OP_ALG_ALGSEL_SHA224,
1653 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, 1660 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1654 }, { 1661 }, {
@@ -1668,8 +1675,9 @@ static struct caam_hash_template driver_hash[] = {
1668 .setkey = ahash_setkey, 1675 .setkey = ahash_setkey,
1669 .halg = { 1676 .halg = {
1670 .digestsize = SHA256_DIGEST_SIZE, 1677 .digestsize = SHA256_DIGEST_SIZE,
1671 }, 1678 .statesize = sizeof(struct caam_export_state),
1672 }, 1679 },
1680 },
1673 .alg_type = OP_ALG_ALGSEL_SHA256, 1681 .alg_type = OP_ALG_ALGSEL_SHA256,
1674 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, 1682 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1675 }, { 1683 }, {
@@ -1689,8 +1697,9 @@ static struct caam_hash_template driver_hash[] = {
1689 .setkey = ahash_setkey, 1697 .setkey = ahash_setkey,
1690 .halg = { 1698 .halg = {
1691 .digestsize = SHA384_DIGEST_SIZE, 1699 .digestsize = SHA384_DIGEST_SIZE,
1692 }, 1700 .statesize = sizeof(struct caam_export_state),
1693 }, 1701 },
1702 },
1694 .alg_type = OP_ALG_ALGSEL_SHA384, 1703 .alg_type = OP_ALG_ALGSEL_SHA384,
1695 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, 1704 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1696 }, { 1705 }, {
@@ -1710,8 +1719,9 @@ static struct caam_hash_template driver_hash[] = {
1710 .setkey = ahash_setkey, 1719 .setkey = ahash_setkey,
1711 .halg = { 1720 .halg = {
1712 .digestsize = SHA512_DIGEST_SIZE, 1721 .digestsize = SHA512_DIGEST_SIZE,
1713 }, 1722 .statesize = sizeof(struct caam_export_state),
1714 }, 1723 },
1724 },
1715 .alg_type = OP_ALG_ALGSEL_SHA512, 1725 .alg_type = OP_ALG_ALGSEL_SHA512,
1716 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, 1726 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1717 }, { 1727 }, {
@@ -1731,8 +1741,9 @@ static struct caam_hash_template driver_hash[] = {
1731 .setkey = ahash_setkey, 1741 .setkey = ahash_setkey,
1732 .halg = { 1742 .halg = {
1733 .digestsize = MD5_DIGEST_SIZE, 1743 .digestsize = MD5_DIGEST_SIZE,
1734 }, 1744 .statesize = sizeof(struct caam_export_state),
1735 }, 1745 },
1746 },
1736 .alg_type = OP_ALG_ALGSEL_MD5, 1747 .alg_type = OP_ALG_ALGSEL_MD5,
1737 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, 1748 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1738 }, 1749 },
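Every template above now advertises .statesize = sizeof(struct caam_export_state). That value is what an ahash user sizes its save buffer with, so a sketch of the caller side may help; the function name is illustrative and error handling is trimmed.

#include <crypto/hash.h>
#include <linux/slab.h>

/* Snapshot an in-progress ahash and resume from it, sized by statesize. */
static int ahash_save_restore(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	void *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	int ret;

	if (!state)
		return -ENOMEM;

	ret = crypto_ahash_export(req, state);	/* fills at most statesize bytes */
	if (!ret)
		ret = crypto_ahash_import(req, state);

	kfree(state);
	return ret;
}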
@@ -1952,8 +1963,9 @@ static int __init caam_algapi_hash_init(void)
1952 1963
1953 err = crypto_register_ahash(&t_alg->ahash_alg); 1964 err = crypto_register_ahash(&t_alg->ahash_alg);
1954 if (err) { 1965 if (err) {
1955 pr_warn("%s alg registration failed\n", 1966 pr_warn("%s alg registration failed: %d\n",
1956 t_alg->ahash_alg.halg.base.cra_driver_name); 1967 t_alg->ahash_alg.halg.base.cra_driver_name,
1968 err);
1957 kfree(t_alg); 1969 kfree(t_alg);
1958 } else 1970 } else
1959 list_add_tail(&t_alg->entry, &hash_list); 1971 list_add_tail(&t_alg->entry, &hash_list);
@@ -1968,8 +1980,9 @@ static int __init caam_algapi_hash_init(void)
1968 1980
1969 err = crypto_register_ahash(&t_alg->ahash_alg); 1981 err = crypto_register_ahash(&t_alg->ahash_alg);
1970 if (err) { 1982 if (err) {
1971 pr_warn("%s alg registration failed\n", 1983 pr_warn("%s alg registration failed: %d\n",
1972 t_alg->ahash_alg.halg.base.cra_driver_name); 1984 t_alg->ahash_alg.halg.base.cra_driver_name,
1985 err);
1973 kfree(t_alg); 1986 kfree(t_alg);
1974 } else 1987 } else
1975 list_add_tail(&t_alg->entry, &hash_list); 1988 list_add_tail(&t_alg->entry, &hash_list);
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 983d663ef671..1e93c6af2275 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -1492,7 +1492,6 @@ struct sec4_sg_entry {
1492#define JUMP_JSL (1 << JUMP_JSL_SHIFT) 1492#define JUMP_JSL (1 << JUMP_JSL_SHIFT)
1493 1493
1494#define JUMP_TYPE_SHIFT 22 1494#define JUMP_TYPE_SHIFT 22
1495#define JUMP_TYPE_MASK (0x03 << JUMP_TYPE_SHIFT)
1496#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT) 1495#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT)
1497#define JUMP_TYPE_NONLOCAL (0x01 << JUMP_TYPE_SHIFT) 1496#define JUMP_TYPE_NONLOCAL (0x01 << JUMP_TYPE_SHIFT)
1498#define JUMP_TYPE_HALT (0x02 << JUMP_TYPE_SHIFT) 1497#define JUMP_TYPE_HALT (0x02 << JUMP_TYPE_SHIFT)
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 18cd6d1f5870..12ec6616e89d 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -69,81 +69,13 @@ static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
69 return sec4_sg_ptr - 1; 69 return sec4_sg_ptr - 1;
70} 70}
71 71
72/* count number of elements in scatterlist */
73static inline int __sg_count(struct scatterlist *sg_list, int nbytes,
74 bool *chained)
75{
76 struct scatterlist *sg = sg_list;
77 int sg_nents = 0;
78
79 while (nbytes > 0) {
80 sg_nents++;
81 nbytes -= sg->length;
82 if (!sg_is_last(sg) && (sg + 1)->length == 0)
83 *chained = true;
84 sg = sg_next(sg);
85 }
86
87 return sg_nents;
88}
89
90/* derive number of elements in scatterlist, but return 0 for 1 */ 72/* derive number of elements in scatterlist, but return 0 for 1 */
91static inline int sg_count(struct scatterlist *sg_list, int nbytes, 73static inline int sg_count(struct scatterlist *sg_list, int nbytes)
92 bool *chained)
93{ 74{
94 int sg_nents = __sg_count(sg_list, nbytes, chained); 75 int sg_nents = sg_nents_for_len(sg_list, nbytes);
95 76
96 if (likely(sg_nents == 1)) 77 if (likely(sg_nents == 1))
97 return 0; 78 return 0;
98 79
99 return sg_nents; 80 return sg_nents;
100} 81}
101
102static inline void dma_unmap_sg_chained(
103 struct device *dev, struct scatterlist *sg, unsigned int nents,
104 enum dma_data_direction dir, bool chained)
105{
106 if (unlikely(chained)) {
107 int i;
108 struct scatterlist *tsg = sg;
109
110 /*
111 * Use a local copy of the sg pointer to avoid moving the
112 * head of the list pointed to by sg as we walk the list.
113 */
114 for (i = 0; i < nents; i++) {
115 dma_unmap_sg(dev, tsg, 1, dir);
116 tsg = sg_next(tsg);
117 }
118 } else if (nents) {
119 dma_unmap_sg(dev, sg, nents, dir);
120 }
121}
122
123static inline int dma_map_sg_chained(
124 struct device *dev, struct scatterlist *sg, unsigned int nents,
125 enum dma_data_direction dir, bool chained)
126{
127 if (unlikely(chained)) {
128 int i;
129 struct scatterlist *tsg = sg;
130
131 /*
132 * Use a local copy of the sg pointer to avoid moving the
133 * head of the list pointed to by sg as we walk the list.
134 */
135 for (i = 0; i < nents; i++) {
136 if (!dma_map_sg(dev, tsg, 1, dir)) {
137 dma_unmap_sg_chained(dev, sg, i, dir,
138 chained);
139 nents = 0;
140 break;
141 }
142
143 tsg = sg_next(tsg);
144 }
145 } else
146 nents = dma_map_sg(dev, sg, nents, dir);
147
148 return nents;
149}
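With the open-coded __sg_count()/dma_map_sg_chained() helpers gone, callers lean on the core scatterlist API, which already walks chained lists. A sketch of the resulting caller shape, assuming nothing beyond sg_nents_for_len() and dma_map_sg(); the function name is illustrative.

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

/* Map a request's source list the way the reworked caam paths do. */
static int map_src(struct device *jrdev, struct scatterlist *src,
		   unsigned int nbytes)
{
	int src_nents = sg_nents_for_len(src, nbytes);

	if (src_nents < 0)
		return src_nents;	/* list shorter than nbytes */

	/* dma_map_sg() follows sg chaining itself; no 'chained' flag needed */
	if (!dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE))
		return -ENOMEM;

	/* ... build and enqueue the job descriptor ... */

	dma_unmap_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
	return 0;
}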
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index ae38f6b6cc10..3cd8481065f8 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -5,12 +5,12 @@ config CRYPTO_DEV_CCP_DD
5 select HW_RANDOM 5 select HW_RANDOM
6 help 6 help
7 Provides the interface to use the AMD Cryptographic Coprocessor 7 Provides the interface to use the AMD Cryptographic Coprocessor
8 which can be used to accelerate or offload encryption operations 8 which can be used to offload encryption operations such as SHA,
9 such as SHA, AES and more. If you choose 'M' here, this module 9 AES and more. If you choose 'M' here, this module will be called
10 will be called ccp. 10 ccp.
11 11
12config CRYPTO_DEV_CCP_CRYPTO 12config CRYPTO_DEV_CCP_CRYPTO
13 tristate "Encryption and hashing acceleration support" 13 tristate "Encryption and hashing offload support"
14 depends on CRYPTO_DEV_CCP_DD 14 depends on CRYPTO_DEV_CCP_DD
15 default m 15 default m
16 select CRYPTO_HASH 16 select CRYPTO_HASH
@@ -18,6 +18,5 @@ config CRYPTO_DEV_CCP_CRYPTO
18 select CRYPTO_AUTHENC 18 select CRYPTO_AUTHENC
19 help 19 help
20 Support for using the cryptographic API with the AMD Cryptographic 20 Support for using the cryptographic API with the AMD Cryptographic
21 Coprocessor. This module supports acceleration and offload of SHA 21 Coprocessor. This module supports offload of SHA and AES algorithms.
22 and AES algorithms. If you choose 'M' here, this module will be 22 If you choose 'M' here, this module will be called ccp_crypto.
23 called ccp_crypto.
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index ea7e8446956a..d89f20c04266 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -118,10 +118,19 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
118 if (rctx->buf_count) { 118 if (rctx->buf_count) {
119 sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count); 119 sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
120 sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg); 120 sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
121 if (!sg) {
122 ret = -EINVAL;
123 goto e_free;
124 }
121 } 125 }
122 126
123 if (nbytes) 127 if (nbytes) {
124 sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src); 128 sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
129 if (!sg) {
130 ret = -EINVAL;
131 goto e_free;
132 }
133 }
125 134
126 if (need_pad) { 135 if (need_pad) {
127 int pad_length = block_size - (len & (block_size - 1)); 136 int pad_length = block_size - (len & (block_size - 1));
@@ -132,6 +141,10 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
132 rctx->pad[0] = 0x80; 141 rctx->pad[0] = 0x80;
133 sg_init_one(&rctx->pad_sg, rctx->pad, pad_length); 142 sg_init_one(&rctx->pad_sg, rctx->pad, pad_length);
134 sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg); 143 sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
144 if (!sg) {
145 ret = -EINVAL;
146 goto e_free;
147 }
135 } 148 }
136 if (sg) { 149 if (sg) {
137 sg_mark_end(sg); 150 sg_mark_end(sg);
@@ -163,6 +176,11 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
163 ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); 176 ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
164 177
165 return ret; 178 return ret;
179
180e_free:
181 sg_free_table(&rctx->data_sg);
182
183 return ret;
166} 184}
167 185
168static int ccp_aes_cmac_init(struct ahash_request *req) 186static int ccp_aes_cmac_init(struct ahash_request *req)
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index bdec01ec608f..e0380e59c361 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -305,14 +305,16 @@ struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
305 for (sg = table->sgl; sg; sg = sg_next(sg)) 305 for (sg = table->sgl; sg; sg = sg_next(sg))
306 if (!sg_page(sg)) 306 if (!sg_page(sg))
307 break; 307 break;
308 BUG_ON(!sg); 308 if (WARN_ON(!sg))
309 return NULL;
309 310
310 for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) { 311 for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
311 sg_set_page(sg, sg_page(sg_add), sg_add->length, 312 sg_set_page(sg, sg_page(sg_add), sg_add->length,
312 sg_add->offset); 313 sg_add->offset);
313 sg_last = sg; 314 sg_last = sg;
314 } 315 }
315 BUG_ON(sg_add); 316 if (WARN_ON(sg_add))
317 return NULL;
316 318
317 return sg_last; 319 return sg_last;
318} 320}
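ccp_crypto_sg_table_add() used to BUG() when the preallocated table ran out of slots; it now warns and returns NULL, which is why every call site in the cmac and sha hunks gains a NULL check and an e_free label. A condensed sketch of that caller shape (the helper name build_data_sg is illustrative):

#include <linux/scatterlist.h>

static int build_data_sg(struct sg_table *table, struct scatterlist *buf_sg,
			 struct scatterlist *src)
{
	struct scatterlist *sg;
	int ret;

	sg = ccp_crypto_sg_table_add(table, buf_sg);
	if (!sg) {
		ret = -EINVAL;
		goto e_free;
	}

	sg = ccp_crypto_sg_table_add(table, src);
	if (!sg) {
		ret = -EINVAL;
		goto e_free;
	}

	sg_mark_end(sg);	/* terminate the assembled list */
	return 0;

e_free:
	sg_free_table(table);	/* release the partially built table */
	return ret;
}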
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 507b34e0cc19..d14b3f28e010 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -107,7 +107,15 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
107 107
108 sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count); 108 sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
109 sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg); 109 sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
110 if (!sg) {
111 ret = -EINVAL;
112 goto e_free;
113 }
110 sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src); 114 sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
115 if (!sg) {
116 ret = -EINVAL;
117 goto e_free;
118 }
111 sg_mark_end(sg); 119 sg_mark_end(sg);
112 120
113 sg = rctx->data_sg.sgl; 121 sg = rctx->data_sg.sgl;
@@ -142,6 +150,11 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
142 ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); 150 ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
143 151
144 return ret; 152 return ret;
153
154e_free:
155 sg_free_table(&rctx->data_sg);
156
157 return ret;
145} 158}
146 159
147static int ccp_sha_init(struct ahash_request *req) 160static int ccp_sha_init(struct ahash_request *req)
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index d09c6c4af4aa..c6e883b296a9 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -611,15 +611,16 @@ static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
611 1); 611 1);
612} 612}
613 613
614static void ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa, 614static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
615 struct scatterlist *sg, 615 struct scatterlist *sg,
616 unsigned int len, unsigned int se_len, 616 unsigned int len, unsigned int se_len,
617 bool sign_extend) 617 bool sign_extend)
618{ 618{
619 unsigned int nbytes, sg_offset, dm_offset, ksb_len, i; 619 unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
620 u8 buffer[CCP_REVERSE_BUF_SIZE]; 620 u8 buffer[CCP_REVERSE_BUF_SIZE];
621 621
622 BUG_ON(se_len > sizeof(buffer)); 622 if (WARN_ON(se_len > sizeof(buffer)))
623 return -EINVAL;
623 624
624 sg_offset = len; 625 sg_offset = len;
625 dm_offset = 0; 626 dm_offset = 0;
@@ -642,6 +643,8 @@ static void ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
642 se_len - ksb_len); 643 se_len - ksb_len);
643 } 644 }
644 } 645 }
646
647 return 0;
645} 648}
646 649
647static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa, 650static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
@@ -1606,8 +1609,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1606 if (ret) 1609 if (ret)
1607 goto e_ksb; 1610 goto e_ksb;
1608 1611
1609 ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, CCP_KSB_BYTES, 1612 ret = ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len,
1610 false); 1613 CCP_KSB_BYTES, false);
1614 if (ret)
1615 goto e_exp;
1611 ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key, 1616 ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key,
1612 CCP_PASSTHRU_BYTESWAP_NOOP); 1617 CCP_PASSTHRU_BYTESWAP_NOOP);
1613 if (ret) { 1618 if (ret) {
@@ -1623,11 +1628,15 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1623 if (ret) 1628 if (ret)
1624 goto e_exp; 1629 goto e_exp;
1625 1630
1626 ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, CCP_KSB_BYTES, 1631 ret = ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len,
1627 false); 1632 CCP_KSB_BYTES, false);
1633 if (ret)
1634 goto e_src;
1628 src.address += o_len; /* Adjust the address for the copy operation */ 1635 src.address += o_len; /* Adjust the address for the copy operation */
1629 ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, CCP_KSB_BYTES, 1636 ret = ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len,
1630 false); 1637 CCP_KSB_BYTES, false);
1638 if (ret)
1639 goto e_src;
1631 src.address -= o_len; /* Reset the address to original value */ 1640 src.address -= o_len; /* Reset the address to original value */
1632 1641
1633 /* Prepare the output area for the operation */ 1642 /* Prepare the output area for the operation */
@@ -1841,21 +1850,27 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1841 save = src.address; 1850 save = src.address;
1842 1851
1843 /* Copy the ECC modulus */ 1852 /* Copy the ECC modulus */
1844 ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len, 1853 ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
1845 CCP_ECC_OPERAND_SIZE, false); 1854 CCP_ECC_OPERAND_SIZE, false);
1855 if (ret)
1856 goto e_src;
1846 src.address += CCP_ECC_OPERAND_SIZE; 1857 src.address += CCP_ECC_OPERAND_SIZE;
1847 1858
1848 /* Copy the first operand */ 1859 /* Copy the first operand */
1849 ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1, 1860 ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
1850 ecc->u.mm.operand_1_len, 1861 ecc->u.mm.operand_1_len,
1851 CCP_ECC_OPERAND_SIZE, false); 1862 CCP_ECC_OPERAND_SIZE, false);
1863 if (ret)
1864 goto e_src;
1852 src.address += CCP_ECC_OPERAND_SIZE; 1865 src.address += CCP_ECC_OPERAND_SIZE;
1853 1866
1854 if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) { 1867 if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
1855 /* Copy the second operand */ 1868 /* Copy the second operand */
1856 ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2, 1869 ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
1857 ecc->u.mm.operand_2_len, 1870 ecc->u.mm.operand_2_len,
1858 CCP_ECC_OPERAND_SIZE, false); 1871 CCP_ECC_OPERAND_SIZE, false);
1872 if (ret)
1873 goto e_src;
1859 src.address += CCP_ECC_OPERAND_SIZE; 1874 src.address += CCP_ECC_OPERAND_SIZE;
1860 } 1875 }
1861 1876
@@ -1960,18 +1975,24 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1960 save = src.address; 1975 save = src.address;
1961 1976
1962 /* Copy the ECC modulus */ 1977 /* Copy the ECC modulus */
1963 ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len, 1978 ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
1964 CCP_ECC_OPERAND_SIZE, false); 1979 CCP_ECC_OPERAND_SIZE, false);
1980 if (ret)
1981 goto e_src;
1965 src.address += CCP_ECC_OPERAND_SIZE; 1982 src.address += CCP_ECC_OPERAND_SIZE;
1966 1983
1967 /* Copy the first point X and Y coordinate */ 1984 /* Copy the first point X and Y coordinate */
1968 ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x, 1985 ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
1969 ecc->u.pm.point_1.x_len, 1986 ecc->u.pm.point_1.x_len,
1970 CCP_ECC_OPERAND_SIZE, false); 1987 CCP_ECC_OPERAND_SIZE, false);
1988 if (ret)
1989 goto e_src;
1971 src.address += CCP_ECC_OPERAND_SIZE; 1990 src.address += CCP_ECC_OPERAND_SIZE;
1972 ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y, 1991 ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
1973 ecc->u.pm.point_1.y_len, 1992 ecc->u.pm.point_1.y_len,
1974 CCP_ECC_OPERAND_SIZE, false); 1993 CCP_ECC_OPERAND_SIZE, false);
1994 if (ret)
1995 goto e_src;
1975 src.address += CCP_ECC_OPERAND_SIZE; 1996 src.address += CCP_ECC_OPERAND_SIZE;
1976 1997
 1977	/* Set the first point Z coordinate to 1 */	1998	/* Set the first point Z coordinate to 1 */
@@ -1980,13 +2001,17 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1980 2001
1981 if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) { 2002 if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
1982 /* Copy the second point X and Y coordinate */ 2003 /* Copy the second point X and Y coordinate */
1983 ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x, 2004 ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
1984 ecc->u.pm.point_2.x_len, 2005 ecc->u.pm.point_2.x_len,
1985 CCP_ECC_OPERAND_SIZE, false); 2006 CCP_ECC_OPERAND_SIZE, false);
2007 if (ret)
2008 goto e_src;
1986 src.address += CCP_ECC_OPERAND_SIZE; 2009 src.address += CCP_ECC_OPERAND_SIZE;
1987 ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y, 2010 ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
1988 ecc->u.pm.point_2.y_len, 2011 ecc->u.pm.point_2.y_len,
1989 CCP_ECC_OPERAND_SIZE, false); 2012 CCP_ECC_OPERAND_SIZE, false);
2013 if (ret)
2014 goto e_src;
1990 src.address += CCP_ECC_OPERAND_SIZE; 2015 src.address += CCP_ECC_OPERAND_SIZE;
1991 2016
 1992	/* Set the second point Z coordinate to 1 */	2017	/* Set the second point Z coordinate to 1 */
@@ -1994,16 +2019,21 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1994 src.address += CCP_ECC_OPERAND_SIZE; 2019 src.address += CCP_ECC_OPERAND_SIZE;
1995 } else { 2020 } else {
1996 /* Copy the Domain "a" parameter */ 2021 /* Copy the Domain "a" parameter */
1997 ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a, 2022 ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a,
1998 ecc->u.pm.domain_a_len, 2023 ecc->u.pm.domain_a_len,
1999 CCP_ECC_OPERAND_SIZE, false); 2024 CCP_ECC_OPERAND_SIZE, false);
2025 if (ret)
2026 goto e_src;
2000 src.address += CCP_ECC_OPERAND_SIZE; 2027 src.address += CCP_ECC_OPERAND_SIZE;
2001 2028
2002 if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) { 2029 if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
2003 /* Copy the scalar value */ 2030 /* Copy the scalar value */
2004 ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar, 2031 ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar,
2005 ecc->u.pm.scalar_len, 2032 ecc->u.pm.scalar_len,
2006 CCP_ECC_OPERAND_SIZE, false); 2033 CCP_ECC_OPERAND_SIZE,
2034 false);
2035 if (ret)
2036 goto e_src;
2007 src.address += CCP_ECC_OPERAND_SIZE; 2037 src.address += CCP_ECC_OPERAND_SIZE;
2008 } 2038 }
2009 } 2039 }
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index af190d4795a8..6ade02f04f91 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -319,7 +319,7 @@ static const struct pci_device_id ccp_pci_table[] = {
319MODULE_DEVICE_TABLE(pci, ccp_pci_table); 319MODULE_DEVICE_TABLE(pci, ccp_pci_table);
320 320
321static struct pci_driver ccp_pci_driver = { 321static struct pci_driver ccp_pci_driver = {
322 .name = "AMD Cryptographic Coprocessor", 322 .name = "ccp",
323 .id_table = ccp_pci_table, 323 .id_table = ccp_pci_table,
324 .probe = ccp_pci_probe, 324 .probe = ccp_pci_probe,
325 .remove = ccp_pci_remove, 325 .remove = ccp_pci_remove,
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
index bb241c3ab6b9..8b923b7e9389 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -29,7 +29,6 @@
29#include "ccp-dev.h" 29#include "ccp-dev.h"
30 30
31struct ccp_platform { 31struct ccp_platform {
32 int use_acpi;
33 int coherent; 32 int coherent;
34}; 33};
35 34
@@ -95,7 +94,6 @@ static int ccp_platform_probe(struct platform_device *pdev)
95 struct ccp_device *ccp; 94 struct ccp_device *ccp;
96 struct ccp_platform *ccp_platform; 95 struct ccp_platform *ccp_platform;
97 struct device *dev = &pdev->dev; 96 struct device *dev = &pdev->dev;
98 struct acpi_device *adev = ACPI_COMPANION(dev);
99 struct resource *ior; 97 struct resource *ior;
100 int ret; 98 int ret;
101 99
@@ -112,8 +110,6 @@ static int ccp_platform_probe(struct platform_device *pdev)
112 ccp->get_irq = ccp_get_irqs; 110 ccp->get_irq = ccp_get_irqs;
113 ccp->free_irq = ccp_free_irqs; 111 ccp->free_irq = ccp_free_irqs;
114 112
115 ccp_platform->use_acpi = (!adev || acpi_disabled) ? 0 : 1;
116
117 ior = ccp_find_mmio_area(ccp); 113 ior = ccp_find_mmio_area(ccp);
118 ccp->io_map = devm_ioremap_resource(dev, ior); 114 ccp->io_map = devm_ioremap_resource(dev, ior);
119 if (IS_ERR(ccp->io_map)) { 115 if (IS_ERR(ccp->io_map)) {
@@ -229,7 +225,7 @@ MODULE_DEVICE_TABLE(of, ccp_of_match);
229 225
230static struct platform_driver ccp_platform_driver = { 226static struct platform_driver ccp_platform_driver = {
231 .driver = { 227 .driver = {
232 .name = "AMD Cryptographic Coprocessor", 228 .name = "ccp",
233#ifdef CONFIG_ACPI 229#ifdef CONFIG_ACPI
234 .acpi_match_table = ccp_acpi_match, 230 .acpi_match_table = ccp_acpi_match,
235#endif 231#endif
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index bc2a55bc35e4..bd985e72520b 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -174,19 +174,19 @@
174 174
175#define CESA_SA_DESC_MAC_DATA(offset) \ 175#define CESA_SA_DESC_MAC_DATA(offset) \
176 cpu_to_le32(CESA_SA_DATA_SRAM_OFFSET + (offset)) 176 cpu_to_le32(CESA_SA_DATA_SRAM_OFFSET + (offset))
177#define CESA_SA_DESC_MAC_DATA_MSK GENMASK(15, 0) 177#define CESA_SA_DESC_MAC_DATA_MSK cpu_to_le32(GENMASK(15, 0))
178 178
179#define CESA_SA_DESC_MAC_TOTAL_LEN(total_len) cpu_to_le32((total_len) << 16) 179#define CESA_SA_DESC_MAC_TOTAL_LEN(total_len) cpu_to_le32((total_len) << 16)
180#define CESA_SA_DESC_MAC_TOTAL_LEN_MSK GENMASK(31, 16) 180#define CESA_SA_DESC_MAC_TOTAL_LEN_MSK cpu_to_le32(GENMASK(31, 16))
181 181
182#define CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX 0xffff 182#define CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX 0xffff
183 183
184#define CESA_SA_DESC_MAC_DIGEST(offset) \ 184#define CESA_SA_DESC_MAC_DIGEST(offset) \
185 cpu_to_le32(CESA_SA_MAC_DIG_SRAM_OFFSET + (offset)) 185 cpu_to_le32(CESA_SA_MAC_DIG_SRAM_OFFSET + (offset))
186#define CESA_SA_DESC_MAC_DIGEST_MSK GENMASK(15, 0) 186#define CESA_SA_DESC_MAC_DIGEST_MSK cpu_to_le32(GENMASK(15, 0))
187 187
188#define CESA_SA_DESC_MAC_FRAG_LEN(frag_len) cpu_to_le32((frag_len) << 16) 188#define CESA_SA_DESC_MAC_FRAG_LEN(frag_len) cpu_to_le32((frag_len) << 16)
189#define CESA_SA_DESC_MAC_FRAG_LEN_MSK GENMASK(31, 16) 189#define CESA_SA_DESC_MAC_FRAG_LEN_MSK cpu_to_le32(GENMASK(31, 16))
190 190
191#define CESA_SA_DESC_MAC_IV(offset) \ 191#define CESA_SA_DESC_MAC_IV(offset) \
192 cpu_to_le32((CESA_SA_MAC_IIV_SRAM_OFFSET + (offset)) | \ 192 cpu_to_le32((CESA_SA_MAC_IIV_SRAM_OFFSET + (offset)) | \
@@ -219,14 +219,14 @@
219 * to be executed. 219 * to be executed.
220 */ 220 */
221struct mv_cesa_sec_accel_desc { 221struct mv_cesa_sec_accel_desc {
222 u32 config; 222 __le32 config;
223 u32 enc_p; 223 __le32 enc_p;
224 u32 enc_len; 224 __le32 enc_len;
225 u32 enc_key_p; 225 __le32 enc_key_p;
226 u32 enc_iv; 226 __le32 enc_iv;
227 u32 mac_src_p; 227 __le32 mac_src_p;
228 u32 mac_digest; 228 __le32 mac_digest;
229 u32 mac_iv; 229 __le32 mac_iv;
230}; 230};
231 231
232/** 232/**
@@ -293,11 +293,13 @@ struct mv_cesa_op_ctx {
293 * operation. 293 * operation.
294 */ 294 */
295struct mv_cesa_tdma_desc { 295struct mv_cesa_tdma_desc {
296 u32 byte_cnt; 296 __le32 byte_cnt;
297 u32 src; 297 __le32 src;
298 u32 dst; 298 __le32 dst;
299 u32 next_dma; 299 __le32 next_dma;
300 u32 cur_dma; 300
301 /* Software state */
302 dma_addr_t cur_dma;
301 struct mv_cesa_tdma_desc *next; 303 struct mv_cesa_tdma_desc *next;
302 union { 304 union {
303 struct mv_cesa_op_ctx *op; 305 struct mv_cesa_op_ctx *op;
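Declaring the security-accelerator and TDMA descriptor fields as __le32 lets sparse flag any store that skips a byte swap, which is also why the _MSK constants above are now wrapped in cpu_to_le32(). A minimal sketch of filling such a descriptor, assuming only the struct layout shown in the hunk above; the function name is illustrative.

#include <linux/kernel.h>
#include <asm/byteorder.h>

/* Illustrative fill of a TDMA descriptor whose hardware fields are __le32. */
static void fill_tdma_desc(struct mv_cesa_tdma_desc *desc,
			   dma_addr_t src, dma_addr_t dst, u32 size)
{
	desc->byte_cnt = cpu_to_le32(size);
	desc->src = cpu_to_le32(lower_32_bits(src));
	desc->dst = cpu_to_le32(lower_32_bits(dst));
	desc->next_dma = 0;	/* zero is endian-neutral */
	/* cur_dma stays dma_addr_t: CPU-side bookkeeping the engine never reads */
}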
@@ -612,7 +614,8 @@ struct mv_cesa_ahash_req {
612 u64 len; 614 u64 len;
613 int src_nents; 615 int src_nents;
614 bool last_req; 616 bool last_req;
615 __be32 state[8]; 617 bool algo_le;
618 u32 state[8];
616}; 619};
617 620
618/* CESA functions */ 621/* CESA functions */
@@ -626,7 +629,7 @@ static inline void mv_cesa_update_op_cfg(struct mv_cesa_op_ctx *op,
626 op->desc.config |= cpu_to_le32(cfg); 629 op->desc.config |= cpu_to_le32(cfg);
627} 630}
628 631
629static inline u32 mv_cesa_get_op_cfg(struct mv_cesa_op_ctx *op) 632static inline u32 mv_cesa_get_op_cfg(const struct mv_cesa_op_ctx *op)
630{ 633{
631 return le32_to_cpu(op->desc.config); 634 return le32_to_cpu(op->desc.config);
632} 635}
@@ -676,7 +679,7 @@ static inline void mv_cesa_set_int_mask(struct mv_cesa_engine *engine,
676 if (int_mask == engine->int_mask) 679 if (int_mask == engine->int_mask)
677 return; 680 return;
678 681
679 writel(int_mask, engine->regs + CESA_SA_INT_MSK); 682 writel_relaxed(int_mask, engine->regs + CESA_SA_INT_MSK);
680 engine->int_mask = int_mask; 683 engine->int_mask = int_mask;
681} 684}
682 685
@@ -685,6 +688,12 @@ static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine)
685 return engine->int_mask; 688 return engine->int_mask;
686} 689}
687 690
691static inline bool mv_cesa_mac_op_is_first_frag(const struct mv_cesa_op_ctx *op)
692{
693 return (mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK) ==
694 CESA_SA_DESC_CFG_FIRST_FRAG;
695}
696
688int mv_cesa_queue_req(struct crypto_async_request *req); 697int mv_cesa_queue_req(struct crypto_async_request *req);
689 698
690/* 699/*
@@ -789,10 +798,8 @@ int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain,
789 dma_addr_t dst, dma_addr_t src, u32 size, 798 dma_addr_t dst, dma_addr_t src, u32 size,
790 u32 flags, gfp_t gfp_flags); 799 u32 flags, gfp_t gfp_flags);
791 800
792int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, 801int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags);
793 u32 flags); 802int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags);
794
795int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, u32 flags);
796 803
797int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain, 804int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
798 struct mv_cesa_dma_iter *dma_iter, 805 struct mv_cesa_dma_iter *dma_iter,
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 3df2f4e7adb2..6edae64bb387 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -98,14 +98,14 @@ static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
98 98
99 /* FIXME: only update enc_len field */ 99 /* FIXME: only update enc_len field */
100 if (!sreq->skip_ctx) { 100 if (!sreq->skip_ctx) {
101 memcpy(engine->sram, &sreq->op, sizeof(sreq->op)); 101 memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
102 sreq->skip_ctx = true; 102 sreq->skip_ctx = true;
103 } else { 103 } else {
104 memcpy(engine->sram, &sreq->op, sizeof(sreq->op.desc)); 104 memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
105 } 105 }
106 106
107 mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE); 107 mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
108 writel(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG); 108 writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
109 writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); 109 writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
110} 110}
111 111
@@ -145,8 +145,9 @@ static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
145 if (ret) 145 if (ret)
146 return ret; 146 return ret;
147 147
148 memcpy(ablkreq->info, engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET, 148 memcpy_fromio(ablkreq->info,
149 crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq))); 149 engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
150 crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq)));
150 151
151 return 0; 152 return 0;
152} 153}
@@ -181,7 +182,7 @@ mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
181 sreq->size = 0; 182 sreq->size = 0;
182 sreq->offset = 0; 183 sreq->offset = 0;
183 mv_cesa_adjust_op(engine, &sreq->op); 184 mv_cesa_adjust_op(engine, &sreq->op);
184 memcpy(engine->sram, &sreq->op, sizeof(sreq->op)); 185 memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
185} 186}
186 187
187static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req, 188static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
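The SRAM behind engine->sram comes from an ioremap()ed resource, so plain memcpy() on it was never strictly correct; the hunks above (and the hash.c ones below) switch to the __iomem-aware copies and demote register writes that need no DMA ordering to writel_relaxed(). A hedged sketch of the resulting step sequence, reusing only register names that appear in this diff; the function name is illustrative.

#include <linux/io.h>

/* Illustrative: push an operation into engine SRAM and start the accelerator. */
static void cesa_kick(void __iomem *sram, void __iomem *regs,
		      const void *op, size_t len)
{
	memcpy_toio(sram, op, len);	/* SRAM is __iomem, not ordinary RAM */
	writel_relaxed(CESA_SA_CFG_PARA_DIS, regs + CESA_SA_CFG);
	/* the doorbell keeps the fully ordered writel() */
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, regs + CESA_SA_CMD);
}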
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index e8d0d7128137..6ec55b4a087b 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -27,10 +27,10 @@ mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
27 struct ahash_request *req) 27 struct ahash_request *req)
28{ 28{
29 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 29 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
30 unsigned int len = req->nbytes; 30 unsigned int len = req->nbytes + creq->cache_ptr;
31 31
32 if (!creq->last_req) 32 if (!creq->last_req)
33 len = (len + creq->cache_ptr) & ~CESA_HASH_BLOCK_SIZE_MSK; 33 len &= ~CESA_HASH_BLOCK_SIZE_MSK;
34 34
35 mv_cesa_req_dma_iter_init(&iter->base, len); 35 mv_cesa_req_dma_iter_init(&iter->base, len);
36 mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE); 36 mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
@@ -179,7 +179,6 @@ static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
179 179
180static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf) 180static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
181{ 181{
182 __be64 bits = cpu_to_be64(creq->len << 3);
183 unsigned int index, padlen; 182 unsigned int index, padlen;
184 183
185 buf[0] = 0x80; 184 buf[0] = 0x80;
@@ -187,7 +186,14 @@ static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
187 index = creq->len & CESA_HASH_BLOCK_SIZE_MSK; 186 index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
188 padlen = mv_cesa_ahash_pad_len(creq); 187 padlen = mv_cesa_ahash_pad_len(creq);
189 memset(buf + 1, 0, padlen - 1); 188 memset(buf + 1, 0, padlen - 1);
190 memcpy(buf + padlen, &bits, sizeof(bits)); 189
190 if (creq->algo_le) {
191 __le64 bits = cpu_to_le64(creq->len << 3);
192 memcpy(buf + padlen, &bits, sizeof(bits));
193 } else {
194 __be64 bits = cpu_to_be64(creq->len << 3);
195 memcpy(buf + padlen, &bits, sizeof(bits));
196 }
191 197
192 return padlen + 8; 198 return padlen + 8;
193} 199}
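mv_cesa_ahash_pad_req() now emits the classic Merkle-Damgard trailer with the bit count in whichever byte order the algorithm expects: little-endian for MD5, big-endian for SHA-1/SHA-256, selected by the new algo_le flag. The pad-length helper is not shown on this page, so the sketch below uses the standard calculation as an assumption.

#include <linux/string.h>
#include <asm/byteorder.h>

#define HASH_BLOCK_SIZE	64	/* MD5/SHA-1/SHA-256 block size */

/* Assumed pad length: land exactly 8 bytes short of a block boundary. */
static unsigned int pad_len(u64 msg_len)
{
	unsigned int index = msg_len & (HASH_BLOCK_SIZE - 1);

	return (index < 56) ? (56 - index) : (56 + HASH_BLOCK_SIZE - index);
}

static unsigned int pad_req(u64 msg_len, bool algo_le, u8 *buf)
{
	unsigned int padlen = pad_len(msg_len);

	buf[0] = 0x80;			/* mandatory leading 1 bit */
	memset(buf + 1, 0, padlen - 1);

	if (algo_le) {			/* MD5 counts bits little-endian */
		__le64 bits = cpu_to_le64(msg_len << 3);
		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {			/* the SHAs count big-endian */
		__be64 bits = cpu_to_be64(msg_len << 3);
		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}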
@@ -203,8 +209,8 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
203 size_t len; 209 size_t len;
204 210
205 if (creq->cache_ptr) 211 if (creq->cache_ptr)
206 memcpy(engine->sram + CESA_SA_DATA_SRAM_OFFSET, creq->cache, 212 memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
207 creq->cache_ptr); 213 creq->cache, creq->cache_ptr);
208 214
209 len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset, 215 len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
210 CESA_SA_SRAM_PAYLOAD_SIZE); 216 CESA_SA_SRAM_PAYLOAD_SIZE);
@@ -245,10 +251,10 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
245 if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) { 251 if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
246 len &= CESA_HASH_BLOCK_SIZE_MSK; 252 len &= CESA_HASH_BLOCK_SIZE_MSK;
247 new_cache_ptr = 64 - trailerlen; 253 new_cache_ptr = 64 - trailerlen;
248 memcpy(creq->cache, 254 memcpy_fromio(creq->cache,
249 engine->sram + 255 engine->sram +
250 CESA_SA_DATA_SRAM_OFFSET + len, 256 CESA_SA_DATA_SRAM_OFFSET + len,
251 new_cache_ptr); 257 new_cache_ptr);
252 } else { 258 } else {
253 len += mv_cesa_ahash_pad_req(creq, 259 len += mv_cesa_ahash_pad_req(creq,
254 engine->sram + len + 260 engine->sram + len +
@@ -266,7 +272,7 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
266 mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK); 272 mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);
267 273
268 /* FIXME: only update enc_len field */ 274 /* FIXME: only update enc_len field */
269 memcpy(engine->sram, op, sizeof(*op)); 275 memcpy_toio(engine->sram, op, sizeof(*op));
270 276
271 if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG) 277 if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
272 mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG, 278 mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
@@ -275,7 +281,7 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
275 creq->cache_ptr = new_cache_ptr; 281 creq->cache_ptr = new_cache_ptr;
276 282
277 mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE); 283 mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
278 writel(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG); 284 writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
279 writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); 285 writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
280} 286}
281 287
@@ -306,7 +312,7 @@ static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
306 312
307 sreq->offset = 0; 313 sreq->offset = 0;
308 mv_cesa_adjust_op(engine, &creq->op_tmpl); 314 mv_cesa_adjust_op(engine, &creq->op_tmpl);
309 memcpy(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl)); 315 memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
310} 316}
311 317
312static void mv_cesa_ahash_step(struct crypto_async_request *req) 318static void mv_cesa_ahash_step(struct crypto_async_request *req)
@@ -338,7 +344,7 @@ static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
338 344
339 digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq)); 345 digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
340 for (i = 0; i < digsize / 4; i++) 346 for (i = 0; i < digsize / 4; i++)
341 creq->state[i] = readl(engine->regs + CESA_IVDIG(i)); 347 creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));
342 348
343 if (creq->cache_ptr) 349 if (creq->cache_ptr)
344 sg_pcopy_to_buffer(ahashreq->src, creq->src_nents, 350 sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
@@ -347,18 +353,21 @@ static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
347 ahashreq->nbytes - creq->cache_ptr); 353 ahashreq->nbytes - creq->cache_ptr);
348 354
349 if (creq->last_req) { 355 if (creq->last_req) {
350 for (i = 0; i < digsize / 4; i++) { 356 /*
351 /* 357 * Hardware's MD5 digest is in little endian format, but
352 * Hardware provides MD5 digest in a different 358 * SHA in big endian format
353 * endianness than SHA-1 and SHA-256 ones. 359 */
354 */ 360 if (creq->algo_le) {
355 if (digsize == MD5_DIGEST_SIZE) 361 __le32 *result = (void *)ahashreq->result;
356 creq->state[i] = cpu_to_le32(creq->state[i]); 362
357 else 363 for (i = 0; i < digsize / 4; i++)
358 creq->state[i] = cpu_to_be32(creq->state[i]); 364 result[i] = cpu_to_le32(creq->state[i]);
359 } 365 } else {
366 __be32 *result = (void *)ahashreq->result;
360 367
361 memcpy(ahashreq->result, creq->state, digsize); 368 for (i = 0; i < digsize / 4; i++)
369 result[i] = cpu_to_be32(creq->state[i]);
370 }
362 } 371 }
363 372
364 return ret; 373 return ret;
@@ -381,8 +390,7 @@ static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
381 390
382 digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq)); 391 digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
383 for (i = 0; i < digsize / 4; i++) 392 for (i = 0; i < digsize / 4; i++)
384 writel(creq->state[i], 393 writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
385 engine->regs + CESA_IVDIG(i));
386} 394}
387 395
388static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req) 396static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
@@ -404,7 +412,7 @@ static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
404}; 412};
405 413
406static int mv_cesa_ahash_init(struct ahash_request *req, 414static int mv_cesa_ahash_init(struct ahash_request *req,
407 struct mv_cesa_op_ctx *tmpl) 415 struct mv_cesa_op_ctx *tmpl, bool algo_le)
408{ 416{
409 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 417 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
410 418
@@ -418,6 +426,7 @@ static int mv_cesa_ahash_init(struct ahash_request *req,
418 mv_cesa_set_mac_op_frag_len(tmpl, 0); 426 mv_cesa_set_mac_op_frag_len(tmpl, 0);
419 creq->op_tmpl = *tmpl; 427 creq->op_tmpl = *tmpl;
420 creq->len = 0; 428 creq->len = 0;
429 creq->algo_le = algo_le;
421 430
422 return 0; 431 return 0;
423} 432}
@@ -462,145 +471,114 @@ static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
462} 471}
463 472
464static struct mv_cesa_op_ctx * 473static struct mv_cesa_op_ctx *
465mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain, 474mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
466 struct mv_cesa_ahash_dma_iter *dma_iter, 475 struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
467 struct mv_cesa_ahash_req *creq, 476 gfp_t flags)
468 gfp_t flags)
469{ 477{
470 struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma; 478 struct mv_cesa_op_ctx *op;
471 struct mv_cesa_op_ctx *op = NULL;
472 int ret; 479 int ret;
473 480
474 if (!creq->cache_ptr) 481 op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
475 return NULL; 482 if (IS_ERR(op))
483 return op;
476 484
477 ret = mv_cesa_dma_add_data_transfer(chain, 485 /* Set the operation block fragment length. */
478 CESA_SA_DATA_SRAM_OFFSET, 486 mv_cesa_set_mac_op_frag_len(op, frag_len);
479 ahashdreq->cache_dma, 487
480 creq->cache_ptr, 488 /* Append dummy desc to launch operation */
481 CESA_TDMA_DST_IN_SRAM, 489 ret = mv_cesa_dma_add_dummy_launch(chain, flags);
482 flags);
483 if (ret) 490 if (ret)
484 return ERR_PTR(ret); 491 return ERR_PTR(ret);
485 492
486 if (!dma_iter->base.op_len) { 493 if (mv_cesa_mac_op_is_first_frag(tmpl))
487 op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags); 494 mv_cesa_update_op_cfg(tmpl,
488 if (IS_ERR(op)) 495 CESA_SA_DESC_CFG_MID_FRAG,
489 return op; 496 CESA_SA_DESC_CFG_FRAG_MSK);
490
491 mv_cesa_set_mac_op_frag_len(op, creq->cache_ptr);
492
493 /* Add dummy desc to launch crypto operation */
494 ret = mv_cesa_dma_add_dummy_launch(chain, flags);
495 if (ret)
496 return ERR_PTR(ret);
497 }
498 497
499 return op; 498 return op;
500} 499}
501 500
502static struct mv_cesa_op_ctx * 501static int
503mv_cesa_ahash_dma_add_data(struct mv_cesa_tdma_chain *chain, 502mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
504 struct mv_cesa_ahash_dma_iter *dma_iter, 503 struct mv_cesa_ahash_dma_iter *dma_iter,
505 struct mv_cesa_ahash_req *creq, 504 struct mv_cesa_ahash_req *creq,
506 gfp_t flags) 505 gfp_t flags)
507{ 506{
508 struct mv_cesa_op_ctx *op; 507 struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
509 int ret;
510
511 op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags);
512 if (IS_ERR(op))
513 return op;
514
515 mv_cesa_set_mac_op_frag_len(op, dma_iter->base.op_len);
516
517 if ((mv_cesa_get_op_cfg(&creq->op_tmpl) & CESA_SA_DESC_CFG_FRAG_MSK) ==
518 CESA_SA_DESC_CFG_FIRST_FRAG)
519 mv_cesa_update_op_cfg(&creq->op_tmpl,
520 CESA_SA_DESC_CFG_MID_FRAG,
521 CESA_SA_DESC_CFG_FRAG_MSK);
522
523 /* Add input transfers */
524 ret = mv_cesa_dma_add_op_transfers(chain, &dma_iter->base,
525 &dma_iter->src, flags);
526 if (ret)
527 return ERR_PTR(ret);
528 508
529 /* Add dummy desc to launch crypto operation */ 509 if (!creq->cache_ptr)
530 ret = mv_cesa_dma_add_dummy_launch(chain, flags); 510 return 0;
531 if (ret)
532 return ERR_PTR(ret);
533 511
534 return op; 512 return mv_cesa_dma_add_data_transfer(chain,
513 CESA_SA_DATA_SRAM_OFFSET,
514 ahashdreq->cache_dma,
515 creq->cache_ptr,
516 CESA_TDMA_DST_IN_SRAM,
517 flags);
535} 518}
536 519
537static struct mv_cesa_op_ctx * 520static struct mv_cesa_op_ctx *
538mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain, 521mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
539 struct mv_cesa_ahash_dma_iter *dma_iter, 522 struct mv_cesa_ahash_dma_iter *dma_iter,
540 struct mv_cesa_ahash_req *creq, 523 struct mv_cesa_ahash_req *creq,
541 struct mv_cesa_op_ctx *op, 524 unsigned int frag_len, gfp_t flags)
542 gfp_t flags)
543{ 525{
544 struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma; 526 struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
545 unsigned int len, trailerlen, padoff = 0; 527 unsigned int len, trailerlen, padoff = 0;
528 struct mv_cesa_op_ctx *op;
546 int ret; 529 int ret;
547 530
548 if (!creq->last_req) 531 /*
549 return op; 532 * If the transfer is smaller than our maximum length, and we have
550 533 * some data outstanding, we can ask the engine to finish the hash.
551 if (op && creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) { 534 */
552 u32 frag = CESA_SA_DESC_CFG_NOT_FRAG; 535 if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
553 536 op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
554 if ((mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK) != 537 flags);
555 CESA_SA_DESC_CFG_FIRST_FRAG) 538 if (IS_ERR(op))
556 frag = CESA_SA_DESC_CFG_LAST_FRAG; 539 return op;
557 540
558 mv_cesa_update_op_cfg(op, frag, CESA_SA_DESC_CFG_FRAG_MSK); 541 mv_cesa_set_mac_op_total_len(op, creq->len);
542 mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
543 CESA_SA_DESC_CFG_NOT_FRAG :
544 CESA_SA_DESC_CFG_LAST_FRAG,
545 CESA_SA_DESC_CFG_FRAG_MSK);
559 546
560 return op; 547 return op;
561 } 548 }
562 549
550 /*
551 * The request is longer than the engine can handle, or we have
552 * no data outstanding. Manually generate the padding, adding it
553 * as a "mid" fragment.
554 */
563 ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags); 555 ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
564 if (ret) 556 if (ret)
565 return ERR_PTR(ret); 557 return ERR_PTR(ret);
566 558
567 trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding); 559 trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);
568 560
569 if (op) { 561 len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
570 len = min(CESA_SA_SRAM_PAYLOAD_SIZE - dma_iter->base.op_len, 562 if (len) {
571 trailerlen); 563 ret = mv_cesa_dma_add_data_transfer(chain,
572 if (len) {
573 ret = mv_cesa_dma_add_data_transfer(chain,
574 CESA_SA_DATA_SRAM_OFFSET + 564 CESA_SA_DATA_SRAM_OFFSET +
575 dma_iter->base.op_len, 565 frag_len,
576 ahashdreq->padding_dma, 566 ahashdreq->padding_dma,
577 len, CESA_TDMA_DST_IN_SRAM, 567 len, CESA_TDMA_DST_IN_SRAM,
578 flags); 568 flags);
579 if (ret) 569 if (ret)
580 return ERR_PTR(ret); 570 return ERR_PTR(ret);
581
582 mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
583 CESA_SA_DESC_CFG_FRAG_MSK);
584 mv_cesa_set_mac_op_frag_len(op,
585 dma_iter->base.op_len + len);
586 padoff += len;
587 }
588 }
589
590 if (padoff >= trailerlen)
591 return op;
592 571
593 if ((mv_cesa_get_op_cfg(&creq->op_tmpl) & CESA_SA_DESC_CFG_FRAG_MSK) != 572 op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
594 CESA_SA_DESC_CFG_FIRST_FRAG) 573 flags);
595 mv_cesa_update_op_cfg(&creq->op_tmpl, 574 if (IS_ERR(op))
596 CESA_SA_DESC_CFG_MID_FRAG, 575 return op;
597 CESA_SA_DESC_CFG_FRAG_MSK);
598 576
599 op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags); 577 if (len == trailerlen)
600 if (IS_ERR(op)) 578 return op;
601 return op;
602 579
603 mv_cesa_set_mac_op_frag_len(op, trailerlen - padoff); 580 padoff += len;
581 }
604 582
605 ret = mv_cesa_dma_add_data_transfer(chain, 583 ret = mv_cesa_dma_add_data_transfer(chain,
606 CESA_SA_DATA_SRAM_OFFSET, 584 CESA_SA_DATA_SRAM_OFFSET,
@@ -612,12 +590,8 @@ mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
612 if (ret) 590 if (ret)
613 return ERR_PTR(ret); 591 return ERR_PTR(ret);
614 592
615 /* Add dummy desc to launch crypto operation */ 593 return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
616 ret = mv_cesa_dma_add_dummy_launch(chain, flags); 594 flags);
617 if (ret)
618 return ERR_PTR(ret);
619
620 return op;
621} 595}
622 596
623static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) 597static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
@@ -627,9 +601,9 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
627 GFP_KERNEL : GFP_ATOMIC; 601 GFP_KERNEL : GFP_ATOMIC;
628 struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma; 602 struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
629 struct mv_cesa_tdma_req *dreq = &ahashdreq->base; 603 struct mv_cesa_tdma_req *dreq = &ahashdreq->base;
630 struct mv_cesa_tdma_chain chain;
631 struct mv_cesa_ahash_dma_iter iter; 604 struct mv_cesa_ahash_dma_iter iter;
632 struct mv_cesa_op_ctx *op = NULL; 605 struct mv_cesa_op_ctx *op = NULL;
606 unsigned int frag_len;
633 int ret; 607 int ret;
634 608
635 dreq->chain.first = NULL; 609 dreq->chain.first = NULL;
@@ -644,29 +618,59 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
644 } 618 }
645 } 619 }
646 620
647 mv_cesa_tdma_desc_iter_init(&chain); 621 mv_cesa_tdma_desc_iter_init(&dreq->chain);
648 mv_cesa_ahash_req_iter_init(&iter, req); 622 mv_cesa_ahash_req_iter_init(&iter, req);
649 623
650 op = mv_cesa_ahash_dma_add_cache(&chain, &iter, 624 /*
651 creq, flags); 625 * Add the cache (left-over data from a previous block) first.
652 if (IS_ERR(op)) { 626 * This will never overflow the SRAM size.
653 ret = PTR_ERR(op); 627 */
628 ret = mv_cesa_ahash_dma_add_cache(&dreq->chain, &iter, creq, flags);
629 if (ret)
654 goto err_free_tdma; 630 goto err_free_tdma;
655 }
656 631
657 do { 632 if (iter.src.sg) {
658 if (!iter.base.op_len) 633 /*
659 break; 634 * Add all the new data, inserting an operation block and
635 * launch command between each full SRAM block-worth of
636 * data. We intentionally do not add the final op block.
637 */
638 while (true) {
639 ret = mv_cesa_dma_add_op_transfers(&dreq->chain,
640 &iter.base,
641 &iter.src, flags);
642 if (ret)
643 goto err_free_tdma;
644
645 frag_len = iter.base.op_len;
660 646
661 op = mv_cesa_ahash_dma_add_data(&chain, &iter, 647 if (!mv_cesa_ahash_req_iter_next_op(&iter))
662 creq, flags); 648 break;
663 if (IS_ERR(op)) { 649
664 ret = PTR_ERR(op); 650 op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl,
665 goto err_free_tdma; 651 frag_len, flags);
652 if (IS_ERR(op)) {
653 ret = PTR_ERR(op);
654 goto err_free_tdma;
655 }
666 } 656 }
667 } while (mv_cesa_ahash_req_iter_next_op(&iter)); 657 } else {
658 /* Account for the data that was in the cache. */
659 frag_len = iter.base.op_len;
660 }
661
662 /*
663 * At this point, frag_len indicates whether we have any data
664 * outstanding which needs an operation. Queue up the final
 665	 * operation, which depends on whether this is the final request.
666 */
667 if (creq->last_req)
668 op = mv_cesa_ahash_dma_last_req(&dreq->chain, &iter, creq,
669 frag_len, flags);
670 else if (frag_len)
671 op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl,
672 frag_len, flags);
668 673
669 op = mv_cesa_ahash_dma_last_req(&chain, &iter, creq, op, flags);
670 if (IS_ERR(op)) { 674 if (IS_ERR(op)) {
671 ret = PTR_ERR(op); 675 ret = PTR_ERR(op);
672 goto err_free_tdma; 676 goto err_free_tdma;
@@ -674,7 +678,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
674 678
675 if (op) { 679 if (op) {
676 /* Add dummy desc to wait for crypto operation end */ 680 /* Add dummy desc to wait for crypto operation end */
677 ret = mv_cesa_dma_add_dummy_end(&chain, flags); 681 ret = mv_cesa_dma_add_dummy_end(&dreq->chain, flags);
678 if (ret) 682 if (ret)
679 goto err_free_tdma; 683 goto err_free_tdma;
680 } 684 }
@@ -685,8 +689,6 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
685 else 689 else
686 creq->cache_ptr = 0; 690 creq->cache_ptr = 0;
687 691
688 dreq->chain = chain;
689
690 return 0; 692 return 0;
691 693
692err_free_tdma: 694err_free_tdma:
@@ -795,47 +797,50 @@ static int mv_cesa_ahash_finup(struct ahash_request *req)
795 return ret; 797 return ret;
796} 798}
797 799
798static int mv_cesa_md5_init(struct ahash_request *req) 800static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
801 u64 *len, void *cache)
799{ 802{
800 struct mv_cesa_op_ctx tmpl;
801
802 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
803
804 mv_cesa_ahash_init(req, &tmpl);
805
806 return 0;
807}
808
809static int mv_cesa_md5_export(struct ahash_request *req, void *out)
810{
811 struct md5_state *out_state = out;
812 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 803 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
813 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 804 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
814 unsigned int digsize = crypto_ahash_digestsize(ahash); 805 unsigned int digsize = crypto_ahash_digestsize(ahash);
806 unsigned int blocksize;
807
808 blocksize = crypto_ahash_blocksize(ahash);
815 809
816 out_state->byte_count = creq->len; 810 *len = creq->len;
817 memcpy(out_state->hash, creq->state, digsize); 811 memcpy(hash, creq->state, digsize);
818 memset(out_state->block, 0, sizeof(out_state->block)); 812 memset(cache, 0, blocksize);
819 if (creq->cache) 813 if (creq->cache)
820 memcpy(out_state->block, creq->cache, creq->cache_ptr); 814 memcpy(cache, creq->cache, creq->cache_ptr);
821 815
822 return 0; 816 return 0;
823} 817}
824 818
825static int mv_cesa_md5_import(struct ahash_request *req, const void *in) 819static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
820 u64 len, const void *cache)
826{ 821{
827 const struct md5_state *in_state = in;
828 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 822 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
829 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 823 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
830 unsigned int digsize = crypto_ahash_digestsize(ahash); 824 unsigned int digsize = crypto_ahash_digestsize(ahash);
825 unsigned int blocksize;
831 unsigned int cache_ptr; 826 unsigned int cache_ptr;
832 int ret; 827 int ret;
833 828
834 creq->len = in_state->byte_count; 829 ret = crypto_ahash_init(req);
835 memcpy(creq->state, in_state->hash, digsize); 830 if (ret)
831 return ret;
832
833 blocksize = crypto_ahash_blocksize(ahash);
834 if (len >= blocksize)
835 mv_cesa_update_op_cfg(&creq->op_tmpl,
836 CESA_SA_DESC_CFG_MID_FRAG,
837 CESA_SA_DESC_CFG_FRAG_MSK);
838
839 creq->len = len;
840 memcpy(creq->state, hash, digsize);
836 creq->cache_ptr = 0; 841 creq->cache_ptr = 0;
837 842
838 cache_ptr = creq->len % sizeof(in_state->block); 843 cache_ptr = do_div(len, blocksize);
839 if (!cache_ptr) 844 if (!cache_ptr)
840 return 0; 845 return 0;
841 846
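The shared import helper recovers the cached-byte count with do_div(), the required way to divide a u64 on 32-bit kernels: it divides its first argument in place and returns the remainder. A tiny illustration (the helper name is made up):

#include <asm/div64.h>

/* Bytes past the last full block, i.e. what becomes creq->cache_ptr. */
static unsigned int cache_bytes(u64 len, unsigned int blocksize)
{
	unsigned int rem = do_div(len, blocksize);	/* len now holds the quotient */

	return rem;
}

The same helper also flips the template to CESA_SA_DESC_CFG_MID_FRAG once at least one full block has been hashed, so a resumed request does not re-issue a FIRST_FRAG operation.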
@@ -843,12 +848,39 @@ static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
843 if (ret) 848 if (ret)
844 return ret; 849 return ret;
845 850
846 memcpy(creq->cache, in_state->block, cache_ptr); 851 memcpy(creq->cache, cache, cache_ptr);
847 creq->cache_ptr = cache_ptr; 852 creq->cache_ptr = cache_ptr;
848 853
849 return 0; 854 return 0;
850} 855}
851 856
857static int mv_cesa_md5_init(struct ahash_request *req)
858{
859 struct mv_cesa_op_ctx tmpl = { };
860
861 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
862
863 mv_cesa_ahash_init(req, &tmpl, true);
864
865 return 0;
866}
867
868static int mv_cesa_md5_export(struct ahash_request *req, void *out)
869{
870 struct md5_state *out_state = out;
871
872 return mv_cesa_ahash_export(req, out_state->hash,
873 &out_state->byte_count, out_state->block);
874}
875
876static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
877{
878 const struct md5_state *in_state = in;
879
880 return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
881 in_state->block);
882}
883
852static int mv_cesa_md5_digest(struct ahash_request *req) 884static int mv_cesa_md5_digest(struct ahash_request *req)
853{ 885{
854 int ret; 886 int ret;
@@ -870,6 +902,7 @@ struct ahash_alg mv_md5_alg = {
870 .import = mv_cesa_md5_import, 902 .import = mv_cesa_md5_import,
871 .halg = { 903 .halg = {
872 .digestsize = MD5_DIGEST_SIZE, 904 .digestsize = MD5_DIGEST_SIZE,
905 .statesize = sizeof(struct md5_state),
873 .base = { 906 .base = {
874 .cra_name = "md5", 907 .cra_name = "md5",
875 .cra_driver_name = "mv-md5", 908 .cra_driver_name = "mv-md5",
@@ -886,11 +919,11 @@ struct ahash_alg mv_md5_alg = {
886 919
887static int mv_cesa_sha1_init(struct ahash_request *req) 920static int mv_cesa_sha1_init(struct ahash_request *req)
888{ 921{
889 struct mv_cesa_op_ctx tmpl; 922 struct mv_cesa_op_ctx tmpl = { };
890 923
891 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1); 924 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);
892 925
893 mv_cesa_ahash_init(req, &tmpl); 926 mv_cesa_ahash_init(req, &tmpl, false);
894 927
895 return 0; 928 return 0;
896} 929}
@@ -898,44 +931,17 @@ static int mv_cesa_sha1_init(struct ahash_request *req)
898static int mv_cesa_sha1_export(struct ahash_request *req, void *out) 931static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
899{ 932{
900 struct sha1_state *out_state = out; 933 struct sha1_state *out_state = out;
901 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
902 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
903 unsigned int digsize = crypto_ahash_digestsize(ahash);
904
905 out_state->count = creq->len;
906 memcpy(out_state->state, creq->state, digsize);
907 memset(out_state->buffer, 0, sizeof(out_state->buffer));
908 if (creq->cache)
909 memcpy(out_state->buffer, creq->cache, creq->cache_ptr);
910 934
911 return 0; 935 return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
936 out_state->buffer);
912} 937}
913 938
914static int mv_cesa_sha1_import(struct ahash_request *req, const void *in) 939static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
915{ 940{
916 const struct sha1_state *in_state = in; 941 const struct sha1_state *in_state = in;
917 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
918 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
919 unsigned int digsize = crypto_ahash_digestsize(ahash);
920 unsigned int cache_ptr;
921 int ret;
922 942
923 creq->len = in_state->count; 943 return mv_cesa_ahash_import(req, in_state->state, in_state->count,
924 memcpy(creq->state, in_state->state, digsize); 944 in_state->buffer);
925 creq->cache_ptr = 0;
926
927 cache_ptr = creq->len % SHA1_BLOCK_SIZE;
928 if (!cache_ptr)
929 return 0;
930
931 ret = mv_cesa_ahash_alloc_cache(req);
932 if (ret)
933 return ret;
934
935 memcpy(creq->cache, in_state->buffer, cache_ptr);
936 creq->cache_ptr = cache_ptr;
937
938 return 0;
939} 945}
940 946
941static int mv_cesa_sha1_digest(struct ahash_request *req) 947static int mv_cesa_sha1_digest(struct ahash_request *req)
@@ -959,6 +965,7 @@ struct ahash_alg mv_sha1_alg = {
959 .import = mv_cesa_sha1_import, 965 .import = mv_cesa_sha1_import,
960 .halg = { 966 .halg = {
961 .digestsize = SHA1_DIGEST_SIZE, 967 .digestsize = SHA1_DIGEST_SIZE,
968 .statesize = sizeof(struct sha1_state),
962 .base = { 969 .base = {
963 .cra_name = "sha1", 970 .cra_name = "sha1",
964 .cra_driver_name = "mv-sha1", 971 .cra_driver_name = "mv-sha1",
@@ -975,11 +982,11 @@ struct ahash_alg mv_sha1_alg = {
975 982
976static int mv_cesa_sha256_init(struct ahash_request *req) 983static int mv_cesa_sha256_init(struct ahash_request *req)
977{ 984{
978 struct mv_cesa_op_ctx tmpl; 985 struct mv_cesa_op_ctx tmpl = { };
979 986
980 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256); 987 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);
981 988
982 mv_cesa_ahash_init(req, &tmpl); 989 mv_cesa_ahash_init(req, &tmpl, false);
983 990
984 return 0; 991 return 0;
985} 992}
@@ -998,44 +1005,17 @@ static int mv_cesa_sha256_digest(struct ahash_request *req)
998static int mv_cesa_sha256_export(struct ahash_request *req, void *out) 1005static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
999{ 1006{
1000 struct sha256_state *out_state = out; 1007 struct sha256_state *out_state = out;
1001 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1002 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
1003 unsigned int ds = crypto_ahash_digestsize(ahash);
1004 1008
1005 out_state->count = creq->len; 1009 return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
1006 memcpy(out_state->state, creq->state, ds); 1010 out_state->buf);
1007 memset(out_state->buf, 0, sizeof(out_state->buf));
1008 if (creq->cache)
1009 memcpy(out_state->buf, creq->cache, creq->cache_ptr);
1010
1011 return 0;
1012} 1011}
1013 1012
1014static int mv_cesa_sha256_import(struct ahash_request *req, const void *in) 1013static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
1015{ 1014{
1016 const struct sha256_state *in_state = in; 1015 const struct sha256_state *in_state = in;
1017 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1018 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
1019 unsigned int digsize = crypto_ahash_digestsize(ahash);
1020 unsigned int cache_ptr;
1021 int ret;
1022
1023 creq->len = in_state->count;
1024 memcpy(creq->state, in_state->state, digsize);
1025 creq->cache_ptr = 0;
1026
1027 cache_ptr = creq->len % SHA256_BLOCK_SIZE;
1028 if (!cache_ptr)
1029 return 0;
1030
1031 ret = mv_cesa_ahash_alloc_cache(req);
1032 if (ret)
1033 return ret;
1034
1035 memcpy(creq->cache, in_state->buf, cache_ptr);
1036 creq->cache_ptr = cache_ptr;
1037 1016
1038 return 0; 1017 return mv_cesa_ahash_import(req, in_state->state, in_state->count,
1018 in_state->buf);
1039} 1019}
1040 1020
1041struct ahash_alg mv_sha256_alg = { 1021struct ahash_alg mv_sha256_alg = {
@@ -1048,6 +1028,7 @@ struct ahash_alg mv_sha256_alg = {
1048 .import = mv_cesa_sha256_import, 1028 .import = mv_cesa_sha256_import,
1049 .halg = { 1029 .halg = {
1050 .digestsize = SHA256_DIGEST_SIZE, 1030 .digestsize = SHA256_DIGEST_SIZE,
1031 .statesize = sizeof(struct sha256_state),
1051 .base = { 1032 .base = {
1052 .cra_name = "sha256", 1033 .cra_name = "sha256",
1053 .cra_driver_name = "mv-sha256", 1034 .cra_driver_name = "mv-sha256",
@@ -1231,12 +1212,12 @@ static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
1231static int mv_cesa_ahmac_md5_init(struct ahash_request *req) 1212static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
1232{ 1213{
1233 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 1214 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1234 struct mv_cesa_op_ctx tmpl; 1215 struct mv_cesa_op_ctx tmpl = { };
1235 1216
1236 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5); 1217 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
1237 memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv)); 1218 memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
1238 1219
1239 mv_cesa_ahash_init(req, &tmpl); 1220 mv_cesa_ahash_init(req, &tmpl, true);
1240 1221
1241 return 0; 1222 return 0;
1242} 1223}
@@ -1301,12 +1282,12 @@ struct ahash_alg mv_ahmac_md5_alg = {
1301static int mv_cesa_ahmac_sha1_init(struct ahash_request *req) 1282static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
1302{ 1283{
1303 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 1284 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1304 struct mv_cesa_op_ctx tmpl; 1285 struct mv_cesa_op_ctx tmpl = { };
1305 1286
1306 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1); 1287 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
1307 memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv)); 1288 memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
1308 1289
1309 mv_cesa_ahash_init(req, &tmpl); 1290 mv_cesa_ahash_init(req, &tmpl, false);
1310 1291
1311 return 0; 1292 return 0;
1312} 1293}
@@ -1391,12 +1372,12 @@ static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
1391static int mv_cesa_ahmac_sha256_init(struct ahash_request *req) 1372static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
1392{ 1373{
1393 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 1374 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1394 struct mv_cesa_op_ctx tmpl; 1375 struct mv_cesa_op_ctx tmpl = { };
1395 1376
1396 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256); 1377 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
1397 memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv)); 1378 memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
1398 1379
1399 mv_cesa_ahash_init(req, &tmpl); 1380 mv_cesa_ahash_init(req, &tmpl, false);
1400 1381
1401 return 0; 1382 return 0;
1402} 1383}
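
[Editor's note] A pattern repeated through these hash.c hunks is changing the on-stack template from "struct mv_cesa_op_ctx tmpl;" to "tmpl = { };". A brief sketch of why that matters for a structure that ends up feeding hardware descriptors, using an invented structure for illustration:

#include <linux/types.h>

/* Invented structure, for illustration only. */
struct example_op {
	u32 cfg;
	u32 ctx[8];
};

static void build_op(struct example_op *op)
{
	/*
	 * The empty initializer zeroes every member, so fields the init
	 * path never touches cannot leak stack garbage into whatever
	 * later consumes the template.
	 */
	struct example_op tmpl = { };

	tmpl.cfg = 0x1;		/* set only the fields that are needed */
	*op = tmpl;
}
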
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 64a366c50174..76427981275b 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -41,18 +41,18 @@ void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq)
41{ 41{
42 struct mv_cesa_engine *engine = dreq->base.engine; 42 struct mv_cesa_engine *engine = dreq->base.engine;
43 43
44 writel(0, engine->regs + CESA_SA_CFG); 44 writel_relaxed(0, engine->regs + CESA_SA_CFG);
45 45
46 mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE); 46 mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
47 writel(CESA_TDMA_DST_BURST_128B | CESA_TDMA_SRC_BURST_128B | 47 writel_relaxed(CESA_TDMA_DST_BURST_128B | CESA_TDMA_SRC_BURST_128B |
48 CESA_TDMA_NO_BYTE_SWAP | CESA_TDMA_EN, 48 CESA_TDMA_NO_BYTE_SWAP | CESA_TDMA_EN,
49 engine->regs + CESA_TDMA_CONTROL); 49 engine->regs + CESA_TDMA_CONTROL);
50 50
51 writel(CESA_SA_CFG_ACT_CH0_IDMA | CESA_SA_CFG_MULTI_PKT | 51 writel_relaxed(CESA_SA_CFG_ACT_CH0_IDMA | CESA_SA_CFG_MULTI_PKT |
52 CESA_SA_CFG_CH0_W_IDMA | CESA_SA_CFG_PARA_DIS, 52 CESA_SA_CFG_CH0_W_IDMA | CESA_SA_CFG_PARA_DIS,
53 engine->regs + CESA_SA_CFG); 53 engine->regs + CESA_SA_CFG);
54 writel(dreq->chain.first->cur_dma, 54 writel_relaxed(dreq->chain.first->cur_dma,
55 engine->regs + CESA_TDMA_NEXT_ADDR); 55 engine->regs + CESA_TDMA_NEXT_ADDR);
56 writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); 56 writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
57} 57}
58 58
@@ -69,7 +69,7 @@ void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq)
69 69
70 tdma = tdma->next; 70 tdma = tdma->next;
71 dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma, 71 dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
72 le32_to_cpu(old_tdma->cur_dma)); 72 old_tdma->cur_dma);
73 } 73 }
74 74
75 dreq->chain.first = NULL; 75 dreq->chain.first = NULL;
@@ -105,9 +105,9 @@ mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
105 return ERR_PTR(-ENOMEM); 105 return ERR_PTR(-ENOMEM);
106 106
107 memset(new_tdma, 0, sizeof(*new_tdma)); 107 memset(new_tdma, 0, sizeof(*new_tdma));
108 new_tdma->cur_dma = cpu_to_le32(dma_handle); 108 new_tdma->cur_dma = dma_handle;
109 if (chain->last) { 109 if (chain->last) {
110 chain->last->next_dma = new_tdma->cur_dma; 110 chain->last->next_dma = cpu_to_le32(dma_handle);
111 chain->last->next = new_tdma; 111 chain->last->next = new_tdma;
112 } else { 112 } else {
113 chain->first = new_tdma; 113 chain->first = new_tdma;
@@ -126,6 +126,7 @@ struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
126 struct mv_cesa_tdma_desc *tdma; 126 struct mv_cesa_tdma_desc *tdma;
127 struct mv_cesa_op_ctx *op; 127 struct mv_cesa_op_ctx *op;
128 dma_addr_t dma_handle; 128 dma_addr_t dma_handle;
129 unsigned int size;
129 130
130 tdma = mv_cesa_dma_add_desc(chain, flags); 131 tdma = mv_cesa_dma_add_desc(chain, flags);
131 if (IS_ERR(tdma)) 132 if (IS_ERR(tdma))
@@ -137,10 +138,12 @@ struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
137 138
138 *op = *op_templ; 139 *op = *op_templ;
139 140
141 size = skip_ctx ? sizeof(op->desc) : sizeof(*op);
142
140 tdma = chain->last; 143 tdma = chain->last;
141 tdma->op = op; 144 tdma->op = op;
142 tdma->byte_cnt = (skip_ctx ? sizeof(op->desc) : sizeof(*op)) | BIT(31); 145 tdma->byte_cnt = cpu_to_le32(size | BIT(31));
143 tdma->src = dma_handle; 146 tdma->src = cpu_to_le32(dma_handle);
144 tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP; 147 tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP;
145 148
146 return op; 149 return op;
@@ -156,7 +159,7 @@ int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain,
156 if (IS_ERR(tdma)) 159 if (IS_ERR(tdma))
157 return PTR_ERR(tdma); 160 return PTR_ERR(tdma);
158 161
159 tdma->byte_cnt = size | BIT(31); 162 tdma->byte_cnt = cpu_to_le32(size | BIT(31));
160 tdma->src = src; 163 tdma->src = src;
161 tdma->dst = dst; 164 tdma->dst = dst;
162 165
@@ -166,8 +169,7 @@ int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain,
166 return 0; 169 return 0;
167} 170}
168 171
169int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, 172int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags)
170 u32 flags)
171{ 173{
172 struct mv_cesa_tdma_desc *tdma; 174 struct mv_cesa_tdma_desc *tdma;
173 175
@@ -178,7 +180,7 @@ int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain,
178 return 0; 180 return 0;
179} 181}
180 182
181int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, u32 flags) 183int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags)
182{ 184{
183 struct mv_cesa_tdma_desc *tdma; 185 struct mv_cesa_tdma_desc *tdma;
184 186
@@ -186,7 +188,7 @@ int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, u32 flags)
186 if (IS_ERR(tdma)) 188 if (IS_ERR(tdma))
187 return PTR_ERR(tdma); 189 return PTR_ERR(tdma);
188 190
189 tdma->byte_cnt = BIT(31); 191 tdma->byte_cnt = cpu_to_le32(BIT(31));
190 192
191 return 0; 193 return 0;
192} 194}
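
[Editor's note] The tdma.c hunk converts the engine setup writes to writel_relaxed() and keeps a plain writel() only for the final command register. A short sketch of that idiom, with placeholder register offsets (not the CESA layout):

#include <linux/io.h>
#include <linux/types.h>

/*
 * Placeholder offsets, illustrative only.  Configuration uses the
 * relaxed accessors (no barrier per write); the single writel() at the
 * end carries the barrier, so everything programmed before it is
 * visible to the device when the doorbell lands.
 */
static void example_kick(void __iomem *regs, u32 first_desc)
{
	writel_relaxed(0, regs + 0x00);			/* config */
	writel_relaxed(first_desc, regs + 0x10);	/* next descriptor */
	writel(0x1, regs + 0x20);			/* doorbell, ordered */
}
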
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 2e8dab9d4263..5450880abb7b 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -34,7 +34,7 @@
34#define DRV_MODULE_VERSION "0.2" 34#define DRV_MODULE_VERSION "0.2"
35#define DRV_MODULE_RELDATE "July 28, 2011" 35#define DRV_MODULE_RELDATE "July 28, 2011"
36 36
37static char version[] = 37static const char version[] =
38 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 38 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
39 39
40MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); 40MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 3750e13d8721..9ef51fafdbff 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -491,7 +491,7 @@ static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen,
491 void *wmem) 491 void *wmem)
492{ 492{
493 return nx842_powernv_function(in, inlen, out, outlenp, 493 return nx842_powernv_function(in, inlen, out, outlenp,
494 wmem, CCW_FC_842_COMP_NOCRC); 494 wmem, CCW_FC_842_COMP_CRC);
495} 495}
496 496
497/** 497/**
@@ -519,7 +519,7 @@ static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen,
519 void *wmem) 519 void *wmem)
520{ 520{
521 return nx842_powernv_function(in, inlen, out, outlenp, 521 return nx842_powernv_function(in, inlen, out, outlenp,
522 wmem, CCW_FC_842_DECOMP_NOCRC); 522 wmem, CCW_FC_842_DECOMP_CRC);
523} 523}
524 524
525static int __init nx842_powernv_probe(struct device_node *dn) 525static int __init nx842_powernv_probe(struct device_node *dn)
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index f4cbde03c6ad..cddc6d8b55d9 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -234,6 +234,10 @@ static int nx842_validate_result(struct device *dev,
234 dev_dbg(dev, "%s: Out of space in output buffer\n", 234 dev_dbg(dev, "%s: Out of space in output buffer\n",
235 __func__); 235 __func__);
236 return -ENOSPC; 236 return -ENOSPC;
237 case 65: /* Calculated CRC doesn't match the passed value */
238 dev_dbg(dev, "%s: CRC mismatch for decompression\n",
239 __func__);
240 return -EINVAL;
237 case 66: /* Input data contains an illegal template field */ 241 case 66: /* Input data contains an illegal template field */
238 case 67: /* Template indicates data past the end of the input stream */ 242 case 67: /* Template indicates data past the end of the input stream */
239 dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n", 243 dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n",
@@ -324,7 +328,7 @@ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
324 slout.entries = (struct nx842_slentry *)workmem->slout; 328 slout.entries = (struct nx842_slentry *)workmem->slout;
325 329
326 /* Init operation */ 330 /* Init operation */
327 op.flags = NX842_OP_COMPRESS; 331 op.flags = NX842_OP_COMPRESS_CRC;
328 csbcpb = &workmem->csbcpb; 332 csbcpb = &workmem->csbcpb;
329 memset(csbcpb, 0, sizeof(*csbcpb)); 333 memset(csbcpb, 0, sizeof(*csbcpb));
330 op.csbcpb = nx842_get_pa(csbcpb); 334 op.csbcpb = nx842_get_pa(csbcpb);
@@ -457,7 +461,7 @@ static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
457 slout.entries = (struct nx842_slentry *)workmem->slout; 461 slout.entries = (struct nx842_slentry *)workmem->slout;
458 462
459 /* Init operation */ 463 /* Init operation */
460 op.flags = NX842_OP_DECOMPRESS; 464 op.flags = NX842_OP_DECOMPRESS_CRC;
461 csbcpb = &workmem->csbcpb; 465 csbcpb = &workmem->csbcpb;
462 memset(csbcpb, 0, sizeof(*csbcpb)); 466 memset(csbcpb, 0, sizeof(*csbcpb));
463 op.csbcpb = nx842_get_pa(csbcpb); 467 op.csbcpb = nx842_get_pa(csbcpb);
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index da36de26a4dc..615da961c4d8 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1591,6 +1591,7 @@ static const struct of_device_id spacc_of_id_table[] = {
1591 { .compatible = "picochip,spacc-l2" }, 1591 { .compatible = "picochip,spacc-l2" },
1592 {} 1592 {}
1593}; 1593};
1594MODULE_DEVICE_TABLE(of, spacc_of_id_table);
1594#endif /* CONFIG_OF */ 1595#endif /* CONFIG_OF */
1595 1596
1596static bool spacc_is_compatible(struct platform_device *pdev, 1597static bool spacc_is_compatible(struct platform_device *pdev,
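
[Editor's note] The picoxcell fix above adds only a MODULE_DEVICE_TABLE() line; without it the OF match table is never exported as a module alias, so the module cannot be auto-loaded when a matching device-tree node appears. A minimal sketch of the full pattern, with a placeholder compatible string:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

/* "vendor,example-dev" is a placeholder compatible string. */
static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-dev" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_of_match);
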
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index df20a9de1c58..9e9e196c6d51 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -1,5 +1,10 @@
1$(obj)/qat_rsakey-asn1.o: $(obj)/qat_rsakey-asn1.c $(obj)/qat_rsakey-asn1.h 1$(obj)/qat_rsapubkey-asn1.o: $(obj)/qat_rsapubkey-asn1.c \
2clean-files += qat_rsakey-asn1.c qat_rsakey-asn1.h 2 $(obj)/qat_rsapubkey-asn1.h
3$(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
4 $(obj)/qat_rsaprivkey-asn1.h
5
6clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
7clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
3 8
4obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o 9obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
5intel_qat-objs := adf_cfg.o \ 10intel_qat-objs := adf_cfg.o \
@@ -13,7 +18,8 @@ intel_qat-objs := adf_cfg.o \
13 adf_hw_arbiter.o \ 18 adf_hw_arbiter.o \
14 qat_crypto.o \ 19 qat_crypto.o \
15 qat_algs.o \ 20 qat_algs.o \
16 qat_rsakey-asn1.o \ 21 qat_rsapubkey-asn1.o \
22 qat_rsaprivkey-asn1.o \
17 qat_asym_algs.o \ 23 qat_asym_algs.o \
18 qat_uclo.o \ 24 qat_uclo.o \
19 qat_hal.o 25 qat_hal.o
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 7836dffc3d47..3f76bd495bcb 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -163,10 +163,8 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
163void qat_crypto_put_instance(struct qat_crypto_instance *inst); 163void qat_crypto_put_instance(struct qat_crypto_instance *inst);
164void qat_alg_callback(void *resp); 164void qat_alg_callback(void *resp);
165void qat_alg_asym_callback(void *resp); 165void qat_alg_asym_callback(void *resp);
166int qat_algs_init(void);
167void qat_algs_exit(void);
168int qat_algs_register(void); 166int qat_algs_register(void);
169int qat_algs_unregister(void); 167void qat_algs_unregister(void);
170int qat_asym_algs_register(void); 168int qat_asym_algs_register(void);
171void qat_asym_algs_unregister(void); 169void qat_asym_algs_unregister(void);
172 170
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index cd8a12af8ec5..03856ad280b9 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -463,9 +463,6 @@ static int __init adf_register_ctl_device_driver(void)
463{ 463{
464 mutex_init(&adf_ctl_lock); 464 mutex_init(&adf_ctl_lock);
465 465
466 if (qat_algs_init())
467 goto err_algs_init;
468
469 if (adf_chr_drv_create()) 466 if (adf_chr_drv_create())
470 goto err_chr_dev; 467 goto err_chr_dev;
471 468
@@ -482,8 +479,6 @@ err_crypto_register:
482err_aer: 479err_aer:
483 adf_chr_drv_destroy(); 480 adf_chr_drv_destroy();
484err_chr_dev: 481err_chr_dev:
485 qat_algs_exit();
486err_algs_init:
487 mutex_destroy(&adf_ctl_lock); 482 mutex_destroy(&adf_ctl_lock);
488 return -EFAULT; 483 return -EFAULT;
489} 484}
@@ -493,7 +488,6 @@ static void __exit adf_unregister_ctl_device_driver(void)
493 adf_chr_drv_destroy(); 488 adf_chr_drv_destroy();
494 adf_exit_aer(); 489 adf_exit_aer();
495 qat_crypto_unregister(); 490 qat_crypto_unregister();
496 qat_algs_exit();
497 adf_clean_vf_map(false); 491 adf_clean_vf_map(false);
498 mutex_destroy(&adf_ctl_lock); 492 mutex_destroy(&adf_ctl_lock);
499} 493}
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index ac37a89965ac..d873eeecc363 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -272,12 +272,10 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
272 clear_bit(ADF_STATUS_STARTING, &accel_dev->status); 272 clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
273 clear_bit(ADF_STATUS_STARTED, &accel_dev->status); 273 clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
274 274
275 if (!list_empty(&accel_dev->crypto_list) && qat_algs_unregister()) 275 if (!list_empty(&accel_dev->crypto_list)) {
276 dev_err(&GET_DEV(accel_dev), 276 qat_algs_unregister();
277 "Failed to unregister crypto algs\n");
278
279 if (!list_empty(&accel_dev->crypto_list))
280 qat_asym_algs_unregister(); 277 qat_asym_algs_unregister();
278 }
281 279
282 list_for_each(list_itr, &service_table) { 280 list_for_each(list_itr, &service_table) {
283 service = list_entry(list_itr, struct service_hndl, list); 281 service = list_entry(list_itr, struct service_hndl, list);
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index 2f77a4a8cecb..1117a8b58280 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -244,11 +244,8 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
244 return -EFAULT; 244 return -EFAULT;
245 } 245 }
246 246
247 if (!iommu_present(&pci_bus_type)) { 247 if (!iommu_present(&pci_bus_type))
248 dev_err(&pdev->dev, 248 dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");
249 "IOMMU must be enabled for SR-IOV to work\n");
250 return -EINVAL;
251 }
252 249
253 if (accel_dev->pf.vf_info) { 250 if (accel_dev->pf.vf_info) {
254 dev_info(&pdev->dev, "Already enabled for this device\n"); 251 dev_info(&pdev->dev, "Already enabled for this device\n");
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 2bd913aceaeb..59e4c3af15ed 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -62,13 +62,13 @@
62#include "icp_qat_fw.h" 62#include "icp_qat_fw.h"
63#include "icp_qat_fw_la.h" 63#include "icp_qat_fw_la.h"
64 64
65#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \ 65#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
66 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ 66 ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
67 ICP_QAT_HW_CIPHER_NO_CONVERT, \ 67 ICP_QAT_HW_CIPHER_NO_CONVERT, \
68 ICP_QAT_HW_CIPHER_ENCRYPT) 68 ICP_QAT_HW_CIPHER_ENCRYPT)
69 69
70#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \ 70#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
71 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ 71 ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
72 ICP_QAT_HW_CIPHER_KEY_CONVERT, \ 72 ICP_QAT_HW_CIPHER_KEY_CONVERT, \
73 ICP_QAT_HW_CIPHER_DECRYPT) 73 ICP_QAT_HW_CIPHER_DECRYPT)
74 74
@@ -271,7 +271,8 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
271 271
272static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm, 272static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
273 int alg, 273 int alg,
274 struct crypto_authenc_keys *keys) 274 struct crypto_authenc_keys *keys,
275 int mode)
275{ 276{
276 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm); 277 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
277 unsigned int digestsize = crypto_aead_authsize(aead_tfm); 278 unsigned int digestsize = crypto_aead_authsize(aead_tfm);
@@ -288,7 +289,7 @@ static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
288 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; 289 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
289 290
290 /* CD setup */ 291 /* CD setup */
291 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg); 292 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
292 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen); 293 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
293 hash->sha.inner_setup.auth_config.config = 294 hash->sha.inner_setup.auth_config.config =
294 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1, 295 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
@@ -351,7 +352,8 @@ static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
351 352
352static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm, 353static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
353 int alg, 354 int alg,
354 struct crypto_authenc_keys *keys) 355 struct crypto_authenc_keys *keys,
356 int mode)
355{ 357{
356 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm); 358 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
357 unsigned int digestsize = crypto_aead_authsize(aead_tfm); 359 unsigned int digestsize = crypto_aead_authsize(aead_tfm);
@@ -373,7 +375,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
373 sizeof(struct icp_qat_fw_la_cipher_req_params)); 375 sizeof(struct icp_qat_fw_la_cipher_req_params));
374 376
375 /* CD setup */ 377 /* CD setup */
376 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg); 378 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
377 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen); 379 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
378 hash->sha.inner_setup.auth_config.config = 380 hash->sha.inner_setup.auth_config.config =
379 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1, 381 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
@@ -464,7 +466,7 @@ static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
464 466
465static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx, 467static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
466 int alg, const uint8_t *key, 468 int alg, const uint8_t *key,
467 unsigned int keylen) 469 unsigned int keylen, int mode)
468{ 470{
469 struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd; 471 struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
470 struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req; 472 struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
@@ -472,12 +474,12 @@ static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
472 474
473 qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen); 475 qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
474 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr; 476 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
475 enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg); 477 enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
476} 478}
477 479
478static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx, 480static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
479 int alg, const uint8_t *key, 481 int alg, const uint8_t *key,
480 unsigned int keylen) 482 unsigned int keylen, int mode)
481{ 483{
482 struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd; 484 struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
483 struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req; 485 struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
@@ -485,29 +487,48 @@ static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
485 487
486 qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen); 488 qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
487 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr; 489 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
488 dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg); 490
491 if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
492 dec_cd->aes.cipher_config.val =
493 QAT_AES_HW_CONFIG_DEC(alg, mode);
494 else
495 dec_cd->aes.cipher_config.val =
496 QAT_AES_HW_CONFIG_ENC(alg, mode);
489} 497}
490 498
491static int qat_alg_validate_key(int key_len, int *alg) 499static int qat_alg_validate_key(int key_len, int *alg, int mode)
492{ 500{
493 switch (key_len) { 501 if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
494 case AES_KEYSIZE_128: 502 switch (key_len) {
495 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128; 503 case AES_KEYSIZE_128:
496 break; 504 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
497 case AES_KEYSIZE_192: 505 break;
498 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192; 506 case AES_KEYSIZE_192:
499 break; 507 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
500 case AES_KEYSIZE_256: 508 break;
501 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256; 509 case AES_KEYSIZE_256:
502 break; 510 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
503 default: 511 break;
504 return -EINVAL; 512 default:
513 return -EINVAL;
514 }
515 } else {
516 switch (key_len) {
517 case AES_KEYSIZE_128 << 1:
518 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
519 break;
520 case AES_KEYSIZE_256 << 1:
521 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
522 break;
523 default:
524 return -EINVAL;
525 }
505 } 526 }
506 return 0; 527 return 0;
507} 528}
508 529
509static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, 530static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
510 const uint8_t *key, unsigned int keylen) 531 unsigned int keylen, int mode)
511{ 532{
512 struct crypto_authenc_keys keys; 533 struct crypto_authenc_keys keys;
513 int alg; 534 int alg;
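
[Editor's note] Worth spelling out for the hunk above: when the mode is CTR, the decrypt descriptor is deliberately built with the encrypt cipher configuration, because CTR turns the block cipher into a stream cipher and both directions XOR the data with the same AES(counter) keystream. A toy illustration (not QAT code):

#include <stddef.h>
#include <stdint.h>

/*
 * Toy illustration only: with a precomputed keystream, "encrypt" and
 * "decrypt" are the same XOR, which is why a CTR decrypt session can
 * reuse the hardware encrypt configuration.
 */
static void ctr_xor(uint8_t *dst, const uint8_t *src,
		    const uint8_t *keystream, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] = src[i] ^ keystream[i];
}
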
@@ -515,13 +536,13 @@ static int qat_alg_aead_init_sessions(struct crypto_aead *tfm,
515 if (crypto_authenc_extractkeys(&keys, key, keylen)) 536 if (crypto_authenc_extractkeys(&keys, key, keylen))
516 goto bad_key; 537 goto bad_key;
517 538
518 if (qat_alg_validate_key(keys.enckeylen, &alg)) 539 if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
519 goto bad_key; 540 goto bad_key;
520 541
521 if (qat_alg_aead_init_enc_session(tfm, alg, &keys)) 542 if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
522 goto error; 543 goto error;
523 544
524 if (qat_alg_aead_init_dec_session(tfm, alg, &keys)) 545 if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
525 goto error; 546 goto error;
526 547
527 return 0; 548 return 0;
@@ -534,15 +555,16 @@ error:
534 555
535static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx, 556static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
536 const uint8_t *key, 557 const uint8_t *key,
537 unsigned int keylen) 558 unsigned int keylen,
559 int mode)
538{ 560{
539 int alg; 561 int alg;
540 562
541 if (qat_alg_validate_key(keylen, &alg)) 563 if (qat_alg_validate_key(keylen, &alg, mode))
542 goto bad_key; 564 goto bad_key;
543 565
544 qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen); 566 qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
545 qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen); 567 qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
546 return 0; 568 return 0;
547bad_key: 569bad_key:
548 crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 570 crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
@@ -586,7 +608,8 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
586 goto out_free_enc; 608 goto out_free_enc;
587 } 609 }
588 } 610 }
589 if (qat_alg_aead_init_sessions(tfm, key, keylen)) 611 if (qat_alg_aead_init_sessions(tfm, key, keylen,
612 ICP_QAT_HW_CIPHER_CBC_MODE))
590 goto out_free_all; 613 goto out_free_all;
591 614
592 return 0; 615 return 0;
@@ -876,8 +899,8 @@ static int qat_alg_aead_enc(struct aead_request *areq)
876} 899}
877 900
878static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm, 901static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
879 const uint8_t *key, 902 const u8 *key, unsigned int keylen,
880 unsigned int keylen) 903 int mode)
881{ 904{
882 struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); 905 struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
883 struct device *dev; 906 struct device *dev;
@@ -918,7 +941,7 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
918 } 941 }
919 } 942 }
920 spin_unlock(&ctx->lock); 943 spin_unlock(&ctx->lock);
921 if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen)) 944 if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
922 goto out_free_all; 945 goto out_free_all;
923 946
924 return 0; 947 return 0;
@@ -936,6 +959,27 @@ out_free_enc:
936 return -ENOMEM; 959 return -ENOMEM;
937} 960}
938 961
962static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
963 const u8 *key, unsigned int keylen)
964{
965 return qat_alg_ablkcipher_setkey(tfm, key, keylen,
966 ICP_QAT_HW_CIPHER_CBC_MODE);
967}
968
969static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
970 const u8 *key, unsigned int keylen)
971{
972 return qat_alg_ablkcipher_setkey(tfm, key, keylen,
973 ICP_QAT_HW_CIPHER_CTR_MODE);
974}
975
976static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
977 const u8 *key, unsigned int keylen)
978{
979 return qat_alg_ablkcipher_setkey(tfm, key, keylen,
980 ICP_QAT_HW_CIPHER_XTS_MODE);
981}
982
939static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req) 983static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
940{ 984{
941 struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); 985 struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
@@ -1171,7 +1215,51 @@ static struct crypto_alg qat_algs[] = { {
1171 .cra_exit = qat_alg_ablkcipher_exit, 1215 .cra_exit = qat_alg_ablkcipher_exit,
1172 .cra_u = { 1216 .cra_u = {
1173 .ablkcipher = { 1217 .ablkcipher = {
1174 .setkey = qat_alg_ablkcipher_setkey, 1218 .setkey = qat_alg_ablkcipher_cbc_setkey,
1219 .decrypt = qat_alg_ablkcipher_decrypt,
1220 .encrypt = qat_alg_ablkcipher_encrypt,
1221 .min_keysize = AES_MIN_KEY_SIZE,
1222 .max_keysize = AES_MAX_KEY_SIZE,
1223 .ivsize = AES_BLOCK_SIZE,
1224 },
1225 },
1226}, {
1227 .cra_name = "ctr(aes)",
1228 .cra_driver_name = "qat_aes_ctr",
1229 .cra_priority = 4001,
1230 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1231 .cra_blocksize = AES_BLOCK_SIZE,
1232 .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1233 .cra_alignmask = 0,
1234 .cra_type = &crypto_ablkcipher_type,
1235 .cra_module = THIS_MODULE,
1236 .cra_init = qat_alg_ablkcipher_init,
1237 .cra_exit = qat_alg_ablkcipher_exit,
1238 .cra_u = {
1239 .ablkcipher = {
1240 .setkey = qat_alg_ablkcipher_ctr_setkey,
1241 .decrypt = qat_alg_ablkcipher_decrypt,
1242 .encrypt = qat_alg_ablkcipher_encrypt,
1243 .min_keysize = AES_MIN_KEY_SIZE,
1244 .max_keysize = AES_MAX_KEY_SIZE,
1245 .ivsize = AES_BLOCK_SIZE,
1246 },
1247 },
1248}, {
1249 .cra_name = "xts(aes)",
1250 .cra_driver_name = "qat_aes_xts",
1251 .cra_priority = 4001,
1252 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1253 .cra_blocksize = AES_BLOCK_SIZE,
1254 .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1255 .cra_alignmask = 0,
1256 .cra_type = &crypto_ablkcipher_type,
1257 .cra_module = THIS_MODULE,
1258 .cra_init = qat_alg_ablkcipher_init,
1259 .cra_exit = qat_alg_ablkcipher_exit,
1260 .cra_u = {
1261 .ablkcipher = {
1262 .setkey = qat_alg_ablkcipher_xts_setkey,
1175 .decrypt = qat_alg_ablkcipher_decrypt, 1263 .decrypt = qat_alg_ablkcipher_decrypt,
1176 .encrypt = qat_alg_ablkcipher_encrypt, 1264 .encrypt = qat_alg_ablkcipher_encrypt,
1177 .min_keysize = AES_MIN_KEY_SIZE, 1265 .min_keysize = AES_MIN_KEY_SIZE,
@@ -1212,7 +1300,7 @@ unreg_algs:
1212 goto unlock; 1300 goto unlock;
1213} 1301}
1214 1302
1215int qat_algs_unregister(void) 1303void qat_algs_unregister(void)
1216{ 1304{
1217 mutex_lock(&algs_lock); 1305 mutex_lock(&algs_lock);
1218 if (--active_devs != 0) 1306 if (--active_devs != 0)
@@ -1223,14 +1311,4 @@ int qat_algs_unregister(void)
1223 1311
1224unlock: 1312unlock:
1225 mutex_unlock(&algs_lock); 1313 mutex_unlock(&algs_lock);
1226 return 0;
1227}
1228
1229int qat_algs_init(void)
1230{
1231 return 0;
1232}
1233
1234void qat_algs_exit(void)
1235{
1236} 1314}
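
[Editor's note] The new qat_alg_validate_key() branch accepts XTS keys at twice the AES key size because an XTS key carries two independent AES keys (data key plus tweak key). A small sketch of that check in isolation, with an invented helper name:

#include <crypto/aes.h>
#include <linux/errno.h>

/* Invented helper: underlying AES key length for an xts(aes) key. */
static int example_xts_aes_keylen(unsigned int key_len)
{
	switch (key_len) {
	case 2 * AES_KEYSIZE_128:	/* 32 bytes: AES-128 data + tweak */
		return AES_KEYSIZE_128;
	case 2 * AES_KEYSIZE_256:	/* 64 bytes: AES-256 data + tweak */
		return AES_KEYSIZE_256;
	default:
		return -EINVAL;
	}
}
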
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index e87f51023ba4..51c594fdacdc 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -51,7 +51,9 @@
51#include <crypto/akcipher.h> 51#include <crypto/akcipher.h>
52#include <linux/dma-mapping.h> 52#include <linux/dma-mapping.h>
53#include <linux/fips.h> 53#include <linux/fips.h>
54#include "qat_rsakey-asn1.h" 54#include <crypto/scatterwalk.h>
55#include "qat_rsapubkey-asn1.h"
56#include "qat_rsaprivkey-asn1.h"
55#include "icp_qat_fw_pke.h" 57#include "icp_qat_fw_pke.h"
56#include "adf_accel_devices.h" 58#include "adf_accel_devices.h"
57#include "adf_transport.h" 59#include "adf_transport.h"
@@ -106,6 +108,7 @@ struct qat_rsa_request {
106 dma_addr_t phy_in; 108 dma_addr_t phy_in;
107 dma_addr_t phy_out; 109 dma_addr_t phy_out;
108 char *src_align; 110 char *src_align;
111 char *dst_align;
109 struct icp_qat_fw_pke_request req; 112 struct icp_qat_fw_pke_request req;
110 struct qat_rsa_ctx *ctx; 113 struct qat_rsa_ctx *ctx;
111 int err; 114 int err;
@@ -118,7 +121,6 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
118 struct device *dev = &GET_DEV(req->ctx->inst->accel_dev); 121 struct device *dev = &GET_DEV(req->ctx->inst->accel_dev);
119 int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET( 122 int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
120 resp->pke_resp_hdr.comn_resp_flags); 123 resp->pke_resp_hdr.comn_resp_flags);
121 char *ptr = areq->dst;
122 124
123 err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL; 125 err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
124 126
@@ -129,24 +131,44 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
129 dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz, 131 dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz,
130 DMA_TO_DEVICE); 132 DMA_TO_DEVICE);
131 133
132 dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz, 134 areq->dst_len = req->ctx->key_sz;
133 DMA_FROM_DEVICE); 135 if (req->dst_align) {
136 char *ptr = req->dst_align;
137
138 while (!(*ptr) && areq->dst_len) {
139 areq->dst_len--;
140 ptr++;
141 }
142
143 if (areq->dst_len != req->ctx->key_sz)
144 memmove(req->dst_align, ptr, areq->dst_len);
145
146 scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
147 areq->dst_len, 1);
148
149 dma_free_coherent(dev, req->ctx->key_sz, req->dst_align,
150 req->out.enc.c);
151 } else {
152 char *ptr = sg_virt(areq->dst);
153
154 while (!(*ptr) && areq->dst_len) {
155 areq->dst_len--;
156 ptr++;
157 }
158
159 if (sg_virt(areq->dst) != ptr && areq->dst_len)
160 memmove(sg_virt(areq->dst), ptr, areq->dst_len);
161
162 dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz,
163 DMA_FROM_DEVICE);
164 }
165
134 dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params), 166 dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
135 DMA_TO_DEVICE); 167 DMA_TO_DEVICE);
136 dma_unmap_single(dev, req->phy_out, 168 dma_unmap_single(dev, req->phy_out,
137 sizeof(struct qat_rsa_output_params), 169 sizeof(struct qat_rsa_output_params),
138 DMA_TO_DEVICE); 170 DMA_TO_DEVICE);
139 171
140 areq->dst_len = req->ctx->key_sz;
141 /* Need to set the correct length of the output */
142 while (!(*ptr) && areq->dst_len) {
143 areq->dst_len--;
144 ptr++;
145 }
146
147 if (areq->dst_len != req->ctx->key_sz)
148 memmove(areq->dst, ptr, areq->dst_len);
149
150 akcipher_request_complete(areq, err); 172 akcipher_request_complete(areq, err);
151} 173}
152 174
@@ -255,8 +277,16 @@ static int qat_rsa_enc(struct akcipher_request *req)
255 * same as modulo n so in case it is different we need to allocate a 277 * same as modulo n so in case it is different we need to allocate a
256 * new buf and copy src data. 278 * new buf and copy src data.
257 * In other case we just need to map the user provided buffer. 279 * In other case we just need to map the user provided buffer.
280 * Also need to make sure that it is in contiguous buffer.
258 */ 281 */
259 if (req->src_len < ctx->key_sz) { 282 if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
283 qat_req->src_align = NULL;
284 qat_req->in.enc.m = dma_map_single(dev, sg_virt(req->src),
285 req->src_len, DMA_TO_DEVICE);
286 if (unlikely(dma_mapping_error(dev, qat_req->in.enc.m)))
287 return ret;
288
289 } else {
260 int shift = ctx->key_sz - req->src_len; 290 int shift = ctx->key_sz - req->src_len;
261 291
262 qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, 292 qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
@@ -265,29 +295,39 @@ static int qat_rsa_enc(struct akcipher_request *req)
265 if (unlikely(!qat_req->src_align)) 295 if (unlikely(!qat_req->src_align))
266 return ret; 296 return ret;
267 297
268 memcpy(qat_req->src_align + shift, req->src, req->src_len); 298 scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
299 0, req->src_len, 0);
300 }
301 if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
302 qat_req->dst_align = NULL;
303 qat_req->out.enc.c = dma_map_single(dev, sg_virt(req->dst),
304 req->dst_len,
305 DMA_FROM_DEVICE);
306
307 if (unlikely(dma_mapping_error(dev, qat_req->out.enc.c)))
308 goto unmap_src;
309
269 } else { 310 } else {
270 qat_req->src_align = NULL; 311 qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
271 qat_req->in.enc.m = dma_map_single(dev, req->src, req->src_len, 312 &qat_req->out.enc.c,
272 DMA_TO_DEVICE); 313 GFP_KERNEL);
314 if (unlikely(!qat_req->dst_align))
315 goto unmap_src;
316
273 } 317 }
274 qat_req->in.in_tab[3] = 0; 318 qat_req->in.in_tab[3] = 0;
275 qat_req->out.enc.c = dma_map_single(dev, req->dst, req->dst_len,
276 DMA_FROM_DEVICE);
277 qat_req->out.out_tab[1] = 0; 319 qat_req->out.out_tab[1] = 0;
278 qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m, 320 qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m,
279 sizeof(struct qat_rsa_input_params), 321 sizeof(struct qat_rsa_input_params),
280 DMA_TO_DEVICE); 322 DMA_TO_DEVICE);
323 if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
324 goto unmap_dst;
325
281 qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c, 326 qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c,
282 sizeof(struct qat_rsa_output_params), 327 sizeof(struct qat_rsa_output_params),
283 DMA_TO_DEVICE); 328 DMA_TO_DEVICE);
284 329 if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
285 if (unlikely((!qat_req->src_align && 330 goto unmap_in_params;
286 dma_mapping_error(dev, qat_req->in.enc.m)) ||
287 dma_mapping_error(dev, qat_req->out.enc.c) ||
288 dma_mapping_error(dev, qat_req->phy_in) ||
289 dma_mapping_error(dev, qat_req->phy_out)))
290 goto unmap;
291 331
292 msg->pke_mid.src_data_addr = qat_req->phy_in; 332 msg->pke_mid.src_data_addr = qat_req->phy_in;
293 msg->pke_mid.dest_data_addr = qat_req->phy_out; 333 msg->pke_mid.dest_data_addr = qat_req->phy_out;
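
[Editor's note] The rewritten mapping logic distinguishes two cases: a source or destination that is a single scatterlist entry of exactly key_sz bytes is DMA-mapped in place, while anything else is staged through a DMA-coherent bounce buffer filled via scatterwalk_map_and_copy(). The decision itself is compact; a sketch with placeholder names:

#include <linux/scatterlist.h>
#include <linux/types.h>

/*
 * Placeholder helper: true when the scatterlist can be handed straight
 * to dma_map_single(), i.e. one entry covering exactly the RSA key
 * size.  Everything else needs a contiguous bounce buffer.
 */
static bool example_can_map_directly(struct scatterlist *sg,
				     unsigned int len, unsigned int key_sz)
{
	return sg_is_last(sg) && len == key_sz;
}
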
@@ -300,7 +340,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
300 340
301 if (!ret) 341 if (!ret)
302 return -EINPROGRESS; 342 return -EINPROGRESS;
303unmap: 343unmap_src:
304 if (qat_req->src_align) 344 if (qat_req->src_align)
305 dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, 345 dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
306 qat_req->in.enc.m); 346 qat_req->in.enc.m);
@@ -308,9 +348,15 @@ unmap:
308 if (!dma_mapping_error(dev, qat_req->in.enc.m)) 348 if (!dma_mapping_error(dev, qat_req->in.enc.m))
309 dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz, 349 dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
310 DMA_TO_DEVICE); 350 DMA_TO_DEVICE);
311 if (!dma_mapping_error(dev, qat_req->out.enc.c)) 351unmap_dst:
312 dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz, 352 if (qat_req->dst_align)
313 DMA_FROM_DEVICE); 353 dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
354 qat_req->out.enc.c);
355 else
356 if (!dma_mapping_error(dev, qat_req->out.enc.c))
357 dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
358 DMA_FROM_DEVICE);
359unmap_in_params:
314 if (!dma_mapping_error(dev, qat_req->phy_in)) 360 if (!dma_mapping_error(dev, qat_req->phy_in))
315 dma_unmap_single(dev, qat_req->phy_in, 361 dma_unmap_single(dev, qat_req->phy_in,
316 sizeof(struct qat_rsa_input_params), 362 sizeof(struct qat_rsa_input_params),
@@ -362,8 +408,16 @@ static int qat_rsa_dec(struct akcipher_request *req)
362 * same as modulo n so in case it is different we need to allocate a 408 * same as modulo n so in case it is different we need to allocate a
363 * new buf and copy src data. 409 * new buf and copy src data.
364 * In other case we just need to map the user provided buffer. 410 * In other case we just need to map the user provided buffer.
411 * Also need to make sure that it is in contiguous buffer.
365 */ 412 */
366 if (req->src_len < ctx->key_sz) { 413 if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
414 qat_req->src_align = NULL;
415 qat_req->in.dec.c = dma_map_single(dev, sg_virt(req->src),
416 req->dst_len, DMA_TO_DEVICE);
417 if (unlikely(dma_mapping_error(dev, qat_req->in.dec.c)))
418 return ret;
419
420 } else {
367 int shift = ctx->key_sz - req->src_len; 421 int shift = ctx->key_sz - req->src_len;
368 422
369 qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, 423 qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
@@ -372,29 +426,40 @@ static int qat_rsa_dec(struct akcipher_request *req)
372 if (unlikely(!qat_req->src_align)) 426 if (unlikely(!qat_req->src_align))
373 return ret; 427 return ret;
374 428
375 memcpy(qat_req->src_align + shift, req->src, req->src_len); 429 scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
430 0, req->src_len, 0);
431 }
432 if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
433 qat_req->dst_align = NULL;
434 qat_req->out.dec.m = dma_map_single(dev, sg_virt(req->dst),
435 req->dst_len,
436 DMA_FROM_DEVICE);
437
438 if (unlikely(dma_mapping_error(dev, qat_req->out.dec.m)))
439 goto unmap_src;
440
376 } else { 441 } else {
377 qat_req->src_align = NULL; 442 qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
378 qat_req->in.dec.c = dma_map_single(dev, req->src, req->src_len, 443 &qat_req->out.dec.m,
379 DMA_TO_DEVICE); 444 GFP_KERNEL);
445 if (unlikely(!qat_req->dst_align))
446 goto unmap_src;
447
380 } 448 }
449
381 qat_req->in.in_tab[3] = 0; 450 qat_req->in.in_tab[3] = 0;
382 qat_req->out.dec.m = dma_map_single(dev, req->dst, req->dst_len,
383 DMA_FROM_DEVICE);
384 qat_req->out.out_tab[1] = 0; 451 qat_req->out.out_tab[1] = 0;
385 qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c, 452 qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c,
386 sizeof(struct qat_rsa_input_params), 453 sizeof(struct qat_rsa_input_params),
387 DMA_TO_DEVICE); 454 DMA_TO_DEVICE);
455 if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
456 goto unmap_dst;
457
388 qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m, 458 qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m,
389 sizeof(struct qat_rsa_output_params), 459 sizeof(struct qat_rsa_output_params),
390 DMA_TO_DEVICE); 460 DMA_TO_DEVICE);
391 461 if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
392 if (unlikely((!qat_req->src_align && 462 goto unmap_in_params;
393 dma_mapping_error(dev, qat_req->in.dec.c)) ||
394 dma_mapping_error(dev, qat_req->out.dec.m) ||
395 dma_mapping_error(dev, qat_req->phy_in) ||
396 dma_mapping_error(dev, qat_req->phy_out)))
397 goto unmap;
398 463
399 msg->pke_mid.src_data_addr = qat_req->phy_in; 464 msg->pke_mid.src_data_addr = qat_req->phy_in;
400 msg->pke_mid.dest_data_addr = qat_req->phy_out; 465 msg->pke_mid.dest_data_addr = qat_req->phy_out;
@@ -407,7 +472,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
407 472
408 if (!ret) 473 if (!ret)
409 return -EINPROGRESS; 474 return -EINPROGRESS;
410unmap: 475unmap_src:
411 if (qat_req->src_align) 476 if (qat_req->src_align)
412 dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, 477 dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
413 qat_req->in.dec.c); 478 qat_req->in.dec.c);
@@ -415,9 +480,15 @@ unmap:
415 if (!dma_mapping_error(dev, qat_req->in.dec.c)) 480 if (!dma_mapping_error(dev, qat_req->in.dec.c))
416 dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz, 481 dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
417 DMA_TO_DEVICE); 482 DMA_TO_DEVICE);
418 if (!dma_mapping_error(dev, qat_req->out.dec.m)) 483unmap_dst:
419 dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz, 484 if (qat_req->dst_align)
420 DMA_FROM_DEVICE); 485 dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
486 qat_req->out.dec.m);
487 else
488 if (!dma_mapping_error(dev, qat_req->out.dec.m))
489 dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
490 DMA_FROM_DEVICE);
491unmap_in_params:
421 if (!dma_mapping_error(dev, qat_req->phy_in)) 492 if (!dma_mapping_error(dev, qat_req->phy_in))
422 dma_unmap_single(dev, qat_req->phy_in, 493 dma_unmap_single(dev, qat_req->phy_in,
423 sizeof(struct qat_rsa_input_params), 494 sizeof(struct qat_rsa_input_params),
@@ -531,7 +602,7 @@ err:
531} 602}
532 603
533static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key, 604static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
534 unsigned int keylen) 605 unsigned int keylen, bool private)
535{ 606{
536 struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); 607 struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
537 struct device *dev = &GET_DEV(ctx->inst->accel_dev); 608 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
@@ -550,7 +621,13 @@ static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
550 ctx->n = NULL; 621 ctx->n = NULL;
551 ctx->e = NULL; 622 ctx->e = NULL;
552 ctx->d = NULL; 623 ctx->d = NULL;
553 ret = asn1_ber_decoder(&qat_rsakey_decoder, ctx, key, keylen); 624
625 if (private)
626 ret = asn1_ber_decoder(&qat_rsaprivkey_decoder, ctx, key,
627 keylen);
628 else
629 ret = asn1_ber_decoder(&qat_rsapubkey_decoder, ctx, key,
630 keylen);
554 if (ret < 0) 631 if (ret < 0)
555 goto free; 632 goto free;
556 633
@@ -559,6 +636,11 @@ static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
559 ret = -EINVAL; 636 ret = -EINVAL;
560 goto free; 637 goto free;
561 } 638 }
639 if (private && !ctx->d) {
640 /* invalid private key provided */
641 ret = -EINVAL;
642 goto free;
643 }
562 644
563 return 0; 645 return 0;
564free: 646free:
@@ -579,6 +661,25 @@ free:
579 return ret; 661 return ret;
580} 662}
581 663
664static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
665 unsigned int keylen)
666{
667 return qat_rsa_setkey(tfm, key, keylen, false);
668}
669
670static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
671 unsigned int keylen)
672{
673 return qat_rsa_setkey(tfm, key, keylen, true);
674}
675
676static int qat_rsa_max_size(struct crypto_akcipher *tfm)
677{
678 struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
679
680 return (ctx->n) ? ctx->key_sz : -EINVAL;
681}
682
582static int qat_rsa_init_tfm(struct crypto_akcipher *tfm) 683static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
583{ 684{
584 struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); 685 struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
@@ -617,7 +718,9 @@ static struct akcipher_alg rsa = {
617 .decrypt = qat_rsa_dec, 718 .decrypt = qat_rsa_dec,
618 .sign = qat_rsa_dec, 719 .sign = qat_rsa_dec,
619 .verify = qat_rsa_enc, 720 .verify = qat_rsa_enc,
620 .setkey = qat_rsa_setkey, 721 .set_pub_key = qat_rsa_setpubkey,
722 .set_priv_key = qat_rsa_setprivkey,
723 .max_size = qat_rsa_max_size,
621 .init = qat_rsa_init_tfm, 724 .init = qat_rsa_init_tfm,
622 .exit = qat_rsa_exit_tfm, 725 .exit = qat_rsa_exit_tfm,
623 .reqsize = sizeof(struct qat_rsa_request) + 64, 726 .reqsize = sizeof(struct qat_rsa_request) + 64,
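
[Editor's note] The qat_rsa_enc()/qat_rsa_dec() rework also changes the error handling: instead of one "unmap:" label that re-checks every mapping, each dma_map_single() is checked as it is made and a failure jumps to the label that undoes only what already succeeded. A sketch of that staged-unwinding shape, with invented mappings a/b/c:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/types.h>

static int example_map_three(struct device *dev, void *a, void *b, void *c,
			     size_t len, dma_addr_t *da, dma_addr_t *db,
			     dma_addr_t *dc)
{
	*da = dma_map_single(dev, a, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *da))
		return -ENOMEM;

	*db = dma_map_single(dev, b, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *db))
		goto unmap_a;

	*dc = dma_map_single(dev, c, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dc))
		goto unmap_b;

	return 0;

unmap_b:
	dma_unmap_single(dev, *db, len, DMA_FROM_DEVICE);
unmap_a:
	dma_unmap_single(dev, *da, len, DMA_TO_DEVICE);
	return -ENOMEM;
}
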
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index 07c2f9f9d1fc..9cab15497f04 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -60,8 +60,8 @@ static struct service_hndl qat_crypto;
60 60
61void qat_crypto_put_instance(struct qat_crypto_instance *inst) 61void qat_crypto_put_instance(struct qat_crypto_instance *inst)
62{ 62{
63 if (atomic_sub_return(1, &inst->refctr) == 0) 63 atomic_dec(&inst->refctr);
64 adf_dev_put(inst->accel_dev); 64 adf_dev_put(inst->accel_dev);
65} 65}
66 66
67static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev) 67static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
@@ -97,49 +97,66 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
97struct qat_crypto_instance *qat_crypto_get_instance_node(int node) 97struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
98{ 98{
99 struct adf_accel_dev *accel_dev = NULL; 99 struct adf_accel_dev *accel_dev = NULL;
100 struct qat_crypto_instance *inst_best = NULL; 100 struct qat_crypto_instance *inst = NULL;
101 struct list_head *itr; 101 struct list_head *itr;
102 unsigned long best = ~0; 102 unsigned long best = ~0;
103 103
104 list_for_each(itr, adf_devmgr_get_head()) { 104 list_for_each(itr, adf_devmgr_get_head()) {
105 accel_dev = list_entry(itr, struct adf_accel_dev, list); 105 struct adf_accel_dev *tmp_dev;
106 unsigned long ctr;
107
108 tmp_dev = list_entry(itr, struct adf_accel_dev, list);
109
110 if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
111 dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
112 adf_dev_started(tmp_dev) &&
113 !list_empty(&tmp_dev->crypto_list)) {
114 ctr = atomic_read(&tmp_dev->ref_count);
115 if (best > ctr) {
116 accel_dev = tmp_dev;
117 best = ctr;
118 }
119 }
120 }
121 if (!accel_dev)
122 pr_info("QAT: Could not find a device on node %d\n", node);
123
124 /* Get any started device */
125 list_for_each(itr, adf_devmgr_get_head()) {
126 struct adf_accel_dev *tmp_dev;
106 127
107 if ((node == dev_to_node(&GET_DEV(accel_dev)) || 128 tmp_dev = list_entry(itr, struct adf_accel_dev, list);
108 dev_to_node(&GET_DEV(accel_dev)) < 0) && 129
109 adf_dev_started(accel_dev) && 130 if (adf_dev_started(tmp_dev) &&
110 !list_empty(&accel_dev->crypto_list)) 131 !list_empty(&tmp_dev->crypto_list)) {
132 accel_dev = tmp_dev;
111 break; 133 break;
112 accel_dev = NULL; 134 }
113 }
114 if (!accel_dev) {
115 pr_err("QAT: Could not find a device on node %d\n", node);
116 accel_dev = adf_devmgr_get_first();
117 } 135 }
118 if (!accel_dev || !adf_dev_started(accel_dev)) 136
137 if (!accel_dev)
119 return NULL; 138 return NULL;
120 139
140 best = ~0;
121 list_for_each(itr, &accel_dev->crypto_list) { 141 list_for_each(itr, &accel_dev->crypto_list) {
122 struct qat_crypto_instance *inst; 142 struct qat_crypto_instance *tmp_inst;
123 unsigned long cur; 143 unsigned long ctr;
124 144
125 inst = list_entry(itr, struct qat_crypto_instance, list); 145 tmp_inst = list_entry(itr, struct qat_crypto_instance, list);
126 cur = atomic_read(&inst->refctr); 146 ctr = atomic_read(&tmp_inst->refctr);
127 if (best > cur) { 147 if (best > ctr) {
128 inst_best = inst; 148 inst = tmp_inst;
129 best = cur; 149 best = ctr;
130 } 150 }
131 } 151 }
132 if (inst_best) { 152 if (inst) {
133 if (atomic_add_return(1, &inst_best->refctr) == 1) { 153 if (adf_dev_get(accel_dev)) {
134 if (adf_dev_get(accel_dev)) { 154 dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
135 atomic_dec(&inst_best->refctr); 155 return NULL;
136 dev_err(&GET_DEV(accel_dev),
137 "Could not increment dev refctr\n");
138 return NULL;
139 }
140 } 156 }
157 atomic_inc(&inst->refctr);
141 } 158 }
142 return inst_best; 159 return inst;
143} 160}
144 161
145static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) 162static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
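
[Editor's note] The instance-selection rewrite above replaces "take the first suitable device" with "take the device (and then the instance) with the lowest reference count", falling back to any started device if the requested NUMA node has none. The scan pattern, with invented types:

#include <linux/atomic.h>
#include <linux/list.h>

/* Invented types, for illustration only. */
struct example_inst {
	struct list_head list;
	atomic_t refctr;
};

static struct example_inst *example_least_used(struct list_head *head)
{
	struct example_inst *inst, *best_inst = NULL;
	unsigned long best = ~0UL;

	list_for_each_entry(inst, head, list) {
		unsigned long ctr = atomic_read(&inst->refctr);

		if (ctr < best) {
			best = ctr;
			best_inst = inst;
		}
	}
	return best_inst;
}
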
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 8e711d1c3084..380e761801a7 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -1034,7 +1034,7 @@ static int qat_hal_concat_micro_code(uint64_t *micro_inst,
1034 unsigned int inst_num, unsigned int size, 1034 unsigned int inst_num, unsigned int size,
1035 unsigned int addr, unsigned int *value) 1035 unsigned int addr, unsigned int *value)
1036{ 1036{
1037 int i, val_indx; 1037 int i;
1038 unsigned int cur_value; 1038 unsigned int cur_value;
1039 const uint64_t *inst_arr; 1039 const uint64_t *inst_arr;
1040 int fixup_offset; 1040 int fixup_offset;
@@ -1042,8 +1042,7 @@ static int qat_hal_concat_micro_code(uint64_t *micro_inst,
1042 int orig_num; 1042 int orig_num;
1043 1043
1044 orig_num = inst_num; 1044 orig_num = inst_num;
1045 val_indx = 0; 1045 cur_value = value[0];
1046 cur_value = value[val_indx++];
1047 inst_arr = inst_4b; 1046 inst_arr = inst_4b;
1048 usize = ARRAY_SIZE(inst_4b); 1047 usize = ARRAY_SIZE(inst_4b);
1049 fixup_offset = inst_num; 1048 fixup_offset = inst_num;
diff --git a/drivers/crypto/qat/qat_common/qat_rsakey.asn1 b/drivers/crypto/qat/qat_common/qat_rsakey.asn1
deleted file mode 100644
index 97b0e02b600a..000000000000
--- a/drivers/crypto/qat/qat_common/qat_rsakey.asn1
+++ /dev/null
@@ -1,5 +0,0 @@
1RsaKey ::= SEQUENCE {
2 n INTEGER ({ qat_rsa_get_n }),
3 e INTEGER ({ qat_rsa_get_e }),
4 d INTEGER ({ qat_rsa_get_d })
5}
diff --git a/drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1 b/drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1
new file mode 100644
index 000000000000..f0066adb79b8
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1
@@ -0,0 +1,11 @@
1RsaPrivKey ::= SEQUENCE {
2 version INTEGER,
3 n INTEGER ({ qat_rsa_get_n }),
4 e INTEGER ({ qat_rsa_get_e }),
5 d INTEGER ({ qat_rsa_get_d }),
6 prime1 INTEGER,
7 prime2 INTEGER,
8 exponent1 INTEGER,
9 exponent2 INTEGER,
10 coefficient INTEGER
11}
diff --git a/drivers/crypto/qat/qat_common/qat_rsapubkey.asn1 b/drivers/crypto/qat/qat_common/qat_rsapubkey.asn1
new file mode 100644
index 000000000000..bd667b31a21a
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_rsapubkey.asn1
@@ -0,0 +1,4 @@
1RsaPubKey ::= SEQUENCE {
2 n INTEGER ({ qat_rsa_get_n }),
3 e INTEGER ({ qat_rsa_get_e })
4}
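
The two new grammars replace the single qat_rsakey.asn1 so that public and private keys can be parsed separately. They follow the RSAPublicKey/RSAPrivateKey layout of PKCS #1 (RFC 3447); the kernel's ASN.1 compiler turns them into decoders that invoke qat_rsa_get_n/e/d as the corresponding INTEGERs are parsed, while the CRT fields in RsaPrivKey carry no callbacks and are simply skipped. From the caller's side this is reached through the split akcipher key-setting helpers mentioned in the pull request. A minimal sketch of that usage, assuming key/keylen hold a BER-encoded RsaPrivKey blob; which implementation (software RSA or a hardware driver such as qat) ends up backing the transform depends on algorithm priority:

#include <crypto/akcipher.h>
#include <linux/err.h>

static int example_load_rsa_priv_key(const void *key, unsigned int keylen)
{
        struct crypto_akcipher *tfm;
        int ret;

        tfm = crypto_alloc_akcipher("rsa", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* The driver's generated ASN.1 parser extracts n, e and d here. */
        ret = crypto_akcipher_set_priv_key(tfm, key, keylen);

        crypto_free_akcipher(tfm);
        return ret;
}
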
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index ad592de475a4..2c0d63d48747 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -44,10 +44,8 @@ static void qce_ablkcipher_done(void *data)
44 error); 44 error);
45 45
46 if (diff_dst) 46 if (diff_dst)
47 qce_unmapsg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src, 47 dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
48 rctx->dst_chained); 48 dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
49 qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
50 rctx->dst_chained);
51 49
52 sg_free_table(&rctx->dst_tbl); 50 sg_free_table(&rctx->dst_tbl);
53 51
@@ -80,15 +78,11 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
80 dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; 78 dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
81 dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; 79 dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
82 80
83 rctx->src_nents = qce_countsg(req->src, req->nbytes, 81 rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
84 &rctx->src_chained); 82 if (diff_dst)
85 if (diff_dst) { 83 rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
86 rctx->dst_nents = qce_countsg(req->dst, req->nbytes, 84 else
87 &rctx->dst_chained);
88 } else {
89 rctx->dst_nents = rctx->src_nents; 85 rctx->dst_nents = rctx->src_nents;
90 rctx->dst_chained = rctx->src_chained;
91 }
92 86
93 rctx->dst_nents += 1; 87 rctx->dst_nents += 1;
94 88
@@ -116,14 +110,12 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
116 sg_mark_end(sg); 110 sg_mark_end(sg);
117 rctx->dst_sg = rctx->dst_tbl.sgl; 111 rctx->dst_sg = rctx->dst_tbl.sgl;
118 112
119 ret = qce_mapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst, 113 ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
120 rctx->dst_chained);
121 if (ret < 0) 114 if (ret < 0)
122 goto error_free; 115 goto error_free;
123 116
124 if (diff_dst) { 117 if (diff_dst) {
125 ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src, 118 ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
126 rctx->src_chained);
127 if (ret < 0) 119 if (ret < 0)
128 goto error_unmap_dst; 120 goto error_unmap_dst;
129 rctx->src_sg = req->src; 121 rctx->src_sg = req->src;
@@ -149,11 +141,9 @@ error_terminate:
149 qce_dma_terminate_all(&qce->dma); 141 qce_dma_terminate_all(&qce->dma);
150error_unmap_src: 142error_unmap_src:
151 if (diff_dst) 143 if (diff_dst)
152 qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src, 144 dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
153 rctx->src_chained);
154error_unmap_dst: 145error_unmap_dst:
155 qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst, 146 dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
156 rctx->dst_chained);
157error_free: 147error_free:
158 sg_free_table(&rctx->dst_tbl); 148 sg_free_table(&rctx->dst_tbl);
159 return ret; 149 return ret;
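
This is the common thread of the qce, sahara and talitos changes in this pull: the drivers' private scatterlist helpers (qce_countsg/qce_mapsg/qce_unmapsg and friends) are dropped in favour of the generic sg_nents_for_len() plus plain dma_map_sg()/dma_unmap_sg(). The core DMA mapping code already walks lists with sg_next(), so the per-driver *_chained bookkeeping becomes unnecessary. The resulting pattern, as a minimal stand-alone sketch (dev, sg and nbytes are placeholders for the driver's device and request scatterlist):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int example_map_request(struct device *dev, struct scatterlist *sg,
                               unsigned int nbytes)
{
        int nents, mapped;

        /* Entries needed to cover nbytes; -EINVAL if the list is too short. */
        nents = sg_nents_for_len(sg, nbytes);
        if (nents < 0)
                return nents;

        mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
        if (!mapped)
                return -ENOMEM;

        /* ... program the engine from sg_dma_address()/sg_dma_len() ... */

        dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
        return 0;
}

Note that dma_map_sg() may coalesce entries, so hardware programming should iterate over the count it returns, while dma_unmap_sg() is called with the original nents.
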
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index d5757cfcda2d..5c6a5f8633e5 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -32,8 +32,6 @@ struct qce_cipher_ctx {
32 * @ivsize: IV size 32 * @ivsize: IV size
33 * @src_nents: source entries 33 * @src_nents: source entries
34 * @dst_nents: destination entries 34 * @dst_nents: destination entries
35 * @src_chained: is source chained
36 * @dst_chained: is destination chained
37 * @result_sg: scatterlist used for result buffer 35 * @result_sg: scatterlist used for result buffer
38 * @dst_tbl: destination sg table 36 * @dst_tbl: destination sg table
39 * @dst_sg: destination sg pointer table beginning 37 * @dst_sg: destination sg pointer table beginning
@@ -47,8 +45,6 @@ struct qce_cipher_reqctx {
47 unsigned int ivsize; 45 unsigned int ivsize;
48 int src_nents; 46 int src_nents;
49 int dst_nents; 47 int dst_nents;
50 bool src_chained;
51 bool dst_chained;
52 struct scatterlist result_sg; 48 struct scatterlist result_sg;
53 struct sg_table dst_tbl; 49 struct sg_table dst_tbl;
54 struct scatterlist *dst_sg; 50 struct scatterlist *dst_sg;
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 378cb768647f..4797e795c9b9 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -54,58 +54,6 @@ void qce_dma_release(struct qce_dma_data *dma)
54 kfree(dma->result_buf); 54 kfree(dma->result_buf);
55} 55}
56 56
57int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
58 enum dma_data_direction dir, bool chained)
59{
60 int err;
61
62 if (chained) {
63 while (sg) {
64 err = dma_map_sg(dev, sg, 1, dir);
65 if (!err)
66 return -EFAULT;
67 sg = sg_next(sg);
68 }
69 } else {
70 err = dma_map_sg(dev, sg, nents, dir);
71 if (!err)
72 return -EFAULT;
73 }
74
75 return nents;
76}
77
78void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
79 enum dma_data_direction dir, bool chained)
80{
81 if (chained)
82 while (sg) {
83 dma_unmap_sg(dev, sg, 1, dir);
84 sg = sg_next(sg);
85 }
86 else
87 dma_unmap_sg(dev, sg, nents, dir);
88}
89
90int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained)
91{
92 struct scatterlist *sg = sglist;
93 int nents = 0;
94
95 if (chained)
96 *chained = false;
97
98 while (nbytes > 0 && sg) {
99 nents++;
100 nbytes -= sg->length;
101 if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained)
102 *chained = true;
103 sg = sg_next(sg);
104 }
105
106 return nents;
107}
108
109struct scatterlist * 57struct scatterlist *
110qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl) 58qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
111{ 59{
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 65bedb81de0b..130235d17bb4 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -49,11 +49,6 @@ int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
49 dma_async_tx_callback cb, void *cb_param); 49 dma_async_tx_callback cb, void *cb_param);
50void qce_dma_issue_pending(struct qce_dma_data *dma); 50void qce_dma_issue_pending(struct qce_dma_data *dma);
51int qce_dma_terminate_all(struct qce_dma_data *dma); 51int qce_dma_terminate_all(struct qce_dma_data *dma);
52int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained);
53void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
54 enum dma_data_direction dir, bool chained);
55int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
56 enum dma_data_direction dir, bool chained);
57struct scatterlist * 52struct scatterlist *
58qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add); 53qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);
59 54
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index be2f5049256a..0c9973ec80eb 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -51,9 +51,8 @@ static void qce_ahash_done(void *data)
51 if (error) 51 if (error)
52 dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error); 52 dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);
53 53
54 qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, 54 dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
55 rctx->src_chained); 55 dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
56 qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
57 56
58 memcpy(rctx->digest, result->auth_iv, digestsize); 57 memcpy(rctx->digest, result->auth_iv, digestsize);
59 if (req->result) 58 if (req->result)
@@ -92,16 +91,14 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
92 rctx->authklen = AES_KEYSIZE_128; 91 rctx->authklen = AES_KEYSIZE_128;
93 } 92 }
94 93
95 rctx->src_nents = qce_countsg(req->src, req->nbytes, 94 rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
96 &rctx->src_chained); 95 ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
97 ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
98 rctx->src_chained);
99 if (ret < 0) 96 if (ret < 0)
100 return ret; 97 return ret;
101 98
102 sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); 99 sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
103 100
104 ret = qce_mapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0); 101 ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
105 if (ret < 0) 102 if (ret < 0)
106 goto error_unmap_src; 103 goto error_unmap_src;
107 104
@@ -121,10 +118,9 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
121error_terminate: 118error_terminate:
122 qce_dma_terminate_all(&qce->dma); 119 qce_dma_terminate_all(&qce->dma);
123error_unmap_dst: 120error_unmap_dst:
124 qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0); 121 dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
125error_unmap_src: 122error_unmap_src:
126 qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, 123 dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
127 rctx->src_chained);
128 return ret; 124 return ret;
129} 125}
130 126
diff --git a/drivers/crypto/qce/sha.h b/drivers/crypto/qce/sha.h
index 286f0d5397f3..236bb5e9ae75 100644
--- a/drivers/crypto/qce/sha.h
+++ b/drivers/crypto/qce/sha.h
@@ -36,7 +36,6 @@ struct qce_sha_ctx {
36 * @flags: operation flags 36 * @flags: operation flags
37 * @src_orig: original request sg list 37 * @src_orig: original request sg list
38 * @nbytes_orig: original request number of bytes 38 * @nbytes_orig: original request number of bytes
39 * @src_chained: is source scatterlist chained
40 * @src_nents: source number of entries 39 * @src_nents: source number of entries
41 * @byte_count: byte count 40 * @byte_count: byte count
42 * @count: save count in states during update, import and export 41 * @count: save count in states during update, import and export
@@ -55,7 +54,6 @@ struct qce_sha_reqctx {
55 unsigned long flags; 54 unsigned long flags;
56 struct scatterlist *src_orig; 55 struct scatterlist *src_orig;
57 unsigned int nbytes_orig; 56 unsigned int nbytes_orig;
58 bool src_chained;
59 int src_nents; 57 int src_nents;
60 __be32 byte_count[2]; 58 __be32 byte_count[2];
61 u64 count; 59 u64 count;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 820dc3acb28c..f68c24a98277 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -173,7 +173,6 @@ struct sahara_aes_reqctx {
173 * @sg_in_idx: number of hw links 173 * @sg_in_idx: number of hw links
174 * @in_sg: scatterlist for input data 174 * @in_sg: scatterlist for input data
175 * @in_sg_chain: scatterlists for chained input data 175 * @in_sg_chain: scatterlists for chained input data
176 * @in_sg_chained: specifies if chained scatterlists are used or not
177 * @total: total number of bytes for transfer 176 * @total: total number of bytes for transfer
178 * @last: is this the last block 177 * @last: is this the last block
179 * @first: is this the first block 178 * @first: is this the first block
@@ -191,7 +190,6 @@ struct sahara_sha_reqctx {
191 unsigned int sg_in_idx; 190 unsigned int sg_in_idx;
192 struct scatterlist *in_sg; 191 struct scatterlist *in_sg;
193 struct scatterlist in_sg_chain[2]; 192 struct scatterlist in_sg_chain[2];
194 bool in_sg_chained;
195 size_t total; 193 size_t total;
196 unsigned int last; 194 unsigned int last;
197 unsigned int first; 195 unsigned int first;
@@ -274,31 +272,7 @@ static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
274 SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT; 272 SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
275} 273}
276 274
277static int sahara_sg_length(struct scatterlist *sg, 275static const char *sahara_err_src[16] = {
278 unsigned int total)
279{
280 int sg_nb;
281 unsigned int len;
282 struct scatterlist *sg_list;
283
284 sg_nb = 0;
285 sg_list = sg;
286
287 while (total) {
288 len = min(sg_list->length, total);
289
290 sg_nb++;
291 total -= len;
292
293 sg_list = sg_next(sg_list);
294 if (!sg_list)
295 total = 0;
296 }
297
298 return sg_nb;
299}
300
301static char *sahara_err_src[16] = {
302 "No error", 276 "No error",
303 "Header error", 277 "Header error",
304 "Descriptor length error", 278 "Descriptor length error",
@@ -317,14 +291,14 @@ static char *sahara_err_src[16] = {
317 "DMA error" 291 "DMA error"
318}; 292};
319 293
320static char *sahara_err_dmasize[4] = { 294static const char *sahara_err_dmasize[4] = {
321 "Byte transfer", 295 "Byte transfer",
322 "Half-word transfer", 296 "Half-word transfer",
323 "Word transfer", 297 "Word transfer",
324 "Reserved" 298 "Reserved"
325}; 299};
326 300
327static char *sahara_err_dmasrc[8] = { 301static const char *sahara_err_dmasrc[8] = {
328 "No error", 302 "No error",
329 "AHB bus error", 303 "AHB bus error",
330 "Internal IP bus error", 304 "Internal IP bus error",
@@ -335,7 +309,7 @@ static char *sahara_err_dmasrc[8] = {
335 "DMA HW error" 309 "DMA HW error"
336}; 310};
337 311
338static char *sahara_cha_errsrc[12] = { 312static const char *sahara_cha_errsrc[12] = {
339 "Input buffer non-empty", 313 "Input buffer non-empty",
340 "Illegal address", 314 "Illegal address",
341 "Illegal mode", 315 "Illegal mode",
@@ -350,7 +324,7 @@ static char *sahara_cha_errsrc[12] = {
350 "Reserved" 324 "Reserved"
351}; 325};
352 326
353static char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" }; 327static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
354 328
355static void sahara_decode_error(struct sahara_dev *dev, unsigned int error) 329static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
356{ 330{
@@ -380,7 +354,7 @@ static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
380 dev_err(dev->device, "\n"); 354 dev_err(dev->device, "\n");
381} 355}
382 356
383static char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" }; 357static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
384 358
385static void sahara_decode_status(struct sahara_dev *dev, unsigned int status) 359static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
386{ 360{
@@ -502,8 +476,8 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
502 idx++; 476 idx++;
503 } 477 }
504 478
505 dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total); 479 dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
506 dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total); 480 dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
507 if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) { 481 if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
508 dev_err(dev->device, "not enough hw links (%d)\n", 482 dev_err(dev->device, "not enough hw links (%d)\n",
509 dev->nb_in_sg + dev->nb_out_sg); 483 dev->nb_in_sg + dev->nb_out_sg);
@@ -818,45 +792,26 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
818 792
819 dev->in_sg = rctx->in_sg; 793 dev->in_sg = rctx->in_sg;
820 794
821 dev->nb_in_sg = sahara_sg_length(dev->in_sg, rctx->total); 795 dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
822 if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) { 796 if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
823 dev_err(dev->device, "not enough hw links (%d)\n", 797 dev_err(dev->device, "not enough hw links (%d)\n",
824 dev->nb_in_sg + dev->nb_out_sg); 798 dev->nb_in_sg + dev->nb_out_sg);
825 return -EINVAL; 799 return -EINVAL;
826 } 800 }
827 801
828 if (rctx->in_sg_chained) { 802 sg = dev->in_sg;
829 i = start; 803 ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
830 sg = dev->in_sg; 804 if (!ret)
831 while (sg) { 805 return -EFAULT;
832 ret = dma_map_sg(dev->device, sg, 1, 806
833 DMA_TO_DEVICE); 807 for (i = start; i < dev->nb_in_sg + start; i++) {
834 if (!ret) 808 dev->hw_link[i]->len = sg->length;
835 return -EFAULT; 809 dev->hw_link[i]->p = sg->dma_address;
836 810 if (i == (dev->nb_in_sg + start - 1)) {
837 dev->hw_link[i]->len = sg->length; 811 dev->hw_link[i]->next = 0;
838 dev->hw_link[i]->p = sg->dma_address; 812 } else {
839 dev->hw_link[i]->next = dev->hw_phys_link[i + 1]; 813 dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
840 sg = sg_next(sg); 814 sg = sg_next(sg);
841 i += 1;
842 }
843 dev->hw_link[i-1]->next = 0;
844 } else {
845 sg = dev->in_sg;
846 ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
847 DMA_TO_DEVICE);
848 if (!ret)
849 return -EFAULT;
850
851 for (i = start; i < dev->nb_in_sg + start; i++) {
852 dev->hw_link[i]->len = sg->length;
853 dev->hw_link[i]->p = sg->dma_address;
854 if (i == (dev->nb_in_sg + start - 1)) {
855 dev->hw_link[i]->next = 0;
856 } else {
857 dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
858 sg = sg_next(sg);
859 }
860 } 815 }
861 } 816 }
862 817
@@ -1004,7 +959,6 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
1004 rctx->total = req->nbytes + rctx->buf_cnt; 959 rctx->total = req->nbytes + rctx->buf_cnt;
1005 rctx->in_sg = rctx->in_sg_chain; 960 rctx->in_sg = rctx->in_sg_chain;
1006 961
1007 rctx->in_sg_chained = true;
1008 req->src = rctx->in_sg_chain; 962 req->src = rctx->in_sg_chain;
1009 /* only data from previous operation */ 963 /* only data from previous operation */
1010 } else if (rctx->buf_cnt) { 964 } else if (rctx->buf_cnt) {
@@ -1015,13 +969,11 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
1015 /* buf was copied into rembuf above */ 969 /* buf was copied into rembuf above */
1016 sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt); 970 sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
1017 rctx->total = rctx->buf_cnt; 971 rctx->total = rctx->buf_cnt;
1018 rctx->in_sg_chained = false;
1019 /* no data from previous operation */ 972 /* no data from previous operation */
1020 } else { 973 } else {
1021 rctx->in_sg = req->src; 974 rctx->in_sg = req->src;
1022 rctx->total = req->nbytes; 975 rctx->total = req->nbytes;
1023 req->src = rctx->in_sg; 976 req->src = rctx->in_sg;
1024 rctx->in_sg_chained = false;
1025 } 977 }
1026 978
1027 /* on next call, we only have the remaining data in the buffer */ 979 /* on next call, we only have the remaining data in the buffer */
@@ -1030,23 +982,6 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
1030 return -EINPROGRESS; 982 return -EINPROGRESS;
1031} 983}
1032 984
1033static void sahara_sha_unmap_sg(struct sahara_dev *dev,
1034 struct sahara_sha_reqctx *rctx)
1035{
1036 struct scatterlist *sg;
1037
1038 if (rctx->in_sg_chained) {
1039 sg = dev->in_sg;
1040 while (sg) {
1041 dma_unmap_sg(dev->device, sg, 1, DMA_TO_DEVICE);
1042 sg = sg_next(sg);
1043 }
1044 } else {
1045 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
1046 DMA_TO_DEVICE);
1047 }
1048}
1049
1050static int sahara_sha_process(struct ahash_request *req) 985static int sahara_sha_process(struct ahash_request *req)
1051{ 986{
1052 struct sahara_dev *dev = dev_ptr; 987 struct sahara_dev *dev = dev_ptr;
@@ -1086,7 +1021,8 @@ static int sahara_sha_process(struct ahash_request *req)
1086 } 1021 }
1087 1022
1088 if (rctx->sg_in_idx) 1023 if (rctx->sg_in_idx)
1089 sahara_sha_unmap_sg(dev, rctx); 1024 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
1025 DMA_TO_DEVICE);
1090 1026
1091 memcpy(rctx->context, dev->context_base, rctx->context_size); 1027 memcpy(rctx->context, dev->context_base, rctx->context_size);
1092 1028
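
With the chained-scatterlist special case removed, sahara_sha_hw_links_create() maps the whole input list in one dma_map_sg() call and walks it to fill the hardware link descriptors, terminating the last one with a zero next pointer. A sketch of that fill loop against a hypothetical 32-bit struct example_hw_link (the real driver uses its preallocated hw_link[]/hw_phys_link[] arrays, and device addresses are assumed to fit in 32 bits):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

struct example_hw_link {
        u32 len;
        u32 p;          /* DMA address of the data segment */
        u32 next;       /* DMA address of the next link, 0 terminates */
};

static void example_fill_hw_links(struct example_hw_link *link,
                                  const dma_addr_t *link_phys,
                                  struct scatterlist *sgl, int nents)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                link[i].len = sg_dma_len(sg);
                link[i].p = sg_dma_address(sg);
                /* Chain to the next descriptor, or terminate the list. */
                link[i].next = (i == nents - 1) ? 0 : link_phys[i + 1];
        }
}
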
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 3b20a1bce703..46f531e19ccf 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -857,8 +857,6 @@ badkey:
857 * talitos_edesc - s/w-extended descriptor 857 * talitos_edesc - s/w-extended descriptor
858 * @src_nents: number of segments in input scatterlist 858 * @src_nents: number of segments in input scatterlist
859 * @dst_nents: number of segments in output scatterlist 859 * @dst_nents: number of segments in output scatterlist
860 * @src_chained: whether src is chained or not
861 * @dst_chained: whether dst is chained or not
862 * @icv_ool: whether ICV is out-of-line 860 * @icv_ool: whether ICV is out-of-line
863 * @iv_dma: dma address of iv for checking continuity and link table 861 * @iv_dma: dma address of iv for checking continuity and link table
864 * @dma_len: length of dma mapped link_tbl space 862 * @dma_len: length of dma mapped link_tbl space
@@ -874,8 +872,6 @@ badkey:
874struct talitos_edesc { 872struct talitos_edesc {
875 int src_nents; 873 int src_nents;
876 int dst_nents; 874 int dst_nents;
877 bool src_chained;
878 bool dst_chained;
879 bool icv_ool; 875 bool icv_ool;
880 dma_addr_t iv_dma; 876 dma_addr_t iv_dma;
881 int dma_len; 877 int dma_len;
@@ -887,29 +883,6 @@ struct talitos_edesc {
887 }; 883 };
888}; 884};
889 885
890static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
891 unsigned int nents, enum dma_data_direction dir,
892 bool chained)
893{
894 if (unlikely(chained))
895 while (sg) {
896 dma_map_sg(dev, sg, 1, dir);
897 sg = sg_next(sg);
898 }
899 else
900 dma_map_sg(dev, sg, nents, dir);
901 return nents;
902}
903
904static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
905 enum dma_data_direction dir)
906{
907 while (sg) {
908 dma_unmap_sg(dev, sg, 1, dir);
909 sg = sg_next(sg);
910 }
911}
912
913static void talitos_sg_unmap(struct device *dev, 886static void talitos_sg_unmap(struct device *dev,
914 struct talitos_edesc *edesc, 887 struct talitos_edesc *edesc,
915 struct scatterlist *src, 888 struct scatterlist *src,
@@ -919,24 +892,13 @@ static void talitos_sg_unmap(struct device *dev,
919 unsigned int dst_nents = edesc->dst_nents ? : 1; 892 unsigned int dst_nents = edesc->dst_nents ? : 1;
920 893
921 if (src != dst) { 894 if (src != dst) {
922 if (edesc->src_chained) 895 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
923 talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
924 else
925 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
926 896
927 if (dst) { 897 if (dst) {
928 if (edesc->dst_chained) 898 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
929 talitos_unmap_sg_chain(dev, dst,
930 DMA_FROM_DEVICE);
931 else
932 dma_unmap_sg(dev, dst, dst_nents,
933 DMA_FROM_DEVICE);
934 } 899 }
935 } else 900 } else
936 if (edesc->src_chained) 901 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
937 talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
938 else
939 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
940} 902}
941 903
942static void ipsec_esp_unmap(struct device *dev, 904static void ipsec_esp_unmap(struct device *dev,
@@ -1118,10 +1080,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1118 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, 1080 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
1119 DMA_TO_DEVICE); 1081 DMA_TO_DEVICE);
1120 1082
1121 sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ?: 1, 1083 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
1122 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL 1084 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1123 : DMA_TO_DEVICE, 1085 : DMA_TO_DEVICE);
1124 edesc->src_chained);
1125 1086
1126 /* hmac data */ 1087 /* hmac data */
1127 desc->ptr[1].len = cpu_to_be16(areq->assoclen); 1088 desc->ptr[1].len = cpu_to_be16(areq->assoclen);
@@ -1185,9 +1146,8 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1185 desc->ptr[5].j_extent = authsize; 1146 desc->ptr[5].j_extent = authsize;
1186 1147
1187 if (areq->src != areq->dst) 1148 if (areq->src != areq->dst)
1188 sg_count = talitos_map_sg(dev, areq->dst, 1149 sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
1189 edesc->dst_nents ? : 1, 1150 DMA_FROM_DEVICE);
1190 DMA_FROM_DEVICE, edesc->dst_chained);
1191 1151
1192 edesc->icv_ool = false; 1152 edesc->icv_ool = false;
1193 1153
@@ -1234,26 +1194,6 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1234} 1194}
1235 1195
1236/* 1196/*
1237 * derive number of elements in scatterlist
1238 */
1239static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
1240{
1241 struct scatterlist *sg = sg_list;
1242 int sg_nents = 0;
1243
1244 *chained = false;
1245 while (nbytes > 0 && sg) {
1246 sg_nents++;
1247 nbytes -= sg->length;
1248 if (!sg_is_last(sg) && (sg + 1)->length == 0)
1249 *chained = true;
1250 sg = sg_next(sg);
1251 }
1252
1253 return sg_nents;
1254}
1255
1256/*
1257 * allocate and map the extended descriptor 1197 * allocate and map the extended descriptor
1258 */ 1198 */
1259static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, 1199static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
@@ -1270,7 +1210,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1270{ 1210{
1271 struct talitos_edesc *edesc; 1211 struct talitos_edesc *edesc;
1272 int src_nents, dst_nents, alloc_len, dma_len; 1212 int src_nents, dst_nents, alloc_len, dma_len;
1273 bool src_chained = false, dst_chained = false;
1274 dma_addr_t iv_dma = 0; 1213 dma_addr_t iv_dma = 0;
1275 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : 1214 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1276 GFP_ATOMIC; 1215 GFP_ATOMIC;
@@ -1287,18 +1226,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1287 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); 1226 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1288 1227
1289 if (!dst || dst == src) { 1228 if (!dst || dst == src) {
1290 src_nents = sg_count(src, assoclen + cryptlen + authsize, 1229 src_nents = sg_nents_for_len(src,
1291 &src_chained); 1230 assoclen + cryptlen + authsize);
1292 src_nents = (src_nents == 1) ? 0 : src_nents; 1231 src_nents = (src_nents == 1) ? 0 : src_nents;
1293 dst_nents = dst ? src_nents : 0; 1232 dst_nents = dst ? src_nents : 0;
1294 } else { /* dst && dst != src*/ 1233 } else { /* dst && dst != src*/
1295 src_nents = sg_count(src, assoclen + cryptlen + 1234 src_nents = sg_nents_for_len(src, assoclen + cryptlen +
1296 (encrypt ? 0 : authsize), 1235 (encrypt ? 0 : authsize));
1297 &src_chained);
1298 src_nents = (src_nents == 1) ? 0 : src_nents; 1236 src_nents = (src_nents == 1) ? 0 : src_nents;
1299 dst_nents = sg_count(dst, assoclen + cryptlen + 1237 dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
1300 (encrypt ? authsize : 0), 1238 (encrypt ? authsize : 0));
1301 &dst_chained);
1302 dst_nents = (dst_nents == 1) ? 0 : dst_nents; 1239 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1303 } 1240 }
1304 1241
@@ -1332,8 +1269,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1332 1269
1333 edesc->src_nents = src_nents; 1270 edesc->src_nents = src_nents;
1334 edesc->dst_nents = dst_nents; 1271 edesc->dst_nents = dst_nents;
1335 edesc->src_chained = src_chained;
1336 edesc->dst_chained = dst_chained;
1337 edesc->iv_dma = iv_dma; 1272 edesc->iv_dma = iv_dma;
1338 edesc->dma_len = dma_len; 1273 edesc->dma_len = dma_len;
1339 if (dma_len) 1274 if (dma_len)
@@ -1518,8 +1453,7 @@ int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
1518 } else { 1453 } else {
1519 to_talitos_ptr_extent_clear(ptr, is_sec1); 1454 to_talitos_ptr_extent_clear(ptr, is_sec1);
1520 1455
1521 sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir, 1456 sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);
1522 edesc->src_chained);
1523 1457
1524 if (sg_count == 1) { 1458 if (sg_count == 1) {
1525 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1); 1459 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
@@ -1552,8 +1486,7 @@ void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
1552 bool is_sec1 = has_ftr_sec1(priv); 1486 bool is_sec1 = has_ftr_sec1(priv);
1553 1487
1554 if (dir != DMA_NONE) 1488 if (dir != DMA_NONE)
1555 sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1, 1489 sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);
1556 dir, edesc->dst_chained);
1557 1490
1558 to_talitos_ptr_len(ptr, len, is_sec1); 1491 to_talitos_ptr_len(ptr, len, is_sec1);
1559 1492
@@ -1897,12 +1830,11 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1897 unsigned int nbytes_to_hash; 1830 unsigned int nbytes_to_hash;
1898 unsigned int to_hash_later; 1831 unsigned int to_hash_later;
1899 unsigned int nsg; 1832 unsigned int nsg;
1900 bool chained;
1901 1833
1902 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { 1834 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1903 /* Buffer up to one whole block */ 1835 /* Buffer up to one whole block */
1904 sg_copy_to_buffer(areq->src, 1836 sg_copy_to_buffer(areq->src,
1905 sg_count(areq->src, nbytes, &chained), 1837 sg_nents_for_len(areq->src, nbytes),
1906 req_ctx->buf + req_ctx->nbuf, nbytes); 1838 req_ctx->buf + req_ctx->nbuf, nbytes);
1907 req_ctx->nbuf += nbytes; 1839 req_ctx->nbuf += nbytes;
1908 return 0; 1840 return 0;
@@ -1935,7 +1867,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1935 req_ctx->psrc = areq->src; 1867 req_ctx->psrc = areq->src;
1936 1868
1937 if (to_hash_later) { 1869 if (to_hash_later) {
1938 int nents = sg_count(areq->src, nbytes, &chained); 1870 int nents = sg_nents_for_len(areq->src, nbytes);
1939 sg_pcopy_to_buffer(areq->src, nents, 1871 sg_pcopy_to_buffer(areq->src, nents,
1940 req_ctx->bufnext, 1872 req_ctx->bufnext,
1941 to_hash_later, 1873 to_hash_later,
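
The same sg_nents_for_len() conversion shows up in the talitos hash path: when a request leaves a partial block behind, the tail is copied out of the scatterlist with sg_pcopy_to_buffer() so the next update can prepend it. A stand-alone sketch of that step (names are illustrative):

#include <linux/scatterlist.h>
#include <linux/types.h>

static int example_stash_tail(struct scatterlist *src, unsigned int nbytes,
                              unsigned int to_hash_later, u8 *bufnext)
{
        int nents = sg_nents_for_len(src, nbytes);

        if (nents < 0)
                return nents;

        /* Copy the trailing to_hash_later bytes, skipping the hashed part. */
        sg_pcopy_to_buffer(src, nents, bufnext, to_hash_later,
                           nbytes - to_hash_later);
        return 0;
}
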
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index fded0a5cfcd7..4c243c1ffc7f 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1414,7 +1414,7 @@ static int ux500_cryp_probe(struct platform_device *pdev)
1414 struct device *dev = &pdev->dev; 1414 struct device *dev = &pdev->dev;
1415 1415
1416 dev_dbg(dev, "[%s]", __func__); 1416 dev_dbg(dev, "[%s]", __func__);
1417 device_data = kzalloc(sizeof(struct cryp_device_data), GFP_ATOMIC); 1417 device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
1418 if (!device_data) { 1418 if (!device_data) {
1419 dev_err(dev, "[%s]: kzalloc() failed!", __func__); 1419 dev_err(dev, "[%s]: kzalloc() failed!", __func__);
1420 ret = -ENOMEM; 1420 ret = -ENOMEM;
@@ -1435,23 +1435,15 @@ static int ux500_cryp_probe(struct platform_device *pdev)
1435 dev_err(dev, "[%s]: platform_get_resource() failed", 1435 dev_err(dev, "[%s]: platform_get_resource() failed",
1436 __func__); 1436 __func__);
1437 ret = -ENODEV; 1437 ret = -ENODEV;
1438 goto out_kfree; 1438 goto out;
1439 }
1440
1441 res = request_mem_region(res->start, resource_size(res), pdev->name);
1442 if (res == NULL) {
1443 dev_err(dev, "[%s]: request_mem_region() failed",
1444 __func__);
1445 ret = -EBUSY;
1446 goto out_kfree;
1447 } 1439 }
1448 1440
1449 device_data->phybase = res->start; 1441 device_data->phybase = res->start;
1450 device_data->base = ioremap(res->start, resource_size(res)); 1442 device_data->base = devm_ioremap_resource(dev, res);
1451 if (!device_data->base) { 1443 if (!device_data->base) {
1452 dev_err(dev, "[%s]: ioremap failed!", __func__); 1444 dev_err(dev, "[%s]: ioremap failed!", __func__);
1453 ret = -ENOMEM; 1445 ret = -ENOMEM;
1454 goto out_free_mem; 1446 goto out;
1455 } 1447 }
1456 1448
1457 spin_lock_init(&device_data->ctx_lock); 1449 spin_lock_init(&device_data->ctx_lock);
@@ -1463,11 +1455,11 @@ static int ux500_cryp_probe(struct platform_device *pdev)
1463 dev_err(dev, "[%s]: could not get cryp regulator", __func__); 1455 dev_err(dev, "[%s]: could not get cryp regulator", __func__);
1464 ret = PTR_ERR(device_data->pwr_regulator); 1456 ret = PTR_ERR(device_data->pwr_regulator);
1465 device_data->pwr_regulator = NULL; 1457 device_data->pwr_regulator = NULL;
1466 goto out_unmap; 1458 goto out;
1467 } 1459 }
1468 1460
1469 /* Enable the clk for CRYP hardware block */ 1461 /* Enable the clk for CRYP hardware block */
1470 device_data->clk = clk_get(&pdev->dev, NULL); 1462 device_data->clk = devm_clk_get(&pdev->dev, NULL);
1471 if (IS_ERR(device_data->clk)) { 1463 if (IS_ERR(device_data->clk)) {
1472 dev_err(dev, "[%s]: clk_get() failed!", __func__); 1464 dev_err(dev, "[%s]: clk_get() failed!", __func__);
1473 ret = PTR_ERR(device_data->clk); 1465 ret = PTR_ERR(device_data->clk);
@@ -1477,7 +1469,7 @@ static int ux500_cryp_probe(struct platform_device *pdev)
1477 ret = clk_prepare(device_data->clk); 1469 ret = clk_prepare(device_data->clk);
1478 if (ret) { 1470 if (ret) {
1479 dev_err(dev, "[%s]: clk_prepare() failed!", __func__); 1471 dev_err(dev, "[%s]: clk_prepare() failed!", __func__);
1480 goto out_clk; 1472 goto out_regulator;
1481 } 1473 }
1482 1474
1483 /* Enable device power (and clock) */ 1475 /* Enable device power (and clock) */
@@ -1510,11 +1502,8 @@ static int ux500_cryp_probe(struct platform_device *pdev)
1510 goto out_power; 1502 goto out_power;
1511 } 1503 }
1512 1504
1513 ret = request_irq(res_irq->start, 1505 ret = devm_request_irq(&pdev->dev, res_irq->start,
1514 cryp_interrupt_handler, 1506 cryp_interrupt_handler, 0, "cryp1", device_data);
1515 0,
1516 "cryp1",
1517 device_data);
1518 if (ret) { 1507 if (ret) {
1519 dev_err(dev, "[%s]: Unable to request IRQ", __func__); 1508 dev_err(dev, "[%s]: Unable to request IRQ", __func__);
1520 goto out_power; 1509 goto out_power;
@@ -1550,28 +1539,15 @@ out_power:
1550out_clk_unprepare: 1539out_clk_unprepare:
1551 clk_unprepare(device_data->clk); 1540 clk_unprepare(device_data->clk);
1552 1541
1553out_clk:
1554 clk_put(device_data->clk);
1555
1556out_regulator: 1542out_regulator:
1557 regulator_put(device_data->pwr_regulator); 1543 regulator_put(device_data->pwr_regulator);
1558 1544
1559out_unmap:
1560 iounmap(device_data->base);
1561
1562out_free_mem:
1563 release_mem_region(res->start, resource_size(res));
1564
1565out_kfree:
1566 kfree(device_data);
1567out: 1545out:
1568 return ret; 1546 return ret;
1569} 1547}
1570 1548
1571static int ux500_cryp_remove(struct platform_device *pdev) 1549static int ux500_cryp_remove(struct platform_device *pdev)
1572{ 1550{
1573 struct resource *res = NULL;
1574 struct resource *res_irq = NULL;
1575 struct cryp_device_data *device_data; 1551 struct cryp_device_data *device_data;
1576 1552
1577 dev_dbg(&pdev->dev, "[%s]", __func__); 1553 dev_dbg(&pdev->dev, "[%s]", __func__);
@@ -1607,37 +1583,18 @@ static int ux500_cryp_remove(struct platform_device *pdev)
1607 if (list_empty(&driver_data.device_list.k_list)) 1583 if (list_empty(&driver_data.device_list.k_list))
1608 cryp_algs_unregister_all(); 1584 cryp_algs_unregister_all();
1609 1585
1610 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1611 if (!res_irq)
1612 dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable",
1613 __func__);
1614 else {
1615 disable_irq(res_irq->start);
1616 free_irq(res_irq->start, device_data);
1617 }
1618
1619 if (cryp_disable_power(&pdev->dev, device_data, false)) 1586 if (cryp_disable_power(&pdev->dev, device_data, false))
1620 dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed", 1587 dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
1621 __func__); 1588 __func__);
1622 1589
1623 clk_unprepare(device_data->clk); 1590 clk_unprepare(device_data->clk);
1624 clk_put(device_data->clk);
1625 regulator_put(device_data->pwr_regulator); 1591 regulator_put(device_data->pwr_regulator);
1626 1592
1627 iounmap(device_data->base);
1628
1629 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1630 if (res)
1631 release_mem_region(res->start, resource_size(res));
1632
1633 kfree(device_data);
1634
1635 return 0; 1593 return 0;
1636} 1594}
1637 1595
1638static void ux500_cryp_shutdown(struct platform_device *pdev) 1596static void ux500_cryp_shutdown(struct platform_device *pdev)
1639{ 1597{
1640 struct resource *res_irq = NULL;
1641 struct cryp_device_data *device_data; 1598 struct cryp_device_data *device_data;
1642 1599
1643 dev_dbg(&pdev->dev, "[%s]", __func__); 1600 dev_dbg(&pdev->dev, "[%s]", __func__);
@@ -1673,15 +1630,6 @@ static void ux500_cryp_shutdown(struct platform_device *pdev)
1673 if (list_empty(&driver_data.device_list.k_list)) 1630 if (list_empty(&driver_data.device_list.k_list))
1674 cryp_algs_unregister_all(); 1631 cryp_algs_unregister_all();
1675 1632
1676 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1677 if (!res_irq)
1678 dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable",
1679 __func__);
1680 else {
1681 disable_irq(res_irq->start);
1682 free_irq(res_irq->start, device_data);
1683 }
1684
1685 if (cryp_disable_power(&pdev->dev, device_data, false)) 1633 if (cryp_disable_power(&pdev->dev, device_data, false))
1686 dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed", 1634 dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
1687 __func__); 1635 __func__);
@@ -1777,6 +1725,7 @@ static const struct of_device_id ux500_cryp_match[] = {
1777 { .compatible = "stericsson,ux500-cryp" }, 1725 { .compatible = "stericsson,ux500-cryp" },
1778 { }, 1726 { },
1779}; 1727};
1728MODULE_DEVICE_TABLE(of, ux500_cryp_match);
1780 1729
1781static struct platform_driver cryp_driver = { 1730static struct platform_driver cryp_driver = {
1782 .probe = ux500_cryp_probe, 1731 .probe = ux500_cryp_probe,
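
The ux500 probe paths switch to managed (devm_*) resources: devm_kzalloc(), devm_ioremap_resource(), devm_clk_get() and devm_request_irq() are released automatically when the device is unbound, which is why the out_kfree/out_unmap/out_free_mem labels and the matching teardown in remove()/shutdown() disappear. A minimal sketch of that probe pattern, with foo_* names as placeholders; devm_ioremap_resource() reports failure through ERR_PTR(), hence the IS_ERR() check:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
        void __iomem *base;
        struct clk *clk;
};

static irqreturn_t foo_irq_handler(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
        struct foo_priv *priv;
        struct resource *res;
        int irq, ret;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->base))
                return PTR_ERR(priv->base);

        priv->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(priv->clk))
                return PTR_ERR(priv->clk);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        ret = devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
                               dev_name(&pdev->dev), priv);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, priv);

        /* Everything acquired above is released automatically on unbind. */
        return 0;
}

The clock still needs explicit clk_prepare()/clk_unprepare() pairing, which is why the out_clk_unprepare label survives in the hunks above.
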
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 5f5f360628fc..f47d112041b2 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1657,7 +1657,7 @@ static int ux500_hash_probe(struct platform_device *pdev)
1657 struct hash_device_data *device_data; 1657 struct hash_device_data *device_data;
1658 struct device *dev = &pdev->dev; 1658 struct device *dev = &pdev->dev;
1659 1659
1660 device_data = kzalloc(sizeof(*device_data), GFP_ATOMIC); 1660 device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
1661 if (!device_data) { 1661 if (!device_data) {
1662 ret = -ENOMEM; 1662 ret = -ENOMEM;
1663 goto out; 1663 goto out;
@@ -1670,22 +1670,15 @@ static int ux500_hash_probe(struct platform_device *pdev)
1670 if (!res) { 1670 if (!res) {
1671 dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__); 1671 dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
1672 ret = -ENODEV; 1672 ret = -ENODEV;
1673 goto out_kfree; 1673 goto out;
1674 }
1675
1676 res = request_mem_region(res->start, resource_size(res), pdev->name);
1677 if (res == NULL) {
1678 dev_dbg(dev, "%s: request_mem_region() failed!\n", __func__);
1679 ret = -EBUSY;
1680 goto out_kfree;
1681 } 1674 }
1682 1675
1683 device_data->phybase = res->start; 1676 device_data->phybase = res->start;
1684 device_data->base = ioremap(res->start, resource_size(res)); 1677 device_data->base = devm_ioremap_resource(dev, res);
1685 if (!device_data->base) { 1678 if (!device_data->base) {
1686 dev_err(dev, "%s: ioremap() failed!\n", __func__); 1679 dev_err(dev, "%s: ioremap() failed!\n", __func__);
1687 ret = -ENOMEM; 1680 ret = -ENOMEM;
1688 goto out_free_mem; 1681 goto out;
1689 } 1682 }
1690 spin_lock_init(&device_data->ctx_lock); 1683 spin_lock_init(&device_data->ctx_lock);
1691 spin_lock_init(&device_data->power_state_lock); 1684 spin_lock_init(&device_data->power_state_lock);
@@ -1696,11 +1689,11 @@ static int ux500_hash_probe(struct platform_device *pdev)
1696 dev_err(dev, "%s: regulator_get() failed!\n", __func__); 1689 dev_err(dev, "%s: regulator_get() failed!\n", __func__);
1697 ret = PTR_ERR(device_data->regulator); 1690 ret = PTR_ERR(device_data->regulator);
1698 device_data->regulator = NULL; 1691 device_data->regulator = NULL;
1699 goto out_unmap; 1692 goto out;
1700 } 1693 }
1701 1694
1702 /* Enable the clock for HASH1 hardware block */ 1695 /* Enable the clock for HASH1 hardware block */
1703 device_data->clk = clk_get(dev, NULL); 1696 device_data->clk = devm_clk_get(dev, NULL);
1704 if (IS_ERR(device_data->clk)) { 1697 if (IS_ERR(device_data->clk)) {
1705 dev_err(dev, "%s: clk_get() failed!\n", __func__); 1698 dev_err(dev, "%s: clk_get() failed!\n", __func__);
1706 ret = PTR_ERR(device_data->clk); 1699 ret = PTR_ERR(device_data->clk);
@@ -1710,7 +1703,7 @@ static int ux500_hash_probe(struct platform_device *pdev)
1710 ret = clk_prepare(device_data->clk); 1703 ret = clk_prepare(device_data->clk);
1711 if (ret) { 1704 if (ret) {
1712 dev_err(dev, "%s: clk_prepare() failed!\n", __func__); 1705 dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
1713 goto out_clk; 1706 goto out_regulator;
1714 } 1707 }
1715 1708
1716 /* Enable device power (and clock) */ 1709 /* Enable device power (and clock) */
@@ -1752,20 +1745,9 @@ out_power:
1752out_clk_unprepare: 1745out_clk_unprepare:
1753 clk_unprepare(device_data->clk); 1746 clk_unprepare(device_data->clk);
1754 1747
1755out_clk:
1756 clk_put(device_data->clk);
1757
1758out_regulator: 1748out_regulator:
1759 regulator_put(device_data->regulator); 1749 regulator_put(device_data->regulator);
1760 1750
1761out_unmap:
1762 iounmap(device_data->base);
1763
1764out_free_mem:
1765 release_mem_region(res->start, resource_size(res));
1766
1767out_kfree:
1768 kfree(device_data);
1769out: 1751out:
1770 return ret; 1752 return ret;
1771} 1753}
@@ -1776,7 +1758,6 @@ out:
1776 */ 1758 */
1777static int ux500_hash_remove(struct platform_device *pdev) 1759static int ux500_hash_remove(struct platform_device *pdev)
1778{ 1760{
1779 struct resource *res;
1780 struct hash_device_data *device_data; 1761 struct hash_device_data *device_data;
1781 struct device *dev = &pdev->dev; 1762 struct device *dev = &pdev->dev;
1782 1763
@@ -1816,17 +1797,8 @@ static int ux500_hash_remove(struct platform_device *pdev)
1816 __func__); 1797 __func__);
1817 1798
1818 clk_unprepare(device_data->clk); 1799 clk_unprepare(device_data->clk);
1819 clk_put(device_data->clk);
1820 regulator_put(device_data->regulator); 1800 regulator_put(device_data->regulator);
1821 1801
1822 iounmap(device_data->base);
1823
1824 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1825 if (res)
1826 release_mem_region(res->start, resource_size(res));
1827
1828 kfree(device_data);
1829
1830 return 0; 1802 return 0;
1831} 1803}
1832 1804
@@ -1836,7 +1808,6 @@ static int ux500_hash_remove(struct platform_device *pdev)
1836 */ 1808 */
1837static void ux500_hash_shutdown(struct platform_device *pdev) 1809static void ux500_hash_shutdown(struct platform_device *pdev)
1838{ 1810{
1839 struct resource *res = NULL;
1840 struct hash_device_data *device_data; 1811 struct hash_device_data *device_data;
1841 1812
1842 device_data = platform_get_drvdata(pdev); 1813 device_data = platform_get_drvdata(pdev);
@@ -1870,12 +1841,6 @@ static void ux500_hash_shutdown(struct platform_device *pdev)
1870 if (list_empty(&driver_data.device_list.k_list)) 1841 if (list_empty(&driver_data.device_list.k_list))
1871 ahash_algs_unregister_all(device_data); 1842 ahash_algs_unregister_all(device_data);
1872 1843
1873 iounmap(device_data->base);
1874
1875 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1876 if (res)
1877 release_mem_region(res->start, resource_size(res));
1878
1879 if (hash_disable_power(device_data, false)) 1844 if (hash_disable_power(device_data, false))
1880 dev_err(&pdev->dev, "%s: hash_disable_power() failed\n", 1845 dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
1881 __func__); 1846 __func__);
@@ -1958,6 +1923,7 @@ static const struct of_device_id ux500_hash_match[] = {
1958 { .compatible = "stericsson,ux500-hash" }, 1923 { .compatible = "stericsson,ux500-hash" },
1959 { }, 1924 { },
1960}; 1925};
1926MODULE_DEVICE_TABLE(of, ux500_hash_match);
1961 1927
1962static struct platform_driver hash_driver = { 1928static struct platform_driver hash_driver = {
1963 .probe = ux500_hash_probe, 1929 .probe = ux500_hash_probe,
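
Finally, both ux500 drivers now export their OF match tables with MODULE_DEVICE_TABLE(of, ...). The macro emits the compatible strings as module aliases, so userspace can autoload the module when the matching device-tree node shows up; without it the module has to be built in or loaded by hand before it can bind. The plumbing, sketched with placeholder foo names:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        return 0;       /* sketch only */
}

static const struct of_device_id foo_of_match[] = {
        { .compatible = "vendor,foo" },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static struct platform_driver foo_driver = {
        .probe = foo_probe,
        .driver = {
                .name = "foo",
                .of_match_table = foo_of_match,
        },
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");
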