Diffstat (limited to 'drivers/crypto')
145 files changed, 9318 insertions, 3107 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index d7c85c79094b..1fb622f2a87d 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -27,7 +27,7 @@ config CRYPTO_DEV_PADLOCK_AES
 	tristate "PadLock driver for AES algorithm"
 	depends on CRYPTO_DEV_PADLOCK
 	select CRYPTO_BLKCIPHER
-	select CRYPTO_AES
+	select CRYPTO_LIB_AES
 	help
 	  Use VIA PadLock for AES algorithm.
 
@@ -170,7 +170,7 @@ config CRYPTO_DES_S390
 	depends on S390
 	select CRYPTO_ALGAPI
 	select CRYPTO_BLKCIPHER
-	select CRYPTO_DES
+	select CRYPTO_LIB_DES
 	help
 	  This is the s390 hardware accelerated implementation of the
 	  DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
@@ -209,12 +209,12 @@ config S390_PRNG
 	  It is available as of z9.
 
 config CRYPTO_GHASH_S390
-	tristate "GHASH digest algorithm"
+	tristate "GHASH hash function"
 	depends on S390
 	select CRYPTO_HASH
 	help
-	  This is the s390 hardware accelerated implementation of the
-	  GHASH message digest algorithm for GCM (Galois/Counter Mode).
+	  This is the s390 hardware accelerated implementation of GHASH,
+	  the hash function used in GCM (Galois/Counter mode).
 
 	  It is available as of z196.
 
@@ -234,8 +234,8 @@ config CRYPTO_CRC32_S390
 config CRYPTO_DEV_MARVELL_CESA
 	tristate "Marvell's Cryptographic Engine driver"
 	depends on PLAT_ORION || ARCH_MVEBU
-	select CRYPTO_AES
-	select CRYPTO_DES
+	select CRYPTO_LIB_AES
+	select CRYPTO_LIB_DES
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_HASH
 	select SRAM
@@ -247,7 +247,7 @@ config CRYPTO_DEV_MARVELL_CESA
 
 config CRYPTO_DEV_NIAGARA2
 	tristate "Niagara2 Stream Processing Unit driver"
-	select CRYPTO_DES
+	select CRYPTO_LIB_DES
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_HASH
 	select CRYPTO_MD5
@@ -264,7 +264,7 @@ config CRYPTO_DEV_NIAGARA2
 
 config CRYPTO_DEV_HIFN_795X
 	tristate "Driver HIFN 795x crypto accelerator chips"
-	select CRYPTO_DES
+	select CRYPTO_LIB_DES
 	select CRYPTO_BLKCIPHER
 	select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG
 	depends on PCI
@@ -320,7 +320,7 @@ config CRYPTO_DEV_TALITOS2
 config CRYPTO_DEV_IXP4XX
 	tristate "Driver for IXP4xx crypto hardware acceleration"
 	depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE
-	select CRYPTO_DES
+	select CRYPTO_LIB_DES
 	select CRYPTO_AEAD
 	select CRYPTO_AUTHENC
 	select CRYPTO_BLKCIPHER
@@ -332,7 +332,7 @@ config CRYPTO_DEV_PPC4XX
 	depends on PPC && 4xx
 	select CRYPTO_HASH
 	select CRYPTO_AEAD
-	select CRYPTO_AES
+	select CRYPTO_LIB_AES
 	select CRYPTO_CCM
 	select CRYPTO_CTR
 	select CRYPTO_GCM
@@ -386,7 +386,7 @@ config CRYPTO_DEV_OMAP_AES
 config CRYPTO_DEV_OMAP_DES
 	tristate "Support for OMAP DES/3DES hw engine"
 	depends on ARCH_OMAP2PLUS
-	select CRYPTO_DES
+	select CRYPTO_LIB_DES
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_ENGINE
 	help
@@ -404,7 +404,7 @@ config CRYPTO_DEV_PICOXCELL
 	select CRYPTO_AES
 	select CRYPTO_AUTHENC
 	select CRYPTO_BLKCIPHER
-	select CRYPTO_DES
+	select CRYPTO_LIB_DES
 	select CRYPTO_CBC
 	select CRYPTO_ECB
 	select CRYPTO_SEQIV
@@ -413,7 +413,7 @@ config CRYPTO_DEV_PICOXCELL
 	  Picochip picoXcell SoC devices. Select this for IPSEC ESP offload
 	  and for 3gpp Layer 2 ciphering support.
 
-	  Saying m here will build a module named pipcoxcell_crypto.
+	  Saying m here will build a module named picoxcell_crypto.
 
 config CRYPTO_DEV_SAHARA
 	tristate "Support for SAHARA crypto accelerator"
@@ -517,7 +517,7 @@ config CRYPTO_DEV_ATMEL_AES
 config CRYPTO_DEV_ATMEL_TDES
 	tristate "Support for Atmel DES/TDES hw accelerator"
 	depends on ARCH_AT91 || COMPILE_TEST
-	select CRYPTO_DES
+	select CRYPTO_LIB_DES
 	select CRYPTO_BLKCIPHER
 	help
 	  Some Atmel processors have DES/TDES hw accelerator.
@@ -615,7 +615,7 @@ config CRYPTO_DEV_QCE
 	depends on ARCH_QCOM || COMPILE_TEST
 	depends on HAS_IOMEM
 	select CRYPTO_AES
-	select CRYPTO_DES
+	select CRYPTO_LIB_DES
 	select CRYPTO_ECB
 	select CRYPTO_CBC
 	select CRYPTO_XTS
@@ -663,7 +663,7 @@ config CRYPTO_DEV_SUN4I_SS
 	select CRYPTO_MD5
 	select CRYPTO_SHA1
 	select CRYPTO_AES
-	select CRYPTO_DES
+	select CRYPTO_LIB_DES
 	select CRYPTO_BLKCIPHER
 	help
 	  Some Allwinner SoC have a crypto accelerator named
@@ -686,7 +686,7 @@ config CRYPTO_DEV_ROCKCHIP
 	tristate "Rockchip's Cryptographic Engine driver"
 	depends on OF && ARCH_ROCKCHIP
 	select CRYPTO_AES
-	select CRYPTO_DES
+	select CRYPTO_LIB_DES
 	select CRYPTO_MD5
 	select CRYPTO_SHA1
 	select CRYPTO_SHA256
@@ -723,7 +723,7 @@ config CRYPTO_DEV_BCM_SPU
 	depends on MAILBOX
 	default m
 	select CRYPTO_AUTHENC
-	select CRYPTO_DES
+	select CRYPTO_LIB_DES
 	select CRYPTO_MD5
 	select CRYPTO_SHA1
 	select CRYPTO_SHA256
@@ -737,12 +737,11 @@ source "drivers/crypto/stm32/Kconfig"
 
 config CRYPTO_DEV_SAFEXCEL
 	tristate "Inside Secure's SafeXcel cryptographic engine driver"
-	depends on OF
-	depends on (ARM64 && ARCH_MVEBU) || (COMPILE_TEST && 64BIT)
-	select CRYPTO_AES
+	depends on OF || PCI || COMPILE_TEST
+	select CRYPTO_LIB_AES
 	select CRYPTO_AUTHENC
 	select CRYPTO_BLKCIPHER
-	select CRYPTO_DES
+	select CRYPTO_LIB_DES
 	select CRYPTO_HASH
 	select CRYPTO_HMAC
 	select CRYPTO_MD5
@@ -750,10 +749,11 @@ config CRYPTO_DEV_SAFEXCEL
 	select CRYPTO_SHA256
 	select CRYPTO_SHA512
 	help
-	  This driver interfaces with the SafeXcel EIP-197 cryptographic engine
-	  designed by Inside Secure. Select this if you want to use CBC/ECB
-	  chain mode, AES cipher mode and SHA1/SHA224/SHA256/SHA512 hash
-	  algorithms.
+	  This driver interfaces with the SafeXcel EIP-97 and EIP-197 cryptographic
+	  engines designed by Inside Secure. It currently accelerates DES, 3DES and
+	  AES block ciphers in ECB and CBC mode, as well as SHA1, SHA224, SHA256,
+	  SHA384 and SHA512 hash algorithms for both basic hash and HMAC.
+	  Additionally, it accelerates combined AES-CBC/HMAC-SHA AEAD operations.
 
 config CRYPTO_DEV_ARTPEC6
 	tristate "Support for Axis ARTPEC-6/7 hardware crypto acceleration."
@@ -780,7 +780,7 @@ config CRYPTO_DEV_CCREE
 	default n
 	select CRYPTO_HASH
 	select CRYPTO_BLKCIPHER
-	select CRYPTO_DES
+	select CRYPTO_LIB_DES
 	select CRYPTO_AEAD
 	select CRYPTO_AUTHENC
 	select CRYPTO_SHA1
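
The Kconfig hunks above move drivers that only need key expansion or weak-key
checking from the full CRYPTO_AES/CRYPTO_DES cipher implementations to the
CRYPTO_LIB_AES/CRYPTO_LIB_DES library code. A minimal sketch of the
library-based DES key check such drivers rely on, assuming the lib/crypto DES
API where des_expand_key() reports weak keys with -ENOKEY:

#include <crypto/des.h>
#include <linux/string.h>

static int sketch_check_des_key(const u8 *key, unsigned int keylen)
{
	struct des_ctx ctx;
	int err;

	if (keylen != DES_KEY_SIZE)
		return -EINVAL;

	err = des_expand_key(&ctx, key, keylen);	/* -ENOKEY: weak key */
	memzero_explicit(&ctx, sizeof(ctx));		/* wipe round keys */
	return err;
}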
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index cbfc607282f4..a42f8619589d 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -527,28 +527,20 @@ static int crypto4xx_aes_gcm_validate_keylen(unsigned int keylen)
 static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key,
 					     unsigned int keylen)
 {
-	struct crypto_cipher *aes_tfm = NULL;
+	struct crypto_aes_ctx ctx;
 	uint8_t src[16] = { 0 };
-	int rc = 0;
-
-	aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_NEED_FALLBACK);
-	if (IS_ERR(aes_tfm)) {
-		rc = PTR_ERR(aes_tfm);
-		pr_warn("could not load aes cipher driver: %d\n", rc);
-		return rc;
-	}
+	int rc;
 
-	rc = crypto_cipher_setkey(aes_tfm, key, keylen);
+	rc = aes_expandkey(&ctx, key, keylen);
 	if (rc) {
-		pr_err("setkey() failed: %d\n", rc);
-		goto out;
+		pr_err("aes_expandkey() failed: %d\n", rc);
+		return rc;
 	}
 
-	crypto_cipher_encrypt_one(aes_tfm, src, src);
+	aes_encrypt(&ctx, src, src);
 	crypto4xx_memcpy_to_le32(hash_start, src, 16);
-out:
-	crypto_free_cipher(aes_tfm);
-	return rc;
+	memzero_explicit(&ctx, sizeof(ctx));
+	return 0;
 }
 
 int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
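
The crypto4xx change above is the same pattern in isolation: the GHASH subkey
H = E_K(0^128) is computed with the AES library instead of a dynamically
allocated "aes" crypto_cipher, removing the allocation failure path entirely.
A self-contained sketch (names are illustrative, not from the driver):

#include <crypto/aes.h>
#include <linux/string.h>

static int sketch_gcm_hash_key(u8 h[AES_BLOCK_SIZE],
			       const u8 *key, unsigned int keylen)
{
	struct crypto_aes_ctx ctx;
	int rc;

	rc = aes_expandkey(&ctx, key, keylen);
	if (rc)
		return rc;

	memset(h, 0, AES_BLOCK_SIZE);
	aes_encrypt(&ctx, h, h);		/* H = E_K(0^128) */
	memzero_explicit(&ctx, sizeof(ctx));	/* wipe the round keys */
	return 0;
}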
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 2b7af44c7b85..026f193556f9 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2673,7 +2673,6 @@ static int atmel_aes_probe(struct platform_device *pdev)
 	/* Get the IRQ */
 	aes_dd->irq = platform_get_irq(pdev, 0);
 	if (aes_dd->irq < 0) {
-		dev_err(dev, "no IRQ resource info\n");
 		err = aes_dd->irq;
 		goto res_err;
 	}
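
platform_get_irq() prints its own error message when no IRQ is found, so the
driver-side dev_err() is redundant and is dropped here (and in atmel-sha.c and
atmel-tdes.c below). The resulting probe pattern, as a sketch with a
hypothetical handler:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t sketch_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int sketch_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* the core has already logged the failure */

	return devm_request_irq(&pdev->dev, irq, sketch_irq, 0,
				dev_name(&pdev->dev), NULL);
}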
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
index dc876fab2882..1d3355913b40 100644
--- a/drivers/crypto/atmel-i2c.c
+++ b/drivers/crypto/atmel-i2c.c
@@ -21,6 +21,18 @@
 #include <linux/workqueue.h>
 #include "atmel-i2c.h"
 
+static const struct {
+	u8 value;
+	const char *error_text;
+} error_list[] = {
+	{ 0x01, "CheckMac or Verify miscompare" },
+	{ 0x03, "Parse Error" },
+	{ 0x05, "ECC Fault" },
+	{ 0x0F, "Execution Error" },
+	{ 0xEE, "Watchdog about to expire" },
+	{ 0xFF, "CRC or other communication error" },
+};
+
 /**
  * atmel_i2c_checksum() - Generate 16-bit CRC as required by ATMEL ECC.
  * CRC16 verification of the count, opcode, param1, param2 and data bytes.
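
Moving error_list out of atmel-i2c.h (next hunk) into the one file that uses
it avoids instantiating a copy of the table in every compilation unit that
includes the header. A sketch of how such a table is typically consumed
(assumed usage, not code from this driver):

static int sketch_decode_status(struct device *dev, u8 status)
{
	size_t i;

	if (status == STATUS_NOERR)
		return 0;

	for (i = 0; i < ARRAY_SIZE(error_list); i++)
		if (error_list[i].value == status) {
			dev_err(dev, "%s\n", error_list[i].error_text);
			break;
		}

	return -EIO;
}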
diff --git a/drivers/crypto/atmel-i2c.h b/drivers/crypto/atmel-i2c.h
index 21860b99c3e3..63b97b104f16 100644
--- a/drivers/crypto/atmel-i2c.h
+++ b/drivers/crypto/atmel-i2c.h
@@ -62,18 +62,6 @@ struct atmel_i2c_cmd {
 #define STATUS_NOERR 0x00
 #define STATUS_WAKE_SUCCESSFUL 0x11
 
-static const struct {
-	u8 value;
-	const char *error_text;
-} error_list[] = {
-	{ 0x01, "CheckMac or Verify miscompare" },
-	{ 0x03, "Parse Error" },
-	{ 0x05, "ECC Fault" },
-	{ 0x0F, "Execution Error" },
-	{ 0xEE, "Watchdog about to expire" },
-	{ 0xFF, "CRC or other communication error" },
-};
-
 /* Definitions for eeprom organization */
 #define CONFIG_ZONE 0
 
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index ab0cfe748931..84cb8748a795 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2779,7 +2779,6 @@ static int atmel_sha_probe(struct platform_device *pdev)
 	/* Get the IRQ */
 	sha_dd->irq = platform_get_irq(pdev, 0);
 	if (sha_dd->irq < 0) {
-		dev_err(dev, "no IRQ resource info\n");
 		err = sha_dd->irq;
 		goto res_err;
 	}
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index ea0d2068ea4f..c96c14e7dab1 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -109,7 +109,7 @@ static int atmel_sha204a_probe(struct i2c_client *client,
 	i2c_priv->hwrng.read = atmel_sha204a_rng_read;
 	i2c_priv->hwrng.quality = 1024;
 
-	ret = hwrng_register(&i2c_priv->hwrng);
+	ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng);
 	if (ret)
 		dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
 
@@ -127,7 +127,6 @@ static int atmel_sha204a_remove(struct i2c_client *client)
 
 	if (i2c_priv->hwrng.priv)
 		kfree((void *)i2c_priv->hwrng.priv);
-	hwrng_unregister(&i2c_priv->hwrng);
 
 	return 0;
 }
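
devm_hwrng_register() ties the hwrng's lifetime to the i2c client device,
which is why the explicit hwrng_unregister() disappears from
atmel_sha204a_remove(). Sketch of the managed pattern, assuming .read and
.quality were filled in beforehand as in the probe above:

#include <linux/hw_random.h>

static int sketch_register_rng(struct device *dev, struct hwrng *rng)
{
	rng->name = dev_name(dev);
	/* unregistered automatically when 'dev' is unbound */
	return devm_hwrng_register(dev, rng);
}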
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index fa76620281e8..1a6c86ae6148 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -33,7 +33,7 @@
 #include <linux/cryptohash.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/algapi.h>
-#include <crypto/des.h>
+#include <crypto/internal/des.h>
 #include <crypto/hash.h>
 #include <crypto/internal/hash.h>
 #include <linux/platform_data/crypto-atmel.h>
@@ -773,22 +773,12 @@ static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
 static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 			    unsigned int keylen)
 {
-	u32 tmp[DES_EXPKEY_WORDS];
-	int err;
-	struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
-
 	struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	int err;
 
-	if (keylen != DES_KEY_SIZE) {
-		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
-
-	err = des_ekey(tmp, key);
-	if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
-		ctfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
-		return -EINVAL;
-	}
+	err = verify_ablkcipher_des_key(tfm, key);
+	if (err)
+		return err;
 
 	memcpy(ctx->key, key, keylen);
 	ctx->keylen = keylen;
@@ -800,15 +790,11 @@ static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 			     unsigned int keylen)
 {
 	struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-	u32 flags;
 	int err;
 
-	flags = crypto_ablkcipher_get_flags(tfm);
-	err = __des3_verify_key(&flags, key);
-	if (unlikely(err)) {
-		crypto_ablkcipher_set_flags(tfm, flags);
+	err = verify_ablkcipher_des3_key(tfm, key);
+	if (err)
 		return err;
-	}
 
 	memcpy(ctx->key, key, keylen);
 	ctx->keylen = keylen;
@@ -1281,7 +1267,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
 	/* Get the IRQ */
 	tdes_dd->irq = platform_get_irq(pdev, 0);
 	if (tdes_dd->irq < 0) {
-		dev_err(dev, "no IRQ resource info\n");
 		err = tdes_dd->irq;
 		goto res_err;
 	}
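
The open-coded des_ekey()/__des3_verify_key() logic collapses into the
<crypto/internal/des.h> helpers, which combine the length check and the
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS-aware weak-key check and set the tfm result
flags themselves. For a driver on the newer skcipher interface the converted
setkey would look like this sketch (example_ctx is hypothetical; atmel-tdes
itself uses the ablkcipher variants above):

#include <crypto/internal/des.h>
#include <linux/string.h>

struct example_ctx {
	u8 key[DES3_EDE_KEY_SIZE];
	unsigned int keylen;
};

static int sketch_des3_setkey(struct crypto_skcipher *tfm,
			      const u8 *key, unsigned int keylen)
{
	struct example_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key); /* length + weak-key policy */
	if (err)
		return err;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	return 0;
}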
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 80fa04ef215f..4b20606983a4 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2854,7 +2854,6 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
 	struct artpec6_crypto *ac;
 	struct device *dev = &pdev->dev;
 	void __iomem *base;
-	struct resource *res;
 	int irq;
 	int err;
 
@@ -2867,8 +2866,7 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
 
 	variant = (enum artpec6_crypto_variant)match->data;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	base = devm_ioremap_resource(&pdev->dev, res);
+	base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(base))
 		return PTR_ERR(base);
 
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 869602fcfd96..f85356a48e7e 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -24,7 +24,7 @@
 #include <crypto/aead.h>
 #include <crypto/internal/aead.h>
 #include <crypto/aes.h>
-#include <crypto/des.h>
+#include <crypto/internal/des.h>
 #include <crypto/hmac.h>
 #include <crypto/sha.h>
 #include <crypto/md5.h>
@@ -1802,24 +1802,13 @@ static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 		      unsigned int keylen)
 {
 	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
-	u32 tmp[DES_EXPKEY_WORDS];
-
-	if (keylen == DES_KEY_SIZE) {
-		if (des_ekey(tmp, key) == 0) {
-			if (crypto_ablkcipher_get_flags(cipher) &
-			    CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
-				u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
+	int err;
 
-				crypto_ablkcipher_set_flags(cipher, flags);
-				return -EINVAL;
-			}
-		}
+	err = verify_ablkcipher_des_key(cipher, key);
+	if (err)
+		return err;
 
-		ctx->cipher_type = CIPHER_TYPE_DES;
-	} else {
-		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
+	ctx->cipher_type = CIPHER_TYPE_DES;
 	return 0;
 }
 
@@ -1827,23 +1816,13 @@ static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 			   unsigned int keylen)
 {
 	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+	int err;
 
-	if (keylen == (DES_KEY_SIZE * 3)) {
-		u32 flags;
-		int ret;
-
-		flags = crypto_ablkcipher_get_flags(cipher);
-		ret = __des3_verify_key(&flags, key);
-		if (unlikely(ret)) {
-			crypto_ablkcipher_set_flags(cipher, flags);
-			return ret;
-		}
+	err = verify_ablkcipher_des3_key(cipher, key);
+	if (err)
+		return err;
 
-		ctx->cipher_type = CIPHER_TYPE_3DES;
-	} else {
-		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
+	ctx->cipher_type = CIPHER_TYPE_3DES;
 	return 0;
 }
 
@@ -2629,6 +2608,19 @@ static int aead_need_fallback(struct aead_request *req)
 		return 1;
 	}
 
+	/*
+	 * RFC4106 and RFC4543 cannot handle the case where AAD is other than
+	 * 16 or 20 bytes long. So use fallback in this case.
+	 */
+	if (ctx->cipher.mode == CIPHER_MODE_GCM &&
+	    ctx->cipher.alg == CIPHER_ALG_AES &&
+	    rctx->iv_ctr_len == GCM_RFC4106_IV_SIZE &&
+	    req->assoclen != 16 && req->assoclen != 20) {
+		flow_log("RFC4106/RFC4543 needs fallback for assoclen"
+			 " other than 16 or 20 bytes\n");
+		return 1;
+	}
+
 	payload_len = req->cryptlen;
 	if (spu->spu_type == SPU_TYPE_SPUM)
 		payload_len += req->assoclen;
@@ -2855,40 +2847,16 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 
 	switch (ctx->alg->cipher_info.alg) {
 	case CIPHER_ALG_DES:
-		if (ctx->enckeylen == DES_KEY_SIZE) {
-			u32 tmp[DES_EXPKEY_WORDS];
-			u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
-
-			if (des_ekey(tmp, keys.enckey) == 0) {
-				if (crypto_aead_get_flags(cipher) &
-				    CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
-					crypto_aead_set_flags(cipher, flags);
-					return -EINVAL;
-				}
-			}
+		if (verify_aead_des_key(cipher, keys.enckey, keys.enckeylen))
+			return -EINVAL;
 
-			ctx->cipher_type = CIPHER_TYPE_DES;
-		} else {
-			goto badkey;
-		}
+		ctx->cipher_type = CIPHER_TYPE_DES;
 		break;
 	case CIPHER_ALG_3DES:
-		if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
-			u32 flags;
-
-			flags = crypto_aead_get_flags(cipher);
-			ret = __des3_verify_key(&flags, keys.enckey);
-			if (unlikely(ret)) {
-				crypto_aead_set_flags(cipher, flags);
-				return ret;
-			}
-
-			ctx->cipher_type = CIPHER_TYPE_3DES;
-		} else {
-			crypto_aead_set_flags(cipher,
-					      CRYPTO_TFM_RES_BAD_KEY_LEN);
+		if (verify_aead_des3_key(cipher, keys.enckey, keys.enckeylen))
 			return -EINVAL;
-		}
+
+		ctx->cipher_type = CIPHER_TYPE_3DES;
 		break;
 	case CIPHER_ALG_AES:
 		switch (ctx->enckeylen) {
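
The new aead_need_fallback() test above encodes the RFC4106/RFC4543 layout in
which the associated data is the ESP header plus the 8-byte IV, so any other
assoclen must go to the software fallback. As a standalone predicate
(sketch; the name is illustrative):

static bool sketch_rfc4106_assoclen_ok(unsigned int assoclen)
{
	/* SPI (4) + 32- or 64-bit sequence number (4 or 8) + IV (8) */
	return assoclen == 16 || assoclen == 20;
}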
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 3720ddabb507..137ed3df0c74 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -98,7 +98,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
 	select CRYPTO_AEAD
 	select CRYPTO_AUTHENC
 	select CRYPTO_BLKCIPHER
-	select CRYPTO_DES
+	select CRYPTO_LIB_DES
 	help
 	  Selecting this will offload crypto for users of the
 	  scatterlist crypto API (such as the linux native IPSec
@@ -111,6 +111,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
 	select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
 	select CRYPTO_AUTHENC
 	select CRYPTO_BLKCIPHER
+	select CRYPTO_DES
 	help
 	  Selecting this will use CAAM Queue Interface (QI) for sending
 	  & receiving crypto jobs to/from CAAM. This gives better performance
@@ -161,6 +162,7 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
 	select CRYPTO_AUTHENC
 	select CRYPTO_AEAD
 	select CRYPTO_HASH
+	select CRYPTO_DES
 	help
 	  CAAM driver for QorIQ Data Path Acceleration Architecture 2.
 	  It handles DPSECI DPAA2 objects that sit on the Management Complex
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 9ab4e81ea21e..68d5cc0f28e2 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -30,3 +30,4 @@ endif
 obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
 
 dpaa2_caam-y := caamalg_qi2.o dpseci.o
+dpaa2_caam-$(CONFIG_DEBUG_FS) += dpseci-debugfs.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 43f18253e5b6..2912006b946b 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -74,7 +74,7 @@
 
 #define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
 
-#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
+#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN)
 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
 
 struct caam_alg_entry {
@@ -205,6 +205,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 				   ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
 	}
 
+	/*
+	 * In case |user key| > |derived key|, using DKP<imm,imm>
+	 * would result in invalid opcodes (last bytes of user key) in
+	 * the resulting descriptor. Use DKP<ptr,imm> instead => both
+	 * virtual and dma key addresses are needed.
+	 */
+	ctx->adata.key_virt = ctx->key;
+	ctx->adata.key_dma = ctx->key_dma;
+
+	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
 	data_len[0] = ctx->adata.keylen_pad;
 	data_len[1] = ctx->cdata.keylen;
 
@@ -221,16 +233,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 			      ARRAY_SIZE(data_len)) < 0)
 		return -EINVAL;
 
-	if (inl_mask & 1)
-		ctx->adata.key_virt = ctx->key;
-	else
-		ctx->adata.key_dma = ctx->key_dma;
-
-	if (inl_mask & 2)
-		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
-	else
-		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
 	ctx->adata.key_inline = !!(inl_mask & 1);
 	ctx->cdata.key_inline = !!(inl_mask & 2);
 
@@ -253,16 +255,6 @@ skip_enc:
 			      ARRAY_SIZE(data_len)) < 0)
 		return -EINVAL;
 
-	if (inl_mask & 1)
-		ctx->adata.key_virt = ctx->key;
-	else
-		ctx->adata.key_dma = ctx->key_dma;
-
-	if (inl_mask & 2)
-		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
-	else
-		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
 	ctx->adata.key_inline = !!(inl_mask & 1);
 	ctx->cdata.key_inline = !!(inl_mask & 2);
 
@@ -287,16 +279,6 @@ skip_enc:
 			      ARRAY_SIZE(data_len)) < 0)
 		return -EINVAL;
 
-	if (inl_mask & 1)
-		ctx->adata.key_virt = ctx->key;
-	else
-		ctx->adata.key_dma = ctx->key_dma;
-
-	if (inl_mask & 2)
-		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
-	else
-		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
 	ctx->adata.key_inline = !!(inl_mask & 1);
 	ctx->cdata.key_inline = !!(inl_mask & 2);
 
@@ -376,6 +358,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+	int err;
+
+	err = crypto_gcm_check_authsize(authsize);
+	if (err)
+		return err;
 
 	ctx->authsize = authsize;
 	gcm_set_sh_desc(authenc);
@@ -439,6 +426,11 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc,
 			       unsigned int authsize)
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+	int err;
+
+	err = crypto_rfc4106_check_authsize(authsize);
+	if (err)
+		return err;
 
 	ctx->authsize = authsize;
 	rfc4106_set_sh_desc(authenc);
@@ -503,6 +495,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
 
+	if (authsize != 16)
+		return -EINVAL;
+
 	ctx->authsize = authsize;
 	rfc4543_set_sh_desc(authenc);
 
@@ -633,33 +628,17 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
 			    unsigned int keylen)
 {
 	struct crypto_authenc_keys keys;
-	u32 flags;
 	int err;
 
 	err = crypto_authenc_extractkeys(&keys, key, keylen);
 	if (unlikely(err))
-		goto badkey;
-
-	err = -EINVAL;
-	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
-		goto badkey;
-
-	flags = crypto_aead_get_flags(aead);
-	err = __des3_verify_key(&flags, keys.enckey);
-	if (unlikely(err)) {
-		crypto_aead_set_flags(aead, flags);
-		goto out;
-	}
+		return err;
 
-	err = aead_setkey(aead, key, keylen);
+	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
+	      aead_setkey(aead, key, keylen);
 
-out:
 	memzero_explicit(&keys, sizeof(keys));
 	return err;
-
-badkey:
-	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
-	goto out;
 }
 
 static int gcm_setkey(struct crypto_aead *aead,
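
The rewritten des3_aead_setkey() chains its two calls with the GNU C "a ?: b"
extension, used freely in kernel code: evaluate the left operand, return it if
non-zero, otherwise return the right one. Spelled out, the one-liner does the
following (sketch; aead_setkey() is the static helper in caamalg.c):

static int sketch_verify_then_setkey(struct crypto_aead *aead,
				     const u8 *key, unsigned int keylen,
				     const struct crypto_authenc_keys *keys)
{
	int err;

	err = verify_aead_des3_key(aead, keys->enckey, keys->enckeylen);
	if (err)
		return err;
	return aead_setkey(aead, key, keylen);
}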
@@ -667,6 +646,13 @@ static int gcm_setkey(struct crypto_aead *aead,
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
+	int err;
+
+	err = aes_check_keylen(keylen);
+	if (err) {
+		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return err;
+	}
 
 	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -683,9 +669,13 @@ static int rfc4106_setkey(struct crypto_aead *aead,
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
+	int err;
 
-	if (keylen < 4)
-		return -EINVAL;
+	err = aes_check_keylen(keylen - 4);
+	if (err) {
+		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return err;
+	}
 
 	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -707,9 +697,13 @@ static int rfc4543_setkey(struct crypto_aead *aead,
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
+	int err;
 
-	if (keylen < 4)
-		return -EINVAL;
+	err = aes_check_keylen(keylen - 4);
+	if (err) {
+		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return err;
+	}
 
 	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -727,7 +721,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
 }
 
 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
-			   unsigned int keylen)
+			   unsigned int keylen, const u32 ctx1_iv_off)
 {
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct caam_skcipher_alg *alg =
@@ -736,30 +730,10 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	struct device *jrdev = ctx->jrdev;
 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 	u32 *desc;
-	u32 ctx1_iv_off = 0;
-	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
-			       OP_ALG_AAI_CTR_MOD128);
 	const bool is_rfc3686 = alg->caam.rfc3686;
 
 	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-	/*
-	 * AES-CTR needs to load IV in CONTEXT1 reg
-	 * at an offset of 128bits (16bytes)
-	 * CONTEXT1[255:128] = IV
-	 */
-	if (ctr_mode)
-		ctx1_iv_off = 16;
-
-	/*
-	 * RFC3686 specific:
-	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
-	 *	| *key = {KEY, NONCE}
-	 */
-	if (is_rfc3686) {
-		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
-		keylen -= CTR_RFC3686_NONCE_SIZE;
-	}
 
 	ctx->cdata.keylen = keylen;
 	ctx->cdata.key_virt = key;
@@ -782,25 +756,86 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	return 0;
 }
 
-static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
+static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
 			       const u8 *key, unsigned int keylen)
 {
-	u32 tmp[DES3_EDE_EXPKEY_WORDS];
-	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+	int err;
 
-	if (keylen == DES3_EDE_KEY_SIZE &&
-	    __des3_ede_setkey(tmp, &tfm->crt_flags, key, DES3_EDE_KEY_SIZE)) {
-		return -EINVAL;
+	err = aes_check_keylen(keylen);
+	if (err) {
+		crypto_skcipher_set_flags(skcipher,
+					  CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return err;
 	}
 
-	if (!des_ekey(tmp, key) && (crypto_skcipher_get_flags(skcipher) &
-	    CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
+	return skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
+				   const u8 *key, unsigned int keylen)
+{
+	u32 ctx1_iv_off;
+	int err;
+
+	/*
+	 * RFC3686 specific:
+	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 *	| *key = {KEY, NONCE}
+	 */
+	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+	keylen -= CTR_RFC3686_NONCE_SIZE;
+
+	err = aes_check_keylen(keylen);
+	if (err) {
 		crypto_skcipher_set_flags(skcipher,
-					  CRYPTO_TFM_RES_WEAK_KEY);
-		return -EINVAL;
+					  CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return err;
 	}
 
-	return skcipher_setkey(skcipher, key, keylen);
+	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
+static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
+			       const u8 *key, unsigned int keylen)
+{
+	u32 ctx1_iv_off;
+	int err;
+
+	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128bits (16bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	ctx1_iv_off = 16;
+
+	err = aes_check_keylen(keylen);
+	if (err) {
+		crypto_skcipher_set_flags(skcipher,
+					  CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return err;
+	}
+
+	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
+static int arc4_skcipher_setkey(struct crypto_skcipher *skcipher,
+				const u8 *key, unsigned int keylen)
+{
+	return skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
+			       const u8 *key, unsigned int keylen)
+{
+	return verify_skcipher_des_key(skcipher, key) ?:
+	       skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
+				const u8 *key, unsigned int keylen)
+{
+	return verify_skcipher_des3_key(skcipher, key) ?:
+	       skcipher_setkey(skcipher, key, keylen, 0);
 }
 
 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
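
Each new per-mode AES setkey helper above validates the key length and then
funnels into skcipher_setkey() with a mode-specific ctx1_iv_off.
aes_check_keylen() (from <crypto/aes.h>) accepts exactly the three AES key
sizes; its behaviour, sketched:

#include <crypto/aes.h>

static int sketch_aes_check_keylen(unsigned int keylen)
{
	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		return 0;
	default:
		return -EINVAL;
	}
}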
@@ -930,19 +965,20 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
930 | { | 965 | { |
931 | struct aead_request *req = context; | 966 | struct aead_request *req = context; |
932 | struct aead_edesc *edesc; | 967 | struct aead_edesc *edesc; |
968 | int ecode = 0; | ||
933 | 969 | ||
934 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 970 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
935 | 971 | ||
936 | edesc = container_of(desc, struct aead_edesc, hw_desc[0]); | 972 | edesc = container_of(desc, struct aead_edesc, hw_desc[0]); |
937 | 973 | ||
938 | if (err) | 974 | if (err) |
939 | caam_jr_strstatus(jrdev, err); | 975 | ecode = caam_jr_strstatus(jrdev, err); |
940 | 976 | ||
941 | aead_unmap(jrdev, edesc, req); | 977 | aead_unmap(jrdev, edesc, req); |
942 | 978 | ||
943 | kfree(edesc); | 979 | kfree(edesc); |
944 | 980 | ||
945 | aead_request_complete(req, err); | 981 | aead_request_complete(req, ecode); |
946 | } | 982 | } |
947 | 983 | ||
948 | static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | 984 | static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, |
@@ -950,25 +986,20 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
950 | { | 986 | { |
951 | struct aead_request *req = context; | 987 | struct aead_request *req = context; |
952 | struct aead_edesc *edesc; | 988 | struct aead_edesc *edesc; |
989 | int ecode = 0; | ||
953 | 990 | ||
954 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 991 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
955 | 992 | ||
956 | edesc = container_of(desc, struct aead_edesc, hw_desc[0]); | 993 | edesc = container_of(desc, struct aead_edesc, hw_desc[0]); |
957 | 994 | ||
958 | if (err) | 995 | if (err) |
959 | caam_jr_strstatus(jrdev, err); | 996 | ecode = caam_jr_strstatus(jrdev, err); |
960 | 997 | ||
961 | aead_unmap(jrdev, edesc, req); | 998 | aead_unmap(jrdev, edesc, req); |
962 | 999 | ||
963 | /* | ||
964 | * verify hw auth check passed else return -EBADMSG | ||
965 | */ | ||
966 | if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) | ||
967 | err = -EBADMSG; | ||
968 | |||
969 | kfree(edesc); | 1000 | kfree(edesc); |
970 | 1001 | ||
971 | aead_request_complete(req, err); | 1002 | aead_request_complete(req, ecode); |
972 | } | 1003 | } |
973 | 1004 | ||
974 | static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | 1005 | static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, |
@@ -978,13 +1009,14 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
978 | struct skcipher_edesc *edesc; | 1009 | struct skcipher_edesc *edesc; |
979 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); | 1010 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
980 | int ivsize = crypto_skcipher_ivsize(skcipher); | 1011 | int ivsize = crypto_skcipher_ivsize(skcipher); |
1012 | int ecode = 0; | ||
981 | 1013 | ||
982 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 1014 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
983 | 1015 | ||
984 | edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); | 1016 | edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); |
985 | 1017 | ||
986 | if (err) | 1018 | if (err) |
987 | caam_jr_strstatus(jrdev, err); | 1019 | ecode = caam_jr_strstatus(jrdev, err); |
988 | 1020 | ||
989 | skcipher_unmap(jrdev, edesc, req); | 1021 | skcipher_unmap(jrdev, edesc, req); |
990 | 1022 | ||
@@ -993,10 +1025,9 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
993 | * ciphertext block (CBC mode) or last counter (CTR mode). | 1025 | * ciphertext block (CBC mode) or last counter (CTR mode). |
994 | * This is used e.g. by the CTS mode. | 1026 | * This is used e.g. by the CTS mode. |
995 | */ | 1027 | */ |
996 | if (ivsize) { | 1028 | if (ivsize && !ecode) { |
997 | memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes, | 1029 | memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes, |
998 | ivsize); | 1030 | ivsize); |
999 | |||
1000 | print_hex_dump_debug("dstiv @"__stringify(__LINE__)": ", | 1031 | print_hex_dump_debug("dstiv @"__stringify(__LINE__)": ", |
1001 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | 1032 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, |
1002 | edesc->src_nents > 1 ? 100 : ivsize, 1); | 1033 | edesc->src_nents > 1 ? 100 : ivsize, 1); |
@@ -1008,7 +1039,7 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
1008 | 1039 | ||
1009 | kfree(edesc); | 1040 | kfree(edesc); |
1010 | 1041 | ||
1011 | skcipher_request_complete(req, err); | 1042 | skcipher_request_complete(req, ecode); |
1012 | } | 1043 | } |
1013 | 1044 | ||
1014 | static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | 1045 | static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, |
@@ -1018,12 +1049,13 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
1018 | struct skcipher_edesc *edesc; | 1049 | struct skcipher_edesc *edesc; |
1019 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); | 1050 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
1020 | int ivsize = crypto_skcipher_ivsize(skcipher); | 1051 | int ivsize = crypto_skcipher_ivsize(skcipher); |
1052 | int ecode = 0; | ||
1021 | 1053 | ||
1022 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 1054 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
1023 | 1055 | ||
1024 | edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); | 1056 | edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); |
1025 | if (err) | 1057 | if (err) |
1026 | caam_jr_strstatus(jrdev, err); | 1058 | ecode = caam_jr_strstatus(jrdev, err); |
1027 | 1059 | ||
1028 | skcipher_unmap(jrdev, edesc, req); | 1060 | skcipher_unmap(jrdev, edesc, req); |
1029 | 1061 | ||
@@ -1032,7 +1064,7 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
1032 | * ciphertext block (CBC mode) or last counter (CTR mode). | 1064 | * ciphertext block (CBC mode) or last counter (CTR mode). |
1033 | * This is used e.g. by the CTS mode. | 1065 | * This is used e.g. by the CTS mode. |
1034 | */ | 1066 | */ |
1035 | if (ivsize) { | 1067 | if (ivsize && !ecode) { |
1036 | memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes, | 1068 | memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes, |
1037 | ivsize); | 1069 | ivsize); |
1038 | 1070 | ||
@@ -1047,7 +1079,7 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
1047 | 1079 | ||
1048 | kfree(edesc); | 1080 | kfree(edesc); |
1049 | 1081 | ||
1050 | skcipher_request_complete(req, err); | 1082 | skcipher_request_complete(req, ecode); |
1051 | } | 1083 | } |
1052 | 1084 | ||
1053 | /* | 1085 | /* |
@@ -1525,10 +1557,7 @@ static int chachapoly_decrypt(struct aead_request *req) | |||
1525 | 1557 | ||
1526 | static int ipsec_gcm_encrypt(struct aead_request *req) | 1558 | static int ipsec_gcm_encrypt(struct aead_request *req) |
1527 | { | 1559 | { |
1528 | if (req->assoclen < 8) | 1560 | return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req); |
1529 | return -EINVAL; | ||
1530 | |||
1531 | return gcm_encrypt(req); | ||
1532 | } | 1561 | } |
1533 | 1562 | ||
1534 | static int aead_encrypt(struct aead_request *req) | 1563 | static int aead_encrypt(struct aead_request *req) |
@@ -1602,10 +1631,7 @@ static int gcm_decrypt(struct aead_request *req) | |||
1602 | 1631 | ||
1603 | static int ipsec_gcm_decrypt(struct aead_request *req) | 1632 | static int ipsec_gcm_decrypt(struct aead_request *req) |
1604 | { | 1633 | { |
1605 | if (req->assoclen < 8) | 1634 | return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req); |
1606 | return -EINVAL; | ||
1607 | |||
1608 | return gcm_decrypt(req); | ||
1609 | } | 1635 | } |
1610 | 1636 | ||
1611 | static int aead_decrypt(struct aead_request *req) | 1637 | static int aead_decrypt(struct aead_request *req) |
@@ -1817,6 +1843,9 @@ static int skcipher_encrypt(struct skcipher_request *req) | |||
1817 | u32 *desc; | 1843 | u32 *desc; |
1818 | int ret = 0; | 1844 | int ret = 0; |
1819 | 1845 | ||
1846 | if (!req->cryptlen) | ||
1847 | return 0; | ||
1848 | |||
1820 | /* allocate extended descriptor */ | 1849 | /* allocate extended descriptor */ |
1821 | edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); | 1850 | edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); |
1822 | if (IS_ERR(edesc)) | 1851 | if (IS_ERR(edesc)) |
@@ -1851,6 +1880,9 @@ static int skcipher_decrypt(struct skcipher_request *req) | |||
1851 | u32 *desc; | 1880 | u32 *desc; |
1852 | int ret = 0; | 1881 | int ret = 0; |
1853 | 1882 | ||
1883 | if (!req->cryptlen) | ||
1884 | return 0; | ||
1885 | |||
1854 | /* allocate extended descriptor */ | 1886 | /* allocate extended descriptor */ |
1855 | edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); | 1887 | edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); |
1856 | if (IS_ERR(edesc)) | 1888 | if (IS_ERR(edesc)) |
@@ -1883,7 +1915,7 @@ static struct caam_skcipher_alg driver_algs[] = { | |||
1883 | .cra_driver_name = "cbc-aes-caam", | 1915 | .cra_driver_name = "cbc-aes-caam", |
1884 | .cra_blocksize = AES_BLOCK_SIZE, | 1916 | .cra_blocksize = AES_BLOCK_SIZE, |
1885 | }, | 1917 | }, |
1886 | .setkey = skcipher_setkey, | 1918 | .setkey = aes_skcipher_setkey, |
1887 | .encrypt = skcipher_encrypt, | 1919 | .encrypt = skcipher_encrypt, |
1888 | .decrypt = skcipher_decrypt, | 1920 | .decrypt = skcipher_decrypt, |
1889 | .min_keysize = AES_MIN_KEY_SIZE, | 1921 | .min_keysize = AES_MIN_KEY_SIZE, |
@@ -1899,7 +1931,7 @@ static struct caam_skcipher_alg driver_algs[] = { | |||
1899 | .cra_driver_name = "cbc-3des-caam", | 1931 | .cra_driver_name = "cbc-3des-caam", |
1900 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 1932 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1901 | }, | 1933 | }, |
1902 | .setkey = des_skcipher_setkey, | 1934 | .setkey = des3_skcipher_setkey, |
1903 | .encrypt = skcipher_encrypt, | 1935 | .encrypt = skcipher_encrypt, |
1904 | .decrypt = skcipher_decrypt, | 1936 | .decrypt = skcipher_decrypt, |
1905 | .min_keysize = DES3_EDE_KEY_SIZE, | 1937 | .min_keysize = DES3_EDE_KEY_SIZE, |
@@ -1931,7 +1963,7 @@ static struct caam_skcipher_alg driver_algs[] = { | |||
1931 | .cra_driver_name = "ctr-aes-caam", | 1963 | .cra_driver_name = "ctr-aes-caam", |
1932 | .cra_blocksize = 1, | 1964 | .cra_blocksize = 1, |
1933 | }, | 1965 | }, |
1934 | .setkey = skcipher_setkey, | 1966 | .setkey = ctr_skcipher_setkey, |
1935 | .encrypt = skcipher_encrypt, | 1967 | .encrypt = skcipher_encrypt, |
1936 | .decrypt = skcipher_decrypt, | 1968 | .decrypt = skcipher_decrypt, |
1937 | .min_keysize = AES_MIN_KEY_SIZE, | 1969 | .min_keysize = AES_MIN_KEY_SIZE, |
@@ -1949,7 +1981,7 @@ static struct caam_skcipher_alg driver_algs[] = { | |||
1949 | .cra_driver_name = "rfc3686-ctr-aes-caam", | 1981 | .cra_driver_name = "rfc3686-ctr-aes-caam", |
1950 | .cra_blocksize = 1, | 1982 | .cra_blocksize = 1, |
1951 | }, | 1983 | }, |
1952 | .setkey = skcipher_setkey, | 1984 | .setkey = rfc3686_skcipher_setkey, |
1953 | .encrypt = skcipher_encrypt, | 1985 | .encrypt = skcipher_encrypt, |
1954 | .decrypt = skcipher_decrypt, | 1986 | .decrypt = skcipher_decrypt, |
1955 | .min_keysize = AES_MIN_KEY_SIZE + | 1987 | .min_keysize = AES_MIN_KEY_SIZE + |
@@ -2003,7 +2035,7 @@ static struct caam_skcipher_alg driver_algs[] = { | |||
2003 | .cra_driver_name = "ecb-aes-caam", | 2035 | .cra_driver_name = "ecb-aes-caam", |
2004 | .cra_blocksize = AES_BLOCK_SIZE, | 2036 | .cra_blocksize = AES_BLOCK_SIZE, |
2005 | }, | 2037 | }, |
2006 | .setkey = skcipher_setkey, | 2038 | .setkey = aes_skcipher_setkey, |
2007 | .encrypt = skcipher_encrypt, | 2039 | .encrypt = skcipher_encrypt, |
2008 | .decrypt = skcipher_decrypt, | 2040 | .decrypt = skcipher_decrypt, |
2009 | .min_keysize = AES_MIN_KEY_SIZE, | 2041 | .min_keysize = AES_MIN_KEY_SIZE, |
@@ -2018,7 +2050,7 @@ static struct caam_skcipher_alg driver_algs[] = { | |||
2018 | .cra_driver_name = "ecb-des3-caam", | 2050 | .cra_driver_name = "ecb-des3-caam", |
2019 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2051 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2020 | }, | 2052 | }, |
2021 | .setkey = des_skcipher_setkey, | 2053 | .setkey = des3_skcipher_setkey, |
2022 | .encrypt = skcipher_encrypt, | 2054 | .encrypt = skcipher_encrypt, |
2023 | .decrypt = skcipher_decrypt, | 2055 | .decrypt = skcipher_decrypt, |
2024 | .min_keysize = DES3_EDE_KEY_SIZE, | 2056 | .min_keysize = DES3_EDE_KEY_SIZE, |
@@ -2033,7 +2065,7 @@ static struct caam_skcipher_alg driver_algs[] = { | |||
2033 | .cra_driver_name = "ecb-arc4-caam", | 2065 | .cra_driver_name = "ecb-arc4-caam", |
2034 | .cra_blocksize = ARC4_BLOCK_SIZE, | 2066 | .cra_blocksize = ARC4_BLOCK_SIZE, |
2035 | }, | 2067 | }, |
2036 | .setkey = skcipher_setkey, | 2068 | .setkey = arc4_skcipher_setkey, |
2037 | .encrypt = skcipher_encrypt, | 2069 | .encrypt = skcipher_encrypt, |
2038 | .decrypt = skcipher_decrypt, | 2070 | .decrypt = skcipher_decrypt, |
2039 | .min_keysize = ARC4_MIN_KEY_SIZE, | 2071 | .min_keysize = ARC4_MIN_KEY_SIZE, |
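The .setkey rewiring above is the visible half of a refactor that splits the old catch-all skcipher_setkey() into thin per-mode wrappers, each validating its own key length before a shared tail that now takes the CONTEXT1 IV offset as a parameter. A sketch of the AES wrapper shape, assuming the four-argument skcipher_setkey() introduced by this series and aes_check_keylen() from <crypto/aes.h>:

	static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
				       const u8 *key, unsigned int keylen)
	{
		int err;

		/* Only 16-, 24- and 32-byte AES keys are acceptable. */
		err = aes_check_keylen(keylen);
		if (err) {
			crypto_skcipher_set_flags(skcipher,
						  CRYPTO_TFM_RES_BAD_KEY_LEN);
			return err;
		}

		/* Plain AES modes keep the IV at offset 0 of CONTEXT1. */
		return skcipher_setkey(skcipher, key, keylen, 0);
	}

The CTR and RFC3686 wrappers differ only in the offset they pass: 16, and 16 + CTR_RFC3686_NONCE_SIZE after peeling the nonce off the key.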
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c index 72531837571e..aa9ccca67045 100644 --- a/drivers/crypto/caam/caamalg_desc.c +++ b/drivers/crypto/caam/caamalg_desc.c | |||
@@ -503,6 +503,7 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, | |||
503 | const bool is_qi, int era) | 503 | const bool is_qi, int era) |
504 | { | 504 | { |
505 | u32 geniv, moveiv; | 505 | u32 geniv, moveiv; |
506 | u32 *wait_cmd; | ||
506 | 507 | ||
507 | /* Note: Context registers are saved. */ | 508 | /* Note: Context registers are saved. */ |
508 | init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); | 509 | init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); |
@@ -598,6 +599,14 @@ copy_iv: | |||
598 | 599 | ||
599 | /* Will read cryptlen */ | 600 | /* Will read cryptlen */ |
600 | append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | 601 | append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); |
602 | |||
603 | /* | ||
604 | * Wait for IV transfer (ofifo -> class2) to finish before starting | ||
605 | * ciphertext transfer (ofifo -> external memory). | ||
606 | */ | ||
607 | wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP); | ||
608 | set_jump_tgt_here(desc, wait_cmd); | ||
609 | |||
601 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF | | 610 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF | |
602 | FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH); | 611 | FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH); |
603 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); | 612 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); |
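The pair of commands appended above is a common CAAM descriptor idiom: a conditional JUMP whose target is patched to the very next command transfers control nowhere, it merely stalls the DECO until the tested condition clears — here NIFP, i.e. output-FIFO information still pending. Sketch of the idiom, assuming the desc_constr.h helpers:

	u32 *wait_cmd;

	/* Self-targeting jump acts as a barrier: execution resumes at
	 * the following command only once the IV has drained from the
	 * output FIFO towards class 2. */
	wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
			       JUMP_COND_NIFP);
	set_jump_tgt_here(desc, wait_cmd);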
@@ -843,13 +852,16 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_decap); | |||
843 | * @ivsize: initialization vector size | 852 | * @ivsize: initialization vector size |
844 | * @icvsize: integrity check value (ICV) size (truncated or full) | 853 | * @icvsize: integrity check value (ICV) size (truncated or full) |
845 | * @is_qi: true when called from caam/qi | 854 | * @is_qi: true when called from caam/qi |
855 | * | ||
856 | * Input sequence: AAD | PTXT | ||
857 | * Output sequence: AAD | CTXT | ICV | ||
858 | * AAD length (assoclen), which includes the IV length, is available in Math3. | ||
846 | */ | 859 | */ |
847 | void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata, | 860 | void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata, |
848 | unsigned int ivsize, unsigned int icvsize, | 861 | unsigned int ivsize, unsigned int icvsize, |
849 | const bool is_qi) | 862 | const bool is_qi) |
850 | { | 863 | { |
851 | u32 *key_jump_cmd; | 864 | u32 *key_jump_cmd, *zero_cryptlen_jump_cmd, *skip_instructions; |
852 | |||
853 | init_sh_desc(desc, HDR_SHARE_SERIAL); | 865 | init_sh_desc(desc, HDR_SHARE_SERIAL); |
854 | 866 | ||
855 | /* Skip key loading if it is loaded due to sharing */ | 867 | /* Skip key loading if it is loaded due to sharing */ |
@@ -892,24 +904,26 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata, | |||
892 | append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize); | 904 | append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize); |
893 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | 905 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); |
894 | 906 | ||
895 | /* Read assoc data */ | 907 | /* Skip AAD */ |
896 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | 908 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); |
897 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); | ||
898 | 909 | ||
899 | /* Skip IV */ | 910 | /* Read cryptlen and set this value into VARSEQOUTLEN */ |
900 | append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP); | 911 | append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ); |
901 | 912 | ||
902 | /* Will read cryptlen bytes */ | 913 | /* If cryptlen is ZERO jump to AAD command */ |
903 | append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | 914 | zero_cryptlen_jump_cmd = append_jump(desc, JUMP_TEST_ALL | |
915 | JUMP_COND_MATH_Z); | ||
904 | 916 | ||
905 | /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ | 917 | /* Read AAD data */ |
906 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG); | 918 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | |
919 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); | ||
907 | 920 | ||
908 | /* Skip assoc data */ | 921 | /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ |
909 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | 922 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA); |
910 | 923 | ||
911 | /* cryptlen = seqoutlen - assoclen */ | 924 | /* Skip IV */ |
912 | append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ); | 925 | append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP); |
926 | append_math_add(desc, VARSEQINLEN, VARSEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
913 | 927 | ||
914 | /* Write encrypted data */ | 928 | /* Write encrypted data */ |
915 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); | 929 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); |
@@ -918,6 +932,18 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata, | |||
918 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | 932 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | |
919 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); | 933 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); |
920 | 934 | ||
935 | /* Jump instructions to avoid double reading of AAD */ | ||
936 | skip_instructions = append_jump(desc, JUMP_TEST_ALL); | ||
937 | |||
938 | /* There is no input data, cryptlen = 0 */ | ||
939 | set_jump_tgt_here(desc, zero_cryptlen_jump_cmd); | ||
940 | |||
941 | /* Read AAD */ | ||
942 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
943 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1); | ||
944 | |||
945 | set_jump_tgt_here(desc, skip_instructions); | ||
946 | |||
921 | /* Write ICV */ | 947 | /* Write ICV */ |
922 | append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | | 948 | append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | |
923 | LDST_SRCDST_BYTE_CONTEXT); | 949 | LDST_SRCDST_BYTE_CONTEXT); |
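The reworked encap descriptor hinges on two patched jumps: a conditional one into an AAD-only tail, taken when cryptlen is zero, and an unconditional one that lets the normal path hop over that tail so the AAD is never consumed twice. Stripped of the FIFO commands, the pairing reduces to this sketch (desc_constr.h helpers assumed):

	u32 *zero_cryptlen_jump_cmd, *skip_instructions;

	/* Taken when the preceding MATH op left a zero result. */
	zero_cryptlen_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					     JUMP_COND_MATH_Z);

	/* ... normal path: read AAD, skip IV, move the payload ... */

	/* Do not fall into the tail and re-read the AAD. */
	skip_instructions = append_jump(desc, JUMP_TEST_ALL);

	set_jump_tgt_here(desc, zero_cryptlen_jump_cmd);
	/* ... AAD-only tail: FIFO LOAD the AAD as LAST1 ... */
	set_jump_tgt_here(desc, skip_instructions);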
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h index da4a4ee60c80..f2893393ba5e 100644 --- a/drivers/crypto/caam/caamalg_desc.h +++ b/drivers/crypto/caam/caamalg_desc.h | |||
@@ -12,7 +12,7 @@ | |||
12 | #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) | 12 | #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) |
13 | #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ) | 13 | #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ) |
14 | #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) | 14 | #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) |
15 | #define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ) | 15 | #define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 8 * CAAM_CMD_SZ) |
16 | #define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ) | 16 | #define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ) |
17 | #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ) | 17 | #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ) |
18 | #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ) | 18 | #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ) |
@@ -31,7 +31,7 @@ | |||
31 | #define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ) | 31 | #define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ) |
32 | 32 | ||
33 | #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ) | 33 | #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ) |
34 | #define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) | 34 | #define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 16 * CAAM_CMD_SZ) |
35 | #define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) | 35 | #define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) |
36 | #define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ) | 36 | #define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ) |
37 | #define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ) | 37 | #define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ) |
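The header bumps are pure bookkeeping for the hunks above. A rough budget check, assuming CAAM_CMD_SZ is the 4-byte command word:

	/* rfc4106 encap: 13 -> 16 commands, i.e. the extra MATH ADD and
	 * two patched jumps, net of the folded FIFO op:
	 *   old: (3 + 13) * 4 = 64 bytes
	 *   new: (3 + 16) * 4 = 76 bytes
	 * aead givencap grows by exactly one word for the NIFP wait
	 * jump: DESC_AEAD_ENC_LEN + 8 * CAAM_CMD_SZ. */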
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 32f0f8a72067..8e3449670d2f 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c | |||
@@ -105,6 +105,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
105 | ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); | 105 | ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); |
106 | } | 106 | } |
107 | 107 | ||
108 | /* | ||
109 | * In case |user key| > |derived key|, using DKP<imm,imm> would result | ||
110 | * in invalid opcodes (last bytes of user key) in the resulting | ||
111 | * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key | ||
112 | * addresses are needed. | ||
113 | */ | ||
114 | ctx->adata.key_virt = ctx->key; | ||
115 | ctx->adata.key_dma = ctx->key_dma; | ||
116 | |||
117 | ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; | ||
118 | ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; | ||
119 | |||
108 | data_len[0] = ctx->adata.keylen_pad; | 120 | data_len[0] = ctx->adata.keylen_pad; |
109 | data_len[1] = ctx->cdata.keylen; | 121 | data_len[1] = ctx->cdata.keylen; |
110 | 122 | ||
@@ -118,16 +130,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
118 | ARRAY_SIZE(data_len)) < 0) | 130 | ARRAY_SIZE(data_len)) < 0) |
119 | return -EINVAL; | 131 | return -EINVAL; |
120 | 132 | ||
121 | if (inl_mask & 1) | ||
122 | ctx->adata.key_virt = ctx->key; | ||
123 | else | ||
124 | ctx->adata.key_dma = ctx->key_dma; | ||
125 | |||
126 | if (inl_mask & 2) | ||
127 | ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; | ||
128 | else | ||
129 | ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; | ||
130 | |||
131 | ctx->adata.key_inline = !!(inl_mask & 1); | 133 | ctx->adata.key_inline = !!(inl_mask & 1); |
132 | ctx->cdata.key_inline = !!(inl_mask & 2); | 134 | ctx->cdata.key_inline = !!(inl_mask & 2); |
133 | 135 | ||
@@ -143,16 +145,6 @@ skip_enc: | |||
143 | ARRAY_SIZE(data_len)) < 0) | 145 | ARRAY_SIZE(data_len)) < 0) |
144 | return -EINVAL; | 146 | return -EINVAL; |
145 | 147 | ||
146 | if (inl_mask & 1) | ||
147 | ctx->adata.key_virt = ctx->key; | ||
148 | else | ||
149 | ctx->adata.key_dma = ctx->key_dma; | ||
150 | |||
151 | if (inl_mask & 2) | ||
152 | ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; | ||
153 | else | ||
154 | ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; | ||
155 | |||
156 | ctx->adata.key_inline = !!(inl_mask & 1); | 148 | ctx->adata.key_inline = !!(inl_mask & 1); |
157 | ctx->cdata.key_inline = !!(inl_mask & 2); | 149 | ctx->cdata.key_inline = !!(inl_mask & 2); |
158 | 150 | ||
@@ -171,16 +163,6 @@ skip_enc: | |||
171 | ARRAY_SIZE(data_len)) < 0) | 163 | ARRAY_SIZE(data_len)) < 0) |
172 | return -EINVAL; | 164 | return -EINVAL; |
173 | 165 | ||
174 | if (inl_mask & 1) | ||
175 | ctx->adata.key_virt = ctx->key; | ||
176 | else | ||
177 | ctx->adata.key_dma = ctx->key_dma; | ||
178 | |||
179 | if (inl_mask & 2) | ||
180 | ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; | ||
181 | else | ||
182 | ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; | ||
183 | |||
184 | ctx->adata.key_inline = !!(inl_mask & 1); | 166 | ctx->adata.key_inline = !!(inl_mask & 1); |
185 | ctx->cdata.key_inline = !!(inl_mask & 2); | 167 | ctx->cdata.key_inline = !!(inl_mask & 2); |
186 | 168 | ||
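All three aead_set_sh_desc() hunks collapse into the comment they introduce: with DKP the immediate-vs-referenced choice is still made per descriptor from inl_mask, but a DKP<imm,imm> image built from a user key longer than the derived key would leave trailing user-key bytes in the descriptor where the CCB expects opcodes. Publishing both views unconditionally keeps either encoding valid (sketch):

	/* Set both views up front; the shared-descriptor constructors
	 * later pick imm vs. ptr from {a,c}data.key_inline. */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma  = ctx->key_dma;
	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma  = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);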
@@ -252,11 +234,10 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key, | |||
252 | dma_sync_single_for_device(jrdev->parent, ctx->key_dma, | 234 | dma_sync_single_for_device(jrdev->parent, ctx->key_dma, |
253 | ctx->adata.keylen_pad + keys.enckeylen, | 235 | ctx->adata.keylen_pad + keys.enckeylen, |
254 | ctx->dir); | 236 | ctx->dir); |
255 | #ifdef DEBUG | 237 | |
256 | print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", | 238 | print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ", |
257 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | 239 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, |
258 | ctx->adata.keylen_pad + keys.enckeylen, 1); | 240 | ctx->adata.keylen_pad + keys.enckeylen, 1); |
259 | #endif | ||
260 | 241 | ||
261 | skip_split_key: | 242 | skip_split_key: |
262 | ctx->cdata.keylen = keys.enckeylen; | 243 | ctx->cdata.keylen = keys.enckeylen; |
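Dropping the #ifdef DEBUG block works because print_hex_dump_debug() from <linux/printk.h> already compiles away unless DEBUG or CONFIG_DYNAMIC_DEBUG is set, so the guard merely duplicated what the helper does:

	/* No #ifdef needed: a no-op in non-debug builds, a dynamically
	 * controllable hex dump otherwise. */
	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);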
@@ -296,33 +277,17 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, | |||
296 | unsigned int keylen) | 277 | unsigned int keylen) |
297 | { | 278 | { |
298 | struct crypto_authenc_keys keys; | 279 | struct crypto_authenc_keys keys; |
299 | u32 flags; | ||
300 | int err; | 280 | int err; |
301 | 281 | ||
302 | err = crypto_authenc_extractkeys(&keys, key, keylen); | 282 | err = crypto_authenc_extractkeys(&keys, key, keylen); |
303 | if (unlikely(err)) | 283 | if (unlikely(err)) |
304 | goto badkey; | 284 | return err; |
305 | |||
306 | err = -EINVAL; | ||
307 | if (keys.enckeylen != DES3_EDE_KEY_SIZE) | ||
308 | goto badkey; | ||
309 | |||
310 | flags = crypto_aead_get_flags(aead); | ||
311 | err = __des3_verify_key(&flags, keys.enckey); | ||
312 | if (unlikely(err)) { | ||
313 | crypto_aead_set_flags(aead, flags); | ||
314 | goto out; | ||
315 | } | ||
316 | 285 | ||
317 | err = aead_setkey(aead, key, keylen); | 286 | err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?: |
287 | aead_setkey(aead, key, keylen); | ||
318 | 288 | ||
319 | out: | ||
320 | memzero_explicit(&keys, sizeof(keys)); | 289 | memzero_explicit(&keys, sizeof(keys)); |
321 | return err; | 290 | return err; |
322 | |||
323 | badkey: | ||
324 | crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
325 | goto out; | ||
326 | } | 291 | } |
327 | 292 | ||
328 | static int gcm_set_sh_desc(struct crypto_aead *aead) | 293 | static int gcm_set_sh_desc(struct crypto_aead *aead) |
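The badkey/out label pair disappears because verify_aead_des3_key() (added to <crypto/internal/des.h> in this series) performs both the length check and the weak-key policy, flagging the tfm itself, which leaves a GNU ?: chain as the whole error path:

	/* a ?: b evaluates b only when a == 0, so the first non-zero
	 * error short-circuits the chain and is returned as-is. */
	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
	      aead_setkey(aead, key, keylen);

	memzero_explicit(&keys, sizeof(keys));
	return err;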
@@ -371,6 +336,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) | |||
371 | static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) | 336 | static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) |
372 | { | 337 | { |
373 | struct caam_ctx *ctx = crypto_aead_ctx(authenc); | 338 | struct caam_ctx *ctx = crypto_aead_ctx(authenc); |
339 | int err; | ||
340 | |||
341 | err = crypto_gcm_check_authsize(authsize); | ||
342 | if (err) | ||
343 | return err; | ||
374 | 344 | ||
375 | ctx->authsize = authsize; | 345 | ctx->authsize = authsize; |
376 | gcm_set_sh_desc(authenc); | 346 | gcm_set_sh_desc(authenc); |
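crypto_gcm_check_authsize() (new in <crypto/gcm.h> with this series) centralises the tag lengths GCM permits — 4, 8 and 12 through 16 bytes per SP 800-38D — and its rfc4106 sibling narrows that to 8, 12 and 16. The call-site pattern the setauthsize hunks adopt:

	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;	/* -EINVAL for unsupported tag sizes */

	ctx->authsize = authsize;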
@@ -385,6 +355,12 @@ static int gcm_setkey(struct crypto_aead *aead, | |||
385 | struct device *jrdev = ctx->jrdev; | 355 | struct device *jrdev = ctx->jrdev; |
386 | int ret; | 356 | int ret; |
387 | 357 | ||
358 | ret = aes_check_keylen(keylen); | ||
359 | if (ret) { | ||
360 | crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
361 | return ret; | ||
362 | } | ||
363 | |||
388 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", | 364 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", |
389 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 365 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
390 | 366 | ||
@@ -466,6 +442,11 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc, | |||
466 | unsigned int authsize) | 442 | unsigned int authsize) |
467 | { | 443 | { |
468 | struct caam_ctx *ctx = crypto_aead_ctx(authenc); | 444 | struct caam_ctx *ctx = crypto_aead_ctx(authenc); |
445 | int err; | ||
446 | |||
447 | err = crypto_rfc4106_check_authsize(authsize); | ||
448 | if (err) | ||
449 | return err; | ||
469 | 450 | ||
470 | ctx->authsize = authsize; | 451 | ctx->authsize = authsize; |
471 | rfc4106_set_sh_desc(authenc); | 452 | rfc4106_set_sh_desc(authenc); |
@@ -480,8 +461,11 @@ static int rfc4106_setkey(struct crypto_aead *aead, | |||
480 | struct device *jrdev = ctx->jrdev; | 461 | struct device *jrdev = ctx->jrdev; |
481 | int ret; | 462 | int ret; |
482 | 463 | ||
483 | if (keylen < 4) | 464 | ret = aes_check_keylen(keylen - 4); |
484 | return -EINVAL; | 465 | if (ret) { |
466 | crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
467 | return ret; | ||
468 | } | ||
485 | 469 | ||
486 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", | 470 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", |
487 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 471 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
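An rfc4106 key blob is {AES key, 4-byte salt}, so only keylen - 4 is subject to AES rules; the old keylen < 4 test accepted, say, a 21-byte blob that no AES engine can key. The unsigned subtraction is safe too:

	/* For keylen < 4 the unsigned wrap yields a huge value that
	 * aes_check_keylen() rejects, since only 16/24/32 pass. */
	ret = aes_check_keylen(keylen - 4);
	if (ret) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}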
@@ -569,6 +553,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc, | |||
569 | { | 553 | { |
570 | struct caam_ctx *ctx = crypto_aead_ctx(authenc); | 554 | struct caam_ctx *ctx = crypto_aead_ctx(authenc); |
571 | 555 | ||
556 | if (authsize != 16) | ||
557 | return -EINVAL; | ||
558 | |||
572 | ctx->authsize = authsize; | 559 | ctx->authsize = authsize; |
573 | rfc4543_set_sh_desc(authenc); | 560 | rfc4543_set_sh_desc(authenc); |
574 | 561 | ||
@@ -582,8 +569,11 @@ static int rfc4543_setkey(struct crypto_aead *aead, | |||
582 | struct device *jrdev = ctx->jrdev; | 569 | struct device *jrdev = ctx->jrdev; |
583 | int ret; | 570 | int ret; |
584 | 571 | ||
585 | if (keylen < 4) | 572 | ret = aes_check_keylen(keylen - 4); |
586 | return -EINVAL; | 573 | if (ret) { |
574 | crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
575 | return ret; | ||
576 | } | ||
587 | 577 | ||
588 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", | 578 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", |
589 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 579 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
@@ -624,7 +614,7 @@ static int rfc4543_setkey(struct crypto_aead *aead, | |||
624 | } | 614 | } |
625 | 615 | ||
626 | static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, | 616 | static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, |
627 | unsigned int keylen) | 617 | unsigned int keylen, const u32 ctx1_iv_off) |
628 | { | 618 | { |
629 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); | 619 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); |
630 | struct caam_skcipher_alg *alg = | 620 | struct caam_skcipher_alg *alg = |
@@ -632,33 +622,12 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, | |||
632 | skcipher); | 622 | skcipher); |
633 | struct device *jrdev = ctx->jrdev; | 623 | struct device *jrdev = ctx->jrdev; |
634 | unsigned int ivsize = crypto_skcipher_ivsize(skcipher); | 624 | unsigned int ivsize = crypto_skcipher_ivsize(skcipher); |
635 | u32 ctx1_iv_off = 0; | ||
636 | const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == | ||
637 | OP_ALG_AAI_CTR_MOD128); | ||
638 | const bool is_rfc3686 = alg->caam.rfc3686; | 625 | const bool is_rfc3686 = alg->caam.rfc3686; |
639 | int ret = 0; | 626 | int ret = 0; |
640 | 627 | ||
641 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", | 628 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", |
642 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 629 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
643 | 630 | ||
644 | /* | ||
645 | * AES-CTR needs to load IV in CONTEXT1 reg | ||
646 | * at an offset of 128bits (16bytes) | ||
647 | * CONTEXT1[255:128] = IV | ||
648 | */ | ||
649 | if (ctr_mode) | ||
650 | ctx1_iv_off = 16; | ||
651 | |||
652 | /* | ||
653 | * RFC3686 specific: | ||
654 | * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} | ||
655 | * | *key = {KEY, NONCE} | ||
656 | */ | ||
657 | if (is_rfc3686) { | ||
658 | ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; | ||
659 | keylen -= CTR_RFC3686_NONCE_SIZE; | ||
660 | } | ||
661 | |||
662 | ctx->cdata.keylen = keylen; | 631 | ctx->cdata.keylen = keylen; |
663 | ctx->cdata.key_virt = key; | 632 | ctx->cdata.key_virt = key; |
664 | ctx->cdata.key_inline = true; | 633 | ctx->cdata.key_inline = true; |
@@ -694,11 +663,80 @@ badkey: | |||
694 | return -EINVAL; | 663 | return -EINVAL; |
695 | } | 664 | } |
696 | 665 | ||
666 | static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, | ||
667 | const u8 *key, unsigned int keylen) | ||
668 | { | ||
669 | int err; | ||
670 | |||
671 | err = aes_check_keylen(keylen); | ||
672 | if (err) { | ||
673 | crypto_skcipher_set_flags(skcipher, | ||
674 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
675 | return err; | ||
676 | } | ||
677 | |||
678 | return skcipher_setkey(skcipher, key, keylen, 0); | ||
679 | } | ||
680 | |||
681 | static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher, | ||
682 | const u8 *key, unsigned int keylen) | ||
683 | { | ||
684 | u32 ctx1_iv_off; | ||
685 | int err; | ||
686 | |||
687 | /* | ||
688 | * RFC3686 specific: | ||
689 | * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} | ||
690 | * | *key = {KEY, NONCE} | ||
691 | */ | ||
692 | ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; | ||
693 | keylen -= CTR_RFC3686_NONCE_SIZE; | ||
694 | |||
695 | err = aes_check_keylen(keylen); | ||
696 | if (err) { | ||
697 | crypto_skcipher_set_flags(skcipher, | ||
698 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
699 | return err; | ||
700 | } | ||
701 | |||
702 | return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); | ||
703 | } | ||
704 | |||
705 | static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, | ||
706 | const u8 *key, unsigned int keylen) | ||
707 | { | ||
708 | u32 ctx1_iv_off; | ||
709 | int err; | ||
710 | |||
711 | /* | ||
712 | * AES-CTR needs to load IV in CONTEXT1 reg | ||
713 | * at an offset of 128bits (16bytes) | ||
714 | * CONTEXT1[255:128] = IV | ||
715 | */ | ||
716 | ctx1_iv_off = 16; | ||
717 | |||
718 | err = aes_check_keylen(keylen); | ||
719 | if (err) { | ||
720 | crypto_skcipher_set_flags(skcipher, | ||
721 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
722 | return err; | ||
723 | } | ||
724 | |||
725 | return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); | ||
726 | } | ||
727 | |||
697 | static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, | 728 | static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, |
698 | const u8 *key, unsigned int keylen) | 729 | const u8 *key, unsigned int keylen) |
699 | { | 730 | { |
700 | return unlikely(des3_verify_key(skcipher, key)) ?: | 731 | return verify_skcipher_des3_key(skcipher, key) ?: |
701 | skcipher_setkey(skcipher, key, keylen); | 732 | skcipher_setkey(skcipher, key, keylen, 0); |
733 | } | ||
734 | |||
735 | static int des_skcipher_setkey(struct crypto_skcipher *skcipher, | ||
736 | const u8 *key, unsigned int keylen) | ||
737 | { | ||
738 | return verify_skcipher_des_key(skcipher, key) ?: | ||
739 | skcipher_setkey(skcipher, key, keylen, 0); | ||
702 | } | 740 | } |
703 | 741 | ||
704 | static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, | 742 | static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, |
@@ -884,20 +922,8 @@ static void aead_done(struct caam_drv_req *drv_req, u32 status) | |||
884 | 922 | ||
885 | qidev = caam_ctx->qidev; | 923 | qidev = caam_ctx->qidev; |
886 | 924 | ||
887 | if (unlikely(status)) { | 925 | if (unlikely(status)) |
888 | u32 ssrc = status & JRSTA_SSRC_MASK; | 926 | ecode = caam_jr_strstatus(qidev, status); |
889 | u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; | ||
890 | |||
891 | caam_jr_strstatus(qidev, status); | ||
892 | /* | ||
893 | * verify hw auth check passed else return -EBADMSG | ||
894 | */ | ||
895 | if (ssrc == JRSTA_SSRC_CCB_ERROR && | ||
896 | err_id == JRSTA_CCBERR_ERRID_ICVCHK) | ||
897 | ecode = -EBADMSG; | ||
898 | else | ||
899 | ecode = -EIO; | ||
900 | } | ||
901 | 927 | ||
902 | edesc = container_of(drv_req, typeof(*edesc), drv_req); | 928 | edesc = container_of(drv_req, typeof(*edesc), drv_req); |
903 | aead_unmap(qidev, edesc, aead_req); | 929 | aead_unmap(qidev, edesc, aead_req); |
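This is another series-wide change: caam_jr_strstatus() now returns an -errno derived from the status word instead of merely logging it, so the ICV-mismatch special case (-EBADMSG, versus -EIO for other hardware errors) is classified once, in the status decoder. Every completion callback shrinks to the same shape (sketch):

	int ecode = 0;

	if (unlikely(status))
		ecode = caam_jr_strstatus(qidev, status);

	/* ... unmap, free the extended descriptor ... */
	aead_request_complete(aead_req, ecode);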
@@ -1168,18 +1194,14 @@ static int aead_decrypt(struct aead_request *req) | |||
1168 | 1194 | ||
1169 | static int ipsec_gcm_encrypt(struct aead_request *req) | 1195 | static int ipsec_gcm_encrypt(struct aead_request *req) |
1170 | { | 1196 | { |
1171 | if (req->assoclen < 8) | 1197 | return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req, |
1172 | return -EINVAL; | 1198 | true); |
1173 | |||
1174 | return aead_crypt(req, true); | ||
1175 | } | 1199 | } |
1176 | 1200 | ||
1177 | static int ipsec_gcm_decrypt(struct aead_request *req) | 1201 | static int ipsec_gcm_decrypt(struct aead_request *req) |
1178 | { | 1202 | { |
1179 | if (req->assoclen < 8) | 1203 | return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req, |
1180 | return -EINVAL; | 1204 | false); |
1181 | |||
1182 | return aead_crypt(req, false); | ||
1183 | } | 1205 | } |
1184 | 1206 | ||
1185 | static void skcipher_done(struct caam_drv_req *drv_req, u32 status) | 1207 | static void skcipher_done(struct caam_drv_req *drv_req, u32 status) |
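crypto_ipsec_check_assoclen() replaces the loose assoclen >= 8 test with the only AAD layouts ESP actually produces; as introduced in <crypto/gcm.h> by this series, it reduces to a sketch like:

	static inline int crypto_ipsec_check_assoclen(unsigned int assoclen)
	{
		switch (assoclen) {
		case 16:	/* SPI + 32-bit seq no + 8-byte IV */
		case 20:	/* SPI + 64-bit ESN + 8-byte IV */
			return 0;
		}
		return -EINVAL;
	}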
@@ -1190,13 +1212,14 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status) | |||
1190 | struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher); | 1212 | struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher); |
1191 | struct device *qidev = caam_ctx->qidev; | 1213 | struct device *qidev = caam_ctx->qidev; |
1192 | int ivsize = crypto_skcipher_ivsize(skcipher); | 1214 | int ivsize = crypto_skcipher_ivsize(skcipher); |
1215 | int ecode = 0; | ||
1193 | 1216 | ||
1194 | dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); | 1217 | dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); |
1195 | 1218 | ||
1196 | edesc = container_of(drv_req, typeof(*edesc), drv_req); | 1219 | edesc = container_of(drv_req, typeof(*edesc), drv_req); |
1197 | 1220 | ||
1198 | if (status) | 1221 | if (status) |
1199 | caam_jr_strstatus(qidev, status); | 1222 | ecode = caam_jr_strstatus(qidev, status); |
1200 | 1223 | ||
1201 | print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", | 1224 | print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", |
1202 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | 1225 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, |
@@ -1212,10 +1235,12 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status) | |||
1212 | * ciphertext block (CBC mode) or last counter (CTR mode). | 1235 | * ciphertext block (CBC mode) or last counter (CTR mode). |
1213 | * This is used e.g. by the CTS mode. | 1236 | * This is used e.g. by the CTS mode. |
1214 | */ | 1237 | */ |
1215 | memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); | 1238 | if (!ecode) |
1239 | memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, | ||
1240 | ivsize); | ||
1216 | 1241 | ||
1217 | qi_cache_free(edesc); | 1242 | qi_cache_free(edesc); |
1218 | skcipher_request_complete(req, status); | 1243 | skcipher_request_complete(req, ecode); |
1219 | } | 1244 | } |
1220 | 1245 | ||
1221 | static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, | 1246 | static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, |
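The IV write-back turning conditional is a correctness fix: after a failed job, the area behind the S/G table need not hold a valid last ciphertext block (CBC) or counter (CTR), and copying garbage into req->iv would poison a chained follow-up such as CTS. Sketch:

	/* Only a job that completed leaves next-IV material behind the
	 * qm_sg entries; on error, preserve the caller's IV. */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	skcipher_request_complete(req, ecode);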
@@ -1377,6 +1402,9 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) | |||
1377 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); | 1402 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); |
1378 | int ret; | 1403 | int ret; |
1379 | 1404 | ||
1405 | if (!req->cryptlen) | ||
1406 | return 0; | ||
1407 | |||
1380 | if (unlikely(caam_congested)) | 1408 | if (unlikely(caam_congested)) |
1381 | return -EAGAIN; | 1409 | return -EAGAIN; |
1382 | 1410 | ||
@@ -1414,7 +1442,7 @@ static struct caam_skcipher_alg driver_algs[] = { | |||
1414 | .cra_driver_name = "cbc-aes-caam-qi", | 1442 | .cra_driver_name = "cbc-aes-caam-qi", |
1415 | .cra_blocksize = AES_BLOCK_SIZE, | 1443 | .cra_blocksize = AES_BLOCK_SIZE, |
1416 | }, | 1444 | }, |
1417 | .setkey = skcipher_setkey, | 1445 | .setkey = aes_skcipher_setkey, |
1418 | .encrypt = skcipher_encrypt, | 1446 | .encrypt = skcipher_encrypt, |
1419 | .decrypt = skcipher_decrypt, | 1447 | .decrypt = skcipher_decrypt, |
1420 | .min_keysize = AES_MIN_KEY_SIZE, | 1448 | .min_keysize = AES_MIN_KEY_SIZE, |
@@ -1446,7 +1474,7 @@ static struct caam_skcipher_alg driver_algs[] = { | |||
1446 | .cra_driver_name = "cbc-des-caam-qi", | 1474 | .cra_driver_name = "cbc-des-caam-qi", |
1447 | .cra_blocksize = DES_BLOCK_SIZE, | 1475 | .cra_blocksize = DES_BLOCK_SIZE, |
1448 | }, | 1476 | }, |
1449 | .setkey = skcipher_setkey, | 1477 | .setkey = des_skcipher_setkey, |
1450 | .encrypt = skcipher_encrypt, | 1478 | .encrypt = skcipher_encrypt, |
1451 | .decrypt = skcipher_decrypt, | 1479 | .decrypt = skcipher_decrypt, |
1452 | .min_keysize = DES_KEY_SIZE, | 1480 | .min_keysize = DES_KEY_SIZE, |
@@ -1462,7 +1490,7 @@ static struct caam_skcipher_alg driver_algs[] = { | |||
1462 | .cra_driver_name = "ctr-aes-caam-qi", | 1490 | .cra_driver_name = "ctr-aes-caam-qi", |
1463 | .cra_blocksize = 1, | 1491 | .cra_blocksize = 1, |
1464 | }, | 1492 | }, |
1465 | .setkey = skcipher_setkey, | 1493 | .setkey = ctr_skcipher_setkey, |
1466 | .encrypt = skcipher_encrypt, | 1494 | .encrypt = skcipher_encrypt, |
1467 | .decrypt = skcipher_decrypt, | 1495 | .decrypt = skcipher_decrypt, |
1468 | .min_keysize = AES_MIN_KEY_SIZE, | 1496 | .min_keysize = AES_MIN_KEY_SIZE, |
@@ -1480,7 +1508,7 @@ static struct caam_skcipher_alg driver_algs[] = { | |||
1480 | .cra_driver_name = "rfc3686-ctr-aes-caam-qi", | 1508 | .cra_driver_name = "rfc3686-ctr-aes-caam-qi", |
1481 | .cra_blocksize = 1, | 1509 | .cra_blocksize = 1, |
1482 | }, | 1510 | }, |
1483 | .setkey = skcipher_setkey, | 1511 | .setkey = rfc3686_skcipher_setkey, |
1484 | .encrypt = skcipher_encrypt, | 1512 | .encrypt = skcipher_encrypt, |
1485 | .decrypt = skcipher_decrypt, | 1513 | .decrypt = skcipher_decrypt, |
1486 | .min_keysize = AES_MIN_KEY_SIZE + | 1514 | .min_keysize = AES_MIN_KEY_SIZE + |
@@ -2523,10 +2551,9 @@ int caam_qi_algapi_init(struct device *ctrldev) | |||
2523 | unsigned int md_limit = SHA512_DIGEST_SIZE; | 2551 | unsigned int md_limit = SHA512_DIGEST_SIZE; |
2524 | bool registered = false; | 2552 | bool registered = false; |
2525 | 2553 | ||
2526 | if (caam_dpaa2) { | 2554 | /* Make sure this runs only on (DPAA 1.x) QI */ |
2527 | dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); | 2555 | if (!priv->qi_present || caam_dpaa2) |
2528 | return -ENODEV; | 2556 | return 0; |
2529 | } | ||
2530 | 2557 | ||
2531 | /* | 2558 | /* |
2532 | * Register crypto algorithms the device supports. | 2559 | * Register crypto algorithms the device supports. |
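Probe behaviour changes with this hunk: absence of DPAA 1.x QI stops being an error, so one kernel image carrying every CAAM front end initialises cleanly on any SoC and simply registers nothing here:

	/* Not a failure -- there is just nothing for caam/qi to do on
	 * DPAA 2.x or on parts without the Queue Interface. */
	if (!priv->qi_present || caam_dpaa2)
		return 0;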
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 06bf32c32cbd..3443f6d6dd83 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include "key_gen.h" | 15 | #include "key_gen.h" |
16 | #include "caamalg_desc.h" | 16 | #include "caamalg_desc.h" |
17 | #include "caamhash_desc.h" | 17 | #include "caamhash_desc.h" |
18 | #include "dpseci-debugfs.h" | ||
18 | #include <linux/fsl/mc.h> | 19 | #include <linux/fsl/mc.h> |
19 | #include <soc/fsl/dpaa2-io.h> | 20 | #include <soc/fsl/dpaa2-io.h> |
20 | #include <soc/fsl/dpaa2-fd.h> | 21 | #include <soc/fsl/dpaa2-fd.h> |
@@ -198,6 +199,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
198 | ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); | 199 | ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); |
199 | } | 200 | } |
200 | 201 | ||
202 | /* | ||
203 | * In case |user key| > |derived key|, using DKP<imm,imm> would result | ||
204 | * in invalid opcodes (last bytes of user key) in the resulting | ||
205 | * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key | ||
206 | * addresses are needed. | ||
207 | */ | ||
208 | ctx->adata.key_virt = ctx->key; | ||
209 | ctx->adata.key_dma = ctx->key_dma; | ||
210 | |||
211 | ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; | ||
212 | ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; | ||
213 | |||
201 | data_len[0] = ctx->adata.keylen_pad; | 214 | data_len[0] = ctx->adata.keylen_pad; |
202 | data_len[1] = ctx->cdata.keylen; | 215 | data_len[1] = ctx->cdata.keylen; |
203 | 216 | ||
@@ -209,16 +222,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
209 | ARRAY_SIZE(data_len)) < 0) | 222 | ARRAY_SIZE(data_len)) < 0) |
210 | return -EINVAL; | 223 | return -EINVAL; |
211 | 224 | ||
212 | if (inl_mask & 1) | ||
213 | ctx->adata.key_virt = ctx->key; | ||
214 | else | ||
215 | ctx->adata.key_dma = ctx->key_dma; | ||
216 | |||
217 | if (inl_mask & 2) | ||
218 | ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; | ||
219 | else | ||
220 | ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; | ||
221 | |||
222 | ctx->adata.key_inline = !!(inl_mask & 1); | 225 | ctx->adata.key_inline = !!(inl_mask & 1); |
223 | ctx->cdata.key_inline = !!(inl_mask & 2); | 226 | ctx->cdata.key_inline = !!(inl_mask & 2); |
224 | 227 | ||
@@ -247,16 +250,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
247 | ARRAY_SIZE(data_len)) < 0) | 250 | ARRAY_SIZE(data_len)) < 0) |
248 | return -EINVAL; | 251 | return -EINVAL; |
249 | 252 | ||
250 | if (inl_mask & 1) | ||
251 | ctx->adata.key_virt = ctx->key; | ||
252 | else | ||
253 | ctx->adata.key_dma = ctx->key_dma; | ||
254 | |||
255 | if (inl_mask & 2) | ||
256 | ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; | ||
257 | else | ||
258 | ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; | ||
259 | |||
260 | ctx->adata.key_inline = !!(inl_mask & 1); | 253 | ctx->adata.key_inline = !!(inl_mask & 1); |
261 | ctx->cdata.key_inline = !!(inl_mask & 2); | 254 | ctx->cdata.key_inline = !!(inl_mask & 2); |
262 | 255 | ||
@@ -329,7 +322,6 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, | |||
329 | unsigned int keylen) | 322 | unsigned int keylen) |
330 | { | 323 | { |
331 | struct crypto_authenc_keys keys; | 324 | struct crypto_authenc_keys keys; |
332 | u32 flags; | ||
333 | int err; | 325 | int err; |
334 | 326 | ||
335 | err = crypto_authenc_extractkeys(&keys, key, keylen); | 327 | err = crypto_authenc_extractkeys(&keys, key, keylen); |
@@ -340,14 +332,8 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, | |||
340 | if (keys.enckeylen != DES3_EDE_KEY_SIZE) | 332 | if (keys.enckeylen != DES3_EDE_KEY_SIZE) |
341 | goto badkey; | 333 | goto badkey; |
342 | 334 | ||
343 | flags = crypto_aead_get_flags(aead); | 335 | err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?: |
344 | err = __des3_verify_key(&flags, keys.enckey); | 336 | aead_setkey(aead, key, keylen); |
345 | if (unlikely(err)) { | ||
346 | crypto_aead_set_flags(aead, flags); | ||
347 | goto out; | ||
348 | } | ||
349 | |||
350 | err = aead_setkey(aead, key, keylen); | ||
351 | 337 | ||
352 | out: | 338 | out: |
353 | memzero_explicit(&keys, sizeof(keys)); | 339 | memzero_explicit(&keys, sizeof(keys)); |
@@ -719,6 +705,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) | |||
719 | static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) | 705 | static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) |
720 | { | 706 | { |
721 | struct caam_ctx *ctx = crypto_aead_ctx(authenc); | 707 | struct caam_ctx *ctx = crypto_aead_ctx(authenc); |
708 | int err; | ||
709 | |||
710 | err = crypto_gcm_check_authsize(authsize); | ||
711 | if (err) | ||
712 | return err; | ||
722 | 713 | ||
723 | ctx->authsize = authsize; | 714 | ctx->authsize = authsize; |
724 | gcm_set_sh_desc(authenc); | 715 | gcm_set_sh_desc(authenc); |
@@ -731,7 +722,13 @@ static int gcm_setkey(struct crypto_aead *aead, | |||
731 | { | 722 | { |
732 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 723 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
733 | struct device *dev = ctx->dev; | 724 | struct device *dev = ctx->dev; |
725 | int ret; | ||
734 | 726 | ||
727 | ret = aes_check_keylen(keylen); | ||
728 | if (ret) { | ||
729 | crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
730 | return ret; | ||
731 | } | ||
735 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", | 732 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", |
736 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 733 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
737 | 734 | ||
@@ -805,6 +802,11 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc, | |||
805 | unsigned int authsize) | 802 | unsigned int authsize) |
806 | { | 803 | { |
807 | struct caam_ctx *ctx = crypto_aead_ctx(authenc); | 804 | struct caam_ctx *ctx = crypto_aead_ctx(authenc); |
805 | int err; | ||
806 | |||
807 | err = crypto_rfc4106_check_authsize(authsize); | ||
808 | if (err) | ||
809 | return err; | ||
808 | 810 | ||
809 | ctx->authsize = authsize; | 811 | ctx->authsize = authsize; |
810 | rfc4106_set_sh_desc(authenc); | 812 | rfc4106_set_sh_desc(authenc); |
@@ -817,9 +819,13 @@ static int rfc4106_setkey(struct crypto_aead *aead, | |||
817 | { | 819 | { |
818 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 820 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
819 | struct device *dev = ctx->dev; | 821 | struct device *dev = ctx->dev; |
822 | int ret; | ||
820 | 823 | ||
821 | if (keylen < 4) | 824 | ret = aes_check_keylen(keylen - 4); |
822 | return -EINVAL; | 825 | if (ret) { |
826 | crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
827 | return ret; | ||
828 | } | ||
823 | 829 | ||
824 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", | 830 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", |
825 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 831 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
@@ -900,6 +906,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc, | |||
900 | { | 906 | { |
901 | struct caam_ctx *ctx = crypto_aead_ctx(authenc); | 907 | struct caam_ctx *ctx = crypto_aead_ctx(authenc); |
902 | 908 | ||
909 | if (authsize != 16) | ||
910 | return -EINVAL; | ||
911 | |||
903 | ctx->authsize = authsize; | 912 | ctx->authsize = authsize; |
904 | rfc4543_set_sh_desc(authenc); | 913 | rfc4543_set_sh_desc(authenc); |
905 | 914 | ||
@@ -911,9 +920,13 @@ static int rfc4543_setkey(struct crypto_aead *aead, | |||
911 | { | 920 | { |
912 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 921 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
913 | struct device *dev = ctx->dev; | 922 | struct device *dev = ctx->dev; |
923 | int ret; | ||
914 | 924 | ||
915 | if (keylen < 4) | 925 | ret = aes_check_keylen(keylen - 4); |
916 | return -EINVAL; | 926 | if (ret) { |
927 | crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
928 | return ret; | ||
929 | } | ||
917 | 930 | ||
918 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", | 931 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", |
919 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 932 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
@@ -931,7 +944,7 @@ static int rfc4543_setkey(struct crypto_aead *aead, | |||
931 | } | 944 | } |
932 | 945 | ||
933 | static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, | 946 | static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, |
934 | unsigned int keylen) | 947 | unsigned int keylen, const u32 ctx1_iv_off) |
935 | { | 948 | { |
936 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); | 949 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); |
937 | struct caam_skcipher_alg *alg = | 950 | struct caam_skcipher_alg *alg = |
@@ -941,34 +954,11 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, | |||
941 | struct caam_flc *flc; | 954 | struct caam_flc *flc; |
942 | unsigned int ivsize = crypto_skcipher_ivsize(skcipher); | 955 | unsigned int ivsize = crypto_skcipher_ivsize(skcipher); |
943 | u32 *desc; | 956 | u32 *desc; |
944 | u32 ctx1_iv_off = 0; | ||
945 | const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == | ||
946 | OP_ALG_AAI_CTR_MOD128) && | ||
947 | ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) != | ||
948 | OP_ALG_ALGSEL_CHACHA20); | ||
949 | const bool is_rfc3686 = alg->caam.rfc3686; | 957 | const bool is_rfc3686 = alg->caam.rfc3686; |
950 | 958 | ||
951 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", | 959 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", |
952 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 960 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
953 | 961 | ||
954 | /* | ||
955 | * AES-CTR needs to load IV in CONTEXT1 reg | ||
956 | * at an offset of 128bits (16bytes) | ||
957 | * CONTEXT1[255:128] = IV | ||
958 | */ | ||
959 | if (ctr_mode) | ||
960 | ctx1_iv_off = 16; | ||
961 | |||
962 | /* | ||
963 | * RFC3686 specific: | ||
964 | * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} | ||
965 | * | *key = {KEY, NONCE} | ||
966 | */ | ||
967 | if (is_rfc3686) { | ||
968 | ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; | ||
969 | keylen -= CTR_RFC3686_NONCE_SIZE; | ||
970 | } | ||
971 | |||
972 | ctx->cdata.keylen = keylen; | 962 | ctx->cdata.keylen = keylen; |
973 | ctx->cdata.key_virt = key; | 963 | ctx->cdata.key_virt = key; |
974 | ctx->cdata.key_inline = true; | 964 | ctx->cdata.key_inline = true; |
@@ -996,11 +986,92 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, | |||
996 | return 0; | 986 | return 0; |
997 | } | 987 | } |
998 | 988 | ||
989 | static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, | ||
990 | const u8 *key, unsigned int keylen) | ||
991 | { | ||
992 | int err; | ||
993 | |||
994 | err = aes_check_keylen(keylen); | ||
995 | if (err) { | ||
996 | crypto_skcipher_set_flags(skcipher, | ||
997 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
998 | return err; | ||
999 | } | ||
1000 | |||
1001 | return skcipher_setkey(skcipher, key, keylen, 0); | ||
1002 | } | ||
1003 | |||
1004 | static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher, | ||
1005 | const u8 *key, unsigned int keylen) | ||
1006 | { | ||
1007 | u32 ctx1_iv_off; | ||
1008 | int err; | ||
1009 | |||
1010 | /* | ||
1011 | * RFC3686 specific: | ||
1012 | * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} | ||
1013 | * | *key = {KEY, NONCE} | ||
1014 | */ | ||
1015 | ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; | ||
1016 | keylen -= CTR_RFC3686_NONCE_SIZE; | ||
1017 | |||
1018 | err = aes_check_keylen(keylen); | ||
1019 | if (err) { | ||
1020 | crypto_skcipher_set_flags(skcipher, | ||
1021 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
1022 | return err; | ||
1023 | } | ||
1024 | |||
1025 | return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); | ||
1026 | } | ||
1027 | |||
1028 | static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, | ||
1029 | const u8 *key, unsigned int keylen) | ||
1030 | { | ||
1031 | u32 ctx1_iv_off; | ||
1032 | int err; | ||
1033 | |||
1034 | /* | ||
1035 | * AES-CTR needs to load IV in CONTEXT1 reg | ||
1036 | * at an offset of 128bits (16bytes) | ||
1037 | * CONTEXT1[255:128] = IV | ||
1038 | */ | ||
1039 | ctx1_iv_off = 16; | ||
1040 | |||
1041 | err = aes_check_keylen(keylen); | ||
1042 | if (err) { | ||
1043 | crypto_skcipher_set_flags(skcipher, | ||
1044 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
1045 | return err; | ||
1046 | } | ||
1047 | |||
1048 | return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); | ||
1049 | } | ||
1050 | |||
1051 | static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher, | ||
1052 | const u8 *key, unsigned int keylen) | ||
1053 | { | ||
1054 | if (keylen != CHACHA_KEY_SIZE) { | ||
1055 | crypto_skcipher_set_flags(skcipher, | ||
1056 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
1057 | return -EINVAL; | ||
1058 | } | ||
1059 | |||
1060 | return skcipher_setkey(skcipher, key, keylen, 0); | ||
1061 | } | ||
1062 | |||
1063 | static int des_skcipher_setkey(struct crypto_skcipher *skcipher, | ||
1064 | const u8 *key, unsigned int keylen) | ||
1065 | { | ||
1066 | return verify_skcipher_des_key(skcipher, key) ?: | ||
1067 | skcipher_setkey(skcipher, key, keylen, 0); | ||
1068 | } | ||
1069 | |||
999 | static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, | 1070 | static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, |
1000 | const u8 *key, unsigned int keylen) | 1071 | const u8 *key, unsigned int keylen) |
1001 | { | 1072 | { |
1002 | return unlikely(des3_verify_key(skcipher, key)) ?: | 1073 | return verify_skcipher_des3_key(skcipher, key) ?: |
1003 | skcipher_setkey(skcipher, key, keylen); | 1074 | skcipher_setkey(skcipher, key, keylen, 0); |
1004 | } | 1075 | } |
1005 | 1076 | ||
1006 | static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, | 1077 | static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, |
@@ -1227,10 +1298,8 @@ static void aead_encrypt_done(void *cbk_ctx, u32 status) | |||
1227 | 1298 | ||
1228 | dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); | 1299 | dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); |
1229 | 1300 | ||
1230 | if (unlikely(status)) { | 1301 | if (unlikely(status)) |
1231 | caam_qi2_strstatus(ctx->dev, status); | 1302 | ecode = caam_qi2_strstatus(ctx->dev, status); |
1232 | ecode = -EIO; | ||
1233 | } | ||
1234 | 1303 | ||
1235 | aead_unmap(ctx->dev, edesc, req); | 1304 | aead_unmap(ctx->dev, edesc, req); |
1236 | qi_cache_free(edesc); | 1305 | qi_cache_free(edesc); |
@@ -1250,17 +1319,8 @@ static void aead_decrypt_done(void *cbk_ctx, u32 status) | |||
1250 | 1319 | ||
1251 | dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); | 1320 | dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); |
1252 | 1321 | ||
1253 | if (unlikely(status)) { | 1322 | if (unlikely(status)) |
1254 | caam_qi2_strstatus(ctx->dev, status); | 1323 | ecode = caam_qi2_strstatus(ctx->dev, status); |
1255 | /* | ||
1256 | * verify hw auth check passed else return -EBADMSG | ||
1257 | */ | ||
1258 | if ((status & JRSTA_CCBERR_ERRID_MASK) == | ||
1259 | JRSTA_CCBERR_ERRID_ICVCHK) | ||
1260 | ecode = -EBADMSG; | ||
1261 | else | ||
1262 | ecode = -EIO; | ||
1263 | } | ||
1264 | 1324 | ||
1265 | aead_unmap(ctx->dev, edesc, req); | 1325 | aead_unmap(ctx->dev, edesc, req); |
1266 | qi_cache_free(edesc); | 1326 | qi_cache_free(edesc); |
@@ -1325,18 +1385,12 @@ static int aead_decrypt(struct aead_request *req) | |||
1325 | 1385 | ||
1326 | static int ipsec_gcm_encrypt(struct aead_request *req) | 1386 | static int ipsec_gcm_encrypt(struct aead_request *req) |
1327 | { | 1387 | { |
1328 | if (req->assoclen < 8) | 1388 | return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req); |
1329 | return -EINVAL; | ||
1330 | |||
1331 | return aead_encrypt(req); | ||
1332 | } | 1389 | } |
1333 | 1390 | ||
1334 | static int ipsec_gcm_decrypt(struct aead_request *req) | 1391 | static int ipsec_gcm_decrypt(struct aead_request *req) |
1335 | { | 1392 | { |
1336 | if (req->assoclen < 8) | 1393 | return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req); |
1337 | return -EINVAL; | ||
1338 | |||
1339 | return aead_decrypt(req); | ||
1340 | } | 1394 | } |
1341 | 1395 | ||
1342 | static void skcipher_encrypt_done(void *cbk_ctx, u32 status) | 1396 | static void skcipher_encrypt_done(void *cbk_ctx, u32 status) |
@@ -1352,10 +1406,8 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status) | |||
1352 | 1406 | ||
1353 | dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); | 1407 | dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); |
1354 | 1408 | ||
1355 | if (unlikely(status)) { | 1409 | if (unlikely(status)) |
1356 | caam_qi2_strstatus(ctx->dev, status); | 1410 | ecode = caam_qi2_strstatus(ctx->dev, status); |
1357 | ecode = -EIO; | ||
1358 | } | ||
1359 | 1411 | ||
1360 | print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", | 1412 | print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", |
1361 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | 1413 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, |
@@ -1371,7 +1423,9 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status) | |||
1371 | * ciphertext block (CBC mode) or last counter (CTR mode). | 1423 | * ciphertext block (CBC mode) or last counter (CTR mode). |
1372 | * This is used e.g. by the CTS mode. | 1424 | * This is used e.g. by the CTS mode. |
1373 | */ | 1425 | */ |
1374 | memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); | 1426 | if (!ecode) |
1427 | memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, | ||
1428 | ivsize); | ||
1375 | 1429 | ||
1376 | qi_cache_free(edesc); | 1430 | qi_cache_free(edesc); |
1377 | skcipher_request_complete(req, ecode); | 1431 | skcipher_request_complete(req, ecode); |
@@ -1390,10 +1444,8 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status) | |||
1390 | 1444 | ||
1391 | dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); | 1445 | dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); |
1392 | 1446 | ||
1393 | if (unlikely(status)) { | 1447 | if (unlikely(status)) |
1394 | caam_qi2_strstatus(ctx->dev, status); | 1448 | ecode = caam_qi2_strstatus(ctx->dev, status); |
1395 | ecode = -EIO; | ||
1396 | } | ||
1397 | 1449 | ||
1398 | print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", | 1450 | print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", |
1399 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | 1451 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, |
@@ -1409,7 +1461,9 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status) | |||
1409 | * ciphertext block (CBC mode) or last counter (CTR mode). | 1461 | * ciphertext block (CBC mode) or last counter (CTR mode). |
1410 | * This is used e.g. by the CTS mode. | 1462 | * This is used e.g. by the CTS mode. |
1411 | */ | 1463 | */ |
1412 | memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); | 1464 | if (!ecode) |
1465 | memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, | ||
1466 | ivsize); | ||
1413 | 1467 | ||
1414 | qi_cache_free(edesc); | 1468 | qi_cache_free(edesc); |
1415 | skcipher_request_complete(req, ecode); | 1469 | skcipher_request_complete(req, ecode); |
@@ -1423,6 +1477,9 @@ static int skcipher_encrypt(struct skcipher_request *req) | |||
1423 | struct caam_request *caam_req = skcipher_request_ctx(req); | 1477 | struct caam_request *caam_req = skcipher_request_ctx(req); |
1424 | int ret; | 1478 | int ret; |
1425 | 1479 | ||
1480 | if (!req->cryptlen) | ||
1481 | return 0; | ||
1482 | |||
1426 | /* allocate extended descriptor */ | 1483 | /* allocate extended descriptor */ |
1427 | edesc = skcipher_edesc_alloc(req); | 1484 | edesc = skcipher_edesc_alloc(req); |
1428 | if (IS_ERR(edesc)) | 1485 | if (IS_ERR(edesc)) |
@@ -1451,6 +1508,8 @@ static int skcipher_decrypt(struct skcipher_request *req) | |||
1451 | struct caam_request *caam_req = skcipher_request_ctx(req); | 1508 | struct caam_request *caam_req = skcipher_request_ctx(req); |
1452 | int ret; | 1509 | int ret; |
1453 | 1510 | ||
1511 | if (!req->cryptlen) | ||
1512 | return 0; | ||
1454 | /* allocate extended descriptor */ | 1513 | /* allocate extended descriptor */ |
1455 | edesc = skcipher_edesc_alloc(req); | 1514 | edesc = skcipher_edesc_alloc(req); |
1456 | if (IS_ERR(edesc)) | 1515 | if (IS_ERR(edesc)) |
@@ -1545,7 +1604,7 @@ static struct caam_skcipher_alg driver_algs[] = { | |||
1545 | .cra_driver_name = "cbc-aes-caam-qi2", | 1604 | .cra_driver_name = "cbc-aes-caam-qi2", |
1546 | .cra_blocksize = AES_BLOCK_SIZE, | 1605 | .cra_blocksize = AES_BLOCK_SIZE, |
1547 | }, | 1606 | }, |
1548 | .setkey = skcipher_setkey, | 1607 | .setkey = aes_skcipher_setkey, |
1549 | .encrypt = skcipher_encrypt, | 1608 | .encrypt = skcipher_encrypt, |
1550 | .decrypt = skcipher_decrypt, | 1609 | .decrypt = skcipher_decrypt, |
1551 | .min_keysize = AES_MIN_KEY_SIZE, | 1610 | .min_keysize = AES_MIN_KEY_SIZE, |
@@ -1577,7 +1636,7 @@ static struct caam_skcipher_alg driver_algs[] = { | |||
1577 | .cra_driver_name = "cbc-des-caam-qi2", | 1636 | .cra_driver_name = "cbc-des-caam-qi2", |
1578 | .cra_blocksize = DES_BLOCK_SIZE, | 1637 | .cra_blocksize = DES_BLOCK_SIZE, |
1579 | }, | 1638 | }, |
1580 | .setkey = skcipher_setkey, | 1639 | .setkey = des_skcipher_setkey, |
1581 | .encrypt = skcipher_encrypt, | 1640 | .encrypt = skcipher_encrypt, |
1582 | .decrypt = skcipher_decrypt, | 1641 | .decrypt = skcipher_decrypt, |
1583 | .min_keysize = DES_KEY_SIZE, | 1642 | .min_keysize = DES_KEY_SIZE, |
@@ -1593,7 +1652,7 @@ static struct caam_skcipher_alg driver_algs[] = { | |||
1593 | .cra_driver_name = "ctr-aes-caam-qi2", | 1652 | .cra_driver_name = "ctr-aes-caam-qi2", |
1594 | .cra_blocksize = 1, | 1653 | .cra_blocksize = 1, |
1595 | }, | 1654 | }, |
1596 | .setkey = skcipher_setkey, | 1655 | .setkey = ctr_skcipher_setkey, |
1597 | .encrypt = skcipher_encrypt, | 1656 | .encrypt = skcipher_encrypt, |
1598 | .decrypt = skcipher_decrypt, | 1657 | .decrypt = skcipher_decrypt, |
1599 | .min_keysize = AES_MIN_KEY_SIZE, | 1658 | .min_keysize = AES_MIN_KEY_SIZE, |
@@ -1611,7 +1670,7 @@ static struct caam_skcipher_alg driver_algs[] = { | |||
1611 | .cra_driver_name = "rfc3686-ctr-aes-caam-qi2", | 1670 | .cra_driver_name = "rfc3686-ctr-aes-caam-qi2", |
1612 | .cra_blocksize = 1, | 1671 | .cra_blocksize = 1, |
1613 | }, | 1672 | }, |
1614 | .setkey = skcipher_setkey, | 1673 | .setkey = rfc3686_skcipher_setkey, |
1615 | .encrypt = skcipher_encrypt, | 1674 | .encrypt = skcipher_encrypt, |
1616 | .decrypt = skcipher_decrypt, | 1675 | .decrypt = skcipher_decrypt, |
1617 | .min_keysize = AES_MIN_KEY_SIZE + | 1676 | .min_keysize = AES_MIN_KEY_SIZE + |
@@ -1650,7 +1709,7 @@ static struct caam_skcipher_alg driver_algs[] = { | |||
1650 | .cra_driver_name = "chacha20-caam-qi2", | 1709 | .cra_driver_name = "chacha20-caam-qi2", |
1651 | .cra_blocksize = 1, | 1710 | .cra_blocksize = 1, |
1652 | }, | 1711 | }, |
1653 | .setkey = skcipher_setkey, | 1712 | .setkey = chacha20_skcipher_setkey, |
1654 | .encrypt = skcipher_encrypt, | 1713 | .encrypt = skcipher_encrypt, |
1655 | .decrypt = skcipher_decrypt, | 1714 | .decrypt = skcipher_decrypt, |
1656 | .min_keysize = CHACHA_KEY_SIZE, | 1715 | .min_keysize = CHACHA_KEY_SIZE, |
@@ -2918,6 +2977,7 @@ enum hash_optype { | |||
2918 | /** | 2977 | /** |
2919 | * caam_hash_ctx - ahash per-session context | 2978 | * caam_hash_ctx - ahash per-session context |
2920 | * @flc: Flow Contexts array | 2979 | * @flc: Flow Contexts array |
2980 | * @key: authentication key | ||
2921 | * @flc_dma: I/O virtual addresses of the Flow Contexts | 2981 | * @flc_dma: I/O virtual addresses of the Flow Contexts |
2922 | * @dev: dpseci device | 2982 | * @dev: dpseci device |
2923 | * @ctx_len: size of Context Register | 2983 | * @ctx_len: size of Context Register |
@@ -2925,6 +2985,7 @@ enum hash_optype { | |||
2925 | */ | 2985 | */ |
2926 | struct caam_hash_ctx { | 2986 | struct caam_hash_ctx { |
2927 | struct caam_flc flc[HASH_NUM_OP]; | 2987 | struct caam_flc flc[HASH_NUM_OP]; |
2988 | u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; | ||
2928 | dma_addr_t flc_dma[HASH_NUM_OP]; | 2989 | dma_addr_t flc_dma[HASH_NUM_OP]; |
2929 | struct device *dev; | 2990 | struct device *dev; |
2930 | int ctx_len; | 2991 | int ctx_len; |
@@ -3094,10 +3155,7 @@ static void split_key_sh_done(void *cbk_ctx, u32 err) | |||
3094 | 3155 | ||
3095 | dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 3156 | dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
3096 | 3157 | ||
3097 | if (err) | 3158 | res->err = err ? caam_qi2_strstatus(res->dev, err) : 0; |
3098 | caam_qi2_strstatus(res->dev, err); | ||
3099 | |||
3100 | res->err = err; | ||
3101 | complete(&res->completion); | 3159 | complete(&res->completion); |
3102 | } | 3160 | } |
3103 | 3161 | ||
@@ -3228,6 +3286,19 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, | |||
3228 | ctx->adata.key_virt = key; | 3286 | ctx->adata.key_virt = key; |
3229 | ctx->adata.key_inline = true; | 3287 | ctx->adata.key_inline = true; |
3230 | 3288 | ||
3289 | /* | ||
3290 | * In case |user key| > |derived key|, using DKP<imm,imm> would result | ||
3291 | * in invalid opcodes (last bytes of user key) in the resulting | ||
3292 | * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key | ||
3293 | * addresses are needed. | ||
3294 | */ | ||
3295 | if (keylen > ctx->adata.keylen_pad) { | ||
3296 | memcpy(ctx->key, key, keylen); | ||
3297 | dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma, | ||
3298 | ctx->adata.keylen_pad, | ||
3299 | DMA_TO_DEVICE); | ||
3300 | } | ||
3301 | |||
3231 | ret = ahash_set_sh_desc(ahash); | 3302 | ret = ahash_set_sh_desc(ahash); |
3232 | kfree(hashed_key); | 3303 | kfree(hashed_key); |
3233 | return ret; | 3304 | return ret; |
@@ -3282,10 +3353,8 @@ static void ahash_done(void *cbk_ctx, u32 status) | |||
3282 | 3353 | ||
3283 | dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); | 3354 | dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); |
3284 | 3355 | ||
3285 | if (unlikely(status)) { | 3356 | if (unlikely(status)) |
3286 | caam_qi2_strstatus(ctx->dev, status); | 3357 | ecode = caam_qi2_strstatus(ctx->dev, status); |
3287 | ecode = -EIO; | ||
3288 | } | ||
3289 | 3358 | ||
3290 | ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); | 3359 | ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); |
3291 | memcpy(req->result, state->caam_ctx, digestsize); | 3360 | memcpy(req->result, state->caam_ctx, digestsize); |
@@ -3310,10 +3379,8 @@ static void ahash_done_bi(void *cbk_ctx, u32 status) | |||
3310 | 3379 | ||
3311 | dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); | 3380 | dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); |
3312 | 3381 | ||
3313 | if (unlikely(status)) { | 3382 | if (unlikely(status)) |
3314 | caam_qi2_strstatus(ctx->dev, status); | 3383 | ecode = caam_qi2_strstatus(ctx->dev, status); |
3315 | ecode = -EIO; | ||
3316 | } | ||
3317 | 3384 | ||
3318 | ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); | 3385 | ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); |
3319 | switch_buf(state); | 3386 | switch_buf(state); |
@@ -3343,10 +3410,8 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status) | |||
3343 | 3410 | ||
3344 | dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); | 3411 | dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); |
3345 | 3412 | ||
3346 | if (unlikely(status)) { | 3413 | if (unlikely(status)) |
3347 | caam_qi2_strstatus(ctx->dev, status); | 3414 | ecode = caam_qi2_strstatus(ctx->dev, status); |
3348 | ecode = -EIO; | ||
3349 | } | ||
3350 | 3415 | ||
3351 | ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); | 3416 | ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); |
3352 | memcpy(req->result, state->caam_ctx, digestsize); | 3417 | memcpy(req->result, state->caam_ctx, digestsize); |
@@ -3371,10 +3436,8 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status) | |||
3371 | 3436 | ||
3372 | dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); | 3437 | dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); |
3373 | 3438 | ||
3374 | if (unlikely(status)) { | 3439 | if (unlikely(status)) |
3375 | caam_qi2_strstatus(ctx->dev, status); | 3440 | ecode = caam_qi2_strstatus(ctx->dev, status); |
3376 | ecode = -EIO; | ||
3377 | } | ||
3378 | 3441 | ||
3379 | ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); | 3442 | ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); |
3380 | switch_buf(state); | 3443 | switch_buf(state); |
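
All four completion callbacks above switch from a blanket -EIO to whatever caam_qi2_strstatus() decodes from the hardware status word, so callers finally see a meaningful errno (e.g. -EBADMSG for an ICV mismatch rather than a generic I/O error). This assumes the wrappers keep their existing shape in error.h, with the shared decoder changed to return int; a sketch:

    /* error.h (sketch): the status decoders now report an errno */
    int caam_strstatus(struct device *dev, u32 status, bool qi_v2);

    #define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
    #define caam_jr_strstatus(jrdev, status)  caam_strstatus(jrdev, status, false)

The caamhash.c and caampkc.c hunks further down apply the same conversion on the job-ring side via caam_jr_strstatus().
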
@@ -4466,11 +4529,27 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) | |||
4466 | 4529 | ||
4467 | ctx->dev = caam_hash->dev; | 4530 | ctx->dev = caam_hash->dev; |
4468 | 4531 | ||
4532 | if (alg->setkey) { | ||
4533 | ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key, | ||
4534 | ARRAY_SIZE(ctx->key), | ||
4535 | DMA_TO_DEVICE, | ||
4536 | DMA_ATTR_SKIP_CPU_SYNC); | ||
4537 | if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) { | ||
4538 | dev_err(ctx->dev, "unable to map key\n"); | ||
4539 | return -ENOMEM; | ||
4540 | } | ||
4541 | } | ||
4542 | |||
4469 | dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc), | 4543 | dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc), |
4470 | DMA_BIDIRECTIONAL, | 4544 | DMA_BIDIRECTIONAL, |
4471 | DMA_ATTR_SKIP_CPU_SYNC); | 4545 | DMA_ATTR_SKIP_CPU_SYNC); |
4472 | if (dma_mapping_error(ctx->dev, dma_addr)) { | 4546 | if (dma_mapping_error(ctx->dev, dma_addr)) { |
4473 | dev_err(ctx->dev, "unable to map shared descriptors\n"); | 4547 | dev_err(ctx->dev, "unable to map shared descriptors\n"); |
4548 | if (ctx->adata.key_dma) | ||
4549 | dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma, | ||
4550 | ARRAY_SIZE(ctx->key), | ||
4551 | DMA_TO_DEVICE, | ||
4552 | DMA_ATTR_SKIP_CPU_SYNC); | ||
4474 | return -ENOMEM; | 4553 | return -ENOMEM; |
4475 | } | 4554 | } |
4476 | 4555 | ||
@@ -4496,6 +4575,10 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm) | |||
4496 | 4575 | ||
4497 | dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc), | 4576 | dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc), |
4498 | DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC); | 4577 | DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC); |
4578 | if (ctx->adata.key_dma) | ||
4579 | dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma, | ||
4580 | ARRAY_SIZE(ctx->key), DMA_TO_DEVICE, | ||
4581 | DMA_ATTR_SKIP_CPU_SYNC); | ||
4499 | } | 4582 | } |
4500 | 4583 | ||
4501 | static struct caam_hash_alg *caam_hash_alloc(struct device *dev, | 4584 | static struct caam_hash_alg *caam_hash_alloc(struct device *dev, |
@@ -4700,7 +4783,7 @@ static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv, | |||
4700 | 4783 | ||
4701 | fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK; | 4784 | fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK; |
4702 | if (unlikely(fd_err)) | 4785 | if (unlikely(fd_err)) |
4703 | dev_err(priv->dev, "FD error: %08x\n", fd_err); | 4786 | dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err); |
4704 | 4787 | ||
4705 | /* | 4788 | /* |
4706 | * FD[ADDR] is guaranteed to be valid, irrespective of errors reported | 4789 | * FD[ADDR] is guaranteed to be valid, irrespective of errors reported |
@@ -5098,6 +5181,8 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev) | |||
5098 | goto err_bind; | 5181 | goto err_bind; |
5099 | } | 5182 | } |
5100 | 5183 | ||
5184 | dpaa2_dpseci_debugfs_init(priv); | ||
5185 | |||
5101 | /* register crypto algorithms the device supports */ | 5186 | /* register crypto algorithms the device supports */ |
5102 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { | 5187 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
5103 | struct caam_skcipher_alg *t_alg = driver_algs + i; | 5188 | struct caam_skcipher_alg *t_alg = driver_algs + i; |
@@ -5265,6 +5350,8 @@ static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev) | |||
5265 | dev = &ls_dev->dev; | 5350 | dev = &ls_dev->dev; |
5266 | priv = dev_get_drvdata(dev); | 5351 | priv = dev_get_drvdata(dev); |
5267 | 5352 | ||
5353 | dpaa2_dpseci_debugfs_exit(priv); | ||
5354 | |||
5268 | for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { | 5355 | for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { |
5269 | struct caam_aead_alg *t_alg = driver_aeads + i; | 5356 | struct caam_aead_alg *t_alg = driver_aeads + i; |
5270 | 5357 | ||
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h index be5085451053..706736776b47 100644 --- a/drivers/crypto/caam/caamalg_qi2.h +++ b/drivers/crypto/caam/caamalg_qi2.h | |||
@@ -10,12 +10,13 @@ | |||
10 | #include <soc/fsl/dpaa2-io.h> | 10 | #include <soc/fsl/dpaa2-io.h> |
11 | #include <soc/fsl/dpaa2-fd.h> | 11 | #include <soc/fsl/dpaa2-fd.h> |
12 | #include <linux/threads.h> | 12 | #include <linux/threads.h> |
13 | #include <linux/netdevice.h> | ||
13 | #include "dpseci.h" | 14 | #include "dpseci.h" |
14 | #include "desc_constr.h" | 15 | #include "desc_constr.h" |
15 | 16 | ||
16 | #define DPAA2_CAAM_STORE_SIZE 16 | 17 | #define DPAA2_CAAM_STORE_SIZE 16 |
17 | /* NAPI weight *must* be a multiple of the store size. */ | 18 | /* NAPI weight *must* be a multiple of the store size. */ |
18 | #define DPAA2_CAAM_NAPI_WEIGHT 64 | 19 | #define DPAA2_CAAM_NAPI_WEIGHT 512 |
19 | 20 | ||
20 | /* The congestion entrance threshold was chosen so that on LS2088 | 21 | /* The congestion entrance threshold was chosen so that on LS2088 |
21 | * we support the maximum throughput for the available memory | 22 | * we support the maximum throughput for the available memory |
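
Raising the NAPI weight from 64 to 512 lets one poll cycle drain several store refills (512 = 32 * 16, with DPAA2_CAAM_STORE_SIZE at 16). The multiple-of-store-size rule in the comment is enforced only by convention; a compile-time guard, not part of this patch, could pin it down if placed inside any function in the driver:

    /* hypothetical guard for the invariant stated in the comment above */
    BUILD_BUG_ON(DPAA2_CAAM_NAPI_WEIGHT % DPAA2_CAAM_STORE_SIZE != 0);
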
@@ -64,6 +65,7 @@ struct dpaa2_caam_priv { | |||
64 | struct iommu_domain *domain; | 65 | struct iommu_domain *domain; |
65 | 66 | ||
66 | struct dpaa2_caam_priv_per_cpu __percpu *ppriv; | 67 | struct dpaa2_caam_priv_per_cpu __percpu *ppriv; |
68 | struct dentry *dfs_root; | ||
67 | }; | 69 | }; |
68 | 70 | ||
69 | /** | 71 | /** |
@@ -90,33 +92,6 @@ struct dpaa2_caam_priv_per_cpu { | |||
90 | struct dpaa2_io *dpio; | 92 | struct dpaa2_io *dpio; |
91 | }; | 93 | }; |
92 | 94 | ||
93 | /* | ||
94 | * The CAAM QI hardware constructs a job descriptor which points | ||
95 | * to shared descriptor (as pointed by context_a of FQ to CAAM). | ||
96 | * When the job descriptor is executed by deco, the whole job | ||
97 | * descriptor together with shared descriptor gets loaded in | ||
98 | * deco buffer which is 64 words long (each 32-bit). | ||
99 | * | ||
100 | * The job descriptor constructed by QI hardware has layout: | ||
101 | * | ||
102 | * HEADER (1 word) | ||
103 | * Shdesc ptr (1 or 2 words) | ||
104 | * SEQ_OUT_PTR (1 word) | ||
105 | * Out ptr (1 or 2 words) | ||
106 | * Out length (1 word) | ||
107 | * SEQ_IN_PTR (1 word) | ||
108 | * In ptr (1 or 2 words) | ||
109 | * In length (1 word) | ||
110 | * | ||
111 | * The shdesc ptr is used to fetch shared descriptor contents | ||
112 | * into deco buffer. | ||
113 | * | ||
114 | * Apart from shdesc contents, the total number of words that | ||
115 | * get loaded in deco buffer are '8' or '11'. The remaining words | ||
116 | * in deco buffer can be used for storing shared descriptor. | ||
117 | */ | ||
118 | #define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ) | ||
119 | |||
120 | /* Length of a single buffer in the QI driver memory cache */ | 95 | /* Length of a single buffer in the QI driver memory cache */ |
121 | #define CAAM_QI_MEMCACHE_SIZE 512 | 96 | #define CAAM_QI_MEMCACHE_SIZE 512 |
122 | 97 | ||
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index e4ac5d591ad6..65399cb2a770 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
@@ -95,8 +95,8 @@ struct caam_hash_ctx { | |||
95 | dma_addr_t sh_desc_update_first_dma; | 95 | dma_addr_t sh_desc_update_first_dma; |
96 | dma_addr_t sh_desc_fin_dma; | 96 | dma_addr_t sh_desc_fin_dma; |
97 | dma_addr_t sh_desc_digest_dma; | 97 | dma_addr_t sh_desc_digest_dma; |
98 | dma_addr_t key_dma; | ||
99 | enum dma_data_direction dir; | 98 | enum dma_data_direction dir; |
99 | enum dma_data_direction key_dir; | ||
100 | struct device *jrdev; | 100 | struct device *jrdev; |
101 | int ctx_len; | 101 | int ctx_len; |
102 | struct alginfo adata; | 102 | struct alginfo adata; |
@@ -282,13 +282,10 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash) | |||
282 | struct device *jrdev = ctx->jrdev; | 282 | struct device *jrdev = ctx->jrdev; |
283 | u32 *desc; | 283 | u32 *desc; |
284 | 284 | ||
285 | /* key is loaded from memory for UPDATE and FINALIZE states */ | ||
286 | ctx->adata.key_dma = ctx->key_dma; | ||
287 | |||
288 | /* shared descriptor for ahash_update */ | 285 | /* shared descriptor for ahash_update */ |
289 | desc = ctx->sh_desc_update; | 286 | desc = ctx->sh_desc_update; |
290 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, | 287 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, |
291 | ctx->ctx_len, ctx->ctx_len, 0); | 288 | ctx->ctx_len, ctx->ctx_len); |
292 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, | 289 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, |
293 | desc_bytes(desc), ctx->dir); | 290 | desc_bytes(desc), ctx->dir); |
294 | print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ", | 291 | print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ", |
@@ -298,7 +295,7 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash) | |||
298 | /* shared descriptor for ahash_{final,finup} */ | 295 | /* shared descriptor for ahash_{final,finup} */ |
299 | desc = ctx->sh_desc_fin; | 296 | desc = ctx->sh_desc_fin; |
300 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, | 297 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, |
301 | digestsize, ctx->ctx_len, 0); | 298 | digestsize, ctx->ctx_len); |
302 | dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, | 299 | dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, |
303 | desc_bytes(desc), ctx->dir); | 300 | desc_bytes(desc), ctx->dir); |
304 | print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ", | 301 | print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ", |
@@ -311,7 +308,7 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash) | |||
311 | /* shared descriptor for first invocation of ahash_update */ | 308 | /* shared descriptor for first invocation of ahash_update */ |
312 | desc = ctx->sh_desc_update_first; | 309 | desc = ctx->sh_desc_update_first; |
313 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, | 310 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, |
314 | ctx->ctx_len, ctx->key_dma); | 311 | ctx->ctx_len); |
315 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, | 312 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, |
316 | desc_bytes(desc), ctx->dir); | 313 | desc_bytes(desc), ctx->dir); |
317 | print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__) | 314 | print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__) |
@@ -321,7 +318,7 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash) | |||
321 | /* shared descriptor for ahash_digest */ | 318 | /* shared descriptor for ahash_digest */ |
322 | desc = ctx->sh_desc_digest; | 319 | desc = ctx->sh_desc_digest; |
323 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, | 320 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, |
324 | digestsize, ctx->ctx_len, 0); | 321 | digestsize, ctx->ctx_len); |
325 | dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, | 322 | dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, |
326 | desc_bytes(desc), ctx->dir); | 323 | desc_bytes(desc), ctx->dir); |
327 | print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ", | 324 | print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ", |
@@ -340,7 +337,7 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash) | |||
340 | /* shared descriptor for ahash_update */ | 337 | /* shared descriptor for ahash_update */ |
341 | desc = ctx->sh_desc_update; | 338 | desc = ctx->sh_desc_update; |
342 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, | 339 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, |
343 | ctx->ctx_len, ctx->ctx_len, 0); | 340 | ctx->ctx_len, ctx->ctx_len); |
344 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, | 341 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, |
345 | desc_bytes(desc), ctx->dir); | 342 | desc_bytes(desc), ctx->dir); |
346 | print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ", | 343 | print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ", |
@@ -350,7 +347,7 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash) | |||
350 | /* shared descriptor for ahash_{final,finup} */ | 347 | /* shared descriptor for ahash_{final,finup} */ |
351 | desc = ctx->sh_desc_fin; | 348 | desc = ctx->sh_desc_fin; |
352 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, | 349 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, |
353 | digestsize, ctx->ctx_len, 0); | 350 | digestsize, ctx->ctx_len); |
354 | dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, | 351 | dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, |
355 | desc_bytes(desc), ctx->dir); | 352 | desc_bytes(desc), ctx->dir); |
356 | print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ", | 353 | print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ", |
@@ -360,7 +357,7 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash) | |||
360 | /* shared descriptor for first invocation of ahash_update */ | 357 | /* shared descriptor for first invocation of ahash_update */ |
361 | desc = ctx->sh_desc_update_first; | 358 | desc = ctx->sh_desc_update_first; |
362 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, | 359 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, |
363 | ctx->ctx_len, 0); | 360 | ctx->ctx_len); |
364 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, | 361 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, |
365 | desc_bytes(desc), ctx->dir); | 362 | desc_bytes(desc), ctx->dir); |
366 | print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__) | 363 | print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__) |
@@ -370,7 +367,7 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash) | |||
370 | /* shared descriptor for ahash_digest */ | 367 | /* shared descriptor for ahash_digest */ |
371 | desc = ctx->sh_desc_digest; | 368 | desc = ctx->sh_desc_digest; |
372 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, | 369 | cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, |
373 | digestsize, ctx->ctx_len, 0); | 370 | digestsize, ctx->ctx_len); |
374 | dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, | 371 | dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, |
375 | desc_bytes(desc), ctx->dir); | 372 | desc_bytes(desc), ctx->dir); |
376 | print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ", | 373 | print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ", |
@@ -480,6 +477,18 @@ static int ahash_setkey(struct crypto_ahash *ahash, | |||
480 | goto bad_free_key; | 477 | goto bad_free_key; |
481 | 478 | ||
482 | memcpy(ctx->key, key, keylen); | 479 | memcpy(ctx->key, key, keylen); |
480 | |||
481 | /* | ||
482 | * In case |user key| > |derived key|, using DKP<imm,imm> | ||
483 | * would result in invalid opcodes (last bytes of user key) in | ||
484 | * the resulting descriptor. Use DKP<ptr,imm> instead => both | ||
485 | * virtual and dma key addresses are needed. | ||
486 | */ | ||
487 | if (keylen > ctx->adata.keylen_pad) | ||
488 | dma_sync_single_for_device(ctx->jrdev, | ||
489 | ctx->adata.key_dma, | ||
490 | ctx->adata.keylen_pad, | ||
491 | DMA_TO_DEVICE); | ||
483 | } else { | 492 | } else { |
484 | ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, | 493 | ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, |
485 | keylen, CAAM_MAX_HASH_KEY_SIZE); | 494 | keylen, CAAM_MAX_HASH_KEY_SIZE); |
@@ -501,8 +510,14 @@ static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key, | |||
501 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 510 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
502 | struct device *jrdev = ctx->jrdev; | 511 | struct device *jrdev = ctx->jrdev; |
503 | 512 | ||
513 | if (keylen != AES_KEYSIZE_128) { | ||
514 | crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
515 | return -EINVAL; | ||
516 | } | ||
517 | |||
504 | memcpy(ctx->key, key, keylen); | 518 | memcpy(ctx->key, key, keylen); |
505 | dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); | 519 | dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen, |
520 | DMA_TO_DEVICE); | ||
506 | ctx->adata.keylen = keylen; | 521 | ctx->adata.keylen = keylen; |
507 | 522 | ||
508 | print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ", | 523 | print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ", |
@@ -515,6 +530,13 @@ static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key, | |||
515 | unsigned int keylen) | 530 | unsigned int keylen) |
516 | { | 531 | { |
517 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 532 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
533 | int err; | ||
534 | |||
535 | err = aes_check_keylen(keylen); | ||
536 | if (err) { | ||
537 | crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
538 | return err; | ||
539 | } | ||
518 | 540 | ||
519 | /* key is immediate data for all cmac shared descriptors */ | 541 | /* key is immediate data for all cmac shared descriptors */ |
520 | ctx->adata.key_virt = key; | 542 | ctx->adata.key_virt = key; |
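
Unlike the XCBC path above, which hardwires AES_KEYSIZE_128, CMAC accepts all three AES key sizes, hence the generic helper. aes_check_keylen() comes from <crypto/aes.h>; its body is roughly:

    static inline int aes_check_keylen(unsigned int keylen)
    {
            switch (keylen) {
            case AES_KEYSIZE_128:
            case AES_KEYSIZE_192:
            case AES_KEYSIZE_256:
                    break;
            default:
                    return -EINVAL;
            }

            return 0;
    }
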
@@ -538,7 +560,7 @@ struct ahash_edesc { | |||
538 | dma_addr_t sec4_sg_dma; | 560 | dma_addr_t sec4_sg_dma; |
539 | int src_nents; | 561 | int src_nents; |
540 | int sec4_sg_bytes; | 562 | int sec4_sg_bytes; |
541 | u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned; | 563 | u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned; |
542 | struct sec4_sg_entry sec4_sg[0]; | 564 | struct sec4_sg_entry sec4_sg[0]; |
543 | }; | 565 | }; |
544 | 566 | ||
@@ -584,12 +606,13 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err, | |||
584 | int digestsize = crypto_ahash_digestsize(ahash); | 606 | int digestsize = crypto_ahash_digestsize(ahash); |
585 | struct caam_hash_state *state = ahash_request_ctx(req); | 607 | struct caam_hash_state *state = ahash_request_ctx(req); |
586 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 608 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
609 | int ecode = 0; | ||
587 | 610 | ||
588 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 611 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
589 | 612 | ||
590 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); | 613 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); |
591 | if (err) | 614 | if (err) |
592 | caam_jr_strstatus(jrdev, err); | 615 | ecode = caam_jr_strstatus(jrdev, err); |
593 | 616 | ||
594 | ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); | 617 | ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); |
595 | memcpy(req->result, state->caam_ctx, digestsize); | 618 | memcpy(req->result, state->caam_ctx, digestsize); |
@@ -599,7 +622,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err, | |||
599 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | 622 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, |
600 | ctx->ctx_len, 1); | 623 | ctx->ctx_len, 1); |
601 | 624 | ||
602 | req->base.complete(&req->base, err); | 625 | req->base.complete(&req->base, ecode); |
603 | } | 626 | } |
604 | 627 | ||
605 | static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, | 628 | static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, |
@@ -611,12 +634,13 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, | |||
611 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 634 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
612 | struct caam_hash_state *state = ahash_request_ctx(req); | 635 | struct caam_hash_state *state = ahash_request_ctx(req); |
613 | int digestsize = crypto_ahash_digestsize(ahash); | 636 | int digestsize = crypto_ahash_digestsize(ahash); |
637 | int ecode = 0; | ||
614 | 638 | ||
615 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 639 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
616 | 640 | ||
617 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); | 641 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); |
618 | if (err) | 642 | if (err) |
619 | caam_jr_strstatus(jrdev, err); | 643 | ecode = caam_jr_strstatus(jrdev, err); |
620 | 644 | ||
621 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); | 645 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); |
622 | switch_buf(state); | 646 | switch_buf(state); |
@@ -630,7 +654,7 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, | |||
630 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, | 654 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, |
631 | digestsize, 1); | 655 | digestsize, 1); |
632 | 656 | ||
633 | req->base.complete(&req->base, err); | 657 | req->base.complete(&req->base, ecode); |
634 | } | 658 | } |
635 | 659 | ||
636 | static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, | 660 | static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, |
@@ -642,12 +666,13 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, | |||
642 | int digestsize = crypto_ahash_digestsize(ahash); | 666 | int digestsize = crypto_ahash_digestsize(ahash); |
643 | struct caam_hash_state *state = ahash_request_ctx(req); | 667 | struct caam_hash_state *state = ahash_request_ctx(req); |
644 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 668 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
669 | int ecode = 0; | ||
645 | 670 | ||
646 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 671 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
647 | 672 | ||
648 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); | 673 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); |
649 | if (err) | 674 | if (err) |
650 | caam_jr_strstatus(jrdev, err); | 675 | ecode = caam_jr_strstatus(jrdev, err); |
651 | 676 | ||
652 | ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); | 677 | ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); |
653 | memcpy(req->result, state->caam_ctx, digestsize); | 678 | memcpy(req->result, state->caam_ctx, digestsize); |
@@ -657,7 +682,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, | |||
657 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | 682 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, |
658 | ctx->ctx_len, 1); | 683 | ctx->ctx_len, 1); |
659 | 684 | ||
660 | req->base.complete(&req->base, err); | 685 | req->base.complete(&req->base, ecode); |
661 | } | 686 | } |
662 | 687 | ||
663 | static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, | 688 | static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, |
@@ -669,12 +694,13 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, | |||
669 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 694 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
670 | struct caam_hash_state *state = ahash_request_ctx(req); | 695 | struct caam_hash_state *state = ahash_request_ctx(req); |
671 | int digestsize = crypto_ahash_digestsize(ahash); | 696 | int digestsize = crypto_ahash_digestsize(ahash); |
697 | int ecode = 0; | ||
672 | 698 | ||
673 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 699 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
674 | 700 | ||
675 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); | 701 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); |
676 | if (err) | 702 | if (err) |
677 | caam_jr_strstatus(jrdev, err); | 703 | ecode = caam_jr_strstatus(jrdev, err); |
678 | 704 | ||
679 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); | 705 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); |
680 | switch_buf(state); | 706 | switch_buf(state); |
@@ -688,7 +714,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, | |||
688 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, | 714 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, |
689 | digestsize, 1); | 715 | digestsize, 1); |
690 | 716 | ||
691 | req->base.complete(&req->base, err); | 717 | req->base.complete(&req->base, ecode); |
692 | } | 718 | } |
693 | 719 | ||
694 | /* | 720 | /* |
@@ -1812,40 +1838,50 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) | |||
1812 | 1838 | ||
1813 | if (is_xcbc_aes(caam_hash->alg_type)) { | 1839 | if (is_xcbc_aes(caam_hash->alg_type)) { |
1814 | ctx->dir = DMA_TO_DEVICE; | 1840 | ctx->dir = DMA_TO_DEVICE; |
1841 | ctx->key_dir = DMA_BIDIRECTIONAL; | ||
1815 | ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; | 1842 | ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; |
1816 | ctx->ctx_len = 48; | 1843 | ctx->ctx_len = 48; |
1817 | |||
1818 | ctx->key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key, | ||
1819 | ARRAY_SIZE(ctx->key), | ||
1820 | DMA_BIDIRECTIONAL, | ||
1821 | DMA_ATTR_SKIP_CPU_SYNC); | ||
1822 | if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { | ||
1823 | dev_err(ctx->jrdev, "unable to map key\n"); | ||
1824 | caam_jr_free(ctx->jrdev); | ||
1825 | return -ENOMEM; | ||
1826 | } | ||
1827 | } else if (is_cmac_aes(caam_hash->alg_type)) { | 1844 | } else if (is_cmac_aes(caam_hash->alg_type)) { |
1828 | ctx->dir = DMA_TO_DEVICE; | 1845 | ctx->dir = DMA_TO_DEVICE; |
1846 | ctx->key_dir = DMA_NONE; | ||
1829 | ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; | 1847 | ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; |
1830 | ctx->ctx_len = 32; | 1848 | ctx->ctx_len = 32; |
1831 | } else { | 1849 | } else { |
1832 | ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; | 1850 | if (priv->era >= 6) { |
1851 | ctx->dir = DMA_BIDIRECTIONAL; | ||
1852 | ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE; | ||
1853 | } else { | ||
1854 | ctx->dir = DMA_TO_DEVICE; | ||
1855 | ctx->key_dir = DMA_NONE; | ||
1856 | } | ||
1833 | ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; | 1857 | ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; |
1834 | ctx->ctx_len = runninglen[(ctx->adata.algtype & | 1858 | ctx->ctx_len = runninglen[(ctx->adata.algtype & |
1835 | OP_ALG_ALGSEL_SUBMASK) >> | 1859 | OP_ALG_ALGSEL_SUBMASK) >> |
1836 | OP_ALG_ALGSEL_SHIFT]; | 1860 | OP_ALG_ALGSEL_SHIFT]; |
1837 | } | 1861 | } |
1838 | 1862 | ||
1863 | if (ctx->key_dir != DMA_NONE) { | ||
1864 | ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key, | ||
1865 | ARRAY_SIZE(ctx->key), | ||
1866 | ctx->key_dir, | ||
1867 | DMA_ATTR_SKIP_CPU_SYNC); | ||
1868 | if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) { | ||
1869 | dev_err(ctx->jrdev, "unable to map key\n"); | ||
1870 | caam_jr_free(ctx->jrdev); | ||
1871 | return -ENOMEM; | ||
1872 | } | ||
1873 | } | ||
1874 | |||
1839 | dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, | 1875 | dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, |
1840 | offsetof(struct caam_hash_ctx, key), | 1876 | offsetof(struct caam_hash_ctx, key), |
1841 | ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); | 1877 | ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); |
1842 | if (dma_mapping_error(ctx->jrdev, dma_addr)) { | 1878 | if (dma_mapping_error(ctx->jrdev, dma_addr)) { |
1843 | dev_err(ctx->jrdev, "unable to map shared descriptors\n"); | 1879 | dev_err(ctx->jrdev, "unable to map shared descriptors\n"); |
1844 | 1880 | ||
1845 | if (is_xcbc_aes(caam_hash->alg_type)) | 1881 | if (ctx->key_dir != DMA_NONE) |
1846 | dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma, | 1882 | dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, |
1847 | ARRAY_SIZE(ctx->key), | 1883 | ARRAY_SIZE(ctx->key), |
1848 | DMA_BIDIRECTIONAL, | 1884 | ctx->key_dir, |
1849 | DMA_ATTR_SKIP_CPU_SYNC); | 1885 | DMA_ATTR_SKIP_CPU_SYNC); |
1850 | 1886 | ||
1851 | caam_jr_free(ctx->jrdev); | 1887 | caam_jr_free(ctx->jrdev); |
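
key_dir makes the three key-handling policies explicit instead of re-deriving them from the algorithm type at unmap time. Summarized (the XCBC write-back is the append_fifo_store() visible in the caamhash_desc.c hunk further down):

    /*
     * algorithm       key_dir            why
     * XCBC-AES        DMA_BIDIRECTIONAL  INIT descriptor stores derived K1
     *                                    back into ctx->key
     * CMAC-AES        DMA_NONE           key is always descriptor-immediate
     * HMAC, era >= 6  DMA_TO_DEVICE      DKP<ptr,imm> may read the key via DMA
     * HMAC, era < 6   DMA_NONE           split key built by gen_split_key()
     */
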
@@ -1878,9 +1914,9 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm) | |||
1878 | dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, | 1914 | dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, |
1879 | offsetof(struct caam_hash_ctx, key), | 1915 | offsetof(struct caam_hash_ctx, key), |
1880 | ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); | 1916 | ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); |
1881 | if (is_xcbc_aes(ctx->adata.algtype)) | 1917 | if (ctx->key_dir != DMA_NONE) |
1882 | dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma, | 1918 | dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, |
1883 | ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL, | 1919 | ARRAY_SIZE(ctx->key), ctx->key_dir, |
1884 | DMA_ATTR_SKIP_CPU_SYNC); | 1920 | DMA_ATTR_SKIP_CPU_SYNC); |
1885 | caam_jr_free(ctx->jrdev); | 1921 | caam_jr_free(ctx->jrdev); |
1886 | } | 1922 | } |
@@ -1971,7 +2007,7 @@ int caam_algapi_hash_init(struct device *ctrldev) | |||
1971 | * is not present. | 2007 | * is not present. |
1972 | */ | 2008 | */ |
1973 | if (!md_inst) | 2009 | if (!md_inst) |
1974 | return -ENODEV; | 2010 | return 0; |
1975 | 2011 | ||
1976 | /* Limit digest size based on LP256 */ | 2012 | /* Limit digest size based on LP256 */ |
1977 | if (md_vid == CHA_VER_VID_MD_LP256) | 2013 | if (md_vid == CHA_VER_VID_MD_LP256) |
diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c index 71d018343ee4..78383d77da99 100644 --- a/drivers/crypto/caam/caamhash_desc.c +++ b/drivers/crypto/caam/caamhash_desc.c | |||
@@ -83,10 +83,9 @@ EXPORT_SYMBOL(cnstr_shdsc_ahash); | |||
83 | * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE} | 83 | * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE} |
84 | * @digestsize: algorithm's digest size | 84 | * @digestsize: algorithm's digest size |
85 | * @ctx_len: size of Context Register | 85 | * @ctx_len: size of Context Register |
86 | * @key_dma: I/O Virtual Address of the key | ||
87 | */ | 86 | */ |
88 | void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state, | 87 | void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state, |
89 | int digestsize, int ctx_len, dma_addr_t key_dma) | 88 | int digestsize, int ctx_len) |
90 | { | 89 | { |
91 | u32 *skip_key_load; | 90 | u32 *skip_key_load; |
92 | 91 | ||
@@ -136,7 +135,7 @@ void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state, | |||
136 | LDST_SRCDST_BYTE_CONTEXT); | 135 | LDST_SRCDST_BYTE_CONTEXT); |
137 | if (is_xcbc_aes(adata->algtype) && state == OP_ALG_AS_INIT) | 136 | if (is_xcbc_aes(adata->algtype) && state == OP_ALG_AS_INIT) |
138 | /* Save K1 */ | 137 | /* Save K1 */ |
139 | append_fifo_store(desc, key_dma, adata->keylen, | 138 | append_fifo_store(desc, adata->key_dma, adata->keylen, |
140 | LDST_CLASS_1_CCB | FIFOST_TYPE_KEY_KEK); | 139 | LDST_CLASS_1_CCB | FIFOST_TYPE_KEY_KEK); |
141 | } | 140 | } |
142 | EXPORT_SYMBOL(cnstr_shdsc_sk_hash); | 141 | EXPORT_SYMBOL(cnstr_shdsc_sk_hash); |
diff --git a/drivers/crypto/caam/caamhash_desc.h b/drivers/crypto/caam/caamhash_desc.h index 6947ee1f200c..4f369b8cb6ae 100644 --- a/drivers/crypto/caam/caamhash_desc.h +++ b/drivers/crypto/caam/caamhash_desc.h | |||
@@ -25,5 +25,5 @@ void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state, | |||
25 | int digestsize, int ctx_len, bool import_ctx, int era); | 25 | int digestsize, int ctx_len, bool import_ctx, int era); |
26 | 26 | ||
27 | void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state, | 27 | void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state, |
28 | int digestsize, int ctx_len, dma_addr_t key_dma); | 28 | int digestsize, int ctx_len); |
29 | #endif /* _CAAMHASH_DESC_H_ */ | 29 | #endif /* _CAAMHASH_DESC_H_ */ |
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 80574106af29..83f96d4f86e0 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c | |||
@@ -17,18 +17,29 @@ | |||
17 | #include "sg_sw_sec4.h" | 17 | #include "sg_sw_sec4.h" |
18 | #include "caampkc.h" | 18 | #include "caampkc.h" |
19 | 19 | ||
20 | #define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb)) | 20 | #define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB) |
21 | #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \ | 21 | #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \ |
22 | sizeof(struct rsa_priv_f1_pdb)) | 22 | SIZEOF_RSA_PRIV_F1_PDB) |
23 | #define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \ | 23 | #define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \ |
24 | sizeof(struct rsa_priv_f2_pdb)) | 24 | SIZEOF_RSA_PRIV_F2_PDB) |
25 | #define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \ | 25 | #define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \ |
26 | sizeof(struct rsa_priv_f3_pdb)) | 26 | SIZEOF_RSA_PRIV_F3_PDB) |
27 | #define CAAM_RSA_MAX_INPUT_SIZE 512 /* for a 4096-bit modulus */ | 27 | #define CAAM_RSA_MAX_INPUT_SIZE 512 /* for a 4096-bit modulus */ |
28 | 28 | ||
29 | /* buffer filled with zeros, used for padding */ | 29 | /* buffer filled with zeros, used for padding */ |
30 | static u8 *zero_buffer; | 30 | static u8 *zero_buffer; |
31 | 31 | ||
32 | /* | ||
33 | * variable used to avoid double free of resources in case | ||
34 | * algorithm registration was unsuccessful | ||
35 | */ | ||
36 | static bool init_done; | ||
37 | |||
38 | struct caam_akcipher_alg { | ||
39 | struct akcipher_alg akcipher; | ||
40 | bool registered; | ||
41 | }; | ||
42 | |||
32 | static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc, | 43 | static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc, |
33 | struct akcipher_request *req) | 44 | struct akcipher_request *req) |
34 | { | 45 | { |
@@ -107,9 +118,10 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context) | |||
107 | { | 118 | { |
108 | struct akcipher_request *req = context; | 119 | struct akcipher_request *req = context; |
109 | struct rsa_edesc *edesc; | 120 | struct rsa_edesc *edesc; |
121 | int ecode = 0; | ||
110 | 122 | ||
111 | if (err) | 123 | if (err) |
112 | caam_jr_strstatus(dev, err); | 124 | ecode = caam_jr_strstatus(dev, err); |
113 | 125 | ||
114 | edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); | 126 | edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); |
115 | 127 | ||
@@ -117,7 +129,7 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context) | |||
117 | rsa_io_unmap(dev, edesc, req); | 129 | rsa_io_unmap(dev, edesc, req); |
118 | kfree(edesc); | 130 | kfree(edesc); |
119 | 131 | ||
120 | akcipher_request_complete(req, err); | 132 | akcipher_request_complete(req, ecode); |
121 | } | 133 | } |
122 | 134 | ||
123 | static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err, | 135 | static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err, |
@@ -125,9 +137,10 @@ static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err, | |||
125 | { | 137 | { |
126 | struct akcipher_request *req = context; | 138 | struct akcipher_request *req = context; |
127 | struct rsa_edesc *edesc; | 139 | struct rsa_edesc *edesc; |
140 | int ecode = 0; | ||
128 | 141 | ||
129 | if (err) | 142 | if (err) |
130 | caam_jr_strstatus(dev, err); | 143 | ecode = caam_jr_strstatus(dev, err); |
131 | 144 | ||
132 | edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); | 145 | edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); |
133 | 146 | ||
@@ -135,7 +148,7 @@ static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err, | |||
135 | rsa_io_unmap(dev, edesc, req); | 148 | rsa_io_unmap(dev, edesc, req); |
136 | kfree(edesc); | 149 | kfree(edesc); |
137 | 150 | ||
138 | akcipher_request_complete(req, err); | 151 | akcipher_request_complete(req, ecode); |
139 | } | 152 | } |
140 | 153 | ||
141 | static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err, | 154 | static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err, |
@@ -143,9 +156,10 @@ static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err, | |||
143 | { | 156 | { |
144 | struct akcipher_request *req = context; | 157 | struct akcipher_request *req = context; |
145 | struct rsa_edesc *edesc; | 158 | struct rsa_edesc *edesc; |
159 | int ecode = 0; | ||
146 | 160 | ||
147 | if (err) | 161 | if (err) |
148 | caam_jr_strstatus(dev, err); | 162 | ecode = caam_jr_strstatus(dev, err); |
149 | 163 | ||
150 | edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); | 164 | edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); |
151 | 165 | ||
@@ -153,7 +167,7 @@ static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err, | |||
153 | rsa_io_unmap(dev, edesc, req); | 167 | rsa_io_unmap(dev, edesc, req); |
154 | kfree(edesc); | 168 | kfree(edesc); |
155 | 169 | ||
156 | akcipher_request_complete(req, err); | 170 | akcipher_request_complete(req, ecode); |
157 | } | 171 | } |
158 | 172 | ||
159 | static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err, | 173 | static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err, |
@@ -161,9 +175,10 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err, | |||
161 | { | 175 | { |
162 | struct akcipher_request *req = context; | 176 | struct akcipher_request *req = context; |
163 | struct rsa_edesc *edesc; | 177 | struct rsa_edesc *edesc; |
178 | int ecode = 0; | ||
164 | 179 | ||
165 | if (err) | 180 | if (err) |
166 | caam_jr_strstatus(dev, err); | 181 | ecode = caam_jr_strstatus(dev, err); |
167 | 182 | ||
168 | edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); | 183 | edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); |
169 | 184 | ||
@@ -171,7 +186,7 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err, | |||
171 | rsa_io_unmap(dev, edesc, req); | 186 | rsa_io_unmap(dev, edesc, req); |
172 | kfree(edesc); | 187 | kfree(edesc); |
173 | 188 | ||
174 | akcipher_request_complete(req, err); | 189 | akcipher_request_complete(req, ecode); |
175 | } | 190 | } |
176 | 191 | ||
177 | /** | 192 | /** |
@@ -867,7 +882,7 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, | |||
867 | return ret; | 882 | return ret; |
868 | 883 | ||
869 | /* Copy key in DMA zone */ | 884 | /* Copy key in DMA zone */ |
870 | rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL); | 885 | rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL); |
871 | if (!rsa_key->e) | 886 | if (!rsa_key->e) |
872 | goto err; | 887 | goto err; |
873 | 888 | ||
@@ -889,8 +904,6 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, | |||
889 | rsa_key->e_sz = raw_key.e_sz; | 904 | rsa_key->e_sz = raw_key.e_sz; |
890 | rsa_key->n_sz = raw_key.n_sz; | 905 | rsa_key->n_sz = raw_key.n_sz; |
891 | 906 | ||
892 | memcpy(rsa_key->e, raw_key.e, raw_key.e_sz); | ||
893 | |||
894 | return 0; | 907 | return 0; |
895 | err: | 908 | err: |
896 | caam_rsa_free_key(rsa_key); | 909 | caam_rsa_free_key(rsa_key); |
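
kmemdup() collapses the allocate-and-copy pair and drops kzalloc()'s redundant zeroing of a buffer that was about to be fully overwritten. The transformation, side by side:

    /* before: two steps, plus needless zeroing */
    rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
    ...
    memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

    /* after: one step */
    rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
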
@@ -971,11 +984,11 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, | |||
971 | return ret; | 984 | return ret; |
972 | 985 | ||
973 | /* Copy key in DMA zone */ | 986 | /* Copy key in DMA zone */ |
974 | rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL); | 987 | rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL); |
975 | if (!rsa_key->d) | 988 | if (!rsa_key->d) |
976 | goto err; | 989 | goto err; |
977 | 990 | ||
978 | rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL); | 991 | rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL); |
979 | if (!rsa_key->e) | 992 | if (!rsa_key->e) |
980 | goto err; | 993 | goto err; |
981 | 994 | ||
@@ -998,9 +1011,6 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, | |||
998 | rsa_key->e_sz = raw_key.e_sz; | 1011 | rsa_key->e_sz = raw_key.e_sz; |
999 | rsa_key->n_sz = raw_key.n_sz; | 1012 | rsa_key->n_sz = raw_key.n_sz; |
1000 | 1013 | ||
1001 | memcpy(rsa_key->d, raw_key.d, raw_key.d_sz); | ||
1002 | memcpy(rsa_key->e, raw_key.e, raw_key.e_sz); | ||
1003 | |||
1004 | caam_rsa_set_priv_key_form(ctx, &raw_key); | 1014 | caam_rsa_set_priv_key_form(ctx, &raw_key); |
1005 | 1015 | ||
1006 | return 0; | 1016 | return 0; |
@@ -1053,22 +1063,24 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm) | |||
1053 | caam_jr_free(ctx->dev); | 1063 | caam_jr_free(ctx->dev); |
1054 | } | 1064 | } |
1055 | 1065 | ||
1056 | static struct akcipher_alg caam_rsa = { | 1066 | static struct caam_akcipher_alg caam_rsa = { |
1057 | .encrypt = caam_rsa_enc, | 1067 | .akcipher = { |
1058 | .decrypt = caam_rsa_dec, | 1068 | .encrypt = caam_rsa_enc, |
1059 | .set_pub_key = caam_rsa_set_pub_key, | 1069 | .decrypt = caam_rsa_dec, |
1060 | .set_priv_key = caam_rsa_set_priv_key, | 1070 | .set_pub_key = caam_rsa_set_pub_key, |
1061 | .max_size = caam_rsa_max_size, | 1071 | .set_priv_key = caam_rsa_set_priv_key, |
1062 | .init = caam_rsa_init_tfm, | 1072 | .max_size = caam_rsa_max_size, |
1063 | .exit = caam_rsa_exit_tfm, | 1073 | .init = caam_rsa_init_tfm, |
1064 | .reqsize = sizeof(struct caam_rsa_req_ctx), | 1074 | .exit = caam_rsa_exit_tfm, |
1065 | .base = { | 1075 | .reqsize = sizeof(struct caam_rsa_req_ctx), |
1066 | .cra_name = "rsa", | 1076 | .base = { |
1067 | .cra_driver_name = "rsa-caam", | 1077 | .cra_name = "rsa", |
1068 | .cra_priority = 3000, | 1078 | .cra_driver_name = "rsa-caam", |
1069 | .cra_module = THIS_MODULE, | 1079 | .cra_priority = 3000, |
1070 | .cra_ctxsize = sizeof(struct caam_rsa_ctx), | 1080 | .cra_module = THIS_MODULE, |
1071 | }, | 1081 | .cra_ctxsize = sizeof(struct caam_rsa_ctx), |
1082 | }, | ||
1083 | } | ||
1072 | }; | 1084 | }; |
1073 | 1085 | ||
1074 | /* Public Key Cryptography module initialization handler */ | 1086 | /* Public Key Cryptography module initialization handler */ |
@@ -1077,6 +1089,7 @@ int caam_pkc_init(struct device *ctrldev) | |||
1077 | struct caam_drv_private *priv = dev_get_drvdata(ctrldev); | 1089 | struct caam_drv_private *priv = dev_get_drvdata(ctrldev); |
1078 | u32 pk_inst; | 1090 | u32 pk_inst; |
1079 | int err; | 1091 | int err; |
1092 | init_done = false; | ||
1080 | 1093 | ||
1081 | /* Determine public key hardware accelerator presence. */ | 1094 | /* Determine public key hardware accelerator presence. */ |
1082 | if (priv->era < 10) | 1095 | if (priv->era < 10) |
@@ -1095,12 +1108,15 @@ int caam_pkc_init(struct device *ctrldev) | |||
1095 | if (!zero_buffer) | 1108 | if (!zero_buffer) |
1096 | return -ENOMEM; | 1109 | return -ENOMEM; |
1097 | 1110 | ||
1098 | err = crypto_register_akcipher(&caam_rsa); | 1111 | err = crypto_register_akcipher(&caam_rsa.akcipher); |
1112 | |||
1099 | if (err) { | 1113 | if (err) { |
1100 | kfree(zero_buffer); | 1114 | kfree(zero_buffer); |
1101 | dev_warn(ctrldev, "%s alg registration failed\n", | 1115 | dev_warn(ctrldev, "%s alg registration failed\n", |
1102 | caam_rsa.base.cra_driver_name); | 1116 | caam_rsa.akcipher.base.cra_driver_name); |
1103 | } else { | 1117 | } else { |
1118 | init_done = true; | ||
1119 | caam_rsa.registered = true; | ||
1104 | dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n"); | 1120 | dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n"); |
1105 | } | 1121 | } |
1106 | 1122 | ||
@@ -1109,6 +1125,11 @@ int caam_pkc_init(struct device *ctrldev) | |||
1109 | 1125 | ||
1110 | void caam_pkc_exit(void) | 1126 | void caam_pkc_exit(void) |
1111 | { | 1127 | { |
1128 | if (!init_done) | ||
1129 | return; | ||
1130 | |||
1131 | if (caam_rsa.registered) | ||
1132 | crypto_unregister_akcipher(&caam_rsa.akcipher); | ||
1133 | |||
1112 | kfree(zero_buffer); | 1134 | kfree(zero_buffer); |
1113 | crypto_unregister_akcipher(&caam_rsa); | ||
1114 | } | 1135 | } |
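
Pieced together from the hunk, the exit path now reads as below. Previously crypto_unregister_akcipher() and kfree(zero_buffer) ran unconditionally, so a caam_pkc_init() that failed registration (and had therefore already freed zero_buffer) could lead to a double free and an unregister of an algorithm that was never registered:

    void caam_pkc_exit(void)
    {
            if (!init_done)                 /* init failed early or never ran */
                    return;

            if (caam_rsa.registered)
                    crypto_unregister_akcipher(&caam_rsa.akcipher);

            kfree(zero_buffer);
    }
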
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 561bcb535184..e8baacaabe07 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c | |||
@@ -53,7 +53,7 @@ | |||
53 | L1_CACHE_BYTES) | 53 | L1_CACHE_BYTES) |
54 | 54 | ||
55 | /* length of descriptors */ | 55 | /* length of descriptors */ |
56 | #define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2) | 56 | #define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ_MAX * 2) |
57 | #define DESC_RNG_LEN (3 * CAAM_CMD_SZ) | 57 | #define DESC_RNG_LEN (3 * CAAM_CMD_SZ) |
58 | 58 | ||
59 | /* Buffer, its dma address and lock */ | 59 | /* Buffer, its dma address and lock */ |
@@ -80,6 +80,12 @@ struct caam_rng_ctx { | |||
80 | 80 | ||
81 | static struct caam_rng_ctx *rng_ctx; | 81 | static struct caam_rng_ctx *rng_ctx; |
82 | 82 | ||
83 | /* | ||
84 | * Variable used to avoid double free of resources in case | ||
85 | * algorithm registration was unsuccessful | ||
86 | */ | ||
87 | static bool init_done; | ||
88 | |||
83 | static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd) | 89 | static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd) |
84 | { | 90 | { |
85 | if (bd->addr) | 91 | if (bd->addr) |
@@ -296,6 +302,9 @@ static struct hwrng caam_rng = { | |||
296 | 302 | ||
297 | void caam_rng_exit(void) | 303 | void caam_rng_exit(void) |
298 | { | 304 | { |
305 | if (!init_done) | ||
306 | return; | ||
307 | |||
299 | caam_jr_free(rng_ctx->jrdev); | 308 | caam_jr_free(rng_ctx->jrdev); |
300 | hwrng_unregister(&caam_rng); | 309 | hwrng_unregister(&caam_rng); |
301 | kfree(rng_ctx); | 310 | kfree(rng_ctx); |
@@ -307,6 +316,7 @@ int caam_rng_init(struct device *ctrldev) | |||
307 | u32 rng_inst; | 316 | u32 rng_inst; |
308 | struct caam_drv_private *priv = dev_get_drvdata(ctrldev); | 317 | struct caam_drv_private *priv = dev_get_drvdata(ctrldev); |
309 | int err; | 318 | int err; |
319 | init_done = false; | ||
310 | 320 | ||
311 | /* Check for an instantiated RNG before registration */ | 321 | /* Check for an instantiated RNG before registration */ |
312 | if (priv->era < 10) | 322 | if (priv->era < 10) |
@@ -333,7 +343,12 @@ int caam_rng_init(struct device *ctrldev) | |||
333 | goto free_rng_ctx; | 343 | goto free_rng_ctx; |
334 | 344 | ||
335 | dev_info(dev, "registering rng-caam\n"); | 345 | dev_info(dev, "registering rng-caam\n"); |
336 | return hwrng_register(&caam_rng); | 346 | |
347 | err = hwrng_register(&caam_rng); | ||
348 | if (!err) { | ||
349 | init_done = true; | ||
350 | return err; | ||
351 | } | ||
337 | 352 | ||
338 | free_rng_ctx: | 353 | free_rng_ctx: |
339 | kfree(rng_ctx); | 354 | kfree(rng_ctx); |
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index 8639b2df0371..60e2a54c19f1 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h | |||
@@ -32,7 +32,7 @@ | |||
32 | #include <crypto/null.h> | 32 | #include <crypto/null.h> |
33 | #include <crypto/aes.h> | 33 | #include <crypto/aes.h> |
34 | #include <crypto/ctr.h> | 34 | #include <crypto/ctr.h> |
35 | #include <crypto/des.h> | 35 | #include <crypto/internal/des.h> |
36 | #include <crypto/gcm.h> | 36 | #include <crypto/gcm.h> |
37 | #include <crypto/sha.h> | 37 | #include <crypto/sha.h> |
38 | #include <crypto/md5.h> | 38 | #include <crypto/md5.h> |
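
Switching to <crypto/internal/des.h> pulls in the shared DES/3DES key-sanity helpers (the header also carries des_expand_key() and friends). A hedged sketch of a call site; the function name here is illustrative, not from this patch, and it reuses the shared skcipher_setkey() helper assumed earlier:

    #include <crypto/internal/des.h>

    static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
                                    const u8 *key, unsigned int keylen)
    {
            /* rejects degenerate K1 == K2 / K2 == K3 3DES keys */
            return verify_skcipher_des3_key(skcipher, key) ?:
                   skcipher_setkey(skcipher, key, keylen, 0);
    }
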
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 4e43ca4d3656..db22777d59b4 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
@@ -26,16 +26,6 @@ EXPORT_SYMBOL(caam_dpaa2); | |||
26 | #endif | 26 | #endif |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * i.MX targets tend to have clock control subsystems that can | ||
30 | * enable/disable clocking to our device. | ||
31 | */ | ||
32 | static inline struct clk *caam_drv_identify_clk(struct device *dev, | ||
33 | char *clk_name) | ||
34 | { | ||
35 | return caam_imx ? devm_clk_get(dev, clk_name) : NULL; | ||
36 | } | ||
37 | |||
38 | /* | ||
39 | * Descriptor to instantiate RNG State Handle 0 in normal mode and | 29 | * Descriptor to instantiate RNG State Handle 0 in normal mode and |
40 | * load the JDKEK, TDKEK and TDSK registers | 30 | * load the JDKEK, TDKEK and TDSK registers |
41 | */ | 31 | */ |
@@ -107,7 +97,12 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, | |||
107 | int i; | 97 | int i; |
108 | 98 | ||
109 | 99 | ||
110 | if (ctrlpriv->virt_en == 1) { | 100 | if (ctrlpriv->virt_en == 1 || |
101 | /* | ||
102 | * On i.MX8MQ these steps must be performed even when | ||
103 | * virt_en != 1, hence the explicit SoC check below | ||

104 | */ | ||
105 | of_machine_is_compatible("fsl,imx8mq")) { | ||
111 | clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0); | 106 | clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0); |
112 | 107 | ||
113 | while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) && | 108 | while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) && |
@@ -342,13 +337,6 @@ static int caam_remove(struct platform_device *pdev) | |||
342 | /* Unmap controller region */ | 337 | /* Unmap controller region */ |
343 | iounmap(ctrl); | 338 | iounmap(ctrl); |
344 | 339 | ||
345 | /* shut clocks off before finalizing shutdown */ | ||
346 | clk_disable_unprepare(ctrlpriv->caam_ipg); | ||
347 | if (ctrlpriv->caam_mem) | ||
348 | clk_disable_unprepare(ctrlpriv->caam_mem); | ||
349 | clk_disable_unprepare(ctrlpriv->caam_aclk); | ||
350 | if (ctrlpriv->caam_emi_slow) | ||
351 | clk_disable_unprepare(ctrlpriv->caam_emi_slow); | ||
352 | return 0; | 340 | return 0; |
353 | } | 341 | } |
354 | 342 | ||
@@ -497,20 +485,99 @@ static const struct of_device_id caam_match[] = { | |||
497 | }; | 485 | }; |
498 | MODULE_DEVICE_TABLE(of, caam_match); | 486 | MODULE_DEVICE_TABLE(of, caam_match); |
499 | 487 | ||
488 | struct caam_imx_data { | ||
489 | const struct clk_bulk_data *clks; | ||
490 | int num_clks; | ||
491 | }; | ||
492 | |||
493 | static const struct clk_bulk_data caam_imx6_clks[] = { | ||
494 | { .id = "ipg" }, | ||
495 | { .id = "mem" }, | ||
496 | { .id = "aclk" }, | ||
497 | { .id = "emi_slow" }, | ||
498 | }; | ||
499 | |||
500 | static const struct caam_imx_data caam_imx6_data = { | ||
501 | .clks = caam_imx6_clks, | ||
502 | .num_clks = ARRAY_SIZE(caam_imx6_clks), | ||
503 | }; | ||
504 | |||
505 | static const struct clk_bulk_data caam_imx7_clks[] = { | ||
506 | { .id = "ipg" }, | ||
507 | { .id = "aclk" }, | ||
508 | }; | ||
509 | |||
510 | static const struct caam_imx_data caam_imx7_data = { | ||
511 | .clks = caam_imx7_clks, | ||
512 | .num_clks = ARRAY_SIZE(caam_imx7_clks), | ||
513 | }; | ||
514 | |||
515 | static const struct clk_bulk_data caam_imx6ul_clks[] = { | ||
516 | { .id = "ipg" }, | ||
517 | { .id = "mem" }, | ||
518 | { .id = "aclk" }, | ||
519 | }; | ||
520 | |||
521 | static const struct caam_imx_data caam_imx6ul_data = { | ||
522 | .clks = caam_imx6ul_clks, | ||
523 | .num_clks = ARRAY_SIZE(caam_imx6ul_clks), | ||
524 | }; | ||
525 | |||
526 | static const struct soc_device_attribute caam_imx_soc_table[] = { | ||
527 | { .soc_id = "i.MX6UL", .data = &caam_imx6ul_data }, | ||
528 | { .soc_id = "i.MX6*", .data = &caam_imx6_data }, | ||
529 | { .soc_id = "i.MX7*", .data = &caam_imx7_data }, | ||
530 | { .soc_id = "i.MX8MQ", .data = &caam_imx7_data }, | ||
531 | { .family = "Freescale i.MX" }, | ||
532 | { /* sentinel */ } | ||
533 | }; | ||
534 | |||
535 | static void disable_clocks(void *data) | ||
536 | { | ||
537 | struct caam_drv_private *ctrlpriv = data; | ||
538 | |||
539 | clk_bulk_disable_unprepare(ctrlpriv->num_clks, ctrlpriv->clks); | ||
540 | } | ||
541 | |||
542 | static int init_clocks(struct device *dev, const struct caam_imx_data *data) | ||
543 | { | ||
544 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev); | ||
545 | int ret; | ||
546 | |||
547 | ctrlpriv->num_clks = data->num_clks; | ||
548 | ctrlpriv->clks = devm_kmemdup(dev, data->clks, | ||
549 | data->num_clks * sizeof(data->clks[0]), | ||
550 | GFP_KERNEL); | ||
551 | if (!ctrlpriv->clks) | ||
552 | return -ENOMEM; | ||
553 | |||
554 | ret = devm_clk_bulk_get(dev, ctrlpriv->num_clks, ctrlpriv->clks); | ||
555 | if (ret) { | ||
556 | dev_err(dev, | ||
557 | "Failed to request all necessary clocks\n"); | ||
558 | return ret; | ||
559 | } | ||
560 | |||
561 | ret = clk_bulk_prepare_enable(ctrlpriv->num_clks, ctrlpriv->clks); | ||
562 | if (ret) { | ||
563 | dev_err(dev, | ||
564 | "Failed to prepare/enable all necessary clocks\n"); | ||
565 | return ret; | ||
566 | } | ||
567 | |||
568 | return devm_add_action_or_reset(dev, disable_clocks, ctrlpriv); | ||
569 | } | ||
570 | |||
500 | /* Probe routine for CAAM top (controller) level */ | 571 | /* Probe routine for CAAM top (controller) level */ |
501 | static int caam_probe(struct platform_device *pdev) | 572 | static int caam_probe(struct platform_device *pdev) |
502 | { | 573 | { |
503 | int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; | 574 | int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; |
504 | u64 caam_id; | 575 | u64 caam_id; |
505 | static const struct soc_device_attribute imx_soc[] = { | 576 | const struct soc_device_attribute *imx_soc_match; |
506 | {.family = "Freescale i.MX"}, | ||
507 | {}, | ||
508 | }; | ||
509 | struct device *dev; | 577 | struct device *dev; |
510 | struct device_node *nprop, *np; | 578 | struct device_node *nprop, *np; |
511 | struct caam_ctrl __iomem *ctrl; | 579 | struct caam_ctrl __iomem *ctrl; |
512 | struct caam_drv_private *ctrlpriv; | 580 | struct caam_drv_private *ctrlpriv; |
513 | struct clk *clk; | ||
514 | #ifdef CONFIG_DEBUG_FS | 581 | #ifdef CONFIG_DEBUG_FS |
515 | struct caam_perfmon *perfmon; | 582 | struct caam_perfmon *perfmon; |
516 | #endif | 583 | #endif |
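
The hunk above replaces four individually managed clocks with the clk_bulk API plus a devres action; this is why caam_remove() and the probe error path no longer need any clk_disable_unprepare() calls. A minimal sketch of the same pattern in isolation (my_priv and my_disable_clocks are illustrative names, not part of the patch):

static void my_disable_clocks(void *data)
{
        struct my_priv *priv = data;    /* holds clk_bulk_data *clks, int num_clks */

        clk_bulk_disable_unprepare(priv->num_clks, priv->clks);
}

/* In probe, after filling priv->clks[].id and priv->num_clks: */
ret = devm_clk_bulk_get(dev, priv->num_clks, priv->clks);
if (ret)
        return ret;

ret = clk_bulk_prepare_enable(priv->num_clks, priv->clks);
if (ret)
        return ret;

/* devres now disables and unprepares the clocks on unbind or probe failure */
return devm_add_action_or_reset(dev, my_disable_clocks, priv);
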
@@ -527,103 +594,68 @@ static int caam_probe(struct platform_device *pdev) | |||
527 | dev_set_drvdata(dev, ctrlpriv); | 594 | dev_set_drvdata(dev, ctrlpriv); |
528 | nprop = pdev->dev.of_node; | 595 | nprop = pdev->dev.of_node; |
529 | 596 | ||
530 | caam_imx = (bool)soc_device_match(imx_soc); | 597 | imx_soc_match = soc_device_match(caam_imx_soc_table); |
598 | caam_imx = (bool)imx_soc_match; | ||
531 | 599 | ||
532 | /* Enable clocking */ | 600 | if (imx_soc_match) { |
533 | clk = caam_drv_identify_clk(&pdev->dev, "ipg"); | 601 | if (!imx_soc_match->data) { |
534 | if (IS_ERR(clk)) { | 602 | dev_err(dev, "No clock data provided for i.MX SoC"); |
535 | ret = PTR_ERR(clk); | 603 | return -EINVAL; |
536 | dev_err(&pdev->dev, | ||
537 | "can't identify CAAM ipg clk: %d\n", ret); | ||
538 | return ret; | ||
539 | } | ||
540 | ctrlpriv->caam_ipg = clk; | ||
541 | |||
542 | if (!of_machine_is_compatible("fsl,imx7d") && | ||
543 | !of_machine_is_compatible("fsl,imx7s") && | ||
544 | !of_machine_is_compatible("fsl,imx7ulp")) { | ||
545 | clk = caam_drv_identify_clk(&pdev->dev, "mem"); | ||
546 | if (IS_ERR(clk)) { | ||
547 | ret = PTR_ERR(clk); | ||
548 | dev_err(&pdev->dev, | ||
549 | "can't identify CAAM mem clk: %d\n", ret); | ||
550 | return ret; | ||
551 | } | 604 | } |
552 | ctrlpriv->caam_mem = clk; | ||
553 | } | ||
554 | 605 | ||
555 | clk = caam_drv_identify_clk(&pdev->dev, "aclk"); | 606 | ret = init_clocks(dev, imx_soc_match->data); |
556 | if (IS_ERR(clk)) { | 607 | if (ret) |
557 | ret = PTR_ERR(clk); | ||
558 | dev_err(&pdev->dev, | ||
559 | "can't identify CAAM aclk clk: %d\n", ret); | ||
560 | return ret; | ||
561 | } | ||
562 | ctrlpriv->caam_aclk = clk; | ||
563 | |||
564 | if (!of_machine_is_compatible("fsl,imx6ul") && | ||
565 | !of_machine_is_compatible("fsl,imx7d") && | ||
566 | !of_machine_is_compatible("fsl,imx7s") && | ||
567 | !of_machine_is_compatible("fsl,imx7ulp")) { | ||
568 | clk = caam_drv_identify_clk(&pdev->dev, "emi_slow"); | ||
569 | if (IS_ERR(clk)) { | ||
570 | ret = PTR_ERR(clk); | ||
571 | dev_err(&pdev->dev, | ||
572 | "can't identify CAAM emi_slow clk: %d\n", ret); | ||
573 | return ret; | 608 | return ret; |
574 | } | ||
575 | ctrlpriv->caam_emi_slow = clk; | ||
576 | } | ||
577 | |||
578 | ret = clk_prepare_enable(ctrlpriv->caam_ipg); | ||
579 | if (ret < 0) { | ||
580 | dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret); | ||
581 | return ret; | ||
582 | } | ||
583 | |||
584 | if (ctrlpriv->caam_mem) { | ||
585 | ret = clk_prepare_enable(ctrlpriv->caam_mem); | ||
586 | if (ret < 0) { | ||
587 | dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n", | ||
588 | ret); | ||
589 | goto disable_caam_ipg; | ||
590 | } | ||
591 | } | 609 | } |
592 | 610 | ||
593 | ret = clk_prepare_enable(ctrlpriv->caam_aclk); | ||
594 | if (ret < 0) { | ||
595 | dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret); | ||
596 | goto disable_caam_mem; | ||
597 | } | ||
598 | |||
599 | if (ctrlpriv->caam_emi_slow) { | ||
600 | ret = clk_prepare_enable(ctrlpriv->caam_emi_slow); | ||
601 | if (ret < 0) { | ||
602 | dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n", | ||
603 | ret); | ||
604 | goto disable_caam_aclk; | ||
605 | } | ||
606 | } | ||
607 | 611 | ||
608 | /* Get configuration properties from device tree */ | 612 | /* Get configuration properties from device tree */ |
609 | /* First, get register page */ | 613 | /* First, get register page */ |
610 | ctrl = of_iomap(nprop, 0); | 614 | ctrl = of_iomap(nprop, 0); |
611 | if (ctrl == NULL) { | 615 | if (!ctrl) { |
612 | dev_err(dev, "caam: of_iomap() failed\n"); | 616 | dev_err(dev, "caam: of_iomap() failed\n"); |
613 | ret = -ENOMEM; | 617 | return -ENOMEM; |
614 | goto disable_caam_emi_slow; | ||
615 | } | 618 | } |
616 | 619 | ||
617 | caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) & | 620 | caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) & |
618 | (CSTA_PLEND | CSTA_ALT_PLEND)); | 621 | (CSTA_PLEND | CSTA_ALT_PLEND)); |
619 | |||
620 | /* Finding the page size for using the CTPR_MS register */ | ||
621 | comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms); | 622 | comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms); |
622 | pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT; | 623 | if (comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR) |
624 | caam_ptr_sz = sizeof(u64); | ||
625 | else | ||
626 | caam_ptr_sz = sizeof(u32); | ||
627 | caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2); | ||
628 | ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK); | ||
629 | |||
630 | #ifdef CONFIG_CAAM_QI | ||
631 | /* If (DPAA 1.x) QI present, check whether dependencies are available */ | ||
632 | if (ctrlpriv->qi_present && !caam_dpaa2) { | ||
633 | ret = qman_is_probed(); | ||
634 | if (!ret) { | ||
635 | ret = -EPROBE_DEFER; | ||
636 | goto iounmap_ctrl; | ||
637 | } else if (ret < 0) { | ||
638 | dev_err(dev, "failing probe due to qman probe error\n"); | ||
639 | ret = -ENODEV; | ||
640 | goto iounmap_ctrl; | ||
641 | } | ||
642 | |||
643 | ret = qman_portals_probed(); | ||
644 | if (!ret) { | ||
645 | ret = -EPROBE_DEFER; | ||
646 | goto iounmap_ctrl; | ||
647 | } else if (ret < 0) { | ||
648 | dev_err(dev, "failing probe due to qman portals probe error\n"); | ||
649 | ret = -ENODEV; | ||
650 | goto iounmap_ctrl; | ||
651 | } | ||
652 | } | ||
653 | #endif | ||
623 | 654 | ||
624 | /* Allocating the BLOCK_OFFSET based on the supported page size on | 655 | /* Allocating the BLOCK_OFFSET based on the supported page size on |
625 | * the platform | 656 | * the platform |
626 | */ | 657 | */ |
658 | pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT; | ||
627 | if (pg_size == 0) | 659 | if (pg_size == 0) |
628 | BLOCK_OFFSET = PG_SIZE_4K; | 660 | BLOCK_OFFSET = PG_SIZE_4K; |
629 | else | 661 | else |
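
This hunk also turns the descriptor pointer width into a runtime property instead of sizeof(dma_addr_t): CTPR_MS.PS reports whether the CAAM supports 64-bit pointers, and MCFGR.LONG_PTR whether they are enabled, so caam_ptr_sz is 8 only when both bits are set. The same decision, compressed into an equivalent sketch:

/*
 *   CTPR_MS.PS   MCFGR.LONG_PTR   caam_ptr_sz
 *        0             x               4
 *        1             0               4
 *        1             1               8
 */
caam_ptr_sz = ((comp_params & CTPR_MS_PS) &&
               (rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)) ?
              sizeof(u64) : sizeof(u32);
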
@@ -648,7 +680,6 @@ static int caam_probe(struct platform_device *pdev) | |||
648 | * In case of SoCs with Management Complex, MC f/w performs | 680 | * In case of SoCs with Management Complex, MC f/w performs |
649 | * the configuration. | 681 | * the configuration. |
650 | */ | 682 | */ |
651 | caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2); | ||
652 | np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc"); | 683 | np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc"); |
653 | ctrlpriv->mc_en = !!np; | 684 | ctrlpriv->mc_en = !!np; |
654 | of_node_put(np); | 685 | of_node_put(np); |
@@ -688,16 +719,7 @@ static int caam_probe(struct platform_device *pdev) | |||
688 | JRSTART_JR1_START | JRSTART_JR2_START | | 719 | JRSTART_JR1_START | JRSTART_JR2_START | |
689 | JRSTART_JR3_START); | 720 | JRSTART_JR3_START); |
690 | 721 | ||
691 | if (sizeof(dma_addr_t) == sizeof(u64)) { | 722 | ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev)); |
692 | if (caam_dpaa2) | ||
693 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49)); | ||
694 | else if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) | ||
695 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); | ||
696 | else | ||
697 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)); | ||
698 | } else { | ||
699 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); | ||
700 | } | ||
701 | if (ret) { | 723 | if (ret) { |
702 | dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); | 724 | dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); |
703 | goto iounmap_ctrl; | 725 | goto iounmap_ctrl; |
@@ -719,7 +741,6 @@ static int caam_probe(struct platform_device *pdev) | |||
719 | #endif | 741 | #endif |
720 | 742 | ||
721 | /* Check to see if (DPAA 1.x) QI present. If so, enable */ | 743 | /* Check to see if (DPAA 1.x) QI present. If so, enable */ |
722 | ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK); | ||
723 | if (ctrlpriv->qi_present && !caam_dpaa2) { | 744 | if (ctrlpriv->qi_present && !caam_dpaa2) { |
724 | ctrlpriv->qi = (struct caam_queue_if __iomem __force *) | 745 | ctrlpriv->qi = (struct caam_queue_if __iomem __force *) |
725 | ((__force uint8_t *)ctrl + | 746 | ((__force uint8_t *)ctrl + |
@@ -908,16 +929,6 @@ shutdown_qi: | |||
908 | #endif | 929 | #endif |
909 | iounmap_ctrl: | 930 | iounmap_ctrl: |
910 | iounmap(ctrl); | 931 | iounmap(ctrl); |
911 | disable_caam_emi_slow: | ||
912 | if (ctrlpriv->caam_emi_slow) | ||
913 | clk_disable_unprepare(ctrlpriv->caam_emi_slow); | ||
914 | disable_caam_aclk: | ||
915 | clk_disable_unprepare(ctrlpriv->caam_aclk); | ||
916 | disable_caam_mem: | ||
917 | if (ctrlpriv->caam_mem) | ||
918 | clk_disable_unprepare(ctrlpriv->caam_mem); | ||
919 | disable_caam_ipg: | ||
920 | clk_disable_unprepare(ctrlpriv->caam_ipg); | ||
921 | return ret; | 932 | return ret; |
922 | } | 933 | } |
923 | 934 | ||
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index 5988a26a2441..62ce6421bb3f 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h | |||
@@ -14,9 +14,41 @@ | |||
14 | 14 | ||
15 | #define IMMEDIATE (1 << 23) | 15 | #define IMMEDIATE (1 << 23) |
16 | #define CAAM_CMD_SZ sizeof(u32) | 16 | #define CAAM_CMD_SZ sizeof(u32) |
17 | #define CAAM_PTR_SZ sizeof(dma_addr_t) | 17 | #define CAAM_PTR_SZ caam_ptr_sz |
18 | #define CAAM_PTR_SZ_MAX sizeof(dma_addr_t) | ||
19 | #define CAAM_PTR_SZ_MIN sizeof(u32) | ||
18 | #define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE) | 20 | #define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE) |
19 | #define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3) | 21 | #define __DESC_JOB_IO_LEN(n) (CAAM_CMD_SZ * 5 + (n) * 3) |
22 | #define DESC_JOB_IO_LEN __DESC_JOB_IO_LEN(CAAM_PTR_SZ) | ||
23 | #define DESC_JOB_IO_LEN_MAX __DESC_JOB_IO_LEN(CAAM_PTR_SZ_MAX) | ||
24 | #define DESC_JOB_IO_LEN_MIN __DESC_JOB_IO_LEN(CAAM_PTR_SZ_MIN) | ||
25 | |||
26 | /* | ||
27 | * The CAAM QI hardware constructs a job descriptor which points | ||
28 | * to the shared descriptor (whose address is in context_a of the to-CAAM FQ). | ||
29 | * When the job descriptor is executed by a DECO, the whole job | ||
30 | * descriptor together with the shared descriptor gets loaded into the | ||
31 | * DECO buffer, which is 64 words long (each 32-bit). | ||
32 | * | ||
33 | * The job descriptor constructed by the QI hardware has the following layout: | ||
34 | * | ||
35 | * HEADER (1 word) | ||
36 | * Shdesc ptr (1 or 2 words) | ||
37 | * SEQ_OUT_PTR (1 word) | ||
38 | * Out ptr (1 or 2 words) | ||
39 | * Out length (1 word) | ||
40 | * SEQ_IN_PTR (1 word) | ||
41 | * In ptr (1 or 2 words) | ||
42 | * In length (1 word) | ||
43 | * | ||
44 | * The shdesc ptr is used to fetch the shared descriptor contents | ||
45 | * into the DECO buffer. | ||
46 | * | ||
47 | * Apart from the shdesc contents, the total number of words that | ||
48 | * get loaded into the DECO buffer is '8' or '11'. The remaining words | ||
49 | * in the DECO buffer can be used for storing the shared descriptor. | ||
50 | */ | ||
51 | #define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN) / CAAM_CMD_SZ) | ||
20 | 52 | ||
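
A quick check of the arithmetic behind these macros, assuming MAX_CAAM_DESCSIZE is 64 words as defined in desc.h:

/*
 * __DESC_JOB_IO_LEN(n) = CAAM_CMD_SZ * 5 + n * 3
 *
 *   32-bit pointers: 4 * 5 + 4 * 3 = 32 bytes =  8 words
 *   64-bit pointers: 4 * 5 + 8 * 3 = 44 bytes = 11 words
 *
 * which is where the '8' or '11' in the comment comes from, and
 *
 * MAX_SDLEN = (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN) / CAAM_CMD_SZ
 *           = (4 * 64 - 32) / 4 = 56 words
 */
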
21 | #ifdef DEBUG | 53 | #ifdef DEBUG |
22 | #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ | 54 | #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ |
@@ -37,6 +69,7 @@ | |||
37 | (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) | 69 | (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) |
38 | 70 | ||
39 | extern bool caam_little_end; | 71 | extern bool caam_little_end; |
72 | extern size_t caam_ptr_sz; | ||
40 | 73 | ||
41 | /* | 74 | /* |
42 | * HW fetches 4 S/G table entries at a time, irrespective of how many entries | 75 | * HW fetches 4 S/G table entries at a time, irrespective of how many entries |
@@ -103,9 +136,15 @@ static inline void init_job_desc_pdb(u32 * const desc, u32 options, | |||
103 | 136 | ||
104 | static inline void append_ptr(u32 * const desc, dma_addr_t ptr) | 137 | static inline void append_ptr(u32 * const desc, dma_addr_t ptr) |
105 | { | 138 | { |
106 | dma_addr_t *offset = (dma_addr_t *)desc_end(desc); | 139 | if (caam_ptr_sz == sizeof(dma_addr_t)) { |
140 | dma_addr_t *offset = (dma_addr_t *)desc_end(desc); | ||
107 | 141 | ||
108 | *offset = cpu_to_caam_dma(ptr); | 142 | *offset = cpu_to_caam_dma(ptr); |
143 | } else { | ||
144 | u32 *offset = (u32 *)desc_end(desc); | ||
145 | |||
146 | *offset = cpu_to_caam_dma(ptr); | ||
147 | } | ||
109 | 148 | ||
110 | (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + | 149 | (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + |
111 | CAAM_PTR_SZ / CAAM_CMD_SZ); | 150 | CAAM_PTR_SZ / CAAM_CMD_SZ); |
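
The split in append_ptr() matters when caam_ptr_sz is smaller than sizeof(dma_addr_t), e.g. a 32-bit-pointer CAAM on a 64-bit kernel: storing through a dma_addr_t * would write 8 bytes into a 4-byte descriptor slot. The length accounting at the end adapts by itself:

/*
 * The header's length field advances by
 *
 *   CAAM_PTR_SZ / CAAM_CMD_SZ = 4 / 4 = 1 word    (caam_ptr_sz == 4)
 *   CAAM_PTR_SZ / CAAM_CMD_SZ = 8 / 4 = 2 words   (caam_ptr_sz == 8)
 */
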
@@ -457,8 +496,8 @@ do { \ | |||
457 | * functions where it is used. | 496 | * functions where it is used. |
458 | * @keylen: length of the provided algorithm key, in bytes | 497 | * @keylen: length of the provided algorithm key, in bytes |
459 | * @keylen_pad: padded length of the provided algorithm key, in bytes | 498 | * @keylen_pad: padded length of the provided algorithm key, in bytes |
460 | * @key: address where algorithm key resides; virtual address if key_inline | 499 | * @key_dma: dma (bus) address where algorithm key resides |
461 | * is true, dma (bus) address if key_inline is false. | 500 | * @key_virt: virtual address where algorithm key resides |
462 | * @key_inline: true - key can be inlined in the descriptor; false - key is | 501 | * @key_inline: true - key can be inlined in the descriptor; false - key is |
463 | * referenced by the descriptor | 502 | * referenced by the descriptor |
464 | */ | 503 | */ |
@@ -466,10 +505,8 @@ struct alginfo { | |||
466 | u32 algtype; | 505 | u32 algtype; |
467 | unsigned int keylen; | 506 | unsigned int keylen; |
468 | unsigned int keylen_pad; | 507 | unsigned int keylen_pad; |
469 | union { | 508 | dma_addr_t key_dma; |
470 | dma_addr_t key_dma; | 509 | const void *key_virt; |
471 | const void *key_virt; | ||
472 | }; | ||
473 | bool key_inline; | 510 | bool key_inline; |
474 | }; | 511 | }; |
475 | 512 | ||
@@ -535,14 +572,26 @@ static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata) | |||
535 | if (adata->key_inline) { | 572 | if (adata->key_inline) { |
536 | int words; | 573 | int words; |
537 | 574 | ||
538 | append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | | 575 | if (adata->keylen > adata->keylen_pad) { |
539 | OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM | | 576 | append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | |
540 | adata->keylen); | 577 | OP_PCL_DKP_SRC_PTR | |
541 | append_data(desc, adata->key_virt, adata->keylen); | 578 | OP_PCL_DKP_DST_IMM | adata->keylen); |
579 | append_ptr(desc, adata->key_dma); | ||
580 | |||
581 | words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) - | ||
582 | CAAM_PTR_SZ) / CAAM_CMD_SZ; | ||
583 | } else { | ||
584 | append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | | ||
585 | OP_PCL_DKP_SRC_IMM | | ||
586 | OP_PCL_DKP_DST_IMM | adata->keylen); | ||
587 | append_data(desc, adata->key_virt, adata->keylen); | ||
588 | |||
589 | words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) - | ||
590 | ALIGN(adata->keylen, CAAM_CMD_SZ)) / | ||
591 | CAAM_CMD_SZ; | ||
592 | } | ||
542 | 593 | ||
543 | /* Reserve space in descriptor buffer for the derived key */ | 594 | /* Reserve space in descriptor buffer for the derived key */ |
544 | words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) - | ||
545 | ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ; | ||
546 | if (words) | 595 | if (words) |
547 | (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words); | 596 | (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words); |
548 | } else { | 597 | } else { |
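
In the new referenced-key branch, OP_PCL_DKP_DST_IMM still makes the CAAM write the derived (split) key back into the descriptor at the key position, so ALIGN(keylen_pad, CAAM_CMD_SZ) bytes must be reserved there in total, of which the just-appended pointer already occupies CAAM_PTR_SZ. Worked with illustrative values (32-bit pointers, keylen_pad = 64):

/* words = (ALIGN(64, 4) - 4) / 4 = 15 extra words reserved after the pointer */
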
diff --git a/drivers/crypto/caam/dpseci-debugfs.c b/drivers/crypto/caam/dpseci-debugfs.c new file mode 100644 index 000000000000..c5bfc923abd8 --- /dev/null +++ b/drivers/crypto/caam/dpseci-debugfs.c | |||
@@ -0,0 +1,79 @@ | |||
1 | // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) | ||
2 | /* Copyright 2019 NXP */ | ||
3 | |||
4 | #include <linux/module.h> | ||
5 | #include <linux/device.h> | ||
6 | #include <linux/debugfs.h> | ||
7 | #include "dpseci-debugfs.h" | ||
8 | |||
9 | static int dpseci_dbg_fqs_show(struct seq_file *file, void *offset) | ||
10 | { | ||
11 | struct dpaa2_caam_priv *priv = (struct dpaa2_caam_priv *)file->private; | ||
12 | u32 fqid, fcnt, bcnt; | ||
13 | int i, err; | ||
14 | |||
15 | seq_printf(file, "FQ stats for %s:\n", dev_name(priv->dev)); | ||
16 | seq_printf(file, "%s%16s%16s\n", | ||
17 | "Rx-VFQID", | ||
18 | "Pending frames", | ||
19 | "Pending bytes"); | ||
20 | |||
21 | for (i = 0; i < priv->num_pairs; i++) { | ||
22 | fqid = priv->rx_queue_attr[i].fqid; | ||
23 | err = dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt); | ||
24 | if (err) | ||
25 | continue; | ||
26 | |||
27 | seq_printf(file, "%5d%16u%16u\n", fqid, fcnt, bcnt); | ||
28 | } | ||
29 | |||
30 | seq_printf(file, "%s%16s%16s\n", | ||
31 | "Tx-VFQID", | ||
32 | "Pending frames", | ||
33 | "Pending bytes"); | ||
34 | |||
35 | for (i = 0; i < priv->num_pairs; i++) { | ||
36 | fqid = priv->tx_queue_attr[i].fqid; | ||
37 | err = dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt); | ||
38 | if (err) | ||
39 | continue; | ||
40 | |||
41 | seq_printf(file, "%5d%16u%16u\n", fqid, fcnt, bcnt); | ||
42 | } | ||
43 | |||
44 | return 0; | ||
45 | } | ||
46 | |||
47 | static int dpseci_dbg_fqs_open(struct inode *inode, struct file *file) | ||
48 | { | ||
49 | int err; | ||
50 | struct dpaa2_caam_priv *priv; | ||
51 | |||
52 | priv = (struct dpaa2_caam_priv *)inode->i_private; | ||
53 | |||
54 | err = single_open(file, dpseci_dbg_fqs_show, priv); | ||
55 | if (err < 0) | ||
56 | dev_err(priv->dev, "single_open() failed\n"); | ||
57 | |||
58 | return err; | ||
59 | } | ||
60 | |||
61 | static const struct file_operations dpseci_dbg_fq_ops = { | ||
62 | .open = dpseci_dbg_fqs_open, | ||
63 | .read = seq_read, | ||
64 | .llseek = seq_lseek, | ||
65 | .release = single_release, | ||
66 | }; | ||
67 | |||
68 | void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv) | ||
69 | { | ||
70 | priv->dfs_root = debugfs_create_dir(dev_name(priv->dev), NULL); | ||
71 | |||
72 | debugfs_create_file("fq_stats", 0444, priv->dfs_root, priv, | ||
73 | &dpseci_dbg_fq_ops); | ||
74 | } | ||
75 | |||
76 | void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv) | ||
77 | { | ||
78 | debugfs_remove_recursive(priv->dfs_root); | ||
79 | } | ||
diff --git a/drivers/crypto/caam/dpseci-debugfs.h b/drivers/crypto/caam/dpseci-debugfs.h new file mode 100644 index 000000000000..bc22af7bec37 --- /dev/null +++ b/drivers/crypto/caam/dpseci-debugfs.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ | ||
2 | /* Copyright 2019 NXP */ | ||
3 | |||
4 | #ifndef DPSECI_DEBUGFS_H | ||
5 | #define DPSECI_DEBUGFS_H | ||
6 | |||
7 | #include <linux/dcache.h> | ||
8 | #include "caamalg_qi2.h" | ||
9 | |||
10 | #ifdef CONFIG_DEBUG_FS | ||
11 | void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv); | ||
12 | void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv); | ||
13 | #else | ||
14 | static inline void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv) {} | ||
15 | static inline void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv) {} | ||
16 | #endif /* CONFIG_DEBUG_FS */ | ||
17 | |||
18 | #endif /* DPSECI_DEBUGFS_H */ | ||
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c index 4f0d45865aa2..17c6108b6d41 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c | |||
@@ -56,6 +56,9 @@ EXPORT_SYMBOL(caam_little_end); | |||
56 | bool caam_imx; | 56 | bool caam_imx; |
57 | EXPORT_SYMBOL(caam_imx); | 57 | EXPORT_SYMBOL(caam_imx); |
58 | 58 | ||
59 | size_t caam_ptr_sz; | ||
60 | EXPORT_SYMBOL(caam_ptr_sz); | ||
61 | |||
59 | static const struct { | 62 | static const struct { |
60 | u8 value; | 63 | u8 value; |
61 | const char *error_text; | 64 | const char *error_text; |
@@ -118,6 +121,7 @@ static const struct { | |||
118 | u8 value; | 121 | u8 value; |
119 | const char *error_text; | 122 | const char *error_text; |
120 | } qi_error_list[] = { | 123 | } qi_error_list[] = { |
124 | { 0x00, "No error" }, | ||
121 | { 0x1F, "Job terminated by FQ or ICID flush" }, | 125 | { 0x1F, "Job terminated by FQ or ICID flush" }, |
122 | { 0x20, "FD format error"}, | 126 | { 0x20, "FD format error"}, |
123 | { 0x21, "FD command format error"}, | 127 | { 0x21, "FD command format error"}, |
@@ -210,8 +214,8 @@ static const char * const rng_err_id_list[] = { | |||
210 | "Secure key generation", | 214 | "Secure key generation", |
211 | }; | 215 | }; |
212 | 216 | ||
213 | static void report_ccb_status(struct device *jrdev, const u32 status, | 217 | static int report_ccb_status(struct device *jrdev, const u32 status, |
214 | const char *error) | 218 | const char *error) |
215 | { | 219 | { |
216 | u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >> | 220 | u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >> |
217 | JRSTA_CCBERR_CHAID_SHIFT; | 221 | JRSTA_CCBERR_CHAID_SHIFT; |
@@ -247,22 +251,27 @@ static void report_ccb_status(struct device *jrdev, const u32 status, | |||
247 | * CCB ICV check failures are part of normal operation life; | 251 | * CCB ICV check failures are part of normal operation life; |
248 | * we leave the upper layers to do what they want with them. | 252 | * we leave the upper layers to do what they want with them. |
249 | */ | 253 | */ |
250 | if (err_id != JRSTA_CCBERR_ERRID_ICVCHK) | 254 | if (err_id == JRSTA_CCBERR_ERRID_ICVCHK) |
251 | dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n", | 255 | return -EBADMSG; |
252 | status, error, idx_str, idx, | 256 | |
253 | cha_str, cha_err_code, | 257 | dev_err_ratelimited(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n", status, |
254 | err_str, err_err_code); | 258 | error, idx_str, idx, cha_str, cha_err_code, |
259 | err_str, err_err_code); | ||
260 | |||
261 | return -EINVAL; | ||
255 | } | 262 | } |
256 | 263 | ||
257 | static void report_jump_status(struct device *jrdev, const u32 status, | 264 | static int report_jump_status(struct device *jrdev, const u32 status, |
258 | const char *error) | 265 | const char *error) |
259 | { | 266 | { |
260 | dev_err(jrdev, "%08x: %s: %s() not implemented\n", | 267 | dev_err(jrdev, "%08x: %s: %s() not implemented\n", |
261 | status, error, __func__); | 268 | status, error, __func__); |
269 | |||
270 | return -EINVAL; | ||
262 | } | 271 | } |
263 | 272 | ||
264 | static void report_deco_status(struct device *jrdev, const u32 status, | 273 | static int report_deco_status(struct device *jrdev, const u32 status, |
265 | const char *error) | 274 | const char *error) |
266 | { | 275 | { |
267 | u8 err_id = status & JRSTA_DECOERR_ERROR_MASK; | 276 | u8 err_id = status & JRSTA_DECOERR_ERROR_MASK; |
268 | u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >> | 277 | u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >> |
@@ -288,10 +297,12 @@ static void report_deco_status(struct device *jrdev, const u32 status, | |||
288 | 297 | ||
289 | dev_err(jrdev, "%08x: %s: %s %d: %s%s\n", | 298 | dev_err(jrdev, "%08x: %s: %s %d: %s%s\n", |
290 | status, error, idx_str, idx, err_str, err_err_code); | 299 | status, error, idx_str, idx, err_str, err_err_code); |
300 | |||
301 | return -EINVAL; | ||
291 | } | 302 | } |
292 | 303 | ||
293 | static void report_qi_status(struct device *qidev, const u32 status, | 304 | static int report_qi_status(struct device *qidev, const u32 status, |
294 | const char *error) | 305 | const char *error) |
295 | { | 306 | { |
296 | u8 err_id = status & JRSTA_QIERR_ERROR_MASK; | 307 | u8 err_id = status & JRSTA_QIERR_ERROR_MASK; |
297 | const char *err_str = "unidentified error value 0x"; | 308 | const char *err_str = "unidentified error value 0x"; |
@@ -309,27 +320,33 @@ static void report_qi_status(struct device *qidev, const u32 status, | |||
309 | 320 | ||
310 | dev_err(qidev, "%08x: %s: %s%s\n", | 321 | dev_err(qidev, "%08x: %s: %s%s\n", |
311 | status, error, err_str, err_err_code); | 322 | status, error, err_str, err_err_code); |
323 | |||
324 | return -EINVAL; | ||
312 | } | 325 | } |
313 | 326 | ||
314 | static void report_jr_status(struct device *jrdev, const u32 status, | 327 | static int report_jr_status(struct device *jrdev, const u32 status, |
315 | const char *error) | 328 | const char *error) |
316 | { | 329 | { |
317 | dev_err(jrdev, "%08x: %s: %s() not implemented\n", | 330 | dev_err(jrdev, "%08x: %s: %s() not implemented\n", |
318 | status, error, __func__); | 331 | status, error, __func__); |
332 | |||
333 | return -EINVAL; | ||
319 | } | 334 | } |
320 | 335 | ||
321 | static void report_cond_code_status(struct device *jrdev, const u32 status, | 336 | static int report_cond_code_status(struct device *jrdev, const u32 status, |
322 | const char *error) | 337 | const char *error) |
323 | { | 338 | { |
324 | dev_err(jrdev, "%08x: %s: %s() not implemented\n", | 339 | dev_err(jrdev, "%08x: %s: %s() not implemented\n", |
325 | status, error, __func__); | 340 | status, error, __func__); |
341 | |||
342 | return -EINVAL; | ||
326 | } | 343 | } |
327 | 344 | ||
328 | void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2) | 345 | int caam_strstatus(struct device *jrdev, u32 status, bool qi_v2) |
329 | { | 346 | { |
330 | static const struct stat_src { | 347 | static const struct stat_src { |
331 | void (*report_ssed)(struct device *jrdev, const u32 status, | 348 | int (*report_ssed)(struct device *jrdev, const u32 status, |
332 | const char *error); | 349 | const char *error); |
333 | const char *error; | 350 | const char *error; |
334 | } status_src[16] = { | 351 | } status_src[16] = { |
335 | { NULL, "No error" }, | 352 | { NULL, "No error" }, |
@@ -357,11 +374,14 @@ void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2) | |||
357 | * Otherwise print the error source name. | 374 | * Otherwise print the error source name. |
358 | */ | 375 | */ |
359 | if (status_src[ssrc].report_ssed) | 376 | if (status_src[ssrc].report_ssed) |
360 | status_src[ssrc].report_ssed(jrdev, status, error); | 377 | return status_src[ssrc].report_ssed(jrdev, status, error); |
361 | else if (error) | 378 | |
379 | if (error) | ||
362 | dev_err(jrdev, "%d: %s\n", ssrc, error); | 380 | dev_err(jrdev, "%d: %s\n", ssrc, error); |
363 | else | 381 | else |
364 | dev_err(jrdev, "%d: unknown error source\n", ssrc); | 382 | dev_err(jrdev, "%d: unknown error source\n", ssrc); |
383 | |||
384 | return -EINVAL; | ||
365 | } | 385 | } |
366 | EXPORT_SYMBOL(caam_strstatus); | 386 | EXPORT_SYMBOL(caam_strstatus); |
367 | 387 | ||
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h index d9726e66edbf..16809fa8fec7 100644 --- a/drivers/crypto/caam/error.h +++ b/drivers/crypto/caam/error.h | |||
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | #define CAAM_ERROR_STR_MAX 302 | 13 | #define CAAM_ERROR_STR_MAX 302 |
14 | 14 | ||
15 | void caam_strstatus(struct device *dev, u32 status, bool qi_v2); | 15 | int caam_strstatus(struct device *dev, u32 status, bool qi_v2); |
16 | 16 | ||
17 | #define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false) | 17 | #define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false) |
18 | #define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true) | 18 | #define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true) |
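
With caam_strstatus() returning an error code, completion callbacks can hand their waiters a meaningful errno instead of the raw status word (-EBADMSG for CCB ICV-check failures, -EINVAL otherwise); key_gen.c below is converted exactly this way. A minimal sketch of the pattern for any other job-ring callback (my_result and my_done are hypothetical names):

static void my_done(struct device *jrdev, u32 *desc, u32 err, void *context)
{
        struct my_result *res = context;
        int ecode = 0;

        if (err)
                ecode = caam_jr_strstatus(jrdev, err);

        res->err = ecode;       /* errno, not the raw CAAM status word */
        complete(&res->completion);
}
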
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index 6af84bbc612c..731b06becd9c 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h | |||
@@ -10,6 +10,8 @@ | |||
10 | #ifndef INTERN_H | 10 | #ifndef INTERN_H |
11 | #define INTERN_H | 11 | #define INTERN_H |
12 | 12 | ||
13 | #include "ctrl.h" | ||
14 | |||
13 | /* Currently comes from Kconfig param as a ^2 (driver-required) */ | 15 | /* Currently comes from Kconfig param as a ^2 (driver-required) */ |
14 | #define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE) | 16 | #define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE) |
15 | 17 | ||
@@ -53,10 +55,11 @@ struct caam_drv_private_jr { | |||
53 | spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */ | 55 | spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */ |
54 | u32 inpring_avail; /* Number of free entries in input ring */ | 56 | u32 inpring_avail; /* Number of free entries in input ring */ |
55 | int head; /* entinfo (s/w ring) head index */ | 57 | int head; /* entinfo (s/w ring) head index */ |
56 | dma_addr_t *inpring; /* Base of input ring, alloc DMA-safe */ | 58 | void *inpring; /* Base of input ring, alloc |
59 | * DMA-safe */ | ||
57 | int out_ring_read_index; /* Output index "tail" */ | 60 | int out_ring_read_index; /* Output index "tail" */ |
58 | int tail; /* entinfo (s/w ring) tail index */ | 61 | int tail; /* entinfo (s/w ring) tail index */ |
59 | struct jr_outentry *outring; /* Base of output ring, DMA-safe */ | 62 | void *outring; /* Base of output ring, DMA-safe */ |
60 | }; | 63 | }; |
61 | 64 | ||
62 | /* | 65 | /* |
@@ -92,11 +95,8 @@ struct caam_drv_private { | |||
92 | Handles of the RNG4 block are initialized | 95 | Handles of the RNG4 block are initialized |
93 | by this driver */ | 96 | by this driver */ |
94 | 97 | ||
95 | struct clk *caam_ipg; | 98 | struct clk_bulk_data *clks; |
96 | struct clk *caam_mem; | 99 | int num_clks; |
97 | struct clk *caam_aclk; | ||
98 | struct clk *caam_emi_slow; | ||
99 | |||
100 | /* | 100 | /* |
101 | * debugfs entries for developer view into driver/device | 101 | * debugfs entries for developer view into driver/device |
102 | * variables at runtime. | 102 | * variables at runtime. |
@@ -215,4 +215,22 @@ DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n"); | |||
215 | DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n"); | 215 | DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n"); |
216 | #endif | 216 | #endif |
217 | 217 | ||
218 | static inline u64 caam_get_dma_mask(struct device *dev) | ||
219 | { | ||
220 | struct device_node *nprop = dev->of_node; | ||
221 | |||
222 | if (caam_ptr_sz != sizeof(u64)) | ||
223 | return DMA_BIT_MASK(32); | ||
224 | |||
225 | if (caam_dpaa2) | ||
226 | return DMA_BIT_MASK(49); | ||
227 | |||
228 | if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring") || | ||
229 | of_device_is_compatible(nprop, "fsl,sec-v5.0")) | ||
230 | return DMA_BIT_MASK(40); | ||
231 | |||
232 | return DMA_BIT_MASK(36); | ||
233 | } | ||
234 | |||
235 | |||
218 | #endif /* INTERN_H */ | 236 | #endif /* INTERN_H */ |
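
caam_get_dma_mask() centralizes the mask selection that was previously duplicated in ctrl.c and jr.c. In summary (the SoC names are illustrative examples, not from the patch):

/*
 *   caam_ptr_sz == 4 (32-bit pointers)    -> DMA_BIT_MASK(32)
 *   caam_dpaa2 (e.g. LS1088A, LS2088A)    -> DMA_BIT_MASK(49)
 *   fsl,sec-v5.0[-job-ring] compatibles   -> DMA_BIT_MASK(40)
 *   everything else (SEC v4.x)            -> DMA_BIT_MASK(36)
 */
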
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index cea811fed320..fc97cde27059 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
@@ -108,25 +108,12 @@ static int caam_reset_hw_jr(struct device *dev) | |||
108 | static int caam_jr_shutdown(struct device *dev) | 108 | static int caam_jr_shutdown(struct device *dev) |
109 | { | 109 | { |
110 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | 110 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); |
111 | dma_addr_t inpbusaddr, outbusaddr; | ||
112 | int ret; | 111 | int ret; |
113 | 112 | ||
114 | ret = caam_reset_hw_jr(dev); | 113 | ret = caam_reset_hw_jr(dev); |
115 | 114 | ||
116 | tasklet_kill(&jrp->irqtask); | 115 | tasklet_kill(&jrp->irqtask); |
117 | 116 | ||
118 | /* Release interrupt */ | ||
119 | free_irq(jrp->irq, dev); | ||
120 | |||
121 | /* Free rings */ | ||
122 | inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); | ||
123 | outbusaddr = rd_reg64(&jrp->rregs->outring_base); | ||
124 | dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, | ||
125 | jrp->inpring, inpbusaddr); | ||
126 | dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, | ||
127 | jrp->outring, outbusaddr); | ||
128 | kfree(jrp->entinfo); | ||
129 | |||
130 | return ret; | 117 | return ret; |
131 | } | 118 | } |
132 | 119 | ||
@@ -159,7 +146,6 @@ static int caam_jr_remove(struct platform_device *pdev) | |||
159 | ret = caam_jr_shutdown(jrdev); | 146 | ret = caam_jr_shutdown(jrdev); |
160 | if (ret) | 147 | if (ret) |
161 | dev_err(jrdev, "Failed to shut down job ring\n"); | 148 | dev_err(jrdev, "Failed to shut down job ring\n"); |
162 | irq_dispose_mapping(jrpriv->irq); | ||
163 | 149 | ||
164 | return ret; | 150 | return ret; |
165 | } | 151 | } |
@@ -224,7 +210,7 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
224 | for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) { | 210 | for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) { |
225 | sw_idx = (tail + i) & (JOBR_DEPTH - 1); | 211 | sw_idx = (tail + i) & (JOBR_DEPTH - 1); |
226 | 212 | ||
227 | if (jrp->outring[hw_idx].desc == | 213 | if (jr_outentry_desc(jrp->outring, hw_idx) == |
228 | caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma)) | 214 | caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma)) |
229 | break; /* found */ | 215 | break; /* found */ |
230 | } | 216 | } |
@@ -233,7 +219,8 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
233 | 219 | ||
234 | /* Unmap just-run descriptor so we can post-process */ | 220 | /* Unmap just-run descriptor so we can post-process */ |
235 | dma_unmap_single(dev, | 221 | dma_unmap_single(dev, |
236 | caam_dma_to_cpu(jrp->outring[hw_idx].desc), | 222 | caam_dma_to_cpu(jr_outentry_desc(jrp->outring, |
223 | hw_idx)), | ||
237 | jrp->entinfo[sw_idx].desc_size, | 224 | jrp->entinfo[sw_idx].desc_size, |
238 | DMA_TO_DEVICE); | 225 | DMA_TO_DEVICE); |
239 | 226 | ||
@@ -244,7 +231,8 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
244 | usercall = jrp->entinfo[sw_idx].callbk; | 231 | usercall = jrp->entinfo[sw_idx].callbk; |
245 | userarg = jrp->entinfo[sw_idx].cbkarg; | 232 | userarg = jrp->entinfo[sw_idx].cbkarg; |
246 | userdesc = jrp->entinfo[sw_idx].desc_addr_virt; | 233 | userdesc = jrp->entinfo[sw_idx].desc_addr_virt; |
247 | userstatus = caam32_to_cpu(jrp->outring[hw_idx].jrstatus); | 234 | userstatus = caam32_to_cpu(jr_outentry_jrstatus(jrp->outring, |
235 | hw_idx)); | ||
248 | 236 | ||
249 | /* | 237 | /* |
250 | * Make sure all information from the job has been obtained | 238 | * Make sure all information from the job has been obtained |
@@ -399,7 +387,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, | |||
399 | head_entry->cbkarg = areq; | 387 | head_entry->cbkarg = areq; |
400 | head_entry->desc_addr_dma = desc_dma; | 388 | head_entry->desc_addr_dma = desc_dma; |
401 | 389 | ||
402 | jrp->inpring[head] = cpu_to_caam_dma(desc_dma); | 390 | jr_inpentry_set(jrp->inpring, head, cpu_to_caam_dma(desc_dma)); |
403 | 391 | ||
404 | /* | 392 | /* |
405 | * Guarantee that the descriptor's DMA address has been written to | 393 | * Guarantee that the descriptor's DMA address has been written to |
@@ -441,35 +429,26 @@ static int caam_jr_init(struct device *dev) | |||
441 | 429 | ||
442 | jrp = dev_get_drvdata(dev); | 430 | jrp = dev_get_drvdata(dev); |
443 | 431 | ||
444 | tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev); | ||
445 | |||
446 | /* Connect job ring interrupt handler. */ | ||
447 | error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, | ||
448 | dev_name(dev), dev); | ||
449 | if (error) { | ||
450 | dev_err(dev, "can't connect JobR %d interrupt (%d)\n", | ||
451 | jrp->ridx, jrp->irq); | ||
452 | goto out_kill_deq; | ||
453 | } | ||
454 | |||
455 | error = caam_reset_hw_jr(dev); | 432 | error = caam_reset_hw_jr(dev); |
456 | if (error) | 433 | if (error) |
457 | goto out_free_irq; | 434 | return error; |
458 | 435 | ||
459 | error = -ENOMEM; | 436 | jrp->inpring = dmam_alloc_coherent(dev, SIZEOF_JR_INPENTRY * |
460 | jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) * | 437 | JOBR_DEPTH, &inpbusaddr, |
461 | JOBR_DEPTH, &inpbusaddr, GFP_KERNEL); | 438 | GFP_KERNEL); |
462 | if (!jrp->inpring) | 439 | if (!jrp->inpring) |
463 | goto out_free_irq; | 440 | return -ENOMEM; |
464 | 441 | ||
465 | jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) * | 442 | jrp->outring = dmam_alloc_coherent(dev, SIZEOF_JR_OUTENTRY * |
466 | JOBR_DEPTH, &outbusaddr, GFP_KERNEL); | 443 | JOBR_DEPTH, &outbusaddr, |
444 | GFP_KERNEL); | ||
467 | if (!jrp->outring) | 445 | if (!jrp->outring) |
468 | goto out_free_inpring; | 446 | return -ENOMEM; |
469 | 447 | ||
470 | jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL); | 448 | jrp->entinfo = devm_kcalloc(dev, JOBR_DEPTH, sizeof(*jrp->entinfo), |
449 | GFP_KERNEL); | ||
471 | if (!jrp->entinfo) | 450 | if (!jrp->entinfo) |
472 | goto out_free_outring; | 451 | return -ENOMEM; |
473 | 452 | ||
474 | for (i = 0; i < JOBR_DEPTH; i++) | 453 | for (i = 0; i < JOBR_DEPTH; i++) |
475 | jrp->entinfo[i].desc_addr_dma = !0; | 454 | jrp->entinfo[i].desc_addr_dma = !0; |
@@ -493,22 +472,24 @@ static int caam_jr_init(struct device *dev) | |||
493 | (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) | | 472 | (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) | |
494 | (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); | 473 | (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); |
495 | 474 | ||
496 | return 0; | 475 | tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev); |
476 | |||
477 | /* Connect job ring interrupt handler. */ | ||
478 | error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED, | ||
479 | dev_name(dev), dev); | ||
480 | if (error) { | ||
481 | dev_err(dev, "can't connect JobR %d interrupt (%d)\n", | ||
482 | jrp->ridx, jrp->irq); | ||
483 | tasklet_kill(&jrp->irqtask); | ||
484 | } | ||
497 | 485 | ||
498 | out_free_outring: | ||
499 | dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, | ||
500 | jrp->outring, outbusaddr); | ||
501 | out_free_inpring: | ||
502 | dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, | ||
503 | jrp->inpring, inpbusaddr); | ||
504 | dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx); | ||
505 | out_free_irq: | ||
506 | free_irq(jrp->irq, dev); | ||
507 | out_kill_deq: | ||
508 | tasklet_kill(&jrp->irqtask); | ||
509 | return error; | 486 | return error; |
510 | } | 487 | } |
511 | 488 | ||
489 | static void caam_jr_irq_dispose_mapping(void *data) | ||
490 | { | ||
491 | irq_dispose_mapping((unsigned long)data); | ||
492 | } | ||
512 | 493 | ||
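
Because devres release callbacks run in reverse order of registration, the managed conversion reproduces the old manual teardown sequence on remove or probe failure. Given the registration order in caam_jr_probe() and caam_jr_init() below, the automatic unwind is:

/*
 * devm_request_irq          -> free_irq()              (registered last)
 * devm_kcalloc              -> kfree(entinfo)
 * dmam_alloc_coherent (x2)  -> dma_free_coherent() of both rings
 * devm_add_action_or_reset  -> irq_dispose_mapping()
 * devm_ioremap              -> iounmap()               (registered first)
 */
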
513 | /* | 494 | /* |
514 | * Probe routine for each detected JobR subsystem. | 495 | * Probe routine for each detected JobR subsystem. |
@@ -520,6 +501,7 @@ static int caam_jr_probe(struct platform_device *pdev) | |||
520 | struct caam_job_ring __iomem *ctrl; | 501 | struct caam_job_ring __iomem *ctrl; |
521 | struct caam_drv_private_jr *jrpriv; | 502 | struct caam_drv_private_jr *jrpriv; |
522 | static int total_jobrs; | 503 | static int total_jobrs; |
504 | struct resource *r; | ||
523 | int error; | 505 | int error; |
524 | 506 | ||
525 | jrdev = &pdev->dev; | 507 | jrdev = &pdev->dev; |
@@ -535,45 +517,43 @@ static int caam_jr_probe(struct platform_device *pdev) | |||
535 | nprop = pdev->dev.of_node; | 517 | nprop = pdev->dev.of_node; |
536 | /* Get configuration properties from device tree */ | 518 | /* Get configuration properties from device tree */ |
537 | /* First, get register page */ | 519 | /* First, get register page */ |
538 | ctrl = of_iomap(nprop, 0); | 520 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
521 | if (!r) { | ||
522 | dev_err(jrdev, "platform_get_resource() failed\n"); | ||
523 | return -ENOMEM; | ||
524 | } | ||
525 | |||
526 | ctrl = devm_ioremap(jrdev, r->start, resource_size(r)); | ||
539 | if (!ctrl) { | 527 | if (!ctrl) { |
540 | dev_err(jrdev, "of_iomap() failed\n"); | 528 | dev_err(jrdev, "devm_ioremap() failed\n"); |
541 | return -ENOMEM; | 529 | return -ENOMEM; |
542 | } | 530 | } |
543 | 531 | ||
544 | jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl; | 532 | jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl; |
545 | 533 | ||
546 | if (sizeof(dma_addr_t) == sizeof(u64)) { | 534 | error = dma_set_mask_and_coherent(jrdev, caam_get_dma_mask(jrdev)); |
547 | if (caam_dpaa2) | ||
548 | error = dma_set_mask_and_coherent(jrdev, | ||
549 | DMA_BIT_MASK(49)); | ||
550 | else if (of_device_is_compatible(nprop, | ||
551 | "fsl,sec-v5.0-job-ring")) | ||
552 | error = dma_set_mask_and_coherent(jrdev, | ||
553 | DMA_BIT_MASK(40)); | ||
554 | else | ||
555 | error = dma_set_mask_and_coherent(jrdev, | ||
556 | DMA_BIT_MASK(36)); | ||
557 | } else { | ||
558 | error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32)); | ||
559 | } | ||
560 | if (error) { | 535 | if (error) { |
561 | dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n", | 536 | dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n", |
562 | error); | 537 | error); |
563 | iounmap(ctrl); | ||
564 | return error; | 538 | return error; |
565 | } | 539 | } |
566 | 540 | ||
567 | /* Identify the interrupt */ | 541 | /* Identify the interrupt */ |
568 | jrpriv->irq = irq_of_parse_and_map(nprop, 0); | 542 | jrpriv->irq = irq_of_parse_and_map(nprop, 0); |
543 | if (!jrpriv->irq) { | ||
544 | dev_err(jrdev, "irq_of_parse_and_map failed\n"); | ||
545 | return -EINVAL; | ||
546 | } | ||
547 | |||
548 | error = devm_add_action_or_reset(jrdev, caam_jr_irq_dispose_mapping, | ||
549 | (void *)(unsigned long)jrpriv->irq); | ||
550 | if (error) | ||
551 | return error; | ||
569 | 552 | ||
570 | /* Now do the platform independent part */ | 553 | /* Now do the platform independent part */ |
571 | error = caam_jr_init(jrdev); /* now turn on hardware */ | 554 | error = caam_jr_init(jrdev); /* now turn on hardware */ |
572 | if (error) { | 555 | if (error) |
573 | irq_dispose_mapping(jrpriv->irq); | ||
574 | iounmap(ctrl); | ||
575 | return error; | 556 | return error; |
576 | } | ||
577 | 557 | ||
578 | jrpriv->dev = jrdev; | 558 | jrpriv->dev = jrdev; |
579 | spin_lock(&driver_data.jr_alloc_lock); | 559 | spin_lock(&driver_data.jr_alloc_lock); |
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c index 48dd3536060d..5a851ddc48fb 100644 --- a/drivers/crypto/caam/key_gen.c +++ b/drivers/crypto/caam/key_gen.c | |||
@@ -15,13 +15,14 @@ void split_key_done(struct device *dev, u32 *desc, u32 err, | |||
15 | void *context) | 15 | void *context) |
16 | { | 16 | { |
17 | struct split_key_result *res = context; | 17 | struct split_key_result *res = context; |
18 | int ecode = 0; | ||
18 | 19 | ||
19 | dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 20 | dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
20 | 21 | ||
21 | if (err) | 22 | if (err) |
22 | caam_jr_strstatus(dev, err); | 23 | ecode = caam_jr_strstatus(dev, err); |
23 | 24 | ||
24 | res->err = err; | 25 | res->err = ecode; |
25 | 26 | ||
26 | complete(&res->completion); | 27 | complete(&res->completion); |
27 | } | 28 | } |
@@ -47,18 +48,20 @@ int gen_split_key(struct device *jrdev, u8 *key_out, | |||
47 | u32 *desc; | 48 | u32 *desc; |
48 | struct split_key_result result; | 49 | struct split_key_result result; |
49 | dma_addr_t dma_addr; | 50 | dma_addr_t dma_addr; |
51 | unsigned int local_max; | ||
50 | int ret = -ENOMEM; | 52 | int ret = -ENOMEM; |
51 | 53 | ||
52 | adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK); | 54 | adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK); |
53 | adata->keylen_pad = split_key_pad_len(adata->algtype & | 55 | adata->keylen_pad = split_key_pad_len(adata->algtype & |
54 | OP_ALG_ALGSEL_MASK); | 56 | OP_ALG_ALGSEL_MASK); |
57 | local_max = max(keylen, adata->keylen_pad); | ||
55 | 58 | ||
56 | dev_dbg(jrdev, "split keylen %d split keylen padded %d\n", | 59 | dev_dbg(jrdev, "split keylen %d split keylen padded %d\n", |
57 | adata->keylen, adata->keylen_pad); | 60 | adata->keylen, adata->keylen_pad); |
58 | print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ", | 61 | print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ", |
59 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); | 62 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); |
60 | 63 | ||
61 | if (adata->keylen_pad > max_keylen) | 64 | if (local_max > max_keylen) |
62 | return -EINVAL; | 65 | return -EINVAL; |
63 | 66 | ||
64 | desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); | 67 | desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); |
@@ -69,8 +72,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, | |||
69 | 72 | ||
70 | memcpy(key_out, key_in, keylen); | 73 | memcpy(key_out, key_in, keylen); |
71 | 74 | ||
72 | dma_addr = dma_map_single(jrdev, key_out, adata->keylen_pad, | 75 | dma_addr = dma_map_single(jrdev, key_out, local_max, DMA_BIDIRECTIONAL); |
73 | DMA_BIDIRECTIONAL); | ||
74 | if (dma_mapping_error(jrdev, dma_addr)) { | 76 | if (dma_mapping_error(jrdev, dma_addr)) { |
75 | dev_err(jrdev, "unable to map key memory\n"); | 77 | dev_err(jrdev, "unable to map key memory\n"); |
76 | goto out_free; | 78 | goto out_free; |
@@ -116,7 +118,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, | |||
116 | adata->keylen_pad, 1); | 118 | adata->keylen_pad, 1); |
117 | } | 119 | } |
118 | 120 | ||
119 | dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL); | 121 | dma_unmap_single(jrdev, dma_addr, local_max, DMA_BIDIRECTIONAL); |
120 | out_free: | 122 | out_free: |
121 | kfree(desc); | 123 | kfree(desc); |
122 | return ret; | 124 | return ret; |
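
The local_max change accounts for both directions of the single DMA_BIDIRECTIONAL mapping: the CAAM first reads the raw key (keylen bytes, memcpy'd into key_out) and then writes back the padded split key (keylen_pad bytes) to the same buffer. For an illustrative 100-byte HMAC key with a 64-byte padded split key:

/* local_max = max(keylen, adata->keylen_pad) = max(100, 64) = 100 bytes mapped */
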
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h index 810f0bef0652..68c1fd5dee5d 100644 --- a/drivers/crypto/caam/pdb.h +++ b/drivers/crypto/caam/pdb.h | |||
@@ -512,7 +512,9 @@ struct rsa_pub_pdb { | |||
512 | dma_addr_t n_dma; | 512 | dma_addr_t n_dma; |
513 | dma_addr_t e_dma; | 513 | dma_addr_t e_dma; |
514 | u32 f_len; | 514 | u32 f_len; |
515 | } __packed; | 515 | }; |
516 | |||
517 | #define SIZEOF_RSA_PUB_PDB (2 * sizeof(u32) + 4 * caam_ptr_sz) | ||
516 | 518 | ||
517 | /** | 519 | /** |
518 | * RSA Decrypt PDB - Private Key Form #1 | 520 | * RSA Decrypt PDB - Private Key Form #1 |
@@ -528,7 +530,9 @@ struct rsa_priv_f1_pdb { | |||
528 | dma_addr_t f_dma; | 530 | dma_addr_t f_dma; |
529 | dma_addr_t n_dma; | 531 | dma_addr_t n_dma; |
530 | dma_addr_t d_dma; | 532 | dma_addr_t d_dma; |
531 | } __packed; | 533 | }; |
534 | |||
535 | #define SIZEOF_RSA_PRIV_F1_PDB (sizeof(u32) + 4 * caam_ptr_sz) | ||
532 | 536 | ||
533 | /** | 537 | /** |
534 | * RSA Decrypt PDB - Private Key Form #2 | 538 | * RSA Decrypt PDB - Private Key Form #2 |
@@ -554,7 +558,9 @@ struct rsa_priv_f2_pdb { | |||
554 | dma_addr_t tmp1_dma; | 558 | dma_addr_t tmp1_dma; |
555 | dma_addr_t tmp2_dma; | 559 | dma_addr_t tmp2_dma; |
556 | u32 p_q_len; | 560 | u32 p_q_len; |
557 | } __packed; | 561 | }; |
562 | |||
563 | #define SIZEOF_RSA_PRIV_F2_PDB (2 * sizeof(u32) + 7 * caam_ptr_sz) | ||
558 | 564 | ||
559 | /** | 565 | /** |
560 | * RSA Decrypt PDB - Private Key Form #3 | 566 | * RSA Decrypt PDB - Private Key Form #3 |
@@ -586,6 +592,8 @@ struct rsa_priv_f3_pdb { | |||
586 | dma_addr_t tmp1_dma; | 592 | dma_addr_t tmp1_dma; |
587 | dma_addr_t tmp2_dma; | 593 | dma_addr_t tmp2_dma; |
588 | u32 p_q_len; | 594 | u32 p_q_len; |
589 | } __packed; | 595 | }; |
596 | |||
597 | #define SIZEOF_RSA_PRIV_F3_PDB (2 * sizeof(u32) + 9 * caam_ptr_sz) | ||
590 | 598 | ||
591 | #endif | 599 | #endif |
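
Dropping __packed means sizeof() of these structs now depends on the host's dma_addr_t, so the SIZEOF_* macros compute the size the descriptor actually needs from the runtime pointer width. For example:

/*
 * SIZEOF_RSA_PUB_PDB = 2 * sizeof(u32) + 4 * caam_ptr_sz
 *                    = 8 + 16 = 24 bytes   (caam_ptr_sz == 4)
 *                    = 8 + 32 = 40 bytes   (caam_ptr_sz == 8)
 */
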
diff --git a/drivers/crypto/caam/pkc_desc.c b/drivers/crypto/caam/pkc_desc.c index 2a8d87ea94bf..0d5ee762e036 100644 --- a/drivers/crypto/caam/pkc_desc.c +++ b/drivers/crypto/caam/pkc_desc.c | |||
@@ -13,7 +13,7 @@ | |||
13 | /* Descriptor for RSA Public operation */ | 13 | /* Descriptor for RSA Public operation */ |
14 | void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb) | 14 | void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb) |
15 | { | 15 | { |
16 | init_job_desc_pdb(desc, 0, sizeof(*pdb)); | 16 | init_job_desc_pdb(desc, 0, SIZEOF_RSA_PUB_PDB); |
17 | append_cmd(desc, pdb->sgf); | 17 | append_cmd(desc, pdb->sgf); |
18 | append_ptr(desc, pdb->f_dma); | 18 | append_ptr(desc, pdb->f_dma); |
19 | append_ptr(desc, pdb->g_dma); | 19 | append_ptr(desc, pdb->g_dma); |
@@ -26,7 +26,7 @@ void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb) | |||
26 | /* Descriptor for RSA Private operation - Private Key Form #1 */ | 26 | /* Descriptor for RSA Private operation - Private Key Form #1 */ |
27 | void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb) | 27 | void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb) |
28 | { | 28 | { |
29 | init_job_desc_pdb(desc, 0, sizeof(*pdb)); | 29 | init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F1_PDB); |
30 | append_cmd(desc, pdb->sgf); | 30 | append_cmd(desc, pdb->sgf); |
31 | append_ptr(desc, pdb->g_dma); | 31 | append_ptr(desc, pdb->g_dma); |
32 | append_ptr(desc, pdb->f_dma); | 32 | append_ptr(desc, pdb->f_dma); |
@@ -39,7 +39,7 @@ void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb) | |||
39 | /* Descriptor for RSA Private operation - Private Key Form #2 */ | 39 | /* Descriptor for RSA Private operation - Private Key Form #2 */ |
40 | void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb) | 40 | void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb) |
41 | { | 41 | { |
42 | init_job_desc_pdb(desc, 0, sizeof(*pdb)); | 42 | init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F2_PDB); |
43 | append_cmd(desc, pdb->sgf); | 43 | append_cmd(desc, pdb->sgf); |
44 | append_ptr(desc, pdb->g_dma); | 44 | append_ptr(desc, pdb->g_dma); |
45 | append_ptr(desc, pdb->f_dma); | 45 | append_ptr(desc, pdb->f_dma); |
@@ -56,7 +56,7 @@ void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb) | |||
56 | /* Descriptor for RSA Private operation - Private Key Form #3 */ | 56 | /* Descriptor for RSA Private operation - Private Key Form #3 */ |
57 | void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb) | 57 | void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb) |
58 | { | 58 | { |
59 | init_job_desc_pdb(desc, 0, sizeof(*pdb)); | 59 | init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F3_PDB); |
60 | append_cmd(desc, pdb->sgf); | 60 | append_cmd(desc, pdb->sgf); |
61 | append_ptr(desc, pdb->g_dma); | 61 | append_ptr(desc, pdb->g_dma); |
62 | append_ptr(desc, pdb->f_dma); | 62 | append_ptr(desc, pdb->f_dma); |
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index 0fe618e3804a..378f627e1d64 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c | |||
@@ -163,7 +163,10 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq, | |||
163 | dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd), | 163 | dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd), |
164 | sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL); | 164 | sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL); |
165 | 165 | ||
166 | drv_req->cbk(drv_req, -EIO); | 166 | if (fd->status) |
167 | drv_req->cbk(drv_req, be32_to_cpu(fd->status)); | ||
168 | else | ||
169 | drv_req->cbk(drv_req, JRSTA_SSRC_QI); | ||
167 | } | 170 | } |
168 | 171 | ||
169 | static struct qman_fq *create_caam_req_fq(struct device *qidev, | 172 | static struct qman_fq *create_caam_req_fq(struct device *qidev, |
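
An enqueue rejection (ERN) thus no longer collapses to -EIO: the callback receives either the frame descriptor's real status or, when that is zero, the synthetic JRSTA_SSRC_QI marker, so requestors can decode every completion through one path. A minimal sketch of such a callback (my_ctx, my_complete and the qidev member are hypothetical):

static void my_req_done(struct caam_drv_req *drv_req, u32 status)
{
        struct my_ctx *ctx = drv_req->app_ctx;
        int ecode = 0;

        if (unlikely(status))
                ecode = caam_jr_strstatus(ctx->qidev, status);

        my_complete(ctx, ecode);
}
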
@@ -574,8 +577,9 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p, | |||
574 | 577 | ||
575 | if (ssrc != JRSTA_SSRC_CCB_ERROR || | 578 | if (ssrc != JRSTA_SSRC_CCB_ERROR || |
576 | err_id != JRSTA_CCBERR_ERRID_ICVCHK) | 579 | err_id != JRSTA_CCBERR_ERRID_ICVCHK) |
577 | dev_err(qidev, "Error: %#x in CAAM response FD\n", | 580 | dev_err_ratelimited(qidev, |
578 | status); | 581 | "Error: %#x in CAAM response FD\n", |
582 | status); | ||
579 | } | 583 | } |
580 | 584 | ||
581 | if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) { | 585 | if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) { |
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h index f93c9c7ed430..db0549549e3b 100644 --- a/drivers/crypto/caam/qi.h +++ b/drivers/crypto/caam/qi.h | |||
@@ -14,32 +14,6 @@ | |||
14 | #include "desc.h" | 14 | #include "desc.h" |
15 | #include "desc_constr.h" | 15 | #include "desc_constr.h" |
16 | 16 | ||
17 | /* | ||
18 | * CAAM hardware constructs a job descriptor which points to a shared descriptor | ||
19 | * (as pointed by context_a of to-CAAM FQ). | ||
20 | * When the job descriptor is executed by DECO, the whole job descriptor | ||
21 | * together with shared descriptor gets loaded in DECO buffer, which is | ||
22 | * 64 words (each 32-bit) long. | ||
23 | * | ||
24 | * The job descriptor constructed by CAAM hardware has the following layout: | ||
25 | * | ||
26 | * HEADER (1 word) | ||
27 | * Shdesc ptr (1 or 2 words) | ||
28 | * SEQ_OUT_PTR (1 word) | ||
29 | * Out ptr (1 or 2 words) | ||
30 | * Out length (1 word) | ||
31 | * SEQ_IN_PTR (1 word) | ||
32 | * In ptr (1 or 2 words) | ||
33 | * In length (1 word) | ||
34 | * | ||
35 | * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer. | ||
36 | * | ||
37 | * Apart from shdesc contents, the total number of words that get loaded in DECO | ||
38 | * buffer are '8' or '11'. The remaining words in DECO buffer can be used for | ||
39 | * storing shared descriptor. | ||
40 | */ | ||
41 | #define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ) | ||
42 | |||
43 | /* Length of a single buffer in the QI driver memory cache */ | 17 | /* Length of a single buffer in the QI driver memory cache */ |
44 | #define CAAM_QI_MEMCACHE_SIZE 768 | 18 | #define CAAM_QI_MEMCACHE_SIZE 768 |
45 | 19 | ||
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 8591914d5c51..05127b70527d 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/bitops.h> | 13 | #include <linux/bitops.h> |
14 | #include <linux/io.h> | 14 | #include <linux/io.h> |
15 | #include <linux/io-64-nonatomic-hi-lo.h> | ||
15 | 16 | ||
16 | /* | 17 | /* |
17 | * Architecture-specific register access methods | 18 | * Architecture-specific register access methods |
@@ -70,6 +71,7 @@ | |||
70 | 71 | ||
71 | extern bool caam_little_end; | 72 | extern bool caam_little_end; |
72 | extern bool caam_imx; | 73 | extern bool caam_imx; |
74 | extern size_t caam_ptr_sz; | ||
73 | 75 | ||
74 | #define caam_to_cpu(len) \ | 76 | #define caam_to_cpu(len) \ |
75 | static inline u##len caam##len ## _to_cpu(u##len val) \ | 77 | static inline u##len caam##len ## _to_cpu(u##len val) \ |
@@ -137,46 +139,38 @@ static inline void clrsetbits_32(void __iomem *reg, u32 clear, u32 set) | |||
137 | * base + 0x0000 : least-significant 32 bits | 139 | * base + 0x0000 : least-significant 32 bits |
138 | * base + 0x0004 : most-significant 32 bits | 140 | * base + 0x0004 : most-significant 32 bits |
139 | */ | 141 | */ |
140 | #ifdef CONFIG_64BIT | ||
141 | static inline void wr_reg64(void __iomem *reg, u64 data) | 142 | static inline void wr_reg64(void __iomem *reg, u64 data) |
142 | { | 143 | { |
143 | if (caam_little_end) | 144 | if (caam_little_end) { |
144 | iowrite64(data, reg); | 145 | if (caam_imx) { |
145 | else | 146 | iowrite32(data >> 32, (u32 __iomem *)(reg)); |
147 | iowrite32(data, (u32 __iomem *)(reg) + 1); | ||
148 | } else { | ||
149 | iowrite64(data, reg); | ||
150 | } | ||
151 | } else { | ||
146 | iowrite64be(data, reg); | 152 | iowrite64be(data, reg); |
153 | } | ||
147 | } | 154 | } |
148 | 155 | ||
149 | static inline u64 rd_reg64(void __iomem *reg) | 156 | static inline u64 rd_reg64(void __iomem *reg) |
150 | { | 157 | { |
151 | if (caam_little_end) | 158 | if (caam_little_end) { |
152 | return ioread64(reg); | 159 | if (caam_imx) { |
153 | else | 160 | u32 low, high; |
154 | return ioread64be(reg); | ||
155 | } | ||
156 | 161 | ||
157 | #else /* CONFIG_64BIT */ | 162 | high = ioread32(reg); |
158 | static inline void wr_reg64(void __iomem *reg, u64 data) | 163 | low = ioread32(reg + sizeof(u32)); |
159 | { | 164 | |
160 | if (!caam_imx && caam_little_end) { | 165 | return low + ((u64)high << 32); |
161 | wr_reg32((u32 __iomem *)(reg) + 1, data >> 32); | 166 | } else { |
162 | wr_reg32((u32 __iomem *)(reg), data); | 167 | return ioread64(reg); |
168 | } | ||
163 | } else { | 169 | } else { |
164 | wr_reg32((u32 __iomem *)(reg), data >> 32); | 170 | return ioread64be(reg); |
165 | wr_reg32((u32 __iomem *)(reg) + 1, data); | ||
166 | } | 171 | } |
167 | } | 172 | } |
168 | 173 | ||
169 | static inline u64 rd_reg64(void __iomem *reg) | ||
170 | { | ||
171 | if (!caam_imx && caam_little_end) | ||
172 | return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 | | ||
173 | (u64)rd_reg32((u32 __iomem *)(reg))); | ||
174 | |||
175 | return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 | | ||
176 | (u64)rd_reg32((u32 __iomem *)(reg) + 1)); | ||
177 | } | ||
178 | #endif /* CONFIG_64BIT */ | ||
179 | |||
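With the #ifdef gone, a single wr_reg64()/rd_reg64() body serves every build, and the i.MX quirk stands out: the most-significant 32 bits live at the lower offset and are written first. A userspace model of just that split follows, with ordinary array stores standing in for iowrite32()/ioread32(); it illustrates the half-word layout only, not real MMIO.

/* Userspace model of the i.MX 64-bit register split: high half at
 * base + 0x0, low half at base + 0x4. Plain stores stand in for the
 * driver's I/O accessors; this is an assumption-free layout demo only.
 */
#include <stdint.h>
#include <stdio.h>

static void wr_reg64_imx(volatile uint32_t *reg, uint64_t data)
{
	reg[0] = (uint32_t)(data >> 32);	/* base + 0x0: high half */
	reg[1] = (uint32_t)data;		/* base + 0x4: low half  */
}

static uint64_t rd_reg64_imx(const volatile uint32_t *reg)
{
	uint32_t high = reg[0];
	uint32_t low = reg[1];

	return low + ((uint64_t)high << 32);
}

int main(void)
{
	uint32_t fake_reg[2];

	wr_reg64_imx(fake_reg, 0x1122334455667788ULL);
	printf("%#llx\n", (unsigned long long)rd_reg64_imx(fake_reg));
	return 0;
}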
180 | static inline u64 cpu_to_caam_dma64(dma_addr_t value) | 174 | static inline u64 cpu_to_caam_dma64(dma_addr_t value) |
181 | { | 175 | { |
182 | if (caam_imx) | 176 | if (caam_imx) |
@@ -195,22 +189,89 @@ static inline u64 caam_dma64_to_cpu(u64 value) | |||
195 | return caam64_to_cpu(value); | 189 | return caam64_to_cpu(value); |
196 | } | 190 | } |
197 | 191 | ||
198 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | 192 | static inline u64 cpu_to_caam_dma(u64 value) |
199 | #define cpu_to_caam_dma(value) cpu_to_caam_dma64(value) | 193 | { |
200 | #define caam_dma_to_cpu(value) caam_dma64_to_cpu(value) | 194 | if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && |
201 | #else | 195 | caam_ptr_sz == sizeof(u64)) |
202 | #define cpu_to_caam_dma(value) cpu_to_caam32(value) | 196 | return cpu_to_caam_dma64(value); |
203 | #define caam_dma_to_cpu(value) caam32_to_cpu(value) | 197 | else |
204 | #endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ | 198 | return cpu_to_caam32(value); |
199 | } | ||
200 | |||
201 | static inline u64 caam_dma_to_cpu(u64 value) | ||
202 | { | ||
203 | if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && | ||
204 | caam_ptr_sz == sizeof(u64)) | ||
205 | return caam_dma64_to_cpu(value); | ||
206 | else | ||
207 | return caam32_to_cpu(value); | ||
208 | } | ||
205 | 209 | ||
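cpu_to_caam_dma()/caam_dma_to_cpu() now choose a converter at run time from caam_ptr_sz instead of at build time from CONFIG_ARCH_DMA_ADDR_T_64BIT, so one kernel image can drive CAAM blocks with either pointer width. A minimal sketch of that dispatch shape, with the converters stubbed out and the kernel's compile-time IS_ENABLED() guard omitted:

/* Sketch of the runtime pointer-size dispatch that replaces the old
 * #ifdef. Converters are identity stubs; only the selection mirrors
 * the driver.
 */
#include <stdint.h>

static unsigned int caam_ptr_sz = sizeof(uint64_t);	/* probed at init */

static uint64_t cpu_to_caam_dma64(uint64_t v) { return v; }	/* stub */
static uint32_t cpu_to_caam32(uint32_t v) { return v; }		/* stub */

static uint64_t cpu_to_caam_dma(uint64_t value)
{
	/* 64-bit conversion only when the device uses 64-bit pointers */
	if (caam_ptr_sz == sizeof(uint64_t))
		return cpu_to_caam_dma64(value);

	return cpu_to_caam32((uint32_t)value);
}

int main(void)
{
	return cpu_to_caam_dma(0x1000) != 0x1000;	/* expect 0 */
}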
206 | /* | 210 | /* |
207 | * jr_outentry | 211 | * jr_outentry |
208 | * Represents each entry in a JobR output ring | 212 | * Represents each entry in a JobR output ring |
209 | */ | 213 | */ |
210 | struct jr_outentry { | 214 | |
211 | dma_addr_t desc;/* Pointer to completed descriptor */ | 215 | static inline void jr_outentry_get(void *outring, int hw_idx, dma_addr_t *desc, |
212 | u32 jrstatus; /* Status for completed descriptor */ | 216 | u32 *jrstatus) |
213 | } __packed; | 217 | { |
218 | |||
219 | if (caam_ptr_sz == sizeof(u32)) { | ||
220 | struct { | ||
221 | u32 desc; | ||
222 | u32 jrstatus; | ||
223 | } __packed *outentry = outring; | ||
224 | |||
225 | *desc = outentry[hw_idx].desc; | ||
226 | *jrstatus = outentry[hw_idx].jrstatus; | ||
227 | } else { | ||
228 | struct { | ||
229 | dma_addr_t desc;/* Pointer to completed descriptor */ | ||
230 | u32 jrstatus; /* Status for completed descriptor */ | ||
231 | } __packed *outentry = outring; | ||
232 | |||
233 | *desc = outentry[hw_idx].desc; | ||
234 | *jrstatus = outentry[hw_idx].jrstatus; | ||
235 | } | ||
236 | } | ||
237 | |||
238 | #define SIZEOF_JR_OUTENTRY (caam_ptr_sz + sizeof(u32)) | ||
239 | |||
240 | static inline dma_addr_t jr_outentry_desc(void *outring, int hw_idx) | ||
241 | { | ||
242 | dma_addr_t desc; | ||
243 | u32 unused; | ||
244 | |||
245 | jr_outentry_get(outring, hw_idx, &desc, &unused); | ||
246 | |||
247 | return desc; | ||
248 | } | ||
249 | |||
250 | static inline u32 jr_outentry_jrstatus(void *outring, int hw_idx) | ||
251 | { | ||
252 | dma_addr_t unused; | ||
253 | u32 jrstatus; | ||
254 | |||
255 | jr_outentry_get(outring, hw_idx, &unused, &jrstatus); | ||
256 | |||
257 | return jrstatus; | ||
258 | } | ||
259 | |||
260 | static inline void jr_inpentry_set(void *inpring, int hw_idx, dma_addr_t val) | ||
261 | { | ||
262 | if (caam_ptr_sz == sizeof(u32)) { | ||
263 | u32 *inpentry = inpring; | ||
264 | |||
265 | inpentry[hw_idx] = val; | ||
266 | } else { | ||
267 | dma_addr_t *inpentry = inpring; | ||
268 | |||
269 | inpentry[hw_idx] = val; | ||
270 | } | ||
271 | } | ||
272 | |||
273 | #define SIZEOF_JR_INPENTRY caam_ptr_sz | ||
274 | |||
214 | 275 | ||
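Replacing the fixed struct jr_outentry with jr_outentry_get()/jr_inpentry_set() and the SIZEOF_JR_* macros turns the ring-entry stride into a run-time property of caam_ptr_sz. A userspace model of the 32-bit layout and the indexed read the accessors reduce to; the ring contents and index below are fabricated for illustration.

/* Userspace model of reading a JobR output ring entry through the new
 * size-dispatched scheme (32-bit pointer layout shown).
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int caam_ptr_sz = sizeof(uint32_t);	/* 32-bit CAAM pointers */

struct outentry32 {
	uint32_t desc;		/* completed descriptor address */
	uint32_t jrstatus;	/* completion status */
} __attribute__((packed));

#define SIZEOF_JR_OUTENTRY (caam_ptr_sz + sizeof(uint32_t))

int main(void)
{
	struct outentry32 ring[4] = { { 0x1000, 0 }, { 0x2000, 0x40000000 } };
	void *outring = ring;
	int hw_idx = 1;

	/* jr_outentry_get() boils down to this indexed read */
	struct outentry32 *e = (struct outentry32 *)outring + hw_idx;

	printf("entry %d: desc=%#x status=%#x (entry size %zu)\n",
	       hw_idx, e->desc, e->jrstatus, SIZEOF_JR_OUTENTRY);
	return 0;
}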
215 | /* Version registers (Era 10+) e80-eff */ | 276 | /* Version registers (Era 10+) e80-eff */ |
216 | struct version_regs { | 277 | struct version_regs { |
@@ -338,6 +399,7 @@ struct caam_perfmon { | |||
338 | u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/ | 399 | u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/ |
339 | #define CTPR_MS_QI_SHIFT 25 | 400 | #define CTPR_MS_QI_SHIFT 25 |
340 | #define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT) | 401 | #define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT) |
402 | #define CTPR_MS_PS BIT(17) | ||
341 | #define CTPR_MS_DPAA2 BIT(13) | 403 | #define CTPR_MS_DPAA2 BIT(13) |
342 | #define CTPR_MS_VIRT_EN_INCL 0x00000001 | 404 | #define CTPR_MS_VIRT_EN_INCL 0x00000001 |
343 | #define CTPR_MS_VIRT_EN_POR 0x00000002 | 405 | #define CTPR_MS_VIRT_EN_POR 0x00000002 |
@@ -641,6 +703,7 @@ struct caam_job_ring { | |||
641 | #define JRSTA_SSRC_CCB_ERROR 0x20000000 | 703 | #define JRSTA_SSRC_CCB_ERROR 0x20000000 |
642 | #define JRSTA_SSRC_JUMP_HALT_USER 0x30000000 | 704 | #define JRSTA_SSRC_JUMP_HALT_USER 0x30000000 |
643 | #define JRSTA_SSRC_DECO 0x40000000 | 705 | #define JRSTA_SSRC_DECO 0x40000000 |
706 | #define JRSTA_SSRC_QI 0x50000000 | ||
644 | #define JRSTA_SSRC_JRERROR 0x60000000 | 707 | #define JRSTA_SSRC_JRERROR 0x60000000 |
645 | #define JRSTA_SSRC_JUMP_HALT_CC 0x70000000 | 708 | #define JRSTA_SSRC_JUMP_HALT_CC 0x70000000 |
646 | 709 | ||
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c index ff3cb1f8f2b6..596ce28b957d 100644 --- a/drivers/crypto/cavium/cpt/cptvf_algs.c +++ b/drivers/crypto/cavium/cpt/cptvf_algs.c | |||
@@ -7,7 +7,7 @@ | |||
7 | #include <crypto/aes.h> | 7 | #include <crypto/aes.h> |
8 | #include <crypto/algapi.h> | 8 | #include <crypto/algapi.h> |
9 | #include <crypto/authenc.h> | 9 | #include <crypto/authenc.h> |
10 | #include <crypto/des.h> | 10 | #include <crypto/internal/des.h> |
11 | #include <crypto/xts.h> | 11 | #include <crypto/xts.h> |
12 | #include <linux/crypto.h> | 12 | #include <linux/crypto.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
@@ -322,31 +322,15 @@ static int cvm_cfb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | |||
322 | static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | 322 | static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, |
323 | u32 keylen) | 323 | u32 keylen) |
324 | { | 324 | { |
325 | u32 flags = crypto_ablkcipher_get_flags(cipher); | 325 | return verify_ablkcipher_des3_key(cipher, key) ?: |
326 | int err; | 326 | cvm_setkey(cipher, key, keylen, DES3_CBC); |
327 | |||
328 | err = __des3_verify_key(&flags, key); | ||
329 | if (unlikely(err)) { | ||
330 | crypto_ablkcipher_set_flags(cipher, flags); | ||
331 | return err; | ||
332 | } | ||
333 | |||
334 | return cvm_setkey(cipher, key, keylen, DES3_CBC); | ||
335 | } | 327 | } |
336 | 328 | ||
337 | static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | 329 | static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, |
338 | u32 keylen) | 330 | u32 keylen) |
339 | { | 331 | { |
340 | u32 flags = crypto_ablkcipher_get_flags(cipher); | 332 | return verify_ablkcipher_des3_key(cipher, key) ?: |
341 | int err; | 333 | cvm_setkey(cipher, key, keylen, DES3_ECB); |
342 | |||
343 | err = __des3_verify_key(&flags, key); | ||
344 | if (unlikely(err)) { | ||
345 | crypto_ablkcipher_set_flags(cipher, flags); | ||
346 | return err; | ||
347 | } | ||
348 | |||
349 | return cvm_setkey(cipher, key, keylen, DES3_ECB); | ||
350 | } | 334 | } |
351 | 335 | ||
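Both setkey helpers now lean on the GNU "a ?: b" extension: when verify_ablkcipher_des3_key() returns a non-zero error it is passed straight through, otherwise the right-hand setkey call runs. A minimal illustration with stand-in functions (builds with GCC or Clang, which support the extension):

/* Minimal illustration of the "a ?: b" chaining: a non-zero result
 * from verify_key() short-circuits past do_setkey(). Both functions
 * are stand-ins, not the kernel helpers.
 */
#include <stdio.h>

static int verify_key(const unsigned char *key) { return key[0] ? 0 : -22; }
static int do_setkey(const unsigned char *key) { (void)key; return 0; }

static int setkey(const unsigned char *key)
{
	return verify_key(key) ?: do_setkey(key);
}

int main(void)
{
	unsigned char good[8] = { 1 }, bad[8] = { 0 };

	printf("good: %d, bad: %d\n", setkey(good), setkey(bad)); /* 0, -22 */
	return 0;
}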
352 | static int cvm_enc_dec_init(struct crypto_tfm *tfm) | 336 | static int cvm_enc_dec_init(struct crypto_tfm *tfm) |
diff --git a/drivers/crypto/cavium/nitrox/Kconfig b/drivers/crypto/cavium/nitrox/Kconfig index dab162af41b8..7b1e751bb9cd 100644 --- a/drivers/crypto/cavium/nitrox/Kconfig +++ b/drivers/crypto/cavium/nitrox/Kconfig | |||
@@ -6,7 +6,7 @@ config CRYPTO_DEV_NITROX | |||
6 | tristate | 6 | tristate |
7 | select CRYPTO_BLKCIPHER | 7 | select CRYPTO_BLKCIPHER |
8 | select CRYPTO_AES | 8 | select CRYPTO_AES |
9 | select CRYPTO_DES | 9 | select CRYPTO_LIB_DES |
10 | select FW_LOADER | 10 | select FW_LOADER |
11 | 11 | ||
12 | config CRYPTO_DEV_NITROX_CNN55XX | 12 | config CRYPTO_DEV_NITROX_CNN55XX |
diff --git a/drivers/crypto/cavium/nitrox/nitrox_csr.h b/drivers/crypto/cavium/nitrox/nitrox_csr.h index a2a452642b38..1c8715ae0488 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_csr.h +++ b/drivers/crypto/cavium/nitrox/nitrox_csr.h | |||
@@ -40,9 +40,77 @@ | |||
40 | #define EMU_FUSE_MAPX(_i) (0x1402708 + ((_i) * 0x40000)) | 40 | #define EMU_FUSE_MAPX(_i) (0x1402708 + ((_i) * 0x40000)) |
41 | 41 | ||
42 | /* UCD registers */ | 42 | /* UCD registers */ |
43 | #define UCD_SE_EID_UCODE_BLOCK_NUMX(_i) (0x12C0000 + ((_i) * 0x1000)) | ||
44 | #define UCD_AE_EID_UCODE_BLOCK_NUMX(_i) (0x12C0008 + ((_i) * 0x800)) | ||
43 | #define UCD_UCODE_LOAD_BLOCK_NUM 0x12C0010 | 45 | #define UCD_UCODE_LOAD_BLOCK_NUM 0x12C0010 |
44 | #define UCD_UCODE_LOAD_IDX_DATAX(_i) (0x12C0018 + ((_i) * 0x20)) | 46 | #define UCD_UCODE_LOAD_IDX_DATAX(_i) (0x12C0018 + ((_i) * 0x20)) |
45 | #define UCD_SE_EID_UCODE_BLOCK_NUMX(_i) (0x12C0000 + ((_i) * 0x1000)) | 47 | #define UCD_SE_CNTX(_i) (0x12C0040 + ((_i) * 0x1000)) |
48 | #define UCD_AE_CNTX(_i) (0x12C0048 + ((_i) * 0x800)) | ||
49 | |||
50 | /* AQM registers */ | ||
51 | #define AQM_CTL 0x1300000 | ||
52 | #define AQM_INT 0x1300008 | ||
53 | #define AQM_DBELL_OVF_LO 0x1300010 | ||
54 | #define AQM_DBELL_OVF_HI 0x1300018 | ||
55 | #define AQM_DBELL_OVF_LO_W1S 0x1300020 | ||
56 | #define AQM_DBELL_OVF_LO_ENA_W1C 0x1300028 | ||
57 | #define AQM_DBELL_OVF_LO_ENA_W1S 0x1300030 | ||
58 | #define AQM_DBELL_OVF_HI_W1S 0x1300038 | ||
59 | #define AQM_DBELL_OVF_HI_ENA_W1C 0x1300040 | ||
60 | #define AQM_DBELL_OVF_HI_ENA_W1S 0x1300048 | ||
61 | #define AQM_DMA_RD_ERR_LO 0x1300050 | ||
62 | #define AQM_DMA_RD_ERR_HI 0x1300058 | ||
63 | #define AQM_DMA_RD_ERR_LO_W1S 0x1300060 | ||
64 | #define AQM_DMA_RD_ERR_LO_ENA_W1C 0x1300068 | ||
65 | #define AQM_DMA_RD_ERR_LO_ENA_W1S 0x1300070 | ||
66 | #define AQM_DMA_RD_ERR_HI_W1S 0x1300078 | ||
67 | #define AQM_DMA_RD_ERR_HI_ENA_W1C 0x1300080 | ||
68 | #define AQM_DMA_RD_ERR_HI_ENA_W1S 0x1300088 | ||
69 | #define AQM_EXEC_NA_LO 0x1300090 | ||
70 | #define AQM_EXEC_NA_HI 0x1300098 | ||
71 | #define AQM_EXEC_NA_LO_W1S 0x13000A0 | ||
72 | #define AQM_EXEC_NA_LO_ENA_W1C 0x13000A8 | ||
73 | #define AQM_EXEC_NA_LO_ENA_W1S 0x13000B0 | ||
74 | #define AQM_EXEC_NA_HI_W1S 0x13000B8 | ||
75 | #define AQM_EXEC_NA_HI_ENA_W1C 0x13000C0 | ||
76 | #define AQM_EXEC_NA_HI_ENA_W1S 0x13000C8 | ||
77 | #define AQM_EXEC_ERR_LO 0x13000D0 | ||
78 | #define AQM_EXEC_ERR_HI 0x13000D8 | ||
79 | #define AQM_EXEC_ERR_LO_W1S 0x13000E0 | ||
80 | #define AQM_EXEC_ERR_LO_ENA_W1C 0x13000E8 | ||
81 | #define AQM_EXEC_ERR_LO_ENA_W1S 0x13000F0 | ||
82 | #define AQM_EXEC_ERR_HI_W1S 0x13000F8 | ||
83 | #define AQM_EXEC_ERR_HI_ENA_W1C 0x1300100 | ||
84 | #define AQM_EXEC_ERR_HI_ENA_W1S 0x1300108 | ||
85 | #define AQM_ECC_INT 0x1300110 | ||
86 | #define AQM_ECC_INT_W1S 0x1300118 | ||
87 | #define AQM_ECC_INT_ENA_W1C 0x1300120 | ||
88 | #define AQM_ECC_INT_ENA_W1S 0x1300128 | ||
89 | #define AQM_ECC_CTL 0x1300130 | ||
90 | #define AQM_BIST_STATUS 0x1300138 | ||
91 | #define AQM_CMD_INF_THRX(x) (0x1300400 + ((x) * 0x8)) | ||
92 | #define AQM_CMD_INFX(x) (0x1300800 + ((x) * 0x8)) | ||
93 | #define AQM_GRP_EXECMSK_LOX(x) (0x1300C00 + ((x) * 0x10)) | ||
94 | #define AQM_GRP_EXECMSK_HIX(x) (0x1300C08 + ((x) * 0x10)) | ||
95 | #define AQM_ACTIVITY_STAT_LO 0x1300C80 | ||
96 | #define AQM_ACTIVITY_STAT_HI 0x1300C88 | ||
97 | #define AQM_Q_CMD_PROCX(x) (0x1301000 + ((x) * 0x8)) | ||
98 | #define AQM_PERF_CTL_LO 0x1301400 | ||
99 | #define AQM_PERF_CTL_HI 0x1301408 | ||
100 | #define AQM_PERF_CNT 0x1301410 | ||
101 | |||
102 | #define AQMQ_DRBLX(x) (0x20000 + ((x) * 0x40000)) | ||
103 | #define AQMQ_QSZX(x) (0x20008 + ((x) * 0x40000)) | ||
104 | #define AQMQ_BADRX(x) (0x20010 + ((x) * 0x40000)) | ||
105 | #define AQMQ_NXT_CMDX(x) (0x20018 + ((x) * 0x40000)) | ||
106 | #define AQMQ_CMD_CNTX(x) (0x20020 + ((x) * 0x40000)) | ||
107 | #define AQMQ_CMP_THRX(x) (0x20028 + ((x) * 0x40000)) | ||
108 | #define AQMQ_CMP_CNTX(x) (0x20030 + ((x) * 0x40000)) | ||
109 | #define AQMQ_TIM_LDX(x) (0x20038 + ((x) * 0x40000)) | ||
110 | #define AQMQ_TIMERX(x) (0x20040 + ((x) * 0x40000)) | ||
111 | #define AQMQ_ENX(x) (0x20048 + ((x) * 0x40000)) | ||
112 | #define AQMQ_ACTIVITY_STATX(x) (0x20050 + ((x) * 0x40000)) | ||
113 | #define AQM_VF_CMP_STATX(x) (0x28000 + ((x) * 0x40000)) | ||
46 | 114 | ||
47 | /* NPS core registers */ | 115 | /* NPS core registers */ |
48 | #define NPS_CORE_GBL_VFCFG 0x1000000 | 116 | #define NPS_CORE_GBL_VFCFG 0x1000000 |
@@ -135,6 +203,171 @@ | |||
135 | #define PEM0_INT 0x1080428 | 203 | #define PEM0_INT 0x1080428 |
136 | 204 | ||
137 | /** | 205 | /** |
206 | * union ucd_core_eid_ucode_block_num - Core EID to Ucode Block Mapping Registers | ||
207 | * @ucode_len: Ucode length identifier: 1 = 32KB, 0 = 64KB | ||
208 | * @ucode_blk: Ucode Block Number | ||
209 | */ | ||
210 | union ucd_core_eid_ucode_block_num { | ||
211 | u64 value; | ||
212 | struct { | ||
213 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
214 | u64 raz_4_63 : 60; | ||
215 | u64 ucode_len : 1; | ||
216 | u64 ucode_blk : 3; | ||
217 | #else | ||
218 | u64 ucode_blk : 3; | ||
219 | u64 ucode_len : 1; | ||
220 | u64 raz_4_63 : 60; | ||
221 | #endif | ||
222 | }; | ||
223 | }; | ||
224 | |||
225 | /** | ||
226 | * union aqm_grp_execmsk_lo - Available AE engines for the group | ||
227 | * @exec_0_to_39: AE engines 0 to 39 status | ||
228 | */ | ||
229 | union aqm_grp_execmsk_lo { | ||
230 | u64 value; | ||
231 | struct { | ||
232 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
233 | u64 raz_40_63 : 24; | ||
234 | u64 exec_0_to_39 : 40; | ||
235 | #else | ||
236 | u64 exec_0_to_39 : 40; | ||
237 | u64 raz_40_63 : 24; | ||
238 | #endif | ||
239 | }; | ||
240 | }; | ||
241 | |||
242 | /** | ||
243 | * union aqm_grp_execmsk_hi - Available AE engines for the group | ||
244 | * @exec_40_to_79: AE engines 40 to 79 status | ||
245 | */ | ||
246 | union aqm_grp_execmsk_hi { | ||
247 | u64 value; | ||
248 | struct { | ||
249 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
250 | u64 raz_40_63 : 24; | ||
251 | u64 exec_40_to_79 : 40; | ||
252 | #else | ||
253 | u64 exec_40_to_79 : 40; | ||
254 | u64 raz_40_63 : 24; | ||
255 | #endif | ||
256 | }; | ||
257 | }; | ||
258 | |||
259 | /** | ||
260 | * union aqmq_drbl - AQM Queue Doorbell Counter Registers | ||
261 | * @dbell_count: Doorbell Counter | ||
262 | */ | ||
263 | union aqmq_drbl { | ||
264 | u64 value; | ||
265 | struct { | ||
266 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
267 | u64 raz_32_63 : 32; | ||
268 | u64 dbell_count : 32; | ||
269 | #else | ||
270 | u64 dbell_count : 32; | ||
271 | u64 raz_32_63 : 32; | ||
272 | #endif | ||
273 | }; | ||
274 | }; | ||
275 | |||
276 | /** | ||
277 | * union aqmq_qsz - AQM Queue Host Queue Size Registers | ||
278 | * @host_queue_size: Size of the host ring, in number of | ||
279 | * 'aqmq_command_s' commands. | ||
280 | */ | ||
281 | union aqmq_qsz { | ||
282 | u64 value; | ||
283 | struct { | ||
284 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
285 | u64 raz_32_63 : 32; | ||
286 | u64 host_queue_size : 32; | ||
287 | #else | ||
288 | u64 host_queue_size : 32; | ||
289 | u64 raz_32_63 : 32; | ||
290 | #endif | ||
291 | }; | ||
292 | }; | ||
293 | |||
294 | /** | ||
295 | * union aqmq_cmp_thr - AQM Queue Commands Completed Threshold Registers | ||
296 | * @commands_completed_threshold: Count of 'aqmq_command_s' commands executed | ||
297 | * by AE engines after which the completion interrupt is asserted. | ||
298 | */ | ||
299 | union aqmq_cmp_thr { | ||
300 | u64 value; | ||
301 | struct { | ||
302 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
303 | u64 raz_32_63 : 32; | ||
304 | u64 commands_completed_threshold : 32; | ||
305 | #else | ||
306 | u64 commands_completed_threshold : 32; | ||
307 | u64 raz_32_63 : 32; | ||
308 | #endif | ||
309 | }; | ||
310 | }; | ||
311 | |||
312 | /** | ||
313 | * union aqmq_cmp_cnt - AQM Queue Commands Completed Count Registers | ||
314 | * @resend: Bit to request completion interrupt Resend. | ||
315 | * @completion_status: Command completion status of the ring. | ||
316 | * @commands_completed_count: Count of 'aqmq_command_s' commands executed by | ||
317 | * AE engines. | ||
318 | */ | ||
319 | union aqmq_cmp_cnt { | ||
320 | u64 value; | ||
321 | struct { | ||
322 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
323 | u64 raz_34_63 : 30; | ||
324 | u64 resend : 1; | ||
325 | u64 completion_status : 1; | ||
326 | u64 commands_completed_count : 32; | ||
327 | #else | ||
328 | u64 commands_completed_count : 32; | ||
329 | u64 completion_status : 1; | ||
330 | u64 resend : 1; | ||
331 | u64 raz_34_63 : 30; | ||
332 | #endif | ||
333 | }; | ||
334 | }; | ||
335 | |||
336 | /** | ||
337 | * union aqmq_en - AQM Queue Enable Registers | ||
338 | * @queue_enable: 1 = AQMQ is enabled, 0 = AQMQ is disabled | ||
339 | */ | ||
340 | union aqmq_en { | ||
341 | u64 value; | ||
342 | struct { | ||
343 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
344 | u64 raz_1_63 : 63; | ||
345 | u64 queue_enable : 1; | ||
346 | #else | ||
347 | u64 queue_enable : 1; | ||
348 | u64 raz_1_63 : 63; | ||
349 | #endif | ||
350 | }; | ||
351 | }; | ||
352 | |||
353 | /** | ||
354 | * union aqmq_activity_stat - AQM Queue Activity Status Registers | ||
355 | * @queue_active: 1 = AQMQ is active, 0 = AQMQ is quiescent | ||
356 | */ | ||
357 | union aqmq_activity_stat { | ||
358 | u64 value; | ||
359 | struct { | ||
360 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
361 | u64 raz_1_63 : 63; | ||
362 | u64 queue_active : 1; | ||
363 | #else | ||
364 | u64 queue_active : 1; | ||
365 | u64 raz_1_63 : 63; | ||
366 | #endif | ||
367 | }; | ||
368 | }; | ||
369 | |||
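All of these unions support the same idiom: compose a 64-bit CSR value field by field, then write .value in a single access. A userspace sketch using the aqmq_qsz layout (little-endian bitfield branch only; a printf stands in for nitrox_write_csr()):

/* Userspace sketch of the union-over-u64 CSR idiom: set named fields,
 * then write .value to the register in one shot.
 */
#include <stdint.h>
#include <stdio.h>

union aqmq_qsz {
	uint64_t value;
	struct {
		uint64_t host_queue_size : 32;
		uint64_t raz_32_63 : 32;
	};
};

int main(void)
{
	union aqmq_qsz qsize;

	qsize.value = 0;		/* clear reserved bits */
	qsize.host_queue_size = 2048;	/* ring length in commands */

	printf("CSR value: %#llx\n", (unsigned long long)qsize.value);
	return 0;
}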
370 | /** | ||
138 | * struct emu_fuse_map - EMU Fuse Map Registers | 371 | * struct emu_fuse_map - EMU Fuse Map Registers |
139 | * @ae_fuse: Fuse settings for AE 19..0 | 372 | * @ae_fuse: Fuse settings for AE 19..0 |
140 | * @se_fuse: Fuse settings for SE 15..0 | 373 | * @se_fuse: Fuse settings for SE 15..0 |
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c index 848ec93d4333..16f7d0bd1303 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c +++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c | |||
@@ -9,7 +9,8 @@ static int firmware_show(struct seq_file *s, void *v) | |||
9 | { | 9 | { |
10 | struct nitrox_device *ndev = s->private; | 10 | struct nitrox_device *ndev = s->private; |
11 | 11 | ||
12 | seq_printf(s, "Version: %s\n", ndev->hw.fw_name); | 12 | seq_printf(s, "Version: %s\n", ndev->hw.fw_name[0]); |
13 | seq_printf(s, "Version: %s\n", ndev->hw.fw_name[1]); | ||
13 | return 0; | 14 | return 0; |
14 | } | 15 | } |
15 | 16 | ||
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h index 0338877b828f..2217a2736c8e 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_dev.h +++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h | |||
@@ -10,6 +10,10 @@ | |||
10 | #define VERSION_LEN 32 | 10 | #define VERSION_LEN 32 |
11 | /* Maximum queues in PF mode */ | 11 | /* Maximum queues in PF mode */ |
12 | #define MAX_PF_QUEUES 64 | 12 | #define MAX_PF_QUEUES 64 |
13 | /* Maximum device queues */ | ||
14 | #define MAX_DEV_QUEUES (MAX_PF_QUEUES) | ||
15 | /* Maximum UCD Blocks */ | ||
16 | #define CNN55XX_MAX_UCD_BLOCKS 8 | ||
13 | 17 | ||
14 | /** | 18 | /** |
15 | * struct nitrox_cmdq - NITROX command queue | 19 | * struct nitrox_cmdq - NITROX command queue |
@@ -74,7 +78,7 @@ struct nitrox_cmdq { | |||
74 | */ | 78 | */ |
75 | struct nitrox_hw { | 79 | struct nitrox_hw { |
76 | char partname[IFNAMSIZ * 2]; | 80 | char partname[IFNAMSIZ * 2]; |
77 | char fw_name[VERSION_LEN]; | 81 | char fw_name[CNN55XX_MAX_UCD_BLOCKS][VERSION_LEN]; |
78 | 82 | ||
79 | int freq; | 83 | int freq; |
80 | u16 vendor_id; | 84 | u16 vendor_id; |
@@ -206,6 +210,7 @@ enum vf_mode { | |||
206 | * @mode: Device mode PF/VF | 210 | * @mode: Device mode PF/VF |
207 | * @ctx_pool: DMA pool for crypto context | 211 | * @ctx_pool: DMA pool for crypto context |
208 | * @pkt_inq: Packet input rings | 212 | * @pkt_inq: Packet input rings |
213 | * @aqmq: AQM command queues | ||
209 | * @qvec: MSI-X queue vectors information | 214 | * @qvec: MSI-X queue vectors information |
210 | * @iov: SR-IOV information | 215 | * @iov: SR-IOV information |
211 | * @num_vecs: number of MSI-X vectors | 216 | * @num_vecs: number of MSI-X vectors |
@@ -232,6 +237,7 @@ struct nitrox_device { | |||
232 | 237 | ||
233 | struct dma_pool *ctx_pool; | 238 | struct dma_pool *ctx_pool; |
234 | struct nitrox_cmdq *pkt_inq; | 239 | struct nitrox_cmdq *pkt_inq; |
240 | struct nitrox_cmdq *aqmq[MAX_DEV_QUEUES] ____cacheline_aligned_in_smp; | ||
235 | 241 | ||
236 | struct nitrox_q_vector *qvec; | 242 | struct nitrox_q_vector *qvec; |
237 | struct nitrox_iov iov; | 243 | struct nitrox_iov iov; |
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c index 3f0df60267a9..34a2f4f30a7e 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_hal.c +++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c | |||
@@ -241,12 +241,12 @@ void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev) | |||
241 | } | 241 | } |
242 | 242 | ||
243 | /** | 243 | /** |
244 | * enable_nps_interrupts - enable NPS interrupts | 244 | * enable_nps_core_interrupts - enable NPS core interrupts |
245 | * @ndev: NITROX device. | 245 | * @ndev: NITROX device. |
246 | * | 246 | * |
247 | * This includes NPS core, packet in and slc interrupts. | 247 | * This includes NPS core interrupts. |
248 | */ | 248 | */ |
249 | static void enable_nps_interrupts(struct nitrox_device *ndev) | 249 | static void enable_nps_core_interrupts(struct nitrox_device *ndev) |
250 | { | 250 | { |
251 | union nps_core_int_ena_w1s core_int; | 251 | union nps_core_int_ena_w1s core_int; |
252 | 252 | ||
@@ -258,18 +258,9 @@ static void enable_nps_interrupts(struct nitrox_device *ndev) | |||
258 | core_int.s.npco_dma_malform = 1; | 258 | core_int.s.npco_dma_malform = 1; |
259 | core_int.s.host_nps_wr_err = 1; | 259 | core_int.s.host_nps_wr_err = 1; |
260 | nitrox_write_csr(ndev, NPS_CORE_INT_ENA_W1S, core_int.value); | 260 | nitrox_write_csr(ndev, NPS_CORE_INT_ENA_W1S, core_int.value); |
261 | |||
262 | /* NPS packet in ring interrupts */ | ||
263 | nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL)); | ||
264 | nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL)); | ||
265 | nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL)); | ||
266 | /* NPS packet slc port interrupts */ | ||
267 | nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL)); | ||
268 | nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL)); | ||
269 | nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0uLL)); | ||
270 | } | 261 | } |
271 | 262 | ||
272 | void nitrox_config_nps_unit(struct nitrox_device *ndev) | 263 | void nitrox_config_nps_core_unit(struct nitrox_device *ndev) |
273 | { | 264 | { |
274 | union nps_core_gbl_vfcfg core_gbl_vfcfg; | 265 | union nps_core_gbl_vfcfg core_gbl_vfcfg; |
275 | 266 | ||
@@ -281,12 +272,149 @@ void nitrox_config_nps_unit(struct nitrox_device *ndev) | |||
281 | core_gbl_vfcfg.s.ilk_disable = 1; | 272 | core_gbl_vfcfg.s.ilk_disable = 1; |
282 | core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF; | 273 | core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF; |
283 | nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value); | 274 | nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value); |
275 | |||
276 | /* enable nps core interrupts */ | ||
277 | enable_nps_core_interrupts(ndev); | ||
278 | } | ||
279 | |||
280 | /** | ||
281 | * enable_nps_pkt_interrupts - enable NPS packet interrupts | ||
282 | * @ndev: NITROX device. | ||
283 | * | ||
284 | * This includes NPS packet in and slc interrupts. | ||
285 | */ | ||
286 | static void enable_nps_pkt_interrupts(struct nitrox_device *ndev) | ||
287 | { | ||
288 | /* NPS packet in ring interrupts */ | ||
289 | nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL)); | ||
290 | nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL)); | ||
291 | nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL)); | ||
292 | /* NPS packet slc port interrupts */ | ||
293 | nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL)); | ||
294 | nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL)); | ||
295 | nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0uLL)); | ||
296 | } | ||
297 | |||
298 | void nitrox_config_nps_pkt_unit(struct nitrox_device *ndev) | ||
299 | { | ||
284 | /* config input and solicit ports */ | 300 | /* config input and solicit ports */ |
285 | nitrox_config_pkt_input_rings(ndev); | 301 | nitrox_config_pkt_input_rings(ndev); |
286 | nitrox_config_pkt_solicit_ports(ndev); | 302 | nitrox_config_pkt_solicit_ports(ndev); |
287 | 303 | ||
288 | /* enable interrupts */ | 304 | /* enable nps packet interrupts */ |
289 | enable_nps_interrupts(ndev); | 305 | enable_nps_pkt_interrupts(ndev); |
306 | } | ||
307 | |||
308 | static void reset_aqm_ring(struct nitrox_device *ndev, int ring) | ||
309 | { | ||
310 | union aqmq_en aqmq_en_reg; | ||
311 | union aqmq_activity_stat activity_stat; | ||
312 | union aqmq_cmp_cnt cmp_cnt; | ||
313 | int max_retries = MAX_CSR_RETRIES; | ||
314 | u64 offset; | ||
315 | |||
316 | /* step 1: disable the queue */ | ||
317 | offset = AQMQ_ENX(ring); | ||
318 | aqmq_en_reg.value = 0; | ||
319 | aqmq_en_reg.queue_enable = 0; | ||
320 | nitrox_write_csr(ndev, offset, aqmq_en_reg.value); | ||
321 | |||
322 | /* step 2: wait for AQMQ_ACTIVITY_STATX[QUEUE_ACTIVE] to clear */ | ||
323 | usleep_range(100, 150); | ||
324 | offset = AQMQ_ACTIVITY_STATX(ring); | ||
325 | do { | ||
326 | activity_stat.value = nitrox_read_csr(ndev, offset); | ||
327 | if (!activity_stat.queue_active) | ||
328 | break; | ||
329 | udelay(50); | ||
330 | } while (max_retries--); | ||
331 | |||
332 | /* step 3: clear commands completed count */ | ||
333 | offset = AQMQ_CMP_CNTX(ring); | ||
334 | cmp_cnt.value = nitrox_read_csr(ndev, offset); | ||
335 | nitrox_write_csr(ndev, offset, cmp_cnt.value); | ||
336 | usleep_range(50, 100); | ||
337 | } | ||
338 | |||
339 | void enable_aqm_ring(struct nitrox_device *ndev, int ring) | ||
340 | { | ||
341 | union aqmq_en aqmq_en_reg; | ||
342 | u64 offset; | ||
343 | |||
344 | offset = AQMQ_ENX(ring); | ||
345 | aqmq_en_reg.value = 0; | ||
346 | aqmq_en_reg.queue_enable = 1; | ||
347 | nitrox_write_csr(ndev, offset, aqmq_en_reg.value); | ||
348 | usleep_range(50, 100); | ||
349 | } | ||
350 | |||
351 | void nitrox_config_aqm_rings(struct nitrox_device *ndev) | ||
352 | { | ||
353 | int ring; | ||
354 | |||
355 | for (ring = 0; ring < ndev->nr_queues; ring++) { | ||
356 | struct nitrox_cmdq *cmdq = ndev->aqmq[ring]; | ||
357 | union aqmq_drbl drbl; | ||
358 | union aqmq_qsz qsize; | ||
359 | union aqmq_cmp_thr cmp_thr; | ||
360 | u64 offset; | ||
361 | |||
362 | /* steps 1 - 3 */ | ||
363 | reset_aqm_ring(ndev, ring); | ||
364 | |||
365 | /* step 4: clear doorbell count of ring */ | ||
366 | offset = AQMQ_DRBLX(ring); | ||
367 | drbl.value = 0; | ||
368 | drbl.dbell_count = 0xFFFFFFFF; | ||
369 | nitrox_write_csr(ndev, offset, drbl.value); | ||
370 | |||
371 | /* step 5: configure host ring details */ | ||
372 | |||
373 | /* set host address for next command of ring */ | ||
374 | offset = AQMQ_NXT_CMDX(ring); | ||
375 | nitrox_write_csr(ndev, offset, 0ULL); | ||
376 | |||
377 | /* set host address of ring base */ | ||
378 | offset = AQMQ_BADRX(ring); | ||
379 | nitrox_write_csr(ndev, offset, cmdq->dma); | ||
380 | |||
381 | /* set ring size */ | ||
382 | offset = AQMQ_QSZX(ring); | ||
383 | qsize.value = 0; | ||
384 | qsize.host_queue_size = ndev->qlen; | ||
385 | nitrox_write_csr(ndev, offset, qsize.value); | ||
386 | |||
387 | /* set command completion threshold */ | ||
388 | offset = AQMQ_CMP_THRX(ring); | ||
389 | cmp_thr.value = 0; | ||
390 | cmp_thr.commands_completed_threshold = 1; | ||
391 | nitrox_write_csr(ndev, offset, cmp_thr.value); | ||
392 | |||
393 | /* step 6: enable the queue */ | ||
394 | enable_aqm_ring(ndev, ring); | ||
395 | } | ||
396 | } | ||
397 | |||
398 | static void enable_aqm_interrupts(struct nitrox_device *ndev) | ||
399 | { | ||
400 | /* clear interrupt enable bits */ | ||
401 | nitrox_write_csr(ndev, AQM_DBELL_OVF_LO_ENA_W1S, (~0ULL)); | ||
402 | nitrox_write_csr(ndev, AQM_DBELL_OVF_HI_ENA_W1S, (~0ULL)); | ||
403 | nitrox_write_csr(ndev, AQM_DMA_RD_ERR_LO_ENA_W1S, (~0ULL)); | ||
404 | nitrox_write_csr(ndev, AQM_DMA_RD_ERR_HI_ENA_W1S, (~0ULL)); | ||
405 | nitrox_write_csr(ndev, AQM_EXEC_NA_LO_ENA_W1S, (~0ULL)); | ||
406 | nitrox_write_csr(ndev, AQM_EXEC_NA_HI_ENA_W1S, (~0ULL)); | ||
407 | nitrox_write_csr(ndev, AQM_EXEC_ERR_LO_ENA_W1S, (~0ULL)); | ||
408 | nitrox_write_csr(ndev, AQM_EXEC_ERR_HI_ENA_W1S, (~0ULL)); | ||
409 | } | ||
410 | |||
411 | void nitrox_config_aqm_unit(struct nitrox_device *ndev) | ||
412 | { | ||
413 | /* config aqm command queues */ | ||
414 | nitrox_config_aqm_rings(ndev); | ||
415 | |||
416 | /* enable aqm interrupts */ | ||
417 | enable_aqm_interrupts(ndev); | ||
290 | } | 418 | } |
291 | 419 | ||
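reset_aqm_ring() bounds its wait for AQMQ_ACTIVITY_STATX[QUEUE_ACTIVE] with a retry budget rather than spinning forever. A standalone sketch of that bounded-poll shape; read_activity() fakes the CSR, and the MAX_CSR_RETRIES value here is an assumption, not the driver's definition.

/* Standalone sketch of a bounded CSR poll: read until the busy flag
 * clears or the retry budget runs out. The register read is faked.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_CSR_RETRIES 30	/* assumption; the driver defines its own */

static int reads;
static bool read_activity(void) { return ++reads < 5; }	/* busy 4 reads */

int main(void)
{
	int max_retries = MAX_CSR_RETRIES;

	do {
		if (!read_activity())
			break;		/* queue went quiescent */
		/* udelay(50) in the driver */
	} while (max_retries--);

	printf("quiesced after %d reads\n", reads);
	return 0;
}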
292 | void nitrox_config_pom_unit(struct nitrox_device *ndev) | 420 | void nitrox_config_pom_unit(struct nitrox_device *ndev) |
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.h b/drivers/crypto/cavium/nitrox/nitrox_hal.h index d6606418ba38..48b0af039099 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_hal.h +++ b/drivers/crypto/cavium/nitrox/nitrox_hal.h | |||
@@ -4,10 +4,13 @@ | |||
4 | 4 | ||
5 | #include "nitrox_dev.h" | 5 | #include "nitrox_dev.h" |
6 | 6 | ||
7 | void nitrox_config_aqm_rings(struct nitrox_device *ndev); | ||
8 | void nitrox_config_aqm_unit(struct nitrox_device *ndev); | ||
7 | void nitrox_config_emu_unit(struct nitrox_device *ndev); | 9 | void nitrox_config_emu_unit(struct nitrox_device *ndev); |
8 | void nitrox_config_pkt_input_rings(struct nitrox_device *ndev); | 10 | void nitrox_config_pkt_input_rings(struct nitrox_device *ndev); |
9 | void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev); | 11 | void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev); |
10 | void nitrox_config_nps_unit(struct nitrox_device *ndev); | 12 | void nitrox_config_nps_core_unit(struct nitrox_device *ndev); |
13 | void nitrox_config_nps_pkt_unit(struct nitrox_device *ndev); | ||
11 | void nitrox_config_pom_unit(struct nitrox_device *ndev); | 14 | void nitrox_config_pom_unit(struct nitrox_device *ndev); |
12 | void nitrox_config_rand_unit(struct nitrox_device *ndev); | 15 | void nitrox_config_rand_unit(struct nitrox_device *ndev); |
13 | void nitrox_config_efl_unit(struct nitrox_device *ndev); | 16 | void nitrox_config_efl_unit(struct nitrox_device *ndev); |
@@ -15,6 +18,7 @@ void nitrox_config_bmi_unit(struct nitrox_device *ndev); | |||
15 | void nitrox_config_bmo_unit(struct nitrox_device *ndev); | 18 | void nitrox_config_bmo_unit(struct nitrox_device *ndev); |
16 | void nitrox_config_lbc_unit(struct nitrox_device *ndev); | 19 | void nitrox_config_lbc_unit(struct nitrox_device *ndev); |
17 | void invalidate_lbc(struct nitrox_device *ndev); | 20 | void invalidate_lbc(struct nitrox_device *ndev); |
21 | void enable_aqm_ring(struct nitrox_device *ndev, int qno); | ||
18 | void enable_pkt_input_ring(struct nitrox_device *ndev, int ring); | 22 | void enable_pkt_input_ring(struct nitrox_device *ndev, int ring); |
19 | void enable_pkt_solicit_port(struct nitrox_device *ndev, int port); | 23 | void enable_pkt_solicit_port(struct nitrox_device *ndev, int port); |
20 | void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode); | 24 | void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode); |
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c index 4ace9bcd603a..5cbc64b851b9 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_lib.c +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c | |||
@@ -19,6 +19,8 @@ | |||
19 | 19 | ||
20 | /* packet input ring alignments */ | 20 | /* packet input ring alignments */ |
21 | #define PKTIN_Q_ALIGN_BYTES 16 | 21 | #define PKTIN_Q_ALIGN_BYTES 16 |
22 | /* AQM Queue input alignments */ | ||
23 | #define AQM_Q_ALIGN_BYTES 32 | ||
22 | 24 | ||
23 | static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes) | 25 | static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes) |
24 | { | 26 | { |
@@ -57,11 +59,15 @@ static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq) | |||
57 | 59 | ||
58 | static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq) | 60 | static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq) |
59 | { | 61 | { |
60 | struct nitrox_device *ndev = cmdq->ndev; | 62 | struct nitrox_device *ndev; |
63 | |||
64 | if (!cmdq) | ||
65 | return; | ||
61 | 66 | ||
62 | if (!cmdq->unalign_base) | 67 | if (!cmdq->unalign_base) |
63 | return; | 68 | return; |
64 | 69 | ||
70 | ndev = cmdq->ndev; | ||
65 | cancel_work_sync(&cmdq->backlog_qflush); | 71 | cancel_work_sync(&cmdq->backlog_qflush); |
66 | 72 | ||
67 | dma_free_coherent(DEV(ndev), cmdq->qsize, | 73 | dma_free_coherent(DEV(ndev), cmdq->qsize, |
@@ -78,6 +84,57 @@ static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq) | |||
78 | cmdq->instr_size = 0; | 84 | cmdq->instr_size = 0; |
79 | } | 85 | } |
80 | 86 | ||
87 | static void nitrox_free_aqm_queues(struct nitrox_device *ndev) | ||
88 | { | ||
89 | int i; | ||
90 | |||
91 | for (i = 0; i < ndev->nr_queues; i++) { | ||
92 | nitrox_cmdq_cleanup(ndev->aqmq[i]); | ||
93 | kzfree(ndev->aqmq[i]); | ||
94 | ndev->aqmq[i] = NULL; | ||
95 | } | ||
96 | } | ||
97 | |||
98 | static int nitrox_alloc_aqm_queues(struct nitrox_device *ndev) | ||
99 | { | ||
100 | int i, err; | ||
101 | |||
102 | for (i = 0; i < ndev->nr_queues; i++) { | ||
103 | struct nitrox_cmdq *cmdq; | ||
104 | u64 offset; | ||
105 | |||
106 | cmdq = kzalloc_node(sizeof(*cmdq), GFP_KERNEL, ndev->node); | ||
107 | if (!cmdq) { | ||
108 | err = -ENOMEM; | ||
109 | goto aqmq_fail; | ||
110 | } | ||
111 | |||
112 | cmdq->ndev = ndev; | ||
113 | cmdq->qno = i; | ||
114 | cmdq->instr_size = sizeof(struct aqmq_command_s); | ||
115 | |||
116 | /* AQM Queue Doorbell Counter Register Address */ | ||
117 | offset = AQMQ_DRBLX(i); | ||
118 | cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset); | ||
119 | /* AQM Queue Commands Completed Count Register Address */ | ||
120 | offset = AQMQ_CMD_CNTX(i); | ||
121 | cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset); | ||
122 | |||
123 | err = nitrox_cmdq_init(cmdq, AQM_Q_ALIGN_BYTES); | ||
124 | if (err) { | ||
125 | kzfree(cmdq); | ||
126 | goto aqmq_fail; | ||
127 | } | ||
128 | ndev->aqmq[i] = cmdq; | ||
129 | } | ||
130 | |||
131 | return 0; | ||
132 | |||
133 | aqmq_fail: | ||
134 | nitrox_free_aqm_queues(ndev); | ||
135 | return err; | ||
136 | } | ||
137 | |||
81 | static void nitrox_free_pktin_queues(struct nitrox_device *ndev) | 138 | static void nitrox_free_pktin_queues(struct nitrox_device *ndev) |
82 | { | 139 | { |
83 | int i; | 140 | int i; |
@@ -222,6 +279,12 @@ int nitrox_common_sw_init(struct nitrox_device *ndev) | |||
222 | if (err) | 279 | if (err) |
223 | destroy_crypto_dma_pool(ndev); | 280 | destroy_crypto_dma_pool(ndev); |
224 | 281 | ||
282 | err = nitrox_alloc_aqm_queues(ndev); | ||
283 | if (err) { | ||
284 | nitrox_free_pktin_queues(ndev); | ||
285 | destroy_crypto_dma_pool(ndev); | ||
286 | } | ||
287 | |||
225 | return err; | 288 | return err; |
226 | } | 289 | } |
227 | 290 | ||
@@ -231,6 +294,7 @@ int nitrox_common_sw_init(struct nitrox_device *ndev) | |||
231 | */ | 294 | */ |
232 | void nitrox_common_sw_cleanup(struct nitrox_device *ndev) | 295 | void nitrox_common_sw_cleanup(struct nitrox_device *ndev) |
233 | { | 296 | { |
297 | nitrox_free_aqm_queues(ndev); | ||
234 | nitrox_free_pktin_queues(ndev); | 298 | nitrox_free_pktin_queues(ndev); |
235 | destroy_crypto_dma_pool(ndev); | 299 | destroy_crypto_dma_pool(ndev); |
236 | } | 300 | } |
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c index fe825d0ef9ca..bc924980e10c 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_main.c +++ b/drivers/crypto/cavium/nitrox/nitrox_main.c | |||
@@ -17,12 +17,17 @@ | |||
17 | 17 | ||
18 | #define CNN55XX_DEV_ID 0x12 | 18 | #define CNN55XX_DEV_ID 0x12 |
19 | #define UCODE_HLEN 48 | 19 | #define UCODE_HLEN 48 |
20 | #define SE_GROUP 0 | 20 | #define DEFAULT_SE_GROUP 0 |
21 | #define DEFAULT_AE_GROUP 0 | ||
21 | 22 | ||
22 | #define DRIVER_VERSION "1.1" | 23 | #define DRIVER_VERSION "1.2" |
24 | #define CNN55XX_UCD_BLOCK_SIZE 32768 | ||
25 | #define CNN55XX_MAX_UCODE_SIZE (CNN55XX_UCD_BLOCK_SIZE * 2) | ||
23 | #define FW_DIR "cavium/" | 26 | #define FW_DIR "cavium/" |
24 | /* SE microcode */ | 27 | /* SE microcode */ |
25 | #define SE_FW FW_DIR "cnn55xx_se.fw" | 28 | #define SE_FW FW_DIR "cnn55xx_se.fw" |
29 | /* AE microcode */ | ||
30 | #define AE_FW FW_DIR "cnn55xx_ae.fw" | ||
26 | 31 | ||
27 | static const char nitrox_driver_name[] = "CNN55XX"; | 32 | static const char nitrox_driver_name[] = "CNN55XX"; |
28 | 33 | ||
@@ -72,10 +77,10 @@ struct ucode { | |||
72 | /** | 77 | /** |
73 | * write_to_ucd_unit - Write Firmware to NITROX UCD unit | 78 | * write_to_ucd_unit - Write Firmware to NITROX UCD unit |
74 | */ | 79 | */ |
75 | static void write_to_ucd_unit(struct nitrox_device *ndev, | 80 | static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size, |
76 | struct ucode *ucode) | 81 | u64 *ucode_data, int block_num) |
77 | { | 82 | { |
78 | u32 code_size = be32_to_cpu(ucode->code_size) * 2; | 83 | u32 code_size; |
79 | u64 offset, data; | 84 | u64 offset, data; |
80 | int i = 0; | 85 | int i = 0; |
81 | 86 | ||
@@ -96,11 +101,12 @@ static void write_to_ucd_unit(struct nitrox_device *ndev, | |||
96 | 101 | ||
97 | /* set the block number */ | 102 | /* set the block number */ |
98 | offset = UCD_UCODE_LOAD_BLOCK_NUM; | 103 | offset = UCD_UCODE_LOAD_BLOCK_NUM; |
99 | nitrox_write_csr(ndev, offset, 0); | 104 | nitrox_write_csr(ndev, offset, block_num); |
100 | 105 | ||
106 | code_size = ucode_size; | ||
101 | code_size = roundup(code_size, 8); | 107 | code_size = roundup(code_size, 8); |
102 | while (code_size) { | 108 | while (code_size) { |
103 | data = ucode->code[i]; | 109 | data = ucode_data[i]; |
104 | /* write 8 bytes at a time */ | 110 | /* write 8 bytes at a time */ |
105 | offset = UCD_UCODE_LOAD_IDX_DATAX(i); | 111 | offset = UCD_UCODE_LOAD_IDX_DATAX(i); |
106 | nitrox_write_csr(ndev, offset, data); | 112 | nitrox_write_csr(ndev, offset, data); |
@@ -108,29 +114,74 @@ static void write_to_ucd_unit(struct nitrox_device *ndev, | |||
108 | i++; | 114 | i++; |
109 | } | 115 | } |
110 | 116 | ||
111 | /* put all SE cores in group 0 */ | ||
112 | offset = POM_GRP_EXECMASKX(SE_GROUP); | ||
113 | nitrox_write_csr(ndev, offset, (~0ULL)); | ||
114 | |||
115 | for (i = 0; i < ndev->hw.se_cores; i++) { | ||
116 | /* | ||
117 | * write block number and firmware length | ||
118 | * bit:<2:0> block number | ||
119 | * bit:3 is set SE uses 32KB microcode | ||
120 | * bit:3 is clear SE uses 64KB microcode | ||
121 | */ | ||
122 | offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i); | ||
123 | nitrox_write_csr(ndev, offset, 0x8); | ||
124 | } | ||
125 | usleep_range(300, 400); | 117 | usleep_range(300, 400); |
126 | } | 118 | } |
127 | 119 | ||
128 | static int nitrox_load_fw(struct nitrox_device *ndev, const char *fw_name) | 120 | static int nitrox_load_fw(struct nitrox_device *ndev) |
129 | { | 121 | { |
130 | const struct firmware *fw; | 122 | const struct firmware *fw; |
123 | const char *fw_name; | ||
131 | struct ucode *ucode; | 124 | struct ucode *ucode; |
132 | int ret; | 125 | u64 *ucode_data; |
126 | u64 offset; | ||
127 | union ucd_core_eid_ucode_block_num core_2_eid_val; | ||
128 | union aqm_grp_execmsk_lo aqm_grp_execmask_lo; | ||
129 | union aqm_grp_execmsk_hi aqm_grp_execmask_hi; | ||
130 | u32 ucode_size; | ||
131 | int ret, i = 0; | ||
132 | |||
133 | fw_name = SE_FW; | ||
134 | dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name); | ||
135 | |||
136 | ret = request_firmware(&fw, fw_name, DEV(ndev)); | ||
137 | if (ret < 0) { | ||
138 | dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name); | ||
139 | return ret; | ||
140 | } | ||
141 | |||
142 | ucode = (struct ucode *)fw->data; | ||
143 | |||
144 | ucode_size = be32_to_cpu(ucode->code_size) * 2; | ||
145 | if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) { | ||
146 | dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n", | ||
147 | ucode_size, fw_name); | ||
148 | release_firmware(fw); | ||
149 | return -EINVAL; | ||
150 | } | ||
151 | ucode_data = ucode->code; | ||
152 | |||
153 | /* copy the firmware version */ | ||
154 | memcpy(&ndev->hw.fw_name[0][0], ucode->version, (VERSION_LEN - 2)); | ||
155 | ndev->hw.fw_name[0][VERSION_LEN - 1] = '\0'; | ||
156 | |||
157 | /* Load SE Firmware on UCD Block 0 */ | ||
158 | write_to_ucd_unit(ndev, ucode_size, ucode_data, 0); | ||
133 | 159 | ||
160 | release_firmware(fw); | ||
161 | |||
162 | /* put all SE cores in DEFAULT_SE_GROUP */ | ||
163 | offset = POM_GRP_EXECMASKX(DEFAULT_SE_GROUP); | ||
164 | nitrox_write_csr(ndev, offset, (~0ULL)); | ||
165 | |||
166 | /* write block number and firmware length | ||
167 | * bit:<2:0> block number | ||
168 | * bit:3 is set SE uses 32KB microcode | ||
169 | * bit:3 is clear SE uses 64KB microcode | ||
170 | */ | ||
171 | core_2_eid_val.value = 0ULL; | ||
172 | core_2_eid_val.ucode_blk = 0; | ||
173 | if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE) | ||
174 | core_2_eid_val.ucode_len = 1; | ||
175 | else | ||
176 | core_2_eid_val.ucode_len = 0; | ||
177 | |||
178 | for (i = 0; i < ndev->hw.se_cores; i++) { | ||
179 | offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i); | ||
180 | nitrox_write_csr(ndev, offset, core_2_eid_val.value); | ||
181 | } | ||
182 | |||
183 | |||
184 | fw_name = AE_FW; | ||
134 | dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name); | 185 | dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name); |
135 | 186 | ||
136 | ret = request_firmware(&fw, fw_name, DEV(ndev)); | 187 | ret = request_firmware(&fw, fw_name, DEV(ndev)); |
@@ -140,13 +191,50 @@ static int nitrox_load_fw(struct nitrox_device *ndev, const char *fw_name) | |||
140 | } | 191 | } |
141 | 192 | ||
142 | ucode = (struct ucode *)fw->data; | 193 | ucode = (struct ucode *)fw->data; |
194 | |||
195 | ucode_size = be32_to_cpu(ucode->code_size) * 2; | ||
196 | if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) { | ||
197 | dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n", | ||
198 | ucode_size, fw_name); | ||
199 | release_firmware(fw); | ||
200 | return -EINVAL; | ||
201 | } | ||
202 | ucode_data = ucode->code; | ||
203 | |||
143 | /* copy the firmware version */ | 204 | /* copy the firmware version */ |
144 | memcpy(ndev->hw.fw_name, ucode->version, (VERSION_LEN - 2)); | 205 | memcpy(&ndev->hw.fw_name[1][0], ucode->version, (VERSION_LEN - 2)); |
145 | ndev->hw.fw_name[VERSION_LEN - 1] = '\0'; | 206 | ndev->hw.fw_name[1][VERSION_LEN - 1] = '\0'; |
207 | |||
208 | /* Load AE Firmware on UCD Block 2 */ | ||
209 | write_to_ucd_unit(ndev, ucode_size, ucode_data, 2); | ||
146 | 210 | ||
147 | write_to_ucd_unit(ndev, ucode); | ||
148 | release_firmware(fw); | 211 | release_firmware(fw); |
149 | 212 | ||
213 | /* put all AE cores in DEFAULT_AE_GROUP */ | ||
214 | offset = AQM_GRP_EXECMSK_LOX(DEFAULT_AE_GROUP); | ||
215 | aqm_grp_execmask_lo.exec_0_to_39 = 0xFFFFFFFFFFULL; | ||
216 | nitrox_write_csr(ndev, offset, aqm_grp_execmask_lo.value); | ||
217 | offset = AQM_GRP_EXECMSK_HIX(DEFAULT_AE_GROUP); | ||
218 | aqm_grp_execmask_hi.exec_40_to_79 = 0xFFFFFFFFFFULL; | ||
219 | nitrox_write_csr(ndev, offset, aqm_grp_execmask_hi.value); | ||
220 | |||
221 | /* write block number and firmware length | ||
222 | * bit:<2:0> block number | ||
223 | * bit:3 is set AE uses 32KB microcode | ||
224 | * bit:3 is clear AE uses 64KB microcode | ||
225 | */ | ||
226 | core_2_eid_val.value = 0ULL; | ||
227 | core_2_eid_val.ucode_blk = 0; | ||
228 | if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE) | ||
229 | core_2_eid_val.ucode_len = 1; | ||
230 | else | ||
231 | core_2_eid_val.ucode_len = 0; | ||
232 | |||
233 | for (i = 0; i < ndev->hw.ae_cores; i++) { | ||
234 | offset = UCD_AE_EID_UCODE_BLOCK_NUMX(i); | ||
235 | nitrox_write_csr(ndev, offset, core_2_eid_val.value); | ||
236 | } | ||
237 | |||
150 | return 0; | 238 | return 0; |
151 | } | 239 | } |
152 | 240 | ||
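The per-core EID value packs the UCD block number into bits 2:0 and sets bit 3 when the image fits a single 32 KB block. A sketch of that computation with the union rebuilt for userspace (little-endian bitfields only); note it reproduces the 0x8 the old code hardcoded for a <=32 KB SE image in block 0.

/* Sketch of the EID value computation: block number in bits 2:0,
 * bit 3 set when the image fits one 32 KB UCD block. Constants and
 * layout mirror the diff; little-endian bitfield branch only.
 */
#include <stdint.h>
#include <stdio.h>

#define CNN55XX_UCD_BLOCK_SIZE 32768

union ucd_core_eid_ucode_block_num {
	uint64_t value;
	struct {
		uint64_t ucode_blk : 3;
		uint64_t ucode_len : 1;
		uint64_t raz_4_63 : 60;
	};
};

static uint64_t eid_value(uint32_t ucode_size, unsigned int block_num)
{
	union ucd_core_eid_ucode_block_num v = { .value = 0 };

	v.ucode_blk = block_num;
	v.ucode_len = (ucode_size <= CNN55XX_UCD_BLOCK_SIZE);	/* 1 = 32KB */
	return v.value;
}

int main(void)
{
	/* SE image, 16K, block 0 -> 0x8, matching the old hardcoded write */
	printf("SE blk0 16K: %#llx\n", (unsigned long long)eid_value(16384, 0));
	printf("AE blk2 48K: %#llx\n", (unsigned long long)eid_value(49152, 2));
	return 0;
}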
@@ -299,7 +387,9 @@ static int nitrox_pf_hw_init(struct nitrox_device *ndev) | |||
299 | /* get cores information */ | 387 | /* get cores information */ |
300 | nitrox_get_hwinfo(ndev); | 388 | nitrox_get_hwinfo(ndev); |
301 | 389 | ||
302 | nitrox_config_nps_unit(ndev); | 390 | nitrox_config_nps_core_unit(ndev); |
391 | nitrox_config_aqm_unit(ndev); | ||
392 | nitrox_config_nps_pkt_unit(ndev); | ||
303 | nitrox_config_pom_unit(ndev); | 393 | nitrox_config_pom_unit(ndev); |
304 | nitrox_config_efl_unit(ndev); | 394 | nitrox_config_efl_unit(ndev); |
305 | /* configure IO units */ | 395 | /* configure IO units */ |
@@ -309,8 +399,8 @@ static int nitrox_pf_hw_init(struct nitrox_device *ndev) | |||
309 | nitrox_config_lbc_unit(ndev); | 399 | nitrox_config_lbc_unit(ndev); |
310 | nitrox_config_rand_unit(ndev); | 400 | nitrox_config_rand_unit(ndev); |
311 | 401 | ||
312 | /* load firmware on SE cores */ | 402 | /* load firmware on cores */ |
313 | err = nitrox_load_fw(ndev, SE_FW); | 403 | err = nitrox_load_fw(ndev); |
314 | if (err) | 404 | if (err) |
315 | return err; | 405 | return err; |
316 | 406 | ||
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h index efdbd0fc3e3b..f69ba02c4d25 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_req.h +++ b/drivers/crypto/cavium/nitrox/nitrox_req.h | |||
@@ -400,6 +400,36 @@ struct nps_pkt_instr { | |||
400 | }; | 400 | }; |
401 | 401 | ||
402 | /** | 402 | /** |
403 | * struct aqmq_command_s - The 32 byte command for AE processing. | ||
404 | * @opcode: Request opcode | ||
405 | * @param1: Request control parameter 1 | ||
406 | * @param2: Request control parameter 2 | ||
407 | * @dlen: Input length | ||
408 | * @dptr: Input pointer points to buffer in remote host | ||
409 | * @rptr: Result pointer points to buffer in remote host | ||
410 | * @grp: AQM Group (0..7) | ||
411 | * @cptr: Context pointer | ||
412 | */ | ||
413 | struct aqmq_command_s { | ||
414 | __be16 opcode; | ||
415 | __be16 param1; | ||
416 | __be16 param2; | ||
417 | __be16 dlen; | ||
418 | __be64 dptr; | ||
419 | __be64 rptr; | ||
420 | union { | ||
421 | __be64 word3; | ||
422 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
423 | u64 grp : 3; | ||
424 | u64 cptr : 61; | ||
425 | #else | ||
426 | u64 cptr : 61; | ||
427 | u64 grp : 3; | ||
428 | #endif | ||
429 | }; | ||
430 | }; | ||
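A userspace sketch of populating this 32-byte command follows. The opcode and addresses are fabricated, the big-endian conversions are omitted, and the cptr/grp bitfields are wrapped in an inner struct here so the two fields occupy distinct bits of word3.

/* Userspace sketch of filling the 32-byte AE command. Values are
 * illustrative; endian handling is dropped, and an inner struct keeps
 * cptr and grp from aliasing within the union.
 */
#include <stdint.h>
#include <string.h>

struct aqmq_command {
	uint16_t opcode;	/* __be16 in the driver */
	uint16_t param1;
	uint16_t param2;
	uint16_t dlen;
	uint64_t dptr;		/* input buffer DMA address */
	uint64_t rptr;		/* result buffer DMA address */
	union {
		uint64_t word3;
		struct {
			uint64_t cptr : 61;	/* context pointer */
			uint64_t grp : 3;	/* AQM group 0..7 */
		};
	};
} __attribute__((packed));

int main(void)
{
	struct aqmq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = 0x42;		/* illustrative opcode */
	cmd.dlen = 64;
	cmd.dptr = 0x10000000;		/* fake DMA addresses */
	cmd.rptr = 0x20000000;
	cmd.grp = 0;			/* DEFAULT_AE_GROUP */

	return sizeof(cmd) != 32;	/* expect the 32-byte layout */
}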
431 | |||
432 | /** | ||
403 | * struct ctx_hdr - Book keeping data about the crypto context | 433 | * struct ctx_hdr - Book keeping data about the crypto context |
404 | * @pool: Pool used to allocate crypto context | 434 | * @pool: Pool used to allocate crypto context |
405 | * @dma: Base DMA address of the crypto context | 435 | * @dma: Base DMA address of the crypto context |
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c index 7e4a5e69085e..3cdce1f0f257 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c +++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c | |||
@@ -7,7 +7,7 @@ | |||
7 | #include <crypto/aes.h> | 7 | #include <crypto/aes.h> |
8 | #include <crypto/skcipher.h> | 8 | #include <crypto/skcipher.h> |
9 | #include <crypto/ctr.h> | 9 | #include <crypto/ctr.h> |
10 | #include <crypto/des.h> | 10 | #include <crypto/internal/des.h> |
11 | #include <crypto/xts.h> | 11 | #include <crypto/xts.h> |
12 | 12 | ||
13 | #include "nitrox_dev.h" | 13 | #include "nitrox_dev.h" |
@@ -257,7 +257,7 @@ static int nitrox_aes_decrypt(struct skcipher_request *skreq) | |||
257 | static int nitrox_3des_setkey(struct crypto_skcipher *cipher, | 257 | static int nitrox_3des_setkey(struct crypto_skcipher *cipher, |
258 | const u8 *key, unsigned int keylen) | 258 | const u8 *key, unsigned int keylen) |
259 | { | 259 | { |
260 | return unlikely(des3_verify_key(cipher, key)) ?: | 260 | return verify_skcipher_des3_key(cipher, key) ?: |
261 | nitrox_skcipher_setkey(cipher, 0, key, keylen); | 261 | nitrox_skcipher_setkey(cipher, 0, key, keylen); |
262 | } | 262 | } |
263 | 263 | ||
diff --git a/drivers/crypto/cavium/nitrox/nitrox_sriov.c b/drivers/crypto/cavium/nitrox/nitrox_sriov.c index bf439d8256ba..43287f8471d1 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_sriov.c +++ b/drivers/crypto/cavium/nitrox/nitrox_sriov.c | |||
@@ -109,6 +109,9 @@ static int nitrox_pf_reinit(struct nitrox_device *ndev) | |||
109 | return err; | 109 | return err; |
110 | } | 110 | } |
111 | 111 | ||
112 | /* configure the AQM queues */ | ||
113 | nitrox_config_aqm_rings(ndev); | ||
114 | |||
112 | /* configure the packet queues */ | 115 | /* configure the packet queues */ |
113 | nitrox_config_pkt_input_rings(ndev); | 116 | nitrox_config_pkt_input_rings(ndev); |
114 | nitrox_config_pkt_solicit_ports(ndev); | 117 | nitrox_config_pkt_solicit_ports(ndev); |
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c index a8447a3cf366..194624b4855b 100644 --- a/drivers/crypto/cavium/zip/zip_main.c +++ b/drivers/crypto/cavium/zip/zip_main.c | |||
@@ -593,6 +593,7 @@ static const struct file_operations zip_stats_fops = { | |||
593 | .owner = THIS_MODULE, | 593 | .owner = THIS_MODULE, |
594 | .open = zip_stats_open, | 594 | .open = zip_stats_open, |
595 | .read = seq_read, | 595 | .read = seq_read, |
596 | .release = single_release, | ||
596 | }; | 597 | }; |
597 | 598 | ||
598 | static int zip_clear_open(struct inode *inode, struct file *file) | 599 | static int zip_clear_open(struct inode *inode, struct file *file) |
@@ -604,6 +605,7 @@ static const struct file_operations zip_clear_fops = { | |||
604 | .owner = THIS_MODULE, | 605 | .owner = THIS_MODULE, |
605 | .open = zip_clear_open, | 606 | .open = zip_clear_open, |
606 | .read = seq_read, | 607 | .read = seq_read, |
608 | .release = single_release, | ||
607 | }; | 609 | }; |
608 | 610 | ||
609 | static int zip_regs_open(struct inode *inode, struct file *file) | 611 | static int zip_regs_open(struct inode *inode, struct file *file) |
@@ -615,6 +617,7 @@ static const struct file_operations zip_regs_fops = { | |||
615 | .owner = THIS_MODULE, | 617 | .owner = THIS_MODULE, |
616 | .open = zip_regs_open, | 618 | .open = zip_regs_open, |
617 | .read = seq_read, | 619 | .read = seq_read, |
620 | .release = single_release, | ||
618 | }; | 621 | }; |
619 | 622 | ||
620 | /* Root directory for thunderx_zip debugfs entry */ | 623 | /* Root directory for thunderx_zip debugfs entry */ |
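The three .release = single_release additions above matter because single_open() allocates seq_file state on every open; without a matching release, each open of these debugfs files leaks that allocation. A minimal kernel-style sketch of the complete pairing (names are illustrative):

/* Minimal kernel-style seq_file debugfs attribute: every single_open()
 * needs the matching single_release(), or each open leaks the seq_file
 * allocation. Names here are illustrative.
 */
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *s, void *v)
{
	seq_puts(s, "example\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,	/* frees what single_open allocated */
};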
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 48f3edc1e3fb..8fec733f567f 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig | |||
@@ -30,6 +30,7 @@ config CRYPTO_DEV_CCP_CRYPTO | |||
30 | select CRYPTO_BLKCIPHER | 30 | select CRYPTO_BLKCIPHER |
31 | select CRYPTO_AUTHENC | 31 | select CRYPTO_AUTHENC |
32 | select CRYPTO_RSA | 32 | select CRYPTO_RSA |
33 | select CRYPTO_LIB_AES | ||
33 | help | 34 | help |
34 | Support for using the cryptographic API with the AMD Cryptographic | 35 | Support for using the cryptographic API with the AMD Cryptographic |
35 | Coprocessor. This module supports offload of SHA and AES algorithms. | 36 | Coprocessor. This module supports offload of SHA and AES algorithms. |
@@ -45,3 +46,11 @@ config CRYPTO_DEV_SP_PSP | |||
45 | management commands in Secure Encrypted Virtualization (SEV) mode, | 46 | management commands in Secure Encrypted Virtualization (SEV) mode, |
46 | along with software-based Trusted Execution Environment (TEE) to | 47 | along with software-based Trusted Execution Environment (TEE) to |
47 | enable third-party trusted applications. | 48 | enable third-party trusted applications. |
49 | |||
50 | config CRYPTO_DEV_CCP_DEBUGFS | ||
51 | bool "Enable CCP Internals in DebugFS" | ||
52 | default n | ||
53 | depends on CRYPTO_DEV_SP_CCP | ||
54 | help | ||
55 | Expose CCP device information such as operation statistics, feature | ||
56 | information, and descriptor queue contents. | ||
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 51d1c0cf66c7..6b86f1e6d634 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile | |||
@@ -5,8 +5,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \ | |||
5 | ccp-ops.o \ | 5 | ccp-ops.o \ |
6 | ccp-dev-v3.o \ | 6 | ccp-dev-v3.o \ |
7 | ccp-dev-v5.o \ | 7 | ccp-dev-v5.o \ |
8 | ccp-dmaengine.o \ | 8 | ccp-dmaengine.o |
9 | ccp-debugfs.o | 9 | ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o |
10 | ccp-$(CONFIG_PCI) += sp-pci.o | 10 | ccp-$(CONFIG_PCI) += sp-pci.o |
11 | ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o | 11 | ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o |
12 | 12 | ||
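Once ccp-debugfs.o is built only under the new Kconfig symbol, callers still need something to compile and link against when it is off; the usual pattern is static inline no-ops in a header. A sketch of that shape; the function names are illustrative, not necessarily the ccp driver's own.

/* Header-side sketch of compiled-out stubs (function names assumed):
 * real prototypes under the Kconfig symbol, inline no-ops otherwise.
 */
struct ccp_device;

#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS
void ccp5_debugfs_setup(struct ccp_device *ccp);
void ccp5_debugfs_destroy(void);
#else
static inline void ccp5_debugfs_setup(struct ccp_device *ccp) { }
static inline void ccp5_debugfs_destroy(void) { }
#endif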
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c index bb7219d36b2c..32f19f402073 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c | |||
@@ -261,6 +261,7 @@ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
261 | ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm)); | 261 | ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm)); |
262 | u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo; | 262 | u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo; |
263 | u64 rb_hi = 0x00, rb_lo = 0x87; | 263 | u64 rb_hi = 0x00, rb_lo = 0x87; |
264 | struct crypto_aes_ctx aes; | ||
264 | __be64 *gk; | 265 | __be64 *gk; |
265 | int ret; | 266 | int ret; |
266 | 267 | ||
@@ -284,14 +285,14 @@ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
284 | ctx->u.aes.key_len = 0; | 285 | ctx->u.aes.key_len = 0; |
285 | 286 | ||
286 | /* Set the key for the AES cipher used to generate the keys */ | 287 | /* Set the key for the AES cipher used to generate the keys */ |
287 | ret = crypto_cipher_setkey(ctx->u.aes.tfm_cipher, key, key_len); | 288 | ret = aes_expandkey(&aes, key, key_len); |
288 | if (ret) | 289 | if (ret) |
289 | return ret; | 290 | return ret; |
290 | 291 | ||
291 | /* Encrypt a block of zeroes - use key area in context */ | 292 | /* Encrypt a block of zeroes - use key area in context */ |
292 | memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key)); | 293 | memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key)); |
293 | crypto_cipher_encrypt_one(ctx->u.aes.tfm_cipher, ctx->u.aes.key, | 294 | aes_encrypt(&aes, ctx->u.aes.key, ctx->u.aes.key); |
294 | ctx->u.aes.key); | 295 | memzero_explicit(&aes, sizeof(aes)); |
295 | 296 | ||
296 | /* Generate K1 and K2 */ | 297 | /* Generate K1 and K2 */ |
297 | k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key)); | 298 | k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key)); |
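With the crypto_cipher gone, setkey computes L = AES_K(0^128) through the AES library and derives K1/K2 by the CMAC doubling step: shift the 16-byte block left one bit and XOR Rb = 0x87 into the last byte when the shifted-out bit was set. A standalone sketch of just the doubling; the AES call is replaced by a fake L.

/* Standalone sketch of CMAC subkey doubling: K1 = dbl(L), K2 = dbl(K1),
 * where dbl() shifts left one bit and conditionally XORs Rb = 0x87.
 * The AES encryption that produces L is stubbed with a fixed block.
 */
#include <stdint.h>
#include <string.h>

static void gf128_double(uint8_t out[16], const uint8_t in[16])
{
	uint8_t carry = in[0] >> 7;	/* MSB of the whole block */

	for (int i = 0; i < 15; i++)
		out[i] = (uint8_t)((in[i] << 1) | (in[i + 1] >> 7));
	out[15] = (uint8_t)(in[15] << 1);

	if (carry)
		out[15] ^= 0x87;	/* Rb for 128-bit blocks */
}

int main(void)
{
	uint8_t l[16] = { 0x80 };	/* pretend AES_K(0^128) = 80 00 .. 00 */
	uint8_t k1[16], k2[16];

	gf128_double(k1, l);		/* K1 = 00 .. 00 87 */
	gf128_double(k2, k1);		/* K2 */
	return memcmp(k1, (uint8_t[16]){ [15] = 0x87 }, 16);	/* 0 expected */
}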
@@ -336,32 +337,15 @@ static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm) | |||
336 | { | 337 | { |
337 | struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); | 338 | struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); |
338 | struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); | 339 | struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); |
339 | struct crypto_cipher *cipher_tfm; | ||
340 | 340 | ||
341 | ctx->complete = ccp_aes_cmac_complete; | 341 | ctx->complete = ccp_aes_cmac_complete; |
342 | ctx->u.aes.key_len = 0; | 342 | ctx->u.aes.key_len = 0; |
343 | 343 | ||
344 | crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx)); | 344 | crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx)); |
345 | 345 | ||
346 | cipher_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_NEED_FALLBACK); | ||
347 | if (IS_ERR(cipher_tfm)) { | ||
348 | pr_warn("could not load aes cipher driver\n"); | ||
349 | return PTR_ERR(cipher_tfm); | ||
350 | } | ||
351 | ctx->u.aes.tfm_cipher = cipher_tfm; | ||
352 | |||
353 | return 0; | 346 | return 0; |
354 | } | 347 | } |
355 | 348 | ||
356 | static void ccp_aes_cmac_cra_exit(struct crypto_tfm *tfm) | ||
357 | { | ||
358 | struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); | ||
359 | |||
360 | if (ctx->u.aes.tfm_cipher) | ||
361 | crypto_free_cipher(ctx->u.aes.tfm_cipher); | ||
362 | ctx->u.aes.tfm_cipher = NULL; | ||
363 | } | ||
364 | |||
365 | int ccp_register_aes_cmac_algs(struct list_head *head) | 349 | int ccp_register_aes_cmac_algs(struct list_head *head) |
366 | { | 350 | { |
367 | struct ccp_crypto_ahash_alg *ccp_alg; | 351 | struct ccp_crypto_ahash_alg *ccp_alg; |
@@ -401,7 +385,6 @@ int ccp_register_aes_cmac_algs(struct list_head *head) | |||
401 | base->cra_ctxsize = sizeof(struct ccp_ctx); | 385 | base->cra_ctxsize = sizeof(struct ccp_ctx); |
402 | base->cra_priority = CCP_CRA_PRIORITY; | 386 | base->cra_priority = CCP_CRA_PRIORITY; |
403 | base->cra_init = ccp_aes_cmac_cra_init; | 387 | base->cra_init = ccp_aes_cmac_cra_init; |
404 | base->cra_exit = ccp_aes_cmac_cra_exit; | ||
405 | base->cra_module = THIS_MODULE; | 388 | base->cra_module = THIS_MODULE; |
406 | 389 | ||
407 | ret = crypto_register_ahash(alg); | 390 | ret = crypto_register_ahash(alg); |
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c index 783ba75e0618..8e4a531f4f70 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c | |||
@@ -116,9 +116,6 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, | |||
116 | if (!ctx->u.aes.key_len) | 116 | if (!ctx->u.aes.key_len) |
117 | return -EINVAL; | 117 | return -EINVAL; |
118 | 118 | ||
119 | if (req->nbytes & (AES_BLOCK_SIZE - 1)) | ||
120 | return -EINVAL; | ||
121 | |||
122 | if (!req->info) | 119 | if (!req->info) |
123 | return -EINVAL; | 120 | return -EINVAL; |
124 | 121 | ||
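Background for the removed test: XTS is defined with ciphertext stealing, so requests need not be block-aligned, and lengths the hardware cannot handle directly can be routed to the driver's existing software fallback. The minimum-length rule, sketched with a hypothetical helper:

#include <linux/types.h>
#include <crypto/aes.h>

/* XTS with ciphertext stealing: only the one-block minimum applies */
static bool xts_req_len_ok(unsigned int nbytes)
{
	return nbytes >= AES_BLOCK_SIZE;
}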
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c index 5f05f834c7cd..d2c49b2f0323 100644 --- a/drivers/crypto/ccp/ccp-crypto-des3.c +++ b/drivers/crypto/ccp/ccp-crypto-des3.c | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <linux/crypto.h> | 14 | #include <linux/crypto.h> |
15 | #include <crypto/algapi.h> | 15 | #include <crypto/algapi.h> |
16 | #include <crypto/scatterwalk.h> | 16 | #include <crypto/scatterwalk.h> |
17 | #include <crypto/des.h> | 17 | #include <crypto/internal/des.h> |
18 | 18 | ||
19 | #include "ccp-crypto.h" | 19 | #include "ccp-crypto.h" |
20 | 20 | ||
@@ -39,11 +39,10 @@ static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | |||
39 | struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm)); | 39 | struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm)); |
40 | struct ccp_crypto_ablkcipher_alg *alg = | 40 | struct ccp_crypto_ablkcipher_alg *alg = |
41 | ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm)); | 41 | ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm)); |
42 | u32 *flags = &tfm->base.crt_flags; | ||
43 | int err; | 42 | int err; |
44 | 43 | ||
45 | err = __des3_verify_key(flags, key); | 44 | err = verify_ablkcipher_des3_key(tfm, key); |
46 | if (unlikely(err)) | 45 | if (err) |
47 | return err; | 46 | return err; |
48 | 47 | ||
49 | /* It's not clear that there is any support for a keysize of 112. | 48 | /* It's not clear that there is any support for a keysize of 112. |
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 818096490829..8ee4cb45a3f3 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c | |||
@@ -405,8 +405,10 @@ static int ccp_crypto_init(void) | |||
405 | int ret; | 405 | int ret; |
406 | 406 | ||
407 | ret = ccp_present(); | 407 | ret = ccp_present(); |
408 | if (ret) | 408 | if (ret) { |
409 | pr_err("Cannot load: there are no available CCPs\n"); | ||
409 | return ret; | 410 | return ret; |
411 | } | ||
410 | 412 | ||
411 | spin_lock_init(&req_queue_lock); | 413 | spin_lock_init(&req_queue_lock); |
412 | INIT_LIST_HEAD(&req_queue.cmds); | 414 | INIT_LIST_HEAD(&req_queue.cmds); |
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index 622b34c17643..9015b5da6ba3 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h | |||
@@ -12,7 +12,6 @@ | |||
12 | 12 | ||
13 | #include <linux/list.h> | 13 | #include <linux/list.h> |
14 | #include <linux/wait.h> | 14 | #include <linux/wait.h> |
15 | #include <linux/pci.h> | ||
16 | #include <linux/ccp.h> | 15 | #include <linux/ccp.h> |
17 | #include <crypto/algapi.h> | 16 | #include <crypto/algapi.h> |
18 | #include <crypto/aes.h> | 17 | #include <crypto/aes.h> |
@@ -24,6 +23,10 @@ | |||
24 | #include <crypto/akcipher.h> | 23 | #include <crypto/akcipher.h> |
25 | #include <crypto/internal/rsa.h> | 24 | #include <crypto/internal/rsa.h> |
26 | 25 | ||
26 | /* We want the module name in front of our messages */ | ||
27 | #undef pr_fmt | ||
28 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
29 | |||
27 | #define CCP_LOG_LEVEL KERN_INFO | 30 | #define CCP_LOG_LEVEL KERN_INFO |
28 | 31 | ||
29 | #define CCP_CRA_PRIORITY 300 | 32 | #define CCP_CRA_PRIORITY 300 |
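With the pr_fmt override above, every pr_*() call in files that include this header picks up the module-name prefix automatically; that is what tags the new pr_err() in ccp_crypto_init(). Illustrative effect (the exact prefix is whatever KBUILD_MODNAME expands to for this module):

pr_err("Cannot load: there are no available CCPs\n");
/* emits: "<modname>: Cannot load: there are no available CCPs" */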
@@ -87,9 +90,6 @@ struct ccp_aes_ctx { | |||
87 | /* Fallback cipher for XTS with unsupported unit sizes */ | 90 | /* Fallback cipher for XTS with unsupported unit sizes */ |
88 | struct crypto_sync_skcipher *tfm_skcipher; | 91 | struct crypto_sync_skcipher *tfm_skcipher; |
89 | 92 | ||
90 | /* Cipher used to generate CMAC K1/K2 keys */ | ||
91 | struct crypto_cipher *tfm_cipher; | ||
92 | |||
93 | enum ccp_engine engine; | 93 | enum ccp_engine engine; |
94 | enum ccp_aes_type type; | 94 | enum ccp_aes_type type; |
95 | enum ccp_aes_mode mode; | 95 | enum ccp_aes_mode mode; |
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c index 2b7d47ed5c74..0186b3df4c87 100644 --- a/drivers/crypto/ccp/ccp-dev-v3.c +++ b/drivers/crypto/ccp/ccp-dev-v3.c | |||
@@ -10,7 +10,6 @@ | |||
10 | 10 | ||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/pci.h> | ||
14 | #include <linux/kthread.h> | 13 | #include <linux/kthread.h> |
15 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
16 | #include <linux/ccp.h> | 15 | #include <linux/ccp.h> |
@@ -379,7 +378,7 @@ static int ccp_init(struct ccp_device *ccp) | |||
379 | /* Find available queues */ | 378 | /* Find available queues */ |
380 | ccp->qim = 0; | 379 | ccp->qim = 0; |
381 | qmr = ioread32(ccp->io_regs + Q_MASK_REG); | 380 | qmr = ioread32(ccp->io_regs + Q_MASK_REG); |
382 | for (i = 0; i < MAX_HW_QUEUES; i++) { | 381 | for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) { |
383 | if (!(qmr & (1 << i))) | 382 | if (!(qmr & (1 << i))) |
384 | continue; | 383 | continue; |
385 | 384 | ||
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 217e41bbadaf..57eb53b8ac21 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c | |||
@@ -2,16 +2,13 @@ | |||
2 | /* | 2 | /* |
3 | * AMD Cryptographic Coprocessor (CCP) driver | 3 | * AMD Cryptographic Coprocessor (CCP) driver |
4 | * | 4 | * |
5 | * Copyright (C) 2016,2017 Advanced Micro Devices, Inc. | 5 | * Copyright (C) 2016,2019 Advanced Micro Devices, Inc. |
6 | * | 6 | * |
7 | * Author: Gary R Hook <gary.hook@amd.com> | 7 | * Author: Gary R Hook <gary.hook@amd.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
12 | #include <linux/pci.h> | ||
13 | #include <linux/kthread.h> | 11 | #include <linux/kthread.h> |
14 | #include <linux/debugfs.h> | ||
15 | #include <linux/dma-mapping.h> | 12 | #include <linux/dma-mapping.h> |
16 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
17 | #include <linux/compiler.h> | 14 | #include <linux/compiler.h> |
@@ -792,8 +789,7 @@ static int ccp5_init(struct ccp_device *ccp) | |||
792 | 789 | ||
793 | /* Find available queues */ | 790 | /* Find available queues */ |
794 | qmr = ioread32(ccp->io_regs + Q_MASK_REG); | 791 | qmr = ioread32(ccp->io_regs + Q_MASK_REG); |
795 | for (i = 0; i < MAX_HW_QUEUES; i++) { | 792 | for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) { |
796 | |||
797 | if (!(qmr & (1 << i))) | 793 | if (!(qmr & (1 << i))) |
798 | continue; | 794 | continue; |
799 | 795 | ||
@@ -806,6 +802,7 @@ static int ccp5_init(struct ccp_device *ccp) | |||
806 | if (!dma_pool) { | 802 | if (!dma_pool) { |
807 | dev_err(dev, "unable to allocate dma pool\n"); | 803 | dev_err(dev, "unable to allocate dma pool\n"); |
808 | ret = -ENOMEM; | 804 | ret = -ENOMEM; |
805 | goto e_pool; | ||
809 | } | 806 | } |
810 | 807 | ||
811 | cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; | 808 | cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; |
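A small bug fix hides in the hunk above: when dma_pool_create() failed, the old code set ret = -ENOMEM but fell through and kept initializing the queue. The added goto routes the failure to the cleanup label; a sketch of the corrected shape, with the dma_pool_create() arguments approximated from the driver:

dma_pool = dma_pool_create(dma_pool_name, dev, CCP_DMAPOOL_MAX_SIZE,
			   CCP_DMAPOOL_ALIGN, 0);
if (!dma_pool) {
	dev_err(dev, "unable to allocate dma pool\n");
	ret = -ENOMEM;
	goto e_pool;	/* previously missing: execution fell through */
}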
@@ -819,9 +816,9 @@ static int ccp5_init(struct ccp_device *ccp) | |||
819 | /* Page alignment satisfies our needs for N <= 128 */ | 816 | /* Page alignment satisfies our needs for N <= 128 */ |
820 | BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); | 817 | BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); |
821 | cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); | 818 | cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); |
822 | cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize, | 819 | cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, |
823 | &cmd_q->qbase_dma, | 820 | &cmd_q->qbase_dma, |
824 | GFP_KERNEL); | 821 | GFP_KERNEL); |
825 | if (!cmd_q->qbase) { | 822 | if (!cmd_q->qbase) { |
826 | dev_err(dev, "unable to allocate command queue\n"); | 823 | dev_err(dev, "unable to allocate command queue\n"); |
827 | ret = -ENOMEM; | 824 | ret = -ENOMEM; |
@@ -970,8 +967,10 @@ static int ccp5_init(struct ccp_device *ccp) | |||
970 | if (ret) | 967 | if (ret) |
971 | goto e_hwrng; | 968 | goto e_hwrng; |
972 | 969 | ||
970 | #ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS | ||
973 | /* Set up debugfs entries */ | 971 | /* Set up debugfs entries */ |
974 | ccp5_debugfs_setup(ccp); | 972 | ccp5_debugfs_setup(ccp); |
973 | #endif | ||
975 | 974 | ||
976 | return 0; | 975 | return 0; |
977 | 976 | ||
@@ -995,7 +994,6 @@ e_pool: | |||
995 | 994 | ||
996 | static void ccp5_destroy(struct ccp_device *ccp) | 995 | static void ccp5_destroy(struct ccp_device *ccp) |
997 | { | 996 | { |
998 | struct device *dev = ccp->dev; | ||
999 | struct ccp_cmd_queue *cmd_q; | 997 | struct ccp_cmd_queue *cmd_q; |
1000 | struct ccp_cmd *cmd; | 998 | struct ccp_cmd *cmd; |
1001 | unsigned int i; | 999 | unsigned int i; |
@@ -1009,11 +1007,13 @@ static void ccp5_destroy(struct ccp_device *ccp) | |||
1009 | /* Remove this device from the list of available units first */ | 1007 | /* Remove this device from the list of available units first */ |
1010 | ccp_del_device(ccp); | 1008 | ccp_del_device(ccp); |
1011 | 1009 | ||
1010 | #ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS | ||
1012 | /* We're in the process of tearing down the entire driver; | 1011 | /* We're in the process of tearing down the entire driver; |
1013 | * when all the devices are gone clean up debugfs | 1012 | * when all the devices are gone clean up debugfs |
1014 | */ | 1013 | */ |
1015 | if (ccp_present()) | 1014 | if (ccp_present()) |
1016 | ccp5_debugfs_destroy(); | 1015 | ccp5_debugfs_destroy(); |
1016 | #endif | ||
1017 | 1017 | ||
1018 | /* Disable and clear interrupts */ | 1018 | /* Disable and clear interrupts */ |
1019 | ccp5_disable_queue_interrupts(ccp); | 1019 | ccp5_disable_queue_interrupts(ccp); |
@@ -1036,12 +1036,6 @@ static void ccp5_destroy(struct ccp_device *ccp) | |||
1036 | 1036 | ||
1037 | sp_free_ccp_irq(ccp->sp, ccp); | 1037 | sp_free_ccp_irq(ccp->sp, ccp); |
1038 | 1038 | ||
1039 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
1040 | cmd_q = &ccp->cmd_q[i]; | ||
1041 | dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, | ||
1042 | cmd_q->qbase_dma); | ||
1043 | } | ||
1044 | |||
1045 | /* Flush the cmd and backlog queue */ | 1039 | /* Flush the cmd and backlog queue */ |
1046 | while (!list_empty(&ccp->cmd)) { | 1040 | while (!list_empty(&ccp->cmd)) { |
1047 | /* Invoke the callback directly with an error code */ | 1041 | /* Invoke the callback directly with an error code */ |
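Queue memory now comes from dmam_alloc_coherent(), the device-managed variant, which is why ccp5_destroy() can drop its dma_free_coherent() loop: the allocation is released automatically when the device is unbound. A minimal sketch of the pattern, names as in the hunks above:

/* inside ccp5_init(); freed by the devres core, not by the driver */
cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize,
				   &cmd_q->qbase_dma, GFP_KERNEL);
if (!cmd_q->qbase)
	return -ENOMEM;	/* no explicit free needed on any exit path */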
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index edefa669153f..73acf0fdb793 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c | |||
@@ -2,12 +2,13 @@ | |||
2 | /* | 2 | /* |
3 | * AMD Cryptographic Coprocessor (CCP) driver | 3 | * AMD Cryptographic Coprocessor (CCP) driver |
4 | * | 4 | * |
5 | * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. | 5 | * Copyright (C) 2013,2019 Advanced Micro Devices, Inc. |
6 | * | 6 | * |
7 | * Author: Tom Lendacky <thomas.lendacky@amd.com> | 7 | * Author: Tom Lendacky <thomas.lendacky@amd.com> |
8 | * Author: Gary R Hook <gary.hook@amd.com> | 8 | * Author: Gary R Hook <gary.hook@amd.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/module.h> | ||
11 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
12 | #include <linux/kthread.h> | 13 | #include <linux/kthread.h> |
13 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
@@ -19,6 +20,7 @@ | |||
19 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
20 | #include <linux/hw_random.h> | 21 | #include <linux/hw_random.h> |
21 | #include <linux/cpu.h> | 22 | #include <linux/cpu.h> |
23 | #include <linux/atomic.h> | ||
22 | #ifdef CONFIG_X86 | 24 | #ifdef CONFIG_X86 |
23 | #include <asm/cpu_device_id.h> | 25 | #include <asm/cpu_device_id.h> |
24 | #endif | 26 | #endif |
@@ -26,6 +28,19 @@ | |||
26 | 28 | ||
27 | #include "ccp-dev.h" | 29 | #include "ccp-dev.h" |
28 | 30 | ||
31 | #define MAX_CCPS 32 | ||
32 | |||
33 | /* Limit CCP use to a specified number of queues per device */ | ||
34 | static unsigned int nqueues = 0; | ||
35 | module_param(nqueues, uint, 0444); | ||
36 | MODULE_PARM_DESC(nqueues, "Number of queues per CCP (minimum 1; default: all available)"); | ||
37 | |||
38 | /* Limit the maximum number of configured CCPs */ | ||
39 | static atomic_t dev_count = ATOMIC_INIT(0); | ||
40 | static unsigned int max_devs = MAX_CCPS; | ||
41 | module_param(max_devs, uint, 0444); | ||
42 | MODULE_PARM_DESC(max_devs, "Maximum number of CCPs to enable (default: all; 0 disables all CCPs)"); | ||
43 | |||
29 | struct ccp_tasklet_data { | 44 | struct ccp_tasklet_data { |
30 | struct completion completion; | 45 | struct completion completion; |
31 | struct ccp_cmd *cmd; | 46 | struct ccp_cmd *cmd; |
@@ -594,12 +609,24 @@ int ccp_dev_init(struct sp_device *sp) | |||
594 | struct ccp_device *ccp; | 609 | struct ccp_device *ccp; |
595 | int ret; | 610 | int ret; |
596 | 611 | ||
612 | /* | ||
613 | * Count how many CCPs we have seen so far, and skip | ||
614 | * initialization once the configured maximum is reached | ||
615 | */ | ||
616 | if (atomic_inc_return(&dev_count) > max_devs) | ||
617 | return 0; /* don't fail the load */ | ||
618 | |||
597 | ret = -ENOMEM; | 619 | ret = -ENOMEM; |
598 | ccp = ccp_alloc_struct(sp); | 620 | ccp = ccp_alloc_struct(sp); |
599 | if (!ccp) | 621 | if (!ccp) |
600 | goto e_err; | 622 | goto e_err; |
601 | sp->ccp_data = ccp; | 623 | sp->ccp_data = ccp; |
602 | 624 | ||
625 | if (!nqueues || (nqueues > MAX_HW_QUEUES)) | ||
626 | ccp->max_q_count = MAX_HW_QUEUES; | ||
627 | else | ||
628 | ccp->max_q_count = nqueues; | ||
629 | |||
603 | ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata; | 630 | ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata; |
604 | if (!ccp->vdata || !ccp->vdata->version) { | 631 | if (!ccp->vdata || !ccp->vdata->version) { |
605 | ret = -ENODEV; | 632 | ret = -ENODEV; |
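Taken together, the two new parameters form a probe-time policy: devices beyond max_devs are skipped without failing the load, and nqueues (when between 1 and MAX_HW_QUEUES) caps the command queues each accepted device brings up. A condensed sketch, names as in the hunks above:

if (atomic_inc_return(&dev_count) > max_devs)
	return 0;	/* skip this CCP but keep the driver loaded */

ccp->max_q_count = (nqueues && nqueues <= MAX_HW_QUEUES) ?
		   nqueues : MAX_HW_QUEUES;

Loading with, say, modprobe ccp max_devs=1 nqueues=2 (illustrative) would then bring up a single CCP with two command queues.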
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 5e624920fd99..3f68262d9ab4 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h | |||
@@ -12,11 +12,11 @@ | |||
12 | #define __CCP_DEV_H__ | 12 | #define __CCP_DEV_H__ |
13 | 13 | ||
14 | #include <linux/device.h> | 14 | #include <linux/device.h> |
15 | #include <linux/pci.h> | ||
16 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
17 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
18 | #include <linux/list.h> | 17 | #include <linux/list.h> |
19 | #include <linux/wait.h> | 18 | #include <linux/wait.h> |
19 | #include <linux/dma-direction.h> | ||
20 | #include <linux/dmapool.h> | 20 | #include <linux/dmapool.h> |
21 | #include <linux/hw_random.h> | 21 | #include <linux/hw_random.h> |
22 | #include <linux/bitops.h> | 22 | #include <linux/bitops.h> |
@@ -379,6 +379,7 @@ struct ccp_device { | |||
379 | */ | 379 | */ |
380 | struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES]; | 380 | struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES]; |
381 | unsigned int cmd_q_count; | 381 | unsigned int cmd_q_count; |
382 | unsigned int max_q_count; | ||
382 | 383 | ||
383 | /* Support for the CCP True RNG | 384 | /* Support for the CCP True RNG |
384 | */ | 385 | */ |
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index 7f22a45bbc11..a54f9367a580 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c | |||
@@ -2,13 +2,14 @@ | |||
2 | /* | 2 | /* |
3 | * AMD Cryptographic Coprocessor (CCP) driver | 3 | * AMD Cryptographic Coprocessor (CCP) driver |
4 | * | 4 | * |
5 | * Copyright (C) 2016,2017 Advanced Micro Devices, Inc. | 5 | * Copyright (C) 2016,2019 Advanced Micro Devices, Inc. |
6 | * | 6 | * |
7 | * Author: Gary R Hook <gary.hook@amd.com> | 7 | * Author: Gary R Hook <gary.hook@amd.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/dma-mapping.h> | ||
12 | #include <linux/dmaengine.h> | 13 | #include <linux/dmaengine.h> |
13 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
14 | #include <linux/mutex.h> | 15 | #include <linux/mutex.h> |
@@ -35,6 +36,10 @@ static unsigned int dma_chan_attr = CCP_DMA_DFLT; | |||
35 | module_param(dma_chan_attr, uint, 0444); | 36 | module_param(dma_chan_attr, uint, 0444); |
36 | MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public"); | 37 | MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public"); |
37 | 38 | ||
39 | static unsigned int dmaengine = 1; | ||
40 | module_param(dmaengine, uint, 0444); | ||
41 | MODULE_PARM_DESC(dmaengine, "Register services with the DMA subsystem (0 = disabled; default: 1)"); | ||
42 | |||
38 | static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp) | 43 | static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp) |
39 | { | 44 | { |
40 | switch (dma_chan_attr) { | 45 | switch (dma_chan_attr) { |
@@ -637,6 +642,9 @@ int ccp_dmaengine_register(struct ccp_device *ccp) | |||
637 | unsigned int i; | 642 | unsigned int i; |
638 | int ret; | 643 | int ret; |
639 | 644 | ||
645 | if (!dmaengine) | ||
646 | return 0; | ||
647 | |||
640 | ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count, | 648 | ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count, |
641 | sizeof(*(ccp->ccp_dma_chan)), | 649 | sizeof(*(ccp->ccp_dma_chan)), |
642 | GFP_KERNEL); | 650 | GFP_KERNEL); |
@@ -740,6 +748,9 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp) | |||
740 | { | 748 | { |
741 | struct dma_device *dma_dev = &ccp->dma_dev; | 749 | struct dma_device *dma_dev = &ccp->dma_dev; |
742 | 750 | ||
751 | if (!dmaengine) | ||
752 | return; | ||
753 | |||
743 | dma_async_device_unregister(dma_dev); | 754 | dma_async_device_unregister(dma_dev); |
744 | 755 | ||
745 | kmem_cache_destroy(ccp->dma_desc_cache); | 756 | kmem_cache_destroy(ccp->dma_desc_cache); |
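The new dmaengine parameter is checked at both ends, so nothing is ever torn down that was never registered. A sketch of the symmetric guards:

static unsigned int dmaengine = 1;	/* ccp.dmaengine=0 opts out */

int ccp_dmaengine_register(struct ccp_device *ccp)
{
	if (!dmaengine)
		return 0;	/* skip DMA channel setup entirely */
	/* ... channel registration as before ... */
	return 0;
}

void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
	if (!dmaengine)
		return;		/* never registered, nothing to undo */
	/* ... teardown as before ... */
}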
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 9bc3c62157d7..c8da8eb160da 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c | |||
@@ -10,7 +10,6 @@ | |||
10 | 10 | ||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/pci.h> | ||
14 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
15 | #include <crypto/scatterwalk.h> | 14 | #include <crypto/scatterwalk.h> |
16 | #include <crypto/des.h> | 15 | #include <crypto/des.h> |
@@ -150,14 +149,13 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa, | |||
150 | if (len <= CCP_DMAPOOL_MAX_SIZE) { | 149 | if (len <= CCP_DMAPOOL_MAX_SIZE) { |
151 | wa->dma_pool = cmd_q->dma_pool; | 150 | wa->dma_pool = cmd_q->dma_pool; |
152 | 151 | ||
153 | wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL, | 152 | wa->address = dma_pool_zalloc(wa->dma_pool, GFP_KERNEL, |
154 | &wa->dma.address); | 153 | &wa->dma.address); |
155 | if (!wa->address) | 154 | if (!wa->address) |
156 | return -ENOMEM; | 155 | return -ENOMEM; |
157 | 156 | ||
158 | wa->dma.length = CCP_DMAPOOL_MAX_SIZE; | 157 | wa->dma.length = CCP_DMAPOOL_MAX_SIZE; |
159 | 158 | ||
160 | memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE); | ||
161 | } else { | 159 | } else { |
162 | wa->address = kzalloc(len, GFP_KERNEL); | 160 | wa->address = kzalloc(len, GFP_KERNEL); |
163 | if (!wa->address) | 161 | if (!wa->address) |
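The conversion above folds allocation and clearing into one call; dma_pool_zalloc() is simply the zeroing variant of dma_pool_alloc(). Equivalence sketch:

wa->address = dma_pool_zalloc(wa->dma_pool, GFP_KERNEL, &wa->dma.address);

/* behaves like: */
wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL, &wa->dma.address);
if (wa->address)
	memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);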
@@ -455,8 +453,8 @@ static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q, | |||
455 | return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true); | 453 | return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true); |
456 | } | 454 | } |
457 | 455 | ||
458 | static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, | 456 | static noinline_for_stack int |
459 | struct ccp_cmd *cmd) | 457 | ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) |
460 | { | 458 | { |
461 | struct ccp_aes_engine *aes = &cmd->u.aes; | 459 | struct ccp_aes_engine *aes = &cmd->u.aes; |
462 | struct ccp_dm_workarea key, ctx; | 460 | struct ccp_dm_workarea key, ctx; |
@@ -611,8 +609,8 @@ e_key: | |||
611 | return ret; | 609 | return ret; |
612 | } | 610 | } |
613 | 611 | ||
614 | static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, | 612 | static noinline_for_stack int |
615 | struct ccp_cmd *cmd) | 613 | ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) |
616 | { | 614 | { |
617 | struct ccp_aes_engine *aes = &cmd->u.aes; | 615 | struct ccp_aes_engine *aes = &cmd->u.aes; |
618 | struct ccp_dm_workarea key, ctx, final_wa, tag; | 616 | struct ccp_dm_workarea key, ctx, final_wa, tag; |
@@ -894,7 +892,8 @@ e_key: | |||
894 | return ret; | 892 | return ret; |
895 | } | 893 | } |
896 | 894 | ||
897 | static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | 895 | static noinline_for_stack int |
896 | ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | ||
898 | { | 897 | { |
899 | struct ccp_aes_engine *aes = &cmd->u.aes; | 898 | struct ccp_aes_engine *aes = &cmd->u.aes; |
900 | struct ccp_dm_workarea key, ctx; | 899 | struct ccp_dm_workarea key, ctx; |
@@ -904,12 +903,6 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
904 | bool in_place = false; | 903 | bool in_place = false; |
905 | int ret; | 904 | int ret; |
906 | 905 | ||
907 | if (aes->mode == CCP_AES_MODE_CMAC) | ||
908 | return ccp_run_aes_cmac_cmd(cmd_q, cmd); | ||
909 | |||
910 | if (aes->mode == CCP_AES_MODE_GCM) | ||
911 | return ccp_run_aes_gcm_cmd(cmd_q, cmd); | ||
912 | |||
913 | if (!((aes->key_len == AES_KEYSIZE_128) || | 906 | if (!((aes->key_len == AES_KEYSIZE_128) || |
914 | (aes->key_len == AES_KEYSIZE_192) || | 907 | (aes->key_len == AES_KEYSIZE_192) || |
915 | (aes->key_len == AES_KEYSIZE_256))) | 908 | (aes->key_len == AES_KEYSIZE_256))) |
@@ -1076,8 +1069,8 @@ e_key: | |||
1076 | return ret; | 1069 | return ret; |
1077 | } | 1070 | } |
1078 | 1071 | ||
1079 | static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, | 1072 | static noinline_for_stack int |
1080 | struct ccp_cmd *cmd) | 1073 | ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) |
1081 | { | 1074 | { |
1082 | struct ccp_xts_aes_engine *xts = &cmd->u.xts; | 1075 | struct ccp_xts_aes_engine *xts = &cmd->u.xts; |
1083 | struct ccp_dm_workarea key, ctx; | 1076 | struct ccp_dm_workarea key, ctx; |
@@ -1276,7 +1269,8 @@ e_key: | |||
1276 | return ret; | 1269 | return ret; |
1277 | } | 1270 | } |
1278 | 1271 | ||
1279 | static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | 1272 | static noinline_for_stack int |
1273 | ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | ||
1280 | { | 1274 | { |
1281 | struct ccp_des3_engine *des3 = &cmd->u.des3; | 1275 | struct ccp_des3_engine *des3 = &cmd->u.des3; |
1282 | 1276 | ||
@@ -1472,7 +1466,8 @@ e_key: | |||
1472 | return ret; | 1466 | return ret; |
1473 | } | 1467 | } |
1474 | 1468 | ||
1475 | static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | 1469 | static noinline_for_stack int |
1470 | ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | ||
1476 | { | 1471 | { |
1477 | struct ccp_sha_engine *sha = &cmd->u.sha; | 1472 | struct ccp_sha_engine *sha = &cmd->u.sha; |
1478 | struct ccp_dm_workarea ctx; | 1473 | struct ccp_dm_workarea ctx; |
@@ -1816,7 +1811,8 @@ e_ctx: | |||
1816 | return ret; | 1811 | return ret; |
1817 | } | 1812 | } |
1818 | 1813 | ||
1819 | static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | 1814 | static noinline_for_stack int |
1815 | ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | ||
1820 | { | 1816 | { |
1821 | struct ccp_rsa_engine *rsa = &cmd->u.rsa; | 1817 | struct ccp_rsa_engine *rsa = &cmd->u.rsa; |
1822 | struct ccp_dm_workarea exp, src, dst; | 1818 | struct ccp_dm_workarea exp, src, dst; |
@@ -1947,8 +1943,8 @@ e_sb: | |||
1947 | return ret; | 1943 | return ret; |
1948 | } | 1944 | } |
1949 | 1945 | ||
1950 | static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, | 1946 | static noinline_for_stack int |
1951 | struct ccp_cmd *cmd) | 1947 | ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) |
1952 | { | 1948 | { |
1953 | struct ccp_passthru_engine *pt = &cmd->u.passthru; | 1949 | struct ccp_passthru_engine *pt = &cmd->u.passthru; |
1954 | struct ccp_dm_workarea mask; | 1950 | struct ccp_dm_workarea mask; |
@@ -2079,7 +2075,8 @@ e_mask: | |||
2079 | return ret; | 2075 | return ret; |
2080 | } | 2076 | } |
2081 | 2077 | ||
2082 | static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q, | 2078 | static noinline_for_stack int |
2079 | ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q, | ||
2083 | struct ccp_cmd *cmd) | 2080 | struct ccp_cmd *cmd) |
2084 | { | 2081 | { |
2085 | struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap; | 2082 | struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap; |
@@ -2420,7 +2417,8 @@ e_src: | |||
2420 | return ret; | 2417 | return ret; |
2421 | } | 2418 | } |
2422 | 2419 | ||
2423 | static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | 2420 | static noinline_for_stack int |
2421 | ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | ||
2424 | { | 2422 | { |
2425 | struct ccp_ecc_engine *ecc = &cmd->u.ecc; | 2423 | struct ccp_ecc_engine *ecc = &cmd->u.ecc; |
2426 | 2424 | ||
@@ -2457,7 +2455,17 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
2457 | 2455 | ||
2458 | switch (cmd->engine) { | 2456 | switch (cmd->engine) { |
2459 | case CCP_ENGINE_AES: | 2457 | case CCP_ENGINE_AES: |
2460 | ret = ccp_run_aes_cmd(cmd_q, cmd); | 2458 | switch (cmd->u.aes.mode) { |
2459 | case CCP_AES_MODE_CMAC: | ||
2460 | ret = ccp_run_aes_cmac_cmd(cmd_q, cmd); | ||
2461 | break; | ||
2462 | case CCP_AES_MODE_GCM: | ||
2463 | ret = ccp_run_aes_gcm_cmd(cmd_q, cmd); | ||
2464 | break; | ||
2465 | default: | ||
2466 | ret = ccp_run_aes_cmd(cmd_q, cmd); | ||
2467 | break; | ||
2468 | } | ||
2461 | break; | 2469 | break; |
2462 | case CCP_ENGINE_XTS_AES_128: | 2470 | case CCP_ENGINE_XTS_AES_128: |
2463 | ret = ccp_run_xts_aes_cmd(cmd_q, cmd); | 2471 | ret = ccp_run_xts_aes_cmd(cmd_q, cmd); |
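Two related changes run through ccp-ops.c: every per-engine handler gains noinline_for_stack, so its large ccp_dm_workarea locals cannot be inlined, and summed, into one oversized ccp_run_cmd() frame, and the AES mode dispatch moves from ccp_run_aes_cmd() up into the top-level switch. A minimal illustration of why the annotation matters (hypothetical function, not driver code; noinline_for_stack comes from <linux/compiler_types.h>):

static noinline_for_stack int big_frame_handler(void)
{
	u8 workarea[512];	/* stays in this frame, not the caller's */

	memset(workarea, 0, sizeof(workarea));
	return workarea[0];
}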
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h index c5e06c92d40e..82a084f02990 100644 --- a/drivers/crypto/ccp/psp-dev.h +++ b/drivers/crypto/ccp/psp-dev.h | |||
@@ -11,7 +11,6 @@ | |||
11 | #define __PSP_DEV_H__ | 11 | #define __PSP_DEV_H__ |
12 | 12 | ||
13 | #include <linux/device.h> | 13 | #include <linux/device.h> |
14 | #include <linux/pci.h> | ||
15 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
16 | #include <linux/mutex.h> | 15 | #include <linux/mutex.h> |
17 | #include <linux/list.h> | 16 | #include <linux/list.h> |
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index 8abe9ea7e76f..53c12562d31e 100644 --- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h | |||
@@ -13,7 +13,6 @@ | |||
13 | #define __SP_DEV_H__ | 13 | #define __SP_DEV_H__ |
14 | 14 | ||
15 | #include <linux/device.h> | 15 | #include <linux/device.h> |
16 | #include <linux/pci.h> | ||
17 | #include <linux/spinlock.h> | 16 | #include <linux/spinlock.h> |
18 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
19 | #include <linux/list.h> | 18 | #include <linux/list.h> |
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c index 1b45236e3716..831aac1393a2 100644 --- a/drivers/crypto/ccp/sp-platform.c +++ b/drivers/crypto/ccp/sp-platform.c | |||
@@ -125,7 +125,6 @@ static int sp_platform_probe(struct platform_device *pdev) | |||
125 | struct sp_platform *sp_platform; | 125 | struct sp_platform *sp_platform; |
126 | struct device *dev = &pdev->dev; | 126 | struct device *dev = &pdev->dev; |
127 | enum dev_dma_attr attr; | 127 | enum dev_dma_attr attr; |
128 | struct resource *ior; | ||
129 | int ret; | 128 | int ret; |
130 | 129 | ||
131 | ret = -ENOMEM; | 130 | ret = -ENOMEM; |
@@ -146,8 +145,7 @@ static int sp_platform_probe(struct platform_device *pdev) | |||
146 | goto e_err; | 145 | goto e_err; |
147 | } | 146 | } |
148 | 147 | ||
149 | ior = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 148 | sp->io_map = devm_platform_ioremap_resource(pdev, 0); |
150 | sp->io_map = devm_ioremap_resource(dev, ior); | ||
151 | if (IS_ERR(sp->io_map)) { | 149 | if (IS_ERR(sp->io_map)) { |
152 | ret = PTR_ERR(sp->io_map); | 150 | ret = PTR_ERR(sp->io_map); |
153 | goto e_err; | 151 | goto e_err; |
diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile index 145e50bdbf16..5cfda508ee41 100644 --- a/drivers/crypto/ccree/Makefile +++ b/drivers/crypto/ccree/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Copyright (C) 2012-2019 ARM Limited (or its affiliates). | 2 | # Copyright (C) 2012-2019 ARM Limited (or its affiliates). |
3 | 3 | ||
4 | obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o | 4 | obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o |
5 | ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o | 5 | ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_sram_mgr.o |
6 | ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o | 6 | ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o |
7 | ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o | 7 | ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o |
8 | ccree-$(CONFIG_PM) += cc_pm.o | 8 | ccree-$(CONFIG_PM) += cc_pm.o |
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index 7aa4cbe19a86..d3e8faa03f15 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c | |||
@@ -6,7 +6,7 @@ | |||
6 | #include <crypto/algapi.h> | 6 | #include <crypto/algapi.h> |
7 | #include <crypto/internal/aead.h> | 7 | #include <crypto/internal/aead.h> |
8 | #include <crypto/authenc.h> | 8 | #include <crypto/authenc.h> |
9 | #include <crypto/des.h> | 9 | #include <crypto/internal/des.h> |
10 | #include <linux/rtnetlink.h> | 10 | #include <linux/rtnetlink.h> |
11 | #include "cc_driver.h" | 11 | #include "cc_driver.h" |
12 | #include "cc_buffer_mgr.h" | 12 | #include "cc_buffer_mgr.h" |
@@ -236,31 +236,17 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err) | |||
236 | /* In case of payload authentication failure, MUST NOT | 236 | /* In case of payload authentication failure, MUST NOT |
237 | * reveal the decrypted message --> zero its memory. | 237 | * reveal the decrypted message --> zero its memory. |
238 | */ | 238 | */ |
239 | cc_zero_sgl(areq->dst, areq_ctx->cryptlen); | 239 | sg_zero_buffer(areq->dst, sg_nents(areq->dst), |
240 | areq->cryptlen, 0); | ||
240 | err = -EBADMSG; | 241 | err = -EBADMSG; |
241 | } | 242 | } |
242 | } else { /*ENCRYPT*/ | 243 | /*ENCRYPT*/ |
243 | if (areq_ctx->is_icv_fragmented) { | 244 | } else if (areq_ctx->is_icv_fragmented) { |
244 | u32 skip = areq->cryptlen + areq_ctx->dst_offset; | 245 | u32 skip = areq->cryptlen + areq_ctx->dst_offset; |
245 | |||
246 | cc_copy_sg_portion(dev, areq_ctx->mac_buf, | ||
247 | areq_ctx->dst_sgl, skip, | ||
248 | (skip + ctx->authsize), | ||
249 | CC_SG_FROM_BUF); | ||
250 | } | ||
251 | 246 | ||
252 | /* If an IV was generated, copy it back to the user provided | 247 | cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl, |
253 | * buffer. | 248 | skip, (skip + ctx->authsize), |
254 | */ | 249 | CC_SG_FROM_BUF); |
255 | if (areq_ctx->backup_giv) { | ||
256 | if (ctx->cipher_mode == DRV_CIPHER_CTR) | ||
257 | memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + | ||
258 | CTR_RFC3686_NONCE_SIZE, | ||
259 | CTR_RFC3686_IV_SIZE); | ||
260 | else if (ctx->cipher_mode == DRV_CIPHER_CCM) | ||
261 | memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + | ||
262 | CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE); | ||
263 | } | ||
264 | } | 250 | } |
265 | done: | 251 | done: |
266 | aead_request_complete(areq, err); | 252 | aead_request_complete(areq, err); |
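sg_zero_buffer() is the generic scatterlist helper that replaces the driver-private cc_zero_sgl() removed further down; the call above zeroes the first cryptlen bytes of the destination list so a message that fails authentication is never revealed. Prototype as found in lib/scatterlist.c around this kernel release, with the call from the hunk:

size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
		      size_t buflen, off_t skip);

sg_zero_buffer(areq->dst, sg_nents(areq->dst), areq->cryptlen, 0);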
@@ -663,33 +649,17 @@ static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key, | |||
663 | unsigned int keylen) | 649 | unsigned int keylen) |
664 | { | 650 | { |
665 | struct crypto_authenc_keys keys; | 651 | struct crypto_authenc_keys keys; |
666 | u32 flags; | ||
667 | int err; | 652 | int err; |
668 | 653 | ||
669 | err = crypto_authenc_extractkeys(&keys, key, keylen); | 654 | err = crypto_authenc_extractkeys(&keys, key, keylen); |
670 | if (unlikely(err)) | 655 | if (unlikely(err)) |
671 | goto badkey; | 656 | return err; |
672 | |||
673 | err = -EINVAL; | ||
674 | if (keys.enckeylen != DES3_EDE_KEY_SIZE) | ||
675 | goto badkey; | ||
676 | 657 | ||
677 | flags = crypto_aead_get_flags(aead); | 658 | err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?: |
678 | err = __des3_verify_key(&flags, keys.enckey); | 659 | cc_aead_setkey(aead, key, keylen); |
679 | if (unlikely(err)) { | ||
680 | crypto_aead_set_flags(aead, flags); | ||
681 | goto out; | ||
682 | } | ||
683 | |||
684 | err = cc_aead_setkey(aead, key, keylen); | ||
685 | 660 | ||
686 | out: | ||
687 | memzero_explicit(&keys, sizeof(keys)); | 661 | memzero_explicit(&keys, sizeof(keys)); |
688 | return err; | 662 | return err; |
689 | |||
690 | badkey: | ||
691 | crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
692 | goto out; | ||
693 | } | 663 | } |
694 | 664 | ||
695 | static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, | 665 | static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, |
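The rewritten setkey leans on the GNU a ?: b extension: cc_aead_setkey() runs only when verification returned 0, and verify_aead_des3_key() also rejects keys whose length is not DES3_EDE_KEY_SIZE, which is why the explicit enckeylen test could go. Expanded equivalent for clarity:

err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen);
if (!err)
	err = cc_aead_setkey(aead, key, keylen);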
@@ -1975,9 +1945,8 @@ static int cc_proc_aead(struct aead_request *req, | |||
1975 | */ | 1945 | */ |
1976 | memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, | 1946 | memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, |
1977 | CTR_RFC3686_NONCE_SIZE); | 1947 | CTR_RFC3686_NONCE_SIZE); |
1978 | if (!areq_ctx->backup_giv) /*User none-generated IV*/ | 1948 | memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv, |
1979 | memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, | 1949 | CTR_RFC3686_IV_SIZE); |
1980 | req->iv, CTR_RFC3686_IV_SIZE); | ||
1981 | /* Initialize counter portion of counter block */ | 1950 | /* Initialize counter portion of counter block */ |
1982 | *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE + | 1951 | *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE + |
1983 | CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); | 1952 | CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); |
@@ -2023,40 +1992,6 @@ static int cc_proc_aead(struct aead_request *req, | |||
2023 | goto exit; | 1992 | goto exit; |
2024 | } | 1993 | } |
2025 | 1994 | ||
2026 | /* do we need to generate IV? */ | ||
2027 | if (areq_ctx->backup_giv) { | ||
2028 | /* set the DMA mapped IV address*/ | ||
2029 | if (ctx->cipher_mode == DRV_CIPHER_CTR) { | ||
2030 | cc_req.ivgen_dma_addr[0] = | ||
2031 | areq_ctx->gen_ctx.iv_dma_addr + | ||
2032 | CTR_RFC3686_NONCE_SIZE; | ||
2033 | cc_req.ivgen_dma_addr_len = 1; | ||
2034 | } else if (ctx->cipher_mode == DRV_CIPHER_CCM) { | ||
2035 | /* In ccm, the IV needs to exist both inside B0 and | ||
2036 | * inside the counter.It is also copied to iv_dma_addr | ||
2037 | * for other reasons (like returning it to the user). | ||
2038 | * So, using 3 (identical) IV outputs. | ||
2039 | */ | ||
2040 | cc_req.ivgen_dma_addr[0] = | ||
2041 | areq_ctx->gen_ctx.iv_dma_addr + | ||
2042 | CCM_BLOCK_IV_OFFSET; | ||
2043 | cc_req.ivgen_dma_addr[1] = | ||
2044 | sg_dma_address(&areq_ctx->ccm_adata_sg) + | ||
2045 | CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET; | ||
2046 | cc_req.ivgen_dma_addr[2] = | ||
2047 | sg_dma_address(&areq_ctx->ccm_adata_sg) + | ||
2048 | CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET; | ||
2049 | cc_req.ivgen_dma_addr_len = 3; | ||
2050 | } else { | ||
2051 | cc_req.ivgen_dma_addr[0] = | ||
2052 | areq_ctx->gen_ctx.iv_dma_addr; | ||
2053 | cc_req.ivgen_dma_addr_len = 1; | ||
2054 | } | ||
2055 | |||
2056 | /* set the IV size (8/16 B long)*/ | ||
2057 | cc_req.ivgen_size = crypto_aead_ivsize(tfm); | ||
2058 | } | ||
2059 | |||
2060 | /* STAT_PHASE_2: Create sequence */ | 1995 | /* STAT_PHASE_2: Create sequence */ |
2061 | 1996 | ||
2062 | /* Load MLLI tables to SRAM if necessary */ | 1997 | /* Load MLLI tables to SRAM if necessary */ |
@@ -2107,7 +2042,6 @@ static int cc_aead_encrypt(struct aead_request *req) | |||
2107 | /* No generated IV required */ | 2042 | /* No generated IV required */ |
2108 | areq_ctx->backup_iv = req->iv; | 2043 | areq_ctx->backup_iv = req->iv; |
2109 | areq_ctx->assoclen = req->assoclen; | 2044 | areq_ctx->assoclen = req->assoclen; |
2110 | areq_ctx->backup_giv = NULL; | ||
2111 | areq_ctx->is_gcm4543 = false; | 2045 | areq_ctx->is_gcm4543 = false; |
2112 | 2046 | ||
2113 | areq_ctx->plaintext_authenticate_only = false; | 2047 | areq_ctx->plaintext_authenticate_only = false; |
@@ -2139,7 +2073,6 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req) | |||
2139 | /* No generated IV required */ | 2073 | /* No generated IV required */ |
2140 | areq_ctx->backup_iv = req->iv; | 2074 | areq_ctx->backup_iv = req->iv; |
2141 | areq_ctx->assoclen = req->assoclen; | 2075 | areq_ctx->assoclen = req->assoclen; |
2142 | areq_ctx->backup_giv = NULL; | ||
2143 | areq_ctx->is_gcm4543 = true; | 2076 | areq_ctx->is_gcm4543 = true; |
2144 | 2077 | ||
2145 | cc_proc_rfc4309_ccm(req); | 2078 | cc_proc_rfc4309_ccm(req); |
@@ -2161,7 +2094,6 @@ static int cc_aead_decrypt(struct aead_request *req) | |||
2161 | /* No generated IV required */ | 2094 | /* No generated IV required */ |
2162 | areq_ctx->backup_iv = req->iv; | 2095 | areq_ctx->backup_iv = req->iv; |
2163 | areq_ctx->assoclen = req->assoclen; | 2096 | areq_ctx->assoclen = req->assoclen; |
2164 | areq_ctx->backup_giv = NULL; | ||
2165 | areq_ctx->is_gcm4543 = false; | 2097 | areq_ctx->is_gcm4543 = false; |
2166 | 2098 | ||
2167 | areq_ctx->plaintext_authenticate_only = false; | 2099 | areq_ctx->plaintext_authenticate_only = false; |
@@ -2191,7 +2123,6 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req) | |||
2191 | /* No generated IV required */ | 2123 | /* No generated IV required */ |
2192 | areq_ctx->backup_iv = req->iv; | 2124 | areq_ctx->backup_iv = req->iv; |
2193 | areq_ctx->assoclen = req->assoclen; | 2125 | areq_ctx->assoclen = req->assoclen; |
2194 | areq_ctx->backup_giv = NULL; | ||
2195 | 2126 | ||
2196 | areq_ctx->is_gcm4543 = true; | 2127 | areq_ctx->is_gcm4543 = true; |
2197 | cc_proc_rfc4309_ccm(req); | 2128 | cc_proc_rfc4309_ccm(req); |
@@ -2311,8 +2242,6 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req) | |||
2311 | /* No generated IV required */ | 2242 | /* No generated IV required */ |
2312 | areq_ctx->backup_iv = req->iv; | 2243 | areq_ctx->backup_iv = req->iv; |
2313 | areq_ctx->assoclen = req->assoclen; | 2244 | areq_ctx->assoclen = req->assoclen; |
2314 | areq_ctx->backup_giv = NULL; | ||
2315 | |||
2316 | areq_ctx->plaintext_authenticate_only = false; | 2245 | areq_ctx->plaintext_authenticate_only = false; |
2317 | 2246 | ||
2318 | cc_proc_rfc4_gcm(req); | 2247 | cc_proc_rfc4_gcm(req); |
@@ -2328,9 +2257,16 @@ out: | |||
2328 | static int cc_rfc4543_gcm_encrypt(struct aead_request *req) | 2257 | static int cc_rfc4543_gcm_encrypt(struct aead_request *req) |
2329 | { | 2258 | { |
2330 | /* Very similar to cc_aead_encrypt() above. */ | 2259 | /* Very similar to cc_aead_encrypt() above. */ |
2331 | 2260 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2261 | struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); | ||
2262 | struct device *dev = drvdata_to_dev(ctx->drvdata); | ||
2332 | struct aead_req_ctx *areq_ctx = aead_request_ctx(req); | 2263 | struct aead_req_ctx *areq_ctx = aead_request_ctx(req); |
2333 | int rc; | 2264 | int rc = -EINVAL; |
2265 | |||
2266 | if (!valid_assoclen(req)) { | ||
2267 | dev_err(dev, "invalid Assoclen:%u\n", req->assoclen); | ||
2268 | goto out; | ||
2269 | } | ||
2334 | 2270 | ||
2335 | memset(areq_ctx, 0, sizeof(*areq_ctx)); | 2271 | memset(areq_ctx, 0, sizeof(*areq_ctx)); |
2336 | 2272 | ||
@@ -2340,7 +2276,6 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req) | |||
2340 | /* No generated IV required */ | 2276 | /* No generated IV required */ |
2341 | areq_ctx->backup_iv = req->iv; | 2277 | areq_ctx->backup_iv = req->iv; |
2342 | areq_ctx->assoclen = req->assoclen; | 2278 | areq_ctx->assoclen = req->assoclen; |
2343 | areq_ctx->backup_giv = NULL; | ||
2344 | 2279 | ||
2345 | cc_proc_rfc4_gcm(req); | 2280 | cc_proc_rfc4_gcm(req); |
2346 | areq_ctx->is_gcm4543 = true; | 2281 | areq_ctx->is_gcm4543 = true; |
@@ -2348,7 +2283,7 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req) | |||
2348 | rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT); | 2283 | rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT); |
2349 | if (rc != -EINPROGRESS && rc != -EBUSY) | 2284 | if (rc != -EINPROGRESS && rc != -EBUSY) |
2350 | req->iv = areq_ctx->backup_iv; | 2285 | req->iv = areq_ctx->backup_iv; |
2351 | 2286 | out: | |
2352 | return rc; | 2287 | return rc; |
2353 | } | 2288 | } |
2354 | 2289 | ||
@@ -2372,8 +2307,6 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req) | |||
2372 | /* No generated IV required */ | 2307 | /* No generated IV required */ |
2373 | areq_ctx->backup_iv = req->iv; | 2308 | areq_ctx->backup_iv = req->iv; |
2374 | areq_ctx->assoclen = req->assoclen; | 2309 | areq_ctx->assoclen = req->assoclen; |
2375 | areq_ctx->backup_giv = NULL; | ||
2376 | |||
2377 | areq_ctx->plaintext_authenticate_only = false; | 2310 | areq_ctx->plaintext_authenticate_only = false; |
2378 | 2311 | ||
2379 | cc_proc_rfc4_gcm(req); | 2312 | cc_proc_rfc4_gcm(req); |
@@ -2389,9 +2322,16 @@ out: | |||
2389 | static int cc_rfc4543_gcm_decrypt(struct aead_request *req) | 2322 | static int cc_rfc4543_gcm_decrypt(struct aead_request *req) |
2390 | { | 2323 | { |
2391 | /* Very similar to cc_aead_decrypt() above. */ | 2324 | /* Very similar to cc_aead_decrypt() above. */ |
2392 | 2325 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2326 | struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); | ||
2327 | struct device *dev = drvdata_to_dev(ctx->drvdata); | ||
2393 | struct aead_req_ctx *areq_ctx = aead_request_ctx(req); | 2328 | struct aead_req_ctx *areq_ctx = aead_request_ctx(req); |
2394 | int rc; | 2329 | int rc = -EINVAL; |
2330 | |||
2331 | if (!valid_assoclen(req)) { | ||
2332 | dev_err(dev, "invalid Assoclen:%u\n", req->assoclen); | ||
2333 | goto out; | ||
2334 | } | ||
2395 | 2335 | ||
2396 | memset(areq_ctx, 0, sizeof(*areq_ctx)); | 2336 | memset(areq_ctx, 0, sizeof(*areq_ctx)); |
2397 | 2337 | ||
@@ -2401,7 +2341,6 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req) | |||
2401 | /* No generated IV required */ | 2341 | /* No generated IV required */ |
2402 | areq_ctx->backup_iv = req->iv; | 2342 | areq_ctx->backup_iv = req->iv; |
2403 | areq_ctx->assoclen = req->assoclen; | 2343 | areq_ctx->assoclen = req->assoclen; |
2404 | areq_ctx->backup_giv = NULL; | ||
2405 | 2344 | ||
2406 | cc_proc_rfc4_gcm(req); | 2345 | cc_proc_rfc4_gcm(req); |
2407 | areq_ctx->is_gcm4543 = true; | 2346 | areq_ctx->is_gcm4543 = true; |
@@ -2409,7 +2348,7 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req) | |||
2409 | rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); | 2348 | rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); |
2410 | if (rc != -EINPROGRESS && rc != -EBUSY) | 2349 | if (rc != -EINPROGRESS && rc != -EBUSY) |
2411 | req->iv = areq_ctx->backup_iv; | 2350 | req->iv = areq_ctx->backup_iv; |
2412 | 2351 | out: | |
2413 | return rc; | 2352 | return rc; |
2414 | } | 2353 | } |
2415 | 2354 | ||
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h index e51724b96c56..f12169b57f9d 100644 --- a/drivers/crypto/ccree/cc_aead.h +++ b/drivers/crypto/ccree/cc_aead.h | |||
@@ -65,8 +65,7 @@ struct aead_req_ctx { | |||
65 | unsigned int hw_iv_size ____cacheline_aligned; | 65 | unsigned int hw_iv_size ____cacheline_aligned; |
66 | /* used to prevent cache coherence problem */ | 66 | /* used to prevent cache coherence problem */ |
67 | u8 backup_mac[MAX_MAC_SIZE]; | 67 | u8 backup_mac[MAX_MAC_SIZE]; |
68 | u8 *backup_iv; /*store iv for generated IV flow*/ | 68 | u8 *backup_iv; /* store orig iv */ |
69 | u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/ | ||
70 | u32 assoclen; /* internal assoclen */ | 69 | u32 assoclen; /* internal assoclen */ |
71 | dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */ | 70 | dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */ |
72 | /* buffer for internal ccm configurations */ | 71 | /* buffer for internal ccm configurations */ |
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c index c81ad33f9115..a72586eccd81 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.c +++ b/drivers/crypto/ccree/cc_buffer_mgr.c | |||
@@ -100,27 +100,6 @@ static unsigned int cc_get_sgl_nents(struct device *dev, | |||
100 | } | 100 | } |
101 | 101 | ||
102 | /** | 102 | /** |
103 | * cc_zero_sgl() - Zero scatter scatter list data. | ||
104 | * | ||
105 | * @sgl: | ||
106 | */ | ||
107 | void cc_zero_sgl(struct scatterlist *sgl, u32 data_len) | ||
108 | { | ||
109 | struct scatterlist *current_sg = sgl; | ||
110 | int sg_index = 0; | ||
111 | |||
112 | while (sg_index <= data_len) { | ||
113 | if (!current_sg) { | ||
114 | /* reached the end of the sgl --> just return back */ | ||
115 | return; | ||
116 | } | ||
117 | memset(sg_virt(current_sg), 0, current_sg->length); | ||
118 | sg_index += current_sg->length; | ||
119 | current_sg = sg_next(current_sg); | ||
120 | } | ||
121 | } | ||
122 | |||
123 | /** | ||
124 | * cc_copy_sg_portion() - Copy scatter list data, | 103 | * cc_copy_sg_portion() - Copy scatter list data, |
125 | * from to_skip to end, to dest and vice versa | 104 | * from to_skip to end, to dest and vice versa |
126 | * | 105 | * |
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h index a726016bdbc1..af434872c6ff 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.h +++ b/drivers/crypto/ccree/cc_buffer_mgr.h | |||
@@ -66,6 +66,4 @@ void cc_unmap_hash_request(struct device *dev, void *ctx, | |||
66 | void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg, | 66 | void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg, |
67 | u32 to_skip, u32 end, enum cc_sg_cpy_direct direct); | 67 | u32 to_skip, u32 end, enum cc_sg_cpy_direct direct); |
68 | 68 | ||
69 | void cc_zero_sgl(struct scatterlist *sgl, u32 data_len); | ||
70 | |||
71 | #endif /*__BUFFER_MGR_H__*/ | 69 | #endif /*__BUFFER_MGR_H__*/ |
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index 5b58226ea24d..254b48797799 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c | |||
@@ -5,7 +5,7 @@ | |||
5 | #include <linux/module.h> | 5 | #include <linux/module.h> |
6 | #include <crypto/algapi.h> | 6 | #include <crypto/algapi.h> |
7 | #include <crypto/internal/skcipher.h> | 7 | #include <crypto/internal/skcipher.h> |
8 | #include <crypto/des.h> | 8 | #include <crypto/internal/des.h> |
9 | #include <crypto/xts.h> | 9 | #include <crypto/xts.h> |
10 | #include <crypto/sm4.h> | 10 | #include <crypto/sm4.h> |
11 | #include <crypto/scatterwalk.h> | 11 | #include <crypto/scatterwalk.h> |
@@ -116,10 +116,6 @@ static int validate_data_size(struct cc_cipher_ctx *ctx_p, | |||
116 | case S_DIN_to_AES: | 116 | case S_DIN_to_AES: |
117 | switch (ctx_p->cipher_mode) { | 117 | switch (ctx_p->cipher_mode) { |
118 | case DRV_CIPHER_XTS: | 118 | case DRV_CIPHER_XTS: |
119 | if (size >= AES_BLOCK_SIZE && | ||
120 | IS_ALIGNED(size, AES_BLOCK_SIZE)) | ||
121 | return 0; | ||
122 | break; | ||
123 | case DRV_CIPHER_CBC_CTS: | 119 | case DRV_CIPHER_CBC_CTS: |
124 | if (size >= AES_BLOCK_SIZE) | 120 | if (size >= AES_BLOCK_SIZE) |
125 | return 0; | 121 | return 0; |
@@ -411,16 +407,9 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key, | |||
411 | * HW does the expansion on its own. | 407 | * HW does the expansion on its own. |
412 | */ | 408 | */ |
413 | if (ctx_p->flow_mode == S_DIN_to_DES) { | 409 | if (ctx_p->flow_mode == S_DIN_to_DES) { |
414 | u32 tmp[DES3_EDE_EXPKEY_WORDS]; | 410 | if ((keylen == DES3_EDE_KEY_SIZE && |
415 | if (keylen == DES3_EDE_KEY_SIZE && | 411 | verify_skcipher_des3_key(sktfm, key)) || |
416 | __des3_ede_setkey(tmp, &tfm->crt_flags, key, | 412 | verify_skcipher_des_key(sktfm, key)) { |
417 | DES3_EDE_KEY_SIZE)) { | ||
418 | dev_dbg(dev, "weak 3DES key"); | ||
419 | return -EINVAL; | ||
420 | } else if (!des_ekey(tmp, key) && | ||
421 | (crypto_tfm_get_flags(tfm) & | ||
422 | CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { | ||
423 | tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
424 | dev_dbg(dev, "weak DES key"); | 413 | dev_dbg(dev, "weak DES key"); |
425 | return -EINVAL; | 414 | return -EINVAL; |
426 | } | 415 | } |
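The open-coded des_ekey()/__des3_ede_setkey() weak-key tests give way to shared helpers from <crypto/internal/des.h> (the header switch at the top of this file). Their approximate contract, sketched:

/* Both return 0 for an acceptable key; a weak key yields -EINVAL
 * when the transform set CRYPTO_TFM_REQ_FORBID_WEAK_KEYS (and, for
 * degenerate 3DES keys, unconditionally in FIPS mode). */
int verify_skcipher_des_key(struct crypto_skcipher *tfm, const u8 *key);
int verify_skcipher_des3_key(struct crypto_skcipher *tfm, const u8 *key);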
@@ -945,7 +934,7 @@ static const struct cc_alg_template skcipher_algs[] = { | |||
945 | { | 934 | { |
946 | .name = "xts(paes)", | 935 | .name = "xts(paes)", |
947 | .driver_name = "xts-paes-ccree", | 936 | .driver_name = "xts-paes-ccree", |
948 | .blocksize = AES_BLOCK_SIZE, | 937 | .blocksize = 1, |
949 | .template_skcipher = { | 938 | .template_skcipher = { |
950 | .setkey = cc_cipher_sethkey, | 939 | .setkey = cc_cipher_sethkey, |
951 | .encrypt = cc_cipher_encrypt, | 940 | .encrypt = cc_cipher_encrypt, |
@@ -963,7 +952,7 @@ static const struct cc_alg_template skcipher_algs[] = { | |||
963 | { | 952 | { |
964 | .name = "xts512(paes)", | 953 | .name = "xts512(paes)", |
965 | .driver_name = "xts-paes-du512-ccree", | 954 | .driver_name = "xts-paes-du512-ccree", |
966 | .blocksize = AES_BLOCK_SIZE, | 955 | .blocksize = 1, |
967 | .template_skcipher = { | 956 | .template_skcipher = { |
968 | .setkey = cc_cipher_sethkey, | 957 | .setkey = cc_cipher_sethkey, |
969 | .encrypt = cc_cipher_encrypt, | 958 | .encrypt = cc_cipher_encrypt, |
@@ -982,7 +971,7 @@ static const struct cc_alg_template skcipher_algs[] = { | |||
982 | { | 971 | { |
983 | .name = "xts4096(paes)", | 972 | .name = "xts4096(paes)", |
984 | .driver_name = "xts-paes-du4096-ccree", | 973 | .driver_name = "xts-paes-du4096-ccree", |
985 | .blocksize = AES_BLOCK_SIZE, | 974 | .blocksize = 1, |
986 | .template_skcipher = { | 975 | .template_skcipher = { |
987 | .setkey = cc_cipher_sethkey, | 976 | .setkey = cc_cipher_sethkey, |
988 | .encrypt = cc_cipher_encrypt, | 977 | .encrypt = cc_cipher_encrypt, |
@@ -1203,7 +1192,7 @@ static const struct cc_alg_template skcipher_algs[] = { | |||
1203 | { | 1192 | { |
1204 | .name = "xts(aes)", | 1193 | .name = "xts(aes)", |
1205 | .driver_name = "xts-aes-ccree", | 1194 | .driver_name = "xts-aes-ccree", |
1206 | .blocksize = AES_BLOCK_SIZE, | 1195 | .blocksize = 1, |
1207 | .template_skcipher = { | 1196 | .template_skcipher = { |
1208 | .setkey = cc_cipher_setkey, | 1197 | .setkey = cc_cipher_setkey, |
1209 | .encrypt = cc_cipher_encrypt, | 1198 | .encrypt = cc_cipher_encrypt, |
@@ -1220,7 +1209,7 @@ static const struct cc_alg_template skcipher_algs[] = { | |||
1220 | { | 1209 | { |
1221 | .name = "xts512(aes)", | 1210 | .name = "xts512(aes)", |
1222 | .driver_name = "xts-aes-du512-ccree", | 1211 | .driver_name = "xts-aes-du512-ccree", |
1223 | .blocksize = AES_BLOCK_SIZE, | 1212 | .blocksize = 1, |
1224 | .template_skcipher = { | 1213 | .template_skcipher = { |
1225 | .setkey = cc_cipher_setkey, | 1214 | .setkey = cc_cipher_setkey, |
1226 | .encrypt = cc_cipher_encrypt, | 1215 | .encrypt = cc_cipher_encrypt, |
@@ -1238,7 +1227,7 @@ static const struct cc_alg_template skcipher_algs[] = { | |||
1238 | { | 1227 | { |
1239 | .name = "xts4096(aes)", | 1228 | .name = "xts4096(aes)", |
1240 | .driver_name = "xts-aes-du4096-ccree", | 1229 | .driver_name = "xts-aes-du4096-ccree", |
1241 | .blocksize = AES_BLOCK_SIZE, | 1230 | .blocksize = 1, |
1242 | .template_skcipher = { | 1231 | .template_skcipher = { |
1243 | .setkey = cc_cipher_setkey, | 1232 | .setkey = cc_cipher_setkey, |
1244 | .encrypt = cc_cipher_encrypt, | 1233 | .encrypt = cc_cipher_encrypt, |
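All the XTS templates above drop from AES_BLOCK_SIZE to a 1-byte cra_blocksize, matching the validate_data_size() change earlier in this file that lets XTS accept any length of at least one block (ciphertext stealing). The 1-byte value advertises that granularity to the crypto API, while the one-block minimum is still enforced by validate_data_size(). Sketch of the template field, assumed rationale:

.blocksize = 1,	/* requests need not be AES-block multiples (CTS) */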
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 980aa04b655b..8b8eee513c27 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include "cc_cipher.h" | 22 | #include "cc_cipher.h" |
23 | #include "cc_aead.h" | 23 | #include "cc_aead.h" |
24 | #include "cc_hash.h" | 24 | #include "cc_hash.h" |
25 | #include "cc_ivgen.h" | ||
26 | #include "cc_sram_mgr.h" | 25 | #include "cc_sram_mgr.h" |
27 | #include "cc_pm.h" | 26 | #include "cc_pm.h" |
28 | #include "cc_fips.h" | 27 | #include "cc_fips.h" |
@@ -339,10 +338,8 @@ static int init_cc_resources(struct platform_device *plat_dev) | |||
339 | 338 | ||
340 | /* Then IRQ */ | 339 | /* Then IRQ */ |
341 | new_drvdata->irq = platform_get_irq(plat_dev, 0); | 340 | new_drvdata->irq = platform_get_irq(plat_dev, 0); |
342 | if (new_drvdata->irq < 0) { | 341 | if (new_drvdata->irq < 0) |
343 | dev_err(dev, "Failed getting IRQ resource\n"); | ||
344 | return new_drvdata->irq; | 342 | return new_drvdata->irq; |
345 | } | ||
346 | 343 | ||
347 | init_completion(&new_drvdata->hw_queue_avail); | 344 | init_completion(&new_drvdata->hw_queue_avail); |
348 | 345 | ||
@@ -421,7 +418,7 @@ static int init_cc_resources(struct platform_device *plat_dev) | |||
421 | } | 418 | } |
422 | break; | 419 | break; |
423 | default: | 420 | default: |
424 | dev_err(dev, "Unsupported engines configration.\n"); | 421 | dev_err(dev, "Unsupported engines configuration.\n"); |
425 | rc = -EINVAL; | 422 | rc = -EINVAL; |
426 | goto post_clk_err; | 423 | goto post_clk_err; |
427 | } | 424 | } |
@@ -503,17 +500,11 @@ static int init_cc_resources(struct platform_device *plat_dev) | |||
503 | goto post_buf_mgr_err; | 500 | goto post_buf_mgr_err; |
504 | } | 501 | } |
505 | 502 | ||
506 | rc = cc_ivgen_init(new_drvdata); | ||
507 | if (rc) { | ||
508 | dev_err(dev, "cc_ivgen_init failed\n"); | ||
509 | goto post_buf_mgr_err; | ||
510 | } | ||
511 | |||
512 | /* Allocate crypto algs */ | 503 | /* Allocate crypto algs */ |
513 | rc = cc_cipher_alloc(new_drvdata); | 504 | rc = cc_cipher_alloc(new_drvdata); |
514 | if (rc) { | 505 | if (rc) { |
515 | dev_err(dev, "cc_cipher_alloc failed\n"); | 506 | dev_err(dev, "cc_cipher_alloc failed\n"); |
516 | goto post_ivgen_err; | 507 | goto post_buf_mgr_err; |
517 | } | 508 | } |
518 | 509 | ||
519 | /* hash must be allocated before aead since hash exports APIs */ | 510 | /* hash must be allocated before aead since hash exports APIs */ |
@@ -544,8 +535,6 @@ post_hash_err: | |||
544 | cc_hash_free(new_drvdata); | 535 | cc_hash_free(new_drvdata); |
545 | post_cipher_err: | 536 | post_cipher_err: |
546 | cc_cipher_free(new_drvdata); | 537 | cc_cipher_free(new_drvdata); |
547 | post_ivgen_err: | ||
548 | cc_ivgen_fini(new_drvdata); | ||
549 | post_buf_mgr_err: | 538 | post_buf_mgr_err: |
550 | cc_buffer_mgr_fini(new_drvdata); | 539 | cc_buffer_mgr_fini(new_drvdata); |
551 | post_req_mgr_err: | 540 | post_req_mgr_err: |
@@ -577,7 +566,6 @@ static void cleanup_cc_resources(struct platform_device *plat_dev) | |||
577 | cc_aead_free(drvdata); | 566 | cc_aead_free(drvdata); |
578 | cc_hash_free(drvdata); | 567 | cc_hash_free(drvdata); |
579 | cc_cipher_free(drvdata); | 568 | cc_cipher_free(drvdata); |
580 | cc_ivgen_fini(drvdata); | ||
581 | cc_pm_fini(drvdata); | 569 | cc_pm_fini(drvdata); |
582 | cc_buffer_mgr_fini(drvdata); | 570 | cc_buffer_mgr_fini(drvdata); |
583 | cc_req_mgr_fini(drvdata); | 571 | cc_req_mgr_fini(drvdata); |
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h index 7cd99380bf1f..ab31d4a68c80 100644 --- a/drivers/crypto/ccree/cc_driver.h +++ b/drivers/crypto/ccree/cc_driver.h | |||
@@ -126,15 +126,6 @@ struct cc_cpp_req { | |||
126 | struct cc_crypto_req { | 126 | struct cc_crypto_req { |
127 | void (*user_cb)(struct device *dev, void *req, int err); | 127 | void (*user_cb)(struct device *dev, void *req, int err); |
128 | void *user_arg; | 128 | void *user_arg; |
129 | dma_addr_t ivgen_dma_addr[CC_MAX_IVGEN_DMA_ADDRESSES]; | ||
130 | /* For the first 'ivgen_dma_addr_len' addresses of this array, | ||
131 | * the generated IV is placed by send_request(). | ||
132 | * The same generated IV is used for all addresses. | ||
133 | */ | ||
134 | /* Amount of 'ivgen_dma_addr' elements to be filled. */ | ||
135 | unsigned int ivgen_dma_addr_len; | ||
136 | /* The generated IV size required, 8/16 B allowed. */ | ||
137 | unsigned int ivgen_size; | ||
138 | struct completion seq_compl; /* request completion */ | 129 | struct completion seq_compl; /* request completion */ |
139 | struct cc_cpp_req cpp; | 130 | struct cc_cpp_req cpp; |
140 | }; | 131 | }; |
@@ -158,7 +149,6 @@ struct cc_drvdata { | |||
158 | void *aead_handle; | 149 | void *aead_handle; |
159 | void *request_mgr_handle; | 150 | void *request_mgr_handle; |
160 | void *fips_handle; | 151 | void *fips_handle; |
161 | void *ivgen_handle; | ||
162 | void *sram_mgr_handle; | 152 | void *sram_mgr_handle; |
163 | void *debugfs; | 153 | void *debugfs; |
164 | struct clk *clk; | 154 | struct clk *clk; |
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c index 5ad3ffb7acaa..4c8bce33abcf 100644 --- a/drivers/crypto/ccree/cc_fips.c +++ b/drivers/crypto/ccree/cc_fips.c | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
5 | #include <linux/fips.h> | 5 | #include <linux/fips.h> |
6 | #include <linux/notifier.h> | ||
6 | 7 | ||
7 | #include "cc_driver.h" | 8 | #include "cc_driver.h" |
8 | #include "cc_fips.h" | 9 | #include "cc_fips.h" |
@@ -11,6 +12,8 @@ static void fips_dsr(unsigned long devarg); | |||
11 | 12 | ||
12 | struct cc_fips_handle { | 13 | struct cc_fips_handle { |
13 | struct tasklet_struct tasklet; | 14 | struct tasklet_struct tasklet; |
15 | struct notifier_block nb; | ||
16 | struct cc_drvdata *drvdata; | ||
14 | }; | 17 | }; |
15 | 18 | ||
16 | /* The function called once at driver entry point to check | 19 | /* The function called once at driver entry point to check |
@@ -21,7 +24,13 @@ static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata) | |||
21 | u32 reg; | 24 | u32 reg; |
22 | 25 | ||
23 | reg = cc_ioread(drvdata, CC_REG(GPR_HOST)); | 26 | reg = cc_ioread(drvdata, CC_REG(GPR_HOST)); |
24 | return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK)); | 27 | /* Did the TEE report status? */ |
28 | if (reg & CC_FIPS_SYNC_TEE_STATUS) | ||
29 | /* Yes. Is it OK? */ | ||
30 | return (reg & CC_FIPS_SYNC_MODULE_OK); | ||
31 | |||
32 | /* No. It's either not in use or will be reported later */ | ||
33 | return true; | ||
25 | } | 34 | } |
26 | 35 | ||
27 | /* | 36 | /* |
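The rework turns an exact-match test into two independent bit tests, so "TEE has not reported yet" is no longer treated as a failure. The decision table implemented by the new cc_get_tee_fips_status():

	/* CC_FIPS_SYNC_TEE_STATUS  CC_FIPS_SYNC_MODULE_OK   returns
	 *           0                       x              true  (no report yet, or TEE unused)
	 *           1                       1              true  (TEE reports OK)
	 *           1                       0              false (TEE reports failure)
	 */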
@@ -40,6 +49,21 @@ void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool status) | |||
40 | cc_iowrite(drvdata, CC_REG(HOST_GPR0), val); | 49 | cc_iowrite(drvdata, CC_REG(HOST_GPR0), val); |
41 | } | 50 | } |
42 | 51 | ||
52 | /* Push REE side FIPS test failure to TEE side */ | ||
53 | static int cc_ree_fips_failure(struct notifier_block *nb, unsigned long unused1, | ||
54 | void *unused2) | ||
55 | { | ||
56 | struct cc_fips_handle *fips_h = | ||
57 | container_of(nb, struct cc_fips_handle, nb); | ||
58 | struct cc_drvdata *drvdata = fips_h->drvdata; | ||
59 | struct device *dev = drvdata_to_dev(drvdata); | ||
60 | |||
61 | cc_set_ree_fips_status(drvdata, false); | ||
62 | dev_info(dev, "Notifying TEE of FIPS test failure...\n"); | ||
63 | |||
64 | return NOTIFY_OK; | ||
65 | } | ||
66 | |||
43 | void cc_fips_fini(struct cc_drvdata *drvdata) | 67 | void cc_fips_fini(struct cc_drvdata *drvdata) |
44 | { | 68 | { |
45 | struct cc_fips_handle *fips_h = drvdata->fips_handle; | 69 | struct cc_fips_handle *fips_h = drvdata->fips_handle; |
@@ -47,6 +71,8 @@ void cc_fips_fini(struct cc_drvdata *drvdata) | |||
47 | if (drvdata->hw_rev < CC_HW_REV_712 || !fips_h) | 71 | if (drvdata->hw_rev < CC_HW_REV_712 || !fips_h) |
48 | return; | 72 | return; |
49 | 73 | ||
74 | atomic_notifier_chain_unregister(&fips_fail_notif_chain, &fips_h->nb); | ||
75 | |||
50 | /* Kill tasklet */ | 76 | /* Kill tasklet */ |
51 | tasklet_kill(&fips_h->tasklet); | 77 | tasklet_kill(&fips_h->tasklet); |
52 | drvdata->fips_handle = NULL; | 78 | drvdata->fips_handle = NULL; |
@@ -118,6 +144,9 @@ int cc_fips_init(struct cc_drvdata *p_drvdata) | |||
118 | 144 | ||
119 | dev_dbg(dev, "Initializing fips tasklet\n"); | 145 | dev_dbg(dev, "Initializing fips tasklet\n"); |
120 | tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata); | 146 | tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata); |
147 | fips_h->drvdata = p_drvdata; | ||
148 | fips_h->nb.notifier_call = cc_ree_fips_failure; | ||
149 | atomic_notifier_chain_register(&fips_fail_notif_chain, &fips_h->nb); | ||
121 | 150 | ||
122 | cc_tee_handle_fips_error(p_drvdata); | 151 | cc_tee_handle_fips_error(p_drvdata); |
123 | 152 | ||
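The new notifier block hooks the driver into the kernel-wide FIPS failure chain. The producer side is not part of this diff; a sketch of how the generic crypto FIPS code is assumed to broadcast a self-test failure, which then invokes cc_ree_fips_failure() above:

	ATOMIC_NOTIFIER_HEAD(fips_fail_notif_chain);

	/* Called by the crypto self-test machinery on a FIPS failure;
	 * runs every registered callback atomically.
	 */
	void fips_fail_notify(void)
	{
		if (fips_enabled)
			atomic_notifier_call_chain(&fips_fail_notif_chain, 0, NULL);
	}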
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c index a6abe4e3bb0e..bc71bdf44a9f 100644 --- a/drivers/crypto/ccree/cc_hash.c +++ b/drivers/crypto/ccree/cc_hash.c | |||
@@ -25,27 +25,27 @@ struct cc_hash_handle { | |||
25 | struct list_head hash_list; | 25 | struct list_head hash_list; |
26 | }; | 26 | }; |
27 | 27 | ||
28 | static const u32 digest_len_init[] = { | 28 | static const u32 cc_digest_len_init[] = { |
29 | 0x00000040, 0x00000000, 0x00000000, 0x00000000 }; | 29 | 0x00000040, 0x00000000, 0x00000000, 0x00000000 }; |
30 | static const u32 md5_init[] = { | 30 | static const u32 cc_md5_init[] = { |
31 | SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 }; | 31 | SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 }; |
32 | static const u32 sha1_init[] = { | 32 | static const u32 cc_sha1_init[] = { |
33 | SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 }; | 33 | SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 }; |
34 | static const u32 sha224_init[] = { | 34 | static const u32 cc_sha224_init[] = { |
35 | SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4, | 35 | SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4, |
36 | SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 }; | 36 | SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 }; |
37 | static const u32 sha256_init[] = { | 37 | static const u32 cc_sha256_init[] = { |
38 | SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4, | 38 | SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4, |
39 | SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 }; | 39 | SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 }; |
40 | static const u32 digest_len_sha512_init[] = { | 40 | static const u32 cc_digest_len_sha512_init[] = { |
41 | 0x00000080, 0x00000000, 0x00000000, 0x00000000 }; | 41 | 0x00000080, 0x00000000, 0x00000000, 0x00000000 }; |
42 | static u64 sha384_init[] = { | 42 | static u64 cc_sha384_init[] = { |
43 | SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4, | 43 | SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4, |
44 | SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 }; | 44 | SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 }; |
45 | static u64 sha512_init[] = { | 45 | static u64 cc_sha512_init[] = { |
46 | SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4, | 46 | SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4, |
47 | SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 }; | 47 | SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 }; |
48 | static const u32 sm3_init[] = { | 48 | static const u32 cc_sm3_init[] = { |
49 | SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE, | 49 | SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE, |
50 | SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA }; | 50 | SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA }; |
51 | 51 | ||
@@ -144,10 +144,11 @@ static void cc_init_req(struct device *dev, struct ahash_req_ctx *state, | |||
144 | if (ctx->hash_mode == DRV_HASH_SHA512 || | 144 | if (ctx->hash_mode == DRV_HASH_SHA512 || |
145 | ctx->hash_mode == DRV_HASH_SHA384) | 145 | ctx->hash_mode == DRV_HASH_SHA384) |
146 | memcpy(state->digest_bytes_len, | 146 | memcpy(state->digest_bytes_len, |
147 | digest_len_sha512_init, | 147 | cc_digest_len_sha512_init, |
148 | ctx->hash_len); | 148 | ctx->hash_len); |
149 | else | 149 | else |
150 | memcpy(state->digest_bytes_len, digest_len_init, | 150 | memcpy(state->digest_bytes_len, |
151 | cc_digest_len_init, | ||
151 | ctx->hash_len); | 152 | ctx->hash_len); |
152 | } | 153 | } |
153 | 154 | ||
@@ -1873,26 +1874,26 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata) | |||
1873 | int rc = 0; | 1874 | int rc = 0; |
1874 | 1875 | ||
1875 | /* Copy-to-sram digest-len */ | 1876 | /* Copy-to-sram digest-len */ |
1876 | cc_set_sram_desc(digest_len_init, sram_buff_ofs, | 1877 | cc_set_sram_desc(cc_digest_len_init, sram_buff_ofs, |
1877 | ARRAY_SIZE(digest_len_init), larval_seq, | 1878 | ARRAY_SIZE(cc_digest_len_init), larval_seq, |
1878 | &larval_seq_len); | 1879 | &larval_seq_len); |
1879 | rc = send_request_init(drvdata, larval_seq, larval_seq_len); | 1880 | rc = send_request_init(drvdata, larval_seq, larval_seq_len); |
1880 | if (rc) | 1881 | if (rc) |
1881 | goto init_digest_const_err; | 1882 | goto init_digest_const_err; |
1882 | 1883 | ||
1883 | sram_buff_ofs += sizeof(digest_len_init); | 1884 | sram_buff_ofs += sizeof(cc_digest_len_init); |
1884 | larval_seq_len = 0; | 1885 | larval_seq_len = 0; |
1885 | 1886 | ||
1886 | if (large_sha_supported) { | 1887 | if (large_sha_supported) { |
1887 | /* Copy-to-sram digest-len for sha384/512 */ | 1888 | /* Copy-to-sram digest-len for sha384/512 */ |
1888 | cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs, | 1889 | cc_set_sram_desc(cc_digest_len_sha512_init, sram_buff_ofs, |
1889 | ARRAY_SIZE(digest_len_sha512_init), | 1890 | ARRAY_SIZE(cc_digest_len_sha512_init), |
1890 | larval_seq, &larval_seq_len); | 1891 | larval_seq, &larval_seq_len); |
1891 | rc = send_request_init(drvdata, larval_seq, larval_seq_len); | 1892 | rc = send_request_init(drvdata, larval_seq, larval_seq_len); |
1892 | if (rc) | 1893 | if (rc) |
1893 | goto init_digest_const_err; | 1894 | goto init_digest_const_err; |
1894 | 1895 | ||
1895 | sram_buff_ofs += sizeof(digest_len_sha512_init); | 1896 | sram_buff_ofs += sizeof(cc_digest_len_sha512_init); |
1896 | larval_seq_len = 0; | 1897 | larval_seq_len = 0; |
1897 | } | 1898 | } |
1898 | 1899 | ||
@@ -1900,64 +1901,64 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata) | |||
1900 | hash_handle->larval_digest_sram_addr = sram_buff_ofs; | 1901 | hash_handle->larval_digest_sram_addr = sram_buff_ofs; |
1901 | 1902 | ||
1902 | /* Copy-to-sram initial SHA* digests */ | 1903 | /* Copy-to-sram initial SHA* digests */ |
1903 | cc_set_sram_desc(md5_init, sram_buff_ofs, ARRAY_SIZE(md5_init), | 1904 | cc_set_sram_desc(cc_md5_init, sram_buff_ofs, ARRAY_SIZE(cc_md5_init), |
1904 | larval_seq, &larval_seq_len); | 1905 | larval_seq, &larval_seq_len); |
1905 | rc = send_request_init(drvdata, larval_seq, larval_seq_len); | 1906 | rc = send_request_init(drvdata, larval_seq, larval_seq_len); |
1906 | if (rc) | 1907 | if (rc) |
1907 | goto init_digest_const_err; | 1908 | goto init_digest_const_err; |
1908 | sram_buff_ofs += sizeof(md5_init); | 1909 | sram_buff_ofs += sizeof(cc_md5_init); |
1909 | larval_seq_len = 0; | 1910 | larval_seq_len = 0; |
1910 | 1911 | ||
1911 | cc_set_sram_desc(sha1_init, sram_buff_ofs, | 1912 | cc_set_sram_desc(cc_sha1_init, sram_buff_ofs, |
1912 | ARRAY_SIZE(sha1_init), larval_seq, | 1913 | ARRAY_SIZE(cc_sha1_init), larval_seq, |
1913 | &larval_seq_len); | 1914 | &larval_seq_len); |
1914 | rc = send_request_init(drvdata, larval_seq, larval_seq_len); | 1915 | rc = send_request_init(drvdata, larval_seq, larval_seq_len); |
1915 | if (rc) | 1916 | if (rc) |
1916 | goto init_digest_const_err; | 1917 | goto init_digest_const_err; |
1917 | sram_buff_ofs += sizeof(sha1_init); | 1918 | sram_buff_ofs += sizeof(cc_sha1_init); |
1918 | larval_seq_len = 0; | 1919 | larval_seq_len = 0; |
1919 | 1920 | ||
1920 | cc_set_sram_desc(sha224_init, sram_buff_ofs, | 1921 | cc_set_sram_desc(cc_sha224_init, sram_buff_ofs, |
1921 | ARRAY_SIZE(sha224_init), larval_seq, | 1922 | ARRAY_SIZE(cc_sha224_init), larval_seq, |
1922 | &larval_seq_len); | 1923 | &larval_seq_len); |
1923 | rc = send_request_init(drvdata, larval_seq, larval_seq_len); | 1924 | rc = send_request_init(drvdata, larval_seq, larval_seq_len); |
1924 | if (rc) | 1925 | if (rc) |
1925 | goto init_digest_const_err; | 1926 | goto init_digest_const_err; |
1926 | sram_buff_ofs += sizeof(sha224_init); | 1927 | sram_buff_ofs += sizeof(cc_sha224_init); |
1927 | larval_seq_len = 0; | 1928 | larval_seq_len = 0; |
1928 | 1929 | ||
1929 | cc_set_sram_desc(sha256_init, sram_buff_ofs, | 1930 | cc_set_sram_desc(cc_sha256_init, sram_buff_ofs, |
1930 | ARRAY_SIZE(sha256_init), larval_seq, | 1931 | ARRAY_SIZE(cc_sha256_init), larval_seq, |
1931 | &larval_seq_len); | 1932 | &larval_seq_len); |
1932 | rc = send_request_init(drvdata, larval_seq, larval_seq_len); | 1933 | rc = send_request_init(drvdata, larval_seq, larval_seq_len); |
1933 | if (rc) | 1934 | if (rc) |
1934 | goto init_digest_const_err; | 1935 | goto init_digest_const_err; |
1935 | sram_buff_ofs += sizeof(sha256_init); | 1936 | sram_buff_ofs += sizeof(cc_sha256_init); |
1936 | larval_seq_len = 0; | 1937 | larval_seq_len = 0; |
1937 | 1938 | ||
1938 | if (sm3_supported) { | 1939 | if (sm3_supported) { |
1939 | cc_set_sram_desc(sm3_init, sram_buff_ofs, | 1940 | cc_set_sram_desc(cc_sm3_init, sram_buff_ofs, |
1940 | ARRAY_SIZE(sm3_init), larval_seq, | 1941 | ARRAY_SIZE(cc_sm3_init), larval_seq, |
1941 | &larval_seq_len); | 1942 | &larval_seq_len); |
1942 | rc = send_request_init(drvdata, larval_seq, larval_seq_len); | 1943 | rc = send_request_init(drvdata, larval_seq, larval_seq_len); |
1943 | if (rc) | 1944 | if (rc) |
1944 | goto init_digest_const_err; | 1945 | goto init_digest_const_err; |
1945 | sram_buff_ofs += sizeof(sm3_init); | 1946 | sram_buff_ofs += sizeof(cc_sm3_init); |
1946 | larval_seq_len = 0; | 1947 | larval_seq_len = 0; |
1947 | } | 1948 | } |
1948 | 1949 | ||
1949 | if (large_sha_supported) { | 1950 | if (large_sha_supported) { |
1950 | cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs, | 1951 | cc_set_sram_desc((u32 *)cc_sha384_init, sram_buff_ofs, |
1951 | (ARRAY_SIZE(sha384_init) * 2), larval_seq, | 1952 | (ARRAY_SIZE(cc_sha384_init) * 2), larval_seq, |
1952 | &larval_seq_len); | 1953 | &larval_seq_len); |
1953 | rc = send_request_init(drvdata, larval_seq, larval_seq_len); | 1954 | rc = send_request_init(drvdata, larval_seq, larval_seq_len); |
1954 | if (rc) | 1955 | if (rc) |
1955 | goto init_digest_const_err; | 1956 | goto init_digest_const_err; |
1956 | sram_buff_ofs += sizeof(sha384_init); | 1957 | sram_buff_ofs += sizeof(cc_sha384_init); |
1957 | larval_seq_len = 0; | 1958 | larval_seq_len = 0; |
1958 | 1959 | ||
1959 | cc_set_sram_desc((u32 *)sha512_init, sram_buff_ofs, | 1960 | cc_set_sram_desc((u32 *)cc_sha512_init, sram_buff_ofs, |
1960 | (ARRAY_SIZE(sha512_init) * 2), larval_seq, | 1961 | (ARRAY_SIZE(cc_sha512_init) * 2), larval_seq, |
1961 | &larval_seq_len); | 1962 | &larval_seq_len); |
1962 | rc = send_request_init(drvdata, larval_seq, larval_seq_len); | 1963 | rc = send_request_init(drvdata, larval_seq, larval_seq_len); |
1963 | if (rc) | 1964 | if (rc) |
@@ -1986,8 +1987,8 @@ static void __init cc_swap_dwords(u32 *buf, unsigned long size) | |||
1986 | */ | 1987 | */ |
1987 | void __init cc_hash_global_init(void) | 1988 | void __init cc_hash_global_init(void) |
1988 | { | 1989 | { |
1989 | cc_swap_dwords((u32 *)&sha384_init, (ARRAY_SIZE(sha384_init) * 2)); | 1990 | cc_swap_dwords((u32 *)&cc_sha384_init, (ARRAY_SIZE(cc_sha384_init) * 2)); |
1990 | cc_swap_dwords((u32 *)&sha512_init, (ARRAY_SIZE(sha512_init) * 2)); | 1991 | cc_swap_dwords((u32 *)&cc_sha512_init, (ARRAY_SIZE(cc_sha512_init) * 2)); |
1991 | } | 1992 | } |
1992 | 1993 | ||
1993 | int cc_hash_alloc(struct cc_drvdata *drvdata) | 1994 | int cc_hash_alloc(struct cc_drvdata *drvdata) |
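cc_hash_global_init() byte-swaps only the SHA-384/512 tables: they are stored as u64 but the engine consumes them as 32-bit words, so each 64-bit entry has its dword halves exchanged once at module init. A sketch of what cc_swap_dwords() is assumed to do, consistent with the (ARRAY_SIZE * 2) u32 counts above:

	static void cc_swap_dwords(u32 *buf, unsigned long size)
	{
		unsigned long i;
		u32 tmp;

		/* swap the adjacent 32-bit halves of each 64-bit entry */
		for (i = 0; i < size; i += 2) {
			tmp = buf[i];
			buf[i] = buf[i + 1];
			buf[i + 1] = tmp;
		}
	}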
@@ -2006,18 +2007,18 @@ int cc_hash_alloc(struct cc_drvdata *drvdata) | |||
2006 | INIT_LIST_HEAD(&hash_handle->hash_list); | 2007 | INIT_LIST_HEAD(&hash_handle->hash_list); |
2007 | drvdata->hash_handle = hash_handle; | 2008 | drvdata->hash_handle = hash_handle; |
2008 | 2009 | ||
2009 | sram_size_to_alloc = sizeof(digest_len_init) + | 2010 | sram_size_to_alloc = sizeof(cc_digest_len_init) + |
2010 | sizeof(md5_init) + | 2011 | sizeof(cc_md5_init) + |
2011 | sizeof(sha1_init) + | 2012 | sizeof(cc_sha1_init) + |
2012 | sizeof(sha224_init) + | 2013 | sizeof(cc_sha224_init) + |
2013 | sizeof(sha256_init); | 2014 | sizeof(cc_sha256_init); |
2014 | 2015 | ||
2015 | if (drvdata->hw_rev >= CC_HW_REV_713) | 2016 | if (drvdata->hw_rev >= CC_HW_REV_713) |
2016 | sram_size_to_alloc += sizeof(sm3_init); | 2017 | sram_size_to_alloc += sizeof(cc_sm3_init); |
2017 | 2018 | ||
2018 | if (drvdata->hw_rev >= CC_HW_REV_712) | 2019 | if (drvdata->hw_rev >= CC_HW_REV_712) |
2019 | sram_size_to_alloc += sizeof(digest_len_sha512_init) + | 2020 | sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) + |
2020 | sizeof(sha384_init) + sizeof(sha512_init); | 2021 | sizeof(cc_sha384_init) + sizeof(cc_sha512_init); |
2021 | 2022 | ||
2022 | sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc); | 2023 | sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc); |
2023 | if (sram_buff == NULL_SRAM_ADDR) { | 2024 | if (sram_buff == NULL_SRAM_ADDR) { |
@@ -2258,22 +2259,22 @@ static const void *cc_larval_digest(struct device *dev, u32 mode) | |||
2258 | { | 2259 | { |
2259 | switch (mode) { | 2260 | switch (mode) { |
2260 | case DRV_HASH_MD5: | 2261 | case DRV_HASH_MD5: |
2261 | return md5_init; | 2262 | return cc_md5_init; |
2262 | case DRV_HASH_SHA1: | 2263 | case DRV_HASH_SHA1: |
2263 | return sha1_init; | 2264 | return cc_sha1_init; |
2264 | case DRV_HASH_SHA224: | 2265 | case DRV_HASH_SHA224: |
2265 | return sha224_init; | 2266 | return cc_sha224_init; |
2266 | case DRV_HASH_SHA256: | 2267 | case DRV_HASH_SHA256: |
2267 | return sha256_init; | 2268 | return cc_sha256_init; |
2268 | case DRV_HASH_SHA384: | 2269 | case DRV_HASH_SHA384: |
2269 | return sha384_init; | 2270 | return cc_sha384_init; |
2270 | case DRV_HASH_SHA512: | 2271 | case DRV_HASH_SHA512: |
2271 | return sha512_init; | 2272 | return cc_sha512_init; |
2272 | case DRV_HASH_SM3: | 2273 | case DRV_HASH_SM3: |
2273 | return sm3_init; | 2274 | return cc_sm3_init; |
2274 | default: | 2275 | default: |
2275 | dev_err(dev, "Invalid hash mode (%d)\n", mode); | 2276 | dev_err(dev, "Invalid hash mode (%d)\n", mode); |
2276 | return md5_init; | 2277 | return cc_md5_init; |
2277 | } | 2278 | } |
2278 | } | 2279 | } |
2279 | 2280 | ||
@@ -2301,40 +2302,40 @@ cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode) | |||
2301 | return (hash_handle->larval_digest_sram_addr); | 2302 | return (hash_handle->larval_digest_sram_addr); |
2302 | case DRV_HASH_SHA1: | 2303 | case DRV_HASH_SHA1: |
2303 | return (hash_handle->larval_digest_sram_addr + | 2304 | return (hash_handle->larval_digest_sram_addr + |
2304 | sizeof(md5_init)); | 2305 | sizeof(cc_md5_init)); |
2305 | case DRV_HASH_SHA224: | 2306 | case DRV_HASH_SHA224: |
2306 | return (hash_handle->larval_digest_sram_addr + | 2307 | return (hash_handle->larval_digest_sram_addr + |
2307 | sizeof(md5_init) + | 2308 | sizeof(cc_md5_init) + |
2308 | sizeof(sha1_init)); | 2309 | sizeof(cc_sha1_init)); |
2309 | case DRV_HASH_SHA256: | 2310 | case DRV_HASH_SHA256: |
2310 | return (hash_handle->larval_digest_sram_addr + | 2311 | return (hash_handle->larval_digest_sram_addr + |
2311 | sizeof(md5_init) + | 2312 | sizeof(cc_md5_init) + |
2312 | sizeof(sha1_init) + | 2313 | sizeof(cc_sha1_init) + |
2313 | sizeof(sha224_init)); | 2314 | sizeof(cc_sha224_init)); |
2314 | case DRV_HASH_SM3: | 2315 | case DRV_HASH_SM3: |
2315 | return (hash_handle->larval_digest_sram_addr + | 2316 | return (hash_handle->larval_digest_sram_addr + |
2316 | sizeof(md5_init) + | 2317 | sizeof(cc_md5_init) + |
2317 | sizeof(sha1_init) + | 2318 | sizeof(cc_sha1_init) + |
2318 | sizeof(sha224_init) + | 2319 | sizeof(cc_sha224_init) + |
2319 | sizeof(sha256_init)); | 2320 | sizeof(cc_sha256_init)); |
2320 | case DRV_HASH_SHA384: | 2321 | case DRV_HASH_SHA384: |
2321 | addr = (hash_handle->larval_digest_sram_addr + | 2322 | addr = (hash_handle->larval_digest_sram_addr + |
2322 | sizeof(md5_init) + | 2323 | sizeof(cc_md5_init) + |
2323 | sizeof(sha1_init) + | 2324 | sizeof(cc_sha1_init) + |
2324 | sizeof(sha224_init) + | 2325 | sizeof(cc_sha224_init) + |
2325 | sizeof(sha256_init)); | 2326 | sizeof(cc_sha256_init)); |
2326 | if (sm3_supported) | 2327 | if (sm3_supported) |
2327 | addr += sizeof(sm3_init); | 2328 | addr += sizeof(cc_sm3_init); |
2328 | return addr; | 2329 | return addr; |
2329 | case DRV_HASH_SHA512: | 2330 | case DRV_HASH_SHA512: |
2330 | addr = (hash_handle->larval_digest_sram_addr + | 2331 | addr = (hash_handle->larval_digest_sram_addr + |
2331 | sizeof(md5_init) + | 2332 | sizeof(cc_md5_init) + |
2332 | sizeof(sha1_init) + | 2333 | sizeof(cc_sha1_init) + |
2333 | sizeof(sha224_init) + | 2334 | sizeof(cc_sha224_init) + |
2334 | sizeof(sha256_init) + | 2335 | sizeof(cc_sha256_init) + |
2335 | sizeof(sha384_init)); | 2336 | sizeof(cc_sha384_init)); |
2336 | if (sm3_supported) | 2337 | if (sm3_supported) |
2337 | addr += sizeof(sm3_init); | 2338 | addr += sizeof(cc_sm3_init); |
2338 | return addr; | 2339 | return addr; |
2339 | default: | 2340 | default: |
2340 | dev_err(dev, "Invalid hash mode (%d)\n", mode); | 2341 | dev_err(dev, "Invalid hash mode (%d)\n", mode); |
@@ -2360,7 +2361,7 @@ cc_digest_len_addr(void *drvdata, u32 mode) | |||
2360 | #if (CC_DEV_SHA_MAX > 256) | 2361 | #if (CC_DEV_SHA_MAX > 256) |
2361 | case DRV_HASH_SHA384: | 2362 | case DRV_HASH_SHA384: |
2362 | case DRV_HASH_SHA512: | 2363 | case DRV_HASH_SHA512: |
2363 | return digest_len_addr + sizeof(digest_len_init); | 2364 | return digest_len_addr + sizeof(cc_digest_len_init); |
2364 | #endif | 2365 | #endif |
2365 | default: | 2366 | default: |
2366 | return digest_len_addr; /*to avoid kernel crash*/ | 2367 | return digest_len_addr; /*to avoid kernel crash*/ |
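Every file-local init table gains a cc_ prefix (the chelsio driver below does the same with chcr_). Unprefixed names like sha256_init are too generic for file scope once shared hash headers start declaring identifiers of the same name; a hypothetical illustration of the clash the rename avoids:

	#include <crypto/sha.h>	/* assumed to declare a sha256_init() helper */

	static const u32 sha256_init[8] = { 0 };	/* error: 'sha256_init'
							 * redeclared as a different
							 * kind of symbol */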
diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c deleted file mode 100644 index 99dc69383e20..000000000000 --- a/drivers/crypto/ccree/cc_ivgen.c +++ /dev/null | |||
@@ -1,276 +0,0 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */ | ||
3 | |||
4 | #include <crypto/ctr.h> | ||
5 | #include "cc_driver.h" | ||
6 | #include "cc_ivgen.h" | ||
7 | #include "cc_request_mgr.h" | ||
8 | #include "cc_sram_mgr.h" | ||
9 | #include "cc_buffer_mgr.h" | ||
10 | |||
11 | /* The max. size of pool *MUST* be <= SRAM total size */ | ||
12 | #define CC_IVPOOL_SIZE 1024 | ||
13 | /* The first 32 B of the pool are dedicated to the | ||
14 | * next encryption key & IV used for pool regeneration | ||
15 | */ | ||
16 | #define CC_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128) | ||
17 | #define CC_IVPOOL_GEN_SEQ_LEN 4 | ||
18 | |||
19 | /** | ||
20 | * struct cc_ivgen_ctx - IV pool generation context | ||
21 | * @pool: the start address of the iv-pool resides in internal RAM | ||
22 | * @ctr_key_dma: address of pool's encryption key material in internal RAM | ||
23 | * @ctr_iv_dma: address of pool's counter iv in internal RAM | ||
24 | * @next_iv_ofs: the offset to the next available IV in pool | ||
25 | * @pool_meta: virt. address of the initial enc. key/IV | ||
26 | * @pool_meta_dma: phys. address of the initial enc. key/IV | ||
27 | */ | ||
28 | struct cc_ivgen_ctx { | ||
29 | cc_sram_addr_t pool; | ||
30 | cc_sram_addr_t ctr_key; | ||
31 | cc_sram_addr_t ctr_iv; | ||
32 | u32 next_iv_ofs; | ||
33 | u8 *pool_meta; | ||
34 | dma_addr_t pool_meta_dma; | ||
35 | }; | ||
36 | |||
37 | /*! | ||
38 | * Generates CC_IVPOOL_SIZE random bytes by | ||
39 | * encrypting zeroes using AES-128-CTR. | ||
40 | * | ||
41 | * \param ivgen iv-pool context | ||
42 | * \param iv_seq IN/OUT array to the descriptors sequence | ||
43 | * \param iv_seq_len IN/OUT pointer to the sequence length | ||
44 | */ | ||
45 | static int cc_gen_iv_pool(struct cc_ivgen_ctx *ivgen_ctx, | ||
46 | struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len) | ||
47 | { | ||
48 | unsigned int idx = *iv_seq_len; | ||
49 | |||
50 | if ((*iv_seq_len + CC_IVPOOL_GEN_SEQ_LEN) > CC_IVPOOL_SEQ_LEN) { | ||
51 | /* The sequence will be longer than allowed */ | ||
52 | return -EINVAL; | ||
53 | } | ||
54 | /* Setup key */ | ||
55 | hw_desc_init(&iv_seq[idx]); | ||
56 | set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128); | ||
57 | set_setup_mode(&iv_seq[idx], SETUP_LOAD_KEY0); | ||
58 | set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); | ||
59 | set_flow_mode(&iv_seq[idx], S_DIN_to_AES); | ||
60 | set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE); | ||
61 | set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR); | ||
62 | idx++; | ||
63 | |||
64 | /* Setup cipher state */ | ||
65 | hw_desc_init(&iv_seq[idx]); | ||
66 | set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE); | ||
67 | set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); | ||
68 | set_flow_mode(&iv_seq[idx], S_DIN_to_AES); | ||
69 | set_setup_mode(&iv_seq[idx], SETUP_LOAD_STATE1); | ||
70 | set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE); | ||
71 | set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR); | ||
72 | idx++; | ||
73 | |||
74 | /* Perform dummy encrypt to skip first block */ | ||
75 | hw_desc_init(&iv_seq[idx]); | ||
76 | set_din_const(&iv_seq[idx], 0, CC_AES_IV_SIZE); | ||
77 | set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE); | ||
78 | set_flow_mode(&iv_seq[idx], DIN_AES_DOUT); | ||
79 | idx++; | ||
80 | |||
81 | /* Generate IV pool */ | ||
82 | hw_desc_init(&iv_seq[idx]); | ||
83 | set_din_const(&iv_seq[idx], 0, CC_IVPOOL_SIZE); | ||
84 | set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_IVPOOL_SIZE); | ||
85 | set_flow_mode(&iv_seq[idx], DIN_AES_DOUT); | ||
86 | idx++; | ||
87 | |||
88 | *iv_seq_len = idx; /* Update sequence length */ | ||
89 | |||
90 | /* queue ordering ensures pool readiness */ | ||
91 | ivgen_ctx->next_iv_ofs = CC_IVPOOL_META_SIZE; | ||
92 | |||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | /*! | ||
97 | * Generates the initial pool in SRAM. | ||
98 | * This function should be invoked when resuming the driver. | ||
99 | * | ||
100 | * \param drvdata | ||
101 | * | ||
102 | * \return int Zero for success, negative value otherwise. | ||
103 | */ | ||
104 | int cc_init_iv_sram(struct cc_drvdata *drvdata) | ||
105 | { | ||
106 | struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle; | ||
107 | struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN]; | ||
108 | unsigned int iv_seq_len = 0; | ||
109 | int rc; | ||
110 | |||
111 | /* Generate initial enc. key/iv */ | ||
112 | get_random_bytes(ivgen_ctx->pool_meta, CC_IVPOOL_META_SIZE); | ||
113 | |||
114 | /* The first 32 B are reserved for the enc. key/IV */ | ||
115 | ivgen_ctx->ctr_key = ivgen_ctx->pool; | ||
116 | ivgen_ctx->ctr_iv = ivgen_ctx->pool + AES_KEYSIZE_128; | ||
117 | |||
118 | /* Copy initial enc. key and IV to SRAM at a single descriptor */ | ||
119 | hw_desc_init(&iv_seq[iv_seq_len]); | ||
120 | set_din_type(&iv_seq[iv_seq_len], DMA_DLLI, ivgen_ctx->pool_meta_dma, | ||
121 | CC_IVPOOL_META_SIZE, NS_BIT); | ||
122 | set_dout_sram(&iv_seq[iv_seq_len], ivgen_ctx->pool, | ||
123 | CC_IVPOOL_META_SIZE); | ||
124 | set_flow_mode(&iv_seq[iv_seq_len], BYPASS); | ||
125 | iv_seq_len++; | ||
126 | |||
127 | /* Generate initial pool */ | ||
128 | rc = cc_gen_iv_pool(ivgen_ctx, iv_seq, &iv_seq_len); | ||
129 | if (rc) | ||
130 | return rc; | ||
131 | |||
132 | /* Fire-and-forget */ | ||
133 | return send_request_init(drvdata, iv_seq, iv_seq_len); | ||
134 | } | ||
135 | |||
136 | /*! | ||
137 | * Free iv-pool and ivgen context. | ||
138 | * | ||
139 | * \param drvdata | ||
140 | */ | ||
141 | void cc_ivgen_fini(struct cc_drvdata *drvdata) | ||
142 | { | ||
143 | struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle; | ||
144 | struct device *device = &drvdata->plat_dev->dev; | ||
145 | |||
146 | if (!ivgen_ctx) | ||
147 | return; | ||
148 | |||
149 | if (ivgen_ctx->pool_meta) { | ||
150 | memset(ivgen_ctx->pool_meta, 0, CC_IVPOOL_META_SIZE); | ||
151 | dma_free_coherent(device, CC_IVPOOL_META_SIZE, | ||
152 | ivgen_ctx->pool_meta, | ||
153 | ivgen_ctx->pool_meta_dma); | ||
154 | } | ||
155 | |||
156 | ivgen_ctx->pool = NULL_SRAM_ADDR; | ||
157 | } | ||
158 | |||
159 | /*! | ||
160 | * Allocates iv-pool and maps resources. | ||
161 | * This function generates the first IV pool. | ||
162 | * | ||
163 | * \param drvdata Driver's private context | ||
164 | * | ||
165 | * \return int Zero for success, negative value otherwise. | ||
166 | */ | ||
167 | int cc_ivgen_init(struct cc_drvdata *drvdata) | ||
168 | { | ||
169 | struct cc_ivgen_ctx *ivgen_ctx; | ||
170 | struct device *device = &drvdata->plat_dev->dev; | ||
171 | int rc; | ||
172 | |||
173 | /* Allocate "this" context */ | ||
174 | ivgen_ctx = devm_kzalloc(device, sizeof(*ivgen_ctx), GFP_KERNEL); | ||
175 | if (!ivgen_ctx) | ||
176 | return -ENOMEM; | ||
177 | |||
178 | drvdata->ivgen_handle = ivgen_ctx; | ||
179 | |||
180 | /* Allocate pool's header for initial enc. key/IV */ | ||
181 | ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE, | ||
182 | &ivgen_ctx->pool_meta_dma, | ||
183 | GFP_KERNEL); | ||
184 | if (!ivgen_ctx->pool_meta) { | ||
185 | dev_err(device, "Not enough memory to allocate DMA of pool_meta (%u B)\n", | ||
186 | CC_IVPOOL_META_SIZE); | ||
187 | rc = -ENOMEM; | ||
188 | goto out; | ||
189 | } | ||
190 | /* Allocate IV pool in SRAM */ | ||
191 | ivgen_ctx->pool = cc_sram_alloc(drvdata, CC_IVPOOL_SIZE); | ||
192 | if (ivgen_ctx->pool == NULL_SRAM_ADDR) { | ||
193 | dev_err(device, "SRAM pool exhausted\n"); | ||
194 | rc = -ENOMEM; | ||
195 | goto out; | ||
196 | } | ||
197 | |||
198 | return cc_init_iv_sram(drvdata); | ||
199 | |||
200 | out: | ||
201 | cc_ivgen_fini(drvdata); | ||
202 | return rc; | ||
203 | } | ||
204 | |||
205 | /*! | ||
206 | * Acquires a 16 byte IV from the iv-pool | ||
207 | * | ||
208 | * \param drvdata Driver private context | ||
209 | * \param iv_out_dma Array of physical IV out addresses | ||
210 | * \param iv_out_dma_len Length of iv_out_dma array (additional elements | ||
211 | * of iv_out_dma array are ignored) | ||
212 | * \param iv_out_size May be 8 or 16 bytes long | ||
213 | * \param iv_seq IN/OUT array to the descriptors sequence | ||
214 | * \param iv_seq_len IN/OUT pointer to the sequence length | ||
215 | * | ||
216 | * \return int Zero for success, negative value otherwise. | ||
217 | */ | ||
218 | int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[], | ||
219 | unsigned int iv_out_dma_len, unsigned int iv_out_size, | ||
220 | struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len) | ||
221 | { | ||
222 | struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle; | ||
223 | unsigned int idx = *iv_seq_len; | ||
224 | struct device *dev = drvdata_to_dev(drvdata); | ||
225 | unsigned int t; | ||
226 | |||
227 | if (iv_out_size != CC_AES_IV_SIZE && | ||
228 | iv_out_size != CTR_RFC3686_IV_SIZE) { | ||
229 | return -EINVAL; | ||
230 | } | ||
231 | if ((iv_out_dma_len + 1) > CC_IVPOOL_SEQ_LEN) { | ||
232 | /* The sequence will be longer than allowed */ | ||
233 | return -EINVAL; | ||
234 | } | ||
235 | |||
236 | /* Check that the number of generated IVs does not exceed the | ||
237 | * size of the iv_out_dma address array | ||
238 | */ | ||
239 | if (iv_out_dma_len > CC_MAX_IVGEN_DMA_ADDRESSES) { | ||
240 | /* The sequence will be longer than allowed */ | ||
241 | return -EINVAL; | ||
242 | } | ||
243 | |||
244 | for (t = 0; t < iv_out_dma_len; t++) { | ||
245 | /* Acquire IV from pool */ | ||
246 | hw_desc_init(&iv_seq[idx]); | ||
247 | set_din_sram(&iv_seq[idx], (ivgen_ctx->pool + | ||
248 | ivgen_ctx->next_iv_ofs), | ||
249 | iv_out_size); | ||
250 | set_dout_dlli(&iv_seq[idx], iv_out_dma[t], iv_out_size, | ||
251 | NS_BIT, 0); | ||
252 | set_flow_mode(&iv_seq[idx], BYPASS); | ||
253 | idx++; | ||
254 | } | ||
255 | |||
256 | /* The bypass operation is followed by the crypto sequence, hence we | ||
257 | * must ensure the bypass write transaction completes via a memory barrier | ||
258 | */ | ||
259 | hw_desc_init(&iv_seq[idx]); | ||
260 | set_din_no_dma(&iv_seq[idx], 0, 0xfffff0); | ||
261 | set_dout_no_dma(&iv_seq[idx], 0, 0, 1); | ||
262 | idx++; | ||
263 | |||
264 | *iv_seq_len = idx; /* update seq length */ | ||
265 | |||
266 | /* Update iv index */ | ||
267 | ivgen_ctx->next_iv_ofs += iv_out_size; | ||
268 | |||
269 | if ((CC_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) { | ||
270 | dev_dbg(dev, "Pool exhausted, regenerating iv-pool\n"); | ||
271 | /* pool is drained - regenerate it! */ | ||
272 | return cc_gen_iv_pool(ivgen_ctx, iv_seq, iv_seq_len); | ||
273 | } | ||
274 | |||
275 | return 0; | ||
276 | } | ||
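With no remaining users of pool-generated IVs (the ivgen_dma_addr fields left cc_crypto_req earlier in this series), the whole AES-CTR IV pool can go. Under that assumption, IV supply reverts to the standard crypto API contract where the caller attaches an explicit IV to each request, e.g.:

	u8 iv[AES_BLOCK_SIZE];

	get_random_bytes(iv, sizeof(iv));	/* or an IV mandated by the protocol */
	skcipher_request_set_crypt(req, src, dst, nbytes, iv);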
diff --git a/drivers/crypto/ccree/cc_ivgen.h b/drivers/crypto/ccree/cc_ivgen.h deleted file mode 100644 index a9f5e8bba4f1..000000000000 --- a/drivers/crypto/ccree/cc_ivgen.h +++ /dev/null | |||
@@ -1,55 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */ | ||
3 | |||
4 | #ifndef __CC_IVGEN_H__ | ||
5 | #define __CC_IVGEN_H__ | ||
6 | |||
7 | #include "cc_hw_queue_defs.h" | ||
8 | |||
9 | #define CC_IVPOOL_SEQ_LEN 8 | ||
10 | |||
11 | /*! | ||
12 | * Allocates iv-pool and maps resources. | ||
13 | * This function generates the first IV pool. | ||
14 | * | ||
15 | * \param drvdata Driver's private context | ||
16 | * | ||
17 | * \return int Zero for success, negative value otherwise. | ||
18 | */ | ||
19 | int cc_ivgen_init(struct cc_drvdata *drvdata); | ||
20 | |||
21 | /*! | ||
22 | * Free iv-pool and ivgen context. | ||
23 | * | ||
24 | * \param drvdata | ||
25 | */ | ||
26 | void cc_ivgen_fini(struct cc_drvdata *drvdata); | ||
27 | |||
28 | /*! | ||
29 | * Generates the initial pool in SRAM. | ||
30 | * This function should be invoked when resuming the driver. | ||
31 | * | ||
32 | * \param drvdata | ||
33 | * | ||
34 | * \return int Zero for success, negative value otherwise. | ||
35 | */ | ||
36 | int cc_init_iv_sram(struct cc_drvdata *drvdata); | ||
37 | |||
38 | /*! | ||
39 | * Acquires a 16 byte IV from the iv-pool | ||
40 | * | ||
41 | * \param drvdata Driver private context | ||
42 | * \param iv_out_dma Array of physical IV out addresses | ||
43 | * \param iv_out_dma_len Length of iv_out_dma array (additional elements of | ||
44 | * iv_out_dma array are ignored) | ||
45 | * \param iv_out_size May be 8 or 16 bytes long | ||
46 | * \param iv_seq IN/OUT array to the descriptors sequence | ||
47 | * \param iv_seq_len IN/OUT pointer to the sequence length | ||
48 | * | ||
49 | * \return int Zero for success, negative value otherwise. | ||
50 | */ | ||
51 | int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[], | ||
52 | unsigned int iv_out_dma_len, unsigned int iv_out_size, | ||
53 | struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len); | ||
54 | |||
55 | #endif /*__CC_IVGEN_H__*/ | ||
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c index 899a52f05b7a..dbc508fb719b 100644 --- a/drivers/crypto/ccree/cc_pm.c +++ b/drivers/crypto/ccree/cc_pm.c | |||
@@ -8,7 +8,6 @@ | |||
8 | #include "cc_buffer_mgr.h" | 8 | #include "cc_buffer_mgr.h" |
9 | #include "cc_request_mgr.h" | 9 | #include "cc_request_mgr.h" |
10 | #include "cc_sram_mgr.h" | 10 | #include "cc_sram_mgr.h" |
11 | #include "cc_ivgen.h" | ||
12 | #include "cc_hash.h" | 11 | #include "cc_hash.h" |
13 | #include "cc_pm.h" | 12 | #include "cc_pm.h" |
14 | #include "cc_fips.h" | 13 | #include "cc_fips.h" |
@@ -73,7 +72,6 @@ int cc_pm_resume(struct device *dev) | |||
73 | /* must be after the queue resuming as it uses the HW queue*/ | 72 | /* must be after the queue resuming as it uses the HW queue*/ |
74 | cc_init_hash_sram(drvdata); | 73 | cc_init_hash_sram(drvdata); |
75 | 74 | ||
76 | cc_init_iv_sram(drvdata); | ||
77 | return 0; | 75 | return 0; |
78 | } | 76 | } |
79 | 77 | ||
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c index 0bc6ccb0b899..a947d5a2cf35 100644 --- a/drivers/crypto/ccree/cc_request_mgr.c +++ b/drivers/crypto/ccree/cc_request_mgr.c | |||
@@ -6,7 +6,6 @@ | |||
6 | #include "cc_driver.h" | 6 | #include "cc_driver.h" |
7 | #include "cc_buffer_mgr.h" | 7 | #include "cc_buffer_mgr.h" |
8 | #include "cc_request_mgr.h" | 8 | #include "cc_request_mgr.h" |
9 | #include "cc_ivgen.h" | ||
10 | #include "cc_pm.h" | 9 | #include "cc_pm.h" |
11 | 10 | ||
12 | #define CC_MAX_POLL_ITER 10 | 11 | #define CC_MAX_POLL_ITER 10 |
@@ -281,36 +280,12 @@ static int cc_queues_status(struct cc_drvdata *drvdata, | |||
281 | static int cc_do_send_request(struct cc_drvdata *drvdata, | 280 | static int cc_do_send_request(struct cc_drvdata *drvdata, |
282 | struct cc_crypto_req *cc_req, | 281 | struct cc_crypto_req *cc_req, |
283 | struct cc_hw_desc *desc, unsigned int len, | 282 | struct cc_hw_desc *desc, unsigned int len, |
284 | bool add_comp, bool ivgen) | 283 | bool add_comp) |
285 | { | 284 | { |
286 | struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; | 285 | struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; |
287 | unsigned int used_sw_slots; | 286 | unsigned int used_sw_slots; |
288 | unsigned int iv_seq_len = 0; | ||
289 | unsigned int total_seq_len = len; /*initial sequence length*/ | 287 | unsigned int total_seq_len = len; /*initial sequence length*/ |
290 | struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN]; | ||
291 | struct device *dev = drvdata_to_dev(drvdata); | 288 | struct device *dev = drvdata_to_dev(drvdata); |
292 | int rc; | ||
293 | |||
294 | if (ivgen) { | ||
295 | dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n", | ||
296 | cc_req->ivgen_dma_addr_len, | ||
297 | &cc_req->ivgen_dma_addr[0], | ||
298 | &cc_req->ivgen_dma_addr[1], | ||
299 | &cc_req->ivgen_dma_addr[2], | ||
300 | cc_req->ivgen_size); | ||
301 | |||
302 | /* Acquire IV from pool */ | ||
303 | rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr, | ||
304 | cc_req->ivgen_dma_addr_len, | ||
305 | cc_req->ivgen_size, iv_seq, &iv_seq_len); | ||
306 | |||
307 | if (rc) { | ||
308 | dev_err(dev, "Failed to generate IV (rc=%d)\n", rc); | ||
309 | return rc; | ||
310 | } | ||
311 | |||
312 | total_seq_len += iv_seq_len; | ||
313 | } | ||
314 | 289 | ||
315 | used_sw_slots = ((req_mgr_h->req_queue_head - | 290 | used_sw_slots = ((req_mgr_h->req_queue_head - |
316 | req_mgr_h->req_queue_tail) & | 291 | req_mgr_h->req_queue_tail) & |
@@ -334,8 +309,6 @@ static int cc_do_send_request(struct cc_drvdata *drvdata, | |||
334 | wmb(); | 309 | wmb(); |
335 | 310 | ||
336 | /* STAT_PHASE_4: Push sequence */ | 311 | /* STAT_PHASE_4: Push sequence */ |
337 | if (ivgen) | ||
338 | enqueue_seq(drvdata, iv_seq, iv_seq_len); | ||
339 | 312 | ||
340 | enqueue_seq(drvdata, desc, len); | 313 | enqueue_seq(drvdata, desc, len); |
341 | 314 | ||
@@ -380,8 +353,6 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata) | |||
380 | struct cc_bl_item *bli; | 353 | struct cc_bl_item *bli; |
381 | struct cc_crypto_req *creq; | 354 | struct cc_crypto_req *creq; |
382 | void *req; | 355 | void *req; |
383 | bool ivgen; | ||
384 | unsigned int total_len; | ||
385 | struct device *dev = drvdata_to_dev(drvdata); | 356 | struct device *dev = drvdata_to_dev(drvdata); |
386 | int rc; | 357 | int rc; |
387 | 358 | ||
@@ -406,12 +377,9 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata) | |||
406 | bli->notif = true; | 377 | bli->notif = true; |
407 | } | 378 | } |
408 | 379 | ||
409 | ivgen = !!creq->ivgen_dma_addr_len; | ||
410 | total_len = bli->len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0); | ||
411 | |||
412 | spin_lock(&mgr->hw_lock); | 380 | spin_lock(&mgr->hw_lock); |
413 | 381 | ||
414 | rc = cc_queues_status(drvdata, mgr, total_len); | 382 | rc = cc_queues_status(drvdata, mgr, bli->len); |
415 | if (rc) { | 383 | if (rc) { |
416 | /* | 384 | /* |
417 | * There is still not room in the FIFO for | 385 | * There is still not room in the FIFO for |
@@ -423,7 +391,7 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata) | |||
423 | } | 391 | } |
424 | 392 | ||
425 | rc = cc_do_send_request(drvdata, &bli->creq, bli->desc, | 393 | rc = cc_do_send_request(drvdata, &bli->creq, bli->desc, |
426 | bli->len, false, ivgen); | 394 | bli->len, false); |
427 | 395 | ||
428 | spin_unlock(&mgr->hw_lock); | 396 | spin_unlock(&mgr->hw_lock); |
429 | 397 | ||
@@ -447,8 +415,6 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req, | |||
447 | { | 415 | { |
448 | int rc; | 416 | int rc; |
449 | struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; | 417 | struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; |
450 | bool ivgen = !!cc_req->ivgen_dma_addr_len; | ||
451 | unsigned int total_len = len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0); | ||
452 | struct device *dev = drvdata_to_dev(drvdata); | 418 | struct device *dev = drvdata_to_dev(drvdata); |
453 | bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG; | 419 | bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG; |
454 | gfp_t flags = cc_gfp_flags(req); | 420 | gfp_t flags = cc_gfp_flags(req); |
@@ -461,7 +427,7 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req, | |||
461 | } | 427 | } |
462 | 428 | ||
463 | spin_lock_bh(&mgr->hw_lock); | 429 | spin_lock_bh(&mgr->hw_lock); |
464 | rc = cc_queues_status(drvdata, mgr, total_len); | 430 | rc = cc_queues_status(drvdata, mgr, len); |
465 | 431 | ||
466 | #ifdef CC_DEBUG_FORCE_BACKLOG | 432 | #ifdef CC_DEBUG_FORCE_BACKLOG |
467 | if (backlog_ok) | 433 | if (backlog_ok) |
@@ -486,8 +452,7 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req, | |||
486 | } | 452 | } |
487 | 453 | ||
488 | if (!rc) | 454 | if (!rc) |
489 | rc = cc_do_send_request(drvdata, cc_req, desc, len, false, | 455 | rc = cc_do_send_request(drvdata, cc_req, desc, len, false); |
490 | ivgen); | ||
491 | 456 | ||
492 | spin_unlock_bh(&mgr->hw_lock); | 457 | spin_unlock_bh(&mgr->hw_lock); |
493 | return rc; | 458 | return rc; |
@@ -527,7 +492,7 @@ int cc_send_sync_request(struct cc_drvdata *drvdata, | |||
527 | reinit_completion(&drvdata->hw_queue_avail); | 492 | reinit_completion(&drvdata->hw_queue_avail); |
528 | } | 493 | } |
529 | 494 | ||
530 | rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false); | 495 | rc = cc_do_send_request(drvdata, cc_req, desc, len, true); |
531 | spin_unlock_bh(&mgr->hw_lock); | 496 | spin_unlock_bh(&mgr->hw_lock); |
532 | 497 | ||
533 | if (rc != -EINPROGRESS) { | 498 | if (rc != -EINPROGRESS) { |
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig index 4b9b37a130d3..250150560e68 100644 --- a/drivers/crypto/chelsio/Kconfig +++ b/drivers/crypto/chelsio/Kconfig | |||
@@ -2,6 +2,7 @@ | |||
2 | config CRYPTO_DEV_CHELSIO | 2 | config CRYPTO_DEV_CHELSIO |
3 | tristate "Chelsio Crypto Co-processor Driver" | 3 | tristate "Chelsio Crypto Co-processor Driver" |
4 | depends on CHELSIO_T4 | 4 | depends on CHELSIO_T4 |
5 | select CRYPTO_LIB_AES | ||
5 | select CRYPTO_SHA1 | 6 | select CRYPTO_SHA1 |
6 | select CRYPTO_SHA256 | 7 | select CRYPTO_SHA256 |
7 | select CRYPTO_SHA512 | 8 | select CRYPTO_SHA512 |
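CRYPTO_LIB_AES provides the synchronous, stack-based AES primitives from <crypto/aes.h> that the conversions below rely on: aes_expandkey(), aes_encrypt() and aes_decrypt() operate on a plain struct crypto_aes_ctx, with no tfm allocation and no failure mode beyond key-length validation. A minimal usage sketch:

	#include <crypto/aes.h>
	#include <linux/string.h>

	static int encrypt_one_block(const u8 *key, unsigned int keylen,
				     u8 block[AES_BLOCK_SIZE])
	{
		struct crypto_aes_ctx aes;
		int ret;

		ret = aes_expandkey(&aes, key, keylen);	/* -EINVAL on bad keylen */
		if (ret)
			return ret;
		aes_encrypt(&aes, block, block);	/* single block, in place */
		memzero_explicit(&aes, sizeof(aes));	/* scrub the round keys */
		return 0;
	}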
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 177f572b9589..38ee38b37ae6 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c | |||
@@ -1023,22 +1023,21 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv, | |||
1023 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 1023 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
1024 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); | 1024 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); |
1025 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | 1025 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); |
1026 | struct crypto_cipher *cipher; | 1026 | struct crypto_aes_ctx aes; |
1027 | int ret, i; | 1027 | int ret, i; |
1028 | u8 *key; | 1028 | u8 *key; |
1029 | unsigned int keylen; | 1029 | unsigned int keylen; |
1030 | int round = reqctx->last_req_len / AES_BLOCK_SIZE; | 1030 | int round = reqctx->last_req_len / AES_BLOCK_SIZE; |
1031 | int round8 = round / 8; | 1031 | int round8 = round / 8; |
1032 | 1032 | ||
1033 | cipher = ablkctx->aes_generic; | ||
1034 | memcpy(iv, reqctx->iv, AES_BLOCK_SIZE); | 1033 | memcpy(iv, reqctx->iv, AES_BLOCK_SIZE); |
1035 | 1034 | ||
1036 | keylen = ablkctx->enckey_len / 2; | 1035 | keylen = ablkctx->enckey_len / 2; |
1037 | key = ablkctx->key + keylen; | 1036 | key = ablkctx->key + keylen; |
1038 | ret = crypto_cipher_setkey(cipher, key, keylen); | 1037 | ret = aes_expandkey(&aes, key, keylen); |
1039 | if (ret) | 1038 | if (ret) |
1040 | goto out; | 1039 | return ret; |
1041 | crypto_cipher_encrypt_one(cipher, iv, iv); | 1040 | aes_encrypt(&aes, iv, iv); |
1042 | for (i = 0; i < round8; i++) | 1041 | for (i = 0; i < round8; i++) |
1043 | gf128mul_x8_ble((le128 *)iv, (le128 *)iv); | 1042 | gf128mul_x8_ble((le128 *)iv, (le128 *)iv); |
1044 | 1043 | ||
@@ -1046,9 +1045,10 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv, | |||
1046 | gf128mul_x_ble((le128 *)iv, (le128 *)iv); | 1045 | gf128mul_x_ble((le128 *)iv, (le128 *)iv); |
1047 | 1046 | ||
1048 | if (!isfinal) | 1047 | if (!isfinal) |
1049 | crypto_cipher_decrypt_one(cipher, iv, iv); | 1048 | aes_decrypt(&aes, iv, iv); |
1050 | out: | 1049 | |
1051 | return ret; | 1050 | memzero_explicit(&aes, sizeof(aes)); |
1051 | return 0; | ||
1052 | } | 1052 | } |
1053 | 1053 | ||
1054 | static int chcr_update_cipher_iv(struct ablkcipher_request *req, | 1054 | static int chcr_update_cipher_iv(struct ablkcipher_request *req, |
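chcr_update_tweak() recomputes the XTS tweak after a partial request: with T = AES_K2(IV), the tweak for block j is T * alpha^j over GF(2^128), so advancing by "round" blocks is done in x^8 strides plus single x steps. The arithmetic, restated as a sketch:

	/* T *= alpha^round, eight doublings at a time */
	for (i = 0; i < round / 8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
	for (i = 0; i < round % 8; i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);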
@@ -1411,16 +1411,6 @@ static int chcr_cra_init(struct crypto_tfm *tfm) | |||
1411 | return PTR_ERR(ablkctx->sw_cipher); | 1411 | return PTR_ERR(ablkctx->sw_cipher); |
1412 | } | 1412 | } |
1413 | 1413 | ||
1414 | if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) { | ||
1415 | /* To update tweak*/ | ||
1416 | ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0); | ||
1417 | if (IS_ERR(ablkctx->aes_generic)) { | ||
1418 | pr_err("failed to allocate aes cipher for tweak\n"); | ||
1419 | return PTR_ERR(ablkctx->aes_generic); | ||
1420 | } | ||
1421 | } else | ||
1422 | ablkctx->aes_generic = NULL; | ||
1423 | |||
1424 | tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx); | 1414 | tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx); |
1425 | return chcr_device_init(crypto_tfm_ctx(tfm)); | 1415 | return chcr_device_init(crypto_tfm_ctx(tfm)); |
1426 | } | 1416 | } |
@@ -1451,8 +1441,6 @@ static void chcr_cra_exit(struct crypto_tfm *tfm) | |||
1451 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | 1441 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); |
1452 | 1442 | ||
1453 | crypto_free_sync_skcipher(ablkctx->sw_cipher); | 1443 | crypto_free_sync_skcipher(ablkctx->sw_cipher); |
1454 | if (ablkctx->aes_generic) | ||
1455 | crypto_free_cipher(ablkctx->aes_generic); | ||
1456 | } | 1444 | } |
1457 | 1445 | ||
1458 | static int get_alg_config(struct algo_param *params, | 1446 | static int get_alg_config(struct algo_param *params, |
@@ -3364,9 +3352,9 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, | |||
3364 | { | 3352 | { |
3365 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); | 3353 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); |
3366 | struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx); | 3354 | struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx); |
3367 | struct crypto_cipher *cipher; | ||
3368 | unsigned int ck_size; | 3355 | unsigned int ck_size; |
3369 | int ret = 0, key_ctx_size = 0; | 3356 | int ret = 0, key_ctx_size = 0; |
3357 | struct crypto_aes_ctx aes; | ||
3370 | 3358 | ||
3371 | aeadctx->enckey_len = 0; | 3359 | aeadctx->enckey_len = 0; |
3372 | crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); | 3360 | crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); |
@@ -3409,23 +3397,15 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, | |||
3409 | /* Calculate the H = CIPH(K, 0 repeated 16 times). | 3397 | /* Calculate the H = CIPH(K, 0 repeated 16 times). |
3410 | * It will go in key context | 3398 | * It will go in key context |
3411 | */ | 3399 | */ |
3412 | cipher = crypto_alloc_cipher("aes-generic", 0, 0); | 3400 | ret = aes_expandkey(&aes, key, keylen); |
3413 | if (IS_ERR(cipher)) { | ||
3414 | aeadctx->enckey_len = 0; | ||
3415 | ret = -ENOMEM; | ||
3416 | goto out; | ||
3417 | } | ||
3418 | |||
3419 | ret = crypto_cipher_setkey(cipher, key, keylen); | ||
3420 | if (ret) { | 3401 | if (ret) { |
3421 | aeadctx->enckey_len = 0; | 3402 | aeadctx->enckey_len = 0; |
3422 | goto out1; | 3403 | goto out; |
3423 | } | 3404 | } |
3424 | memset(gctx->ghash_h, 0, AEAD_H_SIZE); | 3405 | memset(gctx->ghash_h, 0, AEAD_H_SIZE); |
3425 | crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h); | 3406 | aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h); |
3407 | memzero_explicit(&aes, sizeof(aes)); | ||
3426 | 3408 | ||
3427 | out1: | ||
3428 | crypto_free_cipher(cipher); | ||
3429 | out: | 3409 | out: |
3430 | return ret; | 3410 | return ret; |
3431 | } | 3411 | } |
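The GCM hash subkey is H = AES_K(0^128); deriving it takes a single block encryption, which is exactly what the library cipher offers, so the throwaway "aes-generic" tfm disappears. The same conversion recurs in chcr_ipsec.c and chtls_hw.c below. The derivation in isolation, as a sketch:

	u8 h[AEAD_H_SIZE] = { };	/* 16 zero bytes */
	struct crypto_aes_ctx aes;

	if (aes_expandkey(&aes, key, keylen) == 0)
		aes_encrypt(&aes, h, h);	/* h = E_K(0...0) */
	memzero_explicit(&aes, sizeof(aes));	/* don't leave round keys around */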
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h index ee20dd899e83..d1e6b51df0ce 100644 --- a/drivers/crypto/chelsio/chcr_algo.h +++ b/drivers/crypto/chelsio/chcr_algo.h | |||
@@ -333,26 +333,26 @@ struct phys_sge_pairs { | |||
333 | }; | 333 | }; |
334 | 334 | ||
335 | 335 | ||
336 | static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { | 336 | static const u32 chcr_sha1_init[SHA1_DIGEST_SIZE / 4] = { |
337 | SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, | 337 | SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, |
338 | }; | 338 | }; |
339 | 339 | ||
340 | static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { | 340 | static const u32 chcr_sha224_init[SHA256_DIGEST_SIZE / 4] = { |
341 | SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, | 341 | SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, |
342 | SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, | 342 | SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, |
343 | }; | 343 | }; |
344 | 344 | ||
345 | static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { | 345 | static const u32 chcr_sha256_init[SHA256_DIGEST_SIZE / 4] = { |
346 | SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, | 346 | SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, |
347 | SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, | 347 | SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, |
348 | }; | 348 | }; |
349 | 349 | ||
350 | static const u64 sha384_init[SHA512_DIGEST_SIZE / 8] = { | 350 | static const u64 chcr_sha384_init[SHA512_DIGEST_SIZE / 8] = { |
351 | SHA384_H0, SHA384_H1, SHA384_H2, SHA384_H3, | 351 | SHA384_H0, SHA384_H1, SHA384_H2, SHA384_H3, |
352 | SHA384_H4, SHA384_H5, SHA384_H6, SHA384_H7, | 352 | SHA384_H4, SHA384_H5, SHA384_H6, SHA384_H7, |
353 | }; | 353 | }; |
354 | 354 | ||
355 | static const u64 sha512_init[SHA512_DIGEST_SIZE / 8] = { | 355 | static const u64 chcr_sha512_init[SHA512_DIGEST_SIZE / 8] = { |
356 | SHA512_H0, SHA512_H1, SHA512_H2, SHA512_H3, | 356 | SHA512_H0, SHA512_H1, SHA512_H2, SHA512_H3, |
357 | SHA512_H4, SHA512_H5, SHA512_H6, SHA512_H7, | 357 | SHA512_H4, SHA512_H5, SHA512_H6, SHA512_H7, |
358 | }; | 358 | }; |
@@ -362,21 +362,21 @@ static inline void copy_hash_init_values(char *key, int digestsize) | |||
362 | u8 i; | 362 | u8 i; |
363 | __be32 *dkey = (__be32 *)key; | 363 | __be32 *dkey = (__be32 *)key; |
364 | u64 *ldkey = (u64 *)key; | 364 | u64 *ldkey = (u64 *)key; |
365 | __be64 *sha384 = (__be64 *)sha384_init; | 365 | __be64 *sha384 = (__be64 *)chcr_sha384_init; |
366 | __be64 *sha512 = (__be64 *)sha512_init; | 366 | __be64 *sha512 = (__be64 *)chcr_sha512_init; |
367 | 367 | ||
368 | switch (digestsize) { | 368 | switch (digestsize) { |
369 | case SHA1_DIGEST_SIZE: | 369 | case SHA1_DIGEST_SIZE: |
370 | for (i = 0; i < SHA1_INIT_STATE; i++) | 370 | for (i = 0; i < SHA1_INIT_STATE; i++) |
371 | dkey[i] = cpu_to_be32(sha1_init[i]); | 371 | dkey[i] = cpu_to_be32(chcr_sha1_init[i]); |
372 | break; | 372 | break; |
373 | case SHA224_DIGEST_SIZE: | 373 | case SHA224_DIGEST_SIZE: |
374 | for (i = 0; i < SHA224_INIT_STATE; i++) | 374 | for (i = 0; i < SHA224_INIT_STATE; i++) |
375 | dkey[i] = cpu_to_be32(sha224_init[i]); | 375 | dkey[i] = cpu_to_be32(chcr_sha224_init[i]); |
376 | break; | 376 | break; |
377 | case SHA256_DIGEST_SIZE: | 377 | case SHA256_DIGEST_SIZE: |
378 | for (i = 0; i < SHA256_INIT_STATE; i++) | 378 | for (i = 0; i < SHA256_INIT_STATE; i++) |
379 | dkey[i] = cpu_to_be32(sha256_init[i]); | 379 | dkey[i] = cpu_to_be32(chcr_sha256_init[i]); |
380 | break; | 380 | break; |
381 | case SHA384_DIGEST_SIZE: | 381 | case SHA384_DIGEST_SIZE: |
382 | for (i = 0; i < SHA384_INIT_STATE; i++) | 382 | for (i = 0; i < SHA384_INIT_STATE; i++) |
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index 655606f2e4d0..993c97e70565 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h | |||
@@ -172,7 +172,6 @@ static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm) | |||
172 | 172 | ||
173 | struct ablk_ctx { | 173 | struct ablk_ctx { |
174 | struct crypto_sync_skcipher *sw_cipher; | 174 | struct crypto_sync_skcipher *sw_cipher; |
175 | struct crypto_cipher *aes_generic; | ||
176 | __be32 key_ctx_hdr; | 175 | __be32 key_ctx_hdr; |
177 | unsigned int enckey_len; | 176 | unsigned int enckey_len; |
178 | unsigned char ciph_mode; | 177 | unsigned char ciph_mode; |
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c index f429aae72542..24355680f30a 100644 --- a/drivers/crypto/chelsio/chcr_ipsec.c +++ b/drivers/crypto/chelsio/chcr_ipsec.c | |||
@@ -132,11 +132,11 @@ static inline int chcr_ipsec_setauthsize(struct xfrm_state *x, | |||
132 | static inline int chcr_ipsec_setkey(struct xfrm_state *x, | 132 | static inline int chcr_ipsec_setkey(struct xfrm_state *x, |
133 | struct ipsec_sa_entry *sa_entry) | 133 | struct ipsec_sa_entry *sa_entry) |
134 | { | 134 | { |
135 | struct crypto_cipher *cipher; | ||
136 | int keylen = (x->aead->alg_key_len + 7) / 8; | 135 | int keylen = (x->aead->alg_key_len + 7) / 8; |
137 | unsigned char *key = x->aead->alg_key; | 136 | unsigned char *key = x->aead->alg_key; |
138 | int ck_size, key_ctx_size = 0; | 137 | int ck_size, key_ctx_size = 0; |
139 | unsigned char ghash_h[AEAD_H_SIZE]; | 138 | unsigned char ghash_h[AEAD_H_SIZE]; |
139 | struct crypto_aes_ctx aes; | ||
140 | int ret = 0; | 140 | int ret = 0; |
141 | 141 | ||
142 | if (keylen > 3) { | 142 | if (keylen > 3) { |
@@ -170,26 +170,19 @@ static inline int chcr_ipsec_setkey(struct xfrm_state *x, | |||
170 | /* Calculate the H = CIPH(K, 0 repeated 16 times). | 170 | /* Calculate the H = CIPH(K, 0 repeated 16 times). |
171 | * It will go in key context | 171 | * It will go in key context |
172 | */ | 172 | */ |
173 | cipher = crypto_alloc_cipher("aes-generic", 0, 0); | 173 | ret = aes_expandkey(&aes, key, keylen); |
174 | if (IS_ERR(cipher)) { | ||
175 | sa_entry->enckey_len = 0; | ||
176 | ret = -ENOMEM; | ||
177 | goto out; | ||
178 | } | ||
179 | |||
180 | ret = crypto_cipher_setkey(cipher, key, keylen); | ||
181 | if (ret) { | 174 | if (ret) { |
182 | sa_entry->enckey_len = 0; | 175 | sa_entry->enckey_len = 0; |
183 | goto out1; | 176 | goto out; |
184 | } | 177 | } |
185 | memset(ghash_h, 0, AEAD_H_SIZE); | 178 | memset(ghash_h, 0, AEAD_H_SIZE); |
186 | crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h); | 179 | aes_encrypt(&aes, ghash_h, ghash_h); |
180 | memzero_explicit(&aes, sizeof(aes)); | ||
181 | |||
187 | memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) * | 182 | memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) * |
188 | 16), ghash_h, AEAD_H_SIZE); | 183 | 16), ghash_h, AEAD_H_SIZE); |
189 | sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) + | 184 | sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) + |
190 | AEAD_H_SIZE; | 185 | AEAD_H_SIZE; |
191 | out1: | ||
192 | crypto_free_cipher(cipher); | ||
193 | out: | 186 | out: |
194 | return ret; | 187 | return ret; |
195 | } | 188 | } |
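The hunk above (like the chtls one below) swaps a heap-allocated "aes-generic" crypto_cipher for direct calls into the new AES library. A minimal sketch of that pattern, assuming <crypto/aes.h> provides the library API and using a hypothetical helper name compute_ghash_h():

#include <crypto/aes.h>
#include <linux/string.h>

/* Derive the GCM hash subkey H = E_K(0^128) without allocating a tfm. */
static int compute_ghash_h(const u8 *key, unsigned int keylen,
			   u8 ghash_h[AES_BLOCK_SIZE])
{
	struct crypto_aes_ctx aes;
	int ret;

	ret = aes_expandkey(&aes, key, keylen);	/* also validates keylen */
	if (ret)
		return ret;

	memset(ghash_h, 0, AES_BLOCK_SIZE);
	aes_encrypt(&aes, ghash_h, ghash_h);	/* encrypt the all-zero block */
	memzero_explicit(&aes, sizeof(aes));	/* scrub the expanded key */
	return 0;
}

Because no tfm is allocated, the -ENOMEM error path and the crypto_free_cipher() cleanup label both disappear, which is exactly what the hunk deletes.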
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c index f2424f4c5f78..2a34035d3cfb 100644 --- a/drivers/crypto/chelsio/chtls/chtls_hw.c +++ b/drivers/crypto/chelsio/chtls/chtls_hw.c | |||
@@ -213,8 +213,8 @@ static int chtls_key_info(struct chtls_sock *csk, | |||
213 | unsigned char key[AES_KEYSIZE_128]; | 213 | unsigned char key[AES_KEYSIZE_128]; |
214 | struct tls12_crypto_info_aes_gcm_128 *gcm_ctx; | 214 | struct tls12_crypto_info_aes_gcm_128 *gcm_ctx; |
215 | unsigned char ghash_h[AEAD_H_SIZE]; | 215 | unsigned char ghash_h[AEAD_H_SIZE]; |
216 | struct crypto_cipher *cipher; | ||
217 | int ck_size, key_ctx_size; | 216 | int ck_size, key_ctx_size; |
217 | struct crypto_aes_ctx aes; | ||
218 | int ret; | 218 | int ret; |
219 | 219 | ||
220 | gcm_ctx = (struct tls12_crypto_info_aes_gcm_128 *) | 220 | gcm_ctx = (struct tls12_crypto_info_aes_gcm_128 *) |
@@ -234,18 +234,13 @@ static int chtls_key_info(struct chtls_sock *csk, | |||
234 | /* Calculate the H = CIPH(K, 0 repeated 16 times). | 234 | /* Calculate the H = CIPH(K, 0 repeated 16 times). |
235 | * It will go in key context | 235 | * It will go in key context |
236 | */ | 236 | */ |
237 | cipher = crypto_alloc_cipher("aes", 0, 0); | 237 | ret = aes_expandkey(&aes, key, keylen); |
238 | if (IS_ERR(cipher)) { | ||
239 | ret = -ENOMEM; | ||
240 | goto out; | ||
241 | } | ||
242 | |||
243 | ret = crypto_cipher_setkey(cipher, key, keylen); | ||
244 | if (ret) | 238 | if (ret) |
245 | goto out1; | 239 | return ret; |
246 | 240 | ||
247 | memset(ghash_h, 0, AEAD_H_SIZE); | 241 | memset(ghash_h, 0, AEAD_H_SIZE); |
248 | crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h); | 242 | aes_encrypt(&aes, ghash_h, ghash_h); |
243 | memzero_explicit(&aes, sizeof(aes)); | ||
249 | csk->tlshws.keylen = key_ctx_size; | 244 | csk->tlshws.keylen = key_ctx_size; |
250 | 245 | ||
251 | /* Copy the Key context */ | 246 | /* Copy the Key context */ |
@@ -269,10 +264,7 @@ static int chtls_key_info(struct chtls_sock *csk, | |||
269 | /* erase key info from driver */ | 264 | /* erase key info from driver */ |
270 | memset(gcm_ctx->key, 0, keylen); | 265 | memset(gcm_ctx->key, 0, keylen); |
271 | 266 | ||
272 | out1: | 267 | return 0; |
273 | crypto_free_cipher(cipher); | ||
274 | out: | ||
275 | return ret; | ||
276 | } | 268 | } |
277 | 269 | ||
278 | static void chtls_set_scmd(struct chtls_sock *csk) | 270 | static void chtls_set_scmd(struct chtls_sock *csk) |
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c index 2cfabb99cb6e..cbd8ca6e52ee 100644 --- a/drivers/crypto/exynos-rng.c +++ b/drivers/crypto/exynos-rng.c | |||
@@ -268,7 +268,6 @@ static struct rng_alg exynos_rng_alg = { | |||
268 | static int exynos_rng_probe(struct platform_device *pdev) | 268 | static int exynos_rng_probe(struct platform_device *pdev) |
269 | { | 269 | { |
270 | struct exynos_rng_dev *rng; | 270 | struct exynos_rng_dev *rng; |
271 | struct resource *res; | ||
272 | int ret; | 271 | int ret; |
273 | 272 | ||
274 | if (exynos_rng_dev) | 273 | if (exynos_rng_dev) |
@@ -289,8 +288,7 @@ static int exynos_rng_probe(struct platform_device *pdev) | |||
289 | return PTR_ERR(rng->clk); | 288 | return PTR_ERR(rng->clk); |
290 | } | 289 | } |
291 | 290 | ||
292 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 291 | rng->mem = devm_platform_ioremap_resource(pdev, 0); |
293 | rng->mem = devm_ioremap_resource(&pdev->dev, res); | ||
294 | if (IS_ERR(rng->mem)) | 292 | if (IS_ERR(rng->mem)) |
295 | return PTR_ERR(rng->mem); | 293 | return PTR_ERR(rng->mem); |
296 | 294 | ||
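devm_platform_ioremap_resource() collapses the platform_get_resource() + devm_ioremap_resource() pair removed above into a single call. A minimal sketch, assuming a hypothetical foo_probe():

#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* fetches IORESOURCE_MEM index 0 and ioremaps it, devres-managed */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... program the device through base ... */
	return 0;
}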
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index 5c3f02e4aece..a18e62df68d9 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <linux/ktime.h> | 21 | #include <linux/ktime.h> |
22 | 22 | ||
23 | #include <crypto/algapi.h> | 23 | #include <crypto/algapi.h> |
24 | #include <crypto/des.h> | 24 | #include <crypto/internal/des.h> |
25 | 25 | ||
26 | static char hifn_pll_ref[sizeof("extNNN")] = "ext"; | 26 | static char hifn_pll_ref[sizeof("extNNN")] = "ext"; |
27 | module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444); | 27 | module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444); |
@@ -1939,25 +1939,13 @@ static void hifn_flush(struct hifn_device *dev) | |||
1939 | static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | 1939 | static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key, |
1940 | unsigned int len) | 1940 | unsigned int len) |
1941 | { | 1941 | { |
1942 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | 1942 | struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher); |
1943 | struct hifn_context *ctx = crypto_tfm_ctx(tfm); | ||
1944 | struct hifn_device *dev = ctx->dev; | 1943 | struct hifn_device *dev = ctx->dev; |
1944 | int err; | ||
1945 | 1945 | ||
1946 | if (len > HIFN_MAX_CRYPT_KEY_LENGTH) { | 1946 | err = verify_ablkcipher_des_key(cipher, key); |
1947 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | 1947 | if (err) |
1948 | return -1; | 1948 | return err; |
1949 | } | ||
1950 | |||
1951 | if (len == HIFN_DES_KEY_LENGTH) { | ||
1952 | u32 tmp[DES_EXPKEY_WORDS]; | ||
1953 | int ret = des_ekey(tmp, key); | ||
1954 | |||
1955 | if (unlikely(ret == 0) && | ||
1956 | (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { | ||
1957 | tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
1958 | return -EINVAL; | ||
1959 | } | ||
1960 | } | ||
1961 | 1949 | ||
1962 | dev->flags &= ~HIFN_FLAG_OLD_KEY; | 1950 | dev->flags &= ~HIFN_FLAG_OLD_KEY; |
1963 | 1951 | ||
@@ -1972,15 +1960,11 @@ static int hifn_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | |||
1972 | { | 1960 | { |
1973 | struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher); | 1961 | struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher); |
1974 | struct hifn_device *dev = ctx->dev; | 1962 | struct hifn_device *dev = ctx->dev; |
1975 | u32 flags; | ||
1976 | int err; | 1963 | int err; |
1977 | 1964 | ||
1978 | flags = crypto_ablkcipher_get_flags(cipher); | 1965 | err = verify_ablkcipher_des3_key(cipher, key); |
1979 | err = __des3_verify_key(&flags, key); | 1966 | if (err) |
1980 | if (unlikely(err)) { | ||
1981 | crypto_ablkcipher_set_flags(cipher, flags); | ||
1982 | return err; | 1967 | return err; |
1983 | } | ||
1984 | 1968 | ||
1985 | dev->flags &= ~HIFN_FLAG_OLD_KEY; | 1969 | dev->flags &= ~HIFN_FLAG_OLD_KEY; |
1986 | 1970 | ||
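The verify_ablkcipher_des_key() and verify_ablkcipher_des3_key() helpers from <crypto/internal/des.h> fold the weak-key policy checks that the driver used to open-code, including the CRYPTO_TFM_REQ_FORBID_WEAK_KEYS handling, into one call. A hedged sketch of a setkey callback built on them (bar_des3_setkey is a hypothetical name):

#include <crypto/internal/des.h>

static int bar_des3_setkey(struct crypto_ablkcipher *cipher,
			   const u8 *key, unsigned int len)
{
	int err;

	/* rejects keys that violate the 3DES weak-key policy */
	err = verify_ablkcipher_des3_key(cipher, key);
	if (err)
		return err;

	/* ... program the hardware key, as the driver does ... */
	return 0;
}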
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index 8ca9c503bcb0..ebaf91e0146d 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig | |||
@@ -4,6 +4,7 @@ config CRYPTO_DEV_HISI_SEC | |||
4 | tristate "Support for Hisilicon SEC crypto block cipher accelerator" | 4 | tristate "Support for Hisilicon SEC crypto block cipher accelerator" |
5 | select CRYPTO_BLKCIPHER | 5 | select CRYPTO_BLKCIPHER |
6 | select CRYPTO_ALGAPI | 6 | select CRYPTO_ALGAPI |
7 | select CRYPTO_LIB_DES | ||
7 | select SG_SPLIT | 8 | select SG_SPLIT |
8 | depends on ARM64 || COMPILE_TEST | 9 | depends on ARM64 || COMPILE_TEST |
9 | depends on HAS_IOMEM | 10 | depends on HAS_IOMEM |
@@ -12,3 +13,27 @@ config CRYPTO_DEV_HISI_SEC | |||
12 | 13 | ||
13 | To compile this as a module, choose M here: the module | 14 | To compile this as a module, choose M here: the module |
14 | will be called hisi_sec. | 15 | will be called hisi_sec. |
16 | |||
17 | config CRYPTO_DEV_HISI_QM | ||
18 | tristate | ||
19 | depends on ARM64 && PCI && PCI_MSI | ||
20 | help | ||
21 | HiSilicon accelerator engines use a common queue management | ||
22 | interface. Specific engine drivers may use this module. ||
23 | |||
24 | config CRYPTO_HISI_SGL | ||
25 | tristate | ||
26 | depends on ARM64 | ||
27 | help | ||
28 | HiSilicon accelerator engines use a common hardware scatterlist | ||
29 | interface for the data format. Specific engine drivers may use ||
30 | this module. ||
31 | |||
32 | config CRYPTO_DEV_HISI_ZIP | ||
33 | tristate "Support for HiSilicon ZIP accelerator" | ||
34 | depends on ARM64 && PCI && PCI_MSI | ||
35 | select CRYPTO_DEV_HISI_QM | ||
36 | select CRYPTO_HISI_SGL | ||
37 | select SG_SPLIT | ||
38 | help | ||
39 | Support for the HiSilicon ZIP accelerator driver. ||
diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile index 463f46ace182..45a279741126 100644 --- a/drivers/crypto/hisilicon/Makefile +++ b/drivers/crypto/hisilicon/Makefile | |||
@@ -1,2 +1,5 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/ | 2 | obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/ |
3 | obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += qm.o | ||
4 | obj-$(CONFIG_CRYPTO_HISI_SGL) += sgl.o | ||
5 | obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/ | ||
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c new file mode 100644 index 000000000000..f975c393a603 --- /dev/null +++ b/drivers/crypto/hisilicon/qm.c | |||
@@ -0,0 +1,1913 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* Copyright (c) 2019 HiSilicon Limited. */ | ||
3 | #include <asm/page.h> | ||
4 | #include <linux/bitmap.h> | ||
5 | #include <linux/debugfs.h> | ||
6 | #include <linux/dma-mapping.h> | ||
7 | #include <linux/io.h> | ||
8 | #include <linux/irqreturn.h> | ||
9 | #include <linux/log2.h> | ||
10 | #include <linux/seq_file.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include "qm.h" | ||
13 | |||
14 | /* eq/aeq irq enable */ | ||
15 | #define QM_VF_AEQ_INT_SOURCE 0x0 | ||
16 | #define QM_VF_AEQ_INT_MASK 0x4 | ||
17 | #define QM_VF_EQ_INT_SOURCE 0x8 | ||
18 | #define QM_VF_EQ_INT_MASK 0xc | ||
19 | #define QM_IRQ_NUM_V1 1 | ||
20 | #define QM_IRQ_NUM_PF_V2 4 | ||
21 | #define QM_IRQ_NUM_VF_V2 2 | ||
22 | |||
23 | #define QM_EQ_EVENT_IRQ_VECTOR 0 | ||
24 | #define QM_AEQ_EVENT_IRQ_VECTOR 1 | ||
25 | #define QM_ABNORMAL_EVENT_IRQ_VECTOR 3 | ||
26 | |||
27 | /* mailbox */ | ||
28 | #define QM_MB_CMD_SQC 0x0 | ||
29 | #define QM_MB_CMD_CQC 0x1 | ||
30 | #define QM_MB_CMD_EQC 0x2 | ||
31 | #define QM_MB_CMD_AEQC 0x3 | ||
32 | #define QM_MB_CMD_SQC_BT 0x4 | ||
33 | #define QM_MB_CMD_CQC_BT 0x5 | ||
34 | #define QM_MB_CMD_SQC_VFT_V2 0x6 | ||
35 | |||
36 | #define QM_MB_CMD_SEND_BASE 0x300 | ||
37 | #define QM_MB_EVENT_SHIFT 8 | ||
38 | #define QM_MB_BUSY_SHIFT 13 | ||
39 | #define QM_MB_OP_SHIFT 14 | ||
40 | #define QM_MB_CMD_DATA_ADDR_L 0x304 | ||
41 | #define QM_MB_CMD_DATA_ADDR_H 0x308 | ||
42 | |||
43 | /* sqc shift */ | ||
44 | #define QM_SQ_HOP_NUM_SHIFT 0 | ||
45 | #define QM_SQ_PAGE_SIZE_SHIFT 4 | ||
46 | #define QM_SQ_BUF_SIZE_SHIFT 8 | ||
47 | #define QM_SQ_SQE_SIZE_SHIFT 12 | ||
48 | #define QM_SQ_PRIORITY_SHIFT 0 | ||
49 | #define QM_SQ_ORDERS_SHIFT 4 | ||
50 | #define QM_SQ_TYPE_SHIFT 8 | ||
51 | |||
52 | #define QM_SQ_TYPE_MASK GENMASK(3, 0) | ||
53 | |||
54 | /* cqc shift */ | ||
55 | #define QM_CQ_HOP_NUM_SHIFT 0 | ||
56 | #define QM_CQ_PAGE_SIZE_SHIFT 4 | ||
57 | #define QM_CQ_BUF_SIZE_SHIFT 8 | ||
58 | #define QM_CQ_CQE_SIZE_SHIFT 12 | ||
59 | #define QM_CQ_PHASE_SHIFT 0 | ||
60 | #define QM_CQ_FLAG_SHIFT 1 | ||
61 | |||
62 | #define QM_CQE_PHASE(cqe) ((cqe)->w7 & 0x1) | ||
63 | #define QM_QC_CQE_SIZE 4 | ||
64 | |||
65 | /* eqc shift */ | ||
66 | #define QM_EQE_AEQE_SIZE (2UL << 12) | ||
67 | #define QM_EQC_PHASE_SHIFT 16 | ||
68 | |||
69 | #define QM_EQE_PHASE(eqe) (((eqe)->dw0 >> 16) & 0x1) | ||
70 | #define QM_EQE_CQN_MASK GENMASK(15, 0) | ||
71 | |||
72 | #define QM_AEQE_PHASE(aeqe) (((aeqe)->dw0 >> 16) & 0x1) | ||
73 | #define QM_AEQE_TYPE_SHIFT 17 | ||
74 | |||
75 | #define QM_DOORBELL_CMD_SQ 0 | ||
76 | #define QM_DOORBELL_CMD_CQ 1 | ||
77 | #define QM_DOORBELL_CMD_EQ 2 | ||
78 | #define QM_DOORBELL_CMD_AEQ 3 | ||
79 | |||
80 | #define QM_DOORBELL_BASE_V1 0x340 | ||
81 | #define QM_DB_CMD_SHIFT_V1 16 | ||
82 | #define QM_DB_INDEX_SHIFT_V1 32 | ||
83 | #define QM_DB_PRIORITY_SHIFT_V1 48 | ||
84 | #define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000 | ||
85 | #define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000 | ||
86 | #define QM_DB_CMD_SHIFT_V2 12 | ||
87 | #define QM_DB_RAND_SHIFT_V2 16 | ||
88 | #define QM_DB_INDEX_SHIFT_V2 32 | ||
89 | #define QM_DB_PRIORITY_SHIFT_V2 48 | ||
90 | |||
91 | #define QM_MEM_START_INIT 0x100040 | ||
92 | #define QM_MEM_INIT_DONE 0x100044 | ||
93 | #define QM_VFT_CFG_RDY 0x10006c | ||
94 | #define QM_VFT_CFG_OP_WR 0x100058 | ||
95 | #define QM_VFT_CFG_TYPE 0x10005c | ||
96 | #define QM_SQC_VFT 0x0 | ||
97 | #define QM_CQC_VFT 0x1 | ||
98 | #define QM_VFT_CFG 0x100060 | ||
99 | #define QM_VFT_CFG_OP_ENABLE 0x100054 | ||
100 | |||
101 | #define QM_VFT_CFG_DATA_L 0x100064 | ||
102 | #define QM_VFT_CFG_DATA_H 0x100068 | ||
103 | #define QM_SQC_VFT_BUF_SIZE (7ULL << 8) | ||
104 | #define QM_SQC_VFT_SQC_SIZE (5ULL << 12) | ||
105 | #define QM_SQC_VFT_INDEX_NUMBER (1ULL << 16) | ||
106 | #define QM_SQC_VFT_START_SQN_SHIFT 28 | ||
107 | #define QM_SQC_VFT_VALID (1ULL << 44) | ||
108 | #define QM_SQC_VFT_SQN_SHIFT 45 | ||
109 | #define QM_CQC_VFT_BUF_SIZE (7ULL << 8) | ||
110 | #define QM_CQC_VFT_SQC_SIZE (5ULL << 12) | ||
111 | #define QM_CQC_VFT_INDEX_NUMBER (1ULL << 16) | ||
112 | #define QM_CQC_VFT_VALID (1ULL << 28) | ||
113 | |||
114 | #define QM_SQC_VFT_BASE_SHIFT_V2 28 | ||
115 | #define QM_SQC_VFT_BASE_MASK_V2 GENMASK(5, 0) | ||
116 | #define QM_SQC_VFT_NUM_SHIFT_V2 45 | ||
117 | #define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0) ||
118 | |||
119 | #define QM_DFX_CNT_CLR_CE 0x100118 | ||
120 | |||
121 | #define QM_ABNORMAL_INT_SOURCE 0x100000 | ||
122 | #define QM_ABNORMAL_INT_MASK 0x100004 | ||
123 | #define QM_ABNORMAL_INT_MASK_VALUE 0x1fff | ||
124 | #define QM_ABNORMAL_INT_STATUS 0x100008 | ||
125 | #define QM_ABNORMAL_INF00 0x100010 | ||
126 | #define QM_FIFO_OVERFLOW_TYPE 0xc0 | ||
127 | #define QM_FIFO_OVERFLOW_TYPE_SHIFT 6 | ||
128 | #define QM_FIFO_OVERFLOW_VF 0x3f | ||
129 | #define QM_ABNORMAL_INF01 0x100014 | ||
130 | #define QM_DB_TIMEOUT_TYPE 0xc0 | ||
131 | #define QM_DB_TIMEOUT_TYPE_SHIFT 6 | ||
132 | #define QM_DB_TIMEOUT_VF 0x3f | ||
133 | #define QM_RAS_CE_ENABLE 0x1000ec | ||
134 | #define QM_RAS_FE_ENABLE 0x1000f0 | ||
135 | #define QM_RAS_NFE_ENABLE 0x1000f4 | ||
136 | #define QM_RAS_CE_THRESHOLD 0x1000f8 | ||
137 | #define QM_RAS_CE_TIMES_PER_IRQ 1 | ||
138 | #define QM_RAS_MSI_INT_SEL 0x1040f4 | ||
139 | |||
140 | #define QM_CACHE_WB_START 0x204 | ||
141 | #define QM_CACHE_WB_DONE 0x208 | ||
142 | |||
143 | #define PCI_BAR_2 2 | ||
144 | #define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0) | ||
145 | #define QMC_ALIGN(sz) ALIGN(sz, 32) | ||
146 | |||
147 | #define QM_DBG_TMP_BUF_LEN 22 | ||
148 | |||
149 | #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ | ||
150 | (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \ | ||
151 | ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \ | ||
152 | ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \ | ||
153 | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) | ||
154 | |||
155 | #define QM_MK_CQC_DW3_V2(cqe_sz) \ | ||
156 | ((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) | ||
157 | |||
158 | #define QM_MK_SQC_W13(priority, orders, alg_type) \ | ||
159 | (((priority) << QM_SQ_PRIORITY_SHIFT) | \ | ||
160 | ((orders) << QM_SQ_ORDERS_SHIFT) | \ | ||
161 | (((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT)) | ||
162 | |||
163 | #define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \ | ||
164 | (((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \ | ||
165 | ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \ | ||
166 | ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \ | ||
167 | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) | ||
168 | |||
169 | #define QM_MK_SQC_DW3_V2(sqe_sz) \ | ||
170 | ((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) | ||
171 | |||
172 | #define INIT_QC_COMMON(qc, base, pasid) do { \ | ||
173 | (qc)->head = 0; \ | ||
174 | (qc)->tail = 0; \ | ||
175 | (qc)->base_l = lower_32_bits(base); \ | ||
176 | (qc)->base_h = upper_32_bits(base); \ | ||
177 | (qc)->dw3 = 0; \ | ||
178 | (qc)->w8 = 0; \ | ||
179 | (qc)->rsvd0 = 0; \ | ||
180 | (qc)->pasid = pasid; \ | ||
181 | (qc)->w11 = 0; \ | ||
182 | (qc)->rsvd1 = 0; \ | ||
183 | } while (0) | ||
184 | |||
185 | enum vft_type { | ||
186 | SQC_VFT = 0, | ||
187 | CQC_VFT, | ||
188 | }; | ||
189 | |||
190 | struct qm_cqe { | ||
191 | __le32 rsvd0; | ||
192 | __le16 cmd_id; | ||
193 | __le16 rsvd1; | ||
194 | __le16 sq_head; | ||
195 | __le16 sq_num; | ||
196 | __le16 rsvd2; | ||
197 | __le16 w7; | ||
198 | }; | ||
199 | |||
200 | struct qm_eqe { | ||
201 | __le32 dw0; | ||
202 | }; | ||
203 | |||
204 | struct qm_aeqe { | ||
205 | __le32 dw0; | ||
206 | }; | ||
207 | |||
208 | struct qm_sqc { | ||
209 | __le16 head; | ||
210 | __le16 tail; | ||
211 | __le32 base_l; | ||
212 | __le32 base_h; | ||
213 | __le32 dw3; | ||
214 | __le16 w8; | ||
215 | __le16 rsvd0; | ||
216 | __le16 pasid; | ||
217 | __le16 w11; | ||
218 | __le16 cq_num; | ||
219 | __le16 w13; | ||
220 | __le32 rsvd1; | ||
221 | }; | ||
222 | |||
223 | struct qm_cqc { | ||
224 | __le16 head; | ||
225 | __le16 tail; | ||
226 | __le32 base_l; | ||
227 | __le32 base_h; | ||
228 | __le32 dw3; | ||
229 | __le16 w8; | ||
230 | __le16 rsvd0; | ||
231 | __le16 pasid; | ||
232 | __le16 w11; | ||
233 | __le32 dw6; | ||
234 | __le32 rsvd1; | ||
235 | }; | ||
236 | |||
237 | struct qm_eqc { | ||
238 | __le16 head; | ||
239 | __le16 tail; | ||
240 | __le32 base_l; | ||
241 | __le32 base_h; | ||
242 | __le32 dw3; | ||
243 | __le32 rsvd[2]; | ||
244 | __le32 dw6; | ||
245 | }; | ||
246 | |||
247 | struct qm_aeqc { | ||
248 | __le16 head; | ||
249 | __le16 tail; | ||
250 | __le32 base_l; | ||
251 | __le32 base_h; | ||
252 | __le32 dw3; | ||
253 | __le32 rsvd[2]; | ||
254 | __le32 dw6; | ||
255 | }; | ||
256 | |||
257 | struct qm_mailbox { | ||
258 | __le16 w0; | ||
259 | __le16 queue_num; | ||
260 | __le32 base_l; | ||
261 | __le32 base_h; | ||
262 | __le32 rsvd; | ||
263 | }; | ||
264 | |||
265 | struct qm_doorbell { | ||
266 | __le16 queue_num; | ||
267 | __le16 cmd; | ||
268 | __le16 index; | ||
269 | __le16 priority; | ||
270 | }; | ||
271 | |||
272 | struct hisi_qm_hw_ops { | ||
273 | int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number); | ||
274 | void (*qm_db)(struct hisi_qm *qm, u16 qn, | ||
275 | u8 cmd, u16 index, u8 priority); | ||
276 | u32 (*get_irq_num)(struct hisi_qm *qm); | ||
277 | int (*debug_init)(struct hisi_qm *qm); | ||
278 | void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, | ||
279 | u32 msi); | ||
280 | pci_ers_result_t (*hw_error_handle)(struct hisi_qm *qm); | ||
281 | }; | ||
282 | |||
283 | static const char * const qm_debug_file_name[] = { | ||
284 | [CURRENT_Q] = "current_q", | ||
285 | [CLEAR_ENABLE] = "clear_enable", | ||
286 | }; | ||
287 | |||
288 | struct hisi_qm_hw_error { | ||
289 | u32 int_msk; | ||
290 | const char *msg; | ||
291 | }; | ||
292 | |||
293 | static const struct hisi_qm_hw_error qm_hw_error[] = { | ||
294 | { .int_msk = BIT(0), .msg = "qm_axi_rresp" }, | ||
295 | { .int_msk = BIT(1), .msg = "qm_axi_bresp" }, | ||
296 | { .int_msk = BIT(2), .msg = "qm_ecc_mbit" }, | ||
297 | { .int_msk = BIT(3), .msg = "qm_ecc_1bit" }, | ||
298 | { .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" }, | ||
299 | { .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" }, | ||
300 | { .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" }, | ||
301 | { .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" }, | ||
302 | { .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" }, | ||
303 | { .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" }, | ||
304 | { .int_msk = BIT(10), .msg = "qm_db_timeout" }, | ||
305 | { .int_msk = BIT(11), .msg = "qm_of_fifo_of" }, | ||
306 | { .int_msk = BIT(12), .msg = "qm_db_random_invalid" }, | ||
307 | { /* sentinel */ } | ||
308 | }; | ||
309 | |||
310 | static const char * const qm_db_timeout[] = { | ||
311 | "sq", "cq", "eq", "aeq", | ||
312 | }; | ||
313 | |||
314 | static const char * const qm_fifo_overflow[] = { | ||
315 | "cq", "eq", "aeq", | ||
316 | }; | ||
317 | |||
318 | /* return 0 if the mailbox is ready, -ETIMEDOUT on hardware timeout */ ||
319 | static int qm_wait_mb_ready(struct hisi_qm *qm) | ||
320 | { | ||
321 | u32 val; | ||
322 | |||
323 | return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE, | ||
324 | val, !((val >> QM_MB_BUSY_SHIFT) & | ||
325 | 0x1), 10, 1000); | ||
326 | } | ||
327 | |||
328 | /* 128 bits must be written to the hardware in one go to trigger a mailbox */ ||
329 | static void qm_mb_write(struct hisi_qm *qm, const void *src) | ||
330 | { | ||
331 | void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE; | ||
332 | unsigned long tmp0 = 0, tmp1 = 0; | ||
333 | |||
334 | asm volatile("ldp %0, %1, %3\n" | ||
335 | "stp %0, %1, %2\n" | ||
336 | "dsb sy\n" | ||
337 | : "=&r" (tmp0), | ||
338 | "=&r" (tmp1), | ||
339 | "+Q" (*((char *)fun_base)) | ||
340 | : "Q" (*((char *)src)) | ||
341 | : "memory"); | ||
342 | } | ||
343 | |||
344 | static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, | ||
345 | bool op) | ||
346 | { | ||
347 | struct qm_mailbox mailbox; | ||
348 | int ret = 0; | ||
349 | |||
350 | dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n", | ||
351 | queue, cmd, (unsigned long long)dma_addr); | ||
352 | |||
353 | mailbox.w0 = cmd | | ||
354 | (op ? 0x1 << QM_MB_OP_SHIFT : 0) | | ||
355 | (0x1 << QM_MB_BUSY_SHIFT); | ||
356 | mailbox.queue_num = queue; | ||
357 | mailbox.base_l = lower_32_bits(dma_addr); | ||
358 | mailbox.base_h = upper_32_bits(dma_addr); | ||
359 | mailbox.rsvd = 0; | ||
360 | |||
361 | mutex_lock(&qm->mailbox_lock); | ||
362 | |||
363 | if (unlikely(qm_wait_mb_ready(qm))) { | ||
364 | ret = -EBUSY; | ||
365 | dev_err(&qm->pdev->dev, "QM mailbox is busy, cannot start!\n"); ||
366 | goto busy_unlock; | ||
367 | } | ||
368 | |||
369 | qm_mb_write(qm, &mailbox); | ||
370 | |||
371 | if (unlikely(qm_wait_mb_ready(qm))) { | ||
372 | ret = -EBUSY; | ||
373 | dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n"); | ||
374 | goto busy_unlock; | ||
375 | } | ||
376 | |||
377 | busy_unlock: | ||
378 | mutex_unlock(&qm->mailbox_lock); | ||
379 | |||
380 | return ret; | ||
381 | } | ||
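A worked example of the w0 encoding (illustrative only): a write of QM_MB_CMD_SQC with op = 0 packs as

	w0 = 0x0 | (0 << QM_MB_OP_SHIFT) | (0x1 << QM_MB_BUSY_SHIFT) = 0x2000

and the hardware clears the busy bit (bit 13) once it has consumed the 128-bit message, which is the condition qm_wait_mb_ready() polls for on both sides of qm_mb_write().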
382 | |||
383 | static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) | ||
384 | { | ||
385 | u64 doorbell; | ||
386 | |||
387 | doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) | | ||
388 | ((u64)index << QM_DB_INDEX_SHIFT_V1) | | ||
389 | ((u64)priority << QM_DB_PRIORITY_SHIFT_V1); | ||
390 | |||
391 | writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1); | ||
392 | } | ||
393 | |||
394 | static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) | ||
395 | { | ||
396 | u64 doorbell; | ||
397 | u64 dbase; | ||
398 | u16 randata = 0; | ||
399 | |||
400 | if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ) | ||
401 | dbase = QM_DOORBELL_SQ_CQ_BASE_V2; | ||
402 | else | ||
403 | dbase = QM_DOORBELL_EQ_AEQ_BASE_V2; | ||
404 | |||
405 | doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) | | ||
406 | ((u64)randata << QM_DB_RAND_SHIFT_V2) | | ||
407 | ((u64)index << QM_DB_INDEX_SHIFT_V2) | | ||
408 | ((u64)priority << QM_DB_PRIORITY_SHIFT_V2); | ||
409 | |||
410 | writeq(doorbell, qm->io_base + dbase); | ||
411 | } | ||
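To make the V2 packing concrete, a worked example (illustrative only): ringing the SQ doorbell for queue 3 at index 10, priority 0, composes

	doorbell = 3			/* qn,       bits 11:0  */
		 | (u64)0  << 12	/* CMD_SQ,   bits 15:12 */
		 | (u64)0  << 16	/* randata,  bits 31:16 */
		 | (u64)10 << 32	/* index,    bits 47:32 */
		 | (u64)0  << 48	/* priority, bits 63:48 */
		 = 0x0000000a00000003

written as one writeq() to QM_DOORBELL_SQ_CQ_BASE_V2 (0x1000).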
412 | |||
413 | static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) | ||
414 | { | ||
415 | dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n", | ||
416 | qn, cmd, index); | ||
417 | |||
418 | qm->ops->qm_db(qm, qn, cmd, index, priority); | ||
419 | } | ||
420 | |||
421 | static int qm_dev_mem_reset(struct hisi_qm *qm) | ||
422 | { | ||
423 | u32 val; | ||
424 | |||
425 | writel(0x1, qm->io_base + QM_MEM_START_INIT); | ||
426 | return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val, | ||
427 | val & BIT(0), 10, 1000); | ||
428 | } | ||
429 | |||
430 | static u32 qm_get_irq_num_v1(struct hisi_qm *qm) | ||
431 | { | ||
432 | return QM_IRQ_NUM_V1; | ||
433 | } | ||
434 | |||
435 | static u32 qm_get_irq_num_v2(struct hisi_qm *qm) | ||
436 | { | ||
437 | if (qm->fun_type == QM_HW_PF) | ||
438 | return QM_IRQ_NUM_PF_V2; | ||
439 | else | ||
440 | return QM_IRQ_NUM_VF_V2; | ||
441 | } | ||
442 | |||
443 | static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe) | ||
444 | { | ||
445 | u16 cqn = eqe->dw0 & QM_EQE_CQN_MASK; | ||
446 | |||
447 | return qm->qp_array[cqn]; | ||
448 | } | ||
449 | |||
450 | static void qm_cq_head_update(struct hisi_qp *qp) | ||
451 | { | ||
452 | if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) { | ||
453 | qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase; | ||
454 | qp->qp_status.cq_head = 0; | ||
455 | } else { | ||
456 | qp->qp_status.cq_head++; | ||
457 | } | ||
458 | } | ||
459 | |||
460 | static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm) | ||
461 | { | ||
462 | struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; | ||
463 | |||
464 | if (qp->req_cb) { | ||
465 | while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { | ||
466 | dma_rmb(); | ||
467 | qp->req_cb(qp, qp->sqe + qm->sqe_size * cqe->sq_head); | ||
468 | qm_cq_head_update(qp); | ||
469 | cqe = qp->cqe + qp->qp_status.cq_head; | ||
470 | qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, | ||
471 | qp->qp_status.cq_head, 0); | ||
472 | atomic_dec(&qp->qp_status.used); | ||
473 | } | ||
474 | |||
475 | /* set c_flag */ | ||
476 | qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, | ||
477 | qp->qp_status.cq_head, 1); | ||
478 | } | ||
479 | } | ||
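The walk above relies on the usual phase-bit ownership convention: hardware flips the phase value it writes into each CQE every time the ring wraps, so an entry belongs to software only while QM_CQE_PHASE(cqe) matches the consumer's cqc_phase. A compact sketch of the same protocol, detached from the driver structs (illustrative only):

/* consume a phase-tagged ring; phase starts at 1, as in qm_init_qp_status() */
while (QM_CQE_PHASE(cqe) == phase) {
	dma_rmb();			/* read payload only after the phase check */
	consume(cqe);
	if (++head == QM_Q_DEPTH) {	/* wrap: expect the flipped phase next lap */
		head = 0;
		phase = !phase;
	}
	cqe = ring + head;
}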
480 | |||
481 | static void qm_qp_work_func(struct work_struct *work) | ||
482 | { | ||
483 | struct hisi_qp *qp; | ||
484 | |||
485 | qp = container_of(work, struct hisi_qp, work); | ||
486 | qm_poll_qp(qp, qp->qm); | ||
487 | } | ||
488 | |||
489 | static irqreturn_t qm_irq_handler(int irq, void *data) | ||
490 | { | ||
491 | struct hisi_qm *qm = data; | ||
492 | struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; | ||
493 | struct hisi_qp *qp; | ||
494 | int eqe_num = 0; | ||
495 | |||
496 | while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { | ||
497 | eqe_num++; | ||
498 | qp = qm_to_hisi_qp(qm, eqe); | ||
499 | if (qp) | ||
500 | queue_work(qp->wq, &qp->work); | ||
501 | |||
502 | if (qm->status.eq_head == QM_Q_DEPTH - 1) { | ||
503 | qm->status.eqc_phase = !qm->status.eqc_phase; | ||
504 | eqe = qm->eqe; | ||
505 | qm->status.eq_head = 0; | ||
506 | } else { | ||
507 | eqe++; | ||
508 | qm->status.eq_head++; | ||
509 | } | ||
510 | |||
511 | if (eqe_num == QM_Q_DEPTH / 2 - 1) { | ||
512 | eqe_num = 0; | ||
513 | qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); | ||
514 | } | ||
515 | } | ||
516 | |||
517 | qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); | ||
518 | |||
519 | return IRQ_HANDLED; | ||
520 | } | ||
521 | |||
522 | static irqreturn_t qm_irq(int irq, void *data) | ||
523 | { | ||
524 | struct hisi_qm *qm = data; | ||
525 | |||
526 | if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE)) | ||
527 | return qm_irq_handler(irq, data); | ||
528 | |||
529 | dev_err(&qm->pdev->dev, "invalid int source\n"); | ||
530 | qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); | ||
531 | |||
532 | return IRQ_NONE; | ||
533 | } | ||
534 | |||
535 | static irqreturn_t qm_aeq_irq(int irq, void *data) | ||
536 | { | ||
537 | struct hisi_qm *qm = data; | ||
538 | struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; | ||
539 | u32 type; | ||
540 | |||
541 | if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE)) | ||
542 | return IRQ_NONE; | ||
543 | |||
544 | while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { | ||
545 | type = aeqe->dw0 >> QM_AEQE_TYPE_SHIFT; | ||
546 | if (type < ARRAY_SIZE(qm_fifo_overflow)) | ||
547 | dev_err(&qm->pdev->dev, "%s overflow\n", | ||
548 | qm_fifo_overflow[type]); | ||
549 | else | ||
550 | dev_err(&qm->pdev->dev, "unknown error type %d\n", | ||
551 | type); | ||
552 | |||
553 | if (qm->status.aeq_head == QM_Q_DEPTH - 1) { | ||
554 | qm->status.aeqc_phase = !qm->status.aeqc_phase; | ||
555 | aeqe = qm->aeqe; | ||
556 | qm->status.aeq_head = 0; | ||
557 | } else { | ||
558 | aeqe++; | ||
559 | qm->status.aeq_head++; | ||
560 | } | ||
561 | |||
562 | qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); | ||
563 | } | ||
564 | |||
565 | return IRQ_HANDLED; | ||
566 | } | ||
567 | |||
568 | static irqreturn_t qm_abnormal_irq(int irq, void *data) | ||
569 | { | ||
570 | const struct hisi_qm_hw_error *err = qm_hw_error; | ||
571 | struct hisi_qm *qm = data; | ||
572 | struct device *dev = &qm->pdev->dev; | ||
573 | u32 error_status, tmp; | ||
574 | |||
575 | /* read err sts */ | ||
576 | tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); | ||
577 | error_status = qm->msi_mask & tmp; | ||
578 | |||
579 | while (err->msg) { | ||
580 | if (err->int_msk & error_status) | ||
581 | dev_err(dev, "%s [error status=0x%x] found\n", | ||
582 | err->msg, err->int_msk); | ||
583 | |||
584 | err++; | ||
585 | } | ||
586 | |||
587 | /* clear err sts */ | ||
588 | writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); | ||
589 | |||
590 | return IRQ_HANDLED; | ||
591 | } | ||
592 | |||
593 | static int qm_irq_register(struct hisi_qm *qm) | ||
594 | { | ||
595 | struct pci_dev *pdev = qm->pdev; | ||
596 | int ret; | ||
597 | |||
598 | ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), | ||
599 | qm_irq, IRQF_SHARED, qm->dev_name, qm); | ||
600 | if (ret) | ||
601 | return ret; | ||
602 | |||
603 | if (qm->ver == QM_HW_V2) { | ||
604 | ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), | ||
605 | qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm); | ||
606 | if (ret) | ||
607 | goto err_aeq_irq; | ||
608 | |||
609 | if (qm->fun_type == QM_HW_PF) { | ||
610 | ret = request_irq(pci_irq_vector(pdev, | ||
611 | QM_ABNORMAL_EVENT_IRQ_VECTOR), | ||
612 | qm_abnormal_irq, IRQF_SHARED, | ||
613 | qm->dev_name, qm); | ||
614 | if (ret) | ||
615 | goto err_abnormal_irq; ||
616 | } | ||
617 | } | ||
618 | |||
619 | return 0; | ||
620 | |||
621 | err_abnormal_irq: ||
622 | free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); | ||
623 | err_aeq_irq: | ||
624 | free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm); | ||
625 | return ret; | ||
626 | } | ||
627 | |||
628 | static void qm_irq_unregister(struct hisi_qm *qm) | ||
629 | { | ||
630 | struct pci_dev *pdev = qm->pdev; | ||
631 | |||
632 | free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm); | ||
633 | |||
634 | if (qm->ver == QM_HW_V2) { | ||
635 | free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); | ||
636 | |||
637 | if (qm->fun_type == QM_HW_PF) | ||
638 | free_irq(pci_irq_vector(pdev, | ||
639 | QM_ABNORMAL_EVENT_IRQ_VECTOR), qm); | ||
640 | } | ||
641 | } | ||
642 | |||
643 | static void qm_init_qp_status(struct hisi_qp *qp) | ||
644 | { | ||
645 | struct hisi_qp_status *qp_status = &qp->qp_status; | ||
646 | |||
647 | qp_status->sq_tail = 0; | ||
648 | qp_status->cq_head = 0; | ||
649 | qp_status->cqc_phase = 1; | ||
650 | qp_status->flags = 0; | ||
651 | } | ||
652 | |||
653 | static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base, | ||
654 | u32 number) | ||
655 | { | ||
656 | u64 tmp = 0; | ||
657 | |||
658 | if (number > 0) { | ||
659 | switch (type) { | ||
660 | case SQC_VFT: | ||
661 | switch (qm->ver) { | ||
662 | case QM_HW_V1: | ||
663 | tmp = QM_SQC_VFT_BUF_SIZE | | ||
664 | QM_SQC_VFT_SQC_SIZE | | ||
665 | QM_SQC_VFT_INDEX_NUMBER | | ||
666 | QM_SQC_VFT_VALID | | ||
667 | (u64)base << QM_SQC_VFT_START_SQN_SHIFT; | ||
668 | break; | ||
669 | case QM_HW_V2: | ||
670 | tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT | | ||
671 | QM_SQC_VFT_VALID | | ||
672 | (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT; | ||
673 | break; | ||
674 | case QM_HW_UNKNOWN: | ||
675 | break; | ||
676 | } | ||
677 | break; | ||
678 | case CQC_VFT: | ||
679 | switch (qm->ver) { | ||
680 | case QM_HW_V1: | ||
681 | tmp = QM_CQC_VFT_BUF_SIZE | | ||
682 | QM_CQC_VFT_SQC_SIZE | | ||
683 | QM_CQC_VFT_INDEX_NUMBER | | ||
684 | QM_CQC_VFT_VALID; | ||
685 | break; | ||
686 | case QM_HW_V2: | ||
687 | tmp = QM_CQC_VFT_VALID; | ||
688 | break; | ||
689 | case QM_HW_UNKNOWN: | ||
690 | break; | ||
691 | } | ||
692 | break; | ||
693 | } | ||
694 | } | ||
695 | |||
696 | writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L); | ||
697 | writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H); | ||
698 | } | ||
699 | |||
700 | static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type, | ||
701 | u32 fun_num, u32 base, u32 number) | ||
702 | { | ||
703 | unsigned int val; | ||
704 | int ret; | ||
705 | |||
706 | ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, | ||
707 | val & BIT(0), 10, 1000); | ||
708 | if (ret) | ||
709 | return ret; | ||
710 | |||
711 | writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR); | ||
712 | writel(type, qm->io_base + QM_VFT_CFG_TYPE); | ||
713 | writel(fun_num, qm->io_base + QM_VFT_CFG); | ||
714 | |||
715 | qm_vft_data_cfg(qm, type, base, number); | ||
716 | |||
717 | writel(0x0, qm->io_base + QM_VFT_CFG_RDY); | ||
718 | writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); | ||
719 | |||
720 | return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, | ||
721 | val & BIT(0), 10, 1000); | ||
722 | } | ||
723 | |||
724 | /* The configuration must be done after qm_dev_mem_reset() */ ||
725 | static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base, | ||
726 | u32 number) | ||
727 | { | ||
728 | int ret, i; | ||
729 | |||
730 | for (i = SQC_VFT; i <= CQC_VFT; i++) { | ||
731 | ret = qm_set_vft_common(qm, i, fun_num, base, number); | ||
732 | if (ret) | ||
733 | return ret; | ||
734 | } | ||
735 | |||
736 | return 0; | ||
737 | } | ||
738 | |||
739 | static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) | ||
740 | { | ||
741 | u64 sqc_vft; | ||
742 | int ret; | ||
743 | |||
744 | ret = qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); | ||
745 | if (ret) | ||
746 | return ret; | ||
747 | |||
748 | sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | | ||
749 | ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); | ||
750 | *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2); | ||
751 | *number = (QM_SQC_VFT_NUM_MASK_V2 & ||
752 | (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1; | ||
753 | |||
754 | return 0; | ||
755 | } | ||
756 | |||
757 | static struct hisi_qm *file_to_qm(struct debugfs_file *file) | ||
758 | { | ||
759 | struct qm_debug *debug = file->debug; | ||
760 | |||
761 | return container_of(debug, struct hisi_qm, debug); | ||
762 | } | ||
763 | |||
764 | static u32 current_q_read(struct debugfs_file *file) | ||
765 | { | ||
766 | struct hisi_qm *qm = file_to_qm(file); | ||
767 | |||
768 | return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT; | ||
769 | } | ||
770 | |||
771 | static int current_q_write(struct debugfs_file *file, u32 val) | ||
772 | { | ||
773 | struct hisi_qm *qm = file_to_qm(file); | ||
774 | u32 tmp; | ||
775 | |||
776 | if (val >= qm->debug.curr_qm_qp_num) | ||
777 | return -EINVAL; | ||
778 | |||
779 | tmp = val << QM_DFX_QN_SHIFT | | ||
780 | (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK); | ||
781 | writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); | ||
782 | |||
783 | tmp = val << QM_DFX_QN_SHIFT | | ||
784 | (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK); | ||
785 | writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); | ||
786 | |||
787 | return 0; | ||
788 | } | ||
789 | |||
790 | static u32 clear_enable_read(struct debugfs_file *file) | ||
791 | { | ||
792 | struct hisi_qm *qm = file_to_qm(file); | ||
793 | |||
794 | return readl(qm->io_base + QM_DFX_CNT_CLR_CE); | ||
795 | } | ||
796 | |||
797 | /* rd_clr_ctrl: 1 enables read-clear, 0 disables it */ ||
798 | static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl) | ||
799 | { | ||
800 | struct hisi_qm *qm = file_to_qm(file); | ||
801 | |||
802 | if (rd_clr_ctrl > 1) | ||
803 | return -EINVAL; | ||
804 | |||
805 | writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE); | ||
806 | |||
807 | return 0; | ||
808 | } | ||
809 | |||
810 | static ssize_t qm_debug_read(struct file *filp, char __user *buf, | ||
811 | size_t count, loff_t *pos) | ||
812 | { | ||
813 | struct debugfs_file *file = filp->private_data; | ||
814 | enum qm_debug_file index = file->index; | ||
815 | char tbuf[QM_DBG_TMP_BUF_LEN]; | ||
816 | u32 val; | ||
817 | int ret; | ||
818 | |||
819 | mutex_lock(&file->lock); | ||
820 | switch (index) { | ||
821 | case CURRENT_Q: | ||
822 | val = current_q_read(file); | ||
823 | break; | ||
824 | case CLEAR_ENABLE: | ||
825 | val = clear_enable_read(file); | ||
826 | break; | ||
827 | default: | ||
828 | mutex_unlock(&file->lock); | ||
829 | return -EINVAL; | ||
830 | } | ||
831 | mutex_unlock(&file->lock); | ||
832 | ret = sprintf(tbuf, "%u\n", val); | ||
833 | return simple_read_from_buffer(buf, count, pos, tbuf, ret); | ||
834 | } | ||
835 | |||
836 | static ssize_t qm_debug_write(struct file *filp, const char __user *buf, | ||
837 | size_t count, loff_t *pos) | ||
838 | { | ||
839 | struct debugfs_file *file = filp->private_data; | ||
840 | enum qm_debug_file index = file->index; | ||
841 | unsigned long val; | ||
842 | char tbuf[QM_DBG_TMP_BUF_LEN]; | ||
843 | int len, ret; | ||
844 | |||
845 | if (*pos != 0) | ||
846 | return 0; | ||
847 | |||
848 | if (count >= QM_DBG_TMP_BUF_LEN) | ||
849 | return -ENOSPC; | ||
850 | |||
851 | len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf, | ||
852 | count); | ||
853 | if (len < 0) | ||
854 | return len; | ||
855 | |||
856 | tbuf[len] = '\0'; | ||
857 | if (kstrtoul(tbuf, 0, &val)) | ||
858 | return -EINVAL; ||
859 | |||
860 | mutex_lock(&file->lock); | ||
861 | switch (index) { | ||
862 | case CURRENT_Q: | ||
863 | ret = current_q_write(file, val); | ||
864 | if (ret) | ||
865 | goto err_input; | ||
866 | break; | ||
867 | case CLEAR_ENABLE: | ||
868 | ret = clear_enable_write(file, val); | ||
869 | if (ret) | ||
870 | goto err_input; | ||
871 | break; | ||
872 | default: | ||
873 | ret = -EINVAL; | ||
874 | goto err_input; | ||
875 | } | ||
876 | mutex_unlock(&file->lock); | ||
877 | |||
878 | return count; | ||
879 | |||
880 | err_input: | ||
881 | mutex_unlock(&file->lock); | ||
882 | return ret; | ||
883 | } | ||
884 | |||
885 | static const struct file_operations qm_debug_fops = { | ||
886 | .owner = THIS_MODULE, | ||
887 | .open = simple_open, | ||
888 | .read = qm_debug_read, | ||
889 | .write = qm_debug_write, | ||
890 | }; | ||
891 | |||
892 | struct qm_dfx_registers { | ||
893 | char *reg_name; | ||
894 | u64 reg_offset; | ||
895 | }; | ||
896 | |||
897 | #define CNT_CYC_REGS_NUM 10 | ||
898 | static struct qm_dfx_registers qm_dfx_regs[] = { | ||
899 | /* XXX_CNT are read-clear registers */ ||
900 | {"QM_ECC_1BIT_CNT ", 0x104000ull}, | ||
901 | {"QM_ECC_MBIT_CNT ", 0x104008ull}, | ||
902 | {"QM_DFX_MB_CNT ", 0x104018ull}, | ||
903 | {"QM_DFX_DB_CNT ", 0x104028ull}, | ||
904 | {"QM_DFX_SQE_CNT ", 0x104038ull}, | ||
905 | {"QM_DFX_CQE_CNT ", 0x104048ull}, | ||
906 | {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull}, | ||
907 | {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull}, | ||
908 | {"QM_DFX_ACC_FINISH_CNT ", 0x104060ull}, | ||
909 | {"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull}, | ||
910 | {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull}, | ||
911 | {"QM_ECC_1BIT_INF ", 0x104004ull}, | ||
912 | {"QM_ECC_MBIT_INF ", 0x10400cull}, | ||
913 | {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull}, | ||
914 | {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull}, | ||
915 | {"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull}, | ||
916 | {"QM_DFX_FF_ST0 ", 0x1040c8ull}, | ||
917 | {"QM_DFX_FF_ST1 ", 0x1040ccull}, | ||
918 | {"QM_DFX_FF_ST2 ", 0x1040d0ull}, | ||
919 | {"QM_DFX_FF_ST3 ", 0x1040d4ull}, | ||
920 | {"QM_DFX_FF_ST4 ", 0x1040d8ull}, | ||
921 | {"QM_DFX_FF_ST5 ", 0x1040dcull}, | ||
922 | {"QM_DFX_FF_ST6 ", 0x1040e0ull}, | ||
923 | {"QM_IN_IDLE_ST ", 0x1040e4ull}, | ||
924 | { NULL, 0} | ||
925 | }; | ||
926 | |||
927 | static struct qm_dfx_registers qm_vf_dfx_regs[] = { | ||
928 | {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull}, | ||
929 | { NULL, 0} | ||
930 | }; | ||
931 | |||
932 | static int qm_regs_show(struct seq_file *s, void *unused) | ||
933 | { | ||
934 | struct hisi_qm *qm = s->private; | ||
935 | struct qm_dfx_registers *regs; | ||
936 | u32 val; | ||
937 | |||
938 | if (qm->fun_type == QM_HW_PF) | ||
939 | regs = qm_dfx_regs; | ||
940 | else | ||
941 | regs = qm_vf_dfx_regs; | ||
942 | |||
943 | while (regs->reg_name) { | ||
944 | val = readl(qm->io_base + regs->reg_offset); | ||
945 | seq_printf(s, "%s= 0x%08x\n", regs->reg_name, val); | ||
946 | regs++; | ||
947 | } | ||
948 | |||
949 | return 0; | ||
950 | } | ||
951 | |||
952 | static int qm_regs_open(struct inode *inode, struct file *file) | ||
953 | { | ||
954 | return single_open(file, qm_regs_show, inode->i_private); | ||
955 | } | ||
956 | |||
957 | static const struct file_operations qm_regs_fops = { | ||
958 | .owner = THIS_MODULE, | ||
959 | .open = qm_regs_open, | ||
960 | .read = seq_read, | ||
961 | .release = single_release, | ||
962 | }; | ||
963 | |||
964 | static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index) | ||
965 | { | ||
966 | struct dentry *qm_d = qm->debug.qm_d, *tmp; | ||
967 | struct debugfs_file *file = qm->debug.files + index; | ||
968 | |||
969 | tmp = debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file, | ||
970 | &qm_debug_fops); | ||
971 | if (IS_ERR(tmp)) | ||
972 | return -ENOENT; | ||
973 | |||
974 | file->index = index; | ||
975 | mutex_init(&file->lock); | ||
976 | file->debug = &qm->debug; | ||
977 | |||
978 | return 0; | ||
979 | } | ||
980 | |||
981 | static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, | ||
982 | u32 msi) | ||
983 | { | ||
984 | dev_info(&qm->pdev->dev, | ||
985 | "QM v%d does not support hw error handle\n", qm->ver); | ||
986 | |||
987 | writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); | ||
988 | } | ||
989 | |||
990 | static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, | ||
991 | u32 msi) | ||
992 | { | ||
993 | u32 irq_enable = ce | nfe | fe | msi; | ||
994 | u32 irq_unmask = ~irq_enable; | ||
995 | |||
996 | qm->error_mask = ce | nfe | fe; | ||
997 | qm->msi_mask = msi; | ||
998 | |||
999 | /* configure error type */ | ||
1000 | writel(ce, qm->io_base + QM_RAS_CE_ENABLE); | ||
1001 | writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD); | ||
1002 | writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE); | ||
1003 | writel(fe, qm->io_base + QM_RAS_FE_ENABLE); | ||
1004 | |||
1005 | /* use RAS irq default, so only set QM_RAS_MSI_INT_SEL for MSI */ | ||
1006 | writel(msi, qm->io_base + QM_RAS_MSI_INT_SEL); | ||
1007 | |||
1008 | irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); | ||
1009 | writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); | ||
1010 | } | ||
1011 | |||
1012 | static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status) | ||
1013 | { | ||
1014 | const struct hisi_qm_hw_error *err = qm_hw_error; | ||
1015 | struct device *dev = &qm->pdev->dev; | ||
1016 | u32 reg_val, type, vf_num; | ||
1017 | |||
1018 | while (err->msg) { | ||
1019 | if (err->int_msk & error_status) { | ||
1020 | dev_err(dev, "%s [error status=0x%x] found\n", | ||
1021 | err->msg, err->int_msk); | ||
1022 | |||
1023 | if (error_status & QM_DB_TIMEOUT) { | ||
1024 | reg_val = readl(qm->io_base + | ||
1025 | QM_ABNORMAL_INF01); | ||
1026 | type = (reg_val & QM_DB_TIMEOUT_TYPE) >> | ||
1027 | QM_DB_TIMEOUT_TYPE_SHIFT; | ||
1028 | vf_num = reg_val & QM_DB_TIMEOUT_VF; | ||
1029 | dev_err(dev, "qm %s doorbell timeout in function %u\n", | ||
1030 | qm_db_timeout[type], vf_num); | ||
1031 | } | ||
1032 | |||
1033 | if (error_status & QM_OF_FIFO_OF) { | ||
1034 | reg_val = readl(qm->io_base + | ||
1035 | QM_ABNORMAL_INF00); | ||
1036 | type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >> | ||
1037 | QM_FIFO_OVERFLOW_TYPE_SHIFT; | ||
1038 | vf_num = reg_val & QM_FIFO_OVERFLOW_VF; | ||
1039 | |||
1040 | if (type < ARRAY_SIZE(qm_fifo_overflow)) | ||
1041 | dev_err(dev, "qm %s fifo overflow in function %u\n", | ||
1042 | qm_fifo_overflow[type], | ||
1043 | vf_num); | ||
1044 | else | ||
1045 | dev_err(dev, "unknown error type\n"); | ||
1046 | } | ||
1047 | } | ||
1048 | err++; | ||
1049 | } | ||
1050 | } | ||
1051 | |||
1052 | static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm) | ||
1053 | { | ||
1054 | u32 error_status, tmp; | ||
1055 | |||
1056 | /* read err sts */ | ||
1057 | tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); | ||
1058 | error_status = qm->error_mask & tmp; | ||
1059 | |||
1060 | if (error_status) { | ||
1061 | qm_log_hw_error(qm, error_status); | ||
1062 | |||
1063 | /* clear err sts */ | ||
1064 | writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); | ||
1065 | |||
1066 | return PCI_ERS_RESULT_NEED_RESET; | ||
1067 | } | ||
1068 | |||
1069 | return PCI_ERS_RESULT_RECOVERED; | ||
1070 | } | ||
1071 | |||
1072 | static const struct hisi_qm_hw_ops qm_hw_ops_v1 = { | ||
1073 | .qm_db = qm_db_v1, | ||
1074 | .get_irq_num = qm_get_irq_num_v1, | ||
1075 | .hw_error_init = qm_hw_error_init_v1, | ||
1076 | }; | ||
1077 | |||
1078 | static const struct hisi_qm_hw_ops qm_hw_ops_v2 = { | ||
1079 | .get_vft = qm_get_vft_v2, | ||
1080 | .qm_db = qm_db_v2, | ||
1081 | .get_irq_num = qm_get_irq_num_v2, | ||
1082 | .hw_error_init = qm_hw_error_init_v2, | ||
1083 | .hw_error_handle = qm_hw_error_handle_v2, | ||
1084 | }; | ||
1085 | |||
1086 | static void *qm_get_avail_sqe(struct hisi_qp *qp) | ||
1087 | { | ||
1088 | struct hisi_qp_status *qp_status = &qp->qp_status; | ||
1089 | u16 sq_tail = qp_status->sq_tail; | ||
1090 | |||
1091 | if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH)) | ||
1092 | return NULL; | ||
1093 | |||
1094 | return qp->sqe + sq_tail * qp->qm->sqe_size; | ||
1095 | } | ||
1096 | |||
1097 | /** | ||
1098 | * hisi_qm_create_qp() - Create a queue pair from qm. | ||
1099 | * @qm: The qm we create a qp from. | ||
1100 | * @alg_type: Accelerator specific algorithm type in sqc. | ||
1101 | * | ||
1102 | * Return the created qp on success, -EBUSY if all qps in the qm are ||
1103 | * allocated, or -ENOMEM if allocating qp memory fails. ||
1104 | */ | ||
1105 | struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) | ||
1106 | { | ||
1107 | struct device *dev = &qm->pdev->dev; | ||
1108 | struct hisi_qp *qp; | ||
1109 | int qp_id, ret; | ||
1110 | |||
1111 | qp = kzalloc(sizeof(*qp), GFP_KERNEL); | ||
1112 | if (!qp) | ||
1113 | return ERR_PTR(-ENOMEM); | ||
1114 | |||
1115 | write_lock(&qm->qps_lock); | ||
1116 | |||
1117 | qp_id = find_first_zero_bit(qm->qp_bitmap, qm->qp_num); | ||
1118 | if (qp_id >= qm->qp_num) { | ||
1119 | write_unlock(&qm->qps_lock); | ||
1120 | dev_info(&qm->pdev->dev, "QM: all queues are busy!\n"); ||
1121 | ret = -EBUSY; | ||
1122 | goto err_free_qp; | ||
1123 | } | ||
1124 | set_bit(qp_id, qm->qp_bitmap); | ||
1125 | qm->qp_array[qp_id] = qp; | ||
1126 | |||
1127 | write_unlock(&qm->qps_lock); | ||
1128 | |||
1129 | qp->qm = qm; | ||
1130 | |||
1131 | if (qm->use_dma_api) { | ||
1132 | qp->qdma.size = qm->sqe_size * QM_Q_DEPTH + | ||
1133 | sizeof(struct qm_cqe) * QM_Q_DEPTH; | ||
1134 | qp->qdma.va = dma_alloc_coherent(dev, qp->qdma.size, | ||
1135 | &qp->qdma.dma, GFP_KERNEL); | ||
1136 | if (!qp->qdma.va) { | ||
1137 | ret = -ENOMEM; | ||
1138 | goto err_clear_bit; | ||
1139 | } | ||
1140 | |||
1141 | dev_dbg(dev, "allocate qp dma buf(va=%pK, dma=%pad, size=%zx)\n", | ||
1142 | qp->qdma.va, &qp->qdma.dma, qp->qdma.size); | ||
1143 | } | ||
1144 | |||
1145 | qp->qp_id = qp_id; | ||
1146 | qp->alg_type = alg_type; | ||
1147 | INIT_WORK(&qp->work, qm_qp_work_func); | ||
1148 | qp->wq = alloc_workqueue("hisi_qm", WQ_UNBOUND | WQ_HIGHPRI | | ||
1149 | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0); | ||
1150 | if (!qp->wq) { | ||
1151 | ret = -ENOMEM; ||
1152 | goto err_free_qp_mem; | ||
1153 | } | ||
1154 | |||
1155 | return qp; | ||
1156 | |||
1157 | err_free_qp_mem: | ||
1158 | if (qm->use_dma_api) | ||
1159 | dma_free_coherent(dev, qp->qdma.size, qp->qdma.va, | ||
1160 | qp->qdma.dma); | ||
1161 | err_clear_bit: | ||
1162 | write_lock(&qm->qps_lock); | ||
1163 | qm->qp_array[qp_id] = NULL; | ||
1164 | clear_bit(qp_id, qm->qp_bitmap); | ||
1165 | write_unlock(&qm->qps_lock); | ||
1166 | err_free_qp: | ||
1167 | kfree(qp); | ||
1168 | return ERR_PTR(ret); | ||
1169 | } | ||
1170 | EXPORT_SYMBOL_GPL(hisi_qm_create_qp); | ||
1171 | |||
1172 | /** | ||
1173 | * hisi_qm_release_qp() - Release a qp back to its qm. | ||
1174 | * @qp: The qp we want to release. | ||
1175 | * | ||
1176 | * This function releases the resource of a qp. | ||
1177 | */ | ||
1178 | void hisi_qm_release_qp(struct hisi_qp *qp) | ||
1179 | { | ||
1180 | struct hisi_qm *qm = qp->qm; | ||
1181 | struct qm_dma *qdma = &qp->qdma; | ||
1182 | struct device *dev = &qm->pdev->dev; | ||
1183 | |||
1184 | if (qm->use_dma_api && qdma->va) | ||
1185 | dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); | ||
1186 | |||
1187 | write_lock(&qm->qps_lock); | ||
1188 | qm->qp_array[qp->qp_id] = NULL; | ||
1189 | clear_bit(qp->qp_id, qm->qp_bitmap); | ||
1190 | write_unlock(&qm->qps_lock); | ||
1191 | |||
1192 | kfree(qp); | ||
1193 | } | ||
1194 | EXPORT_SYMBOL_GPL(hisi_qm_release_qp); | ||
1195 | |||
1196 | static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid) | ||
1197 | { | ||
1198 | struct hisi_qm *qm = qp->qm; | ||
1199 | struct device *dev = &qm->pdev->dev; | ||
1200 | enum qm_hw_ver ver = qm->ver; | ||
1201 | struct qm_sqc *sqc; | ||
1202 | struct qm_cqc *cqc; | ||
1203 | dma_addr_t sqc_dma; | ||
1204 | dma_addr_t cqc_dma; | ||
1205 | int ret; | ||
1206 | |||
1207 | qm_init_qp_status(qp); | ||
1208 | |||
1209 | sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL); | ||
1210 | if (!sqc) | ||
1211 | return -ENOMEM; | ||
1212 | sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc), | ||
1213 | DMA_TO_DEVICE); | ||
1214 | if (dma_mapping_error(dev, sqc_dma)) { | ||
1215 | kfree(sqc); | ||
1216 | return -ENOMEM; | ||
1217 | } | ||
1218 | |||
1219 | INIT_QC_COMMON(sqc, qp->sqe_dma, pasid); | ||
1220 | if (ver == QM_HW_V1) { | ||
1221 | sqc->dw3 = QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size); | ||
1222 | sqc->w8 = QM_Q_DEPTH - 1; | ||
1223 | } else if (ver == QM_HW_V2) { | ||
1224 | sqc->dw3 = QM_MK_SQC_DW3_V2(qm->sqe_size); | ||
1225 | sqc->w8 = 0; /* rand_qc */ | ||
1226 | } | ||
1227 | sqc->cq_num = qp_id; | ||
1228 | sqc->w13 = QM_MK_SQC_W13(0, 1, qp->alg_type); | ||
1229 | |||
1230 | ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0); | ||
1231 | dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE); | ||
1232 | kfree(sqc); | ||
1233 | if (ret) | ||
1234 | return ret; | ||
1235 | |||
1236 | cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL); | ||
1237 | if (!cqc) | ||
1238 | return -ENOMEM; | ||
1239 | cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc), | ||
1240 | DMA_TO_DEVICE); | ||
1241 | if (dma_mapping_error(dev, cqc_dma)) { | ||
1242 | kfree(cqc); | ||
1243 | return -ENOMEM; | ||
1244 | } | ||
1245 | |||
1246 | INIT_QC_COMMON(cqc, qp->cqe_dma, pasid); | ||
1247 | if (ver == QM_HW_V1) { | ||
1248 | cqc->dw3 = QM_MK_CQC_DW3_V1(0, 0, 0, 4); | ||
1249 | cqc->w8 = QM_Q_DEPTH - 1; | ||
1250 | } else if (ver == QM_HW_V2) { | ||
1251 | cqc->dw3 = QM_MK_CQC_DW3_V2(4); | ||
1252 | cqc->w8 = 0; | ||
1253 | } | ||
1254 | cqc->dw6 = 1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT; | ||
1255 | |||
1256 | ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0); | ||
1257 | dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE); | ||
1258 | kfree(cqc); | ||
1259 | |||
1260 | return ret; | ||
1261 | } | ||
1262 | |||
1263 | /** | ||
1264 | * hisi_qm_start_qp() - Start a qp into running. | ||
1265 | * @qp: The qp we want to start to run. | ||
1266 | * @arg: Accelerator specific argument. | ||
1267 | * | ||
1268 | * After this function, the qp can receive requests from the user. Return ||
1269 | * qp_id on success, or a negative error code on failure. ||
1270 | */ | ||
1271 | int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) | ||
1272 | { | ||
1273 | struct hisi_qm *qm = qp->qm; | ||
1274 | struct device *dev = &qm->pdev->dev; | ||
1275 | enum qm_hw_ver ver = qm->ver; | ||
1276 | int qp_id = qp->qp_id; | ||
1277 | int pasid = arg; | ||
1278 | size_t off = 0; | ||
1279 | int ret; | ||
1280 | |||
1281 | #define QP_INIT_BUF(qp, type, size) do { \ | ||
1282 | (qp)->type = ((qp)->qdma.va + (off)); \ | ||
1283 | (qp)->type##_dma = (qp)->qdma.dma + (off); \ | ||
1284 | off += (size); \ | ||
1285 | } while (0) | ||
1286 | |||
1287 | if (!qp->qdma.dma) { | ||
1288 | dev_err(dev, "cannot get qm dma buffer\n"); | ||
1289 | return -EINVAL; | ||
1290 | } | ||
1291 | |||
1292 | /* sq needs 128-byte alignment */ ||
1293 | if (qp->qdma.dma & QM_SQE_DATA_ALIGN_MASK) { ||
1294 | dev_err(dev, "qm sq is not aligned to 128 bytes\n"); ||
1295 | return -EINVAL; | ||
1296 | } | ||
1297 | |||
1298 | QP_INIT_BUF(qp, sqe, qm->sqe_size * QM_Q_DEPTH); | ||
1299 | QP_INIT_BUF(qp, cqe, sizeof(struct qm_cqe) * QM_Q_DEPTH); | ||
1300 | |||
1301 | dev_dbg(dev, "init qp buffer(v%d):\n" | ||
1302 | " sqe (%pK, %lx)\n" | ||
1303 | " cqe (%pK, %lx)\n", | ||
1304 | ver, qp->sqe, (unsigned long)qp->sqe_dma, | ||
1305 | qp->cqe, (unsigned long)qp->cqe_dma); | ||
1306 | |||
1307 | ret = qm_qp_ctx_cfg(qp, qp_id, pasid); | ||
1308 | if (ret) | ||
1309 | return ret; | ||
1310 | |||
1311 | dev_dbg(dev, "queue %d started\n", qp_id); | ||
1312 | |||
1313 | return qp_id; | ||
1314 | } | ||
1315 | EXPORT_SYMBOL_GPL(hisi_qm_start_qp); | ||
1316 | |||
1317 | /** | ||
1318 | * hisi_qm_stop_qp() - Stop a qp in qm. | ||
1319 | * @qp: The qp we want to stop. | ||
1320 | * | ||
1321 | * This function is the reverse of hisi_qm_start_qp(). Return 0 on success. ||
1322 | */ | ||
1323 | int hisi_qm_stop_qp(struct hisi_qp *qp) | ||
1324 | { | ||
1325 | struct device *dev = &qp->qm->pdev->dev; | ||
1326 | int i = 0; | ||
1327 | |||
1328 | /* it is stopped */ | ||
1329 | if (test_bit(QP_STOP, &qp->qp_status.flags)) | ||
1330 | return 0; | ||
1331 | |||
1332 | while (atomic_read(&qp->qp_status.used)) { | ||
1333 | i++; | ||
1334 | msleep(20); | ||
1335 | if (i == 10) { | ||
1336 | dev_err(dev, "Cannot drain out data for stopping, Force to stop!\n"); | ||
1337 | return 0; | ||
1338 | } | ||
1339 | } | ||
1340 | |||
1341 | set_bit(QP_STOP, &qp->qp_status.flags); | ||
1342 | |||
1343 | dev_dbg(dev, "stop queue %u!", qp->qp_id); | ||
1344 | |||
1345 | return 0; | ||
1346 | } | ||
1347 | EXPORT_SYMBOL_GPL(hisi_qm_stop_qp); | ||
1348 | |||
1349 | /** | ||
1350 | * hisi_qp_send() - Queue up a task in the hardware queue. | ||
1351 | * @qp: The qp in which to put the message. | ||
1352 | * @msg: The message. | ||
1353 | * | ||
1354 | * This function will return -EBUSY if the qp is currently full, and ||
1355 | * -EAGAIN if the qm related to the qp is resetting. ||
1356 | */ | ||
1357 | int hisi_qp_send(struct hisi_qp *qp, const void *msg) | ||
1358 | { | ||
1359 | struct hisi_qp_status *qp_status = &qp->qp_status; | ||
1360 | u16 sq_tail = qp_status->sq_tail; | ||
1361 | u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH; | ||
1362 | void *sqe = qm_get_avail_sqe(qp); | ||
1363 | |||
1364 | if (unlikely(test_bit(QP_STOP, &qp->qp_status.flags))) { | ||
1365 | dev_info(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); | ||
1366 | return -EAGAIN; | ||
1367 | } | ||
1368 | |||
1369 | if (!sqe) | ||
1370 | return -EBUSY; | ||
1371 | |||
1372 | memcpy(sqe, msg, qp->qm->sqe_size); | ||
1373 | |||
1374 | qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); | ||
1375 | atomic_inc(&qp->qp_status.used); | ||
1376 | qp_status->sq_tail = sq_tail_next; | ||
1377 | |||
1378 | return 0; | ||
1379 | } | ||
1380 | EXPORT_SYMBOL_GPL(hisi_qp_send); | ||
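Taken together, hisi_qm_create_qp(), hisi_qm_start_qp(), hisi_qp_send() and hisi_qm_stop_qp() form the per-queue API an accelerator driver uses. A minimal sketch of that flow, assuming a caller-provided sqe buffer of qm->sqe_size bytes (function and variable names here are illustrative, not part of this patch):

	static int example_send_one(struct hisi_qm *qm, const void *sqe)
	{
		struct hisi_qp *qp;
		int ret;

		qp = hisi_qm_create_qp(qm, 0 /* accelerator-specific alg_type */);
		if (IS_ERR(qp))
			return PTR_ERR(qp);

		ret = hisi_qm_start_qp(qp, 0 /* pasid */);
		if (ret < 0)
			goto out_release;

		/* -EBUSY: queue full; -EAGAIN: qp stopped or resetting */
		ret = hisi_qp_send(qp, sqe);

		hisi_qm_stop_qp(qp);
	out_release:
		hisi_qm_release_qp(qp);
		return ret;
	}

In the real drivers the qp lives for the lifetime of a crypto context and completions arrive through qp->req_cb, so create/start and stop/release are split between context init and exit rather than wrapped around each send.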
1381 | |||
1382 | static void hisi_qm_cache_wb(struct hisi_qm *qm) | ||
1383 | { | ||
1384 | unsigned int val; | ||
1385 | |||
1386 | if (qm->ver == QM_HW_V2) { | ||
1387 | writel(0x1, qm->io_base + QM_CACHE_WB_START); | ||
1388 | if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, | ||
1389 | val, val & BIT(0), 10, 1000)) | ||
1390 | dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n"); | ||
1391 | } | ||
1392 | } | ||
1393 | |||
1394 | /** | ||
1395 | * hisi_qm_init() - Initialize the qm's configuration. | ||
1396 | * @qm: The qm to be initialized. | ||
1397 | * | ||
1398 | * This function initializes the qm; hisi_qm_start() then puts the qm to work. | ||
1399 | */ | ||
1400 | int hisi_qm_init(struct hisi_qm *qm) | ||
1401 | { | ||
1402 | struct pci_dev *pdev = qm->pdev; | ||
1403 | struct device *dev = &pdev->dev; | ||
1404 | unsigned int num_vec; | ||
1405 | int ret; | ||
1406 | |||
1407 | switch (qm->ver) { | ||
1408 | case QM_HW_V1: | ||
1409 | qm->ops = &qm_hw_ops_v1; | ||
1410 | break; | ||
1411 | case QM_HW_V2: | ||
1412 | qm->ops = &qm_hw_ops_v2; | ||
1413 | break; | ||
1414 | default: | ||
1415 | return -EINVAL; | ||
1416 | } | ||
1417 | |||
1418 | ret = pci_enable_device_mem(pdev); | ||
1419 | if (ret < 0) { | ||
1420 | dev_err(&pdev->dev, "Failed to enable device mem!\n"); | ||
1421 | return ret; | ||
1422 | } | ||
1423 | |||
1424 | ret = pci_request_mem_regions(pdev, qm->dev_name); | ||
1425 | if (ret < 0) { | ||
1426 | dev_err(&pdev->dev, "Failed to request mem regions!\n"); | ||
1427 | goto err_disable_pcidev; | ||
1428 | } | ||
1429 | |||
1430 | qm->io_base = ioremap(pci_resource_start(pdev, PCI_BAR_2), | ||
1431 | pci_resource_len(qm->pdev, PCI_BAR_2)); | ||
1432 | if (!qm->io_base) { | ||
1433 | ret = -EIO; | ||
1434 | goto err_release_mem_regions; | ||
1435 | } | ||
1436 | |||
1437 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); | ||
1438 | if (ret < 0) | ||
1439 | goto err_iounmap; | ||
1440 | pci_set_master(pdev); | ||
1441 | |||
1442 | if (!qm->ops->get_irq_num) { | ||
1443 | ret = -EOPNOTSUPP; | ||
1444 | goto err_iounmap; | ||
1445 | } | ||
1446 | num_vec = qm->ops->get_irq_num(qm); | ||
1447 | ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI); | ||
1448 | if (ret < 0) { | ||
1449 | dev_err(dev, "Failed to enable MSI vectors!\n"); | ||
1450 | goto err_iounmap; | ||
1451 | } | ||
1452 | |||
1453 | ret = qm_irq_register(qm); | ||
1454 | if (ret) | ||
1455 | goto err_free_irq_vectors; | ||
1456 | |||
1457 | mutex_init(&qm->mailbox_lock); | ||
1458 | rwlock_init(&qm->qps_lock); | ||
1459 | |||
1460 | dev_dbg(dev, "init qm %s with %s\n", pdev->is_physfn ? "pf" : "vf", | ||
1461 | qm->use_dma_api ? "dma api" : "iommu api"); | ||
1462 | |||
1463 | return 0; | ||
1464 | |||
1465 | err_free_irq_vectors: | ||
1466 | pci_free_irq_vectors(pdev); | ||
1467 | err_iounmap: | ||
1468 | iounmap(qm->io_base); | ||
1469 | err_release_mem_regions: | ||
1470 | pci_release_mem_regions(pdev); | ||
1471 | err_disable_pcidev: | ||
1472 | pci_disable_device(pdev); | ||
1473 | |||
1474 | return ret; | ||
1475 | } | ||
1476 | EXPORT_SYMBOL_GPL(hisi_qm_init); | ||
1477 | |||
1478 | /** | ||
1479 | * hisi_qm_uninit() - Uninitialize qm. | ||
1480 | * @qm: The qm to be uninitialized. | ||
1481 | * | ||
1482 | * This function releases the qm's device resources. | ||
1483 | */ | ||
1484 | void hisi_qm_uninit(struct hisi_qm *qm) | ||
1485 | { | ||
1486 | struct pci_dev *pdev = qm->pdev; | ||
1487 | struct device *dev = &pdev->dev; | ||
1488 | |||
1489 | if (qm->use_dma_api && qm->qdma.va) { | ||
1490 | hisi_qm_cache_wb(qm); | ||
1491 | dma_free_coherent(dev, qm->qdma.size, | ||
1492 | qm->qdma.va, qm->qdma.dma); | ||
1493 | memset(&qm->qdma, 0, sizeof(qm->qdma)); | ||
1494 | } | ||
1495 | |||
1496 | qm_irq_unregister(qm); | ||
1497 | pci_free_irq_vectors(pdev); | ||
1498 | iounmap(qm->io_base); | ||
1499 | pci_release_mem_regions(pdev); | ||
1500 | pci_disable_device(pdev); | ||
1501 | } | ||
1502 | EXPORT_SYMBOL_GPL(hisi_qm_uninit); | ||
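A minimal probe-time sketch of the init/uninit pairing, assuming a hypothetical accelerator driver (example_probe, "example_acc" and the queue numbers are illustrative assumptions, not from this patch):

	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		struct hisi_qm *qm;
		int ret;

		qm = devm_kzalloc(&pdev->dev, sizeof(*qm), GFP_KERNEL);
		if (!qm)
			return -ENOMEM;

		qm->pdev = pdev;
		qm->ver = hisi_qm_get_hw_version(pdev);
		qm->dev_name = "example_acc";
		qm->fun_type = pdev->is_physfn ? QM_HW_PF : QM_HW_VF;
		qm->sqe_size = 128;		/* accelerator-specific */
		qm->qp_base = 0;		/* queue allocation is device policy */
		qm->qp_num = 16;
		qm->ctrl_qp_num = 16;
		qm->use_dma_api = true;

		ret = hisi_qm_init(qm);
		if (ret)
			return ret;

		ret = hisi_qm_start(qm);
		if (ret)
			hisi_qm_uninit(qm);

		return ret;
	}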
1503 | |||
1504 | /** | ||
1505 | * hisi_qm_get_vft() - Get vft from a qm. | ||
1506 | * @qm: The qm we want to get its vft. | ||
1507 | * @base: The base number of queue in vft. | ||
1508 | * @number: The number of queues in vft. | ||
1509 | * | ||
1510 | * Multiple queues can be allocated to a qm by configuring the virtual | ||
1511 | * function table. This function reads that configuration; normally it is | ||
1512 | * called in the VF driver to get the queue information. | ||
1513 | * | ||
1514 | * qm hw v1 does not support this interface. | ||
1515 | */ | ||
1516 | int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) | ||
1517 | { | ||
1518 | if (!base || !number) | ||
1519 | return -EINVAL; | ||
1520 | |||
1521 | if (!qm->ops->get_vft) { | ||
1522 | dev_err(&qm->pdev->dev, "Don't support vft read!\n"); | ||
1523 | return -EINVAL; | ||
1524 | } | ||
1525 | |||
1526 | return qm->ops->get_vft(qm, base, number); | ||
1527 | } | ||
1528 | EXPORT_SYMBOL_GPL(hisi_qm_get_vft); | ||
1529 | |||
1530 | /** | ||
1531 | * hisi_qm_set_vft() - Set "virtual function table" for a qm. | ||
1532 | * @qm: The qm in which to set the vft, always in a PF. | ||
1533 | * @fun_num: Number of the function to operate on. | ||
1534 | * @base: The base number of queue in vft. | ||
1535 | * @number: The number of queues in vft. 0 means invalid vft. | ||
1536 | * | ||
1537 | * This function is always called in the PF driver; it is used to assign queues | ||
1538 | * among PF and VFs. | ||
1539 | * | ||
1540 | * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1) | ||
1541 | * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1) | ||
1542 | * (VF function number 0x2) | ||
1543 | */ | ||
1544 | int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, | ||
1545 | u32 number) | ||
1546 | { | ||
1547 | u32 max_q_num = qm->ctrl_qp_num; | ||
1548 | |||
1549 | if (base >= max_q_num || number > max_q_num || | ||
1550 | (base + number) > max_q_num) | ||
1551 | return -EINVAL; | ||
1552 | |||
1553 | return qm_set_sqc_cqc_vft(qm, fun_num, base, number); | ||
1554 | } | ||
1555 | EXPORT_SYMBOL_GPL(hisi_qm_set_vft); | ||
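Reading the examples above concretely: a PF driver that controls 64 queues might keep the first 16 for itself and hand the remaining 48 to VF 1. A short sketch under that assumption:

	/* queues 0..15 -> PF (function 0) */
	ret = hisi_qm_set_vft(qm, 0, 0, 16);
	if (!ret)
		/* queues 16..63 -> VF 1 */
		ret = hisi_qm_set_vft(qm, 1, 16, 48);

Passing number == 0 invalidates a function's vft, which is what hisi_qm_stop() below does for the PF.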
1556 | |||
1557 | static void qm_init_eq_aeq_status(struct hisi_qm *qm) | ||
1558 | { | ||
1559 | struct hisi_qm_status *status = &qm->status; | ||
1560 | |||
1561 | status->eq_head = 0; | ||
1562 | status->aeq_head = 0; | ||
1563 | status->eqc_phase = 1; | ||
1564 | status->aeqc_phase = 1; | ||
1565 | } | ||
1566 | |||
1567 | static int qm_eq_ctx_cfg(struct hisi_qm *qm) | ||
1568 | { | ||
1569 | struct device *dev = &qm->pdev->dev; | ||
1570 | struct qm_eqc *eqc; | ||
1571 | struct qm_aeqc *aeqc; | ||
1572 | dma_addr_t eqc_dma; | ||
1573 | dma_addr_t aeqc_dma; | ||
1574 | int ret; | ||
1575 | |||
1576 | qm_init_eq_aeq_status(qm); | ||
1577 | |||
1578 | eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL); | ||
1579 | if (!eqc) | ||
1580 | return -ENOMEM; | ||
1581 | eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc), | ||
1582 | DMA_TO_DEVICE); | ||
1583 | if (dma_mapping_error(dev, eqc_dma)) { | ||
1584 | kfree(eqc); | ||
1585 | return -ENOMEM; | ||
1586 | } | ||
1587 | |||
1588 | eqc->base_l = lower_32_bits(qm->eqe_dma); | ||
1589 | eqc->base_h = upper_32_bits(qm->eqe_dma); | ||
1590 | if (qm->ver == QM_HW_V1) | ||
1591 | eqc->dw3 = QM_EQE_AEQE_SIZE; | ||
1592 | eqc->dw6 = (QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT); | ||
1593 | ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0); | ||
1594 | dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE); | ||
1595 | kfree(eqc); | ||
1596 | if (ret) | ||
1597 | return ret; | ||
1598 | |||
1599 | aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL); | ||
1600 | if (!aeqc) | ||
1601 | return -ENOMEM; | ||
1602 | aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc), | ||
1603 | DMA_TO_DEVICE); | ||
1604 | if (dma_mapping_error(dev, aeqc_dma)) { | ||
1605 | kfree(aeqc); | ||
1606 | return -ENOMEM; | ||
1607 | } | ||
1608 | |||
1609 | aeqc->base_l = lower_32_bits(qm->aeqe_dma); | ||
1610 | aeqc->base_h = upper_32_bits(qm->aeqe_dma); | ||
1611 | aeqc->dw6 = (QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT); | ||
1612 | |||
1613 | ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0); | ||
1614 | dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE); | ||
1615 | kfree(aeqc); | ||
1616 | |||
1617 | return ret; | ||
1618 | } | ||
1619 | |||
1620 | static int __hisi_qm_start(struct hisi_qm *qm) | ||
1621 | { | ||
1622 | struct pci_dev *pdev = qm->pdev; | ||
1623 | struct device *dev = &pdev->dev; | ||
1624 | size_t off = 0; | ||
1625 | int ret; | ||
1626 | |||
1627 | #define QM_INIT_BUF(qm, type, num) do { \ | ||
1628 | (qm)->type = ((qm)->qdma.va + (off)); \ | ||
1629 | (qm)->type##_dma = (qm)->qdma.dma + (off); \ | ||
1630 | off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \ | ||
1631 | } while (0) | ||
1632 | |||
1633 | WARN_ON(!qm->qdma.dma); | ||
1634 | |||
1635 | if (qm->qp_num == 0) | ||
1636 | return -EINVAL; | ||
1637 | |||
1638 | if (qm->fun_type == QM_HW_PF) { | ||
1639 | ret = qm_dev_mem_reset(qm); | ||
1640 | if (ret) | ||
1641 | return ret; | ||
1642 | |||
1643 | ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); | ||
1644 | if (ret) | ||
1645 | return ret; | ||
1646 | } | ||
1647 | |||
1648 | QM_INIT_BUF(qm, eqe, QM_Q_DEPTH); | ||
1649 | QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH); | ||
1650 | QM_INIT_BUF(qm, sqc, qm->qp_num); | ||
1651 | QM_INIT_BUF(qm, cqc, qm->qp_num); | ||
1652 | |||
1653 | dev_dbg(dev, "init qm buffer:\n" | ||
1654 | " eqe (%pK, %lx)\n" | ||
1655 | " aeqe (%pK, %lx)\n" | ||
1656 | " sqc (%pK, %lx)\n" | ||
1657 | " cqc (%pK, %lx)\n", | ||
1658 | qm->eqe, (unsigned long)qm->eqe_dma, | ||
1659 | qm->aeqe, (unsigned long)qm->aeqe_dma, | ||
1660 | qm->sqc, (unsigned long)qm->sqc_dma, | ||
1661 | qm->cqc, (unsigned long)qm->cqc_dma); | ||
1662 | |||
1663 | ret = qm_eq_ctx_cfg(qm); | ||
1664 | if (ret) | ||
1665 | return ret; | ||
1666 | |||
1667 | ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); | ||
1668 | if (ret) | ||
1669 | return ret; | ||
1670 | |||
1671 | ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); | ||
1672 | if (ret) | ||
1673 | return ret; | ||
1674 | |||
1675 | writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); | ||
1676 | writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); | ||
1677 | |||
1678 | return 0; | ||
1679 | } | ||
1680 | |||
1681 | /** | ||
1682 | * hisi_qm_start() - Start a qm. | ||
1683 | * @qm: The qm to be started. | ||
1684 | * | ||
1685 | * This function starts the qm; qps can then be allocated from it. | ||
1686 | */ | ||
1687 | int hisi_qm_start(struct hisi_qm *qm) | ||
1688 | { | ||
1689 | struct device *dev = &qm->pdev->dev; | ||
1690 | |||
1691 | dev_dbg(dev, "qm start with %d queue pairs\n", qm->qp_num); | ||
1692 | |||
1693 | if (!qm->qp_num) { | ||
1694 | dev_err(dev, "qp_num should not be 0\n"); | ||
1695 | return -EINVAL; | ||
1696 | } | ||
1697 | |||
1698 | if (!qm->qp_bitmap) { | ||
1699 | qm->qp_bitmap = devm_kcalloc(dev, BITS_TO_LONGS(qm->qp_num), | ||
1700 | sizeof(long), GFP_KERNEL); | ||
1701 | qm->qp_array = devm_kcalloc(dev, qm->qp_num, | ||
1702 | sizeof(struct hisi_qp *), | ||
1703 | GFP_KERNEL); | ||
1704 | if (!qm->qp_bitmap || !qm->qp_array) | ||
1705 | return -ENOMEM; | ||
1706 | } | ||
1707 | |||
1708 | if (!qm->use_dma_api) { | ||
1709 | dev_dbg(&qm->pdev->dev, "qm delay start\n"); | ||
1710 | return 0; | ||
1711 | } else if (!qm->qdma.va) { | ||
1712 | qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) + | ||
1713 | QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) + | ||
1714 | QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + | ||
1715 | QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); | ||
1716 | qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, | ||
1717 | &qm->qdma.dma, GFP_KERNEL); | ||
1718 | dev_dbg(dev, "allocate qm dma buf(va=%pK, dma=%pad, size=%zx)\n", | ||
1719 | qm->qdma.va, &qm->qdma.dma, qm->qdma.size); | ||
1720 | if (!qm->qdma.va) | ||
1721 | return -ENOMEM; | ||
1722 | } | ||
1723 | |||
1724 | return __hisi_qm_start(qm); | ||
1725 | } | ||
1726 | EXPORT_SYMBOL_GPL(hisi_qm_start); | ||
1727 | |||
1728 | /** | ||
1729 | * hisi_qm_stop() - Stop a qm. | ||
1730 | * @qm: The qm which will be stopped. | ||
1731 | * | ||
1732 | * This function stops the qm and its qps; the qm then cannot accept requests. | ||
1733 | * Related resources are not released in this state; hisi_qm_start() can be | ||
1734 | * used to start the qm again. | ||
1735 | */ | ||
1736 | int hisi_qm_stop(struct hisi_qm *qm) | ||
1737 | { | ||
1738 | struct device *dev; | ||
1739 | struct hisi_qp *qp; | ||
1740 | int ret = 0, i; | ||
1741 | |||
1742 | if (!qm || !qm->pdev) { | ||
1743 | WARN_ON(1); | ||
1744 | return -EINVAL; | ||
1745 | } | ||
1746 | |||
1747 | dev = &qm->pdev->dev; | ||
1748 | |||
1749 | /* Mask eq and aeq irq */ | ||
1750 | writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); | ||
1751 | writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); | ||
1752 | |||
1753 | /* Stop all qps belong to this qm */ | ||
1754 | for (i = 0; i < qm->qp_num; i++) { | ||
1755 | qp = qm->qp_array[i]; | ||
1756 | if (qp) { | ||
1757 | ret = hisi_qm_stop_qp(qp); | ||
1758 | if (ret < 0) { | ||
1759 | dev_err(dev, "Failed to stop qp%d!\n", i); | ||
1760 | return -EBUSY; | ||
1761 | } | ||
1762 | } | ||
1763 | } | ||
1764 | |||
1765 | if (qm->fun_type == QM_HW_PF) { | ||
1766 | ret = hisi_qm_set_vft(qm, 0, 0, 0); | ||
1767 | if (ret < 0) | ||
1768 | dev_err(dev, "Failed to set vft!\n"); | ||
1769 | } | ||
1770 | |||
1771 | return ret; | ||
1772 | } | ||
1773 | EXPORT_SYMBOL_GPL(hisi_qm_stop); | ||
1774 | |||
1775 | /** | ||
1776 | * hisi_qm_debug_init() - Initialize qm related debugfs files. | ||
1777 | * @qm: The qm for which we want to add debugfs files. | ||
1778 | * | ||
1779 | * Create qm related debugfs files. | ||
1780 | */ | ||
1781 | int hisi_qm_debug_init(struct hisi_qm *qm) | ||
1782 | { | ||
1783 | struct dentry *qm_d, *qm_regs; | ||
1784 | int i, ret; | ||
1785 | |||
1786 | qm_d = debugfs_create_dir("qm", qm->debug.debug_root); | ||
1787 | if (IS_ERR(qm_d)) | ||
1788 | return -ENOENT; | ||
1789 | qm->debug.qm_d = qm_d; | ||
1790 | |||
1791 | /* only show this in PF */ | ||
1792 | if (qm->fun_type == QM_HW_PF) | ||
1793 | for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++) | ||
1794 | if (qm_create_debugfs_file(qm, i)) { | ||
1795 | ret = -ENOENT; | ||
1796 | goto failed_to_create; | ||
1797 | } | ||
1798 | |||
1799 | qm_regs = debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm, | ||
1800 | &qm_regs_fops); | ||
1801 | if (IS_ERR(qm_regs)) { | ||
1802 | ret = -ENOENT; | ||
1803 | goto failed_to_create; | ||
1804 | } | ||
1805 | |||
1806 | return 0; | ||
1807 | |||
1808 | failed_to_create: | ||
1809 | debugfs_remove_recursive(qm_d); | ||
1810 | return ret; | ||
1811 | } | ||
1812 | EXPORT_SYMBOL_GPL(hisi_qm_debug_init); | ||
1813 | |||
1814 | /** | ||
1815 | * hisi_qm_debug_regs_clear() - Clear the qm's debug related registers. | ||
1816 | * @qm: The qm whose debug registers we want to clear. | ||
1817 | */ | ||
1818 | void hisi_qm_debug_regs_clear(struct hisi_qm *qm) | ||
1819 | { | ||
1820 | struct qm_dfx_registers *regs; | ||
1821 | int i; | ||
1822 | |||
1823 | /* clear current_q */ | ||
1824 | writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); | ||
1825 | writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); | ||
1826 | |||
1827 | /* | ||
1828 | * these registers are clear-on-read while QM_DFX_CNT_CLR_CE is set, | ||
1829 | * so reading them clears the counters. | ||
1830 | */ | ||
1831 | writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE); | ||
1832 | |||
1833 | regs = qm_dfx_regs; | ||
1834 | for (i = 0; i < CNT_CYC_REGS_NUM; i++) { | ||
1835 | readl(qm->io_base + regs->reg_offset); | ||
1836 | regs++; | ||
1837 | } | ||
1838 | |||
1839 | writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE); | ||
1840 | } | ||
1841 | EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear); | ||
1842 | |||
1843 | /** | ||
1844 | * hisi_qm_hw_error_init() - Configure qm hardware error report method. | ||
1845 | * @qm: The qm which we want to configure. | ||
1846 | * @ce: Bit mask of errors to report as correctable. | ||
1847 | * @nfe: Bit mask of errors to report as non-fatal. | ||
1848 | * @fe: Bit mask of errors to report as fatal. | ||
1849 | * @msi: Bit mask of errors to report via message signaled interrupt. | ||
1850 | * | ||
1851 | * Hardware errors of qm can be reported either by RAS interrupts which will | ||
1852 | * be handled by UEFI and then PCIe AER or by device MSI. User can configure | ||
1853 | * each error to use either of above two methods. For RAS interrupts, we can | ||
1854 | * configure an error as one of correctable error, non-fatal error or | ||
1855 | * fatal error. | ||
1856 | * | ||
1857 | * Error bits can be set in ce, nfe, fe and msi to enable the related report | ||
1858 | * methods. Reporting of an error is masked if its bit is not configured in | ||
1859 | * any of the masks. | ||
1860 | */ | ||
1861 | void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, | ||
1862 | u32 msi) | ||
1863 | { | ||
1864 | if (!qm->ops->hw_error_init) { | ||
1865 | dev_err(&qm->pdev->dev, "QM version %d doesn't support hw error handling!\n", | ||
1866 | qm->ver); | ||
1867 | return; | ||
1868 | } | ||
1869 | |||
1870 | qm->ops->hw_error_init(qm, ce, nfe, fe, msi); | ||
1871 | } | ||
1872 | EXPORT_SYMBOL_GPL(hisi_qm_hw_error_init); | ||
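As a hedged illustration using the QM_BASE_CE and QM_BASE_NFE masks defined in qm.h below (which masks an accelerator actually passes is device policy, not fixed by this API):

	/*
	 * Report 1-bit ECC as correctable and the base non-fatal set plus
	 * writeback timeouts via RAS; report random doorbells via MSI;
	 * treat nothing as fatal.
	 */
	hisi_qm_hw_error_init(qm, QM_BASE_CE,
			      QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT,
			      0, QM_DB_RANDOM_INVALID);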
1873 | |||
1874 | /** | ||
1875 | * hisi_qm_hw_error_handle() - Handle qm non-fatal hardware errors. | ||
1876 | * @qm: The qm which has non-fatal hardware errors. | ||
1877 | * | ||
1878 | * Accelerators use this function to handle qm non-fatal hardware errors. | ||
1879 | */ | ||
1880 | int hisi_qm_hw_error_handle(struct hisi_qm *qm) | ||
1881 | { | ||
1882 | if (!qm->ops->hw_error_handle) { | ||
1883 | dev_err(&qm->pdev->dev, "QM version %d doesn't support hw error report!\n", | ||
1884 | qm->ver); | ||
1885 | return PCI_ERS_RESULT_NONE; | ||
1886 | } | ||
1887 | |||
1888 | return qm->ops->hw_error_handle(qm); | ||
1889 | } | ||
1890 | EXPORT_SYMBOL_GPL(hisi_qm_hw_error_handle); | ||
1891 | |||
1892 | /** | ||
1893 | * hisi_qm_get_hw_version() - Get hardware version of a qm. | ||
1894 | * @pdev: The device whose hardware version we want to get. | ||
1895 | * | ||
1896 | * This function gets the hardware version of a qm. Returns QM_HW_UNKNOWN | ||
1897 | * if the hardware version is not supported. | ||
1898 | */ | ||
1899 | enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev) | ||
1900 | { | ||
1901 | switch (pdev->revision) { | ||
1902 | case QM_HW_V1: | ||
1903 | case QM_HW_V2: | ||
1904 | return pdev->revision; | ||
1905 | default: | ||
1906 | return QM_HW_UNKNOWN; | ||
1907 | } | ||
1908 | } | ||
1909 | EXPORT_SYMBOL_GPL(hisi_qm_get_hw_version); | ||
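Callers typically gate the rest of probe on this result; a brief sketch (surrounding code hypothetical):

	enum qm_hw_ver ver = hisi_qm_get_hw_version(pdev);

	if (ver == QM_HW_UNKNOWN) {
		dev_err(&pdev->dev, "unsupported PCI revision 0x%x\n",
			pdev->revision);
		return -EINVAL;
	}
	qm->ver = ver;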
1910 | |||
1911 | MODULE_LICENSE("GPL v2"); | ||
1912 | MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>"); | ||
1913 | MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver"); | ||
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h new file mode 100644 index 000000000000..70e672ae86bf --- /dev/null +++ b/drivers/crypto/hisilicon/qm.h | |||
@@ -0,0 +1,215 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* Copyright (c) 2019 HiSilicon Limited. */ | ||
3 | #ifndef HISI_ACC_QM_H | ||
4 | #define HISI_ACC_QM_H | ||
5 | |||
6 | #include <linux/bitfield.h> | ||
7 | #include <linux/iopoll.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/pci.h> | ||
10 | |||
11 | /* qm user domain */ | ||
12 | #define QM_ARUSER_M_CFG_1 0x100088 | ||
13 | #define AXUSER_SNOOP_ENABLE BIT(30) | ||
14 | #define AXUSER_CMD_TYPE GENMASK(14, 12) | ||
15 | #define AXUSER_CMD_SMMU_NORMAL 1 | ||
16 | #define AXUSER_NS BIT(6) | ||
17 | #define AXUSER_NO BIT(5) | ||
18 | #define AXUSER_FP BIT(4) | ||
19 | #define AXUSER_SSV BIT(0) | ||
20 | #define AXUSER_BASE (AXUSER_SNOOP_ENABLE | \ | ||
21 | FIELD_PREP(AXUSER_CMD_TYPE, \ | ||
22 | AXUSER_CMD_SMMU_NORMAL) | \ | ||
23 | AXUSER_NS | AXUSER_NO | AXUSER_FP) | ||
24 | #define QM_ARUSER_M_CFG_ENABLE 0x100090 | ||
25 | #define ARUSER_M_CFG_ENABLE 0xfffffffe | ||
26 | #define QM_AWUSER_M_CFG_1 0x100098 | ||
27 | #define QM_AWUSER_M_CFG_ENABLE 0x1000a0 | ||
28 | #define AWUSER_M_CFG_ENABLE 0xfffffffe | ||
29 | #define QM_WUSER_M_CFG_ENABLE 0x1000a8 | ||
30 | #define WUSER_M_CFG_ENABLE 0xffffffff | ||
31 | |||
32 | /* qm cache */ | ||
33 | #define QM_CACHE_CTL 0x100050 | ||
34 | #define SQC_CACHE_ENABLE BIT(0) | ||
35 | #define CQC_CACHE_ENABLE BIT(1) | ||
36 | #define SQC_CACHE_WB_ENABLE BIT(4) | ||
37 | #define SQC_CACHE_WB_THRD GENMASK(10, 5) | ||
38 | #define CQC_CACHE_WB_ENABLE BIT(11) | ||
39 | #define CQC_CACHE_WB_THRD GENMASK(17, 12) | ||
40 | #define QM_AXI_M_CFG 0x1000ac | ||
41 | #define AXI_M_CFG 0xffff | ||
42 | #define QM_AXI_M_CFG_ENABLE 0x1000b0 | ||
43 | #define AXI_M_CFG_ENABLE 0xffffffff | ||
44 | #define QM_PEH_AXUSER_CFG 0x1000cc | ||
45 | #define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0 | ||
46 | #define PEH_AXUSER_CFG 0x401001 | ||
47 | #define PEH_AXUSER_CFG_ENABLE 0xffffffff | ||
48 | |||
49 | #define QM_DFX_MB_CNT_VF 0x104010 | ||
50 | #define QM_DFX_DB_CNT_VF 0x104020 | ||
51 | #define QM_DFX_SQE_CNT_VF_SQN 0x104030 | ||
52 | #define QM_DFX_CQE_CNT_VF_CQN 0x104040 | ||
53 | #define QM_DFX_QN_SHIFT 16 | ||
54 | #define CURRENT_FUN_MASK GENMASK(5, 0) | ||
55 | #define CURRENT_Q_MASK GENMASK(31, 16) | ||
56 | |||
57 | #define QM_AXI_RRESP BIT(0) | ||
58 | #define QM_AXI_BRESP BIT(1) | ||
59 | #define QM_ECC_MBIT BIT(2) | ||
60 | #define QM_ECC_1BIT BIT(3) | ||
61 | #define QM_ACC_GET_TASK_TIMEOUT BIT(4) | ||
62 | #define QM_ACC_DO_TASK_TIMEOUT BIT(5) | ||
63 | #define QM_ACC_WB_NOT_READY_TIMEOUT BIT(6) | ||
64 | #define QM_SQ_CQ_VF_INVALID BIT(7) | ||
65 | #define QM_CQ_VF_INVALID BIT(8) | ||
66 | #define QM_SQ_VF_INVALID BIT(9) | ||
67 | #define QM_DB_TIMEOUT BIT(10) | ||
68 | #define QM_OF_FIFO_OF BIT(11) | ||
69 | #define QM_DB_RANDOM_INVALID BIT(12) | ||
70 | |||
71 | #define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \ | ||
72 | QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \ | ||
73 | QM_OF_FIFO_OF) | ||
74 | #define QM_BASE_CE QM_ECC_1BIT | ||
75 | |||
76 | #define QM_Q_DEPTH 1024 | ||
77 | |||
78 | enum qp_state { | ||
79 | QP_STOP, | ||
80 | }; | ||
81 | |||
82 | enum qm_hw_ver { | ||
83 | QM_HW_UNKNOWN = -1, | ||
84 | QM_HW_V1 = 0x20, | ||
85 | QM_HW_V2 = 0x21, | ||
86 | }; | ||
87 | |||
88 | enum qm_fun_type { | ||
89 | QM_HW_PF, | ||
90 | QM_HW_VF, | ||
91 | }; | ||
92 | |||
93 | enum qm_debug_file { | ||
94 | CURRENT_Q, | ||
95 | CLEAR_ENABLE, | ||
96 | DEBUG_FILE_NUM, | ||
97 | }; | ||
98 | |||
99 | struct debugfs_file { | ||
100 | enum qm_debug_file index; | ||
101 | struct mutex lock; | ||
102 | struct qm_debug *debug; | ||
103 | }; | ||
104 | |||
105 | struct qm_debug { | ||
106 | u32 curr_qm_qp_num; | ||
107 | struct dentry *debug_root; | ||
108 | struct dentry *qm_d; | ||
109 | struct debugfs_file files[DEBUG_FILE_NUM]; | ||
110 | }; | ||
111 | |||
112 | struct qm_dma { | ||
113 | void *va; | ||
114 | dma_addr_t dma; | ||
115 | size_t size; | ||
116 | }; | ||
117 | |||
118 | struct hisi_qm_status { | ||
119 | u32 eq_head; | ||
120 | bool eqc_phase; | ||
121 | u32 aeq_head; | ||
122 | bool aeqc_phase; | ||
123 | unsigned long flags; | ||
124 | }; | ||
125 | |||
126 | struct hisi_qm { | ||
127 | enum qm_hw_ver ver; | ||
128 | enum qm_fun_type fun_type; | ||
129 | const char *dev_name; | ||
130 | struct pci_dev *pdev; | ||
131 | void __iomem *io_base; | ||
132 | u32 sqe_size; | ||
133 | u32 qp_base; | ||
134 | u32 qp_num; | ||
135 | u32 ctrl_qp_num; | ||
136 | |||
137 | struct qm_dma qdma; | ||
138 | struct qm_sqc *sqc; | ||
139 | struct qm_cqc *cqc; | ||
140 | struct qm_eqe *eqe; | ||
141 | struct qm_aeqe *aeqe; | ||
142 | dma_addr_t sqc_dma; | ||
143 | dma_addr_t cqc_dma; | ||
144 | dma_addr_t eqe_dma; | ||
145 | dma_addr_t aeqe_dma; | ||
146 | |||
147 | struct hisi_qm_status status; | ||
148 | |||
149 | rwlock_t qps_lock; | ||
150 | unsigned long *qp_bitmap; | ||
151 | struct hisi_qp **qp_array; | ||
152 | |||
153 | struct mutex mailbox_lock; | ||
154 | |||
155 | const struct hisi_qm_hw_ops *ops; | ||
156 | |||
157 | struct qm_debug debug; | ||
158 | |||
159 | u32 error_mask; | ||
160 | u32 msi_mask; | ||
161 | |||
162 | bool use_dma_api; | ||
163 | }; | ||
164 | |||
165 | struct hisi_qp_status { | ||
166 | atomic_t used; | ||
167 | u16 sq_tail; | ||
168 | u16 cq_head; | ||
169 | bool cqc_phase; | ||
170 | unsigned long flags; | ||
171 | }; | ||
172 | |||
173 | struct hisi_qp_ops { | ||
174 | int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm); | ||
175 | }; | ||
176 | |||
177 | struct hisi_qp { | ||
178 | u32 qp_id; | ||
179 | u8 alg_type; | ||
180 | u8 req_type; | ||
181 | |||
182 | struct qm_dma qdma; | ||
183 | void *sqe; | ||
184 | struct qm_cqe *cqe; | ||
185 | dma_addr_t sqe_dma; | ||
186 | dma_addr_t cqe_dma; | ||
187 | |||
188 | struct hisi_qp_status qp_status; | ||
189 | struct hisi_qp_ops *hw_ops; | ||
190 | void *qp_ctx; | ||
191 | void (*req_cb)(struct hisi_qp *qp, void *data); | ||
192 | struct work_struct work; | ||
193 | struct workqueue_struct *wq; | ||
194 | |||
195 | struct hisi_qm *qm; | ||
196 | }; | ||
197 | |||
198 | int hisi_qm_init(struct hisi_qm *qm); | ||
199 | void hisi_qm_uninit(struct hisi_qm *qm); | ||
200 | int hisi_qm_start(struct hisi_qm *qm); | ||
201 | int hisi_qm_stop(struct hisi_qm *qm); | ||
202 | struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type); | ||
203 | int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg); | ||
204 | int hisi_qm_stop_qp(struct hisi_qp *qp); | ||
205 | void hisi_qm_release_qp(struct hisi_qp *qp); | ||
206 | int hisi_qp_send(struct hisi_qp *qp, const void *msg); | ||
207 | int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number); | ||
208 | int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number); | ||
209 | int hisi_qm_debug_init(struct hisi_qm *qm); | ||
210 | void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, | ||
211 | u32 msi); | ||
212 | int hisi_qm_hw_error_handle(struct hisi_qm *qm); | ||
213 | enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev); | ||
214 | void hisi_qm_debug_regs_clear(struct hisi_qm *qm); | ||
215 | #endif | ||
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index 02768af0dccd..e0508ea160f1 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c | |||
@@ -9,7 +9,7 @@ | |||
9 | 9 | ||
10 | #include <crypto/aes.h> | 10 | #include <crypto/aes.h> |
11 | #include <crypto/algapi.h> | 11 | #include <crypto/algapi.h> |
12 | #include <crypto/des.h> | 12 | #include <crypto/internal/des.h> |
13 | #include <crypto/skcipher.h> | 13 | #include <crypto/skcipher.h> |
14 | #include <crypto/xts.h> | 14 | #include <crypto/xts.h> |
15 | #include <crypto/internal/skcipher.h> | 15 | #include <crypto/internal/skcipher.h> |
@@ -347,25 +347,21 @@ static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm, | |||
347 | static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm, | 347 | static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm, |
348 | const u8 *key, unsigned int keylen) | 348 | const u8 *key, unsigned int keylen) |
349 | { | 349 | { |
350 | if (keylen != DES_KEY_SIZE) | 350 | return verify_skcipher_des_key(tfm, key) ?: |
351 | return -EINVAL; | 351 | sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64); |
352 | |||
353 | return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64); | ||
354 | } | 352 | } |
355 | 353 | ||
356 | static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm, | 354 | static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm, |
357 | const u8 *key, unsigned int keylen) | 355 | const u8 *key, unsigned int keylen) |
358 | { | 356 | { |
359 | if (keylen != DES_KEY_SIZE) | 357 | return verify_skcipher_des_key(tfm, key) ?: |
360 | return -EINVAL; | 358 | sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64); |
361 | |||
362 | return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64); | ||
363 | } | 359 | } |
364 | 360 | ||
365 | static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm, | 361 | static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm, |
366 | const u8 *key, unsigned int keylen) | 362 | const u8 *key, unsigned int keylen) |
367 | { | 363 | { |
368 | return unlikely(des3_verify_key(tfm, key)) ?: | 364 | return verify_skcipher_des3_key(tfm, key) ?: |
369 | sec_alg_skcipher_setkey(tfm, key, keylen, | 365 | sec_alg_skcipher_setkey(tfm, key, keylen, |
370 | SEC_C_3DES_ECB_192_3KEY); | 366 | SEC_C_3DES_ECB_192_3KEY); |
371 | } | 367 | } |
@@ -373,7 +369,7 @@ static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm, | |||
373 | static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm, | 369 | static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm, |
374 | const u8 *key, unsigned int keylen) | 370 | const u8 *key, unsigned int keylen) |
375 | { | 371 | { |
376 | return unlikely(des3_verify_key(tfm, key)) ?: | 372 | return verify_skcipher_des3_key(tfm, key) ?: |
377 | sec_alg_skcipher_setkey(tfm, key, keylen, | 373 | sec_alg_skcipher_setkey(tfm, key, keylen, |
378 | SEC_C_3DES_CBC_192_3KEY); | 374 | SEC_C_3DES_CBC_192_3KEY); |
379 | } | 375 | } |
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c new file mode 100644 index 000000000000..e083d172b618 --- /dev/null +++ b/drivers/crypto/hisilicon/sgl.c | |||
@@ -0,0 +1,214 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* Copyright (c) 2019 HiSilicon Limited. */ | ||
3 | #include <linux/dma-mapping.h> | ||
4 | #include <linux/module.h> | ||
5 | #include "./sgl.h" | ||
6 | |||
7 | #define HISI_ACC_SGL_SGE_NR_MIN 1 | ||
8 | #define HISI_ACC_SGL_SGE_NR_MAX 255 | ||
9 | #define HISI_ACC_SGL_SGE_NR_DEF 10 | ||
10 | #define HISI_ACC_SGL_NR_MAX 256 | ||
11 | #define HISI_ACC_SGL_ALIGN_SIZE 64 | ||
12 | |||
13 | static int acc_sgl_sge_set(const char *val, const struct kernel_param *kp) | ||
14 | { | ||
15 | int ret; | ||
16 | u32 n; | ||
17 | |||
18 | if (!val) | ||
19 | return -EINVAL; | ||
20 | |||
21 | ret = kstrtou32(val, 10, &n); | ||
22 | if (ret != 0 || n > HISI_ACC_SGL_SGE_NR_MAX || n == 0) | ||
23 | return -EINVAL; | ||
24 | |||
25 | return param_set_int(val, kp); | ||
26 | } | ||
27 | |||
28 | static const struct kernel_param_ops acc_sgl_sge_ops = { | ||
29 | .set = acc_sgl_sge_set, | ||
30 | .get = param_get_int, | ||
31 | }; | ||
32 | |||
33 | static u32 acc_sgl_sge_nr = HISI_ACC_SGL_SGE_NR_DEF; | ||
34 | module_param_cb(acc_sgl_sge_nr, &acc_sgl_sge_ops, &acc_sgl_sge_nr, 0444); | ||
35 | MODULE_PARM_DESC(acc_sgl_sge_nr, "Number of sge in sgl (1-255)"); | ||
36 | |||
37 | struct acc_hw_sge { | ||
38 | dma_addr_t buf; | ||
39 | void *page_ctrl; | ||
40 | __le32 len; | ||
41 | __le32 pad; | ||
42 | __le32 pad0; | ||
43 | __le32 pad1; | ||
44 | }; | ||
45 | |||
46 | /* use default sgl head size 64B */ | ||
47 | struct hisi_acc_hw_sgl { | ||
48 | dma_addr_t next_dma; | ||
49 | __le16 entry_sum_in_chain; | ||
50 | __le16 entry_sum_in_sgl; | ||
51 | __le16 entry_length_in_sgl; | ||
52 | __le16 pad0; | ||
53 | __le64 pad1[5]; | ||
54 | struct hisi_acc_hw_sgl *next; | ||
55 | struct acc_hw_sge sge_entries[]; | ||
56 | } __aligned(1); | ||
57 | |||
58 | /** | ||
59 | * hisi_acc_create_sgl_pool() - Create a hw sgl pool. | ||
60 | * @dev: The device to which the hw sgl pool belongs. | ||
61 | * @pool: Pointer of pool. | ||
62 | * @count: Count of hisi_acc_hw_sgl in pool. | ||
63 | * | ||
64 | * This function creates a hw sgl pool; afterwards the user can get hw sgl | ||
65 | * memory from it. | ||
66 | */ | ||
67 | int hisi_acc_create_sgl_pool(struct device *dev, | ||
68 | struct hisi_acc_sgl_pool *pool, u32 count) | ||
69 | { | ||
70 | u32 sgl_size; | ||
71 | u32 size; | ||
72 | |||
73 | if (!dev || !pool || !count) | ||
74 | return -EINVAL; | ||
75 | |||
76 | sgl_size = sizeof(struct acc_hw_sge) * acc_sgl_sge_nr + | ||
77 | sizeof(struct hisi_acc_hw_sgl); | ||
78 | size = sgl_size * count; | ||
79 | |||
80 | pool->sgl = dma_alloc_coherent(dev, size, &pool->sgl_dma, GFP_KERNEL); | ||
81 | if (!pool->sgl) | ||
82 | return -ENOMEM; | ||
83 | |||
84 | pool->size = size; | ||
85 | pool->count = count; | ||
86 | pool->sgl_size = sgl_size; | ||
87 | |||
88 | return 0; | ||
89 | } | ||
90 | EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool); | ||
91 | |||
92 | /** | ||
93 | * hisi_acc_free_sgl_pool() - Free a hw sgl pool. | ||
94 | * @dev: The device to which the hw sgl pool belongs. | ||
95 | * @pool: Pointer of pool. | ||
96 | * | ||
97 | * This function frees memory of a hw sgl pool. | ||
98 | */ | ||
99 | void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool) | ||
100 | { | ||
101 | dma_free_coherent(dev, pool->size, pool->sgl, pool->sgl_dma); | ||
102 | memset(pool, 0, sizeof(struct hisi_acc_sgl_pool)); | ||
103 | } | ||
104 | EXPORT_SYMBOL_GPL(hisi_acc_free_sgl_pool); | ||
105 | |||
106 | struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool, u32 index, | ||
107 | dma_addr_t *hw_sgl_dma) | ||
108 | { | ||
109 | if (!pool || !hw_sgl_dma || index >= pool->count || !pool->sgl) | ||
110 | return ERR_PTR(-EINVAL); | ||
111 | |||
112 | *hw_sgl_dma = pool->sgl_dma + pool->sgl_size * index; | ||
113 | return (void *)pool->sgl + pool->sgl_size * index; | ||
114 | } | ||
115 | |||
116 | void acc_put_sgl(struct hisi_acc_sgl_pool *pool, u32 index) {} | ||
117 | |||
118 | static void sg_map_to_hw_sg(struct scatterlist *sgl, | ||
119 | struct acc_hw_sge *hw_sge) | ||
120 | { | ||
121 | hw_sge->buf = sgl->dma_address; | ||
122 | hw_sge->len = sgl->dma_length; | ||
123 | } | ||
124 | |||
125 | static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl) | ||
126 | { | ||
127 | hw_sgl->entry_sum_in_sgl++; | ||
128 | } | ||
129 | |||
130 | static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum) | ||
131 | { | ||
132 | hw_sgl->entry_sum_in_chain = sum; | ||
133 | } | ||
134 | |||
135 | /** | ||
136 | * hisi_acc_sg_buf_map_to_hw_sgl - Map a scatterlist to a hw sgl. | ||
137 | * @dev: The device to which the hw sgl belongs. | ||
138 | * @sgl: Scatterlist which will be mapped to hw sgl. | ||
139 | * @pool: Pool which hw sgl memory will be allocated in. | ||
140 | * @index: Index of hisi_acc_hw_sgl in pool. | ||
141 | * @hw_sgl_dma: The dma address of allocated hw sgl. | ||
142 | * | ||
143 | * This function builds a hw sgl according to the input sgl; the user can use | ||
144 | * hw_sgl_dma as src/dst in its BD. Only a single hw sgl is supported for now. | ||
145 | */ | ||
146 | struct hisi_acc_hw_sgl * | ||
147 | hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, | ||
148 | struct scatterlist *sgl, | ||
149 | struct hisi_acc_sgl_pool *pool, | ||
150 | u32 index, dma_addr_t *hw_sgl_dma) | ||
151 | { | ||
152 | struct hisi_acc_hw_sgl *curr_hw_sgl; | ||
153 | dma_addr_t curr_sgl_dma = 0; | ||
154 | struct acc_hw_sge *curr_hw_sge; | ||
155 | struct scatterlist *sg; | ||
156 | int sg_n = sg_nents(sgl); | ||
157 | int i, ret; | ||
158 | |||
159 | if (!dev || !sgl || !pool || !hw_sgl_dma || sg_n > acc_sgl_sge_nr) | ||
160 | return ERR_PTR(-EINVAL); | ||
161 | |||
162 | ret = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); | ||
163 | if (!ret) | ||
164 | return ERR_PTR(-EINVAL); | ||
165 | |||
166 | curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma); | ||
167 | if (!curr_hw_sgl) { | ||
168 | ret = -ENOMEM; | ||
169 | goto err_unmap_sg; | ||
170 | } | ||
171 | curr_hw_sgl->entry_length_in_sgl = acc_sgl_sge_nr; | ||
172 | curr_hw_sge = curr_hw_sgl->sge_entries; | ||
173 | |||
174 | for_each_sg(sgl, sg, sg_n, i) { | ||
175 | sg_map_to_hw_sg(sg, curr_hw_sge); | ||
176 | inc_hw_sgl_sge(curr_hw_sgl); | ||
177 | curr_hw_sge++; | ||
178 | } | ||
179 | |||
180 | update_hw_sgl_sum_sge(curr_hw_sgl, acc_sgl_sge_nr); | ||
181 | *hw_sgl_dma = curr_sgl_dma; | ||
182 | |||
183 | return curr_hw_sgl; | ||
184 | |||
185 | err_unmap_sg: | ||
186 | dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); | ||
187 | return ERR_PTR(ret); | ||
188 | } | ||
189 | EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl); | ||
190 | |||
191 | /** | ||
192 | * hisi_acc_sg_buf_unmap() - Unmap allocated hw sgl. | ||
193 | * @dev: The device to which the hw sgl belongs. | ||
194 | * @sgl: Related scatterlist. | ||
195 | * @hw_sgl: Virtual address of hw sgl. | ||
198 | * | ||
199 | * This function unmaps the allocated hw sgl. | ||
200 | */ | ||
201 | void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl, | ||
202 | struct hisi_acc_hw_sgl *hw_sgl) | ||
203 | { | ||
204 | dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL); | ||
205 | |||
206 | hw_sgl->entry_sum_in_chain = 0; | ||
207 | hw_sgl->entry_sum_in_sgl = 0; | ||
208 | hw_sgl->entry_length_in_sgl = 0; | ||
209 | } | ||
210 | EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_unmap); | ||
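End to end, the pool and mapping helpers are used per queue: create a pool sized to the queue depth, map each request's scatterlist into a slot, and unmap on completion. A condensed sketch with error handling abbreviated (names illustrative):

	static int example_map_one(struct device *dev, struct scatterlist *sg)
	{
		struct hisi_acc_sgl_pool pool;
		struct hisi_acc_hw_sgl *hw_sgl;
		dma_addr_t hw_sgl_dma;
		int ret;

		ret = hisi_acc_create_sgl_pool(dev, &pool, 1024);
		if (ret)
			return ret;

		hw_sgl = hisi_acc_sg_buf_map_to_hw_sgl(dev, sg, &pool,
						       0 /* slot index */,
						       &hw_sgl_dma);
		if (IS_ERR(hw_sgl)) {
			ret = PTR_ERR(hw_sgl);
			goto out_free_pool;
		}

		/* ... program hw_sgl_dma as src/dst in the device BD ... */

		hisi_acc_sg_buf_unmap(dev, sg, hw_sgl);
	out_free_pool:
		hisi_acc_free_sgl_pool(dev, &pool);
		return ret;
	}

The zip driver below follows exactly this pattern, using two slots per request: req_id << 1 for the source and (req_id << 1) + 1 for the destination.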
211 | |||
212 | MODULE_LICENSE("GPL v2"); | ||
213 | MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>"); | ||
214 | MODULE_DESCRIPTION("HiSilicon Accelerator SGL support"); | ||
diff --git a/drivers/crypto/hisilicon/sgl.h b/drivers/crypto/hisilicon/sgl.h new file mode 100644 index 000000000000..3ac8871c7acf --- /dev/null +++ b/drivers/crypto/hisilicon/sgl.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* Copyright (c) 2019 HiSilicon Limited. */ | ||
3 | #ifndef HISI_ACC_SGL_H | ||
4 | #define HISI_ACC_SGL_H | ||
5 | |||
6 | struct hisi_acc_sgl_pool { | ||
7 | struct hisi_acc_hw_sgl *sgl; | ||
8 | dma_addr_t sgl_dma; | ||
9 | size_t size; | ||
10 | u32 count; | ||
11 | size_t sgl_size; | ||
12 | }; | ||
13 | |||
14 | struct hisi_acc_hw_sgl * | ||
15 | hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, | ||
16 | struct scatterlist *sgl, | ||
17 | struct hisi_acc_sgl_pool *pool, | ||
18 | u32 index, dma_addr_t *hw_sgl_dma); | ||
19 | void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl, | ||
20 | struct hisi_acc_hw_sgl *hw_sgl); | ||
21 | int hisi_acc_create_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool, | ||
22 | u32 count); | ||
23 | void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool); | ||
24 | #endif | ||
diff --git a/drivers/crypto/hisilicon/zip/Makefile b/drivers/crypto/hisilicon/zip/Makefile new file mode 100644 index 000000000000..a936f099ee22 --- /dev/null +++ b/drivers/crypto/hisilicon/zip/Makefile | |||
@@ -0,0 +1,2 @@ | |||
1 | obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += hisi_zip.o | ||
2 | hisi_zip-objs = zip_main.o zip_crypto.o | ||
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h new file mode 100644 index 000000000000..ffb00d987d02 --- /dev/null +++ b/drivers/crypto/hisilicon/zip/zip.h | |||
@@ -0,0 +1,71 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* Copyright (c) 2019 HiSilicon Limited. */ | ||
3 | #ifndef HISI_ZIP_H | ||
4 | #define HISI_ZIP_H | ||
5 | |||
6 | #undef pr_fmt | ||
7 | #define pr_fmt(fmt) "hisi_zip: " fmt | ||
8 | |||
9 | #include <linux/list.h> | ||
10 | #include "../qm.h" | ||
11 | #include "../sgl.h" | ||
12 | |||
13 | /* hisi_zip_sqe dw3 */ | ||
14 | #define HZIP_BD_STATUS_M GENMASK(7, 0) | ||
15 | /* hisi_zip_sqe dw9 */ | ||
16 | #define HZIP_REQ_TYPE_M GENMASK(7, 0) | ||
17 | #define HZIP_ALG_TYPE_ZLIB 0x02 | ||
18 | #define HZIP_ALG_TYPE_GZIP 0x03 | ||
19 | #define HZIP_BUF_TYPE_M GENMASK(11, 8) | ||
20 | #define HZIP_PBUFFER 0x0 | ||
21 | #define HZIP_SGL 0x1 | ||
22 | |||
23 | enum hisi_zip_error_type { | ||
24 | /* negative compression: output is larger than input */ | ||
25 | HZIP_NC_ERR = 0x0d, | ||
26 | }; | ||
27 | |||
28 | struct hisi_zip_ctrl; | ||
29 | |||
30 | struct hisi_zip { | ||
31 | struct hisi_qm qm; | ||
32 | struct list_head list; | ||
33 | struct hisi_zip_ctrl *ctrl; | ||
34 | }; | ||
35 | |||
36 | struct hisi_zip_sqe { | ||
37 | u32 consumed; | ||
38 | u32 produced; | ||
39 | u32 comp_data_length; | ||
40 | u32 dw3; | ||
41 | u32 input_data_length; | ||
42 | u32 lba_l; | ||
43 | u32 lba_h; | ||
44 | u32 dw7; | ||
45 | u32 dw8; | ||
46 | u32 dw9; | ||
47 | u32 dw10; | ||
48 | u32 priv_info; | ||
49 | u32 dw12; | ||
50 | u32 tag; | ||
51 | u32 dest_avail_out; | ||
52 | u32 rsvd0; | ||
53 | u32 comp_head_addr_l; | ||
54 | u32 comp_head_addr_h; | ||
55 | u32 source_addr_l; | ||
56 | u32 source_addr_h; | ||
57 | u32 dest_addr_l; | ||
58 | u32 dest_addr_h; | ||
59 | u32 stream_ctx_addr_l; | ||
60 | u32 stream_ctx_addr_h; | ||
61 | u32 cipher_key1_addr_l; | ||
62 | u32 cipher_key1_addr_h; | ||
63 | u32 cipher_key2_addr_l; | ||
64 | u32 cipher_key2_addr_h; | ||
65 | u32 rsvd1[4]; | ||
66 | }; | ||
67 | |||
68 | struct hisi_zip *find_zip_device(int node); | ||
69 | int hisi_zip_register_to_crypto(void); | ||
70 | void hisi_zip_unregister_from_crypto(void); | ||
71 | #endif | ||
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c new file mode 100644 index 000000000000..5a3f84dcdcde --- /dev/null +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c | |||
@@ -0,0 +1,653 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* Copyright (c) 2019 HiSilicon Limited. */ | ||
3 | #include <crypto/internal/acompress.h> | ||
4 | #include <linux/bitfield.h> | ||
5 | #include <linux/dma-mapping.h> | ||
6 | #include <linux/scatterlist.h> | ||
7 | #include "zip.h" | ||
8 | |||
9 | #define HZIP_ZLIB_HEAD_SIZE 2 | ||
10 | #define HZIP_GZIP_HEAD_SIZE 10 | ||
11 | |||
12 | #define GZIP_HEAD_FHCRC_BIT BIT(1) | ||
13 | #define GZIP_HEAD_FEXTRA_BIT BIT(2) | ||
14 | #define GZIP_HEAD_FNAME_BIT BIT(3) | ||
15 | #define GZIP_HEAD_FCOMMENT_BIT BIT(4) | ||
16 | |||
17 | #define GZIP_HEAD_FLG_SHIFT 3 | ||
18 | #define GZIP_HEAD_FEXTRA_SHIFT 10 | ||
19 | #define GZIP_HEAD_FEXTRA_XLEN 2 | ||
20 | #define GZIP_HEAD_FHCRC_SIZE 2 | ||
21 | |||
22 | #define HZIP_CTX_Q_NUM 2 | ||
23 | #define HZIP_GZIP_HEAD_BUF 256 | ||
24 | #define HZIP_ALG_PRIORITY 300 | ||
25 | |||
26 | static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c}; | ||
27 | static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = {0x1f, 0x8b, 0x08, 0x0, 0x0, | ||
28 | 0x0, 0x0, 0x0, 0x0, 0x03}; | ||
29 | enum hisi_zip_alg_type { | ||
30 | HZIP_ALG_TYPE_COMP = 0, | ||
31 | HZIP_ALG_TYPE_DECOMP = 1, | ||
32 | }; | ||
33 | |||
34 | #define COMP_NAME_TO_TYPE(alg_name) \ | ||
35 | (!strcmp((alg_name), "zlib-deflate") ? HZIP_ALG_TYPE_ZLIB : \ | ||
36 | !strcmp((alg_name), "gzip") ? HZIP_ALG_TYPE_GZIP : 0) \ | ||
37 | |||
38 | #define TO_HEAD_SIZE(req_type) \ | ||
39 | (((req_type) == HZIP_ALG_TYPE_ZLIB) ? sizeof(zlib_head) : \ | ||
40 | ((req_type) == HZIP_ALG_TYPE_GZIP) ? sizeof(gzip_head) : 0) \ | ||
41 | |||
42 | #define TO_HEAD(req_type) \ | ||
43 | (((req_type) == HZIP_ALG_TYPE_ZLIB) ? zlib_head : \ | ||
44 | ((req_type) == HZIP_ALG_TYPE_GZIP) ? gzip_head : 0) \ | ||
45 | |||
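For reference, these helpers are expected to resolve as follows (a sketch checked against the definitions above, not exhaustive):

	/* COMP_NAME_TO_TYPE("gzip")        == HZIP_ALG_TYPE_GZIP (0x03) */
	/* TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB) == sizeof(zlib_head) == 2    */
	/* TO_HEAD(HZIP_ALG_TYPE_GZIP)      == gzip_head                 */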
46 | struct hisi_zip_req { | ||
47 | struct acomp_req *req; | ||
48 | struct scatterlist *src; | ||
49 | struct scatterlist *dst; | ||
50 | size_t slen; | ||
51 | size_t dlen; | ||
52 | struct hisi_acc_hw_sgl *hw_src; | ||
53 | struct hisi_acc_hw_sgl *hw_dst; | ||
54 | dma_addr_t dma_src; | ||
55 | dma_addr_t dma_dst; | ||
56 | int req_id; | ||
57 | }; | ||
58 | |||
59 | struct hisi_zip_req_q { | ||
60 | struct hisi_zip_req *q; | ||
61 | unsigned long *req_bitmap; | ||
62 | rwlock_t req_lock; | ||
63 | u16 size; | ||
64 | }; | ||
65 | |||
66 | struct hisi_zip_qp_ctx { | ||
67 | struct hisi_qp *qp; | ||
68 | struct hisi_zip_sqe zip_sqe; | ||
69 | struct hisi_zip_req_q req_q; | ||
70 | struct hisi_acc_sgl_pool sgl_pool; | ||
71 | struct hisi_zip *zip_dev; | ||
72 | struct hisi_zip_ctx *ctx; | ||
73 | }; | ||
74 | |||
75 | struct hisi_zip_ctx { | ||
76 | #define QPC_COMP 0 | ||
77 | #define QPC_DECOMP 1 | ||
78 | struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM]; | ||
79 | }; | ||
80 | |||
81 | static void hisi_zip_config_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type) | ||
82 | { | ||
83 | u32 val; | ||
84 | |||
85 | val = (sqe->dw9) & ~HZIP_BUF_TYPE_M; | ||
86 | val |= FIELD_PREP(HZIP_BUF_TYPE_M, buf_type); | ||
87 | sqe->dw9 = val; | ||
88 | } | ||
89 | |||
90 | static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag) | ||
91 | { | ||
92 | sqe->tag = tag; | ||
93 | } | ||
94 | |||
95 | static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type, | ||
96 | dma_addr_t s_addr, dma_addr_t d_addr, u32 slen, | ||
97 | u32 dlen) | ||
98 | { | ||
99 | memset(sqe, 0, sizeof(struct hisi_zip_sqe)); | ||
100 | |||
101 | sqe->input_data_length = slen; | ||
102 | sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type); | ||
103 | sqe->dest_avail_out = dlen; | ||
104 | sqe->source_addr_l = lower_32_bits(s_addr); | ||
105 | sqe->source_addr_h = upper_32_bits(s_addr); | ||
106 | sqe->dest_addr_l = lower_32_bits(d_addr); | ||
107 | sqe->dest_addr_h = upper_32_bits(d_addr); | ||
108 | } | ||
109 | |||
110 | static int hisi_zip_create_qp(struct hisi_qm *qm, struct hisi_zip_qp_ctx *ctx, | ||
111 | int alg_type, int req_type) | ||
112 | { | ||
113 | struct hisi_qp *qp; | ||
114 | int ret; | ||
115 | |||
116 | qp = hisi_qm_create_qp(qm, alg_type); | ||
117 | if (IS_ERR(qp)) | ||
118 | return PTR_ERR(qp); | ||
119 | |||
120 | qp->req_type = req_type; | ||
121 | qp->qp_ctx = ctx; | ||
122 | ctx->qp = qp; | ||
123 | |||
124 | ret = hisi_qm_start_qp(qp, 0); | ||
125 | if (ret < 0) | ||
126 | goto err_release_qp; | ||
127 | |||
128 | return 0; | ||
129 | |||
130 | err_release_qp: | ||
131 | hisi_qm_release_qp(qp); | ||
132 | return ret; | ||
133 | } | ||
134 | |||
135 | static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx) | ||
136 | { | ||
137 | hisi_qm_stop_qp(ctx->qp); | ||
138 | hisi_qm_release_qp(ctx->qp); | ||
139 | } | ||
140 | |||
141 | static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type) | ||
142 | { | ||
143 | struct hisi_zip *hisi_zip; | ||
144 | struct hisi_qm *qm; | ||
145 | int ret, i, j; | ||
146 | |||
147 | /* find the proper zip device */ | ||
148 | hisi_zip = find_zip_device(cpu_to_node(smp_processor_id())); | ||
149 | if (!hisi_zip) { | ||
150 | pr_err("Failed to find a proper ZIP device!\n"); | ||
151 | return -ENODEV; | ||
152 | } | ||
153 | qm = &hisi_zip->qm; | ||
154 | |||
155 | for (i = 0; i < HZIP_CTX_Q_NUM; i++) { | ||
156 | /* alg_type = 0 for compress, 1 for decompress in hw sqe */ | ||
157 | ret = hisi_zip_create_qp(qm, &hisi_zip_ctx->qp_ctx[i], i, | ||
158 | req_type); | ||
159 | if (ret) | ||
160 | goto err; | ||
161 | |||
162 | hisi_zip_ctx->qp_ctx[i].zip_dev = hisi_zip; | ||
163 | } | ||
164 | |||
165 | return 0; | ||
166 | err: | ||
167 | for (j = i - 1; j >= 0; j--) | ||
168 | hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[j]); | ||
169 | |||
170 | return ret; | ||
171 | } | ||
172 | |||
173 | static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx) | ||
174 | { | ||
175 | int i; | ||
176 | |||
177 | for (i = 1; i >= 0; i--) | ||
178 | hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]); | ||
179 | } | ||
180 | |||
181 | static u16 get_extra_field_size(const u8 *start) | ||
182 | { | ||
183 | return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN; | ||
184 | } | ||
185 | |||
186 | static u32 get_name_field_size(const u8 *start) | ||
187 | { | ||
188 | return strlen(start) + 1; | ||
189 | } | ||
190 | |||
191 | static u32 get_comment_field_size(const u8 *start) | ||
192 | { | ||
193 | return strlen(start) + 1; | ||
194 | } | ||
195 | |||
196 | static u32 __get_gzip_head_size(const u8 *src) | ||
197 | { | ||
198 | u8 head_flg = *(src + GZIP_HEAD_FLG_SHIFT); | ||
199 | u32 size = GZIP_HEAD_FEXTRA_SHIFT; | ||
200 | |||
201 | if (head_flg & GZIP_HEAD_FEXTRA_BIT) | ||
202 | size += get_extra_field_size(src + size); | ||
203 | if (head_flg & GZIP_HEAD_FNAME_BIT) | ||
204 | size += get_name_field_size(src + size); | ||
205 | if (head_flg & GZIP_HEAD_FCOMMENT_BIT) | ||
206 | size += get_comment_field_size(src + size); | ||
207 | if (head_flg & GZIP_HEAD_FHCRC_BIT) | ||
208 | size += GZIP_HEAD_FHCRC_SIZE; | ||
209 | |||
210 | return size; | ||
211 | } | ||
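To make the size computation concrete: for a hypothetical header whose FLG byte carries only FNAME, the fixed 10 bytes are followed by a NUL-terminated file name, so with the name "a.txt" the head is 16 bytes:

	/* Hypothetical input, not from this patch: */
	static const u8 example_head[] = {
		0x1f, 0x8b, 0x08, 0x08,		/* magic, deflate, FLG = FNAME */
		0x00, 0x00, 0x00, 0x00,		/* mtime */
		0x00, 0x03,			/* XFL, OS */
		'a', '.', 't', 'x', 't', '\0',	/* FNAME field */
	};
	/* __get_gzip_head_size(example_head) == GZIP_HEAD_FEXTRA_SHIFT + 6 == 16 */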
212 | |||
213 | static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) | ||
214 | { | ||
215 | struct hisi_zip_req_q *req_q; | ||
216 | int i, ret; | ||
217 | |||
218 | for (i = 0; i < HZIP_CTX_Q_NUM; i++) { | ||
219 | req_q = &ctx->qp_ctx[i].req_q; | ||
220 | req_q->size = QM_Q_DEPTH; | ||
221 | |||
222 | req_q->req_bitmap = kcalloc(BITS_TO_LONGS(req_q->size), | ||
223 | sizeof(long), GFP_KERNEL); | ||
224 | if (!req_q->req_bitmap) { | ||
225 | ret = -ENOMEM; | ||
226 | if (i == 0) | ||
227 | return ret; | ||
228 | |||
229 | goto err_free_loop0; | ||
230 | } | ||
231 | rwlock_init(&req_q->req_lock); | ||
232 | |||
233 | req_q->q = kcalloc(req_q->size, sizeof(struct hisi_zip_req), | ||
234 | GFP_KERNEL); | ||
235 | if (!req_q->q) { | ||
236 | ret = -ENOMEM; | ||
237 | if (i == 0) | ||
238 | goto err_free_bitmap; | ||
239 | else | ||
240 | goto err_free_loop1; | ||
241 | } | ||
242 | } | ||
243 | |||
244 | return 0; | ||
245 | |||
246 | err_free_loop1: | ||
247 | kfree(ctx->qp_ctx[QPC_DECOMP].req_q.req_bitmap); | ||
248 | err_free_loop0: | ||
249 | kfree(ctx->qp_ctx[QPC_COMP].req_q.q); | ||
250 | err_free_bitmap: | ||
251 | kfree(ctx->qp_ctx[QPC_COMP].req_q.req_bitmap); | ||
252 | return ret; | ||
253 | } | ||
254 | |||
255 | static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx) | ||
256 | { | ||
257 | int i; | ||
258 | |||
259 | for (i = 0; i < HZIP_CTX_Q_NUM; i++) { | ||
260 | kfree(ctx->qp_ctx[i].req_q.q); | ||
261 | kfree(ctx->qp_ctx[i].req_q.req_bitmap); | ||
262 | } | ||
263 | } | ||
264 | |||
265 | static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx) | ||
266 | { | ||
267 | struct hisi_zip_qp_ctx *tmp; | ||
268 | int i, ret; | ||
269 | |||
270 | for (i = 0; i < HZIP_CTX_Q_NUM; i++) { | ||
271 | tmp = &ctx->qp_ctx[i]; | ||
272 | ret = hisi_acc_create_sgl_pool(&tmp->qp->qm->pdev->dev, | ||
273 | &tmp->sgl_pool, | ||
274 | QM_Q_DEPTH << 1); | ||
275 | if (ret < 0) { | ||
276 | if (i == 1) | ||
277 | goto err_free_sgl_pool0; | ||
278 | return -ENOMEM; | ||
279 | } | ||
280 | } | ||
281 | |||
282 | return 0; | ||
283 | |||
284 | err_free_sgl_pool0: | ||
285 | hisi_acc_free_sgl_pool(&ctx->qp_ctx[QPC_COMP].qp->qm->pdev->dev, | ||
286 | &ctx->qp_ctx[QPC_COMP].sgl_pool); | ||
287 | return -ENOMEM; | ||
288 | } | ||
289 | |||
290 | static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx) | ||
291 | { | ||
292 | int i; | ||
293 | |||
294 | for (i = 0; i < HZIP_CTX_Q_NUM; i++) | ||
295 | hisi_acc_free_sgl_pool(&ctx->qp_ctx[i].qp->qm->pdev->dev, | ||
296 | &ctx->qp_ctx[i].sgl_pool); | ||
297 | } | ||
298 | |||
299 | static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx, | ||
300 | struct hisi_zip_req *req) | ||
301 | { | ||
302 | struct hisi_zip_req_q *req_q = &qp_ctx->req_q; | ||
303 | |||
304 | if (qp_ctx->qp->alg_type == HZIP_ALG_TYPE_COMP) | ||
305 | kfree(req->dst); | ||
306 | else | ||
307 | kfree(req->src); | ||
308 | |||
309 | write_lock(&req_q->req_lock); | ||
310 | clear_bit(req->req_id, req_q->req_bitmap); | ||
311 | memset(req, 0, sizeof(struct hisi_zip_req)); | ||
312 | write_unlock(&req_q->req_lock); | ||
313 | } | ||
314 | |||
315 | static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) | ||
316 | { | ||
317 | struct hisi_zip_sqe *sqe = data; | ||
318 | struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx; | ||
319 | struct hisi_zip_req_q *req_q = &qp_ctx->req_q; | ||
320 | struct hisi_zip_req *req = req_q->q + sqe->tag; | ||
321 | struct acomp_req *acomp_req = req->req; | ||
322 | struct device *dev = &qp->qm->pdev->dev; | ||
323 | u32 status, dlen, head_size; | ||
324 | int err = 0; | ||
325 | |||
326 | status = sqe->dw3 & HZIP_BD_STATUS_M; | ||
327 | |||
328 | if (status != 0 && status != HZIP_NC_ERR) { | ||
329 | dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n", | ||
330 | (qp->alg_type == 0) ? "" : "de", qp->qp_id, status, | ||
331 | sqe->produced); | ||
332 | err = -EIO; | ||
333 | } | ||
334 | dlen = sqe->produced; | ||
335 | |||
336 | hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src); | ||
337 | hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst); | ||
338 | |||
339 | head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0; | ||
340 | acomp_req->dlen = dlen + head_size; | ||
341 | |||
342 | if (acomp_req->base.complete) | ||
343 | acomp_request_complete(acomp_req, err); | ||
344 | |||
345 | hisi_zip_remove_req(qp_ctx, req); | ||
346 | } | ||
347 | |||
348 | static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx, | ||
349 | void (*fn)(struct hisi_qp *, void *)) | ||
350 | { | ||
351 | int i; | ||
352 | |||
353 | for (i = 0; i < HZIP_CTX_Q_NUM; i++) | ||
354 | ctx->qp_ctx[i].qp->req_cb = fn; | ||
355 | } | ||
356 | |||
357 | static int hisi_zip_acomp_init(struct crypto_acomp *tfm) | ||
358 | { | ||
359 | const char *alg_name = crypto_tfm_alg_name(&tfm->base); | ||
360 | struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base); | ||
361 | int ret; | ||
362 | |||
363 | ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name)); | ||
364 | if (ret) | ||
365 | return ret; | ||
366 | |||
367 | ret = hisi_zip_create_req_q(ctx); | ||
368 | if (ret) | ||
369 | goto err_ctx_exit; | ||
370 | |||
371 | ret = hisi_zip_create_sgl_pool(ctx); | ||
372 | if (ret) | ||
373 | goto err_release_req_q; | ||
374 | |||
375 | hisi_zip_set_acomp_cb(ctx, hisi_zip_acomp_cb); | ||
376 | |||
377 | return 0; | ||
378 | |||
379 | err_release_req_q: | ||
380 | hisi_zip_release_req_q(ctx); | ||
381 | err_ctx_exit: | ||
382 | hisi_zip_ctx_exit(ctx); | ||
383 | return ret; | ||
384 | } | ||
385 | |||
386 | static void hisi_zip_acomp_exit(struct crypto_acomp *tfm) | ||
387 | { | ||
388 | struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base); | ||
389 | |||
390 | hisi_zip_set_acomp_cb(ctx, NULL); | ||
391 | hisi_zip_release_sgl_pool(ctx); | ||
392 | hisi_zip_release_req_q(ctx); | ||
393 | hisi_zip_ctx_exit(ctx); | ||
394 | } | ||
395 | |||
396 | static int add_comp_head(struct scatterlist *dst, u8 req_type) | ||
397 | { | ||
398 | int head_size = TO_HEAD_SIZE(req_type); | ||
399 | const u8 *head = TO_HEAD(req_type); | ||
400 | int ret; | ||
401 | |||
402 | ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size); | ||
403 | if (ret != head_size) | ||
404 | return -ENOMEM; | ||
405 | |||
406 | return head_size; | ||
407 | } | ||
408 | |||
409 | static size_t get_gzip_head_size(struct scatterlist *sgl) | ||
410 | { | ||
411 | char buf[HZIP_GZIP_HEAD_BUF]; | ||
412 | |||
413 | sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf)); | ||
414 | |||
415 | return __get_gzip_head_size(buf); | ||
416 | } | ||
417 | |||
418 | static size_t get_comp_head_size(struct scatterlist *src, u8 req_type) | ||
419 | { | ||
420 | switch (req_type) { | ||
421 | case HZIP_ALG_TYPE_ZLIB: | ||
422 | return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB); | ||
423 | case HZIP_ALG_TYPE_GZIP: | ||
424 | return get_gzip_head_size(src); | ||
425 | default: | ||
426 | pr_err("request type is not supported!\n"); | ||
427 | return -EINVAL; | ||
428 | } | ||
429 | } | ||
430 | |||
431 | static int get_sg_skip_bytes(struct scatterlist *sgl, size_t bytes, | ||
432 | size_t remains, struct scatterlist **out) | ||
433 | { | ||
434 | #define SPLIT_NUM 2 | ||
435 | size_t split_sizes[SPLIT_NUM]; | ||
436 | int out_mapped_nents[SPLIT_NUM]; | ||
437 | |||
438 | split_sizes[0] = bytes; | ||
439 | split_sizes[1] = remains; | ||
440 | |||
441 | return sg_split(sgl, 0, 0, SPLIT_NUM, split_sizes, out, | ||
442 | out_mapped_nents, GFP_KERNEL); | ||
443 | } | ||
444 | |||
445 | static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, | ||
446 | struct hisi_zip_qp_ctx *qp_ctx, | ||
447 | size_t head_size, bool is_comp) | ||
448 | { | ||
449 | struct hisi_zip_req_q *req_q = &qp_ctx->req_q; | ||
450 | struct hisi_zip_req *q = req_q->q; | ||
451 | struct hisi_zip_req *req_cache; | ||
452 | struct scatterlist *out[2]; | ||
453 | struct scatterlist *sgl; | ||
454 | size_t len; | ||
455 | int ret, req_id; | ||
456 | |||
457 | /* | ||
458 | * Remove/add the zlib/gzip head, as hardware operations do not include | ||
459 | * the comp head: on decompression, split req->src to get an sgl without | ||
460 | * the head; on compression, write the comp head to req->dst ahead of the | ||
461 | * hardware output, which goes into an sgl split from req->dst. | ||
462 | */ | ||
463 | if (is_comp) { | ||
464 | sgl = req->dst; | ||
465 | len = req->dlen - head_size; | ||
466 | } else { | ||
467 | sgl = req->src; | ||
468 | len = req->slen - head_size; | ||
469 | } | ||
470 | |||
471 | ret = get_sg_skip_bytes(sgl, head_size, len, out); | ||
472 | if (ret) | ||
473 | return ERR_PTR(ret); | ||
474 | |||
475 | /* the sgl covering the comp head is no longer needed, free it now */ | ||
476 | kfree(out[0]); | ||
477 | |||
478 | write_lock(&req_q->req_lock); | ||
479 | |||
480 | req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size); | ||
481 | if (req_id >= req_q->size) { | ||
482 | write_unlock(&req_q->req_lock); | ||
483 | dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n"); | ||
484 | kfree(out[1]); | ||
485 | return ERR_PTR(-EBUSY); | ||
486 | } | ||
487 | set_bit(req_id, req_q->req_bitmap); | ||
488 | |||
489 | req_cache = q + req_id; | ||
490 | req_cache->req_id = req_id; | ||
491 | req_cache->req = req; | ||
492 | if (is_comp) { | ||
493 | req_cache->src = req->src; | ||
494 | req_cache->dst = out[1]; | ||
495 | req_cache->slen = req->slen; | ||
496 | req_cache->dlen = req->dlen - head_size; | ||
497 | } else { | ||
498 | req_cache->src = out[1]; | ||
499 | req_cache->dst = req->dst; | ||
500 | req_cache->slen = req->slen - head_size; | ||
501 | req_cache->dlen = req->dlen; | ||
502 | } | ||
503 | |||
504 | write_unlock(&req_q->req_lock); | ||
505 | |||
506 | return req_cache; | ||
507 | } | ||
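The req_id handling above is a common bitmap-allocator pattern: a fixed-size bitmap guarded by a rwlock hands out indices into the preallocated req_q->q array. A stripped-down sketch of just that pattern (helper name is hypothetical):

	#include <linux/bitmap.h>
	#include <linux/errno.h>
	#include <linux/spinlock.h>

	/* Illustrative sketch of the slot allocation used above. */
	static int alloc_slot(unsigned long *bitmap, rwlock_t *lock,
			      unsigned int size)
	{
		int id;

		write_lock(lock);
		id = find_first_zero_bit(bitmap, size);
		if (id >= size) {
			write_unlock(lock);
			return -EBUSY;	/* all slots busy, caller may retry */
		}
		set_bit(id, bitmap);	/* claim the slot before unlocking */
		write_unlock(lock);

		return id;
	}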
508 | |||
509 | static int hisi_zip_do_work(struct hisi_zip_req *req, | ||
510 | struct hisi_zip_qp_ctx *qp_ctx) | ||
511 | { | ||
512 | struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe; | ||
513 | struct hisi_qp *qp = qp_ctx->qp; | ||
514 | struct device *dev = &qp->qm->pdev->dev; | ||
515 | struct hisi_acc_sgl_pool *pool = &qp_ctx->sgl_pool; | ||
516 | dma_addr_t input; | ||
517 | dma_addr_t output; | ||
518 | int ret; | ||
519 | |||
520 | if (!req->src || !req->slen || !req->dst || !req->dlen) | ||
521 | return -EINVAL; | ||
522 | |||
523 | req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->src, pool, | ||
524 | req->req_id << 1, &input); | ||
525 | if (IS_ERR(req->hw_src)) | ||
526 | return PTR_ERR(req->hw_src); | ||
527 | req->dma_src = input; | ||
528 | |||
529 | req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->dst, pool, | ||
530 | (req->req_id << 1) + 1, | ||
531 | &output); | ||
532 | if (IS_ERR(req->hw_dst)) { | ||
533 | ret = PTR_ERR(req->hw_dst); | ||
534 | goto err_unmap_input; | ||
535 | } | ||
536 | req->dma_dst = output; | ||
537 | |||
538 | hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, req->slen, | ||
539 | req->dlen); | ||
540 | hisi_zip_config_buf_type(zip_sqe, HZIP_SGL); | ||
541 | hisi_zip_config_tag(zip_sqe, req->req_id); | ||
542 | |||
543 | /* send command to start a task */ | ||
544 | ret = hisi_qp_send(qp, zip_sqe); | ||
545 | if (ret < 0) | ||
546 | goto err_unmap_output; | ||
547 | |||
548 | return -EINPROGRESS; | ||
549 | |||
550 | err_unmap_output: | ||
551 | hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst); | ||
552 | err_unmap_input: | ||
553 | hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src); | ||
554 | return ret; | ||
555 | } | ||
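Note the hardware SGL pool indexing above: every request owns a fixed pair of pool slots, req_id << 1 for the source buffer and (req_id << 1) + 1 for the destination, so a pool sized at twice the request-queue depth serves both directions without extra allocation or bookkeeping.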
556 | |||
557 | static int hisi_zip_acompress(struct acomp_req *acomp_req) | ||
558 | { | ||
559 | struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm); | ||
560 | struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[QPC_COMP]; | ||
561 | struct hisi_zip_req *req; | ||
562 | int head_size; | ||
563 | int ret; | ||
564 | |||
565 | /* let's output compression head now */ | ||
566 | head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type); | ||
567 | if (head_size < 0) | ||
568 | return head_size; | ||
569 | |||
570 | req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true); | ||
571 | if (IS_ERR(req)) | ||
572 | return PTR_ERR(req); | ||
573 | |||
574 | ret = hisi_zip_do_work(req, qp_ctx); | ||
575 | if (ret != -EINPROGRESS) | ||
576 | hisi_zip_remove_req(qp_ctx, req); | ||
577 | |||
578 | return ret; | ||
579 | } | ||
580 | |||
581 | static int hisi_zip_adecompress(struct acomp_req *acomp_req) | ||
582 | { | ||
583 | struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm); | ||
584 | struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[QPC_DECOMP]; | ||
585 | struct hisi_zip_req *req; | ||
586 | int head_size; | ||
587 | int ret; | ||
588 | |||
589 | head_size = get_comp_head_size(acomp_req->src, qp_ctx->qp->req_type); | ||
590 | if (head_size < 0) | ||
591 | return head_size; | ||
592 | |||
591 | req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false); | ||
592 | if (IS_ERR(req)) | ||
593 | return PTR_ERR(req); | ||
594 | |||
595 | ret = hisi_zip_do_work(req, qp_ctx); | ||
596 | if (ret != -EINPROGRESS) | ||
597 | hisi_zip_remove_req(qp_ctx, req); | ||
598 | |||
599 | return ret; | ||
600 | } | ||
601 | |||
602 | static struct acomp_alg hisi_zip_acomp_zlib = { | ||
603 | .init = hisi_zip_acomp_init, | ||
604 | .exit = hisi_zip_acomp_exit, | ||
605 | .compress = hisi_zip_acompress, | ||
606 | .decompress = hisi_zip_adecompress, | ||
607 | .base = { | ||
608 | .cra_name = "zlib-deflate", | ||
609 | .cra_driver_name = "hisi-zlib-acomp", | ||
610 | .cra_module = THIS_MODULE, | ||
611 | .cra_priority = HZIP_ALG_PRIORITY, | ||
612 | .cra_ctxsize = sizeof(struct hisi_zip_ctx), | ||
613 | } | ||
614 | }; | ||
615 | |||
616 | static struct acomp_alg hisi_zip_acomp_gzip = { | ||
617 | .init = hisi_zip_acomp_init, | ||
618 | .exit = hisi_zip_acomp_exit, | ||
619 | .compress = hisi_zip_acompress, | ||
620 | .decompress = hisi_zip_adecompress, | ||
621 | .base = { | ||
622 | .cra_name = "gzip", | ||
623 | .cra_driver_name = "hisi-gzip-acomp", | ||
624 | .cra_module = THIS_MODULE, | ||
625 | .cra_priority = HZIP_ALG_PRIORITY, | ||
626 | .cra_ctxsize = sizeof(struct hisi_zip_ctx), | ||
627 | } | ||
628 | }; | ||
629 | |||
630 | int hisi_zip_register_to_crypto(void) | ||
631 | { | ||
632 | int ret = 0; | ||
633 | |||
634 | ret = crypto_register_acomp(&hisi_zip_acomp_zlib); | ||
635 | if (ret) { | ||
636 | pr_err("Zlib acomp algorithm registration failed\n"); | ||
637 | return ret; | ||
638 | } | ||
639 | |||
640 | ret = crypto_register_acomp(&hisi_zip_acomp_gzip); | ||
641 | if (ret) { | ||
642 | pr_err("Gzip acomp algorithm registration failed\n"); | ||
643 | crypto_unregister_acomp(&hisi_zip_acomp_zlib); | ||
644 | } | ||
645 | |||
646 | return ret; | ||
647 | } | ||
648 | |||
649 | void hisi_zip_unregister_from_crypto(void) | ||
650 | { | ||
651 | crypto_unregister_acomp(&hisi_zip_acomp_gzip); | ||
652 | crypto_unregister_acomp(&hisi_zip_acomp_zlib); | ||
653 | } | ||
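For context, once registered these algorithms are reachable through the kernel's generic acomp interface. A hedged sketch of a consumer (the function name and buffer handling are illustrative only, not part of this driver; buffers must be DMA-able, e.g. kmalloc'ed):

	#include <crypto/acompress.h>
	#include <linux/crypto.h>
	#include <linux/scatterlist.h>

	/* Illustrative consumer sketch, not part of this driver. */
	static int demo_zlib_compress(void *src_buf, unsigned int slen,
				      void *dst_buf, unsigned int dlen)
	{
		struct crypto_acomp *tfm;
		struct acomp_req *req;
		struct scatterlist src, dst;
		DECLARE_CRYPTO_WAIT(wait);
		int ret;

		tfm = crypto_alloc_acomp("zlib-deflate", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = acomp_request_alloc(tfm);
		if (!req) {
			ret = -ENOMEM;
			goto out_free_tfm;
		}

		sg_init_one(&src, src_buf, slen);
		sg_init_one(&dst, dst_buf, dlen);
		acomp_request_set_params(req, &src, &dst, slen, dlen);
		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   crypto_req_done, &wait);

		/* hisi_zip_acompress() returns -EINPROGRESS; wait for irq */
		ret = crypto_wait_req(crypto_acomp_compress(req), &wait);

		acomp_request_free(req);
	out_free_tfm:
		crypto_free_acomp(tfm);
		return ret;
	}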
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c new file mode 100644 index 000000000000..6e0ca75585d4 --- /dev/null +++ b/drivers/crypto/hisilicon/zip/zip_main.c | |||
@@ -0,0 +1,1013 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* Copyright (c) 2019 HiSilicon Limited. */ | ||
3 | #include <linux/acpi.h> | ||
4 | #include <linux/aer.h> | ||
5 | #include <linux/bitops.h> | ||
6 | #include <linux/debugfs.h> | ||
7 | #include <linux/init.h> | ||
8 | #include <linux/io.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/pci.h> | ||
12 | #include <linux/seq_file.h> | ||
13 | #include <linux/topology.h> | ||
14 | #include "zip.h" | ||
15 | |||
16 | #define PCI_DEVICE_ID_ZIP_PF 0xa250 | ||
17 | #define PCI_DEVICE_ID_ZIP_VF 0xa251 | ||
18 | |||
19 | #define HZIP_VF_NUM 63 | ||
20 | #define HZIP_QUEUE_NUM_V1 4096 | ||
21 | #define HZIP_QUEUE_NUM_V2 1024 | ||
22 | |||
23 | #define HZIP_CLOCK_GATE_CTRL 0x301004 | ||
24 | #define COMP0_ENABLE BIT(0) | ||
25 | #define COMP1_ENABLE BIT(1) | ||
26 | #define DECOMP0_ENABLE BIT(2) | ||
27 | #define DECOMP1_ENABLE BIT(3) | ||
28 | #define DECOMP2_ENABLE BIT(4) | ||
29 | #define DECOMP3_ENABLE BIT(5) | ||
30 | #define DECOMP4_ENABLE BIT(6) | ||
31 | #define DECOMP5_ENABLE BIT(7) | ||
32 | #define ALL_COMP_DECOMP_EN (COMP0_ENABLE | COMP1_ENABLE | \ | ||
33 | DECOMP0_ENABLE | DECOMP1_ENABLE | \ | ||
34 | DECOMP2_ENABLE | DECOMP3_ENABLE | \ | ||
35 | DECOMP4_ENABLE | DECOMP5_ENABLE) | ||
36 | #define DECOMP_CHECK_ENABLE BIT(16) | ||
37 | #define HZIP_FSM_MAX_CNT 0x301008 | ||
38 | |||
39 | #define HZIP_PORT_ARCA_CHE_0 0x301040 | ||
40 | #define HZIP_PORT_ARCA_CHE_1 0x301044 | ||
41 | #define HZIP_PORT_AWCA_CHE_0 0x301060 | ||
42 | #define HZIP_PORT_AWCA_CHE_1 0x301064 | ||
43 | #define CACHE_ALL_EN 0xffffffff | ||
44 | |||
45 | #define HZIP_BD_RUSER_32_63 0x301110 | ||
46 | #define HZIP_SGL_RUSER_32_63 0x30111c | ||
47 | #define HZIP_DATA_RUSER_32_63 0x301128 | ||
48 | #define HZIP_DATA_WUSER_32_63 0x301134 | ||
49 | #define HZIP_BD_WUSER_32_63 0x301140 | ||
50 | |||
51 | #define HZIP_QM_IDEL_STATUS 0x3040e4 | ||
52 | |||
53 | #define HZIP_CORE_DEBUG_COMP_0 0x302000 | ||
54 | #define HZIP_CORE_DEBUG_COMP_1 0x303000 | ||
55 | #define HZIP_CORE_DEBUG_DECOMP_0 0x304000 | ||
56 | #define HZIP_CORE_DEBUG_DECOMP_1 0x305000 | ||
57 | #define HZIP_CORE_DEBUG_DECOMP_2 0x306000 | ||
58 | #define HZIP_CORE_DEBUG_DECOMP_3 0x307000 | ||
59 | #define HZIP_CORE_DEBUG_DECOMP_4 0x308000 | ||
60 | #define HZIP_CORE_DEBUG_DECOMP_5 0x309000 | ||
61 | |||
62 | #define HZIP_CORE_INT_SOURCE 0x3010A0 | ||
63 | #define HZIP_CORE_INT_MASK 0x3010A4 | ||
64 | #define HZIP_CORE_INT_STATUS 0x3010AC | ||
65 | #define HZIP_CORE_INT_STATUS_M_ECC BIT(1) | ||
66 | #define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148 | ||
67 | #define SRAM_ECC_ERR_NUM_SHIFT 16 | ||
68 | #define SRAM_ECC_ERR_ADDR_SHIFT 24 | ||
69 | #define HZIP_CORE_INT_DISABLE 0x000007FF | ||
70 | #define HZIP_COMP_CORE_NUM 2 | ||
71 | #define HZIP_DECOMP_CORE_NUM 6 | ||
72 | #define HZIP_CORE_NUM (HZIP_COMP_CORE_NUM + \ | ||
73 | HZIP_DECOMP_CORE_NUM) | ||
74 | #define HZIP_SQE_SIZE 128 | ||
75 | #define HZIP_SQ_SIZE (HZIP_SQE_SIZE * QM_Q_DEPTH) | ||
76 | #define HZIP_PF_DEF_Q_NUM 64 | ||
77 | #define HZIP_PF_DEF_Q_BASE 0 | ||
78 | |||
79 | #define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000 | ||
80 | #define SOFT_CTRL_CNT_CLR_CE_BIT BIT(0) | ||
81 | |||
82 | #define HZIP_NUMA_DISTANCE 100 | ||
83 | #define HZIP_BUF_SIZE 22 | ||
84 | |||
85 | static const char hisi_zip_name[] = "hisi_zip"; | ||
86 | static struct dentry *hzip_debugfs_root; | ||
87 | LIST_HEAD(hisi_zip_list); | ||
88 | DEFINE_MUTEX(hisi_zip_list_lock); | ||
89 | |||
90 | #ifdef CONFIG_NUMA | ||
91 | static struct hisi_zip *find_zip_device_numa(int node) | ||
92 | { | ||
93 | struct hisi_zip *zip = NULL; | ||
94 | struct hisi_zip *hisi_zip; | ||
95 | int min_distance = HZIP_NUMA_DISTANCE; | ||
96 | struct device *dev; | ||
97 | |||
98 | list_for_each_entry(hisi_zip, &hisi_zip_list, list) { | ||
99 | dev = &hisi_zip->qm.pdev->dev; | ||
100 | if (node_distance(dev->numa_node, node) < min_distance) { | ||
101 | zip = hisi_zip; | ||
102 | min_distance = node_distance(dev->numa_node, node); | ||
103 | } | ||
104 | } | ||
105 | |||
106 | return zip; | ||
107 | } | ||
108 | #endif | ||
109 | |||
110 | struct hisi_zip *find_zip_device(int node) | ||
111 | { | ||
112 | struct hisi_zip *zip = NULL; | ||
113 | |||
114 | mutex_lock(&hisi_zip_list_lock); | ||
115 | #ifdef CONFIG_NUMA | ||
116 | zip = find_zip_device_numa(node); | ||
117 | #else | ||
118 | zip = list_first_entry(&hisi_zip_list, struct hisi_zip, list); | ||
119 | #endif | ||
120 | mutex_unlock(&hisi_zip_list_lock); | ||
121 | |||
122 | return zip; | ||
123 | } | ||
124 | |||
125 | struct hisi_zip_hw_error { | ||
126 | u32 int_msk; | ||
127 | const char *msg; | ||
128 | }; | ||
129 | |||
130 | static const struct hisi_zip_hw_error zip_hw_error[] = { | ||
131 | { .int_msk = BIT(0), .msg = "zip_ecc_1bit_err" }, | ||
132 | { .int_msk = BIT(1), .msg = "zip_ecc_2bit_err" }, | ||
133 | { .int_msk = BIT(2), .msg = "zip_axi_rresp_err" }, | ||
134 | { .int_msk = BIT(3), .msg = "zip_axi_bresp_err" }, | ||
135 | { .int_msk = BIT(4), .msg = "zip_src_addr_parse_err" }, | ||
136 | { .int_msk = BIT(5), .msg = "zip_dst_addr_parse_err" }, | ||
137 | { .int_msk = BIT(6), .msg = "zip_pre_in_addr_err" }, | ||
138 | { .int_msk = BIT(7), .msg = "zip_pre_in_data_err" }, | ||
139 | { .int_msk = BIT(8), .msg = "zip_com_inf_err" }, | ||
140 | { .int_msk = BIT(9), .msg = "zip_enc_inf_err" }, | ||
141 | { .int_msk = BIT(10), .msg = "zip_pre_out_err" }, | ||
142 | { /* sentinel */ } | ||
143 | }; | ||
144 | |||
145 | enum ctrl_debug_file_index { | ||
146 | HZIP_CURRENT_QM, | ||
147 | HZIP_CLEAR_ENABLE, | ||
148 | HZIP_DEBUG_FILE_NUM, | ||
149 | }; | ||
150 | |||
151 | static const char * const ctrl_debug_file_name[] = { | ||
152 | [HZIP_CURRENT_QM] = "current_qm", | ||
153 | [HZIP_CLEAR_ENABLE] = "clear_enable", | ||
154 | }; | ||
155 | |||
156 | struct ctrl_debug_file { | ||
157 | enum ctrl_debug_file_index index; | ||
158 | spinlock_t lock; | ||
159 | struct hisi_zip_ctrl *ctrl; | ||
160 | }; | ||
161 | |||
162 | /* | ||
163 | * One ZIP controller has one PF and multiple VFs; the global | ||
164 | * configurations held by the PF live in this structure. | ||
165 | * | ||
166 | * Only relevant for the PF. | ||
167 | */ | ||
168 | struct hisi_zip_ctrl { | ||
169 | u32 num_vfs; | ||
170 | struct hisi_zip *hisi_zip; | ||
171 | struct dentry *debug_root; | ||
172 | struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM]; | ||
173 | }; | ||
174 | |||
175 | enum { | ||
176 | HZIP_COMP_CORE0, | ||
177 | HZIP_COMP_CORE1, | ||
178 | HZIP_DECOMP_CORE0, | ||
179 | HZIP_DECOMP_CORE1, | ||
180 | HZIP_DECOMP_CORE2, | ||
181 | HZIP_DECOMP_CORE3, | ||
182 | HZIP_DECOMP_CORE4, | ||
183 | HZIP_DECOMP_CORE5, | ||
184 | }; | ||
185 | |||
186 | static const u64 core_offsets[] = { | ||
187 | [HZIP_COMP_CORE0] = 0x302000, | ||
188 | [HZIP_COMP_CORE1] = 0x303000, | ||
189 | [HZIP_DECOMP_CORE0] = 0x304000, | ||
190 | [HZIP_DECOMP_CORE1] = 0x305000, | ||
191 | [HZIP_DECOMP_CORE2] = 0x306000, | ||
192 | [HZIP_DECOMP_CORE3] = 0x307000, | ||
193 | [HZIP_DECOMP_CORE4] = 0x308000, | ||
194 | [HZIP_DECOMP_CORE5] = 0x309000, | ||
195 | }; | ||
196 | |||
197 | static struct debugfs_reg32 hzip_dfx_regs[] = { | ||
198 | {"HZIP_GET_BD_NUM ", 0x00ull}, | ||
199 | {"HZIP_GET_RIGHT_BD ", 0x04ull}, | ||
200 | {"HZIP_GET_ERROR_BD ", 0x08ull}, | ||
201 | {"HZIP_DONE_BD_NUM ", 0x0cull}, | ||
202 | {"HZIP_WORK_CYCLE ", 0x10ull}, | ||
203 | {"HZIP_IDLE_CYCLE ", 0x18ull}, | ||
204 | {"HZIP_MAX_DELAY ", 0x20ull}, | ||
205 | {"HZIP_MIN_DELAY ", 0x24ull}, | ||
206 | {"HZIP_AVG_DELAY ", 0x28ull}, | ||
207 | {"HZIP_MEM_VISIBLE_DATA ", 0x30ull}, | ||
208 | {"HZIP_MEM_VISIBLE_ADDR ", 0x34ull}, | ||
209 | {"HZIP_COMSUMED_BYTE ", 0x38ull}, | ||
210 | {"HZIP_PRODUCED_BYTE ", 0x40ull}, | ||
211 | {"HZIP_COMP_INF ", 0x70ull}, | ||
212 | {"HZIP_PRE_OUT ", 0x78ull}, | ||
213 | {"HZIP_BD_RD ", 0x7cull}, | ||
214 | {"HZIP_BD_WR ", 0x80ull}, | ||
215 | {"HZIP_GET_BD_AXI_ERR_NUM ", 0x84ull}, | ||
216 | {"HZIP_GET_BD_PARSE_ERR_NUM ", 0x88ull}, | ||
217 | {"HZIP_ADD_BD_AXI_ERR_NUM ", 0x8cull}, | ||
218 | {"HZIP_DECOMP_STF_RELOAD_CURR_ST ", 0x94ull}, | ||
219 | {"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull}, | ||
220 | }; | ||
221 | |||
222 | static int pf_q_num_set(const char *val, const struct kernel_param *kp) | ||
223 | { | ||
224 | struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, | ||
225 | PCI_DEVICE_ID_ZIP_PF, NULL); | ||
226 | u32 n, q_num; | ||
227 | u8 rev_id; | ||
228 | int ret; | ||
229 | |||
230 | if (!val) | ||
231 | return -EINVAL; | ||
232 | |||
233 | if (!pdev) { | ||
234 | q_num = min_t(u32, HZIP_QUEUE_NUM_V1, HZIP_QUEUE_NUM_V2); | ||
235 | pr_info("No device found currently, assuming queue number is %d\n", | ||
236 | q_num); | ||
237 | } else { | ||
238 | rev_id = pdev->revision; | ||
239 | switch (rev_id) { | ||
240 | case QM_HW_V1: | ||
241 | q_num = HZIP_QUEUE_NUM_V1; | ||
242 | break; | ||
243 | case QM_HW_V2: | ||
244 | q_num = HZIP_QUEUE_NUM_V2; | ||
245 | break; | ||
246 | default: | ||
247 | return -EINVAL; | ||
248 | } | ||
249 | } | ||
250 | |||
251 | ret = kstrtou32(val, 10, &n); | ||
252 | if (ret != 0 || n > q_num || n == 0) | ||
253 | return -EINVAL; | ||
254 | |||
255 | return param_set_int(val, kp); | ||
256 | } | ||
257 | |||
258 | static const struct kernel_param_ops pf_q_num_ops = { | ||
259 | .set = pf_q_num_set, | ||
260 | .get = param_get_int, | ||
261 | }; | ||
262 | |||
263 | static u32 pf_q_num = HZIP_PF_DEF_Q_NUM; | ||
264 | module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444); | ||
265 | MODULE_PARM_DESC(pf_q_num, "Number of queues in PF (v1 1-4096, v2 1-1024)"); | ||
266 | |||
267 | static int uacce_mode; | ||
268 | module_param(uacce_mode, int, 0); | ||
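Tying these two parameters to the logic below: uacce_mode 0 and 2 keep qm->use_dma_api set (and, per hisi_zip_init(), are the only modes that register the kernel crypto algorithms), while mode 1 clears it; pf_q_num caps the PF's queue count, e.g. loading with pf_q_num=128 gives the PF 128 of the device's queues and leaves the rest for VFs.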
269 | |||
270 | static const struct pci_device_id hisi_zip_dev_ids[] = { | ||
271 | { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) }, | ||
272 | { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_VF) }, | ||
273 | { 0, } | ||
274 | }; | ||
275 | MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids); | ||
276 | |||
277 | static inline void hisi_zip_add_to_list(struct hisi_zip *hisi_zip) | ||
278 | { | ||
279 | mutex_lock(&hisi_zip_list_lock); | ||
280 | list_add_tail(&hisi_zip->list, &hisi_zip_list); | ||
281 | mutex_unlock(&hisi_zip_list_lock); | ||
282 | } | ||
283 | |||
284 | static inline void hisi_zip_remove_from_list(struct hisi_zip *hisi_zip) | ||
285 | { | ||
286 | mutex_lock(&hisi_zip_list_lock); | ||
287 | list_del(&hisi_zip->list); | ||
288 | mutex_unlock(&hisi_zip_list_lock); | ||
289 | } | ||
290 | |||
291 | static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip) | ||
292 | { | ||
293 | void __iomem *base = hisi_zip->qm.io_base; | ||
294 | |||
295 | /* qm user domain */ | ||
296 | writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1); | ||
297 | writel(ARUSER_M_CFG_ENABLE, base + QM_ARUSER_M_CFG_ENABLE); | ||
298 | writel(AXUSER_BASE, base + QM_AWUSER_M_CFG_1); | ||
299 | writel(AWUSER_M_CFG_ENABLE, base + QM_AWUSER_M_CFG_ENABLE); | ||
300 | writel(WUSER_M_CFG_ENABLE, base + QM_WUSER_M_CFG_ENABLE); | ||
301 | |||
302 | /* qm cache */ | ||
303 | writel(AXI_M_CFG, base + QM_AXI_M_CFG); | ||
304 | writel(AXI_M_CFG_ENABLE, base + QM_AXI_M_CFG_ENABLE); | ||
305 | /* disable FLR triggered by BME(bus master enable) */ | ||
306 | writel(PEH_AXUSER_CFG, base + QM_PEH_AXUSER_CFG); | ||
307 | writel(PEH_AXUSER_CFG_ENABLE, base + QM_PEH_AXUSER_CFG_ENABLE); | ||
308 | |||
309 | /* cache */ | ||
310 | writel(CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0); | ||
311 | writel(CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1); | ||
312 | writel(CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0); | ||
313 | writel(CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1); | ||
314 | |||
315 | /* user domain configurations */ | ||
316 | writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63); | ||
317 | writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63); | ||
318 | writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63); | ||
319 | writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63); | ||
320 | writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63); | ||
321 | |||
322 | /* let's open all compression/decompression cores */ | ||
323 | writel(DECOMP_CHECK_ENABLE | ALL_COMP_DECOMP_EN, | ||
324 | base + HZIP_CLOCK_GATE_CTRL); | ||
325 | |||
326 | /* enable sqc writeback */ | ||
327 | writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE | | ||
328 | CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) | | ||
329 | FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL); | ||
330 | } | ||
331 | |||
332 | static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state) | ||
333 | { | ||
334 | struct hisi_qm *qm = &hisi_zip->qm; | ||
335 | |||
336 | if (qm->ver == QM_HW_V1) { | ||
337 | writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK); | ||
338 | dev_info(&qm->pdev->dev, "ZIP v%d does not support hw error handling\n", | ||
339 | qm->ver); | ||
340 | return; | ||
341 | } | ||
342 | |||
343 | if (state) { | ||
344 | /* clear any pending ZIP hw error source */ | ||
345 | writel(HZIP_CORE_INT_DISABLE, hisi_zip->qm.io_base + | ||
346 | HZIP_CORE_INT_SOURCE); | ||
347 | /* enable ZIP hw error interrupts */ | ||
348 | writel(0, hisi_zip->qm.io_base + HZIP_CORE_INT_MASK); | ||
349 | } else { | ||
350 | /* disable ZIP hw error interrupts */ | ||
351 | writel(HZIP_CORE_INT_DISABLE, | ||
352 | hisi_zip->qm.io_base + HZIP_CORE_INT_MASK); | ||
353 | } | ||
354 | } | ||
355 | |||
356 | static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file) | ||
357 | { | ||
358 | struct hisi_zip *hisi_zip = file->ctrl->hisi_zip; | ||
359 | |||
360 | return &hisi_zip->qm; | ||
361 | } | ||
362 | |||
363 | static u32 current_qm_read(struct ctrl_debug_file *file) | ||
364 | { | ||
365 | struct hisi_qm *qm = file_to_qm(file); | ||
366 | |||
367 | return readl(qm->io_base + QM_DFX_MB_CNT_VF); | ||
368 | } | ||
369 | |||
370 | static int current_qm_write(struct ctrl_debug_file *file, u32 val) | ||
371 | { | ||
372 | struct hisi_qm *qm = file_to_qm(file); | ||
373 | struct hisi_zip_ctrl *ctrl = file->ctrl; | ||
374 | u32 vfq_num; | ||
375 | u32 tmp; | ||
376 | |||
377 | if (val > ctrl->num_vfs) | ||
378 | return -EINVAL; | ||
379 | |||
380 | /* Calculate curr_qm_qp_num and store */ | ||
381 | if (val == 0) { | ||
382 | qm->debug.curr_qm_qp_num = qm->qp_num; | ||
383 | } else { | ||
384 | vfq_num = (qm->ctrl_qp_num - qm->qp_num) / ctrl->num_vfs; | ||
385 | if (val == ctrl->num_vfs) | ||
386 | qm->debug.curr_qm_qp_num = qm->ctrl_qp_num - | ||
387 | qm->qp_num - (ctrl->num_vfs - 1) * vfq_num; | ||
388 | else | ||
389 | qm->debug.curr_qm_qp_num = vfq_num; | ||
390 | } | ||
391 | |||
392 | writel(val, qm->io_base + QM_DFX_MB_CNT_VF); | ||
393 | writel(val, qm->io_base + QM_DFX_DB_CNT_VF); | ||
394 | |||
395 | tmp = val | | ||
396 | (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); | ||
397 | writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); | ||
398 | |||
399 | tmp = val | | ||
400 | (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); | ||
401 | writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); | ||
402 | |||
403 | return 0; | ||
404 | } | ||
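A worked example with assumed values: ctrl_qp_num = 1024, qp_num = 64 on the PF and num_vfs = 3 give vfq_num = (1024 - 64) / 3 = 320; writing 1 or 2 therefore selects a 320-queue VF window, and writing 3 (the last VF) selects 1024 - 64 - 2 * 320 = 320 as well, the last VF only differing when the division leaves a remainder.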
405 | |||
406 | static u32 clear_enable_read(struct ctrl_debug_file *file) | ||
407 | { | ||
408 | struct hisi_qm *qm = file_to_qm(file); | ||
409 | |||
410 | return readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) & | ||
411 | SOFT_CTRL_CNT_CLR_CE_BIT; | ||
412 | } | ||
413 | |||
414 | static int clear_enable_write(struct ctrl_debug_file *file, u32 val) | ||
415 | { | ||
416 | struct hisi_qm *qm = file_to_qm(file); | ||
417 | u32 tmp; | ||
418 | |||
419 | if (val != 1 && val != 0) | ||
420 | return -EINVAL; | ||
421 | |||
422 | tmp = (readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) & | ||
423 | ~SOFT_CTRL_CNT_CLR_CE_BIT) | val; | ||
424 | writel(tmp, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE); | ||
425 | |||
426 | return 0; | ||
427 | } | ||
428 | |||
429 | static ssize_t ctrl_debug_read(struct file *filp, char __user *buf, | ||
430 | size_t count, loff_t *pos) | ||
431 | { | ||
432 | struct ctrl_debug_file *file = filp->private_data; | ||
433 | char tbuf[HZIP_BUF_SIZE]; | ||
434 | u32 val; | ||
435 | int ret; | ||
436 | |||
437 | spin_lock_irq(&file->lock); | ||
438 | switch (file->index) { | ||
439 | case HZIP_CURRENT_QM: | ||
440 | val = current_qm_read(file); | ||
441 | break; | ||
442 | case HZIP_CLEAR_ENABLE: | ||
443 | val = clear_enable_read(file); | ||
444 | break; | ||
445 | default: | ||
446 | spin_unlock_irq(&file->lock); | ||
447 | return -EINVAL; | ||
448 | } | ||
449 | spin_unlock_irq(&file->lock); | ||
450 | ret = sprintf(tbuf, "%u\n", val); | ||
451 | return simple_read_from_buffer(buf, count, pos, tbuf, ret); | ||
452 | } | ||
453 | |||
454 | static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf, | ||
455 | size_t count, loff_t *pos) | ||
456 | { | ||
457 | struct ctrl_debug_file *file = filp->private_data; | ||
458 | char tbuf[HZIP_BUF_SIZE]; | ||
459 | unsigned long val; | ||
460 | int len, ret; | ||
461 | |||
462 | if (*pos != 0) | ||
463 | return 0; | ||
464 | |||
465 | if (count >= HZIP_BUF_SIZE) | ||
466 | return -ENOSPC; | ||
467 | |||
468 | len = simple_write_to_buffer(tbuf, HZIP_BUF_SIZE - 1, pos, buf, count); | ||
469 | if (len < 0) | ||
470 | return len; | ||
471 | |||
472 | tbuf[len] = '\0'; | ||
473 | if (kstrtoul(tbuf, 0, &val)) | ||
474 | return -EFAULT; | ||
475 | |||
476 | spin_lock_irq(&file->lock); | ||
477 | switch (file->index) { | ||
478 | case HZIP_CURRENT_QM: | ||
479 | ret = current_qm_write(file, val); | ||
480 | if (ret) | ||
481 | goto err_input; | ||
482 | break; | ||
483 | case HZIP_CLEAR_ENABLE: | ||
484 | ret = clear_enable_write(file, val); | ||
485 | if (ret) | ||
486 | goto err_input; | ||
487 | break; | ||
488 | default: | ||
489 | ret = -EINVAL; | ||
490 | goto err_input; | ||
491 | } | ||
492 | spin_unlock_irq(&file->lock); | ||
493 | |||
494 | return count; | ||
495 | |||
496 | err_input: | ||
497 | spin_unlock_irq(&file->lock); | ||
498 | return ret; | ||
499 | } | ||
500 | |||
501 | static const struct file_operations ctrl_debug_fops = { | ||
502 | .owner = THIS_MODULE, | ||
503 | .open = simple_open, | ||
504 | .read = ctrl_debug_read, | ||
505 | .write = ctrl_debug_write, | ||
506 | }; | ||
507 | |||
508 | static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl) | ||
509 | { | ||
510 | struct hisi_zip *hisi_zip = ctrl->hisi_zip; | ||
511 | struct hisi_qm *qm = &hisi_zip->qm; | ||
512 | struct device *dev = &qm->pdev->dev; | ||
513 | struct debugfs_regset32 *regset; | ||
514 | struct dentry *tmp_d, *tmp; | ||
515 | char buf[HZIP_BUF_SIZE]; | ||
516 | int i; | ||
517 | |||
518 | for (i = 0; i < HZIP_CORE_NUM; i++) { | ||
519 | if (i < HZIP_COMP_CORE_NUM) | ||
520 | sprintf(buf, "comp_core%d", i); | ||
521 | else | ||
522 | sprintf(buf, "decomp_core%d", i - HZIP_COMP_CORE_NUM); | ||
523 | |||
524 | tmp_d = debugfs_create_dir(buf, ctrl->debug_root); | ||
525 | if (!tmp_d) | ||
526 | return -ENOENT; | ||
527 | |||
528 | regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); | ||
529 | if (!regset) | ||
530 | return -ENOENT; | ||
531 | |||
532 | regset->regs = hzip_dfx_regs; | ||
533 | regset->nregs = ARRAY_SIZE(hzip_dfx_regs); | ||
534 | regset->base = qm->io_base + core_offsets[i]; | ||
535 | |||
536 | tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset); | ||
537 | if (!tmp) | ||
538 | return -ENOENT; | ||
539 | } | ||
540 | |||
541 | return 0; | ||
542 | } | ||
543 | |||
544 | static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl) | ||
545 | { | ||
546 | struct dentry *tmp; | ||
547 | int i; | ||
548 | |||
549 | for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) { | ||
550 | spin_lock_init(&ctrl->files[i].lock); | ||
551 | ctrl->files[i].ctrl = ctrl; | ||
552 | ctrl->files[i].index = i; | ||
553 | |||
554 | tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600, | ||
555 | ctrl->debug_root, ctrl->files + i, | ||
556 | &ctrl_debug_fops); | ||
557 | if (!tmp) | ||
558 | return -ENOENT; | ||
559 | } | ||
560 | |||
561 | return hisi_zip_core_debug_init(ctrl); | ||
562 | } | ||
563 | |||
564 | static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip) | ||
565 | { | ||
566 | struct hisi_qm *qm = &hisi_zip->qm; | ||
567 | struct device *dev = &qm->pdev->dev; | ||
568 | struct dentry *dev_d; | ||
569 | int ret; | ||
570 | |||
571 | dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root); | ||
572 | if (!dev_d) | ||
573 | return -ENOENT; | ||
574 | |||
575 | qm->debug.debug_root = dev_d; | ||
576 | ret = hisi_qm_debug_init(qm); | ||
577 | if (ret) | ||
578 | goto failed_to_create; | ||
579 | |||
580 | if (qm->fun_type == QM_HW_PF) { | ||
581 | hisi_zip->ctrl->debug_root = dev_d; | ||
582 | ret = hisi_zip_ctrl_debug_init(hisi_zip->ctrl); | ||
583 | if (ret) | ||
584 | goto failed_to_create; | ||
585 | } | ||
586 | |||
587 | return 0; | ||
588 | |||
589 | failed_to_create: | ||
590 | debugfs_remove_recursive(hzip_debugfs_root); | ||
591 | return ret; | ||
592 | } | ||
593 | |||
594 | static void hisi_zip_debug_regs_clear(struct hisi_zip *hisi_zip) | ||
595 | { | ||
596 | struct hisi_qm *qm = &hisi_zip->qm; | ||
597 | |||
598 | writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); | ||
599 | writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); | ||
600 | writel(0x0, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE); | ||
601 | |||
602 | hisi_qm_debug_regs_clear(qm); | ||
603 | } | ||
604 | |||
605 | static void hisi_zip_debugfs_exit(struct hisi_zip *hisi_zip) | ||
606 | { | ||
607 | struct hisi_qm *qm = &hisi_zip->qm; | ||
608 | |||
609 | debugfs_remove_recursive(qm->debug.debug_root); | ||
610 | |||
611 | if (qm->fun_type == QM_HW_PF) | ||
612 | hisi_zip_debug_regs_clear(hisi_zip); | ||
613 | } | ||
614 | |||
615 | static void hisi_zip_hw_error_init(struct hisi_zip *hisi_zip) | ||
616 | { | ||
617 | hisi_qm_hw_error_init(&hisi_zip->qm, QM_BASE_CE, | ||
618 | QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT, 0, | ||
619 | QM_DB_RANDOM_INVALID); | ||
620 | hisi_zip_hw_error_set_state(hisi_zip, true); | ||
621 | } | ||
622 | |||
623 | static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) | ||
624 | { | ||
625 | struct hisi_qm *qm = &hisi_zip->qm; | ||
626 | struct hisi_zip_ctrl *ctrl; | ||
627 | |||
628 | ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL); | ||
629 | if (!ctrl) | ||
630 | return -ENOMEM; | ||
631 | |||
632 | hisi_zip->ctrl = ctrl; | ||
633 | ctrl->hisi_zip = hisi_zip; | ||
634 | |||
635 | switch (qm->ver) { | ||
636 | case QM_HW_V1: | ||
637 | qm->ctrl_qp_num = HZIP_QUEUE_NUM_V1; | ||
638 | break; | ||
639 | |||
640 | case QM_HW_V2: | ||
641 | qm->ctrl_qp_num = HZIP_QUEUE_NUM_V2; | ||
642 | break; | ||
643 | |||
644 | default: | ||
645 | return -EINVAL; | ||
646 | } | ||
647 | |||
648 | hisi_zip_set_user_domain_and_cache(hisi_zip); | ||
649 | hisi_zip_hw_error_init(hisi_zip); | ||
650 | hisi_zip_debug_regs_clear(hisi_zip); | ||
651 | |||
652 | return 0; | ||
653 | } | ||
654 | |||
655 | static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) | ||
656 | { | ||
657 | struct hisi_zip *hisi_zip; | ||
658 | enum qm_hw_ver rev_id; | ||
659 | struct hisi_qm *qm; | ||
660 | int ret; | ||
661 | |||
662 | rev_id = hisi_qm_get_hw_version(pdev); | ||
663 | if (rev_id == QM_HW_UNKNOWN) | ||
664 | return -EINVAL; | ||
665 | |||
666 | hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL); | ||
667 | if (!hisi_zip) | ||
668 | return -ENOMEM; | ||
669 | pci_set_drvdata(pdev, hisi_zip); | ||
670 | |||
671 | qm = &hisi_zip->qm; | ||
672 | qm->pdev = pdev; | ||
673 | qm->ver = rev_id; | ||
674 | |||
675 | qm->sqe_size = HZIP_SQE_SIZE; | ||
676 | qm->dev_name = hisi_zip_name; | ||
677 | qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF : | ||
678 | QM_HW_VF; | ||
679 | switch (uacce_mode) { | ||
680 | case 0: | ||
681 | qm->use_dma_api = true; | ||
682 | break; | ||
683 | case 1: | ||
684 | qm->use_dma_api = false; | ||
685 | break; | ||
686 | case 2: | ||
687 | qm->use_dma_api = true; | ||
688 | break; | ||
689 | default: | ||
690 | return -EINVAL; | ||
691 | } | ||
692 | |||
693 | ret = hisi_qm_init(qm); | ||
694 | if (ret) { | ||
695 | dev_err(&pdev->dev, "Failed to init qm!\n"); | ||
696 | return ret; | ||
697 | } | ||
698 | |||
699 | if (qm->fun_type == QM_HW_PF) { | ||
700 | ret = hisi_zip_pf_probe_init(hisi_zip); | ||
701 | if (ret) | ||
702 | return ret; | ||
703 | |||
704 | qm->qp_base = HZIP_PF_DEF_Q_BASE; | ||
705 | qm->qp_num = pf_q_num; | ||
706 | } else if (qm->fun_type == QM_HW_VF) { | ||
707 | /* | ||
708 | * There is no way to fetch the qm configuration from within a VM | ||
709 | * on v1 hardware, so the PF is currently forced to use | ||
710 | * HZIP_PF_DEF_Q_NUM and only one VF can be instantiated on v1. | ||
711 | * | ||
712 | * v2 hardware has no such problem. | ||
713 | */ | ||
714 | if (qm->ver == QM_HW_V1) { | ||
715 | qm->qp_base = HZIP_PF_DEF_Q_NUM; | ||
716 | qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM; | ||
717 | } else if (qm->ver == QM_HW_V2) | ||
718 | /* v2 starts to support get vft by mailbox */ | ||
719 | hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); | ||
720 | } | ||
721 | |||
722 | ret = hisi_qm_start(qm); | ||
723 | if (ret) | ||
724 | goto err_qm_uninit; | ||
725 | |||
726 | ret = hisi_zip_debugfs_init(hisi_zip); | ||
727 | if (ret) | ||
728 | dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret); | ||
729 | |||
730 | hisi_zip_add_to_list(hisi_zip); | ||
731 | |||
732 | return 0; | ||
733 | |||
734 | err_qm_uninit: | ||
735 | hisi_qm_uninit(qm); | ||
736 | return ret; | ||
737 | } | ||
738 | |||
739 | /* Currently we only support equal assignment */ | ||
740 | static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, int num_vfs) | ||
741 | { | ||
742 | struct hisi_qm *qm = &hisi_zip->qm; | ||
743 | u32 qp_num = qm->qp_num; | ||
744 | u32 q_base = qp_num; | ||
745 | u32 q_num, remain_q_num, i; | ||
746 | int ret; | ||
747 | |||
748 | if (!num_vfs) | ||
749 | return -EINVAL; | ||
750 | |||
751 | remain_q_num = qm->ctrl_qp_num - qp_num; | ||
752 | if (remain_q_num < num_vfs) | ||
753 | return -EINVAL; | ||
754 | |||
755 | q_num = remain_q_num / num_vfs; | ||
756 | for (i = 1; i <= num_vfs; i++) { | ||
757 | if (i == num_vfs) | ||
758 | q_num += remain_q_num % num_vfs; | ||
759 | ret = hisi_qm_set_vft(qm, i, q_base, q_num); | ||
760 | if (ret) | ||
761 | return ret; | ||
762 | q_base += q_num; | ||
763 | } | ||
764 | |||
765 | return 0; | ||
766 | } | ||
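A worked example with assumed values: ctrl_qp_num = 1024 and qp_num = 64 leave remain_q_num = 960; for num_vfs = 7 each VF gets q_num = 960 / 7 = 137, except the last, which absorbs the remainder and gets 137 + 960 % 7 = 138. Queue bases then run 64, 201, 338, ..., 886, and 886 + 138 = 1024 accounts for every queue.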
767 | |||
768 | static int hisi_zip_clear_vft_config(struct hisi_zip *hisi_zip) | ||
769 | { | ||
770 | struct hisi_zip_ctrl *ctrl = hisi_zip->ctrl; | ||
771 | struct hisi_qm *qm = &hisi_zip->qm; | ||
772 | u32 i, num_vfs = ctrl->num_vfs; | ||
773 | int ret; | ||
774 | |||
775 | for (i = 1; i <= num_vfs; i++) { | ||
776 | ret = hisi_qm_set_vft(qm, i, 0, 0); | ||
777 | if (ret) | ||
778 | return ret; | ||
779 | } | ||
780 | |||
781 | ctrl->num_vfs = 0; | ||
782 | |||
783 | return 0; | ||
784 | } | ||
785 | |||
786 | static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs) | ||
787 | { | ||
788 | #ifdef CONFIG_PCI_IOV | ||
789 | struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); | ||
790 | int pre_existing_vfs, num_vfs, ret; | ||
791 | |||
792 | pre_existing_vfs = pci_num_vf(pdev); | ||
793 | |||
794 | if (pre_existing_vfs) { | ||
795 | dev_err(&pdev->dev, | ||
796 | "Can't enable VF. Please disable pre-enabled VFs!\n"); | ||
797 | return 0; | ||
798 | } | ||
799 | |||
800 | num_vfs = min_t(int, max_vfs, HZIP_VF_NUM); | ||
801 | |||
802 | ret = hisi_zip_vf_q_assign(hisi_zip, num_vfs); | ||
803 | if (ret) { | ||
804 | dev_err(&pdev->dev, "Can't assign queues for VF!\n"); | ||
805 | return ret; | ||
806 | } | ||
807 | |||
808 | hisi_zip->ctrl->num_vfs = num_vfs; | ||
809 | |||
810 | ret = pci_enable_sriov(pdev, num_vfs); | ||
811 | if (ret) { | ||
812 | dev_err(&pdev->dev, "Can't enable VF!\n"); | ||
813 | hisi_zip_clear_vft_config(hisi_zip); | ||
814 | return ret; | ||
815 | } | ||
816 | |||
817 | return num_vfs; | ||
818 | #else | ||
819 | return 0; | ||
820 | #endif | ||
821 | } | ||
822 | |||
823 | static int hisi_zip_sriov_disable(struct pci_dev *pdev) | ||
824 | { | ||
825 | struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); | ||
826 | |||
827 | if (pci_vfs_assigned(pdev)) { | ||
828 | dev_err(&pdev->dev, | ||
829 | "Can't disable VFs while VFs are assigned!\n"); | ||
830 | return -EPERM; | ||
831 | } | ||
832 | |||
833 | /* hisi_zip_remove() will be called for each VF to free its resources */ | ||
834 | pci_disable_sriov(pdev); | ||
835 | |||
836 | return hisi_zip_clear_vft_config(hisi_zip); | ||
837 | } | ||
838 | |||
839 | static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs) | ||
840 | { | ||
841 | if (num_vfs == 0) | ||
842 | return hisi_zip_sriov_disable(pdev); | ||
843 | else | ||
844 | return hisi_zip_sriov_enable(pdev, num_vfs); | ||
845 | } | ||
846 | |||
847 | static void hisi_zip_remove(struct pci_dev *pdev) | ||
848 | { | ||
849 | struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); | ||
850 | struct hisi_qm *qm = &hisi_zip->qm; | ||
851 | |||
852 | if (qm->fun_type == QM_HW_PF && hisi_zip->ctrl->num_vfs != 0) | ||
853 | hisi_zip_sriov_disable(pdev); | ||
854 | |||
855 | hisi_zip_debugfs_exit(hisi_zip); | ||
856 | hisi_qm_stop(qm); | ||
857 | |||
858 | if (qm->fun_type == QM_HW_PF) | ||
859 | hisi_zip_hw_error_set_state(hisi_zip, false); | ||
860 | |||
861 | hisi_qm_uninit(qm); | ||
862 | hisi_zip_remove_from_list(hisi_zip); | ||
863 | } | ||
864 | |||
865 | static void hisi_zip_log_hw_error(struct hisi_zip *hisi_zip, u32 err_sts) | ||
866 | { | ||
867 | const struct hisi_zip_hw_error *err = zip_hw_error; | ||
868 | struct device *dev = &hisi_zip->qm.pdev->dev; | ||
869 | u32 err_val; | ||
870 | |||
871 | while (err->msg) { | ||
872 | if (err->int_msk & err_sts) { | ||
873 | dev_warn(dev, "%s [error status=0x%x] found\n", | ||
874 | err->msg, err->int_msk); | ||
875 | |||
876 | if (HZIP_CORE_INT_STATUS_M_ECC & err->int_msk) { | ||
877 | err_val = readl(hisi_zip->qm.io_base + | ||
878 | HZIP_CORE_SRAM_ECC_ERR_INFO); | ||
879 | dev_warn(dev, "hisi-zip multi ecc sram num=0x%x\n", | ||
880 | ((err_val >> SRAM_ECC_ERR_NUM_SHIFT) & | ||
881 | 0xFF)); | ||
882 | dev_warn(dev, "hisi-zip multi ecc sram addr=0x%x\n", | ||
883 | (err_val >> SRAM_ECC_ERR_ADDR_SHIFT)); | ||
884 | } | ||
885 | } | ||
886 | err++; | ||
887 | } | ||
888 | } | ||
889 | |||
890 | static pci_ers_result_t hisi_zip_hw_error_handle(struct hisi_zip *hisi_zip) | ||
891 | { | ||
892 | u32 err_sts; | ||
893 | |||
894 | /* read err sts */ | ||
895 | err_sts = readl(hisi_zip->qm.io_base + HZIP_CORE_INT_STATUS); | ||
896 | |||
897 | if (err_sts) { | ||
898 | hisi_zip_log_hw_error(hisi_zip, err_sts); | ||
899 | /* clear error interrupts */ | ||
900 | writel(err_sts, hisi_zip->qm.io_base + HZIP_CORE_INT_SOURCE); | ||
901 | |||
902 | return PCI_ERS_RESULT_NEED_RESET; | ||
903 | } | ||
904 | |||
905 | return PCI_ERS_RESULT_RECOVERED; | ||
906 | } | ||
907 | |||
908 | static pci_ers_result_t hisi_zip_process_hw_error(struct pci_dev *pdev) | ||
909 | { | ||
910 | struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); | ||
911 | struct device *dev = &pdev->dev; | ||
912 | pci_ers_result_t qm_ret, zip_ret; | ||
913 | |||
914 | if (!hisi_zip) { | ||
915 | dev_err(dev, | ||
916 | "Can't recover ZIP-error occurred during device init\n"); | ||
917 | return PCI_ERS_RESULT_NONE; | ||
918 | } | ||
919 | |||
920 | qm_ret = hisi_qm_hw_error_handle(&hisi_zip->qm); | ||
921 | |||
922 | zip_ret = hisi_zip_hw_error_handle(hisi_zip); | ||
923 | |||
924 | return (qm_ret == PCI_ERS_RESULT_NEED_RESET || | ||
925 | zip_ret == PCI_ERS_RESULT_NEED_RESET) ? | ||
926 | PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED; | ||
927 | } | ||
928 | |||
929 | static pci_ers_result_t hisi_zip_error_detected(struct pci_dev *pdev, | ||
930 | pci_channel_state_t state) | ||
931 | { | ||
932 | if (pdev->is_virtfn) | ||
933 | return PCI_ERS_RESULT_NONE; | ||
934 | |||
935 | dev_info(&pdev->dev, "PCI error detected, state = %d\n", state); | ||
936 | if (state == pci_channel_io_perm_failure) | ||
937 | return PCI_ERS_RESULT_DISCONNECT; | ||
938 | |||
939 | return hisi_zip_process_hw_error(pdev); | ||
940 | } | ||
941 | |||
942 | static const struct pci_error_handlers hisi_zip_err_handler = { | ||
943 | .error_detected = hisi_zip_error_detected, | ||
944 | }; | ||
945 | |||
946 | static struct pci_driver hisi_zip_pci_driver = { | ||
947 | .name = "hisi_zip", | ||
948 | .id_table = hisi_zip_dev_ids, | ||
949 | .probe = hisi_zip_probe, | ||
950 | .remove = hisi_zip_remove, | ||
951 | .sriov_configure = hisi_zip_sriov_configure, | ||
952 | .err_handler = &hisi_zip_err_handler, | ||
953 | }; | ||
954 | |||
955 | static void hisi_zip_register_debugfs(void) | ||
956 | { | ||
957 | if (!debugfs_initialized()) | ||
958 | return; | ||
959 | |||
960 | hzip_debugfs_root = debugfs_create_dir("hisi_zip", NULL); | ||
961 | if (IS_ERR_OR_NULL(hzip_debugfs_root)) | ||
962 | hzip_debugfs_root = NULL; | ||
963 | } | ||
964 | |||
965 | static void hisi_zip_unregister_debugfs(void) | ||
966 | { | ||
967 | debugfs_remove_recursive(hzip_debugfs_root); | ||
968 | } | ||
969 | |||
970 | static int __init hisi_zip_init(void) | ||
971 | { | ||
972 | int ret; | ||
973 | |||
974 | hisi_zip_register_debugfs(); | ||
975 | |||
976 | ret = pci_register_driver(&hisi_zip_pci_driver); | ||
977 | if (ret < 0) { | ||
978 | pr_err("Failed to register pci driver.\n"); | ||
979 | goto err_pci; | ||
980 | } | ||
981 | |||
982 | if (uacce_mode == 0 || uacce_mode == 2) { | ||
983 | ret = hisi_zip_register_to_crypto(); | ||
984 | if (ret < 0) { | ||
985 | pr_err("Failed to register driver to crypto.\n"); | ||
986 | goto err_crypto; | ||
987 | } | ||
988 | } | ||
989 | |||
990 | return 0; | ||
991 | |||
992 | err_crypto: | ||
993 | pci_unregister_driver(&hisi_zip_pci_driver); | ||
994 | err_pci: | ||
995 | hisi_zip_unregister_debugfs(); | ||
996 | |||
997 | return ret; | ||
998 | } | ||
999 | |||
1000 | static void __exit hisi_zip_exit(void) | ||
1001 | { | ||
1002 | if (uacce_mode == 0 || uacce_mode == 2) | ||
1003 | hisi_zip_unregister_from_crypto(); | ||
1004 | pci_unregister_driver(&hisi_zip_pci_driver); | ||
1005 | hisi_zip_unregister_debugfs(); | ||
1006 | } | ||
1007 | |||
1008 | module_init(hisi_zip_init); | ||
1009 | module_exit(hisi_zip_exit); | ||
1010 | |||
1011 | MODULE_LICENSE("GPL v2"); | ||
1012 | MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>"); | ||
1013 | MODULE_DESCRIPTION("Driver for HiSilicon ZIP accelerator"); | ||
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index d27c812c3d8d..fe4cc8babe1c 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c | |||
@@ -958,9 +958,7 @@ static int img_hash_probe(struct platform_device *pdev) | |||
958 | crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH); | 958 | crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH); |
959 | 959 | ||
960 | /* Register bank */ | 960 | /* Register bank */ |
961 | hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 961 | hdev->io_base = devm_platform_ioremap_resource(pdev, 0); |
962 | |||
963 | hdev->io_base = devm_ioremap_resource(dev, hash_res); | ||
964 | if (IS_ERR(hdev->io_base)) { | 962 | if (IS_ERR(hdev->io_base)) { |
965 | err = PTR_ERR(hdev->io_base); | 963 | err = PTR_ERR(hdev->io_base); |
966 | dev_err(dev, "can't ioremap, returned %d\n", err); | 964 | dev_err(dev, "can't ioremap, returned %d\n", err); |
@@ -980,7 +978,6 @@ static int img_hash_probe(struct platform_device *pdev) | |||
980 | 978 | ||
981 | irq = platform_get_irq(pdev, 0); | 979 | irq = platform_get_irq(pdev, 0); |
982 | if (irq < 0) { | 980 | if (irq < 0) { |
983 | dev_err(dev, "no IRQ resource info\n"); | ||
984 | err = irq; | 981 | err = irq; |
985 | goto res_err; | 982 | goto res_err; |
986 | } | 983 | } |
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index df43a2c6933b..b456b85f46d3 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/of_platform.h> | 15 | #include <linux/of_platform.h> |
16 | #include <linux/of_irq.h> | 16 | #include <linux/of_irq.h> |
17 | #include <linux/pci.h> | ||
17 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
18 | #include <linux/workqueue.h> | 19 | #include <linux/workqueue.h> |
19 | 20 | ||
@@ -27,62 +28,205 @@ static u32 max_rings = EIP197_MAX_RINGS; | |||
27 | module_param(max_rings, uint, 0644); | 28 | module_param(max_rings, uint, 0644); |
28 | MODULE_PARM_DESC(max_rings, "Maximum number of rings to use."); | 29 | MODULE_PARM_DESC(max_rings, "Maximum number of rings to use."); |
29 | 30 | ||
30 | static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv) | 31 | static void eip197_trc_cache_setupvirt(struct safexcel_crypto_priv *priv) |
31 | { | 32 | { |
32 | u32 val, htable_offset; | 33 | int i; |
33 | int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc; | 34 | |
34 | 35 | /* | |
35 | if (priv->version == EIP197B) { | 36 | * Map all interfaces/rings to register index 0 |
36 | cs_rc_max = EIP197B_CS_RC_MAX; | 37 | * so they can share contexts. Without this, the EIP197 will |
37 | cs_ht_wc = EIP197B_CS_HT_WC; | 38 | * assume each interface/ring to be in its own memory domain |
38 | cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC; | 39 | * i.e. have its own subset of UNIQUE memory addresses. |
39 | cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC; | 40 | * This would cause records with the SAME memory address to |
40 | } else { | 41 | * use DIFFERENT cache buffers, causing both poor cache utilization |
41 | cs_rc_max = EIP197D_CS_RC_MAX; | 42 | * AND serious coherence/invalidation issues. |
42 | cs_ht_wc = EIP197D_CS_HT_WC; | 43 | */ |
43 | cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC; | 44 | for (i = 0; i < 4; i++) |
44 | cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC; | 45 | writel(0, priv->base + EIP197_FLUE_IFC_LUT(i)); |
46 | |||
47 | /* | ||
48 | * Initialize other virtualization regs for cache | ||
49 | * These may not be in their reset state ... | ||
50 | */ | ||
51 | for (i = 0; i < priv->config.rings; i++) { | ||
52 | writel(0, priv->base + EIP197_FLUE_CACHEBASE_LO(i)); | ||
53 | writel(0, priv->base + EIP197_FLUE_CACHEBASE_HI(i)); | ||
54 | writel(EIP197_FLUE_CONFIG_MAGIC, | ||
55 | priv->base + EIP197_FLUE_CONFIG(i)); | ||
45 | } | 56 | } |
57 | writel(0, priv->base + EIP197_FLUE_OFFSETS); | ||
58 | writel(0, priv->base + EIP197_FLUE_ARC4_OFFSET); | ||
59 | } | ||
46 | 60 | ||
47 | /* Enable the record cache memory access */ | 61 | static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv, |
48 | val = readl(priv->base + EIP197_CS_RAM_CTRL); | 62 | u32 addrmid, int *actbank) |
49 | val &= ~EIP197_TRC_ENABLE_MASK; | 63 | { |
50 | val |= EIP197_TRC_ENABLE_0; | 64 | u32 val; |
51 | writel(val, priv->base + EIP197_CS_RAM_CTRL); | 65 | int curbank; |
66 | |||
67 | curbank = addrmid >> 16; | ||
68 | if (curbank != *actbank) { | ||
69 | val = readl(priv->base + EIP197_CS_RAM_CTRL); | ||
70 | val = (val & ~EIP197_CS_BANKSEL_MASK) | | ||
71 | (curbank << EIP197_CS_BANKSEL_OFS); | ||
72 | writel(val, priv->base + EIP197_CS_RAM_CTRL); | ||
73 | *actbank = curbank; | ||
74 | } | ||
75 | } | ||
52 | 76 | ||
53 | /* Clear all ECC errors */ | 77 | static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv, |
54 | writel(0, priv->base + EIP197_TRC_ECCCTRL); | 78 | int maxbanks, u32 probemask) |
79 | { | ||
80 | u32 val, addrhi, addrlo, addrmid; | ||
81 | int actbank; | ||
55 | 82 | ||
56 | /* | 83 | /* |
57 | * Make sure the cache memory is accessible by taking record cache into | 84 | * And probe the actual size of the physically attached cache data RAM |
58 | * reset. | 85 | * Using a binary subdivision algorithm downto 32 byte cache lines. |
59 | */ | 86 | */ |
60 | val = readl(priv->base + EIP197_TRC_PARAMS); | 87 | addrhi = 1 << (16 + maxbanks); |
61 | val |= EIP197_TRC_PARAMS_SW_RESET; | 88 | addrlo = 0; |
62 | val &= ~EIP197_TRC_PARAMS_DATA_ACCESS; | 89 | actbank = min(maxbanks - 1, 0); |
63 | writel(val, priv->base + EIP197_TRC_PARAMS); | 90 | while ((addrhi - addrlo) > 32) { |
91 | /* write marker to lowest address in top half */ | ||
92 | addrmid = (addrhi + addrlo) >> 1; | ||
93 | eip197_trc_cache_banksel(priv, addrmid, &actbank); | ||
94 | writel((addrmid | (addrlo << 16)) & probemask, | ||
95 | priv->base + EIP197_CLASSIFICATION_RAMS + | ||
96 | (addrmid & 0xffff)); | ||
97 | |||
98 | /* write marker to lowest address in bottom half */ | ||
99 | eip197_trc_cache_banksel(priv, addrlo, &actbank); | ||
100 | writel((addrlo | (addrhi << 16)) & probemask, | ||
101 | priv->base + EIP197_CLASSIFICATION_RAMS + | ||
102 | (addrlo & 0xffff)); | ||
103 | |||
104 | /* read back marker from top half */ | ||
105 | eip197_trc_cache_banksel(priv, addrmid, &actbank); | ||
106 | val = readl(priv->base + EIP197_CLASSIFICATION_RAMS + | ||
107 | (addrmid & 0xffff)); | ||
108 | |||
109 | if (val == ((addrmid | (addrlo << 16)) & probemask)) { | ||
110 | /* read back correct, continue with top half */ | ||
111 | addrlo = addrmid; | ||
112 | } else { | ||
113 | /* not read back correct, continue with bottom half */ | ||
114 | addrhi = addrmid; | ||
115 | } | ||
116 | } | ||
117 | return addrhi; | ||
118 | } | ||
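To make the probe concrete: with maxbanks = 4 the search window starts at addrhi = 1 << 20 (1 MiB of potential data RAM) and halves on every pass, so the loop pins down the real RAM size in log2(2^20 / 32) = 15 iterations, calling eip197_trc_cache_banksel() whenever the midpoint crosses into another 64 KiB bank.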
119 | |||
120 | static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv, | ||
121 | int cs_rc_max, int cs_ht_wc) | ||
122 | { | ||
123 | int i; | ||
124 | u32 htable_offset, val, offset; | ||
64 | 125 | ||
65 | /* Clear all records */ | 126 | /* Clear all records in administration RAM */ |
66 | for (i = 0; i < cs_rc_max; i++) { | 127 | for (i = 0; i < cs_rc_max; i++) { |
67 | u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE; | 128 | offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE; |
68 | 129 | ||
69 | writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) | | 130 | writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) | |
70 | EIP197_CS_RC_PREV(EIP197_RC_NULL), | 131 | EIP197_CS_RC_PREV(EIP197_RC_NULL), |
71 | priv->base + offset); | 132 | priv->base + offset); |
72 | 133 | ||
73 | val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1); | 134 | val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1); |
74 | if (i == 0) | 135 | if (i == 0) |
75 | val |= EIP197_CS_RC_PREV(EIP197_RC_NULL); | 136 | val |= EIP197_CS_RC_PREV(EIP197_RC_NULL); |
76 | else if (i == cs_rc_max - 1) | 137 | else if (i == cs_rc_max - 1) |
77 | val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL); | 138 | val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL); |
78 | writel(val, priv->base + offset + sizeof(u32)); | 139 | writel(val, priv->base + offset + 4); |
140 | /* must also initialize the address key due to ECC! */ | ||
141 | writel(0, priv->base + offset + 8); | ||
142 | writel(0, priv->base + offset + 12); | ||
79 | } | 143 | } |
80 | 144 | ||
81 | /* Clear the hash table entries */ | 145 | /* Clear the hash table entries */ |
82 | htable_offset = cs_rc_max * EIP197_CS_RC_SIZE; | 146 | htable_offset = cs_rc_max * EIP197_CS_RC_SIZE; |
83 | for (i = 0; i < cs_ht_wc; i++) | 147 | for (i = 0; i < cs_ht_wc; i++) |
84 | writel(GENMASK(29, 0), | 148 | writel(GENMASK(29, 0), |
85 | priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32)); | 149 | priv->base + EIP197_CLASSIFICATION_RAMS + |
150 | htable_offset + i * sizeof(u32)); | ||
151 | } | ||
152 | |||
153 | static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv) | ||
154 | { | ||
155 | u32 val, dsize, asize; | ||
156 | int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc; | ||
157 | int cs_rc_abs_max, cs_ht_sz; | ||
158 | int maxbanks; | ||
159 | |||
160 | /* Setup (dummy) virtualization for cache */ | ||
161 | eip197_trc_cache_setupvirt(priv); | ||
162 | |||
163 | /* | ||
164 | * Enable the record cache memory access and | ||
165 | * probe the bank select width | ||
166 | */ | ||
167 | val = readl(priv->base + EIP197_CS_RAM_CTRL); | ||
168 | val &= ~EIP197_TRC_ENABLE_MASK; | ||
169 | val |= EIP197_TRC_ENABLE_0 | EIP197_CS_BANKSEL_MASK; | ||
170 | writel(val, priv->base + EIP197_CS_RAM_CTRL); | ||
171 | val = readl(priv->base + EIP197_CS_RAM_CTRL); | ||
172 | maxbanks = ((val & EIP197_CS_BANKSEL_MASK) >> EIP197_CS_BANKSEL_OFS) + 1; | ||
173 | |||
174 | /* Clear all ECC errors */ | ||
175 | writel(0, priv->base + EIP197_TRC_ECCCTRL); | ||
176 | |||
177 | /* | ||
178 | * Make sure the cache memory is accessible by taking record cache into | ||
179 | * reset. Need data memory access here, not admin access. | ||
180 | */ | ||
181 | val = readl(priv->base + EIP197_TRC_PARAMS); | ||
182 | val |= EIP197_TRC_PARAMS_SW_RESET | EIP197_TRC_PARAMS_DATA_ACCESS; | ||
183 | writel(val, priv->base + EIP197_TRC_PARAMS); | ||
184 | |||
185 | /* Probed data RAM size in bytes */ | ||
186 | dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff); | ||
187 | |||
188 | /* | ||
189 | * Now probe the administration RAM size pretty much the same way | ||
190 | * Except that only the lower 30 bits are writable and we don't need | ||
191 | * bank selects | ||
192 | */ | ||
193 | val = readl(priv->base + EIP197_TRC_PARAMS); | ||
194 | /* admin access now */ | ||
195 | val &= ~(EIP197_TRC_PARAMS_DATA_ACCESS | EIP197_CS_BANKSEL_MASK); | ||
196 | writel(val, priv->base + EIP197_TRC_PARAMS); | ||
197 | |||
198 | /* Probed admin RAM size in admin words */ | ||
199 | asize = eip197_trc_cache_probe(priv, 0, 0xbfffffff) >> 4; | ||
200 | |||
201 | /* Clear any ECC errors detected while probing! */ | ||
202 | writel(0, priv->base + EIP197_TRC_ECCCTRL); | ||
203 | |||
204 | /* | ||
205 | * Determine optimal configuration from RAM sizes | ||
206 | * Note that we assume that the physical RAM configuration is sane | ||
207 | * Therefore, we don't do any parameter error checking here ... | ||
208 | */ | ||
209 | |||
210 | /* For now, just use a single record format covering everything */ | ||
211 | cs_trc_rec_wc = EIP197_CS_TRC_REC_WC; | ||
212 | cs_trc_lg_rec_wc = EIP197_CS_TRC_REC_WC; | ||
213 | |||
214 | /* | ||
215 | * Step #1: How many records will physically fit? | ||
216 | * Hard upper limit is 1023! | ||
217 | */ | ||
218 | cs_rc_abs_max = min_t(uint, ((dsize >> 2) / cs_trc_lg_rec_wc), 1023); | ||
219 | /* Step #2: Need at least 2 words in the admin RAM per record */ | ||
220 | cs_rc_max = min_t(uint, cs_rc_abs_max, (asize >> 1)); | ||
221 | /* Step #3: Determine log2 of hash table size */ | ||
222 | cs_ht_sz = __fls(asize - cs_rc_max) - 2; | ||
223 | /* Step #4: determine current size of hash table in dwords */ | ||
224 | cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */ | ||
225 | /* Step #5: add back excess words and see if we can fit more records */ | ||
226 | cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 4)); | ||
227 | |||
228 | /* Clear the cache RAMs */ | ||
229 | eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc); | ||
86 | 230 | ||
87 | /* Disable the record cache memory access */ | 231 | /* Disable the record cache memory access */ |
88 | val = readl(priv->base + EIP197_CS_RAM_CTRL); | 232 | val = readl(priv->base + EIP197_CS_RAM_CTRL); |
@@ -102,119 +246,231 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv) | |||
102 | /* Configure the record cache #2 */ | 246 | /* Configure the record cache #2 */ |
103 | val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) | | 247 | val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) | |
104 | EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) | | 248 | EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) | |
105 | EIP197_TRC_PARAMS_HTABLE_SZ(2); | 249 | EIP197_TRC_PARAMS_HTABLE_SZ(cs_ht_sz); |
106 | writel(val, priv->base + EIP197_TRC_PARAMS); | 250 | writel(val, priv->base + EIP197_TRC_PARAMS); |
251 | |||
252 | dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n", | ||
253 | dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc); | ||
107 | } | 254 | } |
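Plugging assumed numbers into steps #1-#5 (taking EIP197_CS_TRC_REC_WC as 64 words): probes returning dsize = 65536 bytes and asize = 2048 admin words give cs_rc_abs_max = min(16384 / 64, 1023) = 256 records; step #2 keeps cs_rc_max = min(256, 1024) = 256; step #3 yields cs_ht_sz = __fls(2048 - 256) - 2 = 8, so step #4 sizes the hash table at cs_ht_wc = 16 << 8 = 4096 dwords; and step #5 confirms cs_rc_max = min(256, 2048 - 256) = 256.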
108 | 255 | ||
109 | static void eip197_write_firmware(struct safexcel_crypto_priv *priv, | 256 | static void eip197_init_firmware(struct safexcel_crypto_priv *priv) |
110 | const struct firmware *fw, int pe, u32 ctrl, | ||
111 | u32 prog_en) | ||
112 | { | 257 | { |
113 | const u32 *data = (const u32 *)fw->data; | 258 | int pe, i; |
114 | u32 val; | 259 | u32 val; |
115 | int i; | ||
116 | 260 | ||
117 | /* Reset the engine to make its program memory accessible */ | 261 | for (pe = 0; pe < priv->config.pes; pe++) { |
118 | writel(EIP197_PE_ICE_x_CTRL_SW_RESET | | 262 | /* Configure the token FIFO's */ |
119 | EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR | | 263 | writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe)); |
120 | EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR, | 264 | writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe)); |
121 | EIP197_PE(priv) + ctrl); | ||
122 | 265 | ||
123 | /* Enable access to the program memory */ | 266 | /* Clear the ICE scratchpad memory */ |
124 | writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe)); | 267 | val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe)); |
268 | val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER | | ||
269 | EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN | | ||
270 | EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS | | ||
271 | EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS; | ||
272 | writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe)); | ||
273 | |||
274 | /* clear the scratchpad RAM using 32 bit writes only */ | ||
275 | for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++) | ||
276 | writel(0, EIP197_PE(priv) + | ||
277 | EIP197_PE_ICE_SCRATCH_RAM(pe) + (i << 2)); | ||
278 | |||
279 | /* Reset the IFPP engine to make its program mem accessible */ | ||
280 | writel(EIP197_PE_ICE_x_CTRL_SW_RESET | | ||
281 | EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR | | ||
282 | EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR, | ||
283 | EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe)); | ||
284 | |||
285 | /* Reset the IPUE engine to make its program mem accessible */ | ||
286 | writel(EIP197_PE_ICE_x_CTRL_SW_RESET | | ||
287 | EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR | | ||
288 | EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR, | ||
289 | EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe)); | ||
290 | |||
291 | /* Enable access to all IFPP program memories */ | ||
292 | writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN, | ||
293 | EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe)); | ||
294 | } | ||
295 | |||
296 | } | ||
297 | |||
298 | static int eip197_write_firmware(struct safexcel_crypto_priv *priv, | ||
299 | const struct firmware *fw) | ||
300 | { | ||
301 | const u32 *data = (const u32 *)fw->data; | ||
302 | int i; | ||
125 | 303 | ||
126 | /* Write the firmware */ | 304 | /* Write the firmware */ |
127 | for (i = 0; i < fw->size / sizeof(u32); i++) | 305 | for (i = 0; i < fw->size / sizeof(u32); i++) |
128 | writel(be32_to_cpu(data[i]), | 306 | writel(be32_to_cpu(data[i]), |
129 | priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32)); | 307 | priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32)); |
130 | 308 | ||
131 | /* Disable access to the program memory */ | 309 | /* Exclude final 2 NOPs from size */ |
132 | writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe)); | 310 | return i - EIP197_FW_TERMINAL_NOPS; |
311 | } | ||
312 | |||
313 | /* | ||
314 | * If FW is actual production firmware, then poll for its initialization | ||
315 | * to complete and check if it is good for the HW, otherwise just return OK. | ||
316 | */ | ||
317 | static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp) | ||
318 | { | ||
319 | int pe, pollcnt; | ||
320 | u32 base, pollofs; | ||
133 | 321 | ||
134 | /* Release engine from reset */ | 322 | if (fpp) |
135 | val = readl(EIP197_PE(priv) + ctrl); | 323 | pollofs = EIP197_FW_FPP_READY; |
136 | val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET; | 324 | else |
137 | writel(val, EIP197_PE(priv) + ctrl); | 325 | pollofs = EIP197_FW_PUE_READY; |
326 | |||
327 | for (pe = 0; pe < priv->config.pes; pe++) { | ||
328 | base = EIP197_PE_ICE_SCRATCH_RAM(pe); | ||
329 | pollcnt = EIP197_FW_START_POLLCNT; | ||
330 | while (pollcnt && | ||
331 | (readl_relaxed(EIP197_PE(priv) + base + | ||
332 | pollofs) != 1)) { | ||
333 | pollcnt--; | ||
334 | } | ||
335 | if (!pollcnt) { | ||
336 | dev_err(priv->dev, "FW(%d) for PE %d failed to start\n", | ||
337 | fpp, pe); | ||
338 | return false; | ||
339 | } | ||
340 | } | ||
341 | return true; | ||
342 | } | ||
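The readiness check is a plain bounded spin on a scratchpad word the firmware sets to 1; there is no timer, just a fixed poll budget. The pattern in isolation (the callback and budget are illustrative, not driver API):

#include <stdbool.h>
#include <stdint.h>

/* Spin until read_status() returns 1 or the poll budget is exhausted */
static bool wait_ready(uint32_t (*read_status)(void *ctx), void *ctx,
		       unsigned int budget)
{
	while (budget--) {
		if (read_status(ctx) == 1)
			return true;
	}
	return false; /* caller logs which engine failed to start */
}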
343 | |||
344 | static bool eip197_start_firmware(struct safexcel_crypto_priv *priv, | ||
345 | int ipuesz, int ifppsz, int minifw) | ||
346 | { | ||
347 | int pe; | ||
348 | u32 val; | ||
349 | |||
350 | for (pe = 0; pe < priv->config.pes; pe++) { | ||
351 | /* Disable access to all program memory */ | ||
352 | writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe)); | ||
353 | |||
354 | /* Start IFPP microengines */ | ||
355 | if (minifw) | ||
356 | val = 0; | ||
357 | else | ||
358 | val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) & | ||
359 | EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) | | ||
360 | EIP197_PE_ICE_UENG_DEBUG_RESET; | ||
361 | writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe)); | ||
362 | |||
363 | /* Start IPUE microengines */ | ||
364 | if (minifw) | ||
365 | val = 0; | ||
366 | else | ||
367 | val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) & | ||
368 | EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) | | ||
369 | EIP197_PE_ICE_UENG_DEBUG_RESET; | ||
370 | writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe)); | ||
371 | } | ||
372 | |||
373 | /* For miniFW startup, there is no initialization, so always succeed */ | ||
374 | if (minifw) | ||
375 | return true; | ||
376 | |||
377 | /* Wait until all the firmwares have properly started up */ | ||
378 | if (!poll_fw_ready(priv, 1)) | ||
379 | return false; | ||
380 | if (!poll_fw_ready(priv, 0)) | ||
381 | return false; | ||
382 | |||
383 | return true; | ||
138 | } | 384 | } |
139 | 385 | ||
140 | static int eip197_load_firmwares(struct safexcel_crypto_priv *priv) | 386 | static int eip197_load_firmwares(struct safexcel_crypto_priv *priv) |
141 | { | 387 | { |
142 | const char *fw_name[] = {"ifpp.bin", "ipue.bin"}; | 388 | const char *fw_name[] = {"ifpp.bin", "ipue.bin"}; |
143 | const struct firmware *fw[FW_NB]; | 389 | const struct firmware *fw[FW_NB]; |
144 | char fw_path[31], *dir = NULL; | 390 | char fw_path[37], *dir = NULL; |
145 | int i, j, ret = 0, pe; | 391 | int i, j, ret = 0, pe; |
146 | u32 val; | 392 | int ipuesz, ifppsz, minifw = 0; |
147 | 393 | ||
148 | switch (priv->version) { | 394 | if (priv->version == EIP197D_MRVL) |
149 | case EIP197B: | ||
150 | dir = "eip197b"; | ||
151 | break; | ||
152 | case EIP197D: | ||
153 | dir = "eip197d"; | 395 | dir = "eip197d"; |
154 | break; | 396 | else if (priv->version == EIP197B_MRVL || |
155 | default: | 397 | priv->version == EIP197_DEVBRD) |
156 | /* No firmware is required */ | 398 | dir = "eip197b"; |
157 | return 0; | 399 | else |
158 | } | 400 | return -ENODEV; |
159 | 401 | ||
402 | retry_fw: | ||
160 | for (i = 0; i < FW_NB; i++) { | 403 | for (i = 0; i < FW_NB; i++) { |
161 | snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]); | 404 | snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]); |
162 | ret = request_firmware(&fw[i], fw_path, priv->dev); | 405 | ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev); |
163 | if (ret) { | 406 | if (ret) { |
164 | if (priv->version != EIP197B) | 407 | if (minifw || priv->version != EIP197B_MRVL) |
165 | goto release_fw; | 408 | goto release_fw; |
166 | 409 | ||
167 | /* Fallback to the old firmware location for the | 410 | /* Fallback to the old firmware location for the |
168 | * EIP197b. | 411 | * EIP197b. |
169 | */ | 412 | */ |
170 | ret = request_firmware(&fw[i], fw_name[i], priv->dev); | 413 | ret = firmware_request_nowarn(&fw[i], fw_name[i], |
171 | if (ret) { | 414 | priv->dev); |
172 | dev_err(priv->dev, | 415 | if (ret) |
173 | "Failed to request firmware %s (%d)\n", | ||
174 | fw_name[i], ret); | ||
175 | goto release_fw; | 416 | goto release_fw; |
176 | } | ||
177 | } | 417 | } |
178 | } | 418 | } |
179 | 419 | ||
180 | for (pe = 0; pe < priv->config.pes; pe++) { | 420 | eip197_init_firmware(priv); |
181 | /* Clear the scratchpad memory */ | 421 | |
182 | val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe)); | 422 | ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]); |
183 | val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER | | ||
184 | EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN | | ||
185 | EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS | | ||
186 | EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS; | ||
187 | writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe)); | ||
188 | 423 | ||
189 | memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0, | 424 | /* Enable access to IPUE program memories */ |
190 | EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32)); | 425 | for (pe = 0; pe < priv->config.pes; pe++) |
426 | writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN, | ||
427 | EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe)); | ||
191 | 428 | ||
192 | eip197_write_firmware(priv, fw[FW_IFPP], pe, | 429 | ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]); |
193 | EIP197_PE_ICE_FPP_CTRL(pe), | ||
194 | EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN); | ||
195 | 430 | ||
196 | eip197_write_firmware(priv, fw[FW_IPUE], pe, | 431 | if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) { |
197 | EIP197_PE_ICE_PUE_CTRL(pe), | 432 | dev_dbg(priv->dev, "Firmware loaded successfully\n"); |
198 | EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN); | 433 | return 0; |
199 | } | 434 | } |
200 | 435 | ||
436 | ret = -ENODEV; | ||
437 | |||
201 | release_fw: | 438 | release_fw: |
202 | for (j = 0; j < i; j++) | 439 | for (j = 0; j < i; j++) |
203 | release_firmware(fw[j]); | 440 | release_firmware(fw[j]); |
204 | 441 | ||
442 | if (!minifw) { | ||
443 | /* Retry with minifw path */ | ||
444 | dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n"); | ||
445 | dir = "eip197_minifw"; | ||
446 | minifw = 1; | ||
447 | goto retry_fw; | ||
448 | } | ||
449 | |||
450 | dev_dbg(priv->dev, "Firmware load failed.\n"); | ||
451 | |||
205 | return ret; | 452 | return ret; |
206 | } | 453 | } |
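The loader now walks a three-step fallback ladder: the versioned directory, then the legacy flat filename (EIP197B only), and finally the eip197_minifw set that leaves the engine in BCLA mode without classification support. A condensed user-space model of the ladder, with plain file probing standing in for firmware_request_nowarn() and directory names as in this hunk:

#include <stdio.h>

/* Stand-in for firmware_request_nowarn(): probe whether the file exists */
static int try_fw(const char *path)
{
	FILE *f = fopen(path, "rb");

	if (!f)
		return -1;
	fclose(f);
	return 0;
}

/* Resolve 'name' via versioned dir -> legacy flat path -> miniFW dir */
static int pick_fw(char *path, size_t len, const char *dir,
		   const char *name, int allow_legacy)
{
	snprintf(path, len, "inside-secure/%s/%s", dir, name);
	if (!try_fw(path))
		return 0;
	if (allow_legacy) { /* old EIP197B images shipped without a dir */
		snprintf(path, len, "%s", name);
		if (!try_fw(path))
			return 0;
	}
	snprintf(path, len, "inside-secure/eip197_minifw/%s", name);
	return try_fw(path); /* miniFW hit => engine runs in BCLA mode */
}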
207 | 454 | ||
208 | static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv) | 455 | static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv) |
209 | { | 456 | { |
210 | u32 hdw, cd_size_rnd, val; | 457 | u32 cd_size_rnd, val; |
211 | int i; | 458 | int i, cd_fetch_cnt; |
212 | 459 | ||
213 | hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); | 460 | cd_size_rnd = (priv->config.cd_size + |
214 | hdw &= GENMASK(27, 25); | 461 | (BIT(priv->hwconfig.hwdataw) - 1)) >> |
215 | hdw >>= 25; | 462 | priv->hwconfig.hwdataw; |
216 | 463 | /* determine number of CDs we can fetch into the CD FIFO as one block */ | |
217 | cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw; | 464 | if (priv->flags & SAFEXCEL_HW_EIP197) { |
465 | /* EIP197: try to fetch enough in 1 go to keep all pipes busy */ | ||
466 | cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd; | ||
467 | cd_fetch_cnt = min_t(uint, cd_fetch_cnt, | ||
468 | (priv->config.pes * EIP197_FETCH_DEPTH)); | ||
469 | } else { | ||
470 | /* for the EIP97, just fetch all that fits minus 1 */ | ||
471 | cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) / | ||
472 | cd_size_rnd) - 1; | ||
473 | } | ||
218 | 474 | ||
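This replaces the old hard-coded fetch count of 1: the descriptor is first rounded up to a whole number of bus words, then the burst size is however many descriptors fit the command FIFO, capped at EIP197_FETCH_DEPTH per pipe on the EIP197. The calculation on its own (the parameter values in main() are hypothetical):

#include <stdio.h>

#define FETCH_DEPTH 2 /* max bursts outstanding per pipe (EIP197) */

static unsigned int min_uint(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/*
 * cd_size: descriptor size in words; hwdataw: log2 of the bus width in
 * words; hwcfsize: log2 of the command FIFO size; pes: pipe count.
 */
static unsigned int cd_fetch_count(unsigned int cd_size, unsigned int hwdataw,
				   unsigned int hwcfsize, unsigned int pes,
				   int is_eip197)
{
	unsigned int cd_size_rnd = (cd_size + (1u << hwdataw) - 1) >> hwdataw;

	if (is_eip197) /* enough to keep every pipe busy, but no more */
		return min_uint((1u << hwcfsize) / cd_size_rnd,
				pes * FETCH_DEPTH);
	return (1u << hwcfsize) / cd_size_rnd - 1; /* EIP97: all but one */
}

int main(void)
{
	printf("fetch %u descriptors per burst\n",
	       cd_fetch_count(8, 2, 5, 2, 1)); /* 8-word CDs, 2 pipes */
	return 0;
}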
219 | for (i = 0; i < priv->config.rings; i++) { | 475 | for (i = 0; i < priv->config.rings; i++) { |
220 | /* ring base address */ | 476 | /* ring base address */ |
@@ -226,8 +482,9 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv) | |||
226 | writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) | | 482 | writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) | |
227 | priv->config.cd_size, | 483 | priv->config.cd_size, |
228 | EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE); | 484 | EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE); |
229 | writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) | | 485 | writel(((cd_fetch_cnt * |
230 | (EIP197_FETCH_COUNT * priv->config.cd_offset), | 486 | (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) | |
487 | (cd_fetch_cnt * priv->config.cd_offset), | ||
231 | EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG); | 488 | EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG); |
232 | 489 | ||
233 | /* Configure DMA tx control */ | 490 | /* Configure DMA tx control */ |
@@ -245,14 +502,23 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv) | |||
245 | 502 | ||
246 | static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv) | 503 | static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv) |
247 | { | 504 | { |
248 | u32 hdw, rd_size_rnd, val; | 505 | u32 rd_size_rnd, val; |
249 | int i; | 506 | int i, rd_fetch_cnt; |
250 | 507 | ||
251 | hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); | 508 | /* determine number of RDs we can fetch into the FIFO as one block */ |
252 | hdw &= GENMASK(27, 25); | 509 | rd_size_rnd = (EIP197_RD64_FETCH_SIZE + |
253 | hdw >>= 25; | 510 | (BIT(priv->hwconfig.hwdataw) - 1)) >> |
254 | 511 | priv->hwconfig.hwdataw; | |
255 | rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw; | 512 | if (priv->flags & SAFEXCEL_HW_EIP197) { |
513 | /* EIP197: try to fetch enough in 1 go to keep all pipes busy */ | ||
514 | rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd; | ||
515 | rd_fetch_cnt = min_t(uint, rd_fetch_cnt, | ||
516 | (priv->config.pes * EIP197_FETCH_DEPTH)); | ||
517 | } else { | ||
518 | /* for the EIP97, just fetch all that fits minus 1 */ | ||
519 | rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) / | ||
520 | rd_size_rnd) - 1; | ||
521 | } | ||
256 | 522 | ||
257 | for (i = 0; i < priv->config.rings; i++) { | 523 | for (i = 0; i < priv->config.rings; i++) { |
258 | /* ring base address */ | 524 | /* ring base address */ |
@@ -265,8 +531,9 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv) | |||
265 | priv->config.rd_size, | 531 | priv->config.rd_size, |
266 | EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE); | 532 | EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE); |
267 | 533 | ||
268 | writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) | | 534 | writel(((rd_fetch_cnt * |
269 | (EIP197_FETCH_COUNT * priv->config.rd_offset), | 535 | (rd_size_rnd << priv->hwconfig.hwdataw)) << 16) | |
536 | (rd_fetch_cnt * priv->config.rd_offset), | ||
270 | EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG); | 537 | EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG); |
271 | 538 | ||
272 | /* Configure DMA tx control */ | 539 | /* Configure DMA tx control */ |
@@ -291,23 +558,21 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv) | |||
291 | 558 | ||
292 | static int safexcel_hw_init(struct safexcel_crypto_priv *priv) | 559 | static int safexcel_hw_init(struct safexcel_crypto_priv *priv) |
293 | { | 560 | { |
294 | u32 version, val; | 561 | u32 val; |
295 | int i, ret, pe; | 562 | int i, ret, pe; |
296 | 563 | ||
297 | /* Determine endianess and configure byte swap */ | 564 | dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n", |
298 | version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION); | 565 | priv->config.pes, priv->config.rings); |
299 | val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); | ||
300 | 566 | ||
301 | if ((version & 0xffff) == EIP197_HIA_VERSION_BE) | 567 | /* |
302 | val |= EIP197_MST_CTRL_BYTE_SWAP; | 568 | * For EIP197's only set maximum number of TX commands to 2^5 = 32 |
303 | else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE) | 569 | * Skip for the EIP97 as it does not have this field. |
304 | val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24); | 570 | */ |
305 | 571 | if (priv->flags & SAFEXCEL_HW_EIP197) { | |
306 | /* For EIP197 set maximum number of TX commands to 2^5 = 32 */ | 572 | val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); |
307 | if (priv->version == EIP197B || priv->version == EIP197D) | ||
308 | val |= EIP197_MST_CTRL_TX_MAX_CMD(5); | 573 | val |= EIP197_MST_CTRL_TX_MAX_CMD(5); |
309 | 574 | writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); | |
310 | writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); | 575 | } |
311 | 576 | ||
312 | /* Configure wr/rd cache values */ | 577 | /* Configure wr/rd cache values */ |
313 | writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) | | 578 | writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) | |
@@ -330,11 +595,10 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) | |||
330 | writel(EIP197_DxE_THR_CTRL_RESET_PE, | 595 | writel(EIP197_DxE_THR_CTRL_RESET_PE, |
331 | EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe)); | 596 | EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe)); |
332 | 597 | ||
333 | if (priv->version == EIP197B || priv->version == EIP197D) { | 598 | if (priv->flags & SAFEXCEL_HW_EIP197) |
334 | /* Reset HIA input interface arbiter */ | 599 | /* Reset HIA input interface arbiter (EIP197 only) */ |
335 | writel(EIP197_HIA_RA_PE_CTRL_RESET, | 600 | writel(EIP197_HIA_RA_PE_CTRL_RESET, |
336 | EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe)); | 601 | EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe)); |
337 | } | ||
338 | 602 | ||
339 | /* DMA transfer size to use */ | 603 | /* DMA transfer size to use */ |
340 | val = EIP197_HIA_DFE_CFG_DIS_DEBUG; | 604 | val = EIP197_HIA_DFE_CFG_DIS_DEBUG; |
@@ -357,12 +621,11 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) | |||
357 | EIP197_PE_IN_xBUF_THRES_MAX(7), | 621 | EIP197_PE_IN_xBUF_THRES_MAX(7), |
358 | EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe)); | 622 | EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe)); |
359 | 623 | ||
360 | if (priv->version == EIP197B || priv->version == EIP197D) { | 624 | if (priv->flags & SAFEXCEL_HW_EIP197) |
361 | /* enable HIA input interface arbiter and rings */ | 625 | /* enable HIA input interface arbiter and rings */ |
362 | writel(EIP197_HIA_RA_PE_CTRL_EN | | 626 | writel(EIP197_HIA_RA_PE_CTRL_EN | |
363 | GENMASK(priv->config.rings - 1, 0), | 627 | GENMASK(priv->config.rings - 1, 0), |
364 | EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe)); | 628 | EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe)); |
365 | } | ||
366 | 629 | ||
367 | /* Data Store Engine configuration */ | 630 | /* Data Store Engine configuration */ |
368 | 631 | ||
@@ -381,10 +644,10 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) | |||
381 | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8); | 644 | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8); |
382 | val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS); | 645 | val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS); |
383 | val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE; | 646 | val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE; |
384 | /* FIXME: instability issues can occur for EIP97 but disabling it impact | 647 | /* FIXME: instability issues can occur for EIP97 but disabling |
385 | * performances. | 648 | * it impacts performance. |
386 | */ | 649 | */ |
387 | if (priv->version == EIP197B || priv->version == EIP197D) | 650 | if (priv->flags & SAFEXCEL_HW_EIP197) |
388 | val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR; | 651 | val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR; |
389 | writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe)); | 652 | writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe)); |
390 | 653 | ||
@@ -400,21 +663,15 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) | |||
400 | 663 | ||
401 | /* Token & context configuration */ | 664 | /* Token & context configuration */ |
402 | val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES | | 665 | val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES | |
403 | EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX | | 666 | EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT | |
404 | EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX; | 667 | EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT; |
405 | writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe)); | 668 | writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe)); |
406 | 669 | ||
407 | /* H/W capabilities selection */ | 670 | /* H/W capabilities selection: just enable everything */ |
408 | val = EIP197_FUNCTION_RSVD; | 671 | writel(EIP197_FUNCTION_ALL, |
409 | val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY; | 672 | EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe)); |
410 | val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT; | 673 | writel(EIP197_FUNCTION_ALL, |
411 | val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC; | 674 | EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe)); |
412 | val |= EIP197_ALG_3DES_ECB | EIP197_ALG_3DES_CBC; | ||
413 | val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC; | ||
414 | val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5; | ||
415 | val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1; | ||
416 | val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2; | ||
417 | writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe)); | ||
418 | } | 675 | } |
419 | 676 | ||
420 | /* Command Descriptor Rings prepare */ | 677 | /* Command Descriptor Rings prepare */ |
@@ -479,8 +736,9 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) | |||
479 | /* Clear any HIA interrupt */ | 736 | /* Clear any HIA interrupt */ |
480 | writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK); | 737 | writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK); |
481 | 738 | ||
482 | if (priv->version == EIP197B || priv->version == EIP197D) { | 739 | if (priv->flags & SAFEXCEL_HW_EIP197) { |
483 | eip197_trc_cache_init(priv); | 740 | eip197_trc_cache_init(priv); |
741 | priv->flags |= EIP197_TRC_CACHE; | ||
484 | 742 | ||
485 | ret = eip197_load_firmwares(priv); | 743 | ret = eip197_load_firmwares(priv); |
486 | if (ret) | 744 | if (ret) |
@@ -589,16 +847,32 @@ finalize: | |||
589 | inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv, | 847 | inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv, |
590 | struct safexcel_result_desc *rdesc) | 848 | struct safexcel_result_desc *rdesc) |
591 | { | 849 | { |
592 | if (likely(!rdesc->result_data.error_code)) | 850 | if (likely((!rdesc->descriptor_overflow) && |
851 | (!rdesc->buffer_overflow) && | ||
852 | (!rdesc->result_data.error_code))) | ||
593 | return 0; | 853 | return 0; |
594 | 854 | ||
595 | if (rdesc->result_data.error_code & 0x407f) { | 855 | if (rdesc->descriptor_overflow) |
596 | /* Fatal error (bits 0-7, 14) */ | 856 | dev_err(priv->dev, "Descriptor overflow detected\n"); |
857 | |||
858 | if (rdesc->buffer_overflow) | ||
859 | dev_err(priv->dev, "Buffer overflow detected"); | ||
860 | |||
861 | if (rdesc->result_data.error_code & 0x4066) { | ||
862 | /* Fatal error (bits 1,2,5,6 & 14) */ | ||
597 | dev_err(priv->dev, | 863 | dev_err(priv->dev, |
598 | "cipher: result: result descriptor error (0x%x)\n", | 864 | "result descriptor error (%x)", |
599 | rdesc->result_data.error_code); | 865 | rdesc->result_data.error_code); |
866 | return -EIO; | ||
867 | } else if (rdesc->result_data.error_code & | ||
868 | (BIT(7) | BIT(4) | BIT(3) | BIT(0))) { | ||
869 | /* | ||
870 | * Give priority over authentication fails: | ||
871 | * Blocksize, length & overflow errors, | ||
872 | * something wrong with the input! | ||
873 | */ | ||
600 | return -EINVAL; | 874 | return -EINVAL; |
601 | } else if (rdesc->result_data.error_code == BIT(9)) { | 875 | } else if (rdesc->result_data.error_code & BIT(9)) { |
602 | /* Authentication failed */ | 876 | /* Authentication failed */ |
603 | return -EBADMSG; | 877 | return -EBADMSG; |
604 | } | 878 | } |
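The triage order here is deliberate: overflows are logged but not fatal on their own, hard engine errors (bits 1, 2, 5 and 6, plus 14) trump everything, and input-formatting errors (bits 0, 3, 4 and 7) are reported before the authentication failure they may themselves have caused. The same decision table, standalone:

#include <errno.h>
#include <stdint.h>

#define ERR_FATAL_MASK	0x4066u				/* bits 1,2,5,6 & 14 */
#define ERR_INPUT_MASK	((1u << 7) | (1u << 4) | (1u << 3) | (1u << 0))
#define ERR_AUTH_FAIL	(1u << 9)

static int triage(uint32_t error_code)
{
	if (!error_code)
		return 0;
	if (error_code & ERR_FATAL_MASK)
		return -EIO;	/* engine-level failure */
	if (error_code & ERR_INPUT_MASK)
		return -EINVAL;	/* block size/length/overflow: bad input */
	if (error_code & ERR_AUTH_FAIL)
		return -EBADMSG; /* only flagged when the input was sane */
	return 0;
}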
@@ -711,7 +985,8 @@ handle_results: | |||
711 | ndesc = ctx->handle_result(priv, ring, req, | 985 | ndesc = ctx->handle_result(priv, ring, req, |
712 | &should_complete, &ret); | 986 | &should_complete, &ret); |
713 | if (ndesc < 0) { | 987 | if (ndesc < 0) { |
714 | dev_err(priv->dev, "failed to handle result (%d)", ndesc); | 988 | dev_err(priv->dev, "failed to handle result (%d)\n", |
989 | ndesc); | ||
715 | goto acknowledge; | 990 | goto acknowledge; |
716 | } | 991 | } |
717 | 992 | ||
@@ -783,7 +1058,7 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data) | |||
783 | * reinitialized. This should not happen under | 1058 | * reinitialized. This should not happen under |
784 | * normal circumstances. | 1059 | * normal circumstances. |
785 | */ | 1060 | */ |
786 | dev_err(priv->dev, "RDR: fatal error."); | 1061 | dev_err(priv->dev, "RDR: fatal error.\n"); |
787 | } else if (likely(stat & EIP197_xDR_THRESH)) { | 1062 | } else if (likely(stat & EIP197_xDR_THRESH)) { |
788 | rc = IRQ_WAKE_THREAD; | 1063 | rc = IRQ_WAKE_THREAD; |
789 | } | 1064 | } |
@@ -813,23 +1088,45 @@ static irqreturn_t safexcel_irq_ring_thread(int irq, void *data) | |||
813 | return IRQ_HANDLED; | 1088 | return IRQ_HANDLED; |
814 | } | 1089 | } |
815 | 1090 | ||
816 | static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name, | 1091 | static int safexcel_request_ring_irq(void *pdev, int irqid, |
1092 | int is_pci_dev, | ||
817 | irq_handler_t handler, | 1093 | irq_handler_t handler, |
818 | irq_handler_t threaded_handler, | 1094 | irq_handler_t threaded_handler, |
819 | struct safexcel_ring_irq_data *ring_irq_priv) | 1095 | struct safexcel_ring_irq_data *ring_irq_priv) |
820 | { | 1096 | { |
821 | int ret, irq = platform_get_irq_byname(pdev, name); | 1097 | int ret, irq; |
1098 | struct device *dev; | ||
1099 | |||
1100 | if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) { | ||
1101 | struct pci_dev *pci_pdev = pdev; | ||
1102 | |||
1103 | dev = &pci_pdev->dev; | ||
1104 | irq = pci_irq_vector(pci_pdev, irqid); | ||
1105 | if (irq < 0) { | ||
1106 | dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n", | ||
1107 | irqid, irq); | ||
1108 | return irq; | ||
1109 | } | ||
1110 | } else if (IS_ENABLED(CONFIG_OF)) { | ||
1111 | struct platform_device *plf_pdev = pdev; | ||
1112 | char irq_name[6] = {0}; /* "ringX\0" */ | ||
1113 | |||
1114 | snprintf(irq_name, 6, "ring%d", irqid); | ||
1115 | dev = &plf_pdev->dev; | ||
1116 | irq = platform_get_irq_byname(plf_pdev, irq_name); | ||
822 | 1117 | ||
823 | if (irq < 0) { | 1118 | if (irq < 0) { |
824 | dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name); | 1119 | dev_err(dev, "unable to get IRQ '%s' (err %d)\n", |
825 | return irq; | 1120 | irq_name, irq); |
1121 | return irq; | ||
1122 | } | ||
826 | } | 1123 | } |
827 | 1124 | ||
828 | ret = devm_request_threaded_irq(&pdev->dev, irq, handler, | 1125 | ret = devm_request_threaded_irq(dev, irq, handler, |
829 | threaded_handler, IRQF_ONESHOT, | 1126 | threaded_handler, IRQF_ONESHOT, |
830 | dev_name(&pdev->dev), ring_irq_priv); | 1127 | dev_name(dev), ring_irq_priv); |
831 | if (ret) { | 1128 | if (ret) { |
832 | dev_err(&pdev->dev, "unable to request IRQ %d\n", irq); | 1129 | dev_err(dev, "unable to request IRQ %d\n", irq); |
833 | return ret; | 1130 | return ret; |
834 | } | 1131 | } |
835 | 1132 | ||
@@ -843,6 +1140,9 @@ static struct safexcel_alg_template *safexcel_algs[] = { | |||
843 | &safexcel_alg_cbc_des3_ede, | 1140 | &safexcel_alg_cbc_des3_ede, |
844 | &safexcel_alg_ecb_aes, | 1141 | &safexcel_alg_ecb_aes, |
845 | &safexcel_alg_cbc_aes, | 1142 | &safexcel_alg_cbc_aes, |
1143 | &safexcel_alg_cfb_aes, | ||
1144 | &safexcel_alg_ofb_aes, | ||
1145 | &safexcel_alg_ctr_aes, | ||
846 | &safexcel_alg_md5, | 1146 | &safexcel_alg_md5, |
847 | &safexcel_alg_sha1, | 1147 | &safexcel_alg_sha1, |
848 | &safexcel_alg_sha224, | 1148 | &safexcel_alg_sha224, |
@@ -860,6 +1160,15 @@ static struct safexcel_alg_template *safexcel_algs[] = { | |||
860 | &safexcel_alg_authenc_hmac_sha256_cbc_aes, | 1160 | &safexcel_alg_authenc_hmac_sha256_cbc_aes, |
861 | &safexcel_alg_authenc_hmac_sha384_cbc_aes, | 1161 | &safexcel_alg_authenc_hmac_sha384_cbc_aes, |
862 | &safexcel_alg_authenc_hmac_sha512_cbc_aes, | 1162 | &safexcel_alg_authenc_hmac_sha512_cbc_aes, |
1163 | &safexcel_alg_authenc_hmac_sha1_cbc_des3_ede, | ||
1164 | &safexcel_alg_authenc_hmac_sha1_ctr_aes, | ||
1165 | &safexcel_alg_authenc_hmac_sha224_ctr_aes, | ||
1166 | &safexcel_alg_authenc_hmac_sha256_ctr_aes, | ||
1167 | &safexcel_alg_authenc_hmac_sha384_ctr_aes, | ||
1168 | &safexcel_alg_authenc_hmac_sha512_ctr_aes, | ||
1169 | &safexcel_alg_xts_aes, | ||
1170 | &safexcel_alg_gcm, | ||
1171 | &safexcel_alg_ccm, | ||
863 | }; | 1172 | }; |
864 | 1173 | ||
865 | static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) | 1174 | static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) |
@@ -869,7 +1178,10 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) | |||
869 | for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) { | 1178 | for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) { |
870 | safexcel_algs[i]->priv = priv; | 1179 | safexcel_algs[i]->priv = priv; |
871 | 1180 | ||
872 | if (!(safexcel_algs[i]->engines & priv->version)) | 1181 | /* Do we have all required base algorithms available? */ |
1182 | if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) != | ||
1183 | safexcel_algs[i]->algo_mask) | ||
1184 | /* No, so don't register this ciphersuite */ | ||
873 | continue; | 1185 | continue; |
874 | 1186 | ||
875 | if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) | 1187 | if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) |
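The new registration filter is a straightforward subset test: a ciphersuite is registered only if every base algorithm it needs appears in the option flags the EIP96 reported. The idiom, for reference:

#include <stdbool.h>
#include <stdint.h>

/* true iff every bit set in 'required' is also set in 'available' */
static bool algos_available(uint32_t required, uint32_t available)
{
	return (required & available) == required;
}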
@@ -887,7 +1199,10 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) | |||
887 | 1199 | ||
888 | fail: | 1200 | fail: |
889 | for (j = 0; j < i; j++) { | 1201 | for (j = 0; j < i; j++) { |
890 | if (!(safexcel_algs[j]->engines & priv->version)) | 1202 | /* Do we have all required base algorithms available? */ |
1203 | if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) != | ||
1204 | safexcel_algs[j]->algo_mask) | ||
1205 | /* No, so don't unregister this ciphersuite */ | ||
891 | continue; | 1206 | continue; |
892 | 1207 | ||
893 | if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) | 1208 | if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) |
@@ -906,7 +1221,10 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv) | |||
906 | int i; | 1221 | int i; |
907 | 1222 | ||
908 | for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) { | 1223 | for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) { |
909 | if (!(safexcel_algs[i]->engines & priv->version)) | 1224 | /* Do we have all required base algorithms available? */ |
1225 | if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) != | ||
1226 | safexcel_algs[i]->algo_mask) | ||
1227 | /* No, so don't unregister this ciphersuite */ | ||
910 | continue; | 1228 | continue; |
911 | 1229 | ||
912 | if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) | 1230 | if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) |
@@ -925,22 +1243,20 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv) | |||
925 | val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); | 1243 | val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); |
926 | 1244 | ||
927 | /* Read number of PEs from the engine */ | 1245 | /* Read number of PEs from the engine */ |
928 | switch (priv->version) { | 1246 | if (priv->flags & SAFEXCEL_HW_EIP197) |
929 | case EIP197B: | 1247 | /* Wider field width for all EIP197 type engines */ |
930 | case EIP197D: | ||
931 | mask = EIP197_N_PES_MASK; | 1248 | mask = EIP197_N_PES_MASK; |
932 | break; | 1249 | else |
933 | default: | 1250 | /* Narrow field width for EIP97 type engine */ |
934 | mask = EIP97_N_PES_MASK; | 1251 | mask = EIP97_N_PES_MASK; |
935 | } | 1252 | |
936 | priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask; | 1253 | priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask; |
937 | 1254 | ||
1255 | priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings); | ||
1256 | |||
938 | val = (val & GENMASK(27, 25)) >> 25; | 1257 | val = (val & GENMASK(27, 25)) >> 25; |
939 | mask = BIT(val) - 1; | 1258 | mask = BIT(val) - 1; |
940 | 1259 | ||
941 | val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); | ||
942 | priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings); | ||
943 | |||
944 | priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32)); | 1260 | priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32)); |
945 | priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask; | 1261 | priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask; |
946 | 1262 | ||
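cd_offset is simply cd_size rounded up to the next multiple of the bus word size, using the usual power-of-two round-up with mask = BIT(hwdataw) - 1. Worked through once with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int cd_size = 6;  /* descriptor size in words (example) */
	unsigned int hwdataw = 2;  /* bus width: 2^2 = 4 words */
	unsigned int mask = (1u << hwdataw) - 1;
	unsigned int cd_offset = (cd_size + mask) & ~mask;

	printf("cd_offset = %u\n", cd_offset); /* 6 -> 8 on a 4-word bus */
	return 0;
}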
@@ -952,9 +1268,7 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv) | |||
952 | { | 1268 | { |
953 | struct safexcel_register_offsets *offsets = &priv->offsets; | 1269 | struct safexcel_register_offsets *offsets = &priv->offsets; |
954 | 1270 | ||
955 | switch (priv->version) { | 1271 | if (priv->flags & SAFEXCEL_HW_EIP197) { |
956 | case EIP197B: | ||
957 | case EIP197D: | ||
958 | offsets->hia_aic = EIP197_HIA_AIC_BASE; | 1272 | offsets->hia_aic = EIP197_HIA_AIC_BASE; |
959 | offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE; | 1273 | offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE; |
960 | offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE; | 1274 | offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE; |
@@ -965,8 +1279,8 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv) | |||
965 | offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE; | 1279 | offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE; |
966 | offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE; | 1280 | offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE; |
967 | offsets->pe = EIP197_PE_BASE; | 1281 | offsets->pe = EIP197_PE_BASE; |
968 | break; | 1282 | offsets->global = EIP197_GLOBAL_BASE; |
969 | case EIP97IES: | 1283 | } else { |
970 | offsets->hia_aic = EIP97_HIA_AIC_BASE; | 1284 | offsets->hia_aic = EIP97_HIA_AIC_BASE; |
971 | offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE; | 1285 | offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE; |
972 | offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE; | 1286 | offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE; |
@@ -977,135 +1291,213 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv) | |||
977 | offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE; | 1291 | offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE; |
978 | offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE; | 1292 | offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE; |
979 | offsets->pe = EIP97_PE_BASE; | 1293 | offsets->pe = EIP97_PE_BASE; |
980 | break; | 1294 | offsets->global = EIP97_GLOBAL_BASE; |
981 | } | 1295 | } |
982 | } | 1296 | } |
983 | 1297 | ||
984 | static int safexcel_probe(struct platform_device *pdev) | 1298 | /* |
1299 | * Generic part of probe routine, shared by platform and PCI driver | ||
1300 | * | ||
1301 | * Assumes IO resources have been mapped, private data mem has been allocated, | ||
1302 | * clocks have been enabled, device pointer has been assigned etc. | ||
1303 | * | ||
1304 | */ | ||
1305 | static int safexcel_probe_generic(void *pdev, | ||
1306 | struct safexcel_crypto_priv *priv, | ||
1307 | int is_pci_dev) | ||
985 | { | 1308 | { |
986 | struct device *dev = &pdev->dev; | 1309 | struct device *dev = priv->dev; |
987 | struct resource *res; | 1310 | u32 peid, version, mask, val, hiaopt; |
988 | struct safexcel_crypto_priv *priv; | 1311 | int i, ret, hwctg; |
989 | int i, ret; | ||
990 | 1312 | ||
991 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); | 1313 | priv->context_pool = dmam_pool_create("safexcel-context", dev, |
992 | if (!priv) | 1314 | sizeof(struct safexcel_context_record), |
1315 | 1, 0); | ||
1316 | if (!priv->context_pool) | ||
993 | return -ENOMEM; | 1317 | return -ENOMEM; |
994 | 1318 | ||
995 | priv->dev = dev; | 1319 | /* |
996 | priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev); | 1320 | * First try the EIP97 HIA version regs. |
997 | 1321 | * For the EIP197, this is guaranteed to NOT return any of the test | |
998 | if (priv->version == EIP197B || priv->version == EIP197D) | 1322 | * values |
999 | priv->flags |= EIP197_TRC_CACHE; | 1323 | */ |
1324 | version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION); | ||
1325 | |||
1326 | mask = 0; /* do not swap */ | ||
1327 | if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) { | ||
1328 | priv->hwconfig.hiaver = EIP197_VERSION_MASK(version); | ||
1329 | } else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) { | ||
1330 | /* read back byte-swapped, so complement byte swap bits */ | ||
1331 | mask = EIP197_MST_CTRL_BYTE_SWAP_BITS; | ||
1332 | priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version); | ||
1333 | } else { | ||
1334 | /* So it wasn't an EIP97 ... maybe it's an EIP197? */ | ||
1335 | version = readl(priv->base + EIP197_HIA_AIC_BASE + | ||
1336 | EIP197_HIA_VERSION); | ||
1337 | if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) { | ||
1338 | priv->hwconfig.hiaver = EIP197_VERSION_MASK(version); | ||
1339 | priv->flags |= SAFEXCEL_HW_EIP197; | ||
1340 | } else if (EIP197_REG_HI16(version) == | ||
1341 | EIP197_HIA_VERSION_BE) { | ||
1342 | /* read back byte-swapped, so complement swap bits */ | ||
1343 | mask = EIP197_MST_CTRL_BYTE_SWAP_BITS; | ||
1344 | priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version); | ||
1345 | priv->flags |= SAFEXCEL_HW_EIP197; | ||
1346 | } else { | ||
1347 | return -ENODEV; | ||
1348 | } | ||
1349 | } | ||
1000 | 1350 | ||
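The probing trick is that the HIA version register carries a known 16-bit signature: seen in the low half it means a matching byte order, seen complemented in the high half it means the bus is byte-swapping and the master-control swap bits must be toggled, and anything else means no HIA at that offset. Stripped of the register access (signature values from the header hunk further down):

#include <stdint.h>

enum hia_probe { HIA_LE, HIA_BE, HIA_ABSENT };

#define HIA_SIG_LE 0x35ca /* signature on a matching bus */
#define HIA_SIG_BE 0xca35 /* same signature, byte-swapped */

static enum hia_probe classify(uint32_t version_reg)
{
	if ((version_reg & 0xffff) == HIA_SIG_LE)
		return HIA_LE;
	if (((version_reg >> 16) & 0xffff) == HIA_SIG_BE)
		return HIA_BE;	/* toggle the swap bits, then re-read */
	return HIA_ABSENT;	/* try the other (EIP197) base offset */
}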
1351 | /* Now initialize the reg offsets based on the probing info so far */ | ||
1001 | safexcel_init_register_offsets(priv); | 1352 | safexcel_init_register_offsets(priv); |
1002 | 1353 | ||
1003 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1354 | /* |
1004 | priv->base = devm_ioremap_resource(dev, res); | 1355 | * If the version was read byte-swapped, we need to flip the device |
1005 | if (IS_ERR(priv->base)) { | 1356 | * swapping. Keep in mind here, though, that what we write will also be |
1006 | dev_err(dev, "failed to get resource\n"); | 1357 | * byte-swapped ... |
1007 | return PTR_ERR(priv->base); | 1358 | */ |
1359 | if (mask) { | ||
1360 | val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); | ||
1361 | val = val ^ (mask >> 24); /* toggle byte swap bits */ | ||
1362 | writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); | ||
1008 | } | 1363 | } |
1009 | 1364 | ||
1010 | priv->clk = devm_clk_get(&pdev->dev, NULL); | 1365 | /* |
1011 | ret = PTR_ERR_OR_ZERO(priv->clk); | 1366 | * We're not done probing yet! We may fall through to here if no HIA |
1012 | /* The clock isn't mandatory */ | 1367 | * was found at all. So, with the endianness presumably correct now and |
1013 | if (ret != -ENOENT) { | 1368 | * the offsets setup, *really* probe for the EIP97/EIP197. |
1014 | if (ret) | 1369 | */ |
1015 | return ret; | 1368 | * the offsets set up, *really* probe for the EIP97/EIP197. |
1016 | 1371 | if (((priv->flags & SAFEXCEL_HW_EIP197) && | |
1017 | ret = clk_prepare_enable(priv->clk); | 1372 | (EIP197_REG_LO16(version) != EIP197_VERSION_LE)) || |
1018 | if (ret) { | 1373 | ((!(priv->flags & SAFEXCEL_HW_EIP197) && |
1019 | dev_err(dev, "unable to enable clk (%d)\n", ret); | 1374 | (EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) { |
1020 | return ret; | 1375 | /* |
1021 | } | 1376 | * We did not find the device that matched our initial probing |
1377 | * (or our initial probing failed) Report appropriate error. | ||
1378 | */ | ||
1379 | return -ENODEV; | ||
1022 | } | 1380 | } |
1023 | 1381 | ||
1024 | priv->reg_clk = devm_clk_get(&pdev->dev, "reg"); | 1382 | priv->hwconfig.hwver = EIP197_VERSION_MASK(version); |
1025 | ret = PTR_ERR_OR_ZERO(priv->reg_clk); | 1383 | hwctg = version >> 28; |
1026 | /* The clock isn't mandatory */ | 1384 | peid = version & 255; |
1027 | if (ret != -ENOENT) { | ||
1028 | if (ret) | ||
1029 | goto err_core_clk; | ||
1030 | 1385 | ||
1031 | ret = clk_prepare_enable(priv->reg_clk); | 1386 | /* Detect EIP96 packet engine and version */ |
1032 | if (ret) { | 1387 | version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0)); |
1033 | dev_err(dev, "unable to enable reg clk (%d)\n", ret); | 1388 | if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) { |
1034 | goto err_core_clk; | 1389 | dev_err(dev, "EIP%d: EIP96 not detected.\n", peid); |
1035 | } | 1390 | return -ENODEV; |
1391 | } | ||
1392 | priv->hwconfig.pever = EIP197_VERSION_MASK(version); | ||
1393 | |||
1394 | hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS); | ||
1395 | |||
1396 | if (priv->flags & SAFEXCEL_HW_EIP197) { | ||
1397 | /* EIP197 */ | ||
1398 | priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) & | ||
1399 | EIP197_HWDATAW_MASK; | ||
1400 | priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) & | ||
1401 | EIP197_CFSIZE_MASK) + | ||
1402 | EIP197_CFSIZE_ADJUST; | ||
1403 | priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) & | ||
1404 | EIP197_RFSIZE_MASK) + | ||
1405 | EIP197_RFSIZE_ADJUST; | ||
1406 | } else { | ||
1407 | /* EIP97 */ | ||
1408 | priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) & | ||
1409 | EIP97_HWDATAW_MASK; | ||
1410 | priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) & | ||
1411 | EIP97_CFSIZE_MASK; | ||
1412 | priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) & | ||
1413 | EIP97_RFSIZE_MASK; | ||
1036 | } | 1414 | } |
1037 | 1415 | ||
1038 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); | 1416 | /* Get supported algorithms from EIP96 transform engine */ |
1039 | if (ret) | 1417 | priv->hwconfig.algo_flags = readl(EIP197_PE(priv) + |
1040 | goto err_reg_clk; | 1418 | EIP197_PE_EIP96_OPTIONS(0)); |
1041 | 1419 | ||
1042 | priv->context_pool = dmam_pool_create("safexcel-context", dev, | 1420 | /* Print single info line describing what we just detected */ |
1043 | sizeof(struct safexcel_context_record), | 1421 | dev_info(priv->dev, "EIP%d:%x(%d)-HIA:%x(%d,%d,%d),PE:%x,alg:%08x\n", |
1044 | 1, 0); | 1422 | peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hiaver, |
1045 | if (!priv->context_pool) { | 1423 | priv->hwconfig.hwdataw, priv->hwconfig.hwcfsize, |
1046 | ret = -ENOMEM; | 1424 | priv->hwconfig.hwrfsize, priv->hwconfig.pever, |
1047 | goto err_reg_clk; | 1425 | priv->hwconfig.algo_flags); |
1048 | } | ||
1049 | 1426 | ||
1050 | safexcel_configure(priv); | 1427 | safexcel_configure(priv); |
1051 | 1428 | ||
1429 | if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) { | ||
1430 | /* | ||
1431 | * Request MSI vectors for global + 1 per ring - | ||
1432 | * or just 1 for older dev images | ||
1433 | */ | ||
1434 | struct pci_dev *pci_pdev = pdev; | ||
1435 | |||
1436 | ret = pci_alloc_irq_vectors(pci_pdev, | ||
1437 | priv->config.rings + 1, | ||
1438 | priv->config.rings + 1, | ||
1439 | PCI_IRQ_MSI | PCI_IRQ_MSIX); | ||
1440 | if (ret < 0) { | ||
1441 | dev_err(dev, "Failed to allocate PCI MSI interrupts\n"); | ||
1442 | return ret; | ||
1443 | } | ||
1444 | } | ||
1445 | |||
1446 | /* Register the ring IRQ handlers and configure the rings */ | ||
1052 | priv->ring = devm_kcalloc(dev, priv->config.rings, | 1447 | priv->ring = devm_kcalloc(dev, priv->config.rings, |
1053 | sizeof(*priv->ring), | 1448 | sizeof(*priv->ring), |
1054 | GFP_KERNEL); | 1449 | GFP_KERNEL); |
1055 | if (!priv->ring) { | 1450 | if (!priv->ring) |
1056 | ret = -ENOMEM; | 1451 | return -ENOMEM; |
1057 | goto err_reg_clk; | ||
1058 | } | ||
1059 | 1452 | ||
1060 | for (i = 0; i < priv->config.rings; i++) { | 1453 | for (i = 0; i < priv->config.rings; i++) { |
1061 | char irq_name[6] = {0}; /* "ringX\0" */ | 1454 | char wq_name[9] = {0}; |
1062 | char wq_name[9] = {0}; /* "wq_ringX\0" */ | ||
1063 | int irq; | 1455 | int irq; |
1064 | struct safexcel_ring_irq_data *ring_irq; | 1456 | struct safexcel_ring_irq_data *ring_irq; |
1065 | 1457 | ||
1066 | ret = safexcel_init_ring_descriptors(priv, | 1458 | ret = safexcel_init_ring_descriptors(priv, |
1067 | &priv->ring[i].cdr, | 1459 | &priv->ring[i].cdr, |
1068 | &priv->ring[i].rdr); | 1460 | &priv->ring[i].rdr); |
1069 | if (ret) | 1461 | if (ret) { |
1070 | goto err_reg_clk; | 1462 | dev_err(dev, "Failed to initialize rings\n"); |
1463 | return ret; | ||
1464 | } | ||
1071 | 1465 | ||
1072 | priv->ring[i].rdr_req = devm_kcalloc(dev, | 1466 | priv->ring[i].rdr_req = devm_kcalloc(dev, |
1073 | EIP197_DEFAULT_RING_SIZE, | 1467 | EIP197_DEFAULT_RING_SIZE, |
1074 | sizeof(priv->ring[i].rdr_req), | 1468 | sizeof(priv->ring[i].rdr_req), |
1075 | GFP_KERNEL); | 1469 | GFP_KERNEL); |
1076 | if (!priv->ring[i].rdr_req) { | 1470 | if (!priv->ring[i].rdr_req) |
1077 | ret = -ENOMEM; | 1471 | return -ENOMEM; |
1078 | goto err_reg_clk; | ||
1079 | } | ||
1080 | 1472 | ||
1081 | ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL); | 1473 | ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL); |
1082 | if (!ring_irq) { | 1474 | if (!ring_irq) |
1083 | ret = -ENOMEM; | 1475 | return -ENOMEM; |
1084 | goto err_reg_clk; | ||
1085 | } | ||
1086 | 1476 | ||
1087 | ring_irq->priv = priv; | 1477 | ring_irq->priv = priv; |
1088 | ring_irq->ring = i; | 1478 | ring_irq->ring = i; |
1089 | 1479 | ||
1090 | snprintf(irq_name, 6, "ring%d", i); | 1480 | irq = safexcel_request_ring_irq(pdev, |
1091 | irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring, | 1481 | EIP197_IRQ_NUMBER(i, is_pci_dev), |
1482 | is_pci_dev, | ||
1483 | safexcel_irq_ring, | ||
1092 | safexcel_irq_ring_thread, | 1484 | safexcel_irq_ring_thread, |
1093 | ring_irq); | 1485 | ring_irq); |
1094 | if (irq < 0) { | 1486 | if (irq < 0) { |
1095 | ret = irq; | 1487 | dev_err(dev, "Failed to get IRQ ID for ring %d\n", i); |
1096 | goto err_reg_clk; | 1488 | return irq; |
1097 | } | 1489 | } |
1098 | 1490 | ||
1099 | priv->ring[i].work_data.priv = priv; | 1491 | priv->ring[i].work_data.priv = priv; |
1100 | priv->ring[i].work_data.ring = i; | 1492 | priv->ring[i].work_data.ring = i; |
1101 | INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work); | 1493 | INIT_WORK(&priv->ring[i].work_data.work, |
1494 | safexcel_dequeue_work); | ||
1102 | 1495 | ||
1103 | snprintf(wq_name, 9, "wq_ring%d", i); | 1496 | snprintf(wq_name, 9, "wq_ring%d", i); |
1104 | priv->ring[i].workqueue = create_singlethread_workqueue(wq_name); | 1497 | priv->ring[i].workqueue = |
1105 | if (!priv->ring[i].workqueue) { | 1498 | create_singlethread_workqueue(wq_name); |
1106 | ret = -ENOMEM; | 1499 | if (!priv->ring[i].workqueue) |
1107 | goto err_reg_clk; | 1500 | return -ENOMEM; |
1108 | } | ||
1109 | 1501 | ||
1110 | priv->ring[i].requests = 0; | 1502 | priv->ring[i].requests = 0; |
1111 | priv->ring[i].busy = false; | 1503 | priv->ring[i].busy = false; |
@@ -1117,28 +1509,21 @@ static int safexcel_probe(struct platform_device *pdev) | |||
1117 | spin_lock_init(&priv->ring[i].queue_lock); | 1509 | spin_lock_init(&priv->ring[i].queue_lock); |
1118 | } | 1510 | } |
1119 | 1511 | ||
1120 | platform_set_drvdata(pdev, priv); | ||
1121 | atomic_set(&priv->ring_used, 0); | 1512 | atomic_set(&priv->ring_used, 0); |
1122 | 1513 | ||
1123 | ret = safexcel_hw_init(priv); | 1514 | ret = safexcel_hw_init(priv); |
1124 | if (ret) { | 1515 | if (ret) { |
1125 | dev_err(dev, "EIP h/w init failed (%d)\n", ret); | 1516 | dev_err(dev, "HW init failed (%d)\n", ret); |
1126 | goto err_reg_clk; | 1517 | return ret; |
1127 | } | 1518 | } |
1128 | 1519 | ||
1129 | ret = safexcel_register_algorithms(priv); | 1520 | ret = safexcel_register_algorithms(priv); |
1130 | if (ret) { | 1521 | if (ret) { |
1131 | dev_err(dev, "Failed to register algorithms (%d)\n", ret); | 1522 | dev_err(dev, "Failed to register algorithms (%d)\n", ret); |
1132 | goto err_reg_clk; | 1523 | return ret; |
1133 | } | 1524 | } |
1134 | 1525 | ||
1135 | return 0; | 1526 | return 0; |
1136 | |||
1137 | err_reg_clk: | ||
1138 | clk_disable_unprepare(priv->reg_clk); | ||
1139 | err_core_clk: | ||
1140 | clk_disable_unprepare(priv->clk); | ||
1141 | return ret; | ||
1142 | } | 1527 | } |
1143 | 1528 | ||
1144 | static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv) | 1529 | static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv) |
@@ -1160,6 +1545,76 @@ static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv) | |||
1160 | } | 1545 | } |
1161 | } | 1546 | } |
1162 | 1547 | ||
1548 | #if IS_ENABLED(CONFIG_OF) | ||
1549 | /* for Device Tree platform driver */ | ||
1550 | |||
1551 | static int safexcel_probe(struct platform_device *pdev) | ||
1552 | { | ||
1553 | struct device *dev = &pdev->dev; | ||
1554 | struct safexcel_crypto_priv *priv; | ||
1555 | int ret; | ||
1556 | |||
1557 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); | ||
1558 | if (!priv) | ||
1559 | return -ENOMEM; | ||
1560 | |||
1561 | priv->dev = dev; | ||
1562 | priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev); | ||
1563 | |||
1564 | platform_set_drvdata(pdev, priv); | ||
1565 | |||
1566 | priv->base = devm_platform_ioremap_resource(pdev, 0); | ||
1567 | if (IS_ERR(priv->base)) { | ||
1568 | dev_err(dev, "failed to get resource\n"); | ||
1569 | return PTR_ERR(priv->base); | ||
1570 | } | ||
1571 | |||
1572 | priv->clk = devm_clk_get(&pdev->dev, NULL); | ||
1573 | ret = PTR_ERR_OR_ZERO(priv->clk); | ||
1574 | /* The clock isn't mandatory */ | ||
1575 | if (ret != -ENOENT) { | ||
1576 | if (ret) | ||
1577 | return ret; | ||
1578 | |||
1579 | ret = clk_prepare_enable(priv->clk); | ||
1580 | if (ret) { | ||
1581 | dev_err(dev, "unable to enable clk (%d)\n", ret); | ||
1582 | return ret; | ||
1583 | } | ||
1584 | } | ||
1585 | |||
1586 | priv->reg_clk = devm_clk_get(&pdev->dev, "reg"); | ||
1587 | ret = PTR_ERR_OR_ZERO(priv->reg_clk); | ||
1588 | /* The clock isn't mandatory */ | ||
1589 | if (ret != -ENOENT) { | ||
1590 | if (ret) | ||
1591 | goto err_core_clk; | ||
1592 | |||
1593 | ret = clk_prepare_enable(priv->reg_clk); | ||
1594 | if (ret) { | ||
1595 | dev_err(dev, "unable to enable reg clk (%d)\n", ret); | ||
1596 | goto err_core_clk; | ||
1597 | } | ||
1598 | } | ||
1599 | |||
1600 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); | ||
1601 | if (ret) | ||
1602 | goto err_reg_clk; | ||
1603 | |||
1604 | /* Generic EIP97/EIP197 device probing */ | ||
1605 | ret = safexcel_probe_generic(pdev, priv, 0); | ||
1606 | if (ret) | ||
1607 | goto err_reg_clk; | ||
1608 | |||
1609 | return 0; | ||
1610 | |||
1611 | err_reg_clk: | ||
1612 | clk_disable_unprepare(priv->reg_clk); | ||
1613 | err_core_clk: | ||
1614 | clk_disable_unprepare(priv->clk); | ||
1615 | return ret; | ||
1616 | } | ||
1617 | |||
1163 | static int safexcel_remove(struct platform_device *pdev) | 1618 | static int safexcel_remove(struct platform_device *pdev) |
1164 | { | 1619 | { |
1165 | struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev); | 1620 | struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev); |
@@ -1179,30 +1634,28 @@ static int safexcel_remove(struct platform_device *pdev) | |||
1179 | static const struct of_device_id safexcel_of_match_table[] = { | 1634 | static const struct of_device_id safexcel_of_match_table[] = { |
1180 | { | 1635 | { |
1181 | .compatible = "inside-secure,safexcel-eip97ies", | 1636 | .compatible = "inside-secure,safexcel-eip97ies", |
1182 | .data = (void *)EIP97IES, | 1637 | .data = (void *)EIP97IES_MRVL, |
1183 | }, | 1638 | }, |
1184 | { | 1639 | { |
1185 | .compatible = "inside-secure,safexcel-eip197b", | 1640 | .compatible = "inside-secure,safexcel-eip197b", |
1186 | .data = (void *)EIP197B, | 1641 | .data = (void *)EIP197B_MRVL, |
1187 | }, | 1642 | }, |
1188 | { | 1643 | { |
1189 | .compatible = "inside-secure,safexcel-eip197d", | 1644 | .compatible = "inside-secure,safexcel-eip197d", |
1190 | .data = (void *)EIP197D, | 1645 | .data = (void *)EIP197D_MRVL, |
1191 | }, | 1646 | }, |
1647 | /* For backward compatibility and intended for generic use */ | ||
1192 | { | 1648 | { |
1193 | /* Deprecated. Kept for backward compatibility. */ | ||
1194 | .compatible = "inside-secure,safexcel-eip97", | 1649 | .compatible = "inside-secure,safexcel-eip97", |
1195 | .data = (void *)EIP97IES, | 1650 | .data = (void *)EIP97IES_MRVL, |
1196 | }, | 1651 | }, |
1197 | { | 1652 | { |
1198 | /* Deprecated. Kept for backward compatibility. */ | ||
1199 | .compatible = "inside-secure,safexcel-eip197", | 1653 | .compatible = "inside-secure,safexcel-eip197", |
1200 | .data = (void *)EIP197B, | 1654 | .data = (void *)EIP197B_MRVL, |
1201 | }, | 1655 | }, |
1202 | {}, | 1656 | {}, |
1203 | }; | 1657 | }; |
1204 | 1658 | ||
1205 | |||
1206 | static struct platform_driver crypto_safexcel = { | 1659 | static struct platform_driver crypto_safexcel = { |
1207 | .probe = safexcel_probe, | 1660 | .probe = safexcel_probe, |
1208 | .remove = safexcel_remove, | 1661 | .remove = safexcel_remove, |
@@ -1211,10 +1664,166 @@ static struct platform_driver crypto_safexcel = { | |||
1211 | .of_match_table = safexcel_of_match_table, | 1664 | .of_match_table = safexcel_of_match_table, |
1212 | }, | 1665 | }, |
1213 | }; | 1666 | }; |
1214 | module_platform_driver(crypto_safexcel); | 1667 | #endif |
1668 | |||
1669 | #if IS_ENABLED(CONFIG_PCI) | ||
1670 | /* PCIE devices - i.e. Inside Secure development boards */ | ||
1671 | |||
1672 | static int safexcel_pci_probe(struct pci_dev *pdev, | ||
1673 | const struct pci_device_id *ent) | ||
1674 | { | ||
1675 | struct device *dev = &pdev->dev; | ||
1676 | struct safexcel_crypto_priv *priv; | ||
1677 | void __iomem *pciebase; | ||
1678 | int rc; | ||
1679 | u32 val; | ||
1680 | |||
1681 | dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n", | ||
1682 | ent->vendor, ent->device, ent->subvendor, | ||
1683 | ent->subdevice, ent->driver_data); | ||
1684 | |||
1685 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
1686 | if (!priv) | ||
1687 | return -ENOMEM; | ||
1688 | |||
1689 | priv->dev = dev; | ||
1690 | priv->version = (enum safexcel_eip_version)ent->driver_data; | ||
1691 | |||
1692 | pci_set_drvdata(pdev, priv); | ||
1693 | |||
1694 | /* enable the device */ | ||
1695 | rc = pcim_enable_device(pdev); | ||
1696 | if (rc) { | ||
1697 | dev_err(dev, "Failed to enable PCI device\n"); | ||
1698 | return rc; | ||
1699 | } | ||
1700 | |||
1701 | /* take ownership of PCI BAR0 */ | ||
1702 | rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel"); | ||
1703 | if (rc) { | ||
1704 | dev_err(dev, "Failed to map IO region for BAR0\n"); | ||
1705 | return rc; | ||
1706 | } | ||
1707 | priv->base = pcim_iomap_table(pdev)[0]; | ||
1708 | |||
1709 | if (priv->version == EIP197_DEVBRD) { | ||
1710 | dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n"); | ||
1711 | |||
1712 | rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel"); | ||
1713 | if (rc) { | ||
1714 | dev_err(dev, "Failed to map IO region for BAR4\n"); | ||
1715 | return rc; | ||
1716 | } | ||
1717 | |||
1718 | pciebase = pcim_iomap_table(pdev)[2]; | ||
1719 | val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR); | ||
1720 | if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) { | ||
1721 | dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n", | ||
1722 | (val & 0xff)); | ||
1723 | |||
1724 | /* Setup MSI identity map mapping */ | ||
1725 | writel(EIP197_XLX_USER_VECT_LUT0_IDENT, | ||
1726 | pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR); | ||
1727 | writel(EIP197_XLX_USER_VECT_LUT1_IDENT, | ||
1728 | pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR); | ||
1729 | writel(EIP197_XLX_USER_VECT_LUT2_IDENT, | ||
1730 | pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR); | ||
1731 | writel(EIP197_XLX_USER_VECT_LUT3_IDENT, | ||
1732 | pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR); | ||
1733 | |||
1734 | /* Enable all device interrupts */ | ||
1735 | writel(GENMASK(31, 0), | ||
1736 | pciebase + EIP197_XLX_USER_INT_ENB_MSK); | ||
1737 | } else { | ||
1738 | dev_err(dev, "Unrecognised IRQ block identifier %x\n", | ||
1739 | val); | ||
1740 | return -ENODEV; | ||
1741 | } | ||
1742 | |||
1743 | /* HW reset FPGA dev board */ | ||
1744 | /* assert reset */ | ||
1745 | writel(1, priv->base + EIP197_XLX_GPIO_BASE); | ||
1746 | wmb(); /* maintain strict ordering for accesses here */ | ||
1747 | /* deassert reset */ | ||
1748 | writel(0, priv->base + EIP197_XLX_GPIO_BASE); | ||
1749 | wmb(); /* maintain strict ordering for accesses here */ | ||
1750 | } | ||
1751 | |||
1752 | /* enable bus mastering */ | ||
1753 | pci_set_master(pdev); | ||
1754 | |||
1755 | /* Generic EIP97/EIP197 device probing */ | ||
1756 | rc = safexcel_probe_generic(pdev, priv, 1); | ||
1757 | return rc; | ||
1758 | } | ||
1759 | |||
1760 | static void safexcel_pci_remove(struct pci_dev *pdev) | ||
1761 | { | ||
1762 | struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev); | ||
1763 | int i; | ||
1764 | |||
1765 | safexcel_unregister_algorithms(priv); | ||
1766 | |||
1767 | for (i = 0; i < priv->config.rings; i++) | ||
1768 | destroy_workqueue(priv->ring[i].workqueue); | ||
1769 | |||
1770 | safexcel_hw_reset_rings(priv); | ||
1771 | } | ||
1772 | |||
1773 | static const struct pci_device_id safexcel_pci_ids[] = { | ||
1774 | { | ||
1775 | PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038, | ||
1776 | 0x16ae, 0xc522), | ||
1777 | .driver_data = EIP197_DEVBRD, | ||
1778 | }, | ||
1779 | {}, | ||
1780 | }; | ||
1781 | |||
1782 | MODULE_DEVICE_TABLE(pci, safexcel_pci_ids); | ||
1783 | |||
1784 | static struct pci_driver safexcel_pci_driver = { | ||
1785 | .name = "crypto-safexcel", | ||
1786 | .id_table = safexcel_pci_ids, | ||
1787 | .probe = safexcel_pci_probe, | ||
1788 | .remove = safexcel_pci_remove, | ||
1789 | }; | ||
1790 | #endif | ||
1791 | |||
1792 | static int __init safexcel_init(void) | ||
1793 | { | ||
1794 | int rc; | ||
1795 | |||
1796 | #if IS_ENABLED(CONFIG_OF) | ||
1797 | /* Register platform driver */ | ||
1798 | platform_driver_register(&crypto_safexcel); | ||
1799 | #endif | ||
1800 | |||
1801 | #if IS_ENABLED(CONFIG_PCI) | ||
1802 | /* Register PCI driver */ | ||
1803 | rc = pci_register_driver(&safexcel_pci_driver); | ||
1804 | #endif | ||
1805 | |||
1806 | return 0; | ||
1807 | } | ||
1808 | |||
1809 | static void __exit safexcel_exit(void) | ||
1810 | { | ||
1811 | #if IS_ENABLED(CONFIG_OF) | ||
1812 | /* Unregister platform driver */ | ||
1813 | platform_driver_unregister(&crypto_safexcel); | ||
1814 | #endif | ||
1815 | |||
1816 | #if IS_ENABLED(CONFIG_PCI) | ||
1817 | /* Unregister PCI driver if successfully registered before */ | ||
1818 | pci_unregister_driver(&safexcel_pci_driver); | ||
1819 | #endif | ||
1820 | } | ||
1821 | |||
1822 | module_init(safexcel_init); | ||
1823 | module_exit(safexcel_exit); | ||
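One caveat in safexcel_init() above: the return value of platform_driver_register() is ignored and the function always returns 0, and rc ends up unused when CONFIG_PCI is disabled. A stricter variant that propagates both registration results could look like this (a sketch, not the committed code):

static int __init safexcel_init(void)
{
	int ret = 0;

#if IS_ENABLED(CONFIG_OF)
	ret = platform_driver_register(&crypto_safexcel);
	if (ret)
		return ret;
#endif

#if IS_ENABLED(CONFIG_PCI)
	ret = pci_register_driver(&safexcel_pci_driver);
#if IS_ENABLED(CONFIG_OF)
	if (ret)
		platform_driver_unregister(&crypto_safexcel);
#endif
#endif

	return ret;
}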
1215 | 1824 | ||
1216 | MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>"); | 1825 | MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>"); |
1217 | MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>"); | 1826 | MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>"); |
1218 | MODULE_AUTHOR("Igal Liberman <igall@marvell.com>"); | 1827 | MODULE_AUTHOR("Igal Liberman <igall@marvell.com>"); |
1219 | MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197"); | 1828 | MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197"); |
1220 | MODULE_LICENSE("GPL v2"); | 1829 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index e0c202f33674..930cc48a6f85 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h | |||
@@ -14,14 +14,23 @@ | |||
14 | #include <crypto/sha.h> | 14 | #include <crypto/sha.h> |
15 | #include <crypto/skcipher.h> | 15 | #include <crypto/skcipher.h> |
16 | 16 | ||
17 | #define EIP197_HIA_VERSION_LE 0xca35 | 17 | #define EIP197_HIA_VERSION_BE 0xca35 |
18 | #define EIP197_HIA_VERSION_BE 0x35ca | 18 | #define EIP197_HIA_VERSION_LE 0x35ca |
19 | #define EIP97_VERSION_LE 0x9e61 | ||
20 | #define EIP197_VERSION_LE 0x3ac5 | ||
21 | #define EIP96_VERSION_LE 0x9f60 | ||
22 | #define EIP197_REG_LO16(reg) (reg & 0xffff) | ||
23 | #define EIP197_REG_HI16(reg) ((reg >> 16) & 0xffff) | ||
24 | #define EIP197_VERSION_MASK(reg) ((reg >> 16) & 0xfff) | ||
25 | #define EIP197_VERSION_SWAP(reg) (((reg & 0xf0) << 4) | \ | ||
26 | ((reg >> 4) & 0xf0) | \ | ||
27 | ((reg >> 12) & 0xf)) | ||
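These macros let the probe code cope with an engine whose registers read back byte-swapped relative to the CPU. One plausible way they combine during version detection, shown as a sketch (the EIP197_HIA_VERSION register offset and the surrounding probe flow are assumptions; only the macros come from this hunk):

```c
u32 version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);

if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
	/* Registers read back in CPU byte order */
	priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
} else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) {
	/* Registers read back byte-swapped: un-swap the version field */
	priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
} else {
	return -ENODEV;	/* no EIP(1)97 HIA at this address */
}
```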
19 | 28 | ||
20 | /* Static configuration */ | 29 | /* Static configuration */ |
21 | #define EIP197_DEFAULT_RING_SIZE 400 | 30 | #define EIP197_DEFAULT_RING_SIZE 400 |
22 | #define EIP197_MAX_TOKENS 8 | 31 | #define EIP197_MAX_TOKENS 18 |
23 | #define EIP197_MAX_RINGS 4 | 32 | #define EIP197_MAX_RINGS 4 |
24 | #define EIP197_FETCH_COUNT 1 | 33 | #define EIP197_FETCH_DEPTH 2 |
25 | #define EIP197_MAX_BATCH_SZ 64 | 34 | #define EIP197_MAX_BATCH_SZ 64 |
26 | 35 | ||
27 | #define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \ | 36 | #define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \ |
@@ -38,6 +47,27 @@ | |||
38 | char __##name##_desc[size] CRYPTO_MINALIGN_ATTR; \ | 47 | char __##name##_desc[size] CRYPTO_MINALIGN_ATTR; \ |
39 | struct type##_request *name = (void *)__##name##_desc | 48 | struct type##_request *name = (void *)__##name##_desc |
40 | 49 | ||
50 | /* Xilinx dev board base offsets */ | ||
51 | #define EIP197_XLX_GPIO_BASE 0x200000 | ||
52 | #define EIP197_XLX_IRQ_BLOCK_ID_ADDR 0x2000 | ||
53 | #define EIP197_XLX_IRQ_BLOCK_ID_VALUE 0x1fc2 | ||
54 | #define EIP197_XLX_USER_INT_ENB_MSK 0x2004 | ||
55 | #define EIP197_XLX_USER_INT_ENB_SET 0x2008 | ||
56 | #define EIP197_XLX_USER_INT_ENB_CLEAR 0x200c | ||
57 | #define EIP197_XLX_USER_INT_BLOCK 0x2040 | ||
58 | #define EIP197_XLX_USER_INT_PEND 0x2048 | ||
59 | #define EIP197_XLX_USER_VECT_LUT0_ADDR 0x2080 | ||
60 | #define EIP197_XLX_USER_VECT_LUT0_IDENT 0x03020100 | ||
61 | #define EIP197_XLX_USER_VECT_LUT1_ADDR 0x2084 | ||
62 | #define EIP197_XLX_USER_VECT_LUT1_IDENT 0x07060504 | ||
63 | #define EIP197_XLX_USER_VECT_LUT2_ADDR 0x2088 | ||
64 | #define EIP197_XLX_USER_VECT_LUT2_IDENT 0x0b0a0908 | ||
65 | #define EIP197_XLX_USER_VECT_LUT3_ADDR 0x208c | ||
66 | #define EIP197_XLX_USER_VECT_LUT3_IDENT 0x0f0e0d0c | ||
67 | |||
68 | /* Helper defines for probe function */ | ||
69 | #define EIP197_IRQ_NUMBER(i, is_pci) (i + is_pci) | ||
70 | |||
41 | /* Register base offsets */ | 71 | /* Register base offsets */ |
42 | #define EIP197_HIA_AIC(priv) ((priv)->base + (priv)->offsets.hia_aic) | 72 | #define EIP197_HIA_AIC(priv) ((priv)->base + (priv)->offsets.hia_aic) |
43 | #define EIP197_HIA_AIC_G(priv) ((priv)->base + (priv)->offsets.hia_aic_g) | 73 | #define EIP197_HIA_AIC_G(priv) ((priv)->base + (priv)->offsets.hia_aic_g) |
@@ -49,6 +79,7 @@ | |||
49 | #define EIP197_HIA_DSE_THR(priv) ((priv)->base + (priv)->offsets.hia_dse_thr) | 79 | #define EIP197_HIA_DSE_THR(priv) ((priv)->base + (priv)->offsets.hia_dse_thr) |
50 | #define EIP197_HIA_GEN_CFG(priv) ((priv)->base + (priv)->offsets.hia_gen_cfg) | 80 | #define EIP197_HIA_GEN_CFG(priv) ((priv)->base + (priv)->offsets.hia_gen_cfg) |
51 | #define EIP197_PE(priv) ((priv)->base + (priv)->offsets.pe) | 81 | #define EIP197_PE(priv) ((priv)->base + (priv)->offsets.pe) |
82 | #define EIP197_GLOBAL(priv) ((priv)->base + (priv)->offsets.global) | ||
52 | 83 | ||
53 | /* EIP197 base offsets */ | 84 | /* EIP197 base offsets */ |
54 | #define EIP197_HIA_AIC_BASE 0x90000 | 85 | #define EIP197_HIA_AIC_BASE 0x90000 |
@@ -61,6 +92,7 @@ | |||
61 | #define EIP197_HIA_DSE_THR_BASE 0x8d040 | 92 | #define EIP197_HIA_DSE_THR_BASE 0x8d040 |
62 | #define EIP197_HIA_GEN_CFG_BASE 0xf0000 | 93 | #define EIP197_HIA_GEN_CFG_BASE 0xf0000 |
63 | #define EIP197_PE_BASE 0xa0000 | 94 | #define EIP197_PE_BASE 0xa0000 |
95 | #define EIP197_GLOBAL_BASE 0xf0000 | ||
64 | 96 | ||
65 | /* EIP97 base offsets */ | 97 | /* EIP97 base offsets */ |
66 | #define EIP97_HIA_AIC_BASE 0x0 | 98 | #define EIP97_HIA_AIC_BASE 0x0 |
@@ -73,6 +105,7 @@ | |||
73 | #define EIP97_HIA_DSE_THR_BASE 0xf600 | 105 | #define EIP97_HIA_DSE_THR_BASE 0xf600 |
74 | #define EIP97_HIA_GEN_CFG_BASE 0x10000 | 106 | #define EIP97_HIA_GEN_CFG_BASE 0x10000 |
75 | #define EIP97_PE_BASE 0x10000 | 107 | #define EIP97_PE_BASE 0x10000 |
108 | #define EIP97_GLOBAL_BASE 0x10000 | ||
76 | 109 | ||
77 | /* CDR/RDR register offsets */ | 110 | /* CDR/RDR register offsets */ |
78 | #define EIP197_HIA_xDR_OFF(priv, r) (EIP197_HIA_AIC_xDR(priv) + (r) * 0x1000) | 111 | #define EIP197_HIA_xDR_OFF(priv, r) (EIP197_HIA_AIC_xDR(priv) + (r) * 0x1000) |
@@ -115,16 +148,22 @@ | |||
115 | #define EIP197_PE_IN_TBUF_THRES(n) (0x0100 + (0x2000 * (n))) | 148 | #define EIP197_PE_IN_TBUF_THRES(n) (0x0100 + (0x2000 * (n))) |
116 | #define EIP197_PE_ICE_SCRATCH_RAM(n) (0x0800 + (0x2000 * (n))) | 149 | #define EIP197_PE_ICE_SCRATCH_RAM(n) (0x0800 + (0x2000 * (n))) |
117 | #define EIP197_PE_ICE_PUE_CTRL(n) (0x0c80 + (0x2000 * (n))) | 150 | #define EIP197_PE_ICE_PUE_CTRL(n) (0x0c80 + (0x2000 * (n))) |
151 | #define EIP197_PE_ICE_PUTF_CTRL(n) (0x0d00 + (0x2000 * (n))) | ||
118 | #define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n))) | 152 | #define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n))) |
119 | #define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n))) | 153 | #define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n))) |
154 | #define EIP197_PE_ICE_PPTF_CTRL(n) (0x0e00 + (0x2000 * (n))) | ||
120 | #define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n))) | 155 | #define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n))) |
121 | #define EIP197_PE_EIP96_TOKEN_CTRL(n) (0x1000 + (0x2000 * (n))) | 156 | #define EIP197_PE_EIP96_TOKEN_CTRL(n) (0x1000 + (0x2000 * (n))) |
122 | #define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n))) | 157 | #define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n))) |
123 | #define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n))) | 158 | #define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n))) |
124 | #define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n))) | 159 | #define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n))) |
160 | #define EIP197_PE_EIP96_FUNCTION2_EN(n) (0x1030 + (0x2000 * (n))) | ||
161 | #define EIP197_PE_EIP96_OPTIONS(n) (0x13f8 + (0x2000 * (n))) | ||
162 | #define EIP197_PE_EIP96_VERSION(n) (0x13fc + (0x2000 * (n))) | ||
125 | #define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n))) | 163 | #define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n))) |
126 | #define EIP197_PE_OUT_TBUF_THRES(n) (0x1d00 + (0x2000 * (n))) | 164 | #define EIP197_PE_OUT_TBUF_THRES(n) (0x1d00 + (0x2000 * (n))) |
127 | #define EIP197_MST_CTRL 0xfff4 | 165 | #define EIP197_MST_CTRL 0xfff4 |
166 | #define EIP197_VERSION 0xfffc | ||
128 | 167 | ||
129 | /* EIP197-specific registers, no indirection */ | 168 | /* EIP197-specific registers, no indirection */ |
130 | #define EIP197_CLASSIFICATION_RAMS 0xe0000 | 169 | #define EIP197_CLASSIFICATION_RAMS 0xe0000 |
@@ -139,6 +178,12 @@ | |||
139 | #define EIP197_TRC_ECCADMINSTAT 0xf0838 | 178 | #define EIP197_TRC_ECCADMINSTAT 0xf0838 |
140 | #define EIP197_TRC_ECCDATASTAT 0xf083c | 179 | #define EIP197_TRC_ECCDATASTAT 0xf083c |
141 | #define EIP197_TRC_ECCDATA 0xf0840 | 180 | #define EIP197_TRC_ECCDATA 0xf0840 |
181 | #define EIP197_FLUE_CACHEBASE_LO(n) (0xf6000 + (32 * (n))) | ||
182 | #define EIP197_FLUE_CACHEBASE_HI(n) (0xf6004 + (32 * (n))) | ||
183 | #define EIP197_FLUE_CONFIG(n) (0xf6010 + (32 * (n))) | ||
184 | #define EIP197_FLUE_OFFSETS 0xf6808 | ||
185 | #define EIP197_FLUE_ARC4_OFFSET 0xf680c | ||
186 | #define EIP197_FLUE_IFC_LUT(n) (0xf6820 + (4 * (n))) | ||
142 | #define EIP197_CS_RAM_CTRL 0xf7ff0 | 187 | #define EIP197_CS_RAM_CTRL 0xf7ff0 |
143 | 188 | ||
144 | /* EIP197_HIA_xDR_DESC_SIZE */ | 189 | /* EIP197_HIA_xDR_DESC_SIZE */ |
@@ -186,6 +231,19 @@ | |||
186 | #define EIP197_N_PES_OFFSET 4 | 231 | #define EIP197_N_PES_OFFSET 4 |
187 | #define EIP197_N_PES_MASK GENMASK(4, 0) | 232 | #define EIP197_N_PES_MASK GENMASK(4, 0) |
188 | #define EIP97_N_PES_MASK GENMASK(2, 0) | 233 | #define EIP97_N_PES_MASK GENMASK(2, 0) |
234 | #define EIP197_HWDATAW_OFFSET 25 | ||
235 | #define EIP197_HWDATAW_MASK GENMASK(3, 0) | ||
236 | #define EIP97_HWDATAW_MASK GENMASK(2, 0) | ||
237 | #define EIP197_CFSIZE_OFFSET 9 | ||
238 | #define EIP197_CFSIZE_ADJUST 4 | ||
239 | #define EIP97_CFSIZE_OFFSET 8 | ||
240 | #define EIP197_CFSIZE_MASK GENMASK(3, 0) | ||
241 | #define EIP97_CFSIZE_MASK GENMASK(4, 0) | ||
242 | #define EIP197_RFSIZE_OFFSET 12 | ||
243 | #define EIP197_RFSIZE_ADJUST 4 | ||
244 | #define EIP97_RFSIZE_OFFSET 12 | ||
245 | #define EIP197_RFSIZE_MASK GENMASK(3, 0) | ||
246 | #define EIP97_RFSIZE_MASK GENMASK(4, 0) | ||
189 | 247 | ||
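These fields let the driver size its command/result descriptor FIFOs from what the hardware actually reports instead of hard-coding them. A sketch of the decode into struct safexcel_hwconfig, assuming hiaopt was read from an options register (that register offset is an assumption; the masks, offsets, and +4 adjusts are the ones above):

```c
if (priv->flags & SAFEXCEL_HW_EIP197) {
	priv->hwconfig.hwdataw  = (hiaopt >> EIP197_HWDATAW_OFFSET) &
				  EIP197_HWDATAW_MASK;
	priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
				   EIP197_CFSIZE_MASK) +
				  EIP197_CFSIZE_ADJUST;
	priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
				   EIP197_RFSIZE_MASK) +
				  EIP197_RFSIZE_ADJUST;
} else {
	/* EIP97: narrower fields, no adjustment */
	priv->hwconfig.hwdataw  = (hiaopt >> EIP197_HWDATAW_OFFSET) &
				  EIP97_HWDATAW_MASK;
	priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) &
				  EIP97_CFSIZE_MASK;
	priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
				  EIP97_RFSIZE_MASK;
}
```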
190 | /* EIP197_HIA_AIC_R_ENABLE_CTRL */ | 248 | /* EIP197_HIA_AIC_R_ENABLE_CTRL */ |
191 | #define EIP197_CDR_IRQ(n) BIT((n) * 2) | 249 | #define EIP197_CDR_IRQ(n) BIT((n) * 2) |
@@ -207,6 +265,11 @@ | |||
207 | #define EIP197_DxE_THR_CTRL_EN BIT(30) | 265 | #define EIP197_DxE_THR_CTRL_EN BIT(30) |
208 | #define EIP197_DxE_THR_CTRL_RESET_PE BIT(31) | 266 | #define EIP197_DxE_THR_CTRL_RESET_PE BIT(31) |
209 | 267 | ||
268 | /* EIP197_PE_ICE_PUE/FPP_CTRL */ | ||
269 | #define EIP197_PE_ICE_UENG_START_OFFSET(n) ((n) << 16) | ||
270 | #define EIP197_PE_ICE_UENG_INIT_ALIGN_MASK 0x7ff0 | ||
271 | #define EIP197_PE_ICE_UENG_DEBUG_RESET BIT(3) | ||
272 | |||
210 | /* EIP197_HIA_AIC_G_ENABLED_STAT */ | 273 | /* EIP197_HIA_AIC_G_ENABLED_STAT */ |
211 | #define EIP197_G_IRQ_DFE(n) BIT((n) << 1) | 274 | #define EIP197_G_IRQ_DFE(n) BIT((n) << 1) |
212 | #define EIP197_G_IRQ_DSE(n) BIT(((n) << 1) + 1) | 275 | #define EIP197_G_IRQ_DSE(n) BIT(((n) << 1) + 1) |
@@ -223,6 +286,7 @@ | |||
223 | #define EIP197_MST_CTRL_TX_MAX_CMD(n) (((n) & 0xf) << 20) | 286 | #define EIP197_MST_CTRL_TX_MAX_CMD(n) (((n) & 0xf) << 20) |
224 | #define EIP197_MST_CTRL_BYTE_SWAP BIT(24) | 287 | #define EIP197_MST_CTRL_BYTE_SWAP BIT(24) |
225 | #define EIP197_MST_CTRL_NO_BYTE_SWAP BIT(25) | 288 | #define EIP197_MST_CTRL_NO_BYTE_SWAP BIT(25) |
289 | #define EIP197_MST_CTRL_BYTE_SWAP_BITS GENMASK(25, 24) | ||
226 | 290 | ||
227 | /* EIP197_PE_IN_DBUF/TBUF_THRES */ | 291 | /* EIP197_PE_IN_DBUF/TBUF_THRES */ |
228 | #define EIP197_PE_IN_xBUF_THRES_MIN(n) ((n) << 8) | 292 | #define EIP197_PE_IN_xBUF_THRES_MIN(n) ((n) << 8) |
@@ -252,45 +316,20 @@ | |||
252 | 316 | ||
253 | /* EIP197_PE_EIP96_TOKEN_CTRL */ | 317 | /* EIP197_PE_EIP96_TOKEN_CTRL */ |
254 | #define EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES BIT(16) | 318 | #define EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES BIT(16) |
255 | #define EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX BIT(19) | 319 | #define EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT BIT(17) |
256 | #define EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX BIT(20) | 320 | #define EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT BIT(22) |
257 | 321 | ||
258 | /* EIP197_PE_EIP96_FUNCTION_EN */ | 322 | /* EIP197_PE_EIP96_FUNCTION_EN */ |
259 | #define EIP197_FUNCTION_RSVD (BIT(6) | BIT(15) | BIT(20) | BIT(23)) | 323 | #define EIP197_FUNCTION_ALL 0xffffffff |
260 | #define EIP197_PROTOCOL_HASH_ONLY BIT(0) | ||
261 | #define EIP197_PROTOCOL_ENCRYPT_ONLY BIT(1) | ||
262 | #define EIP197_PROTOCOL_HASH_ENCRYPT BIT(2) | ||
263 | #define EIP197_PROTOCOL_HASH_DECRYPT BIT(3) | ||
264 | #define EIP197_PROTOCOL_ENCRYPT_HASH BIT(4) | ||
265 | #define EIP197_PROTOCOL_DECRYPT_HASH BIT(5) | ||
266 | #define EIP197_ALG_ARC4 BIT(7) | ||
267 | #define EIP197_ALG_AES_ECB BIT(8) | ||
268 | #define EIP197_ALG_AES_CBC BIT(9) | ||
269 | #define EIP197_ALG_AES_CTR_ICM BIT(10) | ||
270 | #define EIP197_ALG_AES_OFB BIT(11) | ||
271 | #define EIP197_ALG_AES_CFB BIT(12) | ||
272 | #define EIP197_ALG_DES_ECB BIT(13) | ||
273 | #define EIP197_ALG_DES_CBC BIT(14) | ||
274 | #define EIP197_ALG_DES_OFB BIT(16) | ||
275 | #define EIP197_ALG_DES_CFB BIT(17) | ||
276 | #define EIP197_ALG_3DES_ECB BIT(18) | ||
277 | #define EIP197_ALG_3DES_CBC BIT(19) | ||
278 | #define EIP197_ALG_3DES_OFB BIT(21) | ||
279 | #define EIP197_ALG_3DES_CFB BIT(22) | ||
280 | #define EIP197_ALG_MD5 BIT(24) | ||
281 | #define EIP197_ALG_HMAC_MD5 BIT(25) | ||
282 | #define EIP197_ALG_SHA1 BIT(26) | ||
283 | #define EIP197_ALG_HMAC_SHA1 BIT(27) | ||
284 | #define EIP197_ALG_SHA2 BIT(28) | ||
285 | #define EIP197_ALG_HMAC_SHA2 BIT(29) | ||
286 | #define EIP197_ALG_AES_XCBC_MAC BIT(30) | ||
287 | #define EIP197_ALG_GCM_HASH BIT(31) | ||
288 | 324 | ||
289 | /* EIP197_PE_EIP96_CONTEXT_CTRL */ | 325 | /* EIP197_PE_EIP96_CONTEXT_CTRL */ |
290 | #define EIP197_CONTEXT_SIZE(n) (n) | 326 | #define EIP197_CONTEXT_SIZE(n) (n) |
291 | #define EIP197_ADDRESS_MODE BIT(8) | 327 | #define EIP197_ADDRESS_MODE BIT(8) |
292 | #define EIP197_CONTROL_MODE BIT(9) | 328 | #define EIP197_CONTROL_MODE BIT(9) |
293 | 329 | ||
330 | /* EIP197_FLUE_CONFIG */ | ||
331 | #define EIP197_FLUE_CONFIG_MAGIC 0xc7000004 | ||
332 | |||
294 | /* Context Control */ | 333 | /* Context Control */ |
295 | struct safexcel_context_record { | 334 | struct safexcel_context_record { |
296 | u32 control0; | 335 | u32 control0; |
@@ -320,6 +359,7 @@ struct safexcel_context_record { | |||
320 | #define CONTEXT_CONTROL_CRYPTO_ALG_AES192 (0x6 << 17) | 359 | #define CONTEXT_CONTROL_CRYPTO_ALG_AES192 (0x6 << 17) |
321 | #define CONTEXT_CONTROL_CRYPTO_ALG_AES256 (0x7 << 17) | 360 | #define CONTEXT_CONTROL_CRYPTO_ALG_AES256 (0x7 << 17) |
322 | #define CONTEXT_CONTROL_DIGEST_PRECOMPUTED (0x1 << 21) | 361 | #define CONTEXT_CONTROL_DIGEST_PRECOMPUTED (0x1 << 21) |
362 | #define CONTEXT_CONTROL_DIGEST_XCM (0x2 << 21) | ||
323 | #define CONTEXT_CONTROL_DIGEST_HMAC (0x3 << 21) | 363 | #define CONTEXT_CONTROL_DIGEST_HMAC (0x3 << 21) |
324 | #define CONTEXT_CONTROL_CRYPTO_ALG_MD5 (0x0 << 23) | 364 | #define CONTEXT_CONTROL_CRYPTO_ALG_MD5 (0x0 << 23) |
325 | #define CONTEXT_CONTROL_CRYPTO_ALG_SHA1 (0x2 << 23) | 365 | #define CONTEXT_CONTROL_CRYPTO_ALG_SHA1 (0x2 << 23) |
@@ -327,12 +367,21 @@ struct safexcel_context_record { | |||
327 | #define CONTEXT_CONTROL_CRYPTO_ALG_SHA256 (0x3 << 23) | 367 | #define CONTEXT_CONTROL_CRYPTO_ALG_SHA256 (0x3 << 23) |
328 | #define CONTEXT_CONTROL_CRYPTO_ALG_SHA384 (0x6 << 23) | 368 | #define CONTEXT_CONTROL_CRYPTO_ALG_SHA384 (0x6 << 23) |
329 | #define CONTEXT_CONTROL_CRYPTO_ALG_SHA512 (0x5 << 23) | 369 | #define CONTEXT_CONTROL_CRYPTO_ALG_SHA512 (0x5 << 23) |
370 | #define CONTEXT_CONTROL_CRYPTO_ALG_GHASH (0x4 << 23) | ||
371 | #define CONTEXT_CONTROL_CRYPTO_ALG_XCBC128 (0x1 << 23) | ||
372 | #define CONTEXT_CONTROL_CRYPTO_ALG_XCBC192 (0x2 << 23) | ||
373 | #define CONTEXT_CONTROL_CRYPTO_ALG_XCBC256 (0x3 << 23) | ||
330 | #define CONTEXT_CONTROL_INV_FR (0x5 << 24) | 374 | #define CONTEXT_CONTROL_INV_FR (0x5 << 24) |
331 | #define CONTEXT_CONTROL_INV_TR (0x6 << 24) | 375 | #define CONTEXT_CONTROL_INV_TR (0x6 << 24) |
332 | 376 | ||
333 | /* control1 */ | 377 | /* control1 */ |
334 | #define CONTEXT_CONTROL_CRYPTO_MODE_ECB (0 << 0) | 378 | #define CONTEXT_CONTROL_CRYPTO_MODE_ECB (0 << 0) |
335 | #define CONTEXT_CONTROL_CRYPTO_MODE_CBC (1 << 0) | 379 | #define CONTEXT_CONTROL_CRYPTO_MODE_CBC (1 << 0) |
380 | #define CONTEXT_CONTROL_CRYPTO_MODE_OFB (4 << 0) | ||
381 | #define CONTEXT_CONTROL_CRYPTO_MODE_CFB (5 << 0) | ||
382 | #define CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD (6 << 0) | ||
383 | #define CONTEXT_CONTROL_CRYPTO_MODE_XTS (7 << 0) | ||
384 | #define CONTEXT_CONTROL_CRYPTO_MODE_XCM ((6 << 0) | BIT(17)) | ||
336 | #define CONTEXT_CONTROL_IV0 BIT(5) | 385 | #define CONTEXT_CONTROL_IV0 BIT(5) |
337 | #define CONTEXT_CONTROL_IV1 BIT(6) | 386 | #define CONTEXT_CONTROL_IV1 BIT(6) |
338 | #define CONTEXT_CONTROL_IV2 BIT(7) | 387 | #define CONTEXT_CONTROL_IV2 BIT(7) |
@@ -342,6 +391,9 @@ struct safexcel_context_record { | |||
342 | #define CONTEXT_CONTROL_CRYPTO_STORE BIT(12) | 391 | #define CONTEXT_CONTROL_CRYPTO_STORE BIT(12) |
343 | #define CONTEXT_CONTROL_HASH_STORE BIT(19) | 392 | #define CONTEXT_CONTROL_HASH_STORE BIT(19) |
344 | 393 | ||
394 | #define EIP197_XCM_MODE_GCM 1 | ||
395 | #define EIP197_XCM_MODE_CCM 2 | ||
396 | |||
345 | /* The hash counter given to the engine in the context has a granularity of | 397 | /* The hash counter given to the engine in the context has a granularity of |
346 | * 64 bits. | 398 | * 64 bits. |
347 | */ | 399 | */ |
@@ -352,6 +404,8 @@ struct safexcel_context_record { | |||
352 | #define EIP197_TRC_ENABLE_1 BIT(5) | 404 | #define EIP197_TRC_ENABLE_1 BIT(5) |
353 | #define EIP197_TRC_ENABLE_2 BIT(6) | 405 | #define EIP197_TRC_ENABLE_2 BIT(6) |
354 | #define EIP197_TRC_ENABLE_MASK GENMASK(6, 4) | 406 | #define EIP197_TRC_ENABLE_MASK GENMASK(6, 4) |
407 | #define EIP197_CS_BANKSEL_MASK GENMASK(14, 12) | ||
408 | #define EIP197_CS_BANKSEL_OFS 12 | ||
355 | 409 | ||
356 | /* EIP197_TRC_PARAMS */ | 410 | /* EIP197_TRC_PARAMS */ |
357 | #define EIP197_TRC_PARAMS_SW_RESET BIT(0) | 411 | #define EIP197_TRC_PARAMS_SW_RESET BIT(0) |
@@ -369,19 +423,11 @@ struct safexcel_context_record { | |||
369 | #define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n) ((n) << 18) | 423 | #define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n) ((n) << 18) |
370 | 424 | ||
371 | /* Cache helpers */ | 425 | /* Cache helpers */ |
372 | #define EIP197B_CS_RC_MAX 52 | 426 | #define EIP197_CS_TRC_REC_WC 64 |
373 | #define EIP197D_CS_RC_MAX 96 | ||
374 | #define EIP197_CS_RC_SIZE (4 * sizeof(u32)) | 427 | #define EIP197_CS_RC_SIZE (4 * sizeof(u32)) |
375 | #define EIP197_CS_RC_NEXT(x) (x) | 428 | #define EIP197_CS_RC_NEXT(x) (x) |
376 | #define EIP197_CS_RC_PREV(x) ((x) << 10) | 429 | #define EIP197_CS_RC_PREV(x) ((x) << 10) |
377 | #define EIP197_RC_NULL 0x3ff | 430 | #define EIP197_RC_NULL 0x3ff |
378 | #define EIP197B_CS_TRC_REC_WC 59 | ||
379 | #define EIP197D_CS_TRC_REC_WC 64 | ||
380 | #define EIP197B_CS_TRC_LG_REC_WC 73 | ||
381 | #define EIP197D_CS_TRC_LG_REC_WC 80 | ||
382 | #define EIP197B_CS_HT_WC 64 | ||
383 | #define EIP197D_CS_HT_WC 256 | ||
384 | |||
385 | 431 | ||
386 | /* Result data */ | 432 | /* Result data */ |
387 | struct result_data_desc { | 433 | struct result_data_desc { |
@@ -423,6 +469,14 @@ struct safexcel_result_desc { | |||
423 | struct result_data_desc result_data; | 469 | struct result_data_desc result_data; |
424 | } __packed; | 470 | } __packed; |
425 | 471 | ||
472 | /* | ||
473 | * The EIP(1)97 only needs to fetch the descriptor part of | ||
474 | * the result descriptor, not the result token part! | ||
475 | */ | ||
476 | #define EIP197_RD64_FETCH_SIZE ((sizeof(struct safexcel_result_desc) -\ | ||
477 | sizeof(struct result_data_desc)) /\ | ||
478 | sizeof(u32)) | ||
479 | |||
426 | struct safexcel_token { | 480 | struct safexcel_token { |
427 | u32 packet_length:17; | 481 | u32 packet_length:17; |
428 | u8 stat:2; | 482 | u8 stat:2; |
@@ -442,6 +496,7 @@ struct safexcel_token { | |||
442 | #define EIP197_TOKEN_OPCODE_INSERT 0x2 | 496 | #define EIP197_TOKEN_OPCODE_INSERT 0x2 |
443 | #define EIP197_TOKEN_OPCODE_NOOP EIP197_TOKEN_OPCODE_INSERT | 497 | #define EIP197_TOKEN_OPCODE_NOOP EIP197_TOKEN_OPCODE_INSERT |
444 | #define EIP197_TOKEN_OPCODE_RETRIEVE 0x4 | 498 | #define EIP197_TOKEN_OPCODE_RETRIEVE 0x4 |
499 | #define EIP197_TOKEN_OPCODE_INSERT_REMRES 0xa | ||
445 | #define EIP197_TOKEN_OPCODE_VERIFY 0xd | 500 | #define EIP197_TOKEN_OPCODE_VERIFY 0xd |
446 | #define EIP197_TOKEN_OPCODE_CTX_ACCESS 0xe | 501 | #define EIP197_TOKEN_OPCODE_CTX_ACCESS 0xe |
447 | #define EIP197_TOKEN_OPCODE_BYPASS GENMASK(3, 0) | 502 | #define EIP197_TOKEN_OPCODE_BYPASS GENMASK(3, 0) |
@@ -455,10 +510,11 @@ static inline void eip197_noop_token(struct safexcel_token *token) | |||
455 | /* Instructions */ | 510 | /* Instructions */ |
456 | #define EIP197_TOKEN_INS_INSERT_HASH_DIGEST 0x1c | 511 | #define EIP197_TOKEN_INS_INSERT_HASH_DIGEST 0x1c |
457 | #define EIP197_TOKEN_INS_ORIGIN_IV0 0x14 | 512 | #define EIP197_TOKEN_INS_ORIGIN_IV0 0x14 |
513 | #define EIP197_TOKEN_INS_ORIGIN_TOKEN 0x1b | ||
458 | #define EIP197_TOKEN_INS_ORIGIN_LEN(x) ((x) << 5) | 514 | #define EIP197_TOKEN_INS_ORIGIN_LEN(x) ((x) << 5) |
459 | #define EIP197_TOKEN_INS_TYPE_OUTPUT BIT(5) | 515 | #define EIP197_TOKEN_INS_TYPE_OUTPUT BIT(5) |
460 | #define EIP197_TOKEN_INS_TYPE_HASH BIT(6) | 516 | #define EIP197_TOKEN_INS_TYPE_HASH BIT(6) |
461 | #define EIP197_TOKEN_INS_TYPE_CRYTO BIT(7) | 517 | #define EIP197_TOKEN_INS_TYPE_CRYPTO BIT(7) |
462 | #define EIP197_TOKEN_INS_LAST BIT(8) | 518 | #define EIP197_TOKEN_INS_LAST BIT(8) |
463 | 519 | ||
464 | /* Processing Engine Control Data */ | 520 | /* Processing Engine Control Data */ |
@@ -509,6 +565,11 @@ struct safexcel_command_desc { | |||
509 | * Internal structures & functions | 565 | * Internal structures & functions |
510 | */ | 566 | */ |
511 | 567 | ||
568 | #define EIP197_FW_TERMINAL_NOPS 2 | ||
569 | #define EIP197_FW_START_POLLCNT 16 | ||
570 | #define EIP197_FW_PUE_READY 0x14 | ||
571 | #define EIP197_FW_FPP_READY 0x18 | ||
572 | |||
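A plausible use of these constants during firmware start-up, assuming, as the names suggest, that EIP197_FW_PUE_READY and EIP197_FW_FPP_READY are scratch-RAM offsets where each microcode image posts a nonzero ready flag, and that EIP197_FW_START_POLLCNT bounds the busy-wait. None of this loop is committed code:

```c
/* Sketch: bounded poll for a firmware-ready flag in PE scratch RAM.
 * The semantics of the 0x14/0x18 offsets are an assumption here.
 */
static bool eip197_fw_started(struct safexcel_crypto_priv *priv,
			      int pe, u32 ready_ofs)
{
	int pollcnt;

	for (pollcnt = 0; pollcnt < EIP197_FW_START_POLLCNT; pollcnt++) {
		if (readl(EIP197_PE(priv) +
			  EIP197_PE_ICE_SCRATCH_RAM(pe) + ready_ofs))
			return true;	/* microcode reported ready */
	}
	return false;			/* timed out */
}
```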
512 | enum eip197_fw { | 573 | enum eip197_fw { |
513 | FW_IFPP = 0, | 574 | FW_IFPP = 0, |
514 | FW_IPUE, | 575 | FW_IPUE, |
@@ -581,10 +642,42 @@ struct safexcel_ring { | |||
581 | struct crypto_async_request *backlog; | 642 | struct crypto_async_request *backlog; |
582 | }; | 643 | }; |
583 | 644 | ||
645 | /* EIP integration context specifiers */ | ||
584 | enum safexcel_eip_version { | 646 | enum safexcel_eip_version { |
585 | EIP97IES = BIT(0), | 647 | /* Platform (EIP integration context) specifier */ |
586 | EIP197B = BIT(1), | 648 | EIP97IES_MRVL, |
587 | EIP197D = BIT(2), | 649 | EIP197B_MRVL, |
650 | EIP197D_MRVL, | ||
651 | EIP197_DEVBRD | ||
652 | }; | ||
653 | |||
654 | /* Priority we use for advertising our algorithms */ | ||
655 | #define SAFEXCEL_CRA_PRIORITY 300 | ||
656 | |||
657 | /* EIP algorithm presence flags */ | ||
658 | enum safexcel_eip_algorithms { | ||
659 | SAFEXCEL_ALG_BC0 = BIT(5), | ||
660 | SAFEXCEL_ALG_SM4 = BIT(6), | ||
661 | SAFEXCEL_ALG_SM3 = BIT(7), | ||
662 | SAFEXCEL_ALG_CHACHA20 = BIT(8), | ||
663 | SAFEXCEL_ALG_POLY1305 = BIT(9), | ||
664 | SAFEXCEL_SEQMASK_256 = BIT(10), | ||
665 | SAFEXCEL_SEQMASK_384 = BIT(11), | ||
666 | SAFEXCEL_ALG_AES = BIT(12), | ||
667 | SAFEXCEL_ALG_AES_XFB = BIT(13), | ||
668 | SAFEXCEL_ALG_DES = BIT(15), | ||
669 | SAFEXCEL_ALG_DES_XFB = BIT(16), | ||
670 | SAFEXCEL_ALG_ARC4 = BIT(18), | ||
671 | SAFEXCEL_ALG_AES_XTS = BIT(20), | ||
672 | SAFEXCEL_ALG_WIRELESS = BIT(21), | ||
673 | SAFEXCEL_ALG_MD5 = BIT(22), | ||
674 | SAFEXCEL_ALG_SHA1 = BIT(23), | ||
675 | SAFEXCEL_ALG_SHA2_256 = BIT(25), | ||
676 | SAFEXCEL_ALG_SHA2_512 = BIT(26), | ||
677 | SAFEXCEL_ALG_XCBC_MAC = BIT(27), | ||
678 | SAFEXCEL_ALG_CBC_MAC_ALL = BIT(29), | ||
679 | SAFEXCEL_ALG_GHASH = BIT(30), | ||
680 | SAFEXCEL_ALG_SHA3 = BIT(31), | ||
588 | }; | 681 | }; |
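Combined with the algo_mask field added to struct safexcel_alg_template further down, these presence bits let registration skip any ciphersuite the probed engine cannot accelerate. A sketch of that gating (the loop shape is illustrative):

```c
for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
	/* Are all base algorithms this template needs available? */
	if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
	    safexcel_algs[i]->algo_mask)
		continue;	/* no, don't register this ciphersuite */

	/* ... register the skcipher/aead/ahash template ... */
}
```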
589 | 682 | ||
590 | struct safexcel_register_offsets { | 683 | struct safexcel_register_offsets { |
@@ -598,10 +691,22 @@ struct safexcel_register_offsets { | |||
598 | u32 hia_dse_thr; | 691 | u32 hia_dse_thr; |
599 | u32 hia_gen_cfg; | 692 | u32 hia_gen_cfg; |
600 | u32 pe; | 693 | u32 pe; |
694 | u32 global; | ||
601 | }; | 695 | }; |
602 | 696 | ||
603 | enum safexcel_flags { | 697 | enum safexcel_flags { |
604 | EIP197_TRC_CACHE = BIT(0), | 698 | EIP197_TRC_CACHE = BIT(0), |
699 | SAFEXCEL_HW_EIP197 = BIT(1), | ||
700 | }; | ||
701 | |||
702 | struct safexcel_hwconfig { | ||
703 | enum safexcel_eip_algorithms algo_flags; | ||
704 | int hwver; | ||
705 | int hiaver; | ||
706 | int pever; | ||
707 | int hwdataw; | ||
708 | int hwcfsize; | ||
709 | int hwrfsize; | ||
605 | }; | 710 | }; |
606 | 711 | ||
607 | struct safexcel_crypto_priv { | 712 | struct safexcel_crypto_priv { |
@@ -613,6 +718,7 @@ struct safexcel_crypto_priv { | |||
613 | 718 | ||
614 | enum safexcel_eip_version version; | 719 | enum safexcel_eip_version version; |
615 | struct safexcel_register_offsets offsets; | 720 | struct safexcel_register_offsets offsets; |
721 | struct safexcel_hwconfig hwconfig; | ||
616 | u32 flags; | 722 | u32 flags; |
617 | 723 | ||
618 | /* context DMA pool */ | 724 | /* context DMA pool */ |
@@ -637,14 +743,16 @@ struct safexcel_context { | |||
637 | bool exit_inv; | 743 | bool exit_inv; |
638 | }; | 744 | }; |
639 | 745 | ||
746 | #define HASH_CACHE_SIZE SHA512_BLOCK_SIZE | ||
747 | |||
640 | struct safexcel_ahash_export_state { | 748 | struct safexcel_ahash_export_state { |
641 | u64 len[2]; | 749 | u64 len; |
642 | u64 processed[2]; | 750 | u64 processed; |
643 | 751 | ||
644 | u32 digest; | 752 | u32 digest; |
645 | 753 | ||
646 | u32 state[SHA512_DIGEST_SIZE / sizeof(u32)]; | 754 | u32 state[SHA512_DIGEST_SIZE / sizeof(u32)]; |
647 | u8 cache[SHA512_BLOCK_SIZE << 1]; | 755 | u8 cache[HASH_CACHE_SIZE]; |
648 | }; | 756 | }; |
649 | 757 | ||
650 | /* | 758 | /* |
@@ -655,7 +763,7 @@ struct safexcel_ahash_export_state { | |||
655 | struct safexcel_alg_template { | 763 | struct safexcel_alg_template { |
656 | struct safexcel_crypto_priv *priv; | 764 | struct safexcel_crypto_priv *priv; |
657 | enum safexcel_alg_type type; | 765 | enum safexcel_alg_type type; |
658 | u32 engines; | 766 | enum safexcel_eip_algorithms algo_mask; |
659 | union { | 767 | union { |
660 | struct skcipher_alg skcipher; | 768 | struct skcipher_alg skcipher; |
661 | struct aead_alg aead; | 769 | struct aead_alg aead; |
@@ -716,6 +824,9 @@ extern struct safexcel_alg_template safexcel_alg_ecb_des3_ede; | |||
716 | extern struct safexcel_alg_template safexcel_alg_cbc_des3_ede; | 824 | extern struct safexcel_alg_template safexcel_alg_cbc_des3_ede; |
717 | extern struct safexcel_alg_template safexcel_alg_ecb_aes; | 825 | extern struct safexcel_alg_template safexcel_alg_ecb_aes; |
718 | extern struct safexcel_alg_template safexcel_alg_cbc_aes; | 826 | extern struct safexcel_alg_template safexcel_alg_cbc_aes; |
827 | extern struct safexcel_alg_template safexcel_alg_cfb_aes; | ||
828 | extern struct safexcel_alg_template safexcel_alg_ofb_aes; | ||
829 | extern struct safexcel_alg_template safexcel_alg_ctr_aes; | ||
719 | extern struct safexcel_alg_template safexcel_alg_md5; | 830 | extern struct safexcel_alg_template safexcel_alg_md5; |
720 | extern struct safexcel_alg_template safexcel_alg_sha1; | 831 | extern struct safexcel_alg_template safexcel_alg_sha1; |
721 | extern struct safexcel_alg_template safexcel_alg_sha224; | 832 | extern struct safexcel_alg_template safexcel_alg_sha224; |
@@ -733,5 +844,14 @@ extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes; | |||
733 | extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes; | 844 | extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes; |
734 | extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes; | 845 | extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes; |
735 | extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes; | 846 | extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes; |
847 | extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede; | ||
848 | extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes; | ||
849 | extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes; | ||
850 | extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes; | ||
851 | extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes; | ||
852 | extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes; | ||
853 | extern struct safexcel_alg_template safexcel_alg_xts_aes; | ||
854 | extern struct safexcel_alg_template safexcel_alg_gcm; | ||
855 | extern struct safexcel_alg_template safexcel_alg_ccm; | ||
736 | 856 | ||
737 | #endif | 857 | #endif |
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 8cdbdbe35681..ef51f8c2b473 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c | |||
@@ -12,8 +12,12 @@ | |||
12 | #include <crypto/aead.h> | 12 | #include <crypto/aead.h> |
13 | #include <crypto/aes.h> | 13 | #include <crypto/aes.h> |
14 | #include <crypto/authenc.h> | 14 | #include <crypto/authenc.h> |
15 | #include <crypto/des.h> | 15 | #include <crypto/ctr.h> |
16 | #include <crypto/internal/des.h> | ||
17 | #include <crypto/gcm.h> | ||
18 | #include <crypto/ghash.h> | ||
16 | #include <crypto/sha.h> | 19 | #include <crypto/sha.h> |
20 | #include <crypto/xts.h> | ||
17 | #include <crypto/skcipher.h> | 21 | #include <crypto/skcipher.h> |
18 | #include <crypto/internal/aead.h> | 22 | #include <crypto/internal/aead.h> |
19 | #include <crypto/internal/skcipher.h> | 23 | #include <crypto/internal/skcipher.h> |
@@ -38,15 +42,19 @@ struct safexcel_cipher_ctx { | |||
38 | u32 mode; | 42 | u32 mode; |
39 | enum safexcel_cipher_alg alg; | 43 | enum safexcel_cipher_alg alg; |
40 | bool aead; | 44 | bool aead; |
45 | int xcm; /* 0=authenc, 1=GCM, 2=CCM */ | ||
41 | 46 | ||
42 | __le32 key[8]; | 47 | __le32 key[16]; |
43 | unsigned int key_len; | 48 | u32 nonce; |
49 | unsigned int key_len, xts; | ||
44 | 50 | ||
45 | /* All the below is AEAD specific */ | 51 | /* All the below is AEAD specific */ |
46 | u32 hash_alg; | 52 | u32 hash_alg; |
47 | u32 state_sz; | 53 | u32 state_sz; |
48 | u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)]; | 54 | u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)]; |
49 | u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)]; | 55 | u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)]; |
56 | |||
57 | struct crypto_cipher *hkaes; | ||
50 | }; | 58 | }; |
51 | 59 | ||
52 | struct safexcel_cipher_req { | 60 | struct safexcel_cipher_req { |
@@ -54,16 +62,47 @@ struct safexcel_cipher_req { | |||
54 | /* Number of result descriptors associated to the request */ | 62 | /* Number of result descriptors associated to the request */ |
55 | unsigned int rdescs; | 63 | unsigned int rdescs; |
56 | bool needs_inv; | 64 | bool needs_inv; |
65 | int nr_src, nr_dst; | ||
57 | }; | 66 | }; |
58 | 67 | ||
59 | static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, | 68 | static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, |
60 | struct safexcel_command_desc *cdesc, | 69 | struct safexcel_command_desc *cdesc) |
61 | u32 length) | ||
62 | { | 70 | { |
63 | struct safexcel_token *token; | 71 | u32 block_sz = 0; |
64 | u32 offset = 0, block_sz = 0; | 72 | |
73 | if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) { | ||
74 | cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; | ||
75 | |||
76 | /* 32 bit nonce */ | ||
77 | cdesc->control_data.token[0] = ctx->nonce; | ||
78 | /* 64 bit IV part */ | ||
79 | memcpy(&cdesc->control_data.token[1], iv, 8); | ||
80 | /* 32 bit counter, start at 1 (big endian!) */ | ||
81 | cdesc->control_data.token[3] = cpu_to_be32(1); | ||
82 | |||
83 | return; | ||
84 | } else if (ctx->xcm == EIP197_XCM_MODE_GCM) { | ||
85 | cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; | ||
86 | |||
87 | /* 96 bit IV part */ | ||
88 | memcpy(&cdesc->control_data.token[0], iv, 12); | ||
89 | /* 32 bit counter, start at 1 (big endian!) */ | ||
90 | cdesc->control_data.token[3] = cpu_to_be32(1); | ||
91 | |||
92 | return; | ||
93 | } else if (ctx->xcm == EIP197_XCM_MODE_CCM) { | ||
94 | cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; | ||
65 | 95 | ||
66 | if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { | 96 | /* Variable length IV part */ |
97 | memcpy(&cdesc->control_data.token[0], iv, 15 - iv[0]); | ||
98 | /* Start variable length counter at 0 */ | ||
99 | memset((u8 *)&cdesc->control_data.token[0] + 15 - iv[0], | ||
100 | 0, iv[0] + 1); | ||
101 | |||
102 | return; | ||
103 | } | ||
104 | |||
105 | if (ctx->mode != CONTEXT_CONTROL_CRYPTO_MODE_ECB) { | ||
67 | switch (ctx->alg) { | 106 | switch (ctx->alg) { |
68 | case SAFEXCEL_DES: | 107 | case SAFEXCEL_DES: |
69 | block_sz = DES_BLOCK_SIZE; | 108 | block_sz = DES_BLOCK_SIZE; |
@@ -78,39 +117,28 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, | |||
78 | cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; | 117 | cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; |
79 | break; | 118 | break; |
80 | } | 119 | } |
81 | |||
82 | offset = block_sz / sizeof(u32); | ||
83 | memcpy(cdesc->control_data.token, iv, block_sz); | 120 | memcpy(cdesc->control_data.token, iv, block_sz); |
84 | } | 121 | } |
122 | } | ||
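The three early returns above each fill the 4-word token IV differently. For reference, the 16-byte layouts they produce for CTR and GCM, shown as illustrative structs (these are not driver types):

```c
struct ctr_token_iv {		/* RFC3686 CTR mode */
	u32    nonce;		/* last 4 key bytes, stored verbatim */
	u8     iv[8];		/* per-request IV from the caller */
	__be32 counter;		/* always starts at 1, big endian */
};

struct gcm_token_iv {		/* GCM */
	u8     iv[12];		/* 96-bit per-request IV */
	__be32 counter;		/* always starts at 1, big endian */
};
```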
123 | |||
124 | static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, | ||
125 | struct safexcel_command_desc *cdesc, | ||
126 | u32 length) | ||
127 | { | ||
128 | struct safexcel_token *token; | ||
129 | |||
130 | safexcel_cipher_token(ctx, iv, cdesc); | ||
85 | 131 | ||
86 | token = (struct safexcel_token *)(cdesc->control_data.token + offset); | 132 | /* skip over worst case IV of 4 dwords, no need to be exact */ |
133 | token = (struct safexcel_token *)(cdesc->control_data.token + 4); | ||
87 | 134 | ||
88 | token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; | 135 | token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; |
89 | token[0].packet_length = length; | 136 | token[0].packet_length = length; |
90 | token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET | | 137 | token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET | |
91 | EIP197_TOKEN_STAT_LAST_HASH; | 138 | EIP197_TOKEN_STAT_LAST_HASH; |
92 | token[0].instructions = EIP197_TOKEN_INS_LAST | | 139 | token[0].instructions = EIP197_TOKEN_INS_LAST | |
93 | EIP197_TOKEN_INS_TYPE_CRYTO | | 140 | EIP197_TOKEN_INS_TYPE_CRYPTO | |
94 | EIP197_TOKEN_INS_TYPE_OUTPUT; | 141 | EIP197_TOKEN_INS_TYPE_OUTPUT; |
95 | |||
96 | if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { | ||
97 | u32 last = (EIP197_MAX_TOKENS - 1) - offset; | ||
98 | |||
99 | token[last].opcode = EIP197_TOKEN_OPCODE_CTX_ACCESS; | ||
100 | token[last].packet_length = EIP197_TOKEN_DIRECTION_EXTERNAL | | ||
101 | EIP197_TOKEN_EXEC_IF_SUCCESSFUL| | ||
102 | EIP197_TOKEN_CTX_OFFSET(0x2); | ||
103 | token[last].stat = EIP197_TOKEN_STAT_LAST_HASH | | ||
104 | EIP197_TOKEN_STAT_LAST_PACKET; | ||
105 | token[last].instructions = | ||
106 | EIP197_TOKEN_INS_ORIGIN_LEN(block_sz / sizeof(u32)) | | ||
107 | EIP197_TOKEN_INS_ORIGIN_IV0; | ||
108 | |||
109 | /* Store the updated IV values back in the internal context | ||
110 | * registers. | ||
111 | */ | ||
112 | cdesc->control_data.control1 |= CONTEXT_CONTROL_CRYPTO_STORE; | ||
113 | } | ||
114 | } | 142 | } |
115 | 143 | ||
116 | static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, | 144 | static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, |
@@ -119,53 +147,123 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, | |||
119 | u32 cryptlen, u32 assoclen, u32 digestsize) | 147 | u32 cryptlen, u32 assoclen, u32 digestsize) |
120 | { | 148 | { |
121 | struct safexcel_token *token; | 149 | struct safexcel_token *token; |
122 | unsigned offset = 0; | ||
123 | 150 | ||
124 | if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { | 151 | safexcel_cipher_token(ctx, iv, cdesc); |
125 | offset = AES_BLOCK_SIZE / sizeof(u32); | ||
126 | memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE); | ||
127 | 152 | ||
128 | cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; | 153 | if (direction == SAFEXCEL_ENCRYPT) { |
154 | /* align end of instruction sequence to end of token */ | ||
155 | token = (struct safexcel_token *)(cdesc->control_data.token + | ||
156 | EIP197_MAX_TOKENS - 13); | ||
157 | |||
158 | token[12].opcode = EIP197_TOKEN_OPCODE_INSERT; | ||
159 | token[12].packet_length = digestsize; | ||
160 | token[12].stat = EIP197_TOKEN_STAT_LAST_HASH | | ||
161 | EIP197_TOKEN_STAT_LAST_PACKET; | ||
162 | token[12].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | | ||
163 | EIP197_TOKEN_INS_INSERT_HASH_DIGEST; | ||
164 | } else { | ||
165 | cryptlen -= digestsize; | ||
166 | |||
167 | /* align end of instruction sequence to end of token */ | ||
168 | token = (struct safexcel_token *)(cdesc->control_data.token + | ||
169 | EIP197_MAX_TOKENS - 14); | ||
170 | |||
171 | token[12].opcode = EIP197_TOKEN_OPCODE_RETRIEVE; | ||
172 | token[12].packet_length = digestsize; | ||
173 | token[12].stat = EIP197_TOKEN_STAT_LAST_HASH | | ||
174 | EIP197_TOKEN_STAT_LAST_PACKET; | ||
175 | token[12].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST; | ||
176 | |||
177 | token[13].opcode = EIP197_TOKEN_OPCODE_VERIFY; | ||
178 | token[13].packet_length = digestsize | | ||
179 | EIP197_TOKEN_HASH_RESULT_VERIFY; | ||
180 | token[13].stat = EIP197_TOKEN_STAT_LAST_HASH | | ||
181 | EIP197_TOKEN_STAT_LAST_PACKET; | ||
182 | token[13].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT; | ||
129 | } | 183 | } |
130 | 184 | ||
131 | token = (struct safexcel_token *)(cdesc->control_data.token + offset); | 185 | token[6].opcode = EIP197_TOKEN_OPCODE_DIRECTION; |
186 | token[6].packet_length = assoclen; | ||
187 | |||
188 | if (likely(cryptlen)) { | ||
189 | token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH; | ||
190 | |||
191 | token[10].opcode = EIP197_TOKEN_OPCODE_DIRECTION; | ||
192 | token[10].packet_length = cryptlen; | ||
193 | token[10].stat = EIP197_TOKEN_STAT_LAST_HASH; | ||
194 | token[10].instructions = EIP197_TOKEN_INS_LAST | | ||
195 | EIP197_TOKEN_INS_TYPE_CRYPTO | | ||
196 | EIP197_TOKEN_INS_TYPE_HASH | | ||
197 | EIP197_TOKEN_INS_TYPE_OUTPUT; | ||
198 | } else if (ctx->xcm != EIP197_XCM_MODE_CCM) { | ||
199 | token[6].stat = EIP197_TOKEN_STAT_LAST_HASH; | ||
200 | token[6].instructions = EIP197_TOKEN_INS_LAST | | ||
201 | EIP197_TOKEN_INS_TYPE_HASH; | ||
202 | } | ||
132 | 203 | ||
133 | if (direction == SAFEXCEL_DECRYPT) | 204 | if (!ctx->xcm) |
134 | cryptlen -= digestsize; | 205 | return; |
135 | 206 | ||
136 | token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; | 207 | token[8].opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES; |
137 | token[0].packet_length = assoclen; | 208 | token[8].packet_length = 0; |
138 | token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH | | 209 | token[8].instructions = AES_BLOCK_SIZE; |
139 | EIP197_TOKEN_INS_TYPE_OUTPUT; | ||
140 | 210 | ||
141 | token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION; | 211 | token[9].opcode = EIP197_TOKEN_OPCODE_INSERT; |
142 | token[1].packet_length = cryptlen; | 212 | token[9].packet_length = AES_BLOCK_SIZE; |
143 | token[1].stat = EIP197_TOKEN_STAT_LAST_HASH; | 213 | token[9].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | |
144 | token[1].instructions = EIP197_TOKEN_INS_LAST | | 214 | EIP197_TOKEN_INS_TYPE_CRYPTO; |
145 | EIP197_TOKEN_INS_TYPE_CRYTO | | ||
146 | EIP197_TOKEN_INS_TYPE_HASH | | ||
147 | EIP197_TOKEN_INS_TYPE_OUTPUT; | ||
148 | 215 | ||
149 | if (direction == SAFEXCEL_ENCRYPT) { | 216 | if (ctx->xcm == EIP197_XCM_MODE_GCM) { |
150 | token[2].opcode = EIP197_TOKEN_OPCODE_INSERT; | 217 | token[6].instructions = EIP197_TOKEN_INS_LAST | |
151 | token[2].packet_length = digestsize; | 218 | EIP197_TOKEN_INS_TYPE_HASH; |
152 | token[2].stat = EIP197_TOKEN_STAT_LAST_HASH | | ||
153 | EIP197_TOKEN_STAT_LAST_PACKET; | ||
154 | token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | | ||
155 | EIP197_TOKEN_INS_INSERT_HASH_DIGEST; | ||
156 | } else { | 219 | } else { |
157 | token[2].opcode = EIP197_TOKEN_OPCODE_RETRIEVE; | 220 | u8 *cbcmaciv = (u8 *)&token[1]; |
158 | token[2].packet_length = digestsize; | 221 | u32 *aadlen = (u32 *)&token[5]; |
159 | token[2].stat = EIP197_TOKEN_STAT_LAST_HASH | | 222 | |
160 | EIP197_TOKEN_STAT_LAST_PACKET; | 223 | /* Construct IV block B0 for the CBC-MAC */ |
161 | token[2].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST; | 224 | token[0].opcode = EIP197_TOKEN_OPCODE_INSERT; |
162 | 225 | token[0].packet_length = AES_BLOCK_SIZE + | |
163 | token[3].opcode = EIP197_TOKEN_OPCODE_VERIFY; | 226 | ((assoclen > 0) << 1); |
164 | token[3].packet_length = digestsize | | 227 | token[0].instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN | |
165 | EIP197_TOKEN_HASH_RESULT_VERIFY; | 228 | EIP197_TOKEN_INS_TYPE_HASH; |
166 | token[3].stat = EIP197_TOKEN_STAT_LAST_HASH | | 229 | /* Variable length IV part */ |
167 | EIP197_TOKEN_STAT_LAST_PACKET; | 230 | memcpy(cbcmaciv, iv, 15 - iv[0]); |
168 | token[3].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT; | 231 | /* fixup flags byte */ |
232 | cbcmaciv[0] |= ((assoclen > 0) << 6) | ((digestsize - 2) << 2); | ||
233 | /* Clear upper bytes of variable message length to 0 */ | ||
234 | memset(cbcmaciv + 15 - iv[0], 0, iv[0] - 1); | ||
235 | /* insert lower 2 bytes of message length */ | ||
236 | cbcmaciv[14] = cryptlen >> 8; | ||
237 | cbcmaciv[15] = cryptlen & 255; | ||
238 | |||
239 | if (assoclen) { | ||
240 | *aadlen = cpu_to_le32(cpu_to_be16(assoclen)); | ||
241 | assoclen += 2; | ||
242 | } | ||
243 | |||
244 | token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH; | ||
245 | |||
246 | /* Align AAD data towards hash engine */ | ||
247 | token[7].opcode = EIP197_TOKEN_OPCODE_INSERT; | ||
248 | assoclen &= 15; | ||
249 | token[7].packet_length = assoclen ? 16 - assoclen : 0; | ||
250 | |||
251 | if (likely(cryptlen)) { | ||
252 | token[7].instructions = EIP197_TOKEN_INS_TYPE_HASH; | ||
253 | |||
254 | /* Align crypto data towards hash engine */ | ||
255 | token[10].stat = 0; | ||
256 | |||
257 | token[11].opcode = EIP197_TOKEN_OPCODE_INSERT; | ||
258 | cryptlen &= 15; | ||
259 | token[11].packet_length = cryptlen ? 16 - cryptlen : 0; | ||
260 | token[11].stat = EIP197_TOKEN_STAT_LAST_HASH; | ||
261 | token[11].instructions = EIP197_TOKEN_INS_TYPE_HASH; | ||
262 | } else { | ||
263 | token[7].stat = EIP197_TOKEN_STAT_LAST_HASH; | ||
264 | token[7].instructions = EIP197_TOKEN_INS_LAST | | ||
265 | EIP197_TOKEN_INS_TYPE_HASH; | ||
266 | } | ||
169 | } | 267 | } |
170 | } | 268 | } |
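The CCM path above hand-builds the B0 block that seeds the CBC-MAC. A worked check of the flags byte against RFC 3610, assuming a 16-byte tag, AAD present, and iv[0] = 2 (i.e. a 3-byte message-length field):

```c
/* flags = Adata << 6 | M' << 3 | L', where M' = (taglen - 2) / 2 and
 * L' = L - 1 (already sitting in the low bits of iv[0] after the memcpy):
 *   Adata = 1            -> 0x40
 *   M'    = (16 - 2) / 2 -> 7 << 3 = 0x38
 *   L'    = iv[0]        -> 0x02
 * Note ((digestsize - 2) << 2) == (((digestsize - 2) / 2) << 3) for the
 * even digest sizes CCM allows, so the OR in the code lands M' in
 * bits 5..3 as required.
 */
u8 flags = 0x40 | 0x38 | 0x02;	/* == 0x7a for this example */
```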
171 | 269 | ||
@@ -178,7 +276,7 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm, | |||
178 | struct crypto_aes_ctx aes; | 276 | struct crypto_aes_ctx aes; |
179 | int ret, i; | 277 | int ret, i; |
180 | 278 | ||
181 | ret = crypto_aes_expand_key(&aes, key, len); | 279 | ret = aes_expandkey(&aes, key, len); |
182 | if (ret) { | 280 | if (ret) { |
183 | crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | 281 | crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
184 | return ret; | 282 | return ret; |
@@ -202,22 +300,49 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm, | |||
202 | return 0; | 300 | return 0; |
203 | } | 301 | } |
204 | 302 | ||
205 | static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key, | 303 | static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key, |
206 | unsigned int len) | 304 | unsigned int len) |
207 | { | 305 | { |
208 | struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); | 306 | struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); |
209 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | 307 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); |
210 | struct safexcel_ahash_export_state istate, ostate; | 308 | struct safexcel_ahash_export_state istate, ostate; |
211 | struct safexcel_crypto_priv *priv = ctx->priv; | 309 | struct safexcel_crypto_priv *priv = ctx->priv; |
212 | struct crypto_authenc_keys keys; | 310 | struct crypto_authenc_keys keys; |
311 | struct crypto_aes_ctx aes; | ||
312 | int err = -EINVAL; | ||
213 | 313 | ||
214 | if (crypto_authenc_extractkeys(&keys, key, len) != 0) | 314 | if (crypto_authenc_extractkeys(&keys, key, len) != 0) |
215 | goto badkey; | 315 | goto badkey; |
216 | 316 | ||
217 | if (keys.enckeylen > sizeof(ctx->key)) | 317 | if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) { |
218 | goto badkey; | 318 | /* Minimum keysize is minimum AES key size + nonce size */ |
319 | if (keys.enckeylen < (AES_MIN_KEY_SIZE + | ||
320 | CTR_RFC3686_NONCE_SIZE)) | ||
321 | goto badkey; | ||
322 | /* last 4 bytes of key are the nonce! */ | ||
323 | ctx->nonce = *(u32 *)(keys.enckey + keys.enckeylen - | ||
324 | CTR_RFC3686_NONCE_SIZE); | ||
325 | /* exclude the nonce here */ | ||
326 | 		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE; | ||
327 | } | ||
219 | 328 | ||
220 | /* Encryption key */ | 329 | /* Encryption key */ |
330 | switch (ctx->alg) { | ||
331 | case SAFEXCEL_3DES: | ||
332 | err = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen); | ||
333 | if (unlikely(err)) | ||
334 | goto badkey_expflags; | ||
335 | break; | ||
336 | case SAFEXCEL_AES: | ||
337 | err = aes_expandkey(&aes, keys.enckey, keys.enckeylen); | ||
338 | if (unlikely(err)) | ||
339 | goto badkey; | ||
340 | break; | ||
341 | default: | ||
342 | dev_err(priv->dev, "aead: unsupported cipher algorithm\n"); | ||
343 | goto badkey; | ||
344 | } | ||
345 | |||
221 | if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma && | 346 | if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma && |
222 | memcmp(ctx->key, keys.enckey, keys.enckeylen)) | 347 | memcmp(ctx->key, keys.enckey, keys.enckeylen)) |
223 | ctx->base.needs_inv = true; | 348 | ctx->base.needs_inv = true; |
@@ -274,8 +399,9 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key, | |||
274 | 399 | ||
275 | badkey: | 400 | badkey: |
276 | crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | 401 | crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
402 | badkey_expflags: | ||
277 | memzero_explicit(&keys, sizeof(keys)); | 403 | memzero_explicit(&keys, sizeof(keys)); |
278 | return -EINVAL; | 404 | return err; |
279 | } | 405 | } |
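For the rfc3686 templates the authenc encryption key blob carries the 4-byte nonce appended to the raw AES key, which setkey peels off before expansion. A sketch of that split (the local names are illustrative):

```c
/* keys.enckey layout for authenc(hmac(shaX),rfc3686(ctr(aes))):
 *   [ AES key: 16/24/32 bytes ][ nonce: 4 bytes ]
 */
unsigned int aeskeylen = keys.enckeylen - CTR_RFC3686_NONCE_SIZE;
u32 nonce;

memcpy(&nonce, keys.enckey + aeskeylen, CTR_RFC3686_NONCE_SIZE);
/* expand only the AES part of the blob */
err = aes_expandkey(&aes, keys.enckey, aeskeylen);
```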
280 | 406 | ||
281 | static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, | 407 | static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, |
@@ -284,59 +410,78 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, | |||
284 | struct safexcel_command_desc *cdesc) | 410 | struct safexcel_command_desc *cdesc) |
285 | { | 411 | { |
286 | struct safexcel_crypto_priv *priv = ctx->priv; | 412 | struct safexcel_crypto_priv *priv = ctx->priv; |
287 | int ctrl_size; | 413 | int ctrl_size = ctx->key_len / sizeof(u32); |
414 | |||
415 | cdesc->control_data.control1 = ctx->mode; | ||
288 | 416 | ||
289 | if (ctx->aead) { | 417 | if (ctx->aead) { |
418 | /* Take into account the ipad+opad digests */ | ||
419 | if (ctx->xcm) { | ||
420 | ctrl_size += ctx->state_sz / sizeof(u32); | ||
421 | cdesc->control_data.control0 = | ||
422 | CONTEXT_CONTROL_KEY_EN | | ||
423 | CONTEXT_CONTROL_DIGEST_XCM | | ||
424 | ctx->hash_alg | | ||
425 | CONTEXT_CONTROL_SIZE(ctrl_size); | ||
426 | } else { | ||
427 | ctrl_size += ctx->state_sz / sizeof(u32) * 2; | ||
428 | cdesc->control_data.control0 = | ||
429 | CONTEXT_CONTROL_KEY_EN | | ||
430 | CONTEXT_CONTROL_DIGEST_HMAC | | ||
431 | ctx->hash_alg | | ||
432 | CONTEXT_CONTROL_SIZE(ctrl_size); | ||
433 | } | ||
290 | if (sreq->direction == SAFEXCEL_ENCRYPT) | 434 | if (sreq->direction == SAFEXCEL_ENCRYPT) |
291 | cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT; | 435 | cdesc->control_data.control0 |= |
436 | (ctx->xcm == EIP197_XCM_MODE_CCM) ? | ||
437 | CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT : | ||
438 | CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT; | ||
439 | |||
292 | else | 440 | else |
293 | cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN; | 441 | cdesc->control_data.control0 |= |
442 | (ctx->xcm == EIP197_XCM_MODE_CCM) ? | ||
443 | CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN : | ||
444 | CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN; | ||
294 | } else { | 445 | } else { |
295 | cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT; | 446 | if (sreq->direction == SAFEXCEL_ENCRYPT) |
296 | 447 | cdesc->control_data.control0 = | |
297 | /* The decryption control type is a combination of the | 448 | CONTEXT_CONTROL_TYPE_CRYPTO_OUT | |
298 | * encryption type and CONTEXT_CONTROL_TYPE_NULL_IN, for all | 449 | CONTEXT_CONTROL_KEY_EN | |
299 | * types. | 450 | CONTEXT_CONTROL_SIZE(ctrl_size); |
300 | */ | 451 | else |
301 | if (sreq->direction == SAFEXCEL_DECRYPT) | 452 | cdesc->control_data.control0 = |
302 | cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_NULL_IN; | 453 | CONTEXT_CONTROL_TYPE_CRYPTO_IN | |
454 | CONTEXT_CONTROL_KEY_EN | | ||
455 | CONTEXT_CONTROL_SIZE(ctrl_size); | ||
303 | } | 456 | } |
304 | 457 | ||
305 | cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN; | ||
306 | cdesc->control_data.control1 |= ctx->mode; | ||
307 | |||
308 | if (ctx->aead) | ||
309 | cdesc->control_data.control0 |= CONTEXT_CONTROL_DIGEST_HMAC | | ||
310 | ctx->hash_alg; | ||
311 | |||
312 | if (ctx->alg == SAFEXCEL_DES) { | 458 | if (ctx->alg == SAFEXCEL_DES) { |
313 | cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_DES; | 459 | cdesc->control_data.control0 |= |
460 | CONTEXT_CONTROL_CRYPTO_ALG_DES; | ||
314 | } else if (ctx->alg == SAFEXCEL_3DES) { | 461 | } else if (ctx->alg == SAFEXCEL_3DES) { |
315 | cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_3DES; | 462 | cdesc->control_data.control0 |= |
463 | CONTEXT_CONTROL_CRYPTO_ALG_3DES; | ||
316 | } else if (ctx->alg == SAFEXCEL_AES) { | 464 | } else if (ctx->alg == SAFEXCEL_AES) { |
317 | switch (ctx->key_len) { | 465 | switch (ctx->key_len >> ctx->xts) { |
318 | case AES_KEYSIZE_128: | 466 | case AES_KEYSIZE_128: |
319 | cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128; | 467 | cdesc->control_data.control0 |= |
468 | CONTEXT_CONTROL_CRYPTO_ALG_AES128; | ||
320 | break; | 469 | break; |
321 | case AES_KEYSIZE_192: | 470 | case AES_KEYSIZE_192: |
322 | cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192; | 471 | cdesc->control_data.control0 |= |
472 | CONTEXT_CONTROL_CRYPTO_ALG_AES192; | ||
323 | break; | 473 | break; |
324 | case AES_KEYSIZE_256: | 474 | case AES_KEYSIZE_256: |
325 | cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256; | 475 | cdesc->control_data.control0 |= |
476 | CONTEXT_CONTROL_CRYPTO_ALG_AES256; | ||
326 | break; | 477 | break; |
327 | default: | 478 | default: |
328 | dev_err(priv->dev, "aes keysize not supported: %u\n", | 479 | dev_err(priv->dev, "aes keysize not supported: %u\n", |
329 | ctx->key_len); | 480 | ctx->key_len >> ctx->xts); |
330 | return -EINVAL; | 481 | return -EINVAL; |
331 | } | 482 | } |
332 | } | 483 | } |
333 | 484 | ||
334 | ctrl_size = ctx->key_len / sizeof(u32); | ||
335 | if (ctx->aead) | ||
336 | /* Take in account the ipad+opad digests */ | ||
337 | ctrl_size += ctx->state_sz / sizeof(u32) * 2; | ||
338 | cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size); | ||
339 | |||
340 | return 0; | 485 | return 0; |
341 | } | 486 | } |
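The context size now covers the key plus one hash state for XCM modes or two (ipad and opad) for HMAC. A worked count in 32-bit words, assuming state_sz is SHA256_DIGEST_SIZE for the HMAC case and the 16-byte GHASH key for GCM:

```c
/* authenc(hmac(sha256),cbc(aes)) with AES-256:
 *   key:  32 bytes -> 8 words
 *   ipad: 32 bytes -> 8 words
 *   opad: 32 bytes -> 8 words
 *   => CONTEXT_CONTROL_SIZE(24)
 *
 * gcm(aes) with AES-256 (XCM, single state):
 *   key:  32 bytes -> 8 words
 *   H:    16 bytes -> 4 words
 *   => CONTEXT_CONTROL_SIZE(12)
 */
```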
342 | 487 | ||
@@ -348,6 +493,9 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin | |||
348 | struct safexcel_cipher_req *sreq, | 493 | struct safexcel_cipher_req *sreq, |
349 | bool *should_complete, int *ret) | 494 | bool *should_complete, int *ret) |
350 | { | 495 | { |
496 | struct skcipher_request *areq = skcipher_request_cast(async); | ||
497 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(areq); | ||
498 | struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(skcipher); | ||
351 | struct safexcel_result_desc *rdesc; | 499 | struct safexcel_result_desc *rdesc; |
352 | int ndesc = 0; | 500 | int ndesc = 0; |
353 | 501 | ||
@@ -374,10 +522,22 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin | |||
374 | safexcel_complete(priv, ring); | 522 | safexcel_complete(priv, ring); |
375 | 523 | ||
376 | if (src == dst) { | 524 | if (src == dst) { |
377 | dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_BIDIRECTIONAL); | 525 | dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL); |
378 | } else { | 526 | } else { |
379 | dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_TO_DEVICE); | 527 | dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE); |
380 | dma_unmap_sg(priv->dev, dst, sg_nents(dst), DMA_FROM_DEVICE); | 528 | dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE); |
529 | } | ||
530 | |||
531 | /* | ||
532 | * Update IV in req from last crypto output word for CBC modes | ||
533 | */ | ||
534 | if ((!ctx->aead) && (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) && | ||
535 | (sreq->direction == SAFEXCEL_ENCRYPT)) { | ||
536 | /* For encrypt, take the last output block */ | ||
537 | sg_pcopy_to_buffer(dst, sreq->nr_dst, areq->iv, | ||
538 | crypto_skcipher_ivsize(skcipher), | ||
539 | (cryptlen - | ||
540 | crypto_skcipher_ivsize(skcipher))); | ||
381 | } | 541 | } |
382 | 542 | ||
383 | *should_complete = true; | 543 | *should_complete = true; |
@@ -392,53 +552,105 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring, | |||
392 | unsigned int digestsize, u8 *iv, int *commands, | 552 | unsigned int digestsize, u8 *iv, int *commands, |
393 | int *results) | 553 | int *results) |
394 | { | 554 | { |
555 | struct skcipher_request *areq = skcipher_request_cast(base); | ||
556 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(areq); | ||
395 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); | 557 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); |
396 | struct safexcel_crypto_priv *priv = ctx->priv; | 558 | struct safexcel_crypto_priv *priv = ctx->priv; |
397 | struct safexcel_command_desc *cdesc; | 559 | struct safexcel_command_desc *cdesc; |
560 | struct safexcel_command_desc *first_cdesc = NULL; | ||
398 | struct safexcel_result_desc *rdesc, *first_rdesc = NULL; | 561 | struct safexcel_result_desc *rdesc, *first_rdesc = NULL; |
399 | struct scatterlist *sg; | 562 | struct scatterlist *sg; |
400 | unsigned int totlen = cryptlen + assoclen; | 563 | unsigned int totlen; |
401 | int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen; | 564 | unsigned int totlen_src = cryptlen + assoclen; |
402 | int i, ret = 0; | 565 | unsigned int totlen_dst = totlen_src; |
566 | int n_cdesc = 0, n_rdesc = 0; | ||
567 | int queued, i, ret = 0; | ||
568 | bool first = true; | ||
569 | |||
570 | sreq->nr_src = sg_nents_for_len(src, totlen_src); | ||
571 | |||
572 | if (ctx->aead) { | ||
573 | /* | ||
574 | * AEAD has auth tag appended to output for encrypt and | ||
575 | * removed from the output for decrypt! | ||
576 | */ | ||
577 | if (sreq->direction == SAFEXCEL_DECRYPT) | ||
578 | totlen_dst -= digestsize; | ||
579 | else | ||
580 | totlen_dst += digestsize; | ||
581 | |||
582 | memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32), | ||
583 | ctx->ipad, ctx->state_sz); | ||
584 | if (!ctx->xcm) | ||
585 | memcpy(ctx->base.ctxr->data + (ctx->key_len + | ||
586 | ctx->state_sz) / sizeof(u32), ctx->opad, | ||
587 | ctx->state_sz); | ||
588 | } else if ((ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) && | ||
589 | (sreq->direction == SAFEXCEL_DECRYPT)) { | ||
590 | /* | ||
591 | * Save the IV from the last crypto input block for CBC modes in the | ||
592 | * decrypt direction. Need to do this first, as an in-place operation | ||
593 | * would otherwise overwrite it. | ||
594 | */ | ||
595 | sg_pcopy_to_buffer(src, sreq->nr_src, areq->iv, | ||
596 | crypto_skcipher_ivsize(skcipher), | ||
597 | (totlen_src - | ||
598 | crypto_skcipher_ivsize(skcipher))); | ||
599 | } | ||
600 | |||
601 | sreq->nr_dst = sg_nents_for_len(dst, totlen_dst); | ||
602 | |||
603 | /* | ||
604 | * Remember actual input length, source buffer length may be | ||
605 | * updated in case of inline operation below. | ||
606 | */ | ||
607 | totlen = totlen_src; | ||
608 | queued = totlen_src; | ||
403 | 609 | ||
404 | if (src == dst) { | 610 | if (src == dst) { |
405 | nr_src = dma_map_sg(priv->dev, src, sg_nents(src), | 611 | sreq->nr_src = max(sreq->nr_src, sreq->nr_dst); |
406 | DMA_BIDIRECTIONAL); | 612 | sreq->nr_dst = sreq->nr_src; |
407 | nr_dst = nr_src; | 613 | if (unlikely((totlen_src || totlen_dst) && |
408 | if (!nr_src) | 614 | (sreq->nr_src <= 0))) { |
615 | dev_err(priv->dev, "In-place buffer not large enough (need %d bytes)!", | ||
616 | max(totlen_src, totlen_dst)); | ||
409 | return -EINVAL; | 617 | return -EINVAL; |
618 | } | ||
619 | dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL); | ||
410 | } else { | 620 | } else { |
411 | nr_src = dma_map_sg(priv->dev, src, sg_nents(src), | 621 | if (unlikely(totlen_src && (sreq->nr_src <= 0))) { |
412 | DMA_TO_DEVICE); | 622 | dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!", |
413 | if (!nr_src) | 623 | totlen_src); |
414 | return -EINVAL; | 624 | return -EINVAL; |
625 | } | ||
626 | dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE); | ||
415 | 627 | ||
416 | nr_dst = dma_map_sg(priv->dev, dst, sg_nents(dst), | 628 | if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) { |
417 | DMA_FROM_DEVICE); | 629 | dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!", |
418 | if (!nr_dst) { | 630 | totlen_dst); |
419 | dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE); | 631 | dma_unmap_sg(priv->dev, src, sreq->nr_src, |
632 | DMA_TO_DEVICE); | ||
420 | return -EINVAL; | 633 | return -EINVAL; |
421 | } | 634 | } |
635 | dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE); | ||
422 | } | 636 | } |
423 | 637 | ||
424 | memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len); | 638 | memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len); |
425 | 639 | ||
426 | if (ctx->aead) { | 640 | /* The EIP cannot deal with zero-length input packets! */ |
427 | memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32), | 641 | if (totlen == 0) |
428 | ctx->ipad, ctx->state_sz); | 642 | totlen = 1; |
429 | memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) / sizeof(u32), | ||
430 | ctx->opad, ctx->state_sz); | ||
431 | } | ||
432 | 643 | ||
433 | /* command descriptors */ | 644 | /* command descriptors */ |
434 | for_each_sg(src, sg, nr_src, i) { | 645 | for_each_sg(src, sg, sreq->nr_src, i) { |
435 | int len = sg_dma_len(sg); | 646 | int len = sg_dma_len(sg); |
436 | 647 | ||
437 | /* Do not overflow the request */ | 648 | /* Do not overflow the request */ |
438 | if (queued - len < 0) | 649 | if (queued - len < 0) |
439 | len = queued; | 650 | len = queued; |
440 | 651 | ||
441 | cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len), | 652 | cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, |
653 | !(queued - len), | ||
442 | sg_dma_address(sg), len, totlen, | 654 | sg_dma_address(sg), len, totlen, |
443 | ctx->base.ctxr_dma); | 655 | ctx->base.ctxr_dma); |
444 | if (IS_ERR(cdesc)) { | 656 | if (IS_ERR(cdesc)) { |
@@ -449,14 +661,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring, | |||
449 | n_cdesc++; | 661 | n_cdesc++; |
450 | 662 | ||
451 | if (n_cdesc == 1) { | 663 | if (n_cdesc == 1) { |
452 | safexcel_context_control(ctx, base, sreq, cdesc); | 664 | first_cdesc = cdesc; |
453 | if (ctx->aead) | ||
454 | safexcel_aead_token(ctx, iv, cdesc, | ||
455 | sreq->direction, cryptlen, | ||
456 | assoclen, digestsize); | ||
457 | else | ||
458 | safexcel_skcipher_token(ctx, iv, cdesc, | ||
459 | cryptlen); | ||
460 | } | 665 | } |
461 | 666 | ||
462 | queued -= len; | 667 | queued -= len; |
@@ -464,23 +669,83 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring, | |||
464 | break; | 669 | break; |
465 | } | 670 | } |
466 | 671 | ||
672 | if (unlikely(!n_cdesc)) { | ||
673 | /* | ||
674 | * Special case: zero-length input buffer. | ||
675 | * The engine always needs the 1st command descriptor, however! | ||
676 | */ | ||
677 | first_cdesc = safexcel_add_cdesc(priv, ring, 1, 1, 0, 0, totlen, | ||
678 | ctx->base.ctxr_dma); | ||
679 | n_cdesc = 1; | ||
680 | } | ||
681 | |||
682 | /* Add context control words and token to first command descriptor */ | ||
683 | safexcel_context_control(ctx, base, sreq, first_cdesc); | ||
684 | if (ctx->aead) | ||
685 | safexcel_aead_token(ctx, iv, first_cdesc, | ||
686 | sreq->direction, cryptlen, | ||
687 | assoclen, digestsize); | ||
688 | else | ||
689 | safexcel_skcipher_token(ctx, iv, first_cdesc, | ||
690 | cryptlen); | ||
691 | |||
467 | /* result descriptors */ | 692 | /* result descriptors */ |
468 | for_each_sg(dst, sg, nr_dst, i) { | 693 | for_each_sg(dst, sg, sreq->nr_dst, i) { |
469 | bool first = !i, last = sg_is_last(sg); | 694 | bool last = (i == sreq->nr_dst - 1); |
470 | u32 len = sg_dma_len(sg); | 695 | u32 len = sg_dma_len(sg); |
471 | 696 | ||
472 | rdesc = safexcel_add_rdesc(priv, ring, first, last, | 697 | /* only allow the part of the buffer we know we need */ |
473 | sg_dma_address(sg), len); | 698 | if (len > totlen_dst) |
699 | len = totlen_dst; | ||
700 | if (unlikely(!len)) | ||
701 | break; | ||
702 | totlen_dst -= len; | ||
703 | |||
704 | /* skip over AAD space in buffer - not written */ | ||
705 | if (assoclen) { | ||
706 | if (assoclen >= len) { | ||
707 | assoclen -= len; | ||
708 | continue; | ||
709 | } | ||
710 | rdesc = safexcel_add_rdesc(priv, ring, first, last, | ||
711 | sg_dma_address(sg) + | ||
712 | assoclen, | ||
713 | len - assoclen); | ||
714 | assoclen = 0; | ||
715 | } else { | ||
716 | rdesc = safexcel_add_rdesc(priv, ring, first, last, | ||
717 | sg_dma_address(sg), | ||
718 | len); | ||
719 | } | ||
474 | if (IS_ERR(rdesc)) { | 720 | if (IS_ERR(rdesc)) { |
475 | /* No space left in the result descriptor ring */ | 721 | /* No space left in the result descriptor ring */ |
476 | ret = PTR_ERR(rdesc); | 722 | ret = PTR_ERR(rdesc); |
477 | goto rdesc_rollback; | 723 | goto rdesc_rollback; |
478 | } | 724 | } |
479 | if (first) | 725 | if (first) { |
480 | first_rdesc = rdesc; | 726 | first_rdesc = rdesc; |
727 | first = false; | ||
728 | } | ||
481 | n_rdesc++; | 729 | n_rdesc++; |
482 | } | 730 | } |
483 | 731 | ||
732 | if (unlikely(first)) { | ||
733 | /* | ||
734 | * Special case: AEAD decrypt with only AAD data. | ||
735 | * In this case there is NO output data from the engine, | ||
736 | * but the engine still needs a result descriptor! | ||
737 | * Create a dummy one just for catching the result token. | ||
738 | */ | ||
739 | rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0); | ||
740 | if (IS_ERR(rdesc)) { | ||
741 | /* No space left in the result descriptor ring */ | ||
742 | ret = PTR_ERR(rdesc); | ||
743 | goto rdesc_rollback; | ||
744 | } | ||
745 | first_rdesc = rdesc; | ||
746 | n_rdesc = 1; | ||
747 | } | ||
748 | |||
484 | safexcel_rdr_req_set(priv, ring, first_rdesc, base); | 749 | safexcel_rdr_req_set(priv, ring, first_rdesc, base); |
485 | 750 | ||
486 | *commands = n_cdesc; | 751 | *commands = n_cdesc; |
@@ -495,10 +760,10 @@ cdesc_rollback: | |||
495 | safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr); | 760 | safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr); |
496 | 761 | ||
497 | if (src == dst) { | 762 | if (src == dst) { |
498 | dma_unmap_sg(priv->dev, src, nr_src, DMA_BIDIRECTIONAL); | 763 | dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL); |
499 | } else { | 764 | } else { |
500 | dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE); | 765 | dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE); |
501 | dma_unmap_sg(priv->dev, dst, nr_dst, DMA_FROM_DEVICE); | 766 | dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE); |
502 | } | 767 | } |
503 | 768 | ||
504 | return ret; | 769 | return ret; |
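
Note on the IV handling in this function: the skcipher API expects req->iv to hold the output IV on completion, which for CBC is the last ciphertext block. It is therefore captured from the source scatterlist on decrypt (before an in-place operation overwrites it) and from the tail of the destination on encrypt. A caller-side sketch of what this buys, assuming a second request chained onto the first (src2, dst2 and len2 are hypothetical):

    /* req->iv was refreshed with the last ciphertext block on
     * completion, so a follow-up CBC request chains without any
     * extra IV bookkeeping by the caller.
     */
    skcipher_request_set_crypt(req, src2, dst2, len2, req->iv);
    ret = crypto_skcipher_decrypt(req);
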
@@ -570,7 +835,6 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv, | |||
570 | { | 835 | { |
571 | struct skcipher_request *req = skcipher_request_cast(async); | 836 | struct skcipher_request *req = skcipher_request_cast(async); |
572 | struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); | 837 | struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); |
573 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(async->tfm); | ||
574 | int err; | 838 | int err; |
575 | 839 | ||
576 | if (sreq->needs_inv) { | 840 | if (sreq->needs_inv) { |
@@ -581,24 +845,6 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv, | |||
581 | err = safexcel_handle_req_result(priv, ring, async, req->src, | 845 | err = safexcel_handle_req_result(priv, ring, async, req->src, |
582 | req->dst, req->cryptlen, sreq, | 846 | req->dst, req->cryptlen, sreq, |
583 | should_complete, ret); | 847 | should_complete, ret); |
584 | |||
585 | if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { | ||
586 | u32 block_sz = 0; | ||
587 | |||
588 | switch (ctx->alg) { | ||
589 | case SAFEXCEL_DES: | ||
590 | block_sz = DES_BLOCK_SIZE; | ||
591 | break; | ||
592 | case SAFEXCEL_3DES: | ||
593 | block_sz = DES3_EDE_BLOCK_SIZE; | ||
594 | break; | ||
595 | case SAFEXCEL_AES: | ||
596 | block_sz = AES_BLOCK_SIZE; | ||
597 | break; | ||
598 | } | ||
599 | |||
600 | memcpy(req->iv, ctx->base.ctxr->data, block_sz); | ||
601 | } | ||
602 | } | 848 | } |
603 | 849 | ||
604 | return err; | 850 | return err; |
@@ -656,12 +902,22 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring, | |||
656 | 902 | ||
657 | BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv); | 903 | BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv); |
658 | 904 | ||
659 | if (sreq->needs_inv) | 905 | if (sreq->needs_inv) { |
660 | ret = safexcel_cipher_send_inv(async, ring, commands, results); | 906 | ret = safexcel_cipher_send_inv(async, ring, commands, results); |
661 | else | 907 | } else { |
908 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); | ||
909 | u8 input_iv[AES_BLOCK_SIZE]; | ||
910 | |||
911 | /* | ||
912 | * Save input IV in case of CBC decrypt mode | ||
913 | * Will be overwritten with output IV prior to use! | ||
914 | */ | ||
915 | memcpy(input_iv, req->iv, crypto_skcipher_ivsize(skcipher)); | ||
916 | |||
662 | ret = safexcel_send_req(async, ring, sreq, req->src, | 917 | ret = safexcel_send_req(async, ring, sreq, req->src, |
663 | req->dst, req->cryptlen, 0, 0, req->iv, | 918 | req->dst, req->cryptlen, 0, 0, input_iv, |
664 | commands, results); | 919 | commands, results); |
920 | } | ||
665 | 921 | ||
666 | sreq->rdescs = *results; | 922 | sreq->rdescs = *results; |
667 | return ret; | 923 | return ret; |
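
The input_iv copy above exists because, for CBC decrypt, safexcel_send_req() rewrites req->iv with the output IV before the command token (which needs the caller's original IV) is generated. AES_BLOCK_SIZE is the largest ivsize of any mode this driver registers, so a single stack buffer covers all cases. The pattern, as a sketch with an explanatory comment added (not a verbatim quote of the driver):

    u8 input_iv[AES_BLOCK_SIZE];    /* >= ivsize of every supported mode */

    /* Snapshot the caller's IV before safexcel_send_req() overwrites
     * req->iv with the output IV.
     */
    memcpy(input_iv, req->iv, crypto_skcipher_ivsize(skcipher));
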
@@ -756,8 +1012,7 @@ static int safexcel_aead_exit_inv(struct crypto_tfm *tfm) | |||
756 | 1012 | ||
757 | static int safexcel_queue_req(struct crypto_async_request *base, | 1013 | static int safexcel_queue_req(struct crypto_async_request *base, |
758 | struct safexcel_cipher_req *sreq, | 1014 | struct safexcel_cipher_req *sreq, |
759 | enum safexcel_cipher_direction dir, u32 mode, | 1015 | enum safexcel_cipher_direction dir) |
760 | enum safexcel_cipher_alg alg) | ||
761 | { | 1016 | { |
762 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); | 1017 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); |
763 | struct safexcel_crypto_priv *priv = ctx->priv; | 1018 | struct safexcel_crypto_priv *priv = ctx->priv; |
@@ -765,8 +1020,6 @@ static int safexcel_queue_req(struct crypto_async_request *base, | |||
765 | 1020 | ||
766 | sreq->needs_inv = false; | 1021 | sreq->needs_inv = false; |
767 | sreq->direction = dir; | 1022 | sreq->direction = dir; |
768 | ctx->alg = alg; | ||
769 | ctx->mode = mode; | ||
770 | 1023 | ||
771 | if (ctx->base.ctxr) { | 1024 | if (ctx->base.ctxr) { |
772 | if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) { | 1025 | if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) { |
@@ -794,18 +1047,16 @@ static int safexcel_queue_req(struct crypto_async_request *base, | |||
794 | return ret; | 1047 | return ret; |
795 | } | 1048 | } |
796 | 1049 | ||
797 | static int safexcel_ecb_aes_encrypt(struct skcipher_request *req) | 1050 | static int safexcel_encrypt(struct skcipher_request *req) |
798 | { | 1051 | { |
799 | return safexcel_queue_req(&req->base, skcipher_request_ctx(req), | 1052 | return safexcel_queue_req(&req->base, skcipher_request_ctx(req), |
800 | SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, | 1053 | SAFEXCEL_ENCRYPT); |
801 | SAFEXCEL_AES); | ||
802 | } | 1054 | } |
803 | 1055 | ||
804 | static int safexcel_ecb_aes_decrypt(struct skcipher_request *req) | 1056 | static int safexcel_decrypt(struct skcipher_request *req) |
805 | { | 1057 | { |
806 | return safexcel_queue_req(&req->base, skcipher_request_ctx(req), | 1058 | return safexcel_queue_req(&req->base, skcipher_request_ctx(req), |
807 | SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, | 1059 | SAFEXCEL_DECRYPT); |
808 | SAFEXCEL_AES); | ||
809 | } | 1060 | } |
810 | 1061 | ||
811 | static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm) | 1062 | static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm) |
@@ -879,104 +1130,234 @@ static void safexcel_aead_cra_exit(struct crypto_tfm *tfm) | |||
879 | } | 1130 | } |
880 | } | 1131 | } |
881 | 1132 | ||
1133 | static int safexcel_skcipher_aes_ecb_cra_init(struct crypto_tfm *tfm) | ||
1134 | { | ||
1135 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1136 | |||
1137 | safexcel_skcipher_cra_init(tfm); | ||
1138 | ctx->alg = SAFEXCEL_AES; | ||
1139 | ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB; | ||
1140 | return 0; | ||
1141 | } | ||
1142 | |||
882 | struct safexcel_alg_template safexcel_alg_ecb_aes = { | 1143 | struct safexcel_alg_template safexcel_alg_ecb_aes = { |
883 | .type = SAFEXCEL_ALG_TYPE_SKCIPHER, | 1144 | .type = SAFEXCEL_ALG_TYPE_SKCIPHER, |
884 | .engines = EIP97IES | EIP197B | EIP197D, | 1145 | .algo_mask = SAFEXCEL_ALG_AES, |
885 | .alg.skcipher = { | 1146 | .alg.skcipher = { |
886 | .setkey = safexcel_skcipher_aes_setkey, | 1147 | .setkey = safexcel_skcipher_aes_setkey, |
887 | .encrypt = safexcel_ecb_aes_encrypt, | 1148 | .encrypt = safexcel_encrypt, |
888 | .decrypt = safexcel_ecb_aes_decrypt, | 1149 | .decrypt = safexcel_decrypt, |
889 | .min_keysize = AES_MIN_KEY_SIZE, | 1150 | .min_keysize = AES_MIN_KEY_SIZE, |
890 | .max_keysize = AES_MAX_KEY_SIZE, | 1151 | .max_keysize = AES_MAX_KEY_SIZE, |
891 | .base = { | 1152 | .base = { |
892 | .cra_name = "ecb(aes)", | 1153 | .cra_name = "ecb(aes)", |
893 | .cra_driver_name = "safexcel-ecb-aes", | 1154 | .cra_driver_name = "safexcel-ecb-aes", |
894 | .cra_priority = 300, | 1155 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
895 | .cra_flags = CRYPTO_ALG_ASYNC | | 1156 | .cra_flags = CRYPTO_ALG_ASYNC | |
896 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1157 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
897 | .cra_blocksize = AES_BLOCK_SIZE, | 1158 | .cra_blocksize = AES_BLOCK_SIZE, |
898 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | 1159 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), |
899 | .cra_alignmask = 0, | 1160 | .cra_alignmask = 0, |
900 | .cra_init = safexcel_skcipher_cra_init, | 1161 | .cra_init = safexcel_skcipher_aes_ecb_cra_init, |
901 | .cra_exit = safexcel_skcipher_cra_exit, | 1162 | .cra_exit = safexcel_skcipher_cra_exit, |
902 | .cra_module = THIS_MODULE, | 1163 | .cra_module = THIS_MODULE, |
903 | }, | 1164 | }, |
904 | }, | 1165 | }, |
905 | }; | 1166 | }; |
906 | 1167 | ||
907 | static int safexcel_cbc_aes_encrypt(struct skcipher_request *req) | 1168 | static int safexcel_skcipher_aes_cbc_cra_init(struct crypto_tfm *tfm) |
908 | { | 1169 | { |
909 | return safexcel_queue_req(&req->base, skcipher_request_ctx(req), | 1170 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); |
910 | SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, | ||
911 | SAFEXCEL_AES); | ||
912 | } | ||
913 | 1171 | ||
914 | static int safexcel_cbc_aes_decrypt(struct skcipher_request *req) | 1172 | safexcel_skcipher_cra_init(tfm); |
915 | { | 1173 | ctx->alg = SAFEXCEL_AES; |
916 | return safexcel_queue_req(&req->base, skcipher_request_ctx(req), | 1174 | ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; |
917 | SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, | 1175 | return 0; |
918 | SAFEXCEL_AES); | ||
919 | } | 1176 | } |
920 | 1177 | ||
921 | struct safexcel_alg_template safexcel_alg_cbc_aes = { | 1178 | struct safexcel_alg_template safexcel_alg_cbc_aes = { |
922 | .type = SAFEXCEL_ALG_TYPE_SKCIPHER, | 1179 | .type = SAFEXCEL_ALG_TYPE_SKCIPHER, |
923 | .engines = EIP97IES | EIP197B | EIP197D, | 1180 | .algo_mask = SAFEXCEL_ALG_AES, |
924 | .alg.skcipher = { | 1181 | .alg.skcipher = { |
925 | .setkey = safexcel_skcipher_aes_setkey, | 1182 | .setkey = safexcel_skcipher_aes_setkey, |
926 | .encrypt = safexcel_cbc_aes_encrypt, | 1183 | .encrypt = safexcel_encrypt, |
927 | .decrypt = safexcel_cbc_aes_decrypt, | 1184 | .decrypt = safexcel_decrypt, |
928 | .min_keysize = AES_MIN_KEY_SIZE, | 1185 | .min_keysize = AES_MIN_KEY_SIZE, |
929 | .max_keysize = AES_MAX_KEY_SIZE, | 1186 | .max_keysize = AES_MAX_KEY_SIZE, |
930 | .ivsize = AES_BLOCK_SIZE, | 1187 | .ivsize = AES_BLOCK_SIZE, |
931 | .base = { | 1188 | .base = { |
932 | .cra_name = "cbc(aes)", | 1189 | .cra_name = "cbc(aes)", |
933 | .cra_driver_name = "safexcel-cbc-aes", | 1190 | .cra_driver_name = "safexcel-cbc-aes", |
934 | .cra_priority = 300, | 1191 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
935 | .cra_flags = CRYPTO_ALG_ASYNC | | 1192 | .cra_flags = CRYPTO_ALG_ASYNC | |
936 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1193 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
937 | .cra_blocksize = AES_BLOCK_SIZE, | 1194 | .cra_blocksize = AES_BLOCK_SIZE, |
938 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | 1195 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), |
939 | .cra_alignmask = 0, | 1196 | .cra_alignmask = 0, |
940 | .cra_init = safexcel_skcipher_cra_init, | 1197 | .cra_init = safexcel_skcipher_aes_cbc_cra_init, |
941 | .cra_exit = safexcel_skcipher_cra_exit, | 1198 | .cra_exit = safexcel_skcipher_cra_exit, |
942 | .cra_module = THIS_MODULE, | 1199 | .cra_module = THIS_MODULE, |
943 | }, | 1200 | }, |
944 | }, | 1201 | }, |
945 | }; | 1202 | }; |
946 | 1203 | ||
947 | static int safexcel_cbc_des_encrypt(struct skcipher_request *req) | 1204 | static int safexcel_skcipher_aes_cfb_cra_init(struct crypto_tfm *tfm) |
948 | { | 1205 | { |
949 | return safexcel_queue_req(&req->base, skcipher_request_ctx(req), | 1206 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); |
950 | SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, | 1207 | |
951 | SAFEXCEL_DES); | 1208 | safexcel_skcipher_cra_init(tfm); |
1209 | ctx->alg = SAFEXCEL_AES; | ||
1210 | ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB; | ||
1211 | return 0; | ||
952 | } | 1212 | } |
953 | 1213 | ||
954 | static int safexcel_cbc_des_decrypt(struct skcipher_request *req) | 1214 | struct safexcel_alg_template safexcel_alg_cfb_aes = { |
1215 | .type = SAFEXCEL_ALG_TYPE_SKCIPHER, | ||
1216 | .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XFB, | ||
1217 | .alg.skcipher = { | ||
1218 | .setkey = safexcel_skcipher_aes_setkey, | ||
1219 | .encrypt = safexcel_encrypt, | ||
1220 | .decrypt = safexcel_decrypt, | ||
1221 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1222 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1223 | .ivsize = AES_BLOCK_SIZE, | ||
1224 | .base = { | ||
1225 | .cra_name = "cfb(aes)", | ||
1226 | .cra_driver_name = "safexcel-cfb-aes", | ||
1227 | .cra_priority = SAFEXCEL_CRA_PRIORITY, | ||
1228 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
1229 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1230 | .cra_blocksize = 1, | ||
1231 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | ||
1232 | .cra_alignmask = 0, | ||
1233 | .cra_init = safexcel_skcipher_aes_cfb_cra_init, | ||
1234 | .cra_exit = safexcel_skcipher_cra_exit, | ||
1235 | .cra_module = THIS_MODULE, | ||
1236 | }, | ||
1237 | }, | ||
1238 | }; | ||
1239 | |||
1240 | static int safexcel_skcipher_aes_ofb_cra_init(struct crypto_tfm *tfm) | ||
955 | { | 1241 | { |
956 | return safexcel_queue_req(&req->base, skcipher_request_ctx(req), | 1242 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); |
957 | SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, | 1243 | |
958 | SAFEXCEL_DES); | 1244 | safexcel_skcipher_cra_init(tfm); |
1245 | ctx->alg = SAFEXCEL_AES; | ||
1246 | ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB; | ||
1247 | return 0; | ||
959 | } | 1248 | } |
960 | 1249 | ||
961 | static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key, | 1250 | struct safexcel_alg_template safexcel_alg_ofb_aes = { |
962 | unsigned int len) | 1251 | .type = SAFEXCEL_ALG_TYPE_SKCIPHER, |
1252 | .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XFB, | ||
1253 | .alg.skcipher = { | ||
1254 | .setkey = safexcel_skcipher_aes_setkey, | ||
1255 | .encrypt = safexcel_encrypt, | ||
1256 | .decrypt = safexcel_decrypt, | ||
1257 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1258 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1259 | .ivsize = AES_BLOCK_SIZE, | ||
1260 | .base = { | ||
1261 | .cra_name = "ofb(aes)", | ||
1262 | .cra_driver_name = "safexcel-ofb-aes", | ||
1263 | .cra_priority = SAFEXCEL_CRA_PRIORITY, | ||
1264 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
1265 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1266 | .cra_blocksize = 1, | ||
1267 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | ||
1268 | .cra_alignmask = 0, | ||
1269 | .cra_init = safexcel_skcipher_aes_ofb_cra_init, | ||
1270 | .cra_exit = safexcel_skcipher_cra_exit, | ||
1271 | .cra_module = THIS_MODULE, | ||
1272 | }, | ||
1273 | }, | ||
1274 | }; | ||
1275 | |||
1276 | static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm, | ||
1277 | const u8 *key, unsigned int len) | ||
963 | { | 1278 | { |
964 | struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); | 1279 | struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); |
965 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | 1280 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); |
966 | u32 tmp[DES_EXPKEY_WORDS]; | 1281 | struct safexcel_crypto_priv *priv = ctx->priv; |
967 | int ret; | 1282 | struct crypto_aes_ctx aes; |
1283 | int ret, i; | ||
1284 | unsigned int keylen; | ||
968 | 1285 | ||
969 | if (len != DES_KEY_SIZE) { | 1286 | /* last 4 bytes of key are the nonce! */ |
1287 | ctx->nonce = *(u32 *)(key + len - CTR_RFC3686_NONCE_SIZE); | ||
1288 | /* exclude the nonce here */ | ||
1289 | keylen = len - CTR_RFC3686_NONCE_SIZE; | ||
1290 | ret = aes_expandkey(&aes, key, keylen); | ||
1291 | if (ret) { | ||
970 | crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | 1292 | crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
971 | return -EINVAL; | 1293 | return ret; |
972 | } | 1294 | } |
973 | 1295 | ||
974 | ret = des_ekey(tmp, key); | 1296 | if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { |
975 | if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { | 1297 | for (i = 0; i < keylen / sizeof(u32); i++) { |
976 | tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; | 1298 | if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) { |
977 | return -EINVAL; | 1299 | ctx->base.needs_inv = true; |
1300 | break; | ||
1301 | } | ||
1302 | } | ||
978 | } | 1303 | } |
979 | 1304 | ||
1305 | for (i = 0; i < keylen / sizeof(u32); i++) | ||
1306 | ctx->key[i] = cpu_to_le32(aes.key_enc[i]); | ||
1307 | |||
1308 | ctx->key_len = keylen; | ||
1309 | |||
1310 | memzero_explicit(&aes, sizeof(aes)); | ||
1311 | return 0; | ||
1312 | } | ||
1313 | |||
1314 | static int safexcel_skcipher_aes_ctr_cra_init(struct crypto_tfm *tfm) | ||
1315 | { | ||
1316 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1317 | |||
1318 | safexcel_skcipher_cra_init(tfm); | ||
1319 | ctx->alg = SAFEXCEL_AES; | ||
1320 | ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; | ||
1321 | return 0; | ||
1322 | } | ||
1323 | |||
1324 | struct safexcel_alg_template safexcel_alg_ctr_aes = { | ||
1325 | .type = SAFEXCEL_ALG_TYPE_SKCIPHER, | ||
1326 | .algo_mask = SAFEXCEL_ALG_AES, | ||
1327 | .alg.skcipher = { | ||
1328 | .setkey = safexcel_skcipher_aesctr_setkey, | ||
1329 | .encrypt = safexcel_encrypt, | ||
1330 | .decrypt = safexcel_decrypt, | ||
1331 | /* Add nonce size */ | ||
1332 | .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, | ||
1333 | .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, | ||
1334 | .ivsize = CTR_RFC3686_IV_SIZE, | ||
1335 | .base = { | ||
1336 | .cra_name = "rfc3686(ctr(aes))", | ||
1337 | .cra_driver_name = "safexcel-ctr-aes", | ||
1338 | .cra_priority = SAFEXCEL_CRA_PRIORITY, | ||
1339 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
1340 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1341 | .cra_blocksize = 1, | ||
1342 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | ||
1343 | .cra_alignmask = 0, | ||
1344 | .cra_init = safexcel_skcipher_aes_ctr_cra_init, | ||
1345 | .cra_exit = safexcel_skcipher_cra_exit, | ||
1346 | .cra_module = THIS_MODULE, | ||
1347 | }, | ||
1348 | }, | ||
1349 | }; | ||
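
rfc3686(ctr(aes)) transports the 4-byte nonce as the tail of the key, which is why the setkey above splits it off and why min/max_keysize advertise CTR_RFC3686_NONCE_SIZE on top of the AES key sizes. A caller-side sketch for a 128-bit key (key20 is a hypothetical 20-byte buffer; the counter block is nonce(4) || IV(8) || 32-bit block counter):

    struct crypto_skcipher *tfm;
    int ret;

    tfm = crypto_alloc_skcipher("rfc3686(ctr(aes))", 0, 0);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);
    /* 20 bytes: 16-byte AES key immediately followed by the nonce */
    ret = crypto_skcipher_setkey(tfm, key20,
                                 AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE);
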
1350 | |||
1351 | static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key, | ||
1352 | unsigned int len) | ||
1353 | { | ||
1354 | struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm); | ||
1355 | int ret; | ||
1356 | |||
1357 | ret = verify_skcipher_des_key(ctfm, key); | ||
1358 | if (ret) | ||
1359 | return ret; | ||
1360 | |||
980 | /* if context exists and key changed, need to invalidate it */ | 1361 |
981 | if (ctx->base.ctxr_dma) | 1362 | if (ctx->base.ctxr_dma) |
982 | if (memcmp(ctx->key, key, len)) | 1363 | if (memcmp(ctx->key, key, len)) |
@@ -988,94 +1369,85 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key, | |||
988 | return 0; | 1369 | return 0; |
989 | } | 1370 | } |
990 | 1371 | ||
1372 | static int safexcel_skcipher_des_cbc_cra_init(struct crypto_tfm *tfm) | ||
1373 | { | ||
1374 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1375 | |||
1376 | safexcel_skcipher_cra_init(tfm); | ||
1377 | ctx->alg = SAFEXCEL_DES; | ||
1378 | ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; | ||
1379 | return 0; | ||
1380 | } | ||
1381 | |||
991 | struct safexcel_alg_template safexcel_alg_cbc_des = { | 1382 | struct safexcel_alg_template safexcel_alg_cbc_des = { |
992 | .type = SAFEXCEL_ALG_TYPE_SKCIPHER, | 1383 | .type = SAFEXCEL_ALG_TYPE_SKCIPHER, |
993 | .engines = EIP97IES | EIP197B | EIP197D, | 1384 | .algo_mask = SAFEXCEL_ALG_DES, |
994 | .alg.skcipher = { | 1385 | .alg.skcipher = { |
995 | .setkey = safexcel_des_setkey, | 1386 | .setkey = safexcel_des_setkey, |
996 | .encrypt = safexcel_cbc_des_encrypt, | 1387 | .encrypt = safexcel_encrypt, |
997 | .decrypt = safexcel_cbc_des_decrypt, | 1388 | .decrypt = safexcel_decrypt, |
998 | .min_keysize = DES_KEY_SIZE, | 1389 | .min_keysize = DES_KEY_SIZE, |
999 | .max_keysize = DES_KEY_SIZE, | 1390 | .max_keysize = DES_KEY_SIZE, |
1000 | .ivsize = DES_BLOCK_SIZE, | 1391 | .ivsize = DES_BLOCK_SIZE, |
1001 | .base = { | 1392 | .base = { |
1002 | .cra_name = "cbc(des)", | 1393 | .cra_name = "cbc(des)", |
1003 | .cra_driver_name = "safexcel-cbc-des", | 1394 | .cra_driver_name = "safexcel-cbc-des", |
1004 | .cra_priority = 300, | 1395 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1005 | .cra_flags = CRYPTO_ALG_ASYNC | | 1396 | .cra_flags = CRYPTO_ALG_ASYNC | |
1006 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1397 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1007 | .cra_blocksize = DES_BLOCK_SIZE, | 1398 | .cra_blocksize = DES_BLOCK_SIZE, |
1008 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | 1399 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), |
1009 | .cra_alignmask = 0, | 1400 | .cra_alignmask = 0, |
1010 | .cra_init = safexcel_skcipher_cra_init, | 1401 | .cra_init = safexcel_skcipher_des_cbc_cra_init, |
1011 | .cra_exit = safexcel_skcipher_cra_exit, | 1402 | .cra_exit = safexcel_skcipher_cra_exit, |
1012 | .cra_module = THIS_MODULE, | 1403 | .cra_module = THIS_MODULE, |
1013 | }, | 1404 | }, |
1014 | }, | 1405 | }, |
1015 | }; | 1406 | }; |
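
All setkey paths in this file follow the invalidation rule spelled out in the "if context exists and key changed" comment: once a context record lives in the engine (ctx->base.ctxr_dma is set), any change of key material must flag the record for invalidation before it can be reused. Reduced to its simplest form (the AES paths compare word-by-word against the expanded key instead):

    /* Record-cache hygiene common to every setkey in this driver */
    if (ctx->base.ctxr_dma && memcmp(ctx->key, key, len))
            ctx->base.needs_inv = true;
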
1016 | 1407 | ||
1017 | static int safexcel_ecb_des_encrypt(struct skcipher_request *req) | 1408 | static int safexcel_skcipher_des_ecb_cra_init(struct crypto_tfm *tfm) |
1018 | { | 1409 | { |
1019 | return safexcel_queue_req(&req->base, skcipher_request_ctx(req), | 1410 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); |
1020 | SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, | ||
1021 | SAFEXCEL_DES); | ||
1022 | } | ||
1023 | 1411 | ||
1024 | static int safexcel_ecb_des_decrypt(struct skcipher_request *req) | 1412 | safexcel_skcipher_cra_init(tfm); |
1025 | { | 1413 | ctx->alg = SAFEXCEL_DES; |
1026 | return safexcel_queue_req(&req->base, skcipher_request_ctx(req), | 1414 | ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB; |
1027 | SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, | 1415 | return 0; |
1028 | SAFEXCEL_DES); | ||
1029 | } | 1416 | } |
1030 | 1417 | ||
1031 | struct safexcel_alg_template safexcel_alg_ecb_des = { | 1418 | struct safexcel_alg_template safexcel_alg_ecb_des = { |
1032 | .type = SAFEXCEL_ALG_TYPE_SKCIPHER, | 1419 | .type = SAFEXCEL_ALG_TYPE_SKCIPHER, |
1033 | .engines = EIP97IES | EIP197B | EIP197D, | 1420 | .algo_mask = SAFEXCEL_ALG_DES, |
1034 | .alg.skcipher = { | 1421 | .alg.skcipher = { |
1035 | .setkey = safexcel_des_setkey, | 1422 | .setkey = safexcel_des_setkey, |
1036 | .encrypt = safexcel_ecb_des_encrypt, | 1423 | .encrypt = safexcel_encrypt, |
1037 | .decrypt = safexcel_ecb_des_decrypt, | 1424 | .decrypt = safexcel_decrypt, |
1038 | .min_keysize = DES_KEY_SIZE, | 1425 | .min_keysize = DES_KEY_SIZE, |
1039 | .max_keysize = DES_KEY_SIZE, | 1426 | .max_keysize = DES_KEY_SIZE, |
1040 | .ivsize = DES_BLOCK_SIZE, | ||
1041 | .base = { | 1427 | .base = { |
1042 | .cra_name = "ecb(des)", | 1428 | .cra_name = "ecb(des)", |
1043 | .cra_driver_name = "safexcel-ecb-des", | 1429 | .cra_driver_name = "safexcel-ecb-des", |
1044 | .cra_priority = 300, | 1430 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1045 | .cra_flags = CRYPTO_ALG_ASYNC | | 1431 | .cra_flags = CRYPTO_ALG_ASYNC | |
1046 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1432 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1047 | .cra_blocksize = DES_BLOCK_SIZE, | 1433 | .cra_blocksize = DES_BLOCK_SIZE, |
1048 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | 1434 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), |
1049 | .cra_alignmask = 0, | 1435 | .cra_alignmask = 0, |
1050 | .cra_init = safexcel_skcipher_cra_init, | 1436 | .cra_init = safexcel_skcipher_des_ecb_cra_init, |
1051 | .cra_exit = safexcel_skcipher_cra_exit, | 1437 | .cra_exit = safexcel_skcipher_cra_exit, |
1052 | .cra_module = THIS_MODULE, | 1438 | .cra_module = THIS_MODULE, |
1053 | }, | 1439 | }, |
1054 | }, | 1440 | }, |
1055 | }; | 1441 | }; |
1056 | 1442 | ||
1057 | static int safexcel_cbc_des3_ede_encrypt(struct skcipher_request *req) | ||
1058 | { | ||
1059 | return safexcel_queue_req(&req->base, skcipher_request_ctx(req), | ||
1060 | SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, | ||
1061 | SAFEXCEL_3DES); | ||
1062 | } | ||
1063 | |||
1064 | static int safexcel_cbc_des3_ede_decrypt(struct skcipher_request *req) | ||
1065 | { | ||
1066 | return safexcel_queue_req(&req->base, skcipher_request_ctx(req), | ||
1067 | SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, | ||
1068 | SAFEXCEL_3DES); | ||
1069 | } | ||
1070 | |||
1071 | static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm, | 1443 | static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm, |
1072 | const u8 *key, unsigned int len) | 1444 | const u8 *key, unsigned int len) |
1073 | { | 1445 | { |
1074 | struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm); | 1446 | struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm); |
1075 | int err; | 1447 | int err; |
1076 | 1448 | ||
1077 | err = des3_verify_key(ctfm, key); | 1449 | err = verify_skcipher_des3_key(ctfm, key); |
1078 | if (unlikely(err)) | 1450 | if (err) |
1079 | return err; | 1451 | return err; |
1080 | 1452 | ||
1081 | /* if context exists and key changed, need to invalidate it */ | 1453 |
@@ -1091,66 +1463,71 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm, | |||
1091 | return 0; | 1463 | return 0; |
1092 | } | 1464 | } |
1093 | 1465 | ||
1466 | static int safexcel_skcipher_des3_cbc_cra_init(struct crypto_tfm *tfm) | ||
1467 | { | ||
1468 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1469 | |||
1470 | safexcel_skcipher_cra_init(tfm); | ||
1471 | ctx->alg = SAFEXCEL_3DES; | ||
1472 | ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; | ||
1473 | return 0; | ||
1474 | } | ||
1475 | |||
1094 | struct safexcel_alg_template safexcel_alg_cbc_des3_ede = { | 1476 | struct safexcel_alg_template safexcel_alg_cbc_des3_ede = { |
1095 | .type = SAFEXCEL_ALG_TYPE_SKCIPHER, | 1477 | .type = SAFEXCEL_ALG_TYPE_SKCIPHER, |
1096 | .engines = EIP97IES | EIP197B | EIP197D, | 1478 | .algo_mask = SAFEXCEL_ALG_DES, |
1097 | .alg.skcipher = { | 1479 | .alg.skcipher = { |
1098 | .setkey = safexcel_des3_ede_setkey, | 1480 | .setkey = safexcel_des3_ede_setkey, |
1099 | .encrypt = safexcel_cbc_des3_ede_encrypt, | 1481 | .encrypt = safexcel_encrypt, |
1100 | .decrypt = safexcel_cbc_des3_ede_decrypt, | 1482 | .decrypt = safexcel_decrypt, |
1101 | .min_keysize = DES3_EDE_KEY_SIZE, | 1483 | .min_keysize = DES3_EDE_KEY_SIZE, |
1102 | .max_keysize = DES3_EDE_KEY_SIZE, | 1484 | .max_keysize = DES3_EDE_KEY_SIZE, |
1103 | .ivsize = DES3_EDE_BLOCK_SIZE, | 1485 | .ivsize = DES3_EDE_BLOCK_SIZE, |
1104 | .base = { | 1486 | .base = { |
1105 | .cra_name = "cbc(des3_ede)", | 1487 | .cra_name = "cbc(des3_ede)", |
1106 | .cra_driver_name = "safexcel-cbc-des3_ede", | 1488 | .cra_driver_name = "safexcel-cbc-des3_ede", |
1107 | .cra_priority = 300, | 1489 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1108 | .cra_flags = CRYPTO_ALG_ASYNC | | 1490 | .cra_flags = CRYPTO_ALG_ASYNC | |
1109 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1491 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1110 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 1492 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1111 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | 1493 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), |
1112 | .cra_alignmask = 0, | 1494 | .cra_alignmask = 0, |
1113 | .cra_init = safexcel_skcipher_cra_init, | 1495 | .cra_init = safexcel_skcipher_des3_cbc_cra_init, |
1114 | .cra_exit = safexcel_skcipher_cra_exit, | 1496 | .cra_exit = safexcel_skcipher_cra_exit, |
1115 | .cra_module = THIS_MODULE, | 1497 | .cra_module = THIS_MODULE, |
1116 | }, | 1498 | }, |
1117 | }, | 1499 | }, |
1118 | }; | 1500 | }; |
1119 | 1501 | ||
1120 | static int safexcel_ecb_des3_ede_encrypt(struct skcipher_request *req) | 1502 | static int safexcel_skcipher_des3_ecb_cra_init(struct crypto_tfm *tfm) |
1121 | { | 1503 | { |
1122 | return safexcel_queue_req(&req->base, skcipher_request_ctx(req), | 1504 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); |
1123 | SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, | ||
1124 | SAFEXCEL_3DES); | ||
1125 | } | ||
1126 | 1505 | ||
1127 | static int safexcel_ecb_des3_ede_decrypt(struct skcipher_request *req) | 1506 | safexcel_skcipher_cra_init(tfm); |
1128 | { | 1507 | ctx->alg = SAFEXCEL_3DES; |
1129 | return safexcel_queue_req(&req->base, skcipher_request_ctx(req), | 1508 | ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB; |
1130 | SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, | 1509 | return 0; |
1131 | SAFEXCEL_3DES); | ||
1132 | } | 1510 | } |
1133 | 1511 | ||
1134 | struct safexcel_alg_template safexcel_alg_ecb_des3_ede = { | 1512 | struct safexcel_alg_template safexcel_alg_ecb_des3_ede = { |
1135 | .type = SAFEXCEL_ALG_TYPE_SKCIPHER, | 1513 | .type = SAFEXCEL_ALG_TYPE_SKCIPHER, |
1136 | .engines = EIP97IES | EIP197B | EIP197D, | 1514 | .algo_mask = SAFEXCEL_ALG_DES, |
1137 | .alg.skcipher = { | 1515 | .alg.skcipher = { |
1138 | .setkey = safexcel_des3_ede_setkey, | 1516 | .setkey = safexcel_des3_ede_setkey, |
1139 | .encrypt = safexcel_ecb_des3_ede_encrypt, | 1517 | .encrypt = safexcel_encrypt, |
1140 | .decrypt = safexcel_ecb_des3_ede_decrypt, | 1518 | .decrypt = safexcel_decrypt, |
1141 | .min_keysize = DES3_EDE_KEY_SIZE, | 1519 | .min_keysize = DES3_EDE_KEY_SIZE, |
1142 | .max_keysize = DES3_EDE_KEY_SIZE, | 1520 | .max_keysize = DES3_EDE_KEY_SIZE, |
1143 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
1144 | .base = { | 1521 | .base = { |
1145 | .cra_name = "ecb(des3_ede)", | 1522 | .cra_name = "ecb(des3_ede)", |
1146 | .cra_driver_name = "safexcel-ecb-des3_ede", | 1523 | .cra_driver_name = "safexcel-ecb-des3_ede", |
1147 | .cra_priority = 300, | 1524 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1148 | .cra_flags = CRYPTO_ALG_ASYNC | | 1525 | .cra_flags = CRYPTO_ALG_ASYNC | |
1149 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1526 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1150 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 1527 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1151 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | 1528 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), |
1152 | .cra_alignmask = 0, | 1529 | .cra_alignmask = 0, |
1153 | .cra_init = safexcel_skcipher_cra_init, | 1530 | .cra_init = safexcel_skcipher_des3_ecb_cra_init, |
1154 | .cra_exit = safexcel_skcipher_cra_exit, | 1531 | .cra_exit = safexcel_skcipher_cra_exit, |
1155 | .cra_module = THIS_MODULE, | 1532 | .cra_module = THIS_MODULE, |
1156 | }, | 1533 | }, |
@@ -1161,16 +1538,14 @@ static int safexcel_aead_encrypt(struct aead_request *req) | |||
1161 | { | 1538 | { |
1162 | struct safexcel_cipher_req *creq = aead_request_ctx(req); | 1539 | struct safexcel_cipher_req *creq = aead_request_ctx(req); |
1163 | 1540 | ||
1164 | return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT, | 1541 | return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT); |
1165 | CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES); | ||
1166 | } | 1542 | } |
1167 | 1543 | ||
1168 | static int safexcel_aead_decrypt(struct aead_request *req) | 1544 | static int safexcel_aead_decrypt(struct aead_request *req) |
1169 | { | 1545 | { |
1170 | struct safexcel_cipher_req *creq = aead_request_ctx(req); | 1546 | struct safexcel_cipher_req *creq = aead_request_ctx(req); |
1171 | 1547 | ||
1172 | return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT, | 1548 | return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT); |
1173 | CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES); | ||
1174 | } | 1549 | } |
1175 | 1550 | ||
1176 | static int safexcel_aead_cra_init(struct crypto_tfm *tfm) | 1551 | static int safexcel_aead_cra_init(struct crypto_tfm *tfm) |
@@ -1185,6 +1560,8 @@ static int safexcel_aead_cra_init(struct crypto_tfm *tfm) | |||
1185 | 1560 | ||
1186 | ctx->priv = tmpl->priv; | 1561 | ctx->priv = tmpl->priv; |
1187 | 1562 | ||
1563 | ctx->alg = SAFEXCEL_AES; /* default */ | ||
1564 | ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; /* default */ | ||
1188 | ctx->aead = true; | 1565 | ctx->aead = true; |
1189 | ctx->base.send = safexcel_aead_send; | 1566 | ctx->base.send = safexcel_aead_send; |
1190 | ctx->base.handle_result = safexcel_aead_handle_result; | 1567 | ctx->base.handle_result = safexcel_aead_handle_result; |
@@ -1203,9 +1580,9 @@ static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm) | |||
1203 | 1580 | ||
1204 | struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = { | 1581 | struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = { |
1205 | .type = SAFEXCEL_ALG_TYPE_AEAD, | 1582 | .type = SAFEXCEL_ALG_TYPE_AEAD, |
1206 | .engines = EIP97IES | EIP197B | EIP197D, | 1583 | .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA1, |
1207 | .alg.aead = { | 1584 | .alg.aead = { |
1208 | .setkey = safexcel_aead_aes_setkey, | 1585 | .setkey = safexcel_aead_setkey, |
1209 | .encrypt = safexcel_aead_encrypt, | 1586 | .encrypt = safexcel_aead_encrypt, |
1210 | .decrypt = safexcel_aead_decrypt, | 1587 | .decrypt = safexcel_aead_decrypt, |
1211 | .ivsize = AES_BLOCK_SIZE, | 1588 | .ivsize = AES_BLOCK_SIZE, |
@@ -1213,7 +1590,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = { | |||
1213 | .base = { | 1590 | .base = { |
1214 | .cra_name = "authenc(hmac(sha1),cbc(aes))", | 1591 | .cra_name = "authenc(hmac(sha1),cbc(aes))", |
1215 | .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes", | 1592 | .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes", |
1216 | .cra_priority = 300, | 1593 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1217 | .cra_flags = CRYPTO_ALG_ASYNC | | 1594 | .cra_flags = CRYPTO_ALG_ASYNC | |
1218 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1595 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1219 | .cra_blocksize = AES_BLOCK_SIZE, | 1596 | .cra_blocksize = AES_BLOCK_SIZE, |
@@ -1238,9 +1615,9 @@ static int safexcel_aead_sha256_cra_init(struct crypto_tfm *tfm) | |||
1238 | 1615 | ||
1239 | struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = { | 1616 | struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = { |
1240 | .type = SAFEXCEL_ALG_TYPE_AEAD, | 1617 | .type = SAFEXCEL_ALG_TYPE_AEAD, |
1241 | .engines = EIP97IES | EIP197B | EIP197D, | 1618 | .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256, |
1242 | .alg.aead = { | 1619 | .alg.aead = { |
1243 | .setkey = safexcel_aead_aes_setkey, | 1620 | .setkey = safexcel_aead_setkey, |
1244 | .encrypt = safexcel_aead_encrypt, | 1621 | .encrypt = safexcel_aead_encrypt, |
1245 | .decrypt = safexcel_aead_decrypt, | 1622 | .decrypt = safexcel_aead_decrypt, |
1246 | .ivsize = AES_BLOCK_SIZE, | 1623 | .ivsize = AES_BLOCK_SIZE, |
@@ -1248,7 +1625,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = { | |||
1248 | .base = { | 1625 | .base = { |
1249 | .cra_name = "authenc(hmac(sha256),cbc(aes))", | 1626 | .cra_name = "authenc(hmac(sha256),cbc(aes))", |
1250 | .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes", | 1627 | .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes", |
1251 | .cra_priority = 300, | 1628 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1252 | .cra_flags = CRYPTO_ALG_ASYNC | | 1629 | .cra_flags = CRYPTO_ALG_ASYNC | |
1253 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1630 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1254 | .cra_blocksize = AES_BLOCK_SIZE, | 1631 | .cra_blocksize = AES_BLOCK_SIZE, |
@@ -1273,9 +1650,9 @@ static int safexcel_aead_sha224_cra_init(struct crypto_tfm *tfm) | |||
1273 | 1650 | ||
1274 | struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = { | 1651 | struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = { |
1275 | .type = SAFEXCEL_ALG_TYPE_AEAD, | 1652 | .type = SAFEXCEL_ALG_TYPE_AEAD, |
1276 | .engines = EIP97IES | EIP197B | EIP197D, | 1653 | .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256, |
1277 | .alg.aead = { | 1654 | .alg.aead = { |
1278 | .setkey = safexcel_aead_aes_setkey, | 1655 | .setkey = safexcel_aead_setkey, |
1279 | .encrypt = safexcel_aead_encrypt, | 1656 | .encrypt = safexcel_aead_encrypt, |
1280 | .decrypt = safexcel_aead_decrypt, | 1657 | .decrypt = safexcel_aead_decrypt, |
1281 | .ivsize = AES_BLOCK_SIZE, | 1658 | .ivsize = AES_BLOCK_SIZE, |
@@ -1283,7 +1660,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = { | |||
1283 | .base = { | 1660 | .base = { |
1284 | .cra_name = "authenc(hmac(sha224),cbc(aes))", | 1661 | .cra_name = "authenc(hmac(sha224),cbc(aes))", |
1285 | .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes", | 1662 | .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes", |
1286 | .cra_priority = 300, | 1663 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1287 | .cra_flags = CRYPTO_ALG_ASYNC | | 1664 | .cra_flags = CRYPTO_ALG_ASYNC | |
1288 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1665 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1289 | .cra_blocksize = AES_BLOCK_SIZE, | 1666 | .cra_blocksize = AES_BLOCK_SIZE, |
@@ -1308,9 +1685,9 @@ static int safexcel_aead_sha512_cra_init(struct crypto_tfm *tfm) | |||
1308 | 1685 | ||
1309 | struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = { | 1686 | struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = { |
1310 | .type = SAFEXCEL_ALG_TYPE_AEAD, | 1687 | .type = SAFEXCEL_ALG_TYPE_AEAD, |
1311 | .engines = EIP97IES | EIP197B | EIP197D, | 1688 | .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512, |
1312 | .alg.aead = { | 1689 | .alg.aead = { |
1313 | .setkey = safexcel_aead_aes_setkey, | 1690 | .setkey = safexcel_aead_setkey, |
1314 | .encrypt = safexcel_aead_encrypt, | 1691 | .encrypt = safexcel_aead_encrypt, |
1315 | .decrypt = safexcel_aead_decrypt, | 1692 | .decrypt = safexcel_aead_decrypt, |
1316 | .ivsize = AES_BLOCK_SIZE, | 1693 | .ivsize = AES_BLOCK_SIZE, |
@@ -1318,7 +1695,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = { | |||
1318 | .base = { | 1695 | .base = { |
1319 | .cra_name = "authenc(hmac(sha512),cbc(aes))", | 1696 | .cra_name = "authenc(hmac(sha512),cbc(aes))", |
1320 | .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes", | 1697 | .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes", |
1321 | .cra_priority = 300, | 1698 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1322 | .cra_flags = CRYPTO_ALG_ASYNC | | 1699 | .cra_flags = CRYPTO_ALG_ASYNC | |
1323 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1700 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1324 | .cra_blocksize = AES_BLOCK_SIZE, | 1701 | .cra_blocksize = AES_BLOCK_SIZE, |
@@ -1343,9 +1720,9 @@ static int safexcel_aead_sha384_cra_init(struct crypto_tfm *tfm) | |||
1343 | 1720 | ||
1344 | struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { | 1721 | struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { |
1345 | .type = SAFEXCEL_ALG_TYPE_AEAD, | 1722 | .type = SAFEXCEL_ALG_TYPE_AEAD, |
1346 | .engines = EIP97IES | EIP197B | EIP197D, | 1723 | .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512, |
1347 | .alg.aead = { | 1724 | .alg.aead = { |
1348 | .setkey = safexcel_aead_aes_setkey, | 1725 | .setkey = safexcel_aead_setkey, |
1349 | .encrypt = safexcel_aead_encrypt, | 1726 | .encrypt = safexcel_aead_encrypt, |
1350 | .decrypt = safexcel_aead_decrypt, | 1727 | .decrypt = safexcel_aead_decrypt, |
1351 | .ivsize = AES_BLOCK_SIZE, | 1728 | .ivsize = AES_BLOCK_SIZE, |
@@ -1353,7 +1730,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { | |||
1353 | .base = { | 1730 | .base = { |
1354 | .cra_name = "authenc(hmac(sha384),cbc(aes))", | 1731 | .cra_name = "authenc(hmac(sha384),cbc(aes))", |
1355 | .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes", | 1732 | .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes", |
1356 | .cra_priority = 300, | 1733 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1357 | .cra_flags = CRYPTO_ALG_ASYNC | | 1734 | .cra_flags = CRYPTO_ALG_ASYNC | |
1358 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1735 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1359 | .cra_blocksize = AES_BLOCK_SIZE, | 1736 | .cra_blocksize = AES_BLOCK_SIZE, |
@@ -1365,3 +1742,564 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { | |||
1365 | }, | 1742 | }, |
1366 | }, | 1743 | }, |
1367 | }; | 1744 | }; |
1745 | |||
1746 | static int safexcel_aead_sha1_des3_cra_init(struct crypto_tfm *tfm) | ||
1747 | { | ||
1748 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1749 | |||
1750 | safexcel_aead_sha1_cra_init(tfm); | ||
1751 | ctx->alg = SAFEXCEL_3DES; /* override default */ | ||
1752 | return 0; | ||
1753 | } | ||
1754 | |||
1755 | struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede = { | ||
1756 | .type = SAFEXCEL_ALG_TYPE_AEAD, | ||
1757 | .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA1, | ||
1758 | .alg.aead = { | ||
1759 | .setkey = safexcel_aead_setkey, | ||
1760 | .encrypt = safexcel_aead_encrypt, | ||
1761 | .decrypt = safexcel_aead_decrypt, | ||
1762 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
1763 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
1764 | .base = { | ||
1765 | .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", | ||
1766 | .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des3_ede", | ||
1767 | .cra_priority = SAFEXCEL_CRA_PRIORITY, | ||
1768 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
1769 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1770 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
1771 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | ||
1772 | .cra_alignmask = 0, | ||
1773 | .cra_init = safexcel_aead_sha1_des3_cra_init, | ||
1774 | .cra_exit = safexcel_aead_cra_exit, | ||
1775 | .cra_module = THIS_MODULE, | ||
1776 | }, | ||
1777 | }, | ||
1778 | }; | ||
1779 | |||
1780 | static int safexcel_aead_sha1_ctr_cra_init(struct crypto_tfm *tfm) | ||
1781 | { | ||
1782 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1783 | |||
1784 | safexcel_aead_sha1_cra_init(tfm); | ||
1785 | ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */ | ||
1786 | return 0; | ||
1787 | } | ||
1788 | |||
1789 | struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes = { | ||
1790 | .type = SAFEXCEL_ALG_TYPE_AEAD, | ||
1791 | .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA1, | ||
1792 | .alg.aead = { | ||
1793 | .setkey = safexcel_aead_setkey, | ||
1794 | .encrypt = safexcel_aead_encrypt, | ||
1795 | .decrypt = safexcel_aead_decrypt, | ||
1796 | .ivsize = CTR_RFC3686_IV_SIZE, | ||
1797 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
1798 | .base = { | ||
1799 | .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", | ||
1800 | .cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-aes", | ||
1801 | .cra_priority = SAFEXCEL_CRA_PRIORITY, | ||
1802 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
1803 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1804 | .cra_blocksize = 1, | ||
1805 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | ||
1806 | .cra_alignmask = 0, | ||
1807 | .cra_init = safexcel_aead_sha1_ctr_cra_init, | ||
1808 | .cra_exit = safexcel_aead_cra_exit, | ||
1809 | .cra_module = THIS_MODULE, | ||
1810 | }, | ||
1811 | }, | ||
1812 | }; | ||
1813 | |||
1814 | static int safexcel_aead_sha256_ctr_cra_init(struct crypto_tfm *tfm) | ||
1815 | { | ||
1816 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1817 | |||
1818 | safexcel_aead_sha256_cra_init(tfm); | ||
1819 | ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */ | ||
1820 | return 0; | ||
1821 | } | ||
1822 | |||
1823 | struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes = { | ||
1824 | .type = SAFEXCEL_ALG_TYPE_AEAD, | ||
1825 | .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256, | ||
1826 | .alg.aead = { | ||
1827 | .setkey = safexcel_aead_setkey, | ||
1828 | .encrypt = safexcel_aead_encrypt, | ||
1829 | .decrypt = safexcel_aead_decrypt, | ||
1830 | .ivsize = CTR_RFC3686_IV_SIZE, | ||
1831 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
1832 | .base = { | ||
1833 | .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", | ||
1834 | .cra_driver_name = "safexcel-authenc-hmac-sha256-ctr-aes", | ||
1835 | .cra_priority = SAFEXCEL_CRA_PRIORITY, | ||
1836 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
1837 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1838 | .cra_blocksize = 1, | ||
1839 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | ||
1840 | .cra_alignmask = 0, | ||
1841 | .cra_init = safexcel_aead_sha256_ctr_cra_init, | ||
1842 | .cra_exit = safexcel_aead_cra_exit, | ||
1843 | .cra_module = THIS_MODULE, | ||
1844 | }, | ||
1845 | }, | ||
1846 | }; | ||
1847 | |||
1848 | static int safexcel_aead_sha224_ctr_cra_init(struct crypto_tfm *tfm) | ||
1849 | { | ||
1850 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1851 | |||
1852 | safexcel_aead_sha224_cra_init(tfm); | ||
1853 | ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */ | ||
1854 | return 0; | ||
1855 | } | ||
1856 | |||
1857 | struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes = { | ||
1858 | .type = SAFEXCEL_ALG_TYPE_AEAD, | ||
1859 | .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256, | ||
1860 | .alg.aead = { | ||
1861 | .setkey = safexcel_aead_setkey, | ||
1862 | .encrypt = safexcel_aead_encrypt, | ||
1863 | .decrypt = safexcel_aead_decrypt, | ||
1864 | .ivsize = CTR_RFC3686_IV_SIZE, | ||
1865 | .maxauthsize = SHA224_DIGEST_SIZE, | ||
1866 | .base = { | ||
1867 | .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))", | ||
1868 | .cra_driver_name = "safexcel-authenc-hmac-sha224-ctr-aes", | ||
1869 | .cra_priority = SAFEXCEL_CRA_PRIORITY, | ||
1870 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
1871 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1872 | .cra_blocksize = 1, | ||
1873 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | ||
1874 | .cra_alignmask = 0, | ||
1875 | .cra_init = safexcel_aead_sha224_ctr_cra_init, | ||
1876 | .cra_exit = safexcel_aead_cra_exit, | ||
1877 | .cra_module = THIS_MODULE, | ||
1878 | }, | ||
1879 | }, | ||
1880 | }; | ||
1881 | |||
1882 | static int safexcel_aead_sha512_ctr_cra_init(struct crypto_tfm *tfm) | ||
1883 | { | ||
1884 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1885 | |||
1886 | safexcel_aead_sha512_cra_init(tfm); | ||
1887 | ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */ | ||
1888 | return 0; | ||
1889 | } | ||
1890 | |||
1891 | struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes = { | ||
1892 | .type = SAFEXCEL_ALG_TYPE_AEAD, | ||
1893 | .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512, | ||
1894 | .alg.aead = { | ||
1895 | .setkey = safexcel_aead_setkey, | ||
1896 | .encrypt = safexcel_aead_encrypt, | ||
1897 | .decrypt = safexcel_aead_decrypt, | ||
1898 | .ivsize = CTR_RFC3686_IV_SIZE, | ||
1899 | .maxauthsize = SHA512_DIGEST_SIZE, | ||
1900 | .base = { | ||
1901 | .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))", | ||
1902 | .cra_driver_name = "safexcel-authenc-hmac-sha512-ctr-aes", | ||
1903 | .cra_priority = SAFEXCEL_CRA_PRIORITY, | ||
1904 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
1905 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1906 | .cra_blocksize = 1, | ||
1907 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | ||
1908 | .cra_alignmask = 0, | ||
1909 | .cra_init = safexcel_aead_sha512_ctr_cra_init, | ||
1910 | .cra_exit = safexcel_aead_cra_exit, | ||
1911 | .cra_module = THIS_MODULE, | ||
1912 | }, | ||
1913 | }, | ||
1914 | }; | ||
1915 | |||
1916 | static int safexcel_aead_sha384_ctr_cra_init(struct crypto_tfm *tfm) | ||
1917 | { | ||
1918 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1919 | |||
1920 | safexcel_aead_sha384_cra_init(tfm); | ||
1921 | ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */ | ||
1922 | return 0; | ||
1923 | } | ||
1924 | |||
1925 | struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes = { | ||
1926 | .type = SAFEXCEL_ALG_TYPE_AEAD, | ||
1927 | .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512, | ||
1928 | .alg.aead = { | ||
1929 | .setkey = safexcel_aead_setkey, | ||
1930 | .encrypt = safexcel_aead_encrypt, | ||
1931 | .decrypt = safexcel_aead_decrypt, | ||
1932 | .ivsize = CTR_RFC3686_IV_SIZE, | ||
1933 | .maxauthsize = SHA384_DIGEST_SIZE, | ||
1934 | .base = { | ||
1935 | .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))", | ||
1936 | .cra_driver_name = "safexcel-authenc-hmac-sha384-ctr-aes", | ||
1937 | .cra_priority = SAFEXCEL_CRA_PRIORITY, | ||
1938 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
1939 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1940 | .cra_blocksize = 1, | ||
1941 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | ||
1942 | .cra_alignmask = 0, | ||
1943 | .cra_init = safexcel_aead_sha384_ctr_cra_init, | ||
1944 | .cra_exit = safexcel_aead_cra_exit, | ||
1945 | .cra_module = THIS_MODULE, | ||
1946 | }, | ||
1947 | }, | ||
1948 | }; | ||
1949 | |||
1950 | static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm, | ||
1951 | const u8 *key, unsigned int len) | ||
1952 | { | ||
1953 | struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); | ||
1954 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1955 | struct safexcel_crypto_priv *priv = ctx->priv; | ||
1956 | struct crypto_aes_ctx aes; | ||
1957 | int ret, i; | ||
1958 | unsigned int keylen; | ||
1959 | |||
1960 | /* Check for illegal XTS keys */ | ||
1961 | ret = xts_verify_key(ctfm, key, len); | ||
1962 | if (ret) | ||
1963 | return ret; | ||
1964 | |||
1965 | /* Only half of the key data is the cipher key */ | ||
1966 | keylen = (len >> 1); | ||
1967 | ret = aes_expandkey(&aes, key, keylen); | ||
1968 | if (ret) { | ||
1969 | crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
1970 | return ret; | ||
1971 | } | ||
1972 | |||
1973 | if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { | ||
1974 | for (i = 0; i < keylen / sizeof(u32); i++) { | ||
1975 | if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) { | ||
1976 | ctx->base.needs_inv = true; | ||
1977 | break; | ||
1978 | } | ||
1979 | } | ||
1980 | } | ||
1981 | |||
1982 | for (i = 0; i < keylen / sizeof(u32); i++) | ||
1983 | ctx->key[i] = cpu_to_le32(aes.key_enc[i]); | ||
1984 | |||
1985 | /* The other half is the tweak key */ | ||
1986 | ret = aes_expandkey(&aes, (u8 *)(key + keylen), keylen); | ||
1987 | if (ret) { | ||
1988 | crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
1989 | return ret; | ||
1990 | } | ||
1991 | |||
1992 | if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { | ||
1993 | for (i = 0; i < keylen / sizeof(u32); i++) { | ||
1994 | if (ctx->key[i + keylen / sizeof(u32)] != | ||
1995 | cpu_to_le32(aes.key_enc[i])) { | ||
1996 | ctx->base.needs_inv = true; | ||
1997 | break; | ||
1998 | } | ||
1999 | } | ||
2000 | } | ||
2001 | |||
2002 | for (i = 0; i < keylen / sizeof(u32); i++) | ||
2003 | ctx->key[i + keylen / sizeof(u32)] = | ||
2004 | cpu_to_le32(aes.key_enc[i]); | ||
2005 | |||
2006 | ctx->key_len = keylen << 1; | ||
2007 | |||
2008 | memzero_explicit(&aes, sizeof(aes)); | ||
2009 | return 0; | ||
2010 | } | ||
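
For context on the split above: an xts(aes) key is simply two independent AES keys concatenated, the data encryption key followed by the tweak key. A minimal userspace sketch of that split, illustrative only (xts_split_key() is a hypothetical helper, not part of the driver):

#include <stdio.h>
#include <string.h>

/* Hypothetical helper mirroring the setkey above: an XTS key of
 * 2 * N bytes is two independent AES keys glued together. */
static void xts_split_key(const unsigned char *key, unsigned int len,
                          unsigned char *cipher_key, unsigned char *tweak_key)
{
        unsigned int keylen = len >> 1; /* only half is the cipher key */

        memcpy(cipher_key, key, keylen);         /* data encryption key */
        memcpy(tweak_key, key + keylen, keylen); /* tweak encryption key */
}

int main(void)
{
        unsigned char key[64] = { 0 }; /* xts(aes) with AES-256: 2 * 32 bytes */
        unsigned char ck[32], tk[32];

        xts_split_key(key, sizeof(key), ck, tk);
        printf("cipher key: %zu bytes, tweak key: %zu bytes\n",
               sizeof(ck), sizeof(tk));
        return 0;
}

The driver then expands each half separately with aes_expandkey() and stores both key schedules back to back in ctx->key, which is why ctx->key_len ends up as keylen << 1.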
2011 | |||
2012 | static int safexcel_skcipher_aes_xts_cra_init(struct crypto_tfm *tfm) | ||
2013 | { | ||
2014 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
2015 | |||
2016 | safexcel_skcipher_cra_init(tfm); | ||
2017 | ctx->alg = SAFEXCEL_AES; | ||
2018 | ctx->xts = 1; | ||
2019 | ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XTS; | ||
2020 | return 0; | ||
2021 | } | ||
2022 | |||
2023 | static int safexcel_encrypt_xts(struct skcipher_request *req) | ||
2024 | { | ||
2025 | if (req->cryptlen < XTS_BLOCK_SIZE) | ||
2026 | return -EINVAL; | ||
2027 | return safexcel_queue_req(&req->base, skcipher_request_ctx(req), | ||
2028 | SAFEXCEL_ENCRYPT); | ||
2029 | } | ||
2030 | |||
2031 | static int safexcel_decrypt_xts(struct skcipher_request *req) | ||
2032 | { | ||
2033 | if (req->cryptlen < XTS_BLOCK_SIZE) | ||
2034 | return -EINVAL; | ||
2035 | return safexcel_queue_req(&req->base, skcipher_request_ctx(req), | ||
2036 | SAFEXCEL_DECRYPT); | ||
2037 | } | ||
2038 | |||
2039 | struct safexcel_alg_template safexcel_alg_xts_aes = { | ||
2040 | .type = SAFEXCEL_ALG_TYPE_SKCIPHER, | ||
2041 | .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XTS, | ||
2042 | .alg.skcipher = { | ||
2043 | .setkey = safexcel_skcipher_aesxts_setkey, | ||
2044 | .encrypt = safexcel_encrypt_xts, | ||
2045 | .decrypt = safexcel_decrypt_xts, | ||
2046 | /* XTS actually uses 2 AES keys glued together */ | ||
2047 | .min_keysize = AES_MIN_KEY_SIZE * 2, | ||
2048 | .max_keysize = AES_MAX_KEY_SIZE * 2, | ||
2049 | .ivsize = XTS_BLOCK_SIZE, | ||
2050 | .base = { | ||
2051 | .cra_name = "xts(aes)", | ||
2052 | .cra_driver_name = "safexcel-xts-aes", | ||
2053 | .cra_priority = SAFEXCEL_CRA_PRIORITY, | ||
2054 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
2055 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
2056 | .cra_blocksize = XTS_BLOCK_SIZE, | ||
2057 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | ||
2058 | .cra_alignmask = 0, | ||
2059 | .cra_init = safexcel_skcipher_aes_xts_cra_init, | ||
2060 | .cra_exit = safexcel_skcipher_cra_exit, | ||
2061 | .cra_module = THIS_MODULE, | ||
2062 | }, | ||
2063 | }, | ||
2064 | }; | ||
2065 | |||
2066 | static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key, | ||
2067 | unsigned int len) | ||
2068 | { | ||
2069 | struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); | ||
2070 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
2071 | struct safexcel_crypto_priv *priv = ctx->priv; | ||
2072 | struct crypto_aes_ctx aes; | ||
2073 | u32 hashkey[AES_BLOCK_SIZE >> 2]; | ||
2074 | int ret, i; | ||
2075 | |||
2076 | ret = aes_expandkey(&aes, key, len); | ||
2077 | if (ret) { | ||
2078 | crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
2079 | memzero_explicit(&aes, sizeof(aes)); | ||
2080 | return ret; | ||
2081 | } | ||
2082 | |||
2083 | if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { | ||
2084 | for (i = 0; i < len / sizeof(u32); i++) { | ||
2085 | if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) { | ||
2086 | ctx->base.needs_inv = true; | ||
2087 | break; | ||
2088 | } | ||
2089 | } | ||
2090 | } | ||
2091 | |||
2092 | for (i = 0; i < len / sizeof(u32); i++) | ||
2093 | ctx->key[i] = cpu_to_le32(aes.key_enc[i]); | ||
2094 | |||
2095 | ctx->key_len = len; | ||
2096 | |||
2097 | /* Compute hash key by encrypting zeroes with cipher key */ | ||
2098 | crypto_cipher_clear_flags(ctx->hkaes, CRYPTO_TFM_REQ_MASK); | ||
2099 | crypto_cipher_set_flags(ctx->hkaes, crypto_aead_get_flags(ctfm) & | ||
2100 | CRYPTO_TFM_REQ_MASK); | ||
2101 | ret = crypto_cipher_setkey(ctx->hkaes, key, len); | ||
2102 | crypto_aead_set_flags(ctfm, crypto_cipher_get_flags(ctx->hkaes) & | ||
2103 | CRYPTO_TFM_RES_MASK); | ||
2104 | if (ret) | ||
2105 | return ret; | ||
2106 | |||
2107 | memset(hashkey, 0, AES_BLOCK_SIZE); | ||
2108 | crypto_cipher_encrypt_one(ctx->hkaes, (u8 *)hashkey, (u8 *)hashkey); | ||
2109 | |||
2110 | if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { | ||
2111 | for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) { | ||
2112 | if (ctx->ipad[i] != cpu_to_be32(hashkey[i])) { | ||
2113 | ctx->base.needs_inv = true; | ||
2114 | break; | ||
2115 | } | ||
2116 | } | ||
2117 | } | ||
2118 | |||
2119 | for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) | ||
2120 | ctx->ipad[i] = cpu_to_be32(hashkey[i]); | ||
2121 | |||
2122 | memzero_explicit(hashkey, AES_BLOCK_SIZE); | ||
2123 | memzero_explicit(&aes, sizeof(aes)); | ||
2124 | return 0; | ||
2125 | } | ||
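
The "hash key" derived above is the GHASH subkey H = AES_K(0^128) from the GCM specification, stored as big-endian words in ctx->ipad so the engine can load it like precomputed hash state. A standalone sketch of that flow; aes_encrypt_block() is an explicitly fake stand-in for a real AES primitive so the example stays runnable:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

/* Placeholder only: a real implementation would run one AES block
 * encryption here; we just copy the input so the sketch still runs. */
static void aes_encrypt_block(const uint8_t *key, size_t keylen,
                              const uint8_t in[AES_BLOCK_SIZE],
                              uint8_t out[AES_BLOCK_SIZE])
{
        (void)key;
        (void)keylen;
        memcpy(out, in, AES_BLOCK_SIZE);
}

int main(void)
{
        uint8_t key[16] = { 0 };
        uint8_t zeroes[AES_BLOCK_SIZE] = { 0 };
        uint8_t h[AES_BLOCK_SIZE];
        uint32_t words[AES_BLOCK_SIZE / 4];
        int i;

        /* Compute hash key by encrypting zeroes with the cipher key */
        aes_encrypt_block(key, sizeof(key), zeroes, h);

        /* Store H as big-endian u32 words, as the engine context expects */
        for (i = 0; i < AES_BLOCK_SIZE / 4; i++)
                words[i] = (uint32_t)h[4 * i] << 24 | h[4 * i + 1] << 16 |
                           h[4 * i + 2] << 8 | h[4 * i + 3];

        printf("H word[0] = 0x%08x\n", words[0]);
        return 0;
}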
2126 | |||
2127 | static int safexcel_aead_gcm_cra_init(struct crypto_tfm *tfm) | ||
2128 | { | ||
2129 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
2130 | |||
2131 | safexcel_aead_cra_init(tfm); | ||
2132 | ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_GHASH; | ||
2133 | ctx->state_sz = GHASH_BLOCK_SIZE; | ||
2134 | ctx->xcm = EIP197_XCM_MODE_GCM; | ||
2135 | ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */ | ||
2136 | |||
2137 | ctx->hkaes = crypto_alloc_cipher("aes", 0, 0); | ||
2138 | if (IS_ERR(ctx->hkaes)) | ||
2139 | return PTR_ERR(ctx->hkaes); | ||
2140 | |||
2141 | return 0; | ||
2142 | } | ||
2143 | |||
2144 | static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm) | ||
2145 | { | ||
2146 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
2147 | |||
2148 | crypto_free_cipher(ctx->hkaes); | ||
2149 | safexcel_aead_cra_exit(tfm); | ||
2150 | } | ||
2151 | |||
2152 | static int safexcel_aead_gcm_setauthsize(struct crypto_aead *tfm, | ||
2153 | unsigned int authsize) | ||
2154 | { | ||
2155 | return crypto_gcm_check_authsize(authsize); | ||
2156 | } | ||
2157 | |||
2158 | struct safexcel_alg_template safexcel_alg_gcm = { | ||
2159 | .type = SAFEXCEL_ALG_TYPE_AEAD, | ||
2160 | .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_GHASH, | ||
2161 | .alg.aead = { | ||
2162 | .setkey = safexcel_aead_gcm_setkey, | ||
2163 | .setauthsize = safexcel_aead_gcm_setauthsize, | ||
2164 | .encrypt = safexcel_aead_encrypt, | ||
2165 | .decrypt = safexcel_aead_decrypt, | ||
2166 | .ivsize = GCM_AES_IV_SIZE, | ||
2167 | .maxauthsize = GHASH_DIGEST_SIZE, | ||
2168 | .base = { | ||
2169 | .cra_name = "gcm(aes)", | ||
2170 | .cra_driver_name = "safexcel-gcm-aes", | ||
2171 | .cra_priority = SAFEXCEL_CRA_PRIORITY, | ||
2172 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
2173 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
2174 | .cra_blocksize = 1, | ||
2175 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | ||
2176 | .cra_alignmask = 0, | ||
2177 | .cra_init = safexcel_aead_gcm_cra_init, | ||
2178 | .cra_exit = safexcel_aead_gcm_cra_exit, | ||
2179 | .cra_module = THIS_MODULE, | ||
2180 | }, | ||
2181 | }, | ||
2182 | }; | ||
2183 | |||
2184 | static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key, | ||
2185 | unsigned int len) | ||
2186 | { | ||
2187 | struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); | ||
2188 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
2189 | struct safexcel_crypto_priv *priv = ctx->priv; | ||
2190 | struct crypto_aes_ctx aes; | ||
2191 | int ret, i; | ||
2192 | |||
2193 | ret = aes_expandkey(&aes, key, len); | ||
2194 | if (ret) { | ||
2195 | crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
2196 | memzero_explicit(&aes, sizeof(aes)); | ||
2197 | return ret; | ||
2198 | } | ||
2199 | |||
2200 | if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { | ||
2201 | for (i = 0; i < len / sizeof(u32); i++) { | ||
2202 | if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) { | ||
2203 | ctx->base.needs_inv = true; | ||
2204 | break; | ||
2205 | } | ||
2206 | } | ||
2207 | } | ||
2208 | |||
2209 | for (i = 0; i < len / sizeof(u32); i++) { | ||
2210 | ctx->key[i] = cpu_to_le32(aes.key_enc[i]); | ||
2211 | ctx->ipad[i + 2 * AES_BLOCK_SIZE / sizeof(u32)] = | ||
2212 | cpu_to_be32(aes.key_enc[i]); | ||
2213 | } | ||
2214 | |||
2215 | ctx->key_len = len; | ||
2216 | ctx->state_sz = 2 * AES_BLOCK_SIZE + len; | ||
2217 | |||
2218 | if (len == AES_KEYSIZE_192) | ||
2219 | ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192; | ||
2220 | else if (len == AES_KEYSIZE_256) | ||
2221 | ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256; | ||
2222 | else | ||
2223 | ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128; | ||
2224 | |||
2225 | memzero_explicit(&aes, sizeof(aes)); | ||
2226 | return 0; | ||
2227 | } | ||
2228 | |||
2229 | static int safexcel_aead_ccm_cra_init(struct crypto_tfm *tfm) | ||
2230 | { | ||
2231 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
2232 | |||
2233 | safexcel_aead_cra_init(tfm); | ||
2234 | ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128; | ||
2235 | ctx->state_sz = 3 * AES_BLOCK_SIZE; | ||
2236 | ctx->xcm = EIP197_XCM_MODE_CCM; | ||
2237 | ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */ | ||
2238 | return 0; | ||
2239 | } | ||
2240 | |||
2241 | static int safexcel_aead_ccm_setauthsize(struct crypto_aead *tfm, | ||
2242 | unsigned int authsize) | ||
2243 | { | ||
2244 | /* Borrowed from crypto/ccm.c */ | ||
2245 | switch (authsize) { | ||
2246 | case 4: | ||
2247 | case 6: | ||
2248 | case 8: | ||
2249 | case 10: | ||
2250 | case 12: | ||
2251 | case 14: | ||
2252 | case 16: | ||
2253 | break; | ||
2254 | default: | ||
2255 | return -EINVAL; | ||
2256 | } | ||
2257 | |||
2258 | return 0; | ||
2259 | } | ||
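
The accepted sizes mirror RFC 3610, which defines the CCM tag length M as an even byte count from 4 to 16; any other value cannot be encoded in the CCM flags byte, hence -EINVAL.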
2260 | |||
2261 | static int safexcel_ccm_encrypt(struct aead_request *req) | ||
2262 | { | ||
2263 | struct safexcel_cipher_req *creq = aead_request_ctx(req); | ||
2264 | |||
2265 | if (req->iv[0] < 1 || req->iv[0] > 7) | ||
2266 | return -EINVAL; | ||
2267 | |||
2268 | return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT); | ||
2269 | } | ||
2270 | |||
2271 | static int safexcel_ccm_decrypt(struct aead_request *req) | ||
2272 | { | ||
2273 | struct safexcel_cipher_req *creq = aead_request_ctx(req); | ||
2274 | |||
2275 | if (req->iv[0] < 1 || req->iv[0] > 7) | ||
2276 | return -EINVAL; | ||
2277 | |||
2278 | return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT); | ||
2279 | } | ||
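
The iv[0] range check in both helpers above follows from the CCM nonce layout: the first IV byte carries L' = L - 1, where L is the byte width of the message length field, and RFC 3610 permits L from 2 to 8. A small sketch of that decoding, illustrative only (ccm_length_field_size() is hypothetical, not driver code):

#include <stdint.h>
#include <stdio.h>

/* iv[0] holds L' = L - 1 per RFC 3610, so the valid range
 * 2 <= L <= 8 maps exactly onto 1 <= iv[0] <= 7. */
static int ccm_length_field_size(const uint8_t *iv)
{
        int l = iv[0] + 1;

        if (l < 2 || l > 8)
                return -1;      /* the driver returns -EINVAL instead */
        return l;
}

int main(void)
{
        uint8_t iv[16] = { 3 }; /* L' = 3, i.e. a 4-byte length field */

        printf("L = %d bytes\n", ccm_length_field_size(iv));
        return 0;
}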
2280 | |||
2281 | struct safexcel_alg_template safexcel_alg_ccm = { | ||
2282 | .type = SAFEXCEL_ALG_TYPE_AEAD, | ||
2283 | .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_CBC_MAC_ALL, | ||
2284 | .alg.aead = { | ||
2285 | .setkey = safexcel_aead_ccm_setkey, | ||
2286 | .setauthsize = safexcel_aead_ccm_setauthsize, | ||
2287 | .encrypt = safexcel_ccm_encrypt, | ||
2288 | .decrypt = safexcel_ccm_decrypt, | ||
2289 | .ivsize = AES_BLOCK_SIZE, | ||
2290 | .maxauthsize = AES_BLOCK_SIZE, | ||
2291 | .base = { | ||
2292 | .cra_name = "ccm(aes)", | ||
2293 | .cra_driver_name = "safexcel-ccm-aes", | ||
2294 | .cra_priority = SAFEXCEL_CRA_PRIORITY, | ||
2295 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
2296 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
2297 | .cra_blocksize = 1, | ||
2298 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | ||
2299 | .cra_alignmask = 0, | ||
2300 | .cra_init = safexcel_aead_ccm_cra_init, | ||
2301 | .cra_exit = safexcel_aead_cra_exit, | ||
2302 | .cra_module = THIS_MODULE, | ||
2303 | }, | ||
2304 | }, | ||
2305 | }; | ||
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index a80a5e757b1f..2effb6d21e8b 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c | |||
@@ -29,33 +29,31 @@ struct safexcel_ahash_req { | |||
29 | bool finish; | 29 | bool finish; |
30 | bool hmac; | 30 | bool hmac; |
31 | bool needs_inv; | 31 | bool needs_inv; |
32 | bool hmac_zlen; /* HMAC zero length (special case) */ | ||
33 | bool len_is_le; /* length field is little-endian (e.g. MD5) */ | ||
32 | 34 | ||
33 | int nents; | 35 | int nents; |
34 | dma_addr_t result_dma; | 36 | dma_addr_t result_dma; |
35 | 37 | ||
36 | u32 digest; | 38 | u32 digest; |
37 | 39 | ||
38 | u8 state_sz; /* expected sate size, only set once */ | 40 | u8 state_sz; /* expected state size, only set once */ |
41 | u8 block_sz; /* block size, only set once */ | ||
39 | u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32)); | 42 | u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32)); |
40 | 43 | ||
41 | u64 len[2]; | 44 | u64 len; |
42 | u64 processed[2]; | 45 | u64 processed; |
43 | 46 | ||
44 | u8 cache[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32)); | 47 | u8 cache[HASH_CACHE_SIZE] __aligned(sizeof(u32)); |
45 | dma_addr_t cache_dma; | 48 | dma_addr_t cache_dma; |
46 | unsigned int cache_sz; | 49 | unsigned int cache_sz; |
47 | 50 | ||
48 | u8 cache_next[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32)); | 51 | u8 cache_next[HASH_CACHE_SIZE] __aligned(sizeof(u32)); |
49 | }; | 52 | }; |
50 | 53 | ||
51 | static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req) | 54 | static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req) |
52 | { | 55 | { |
53 | u64 len, processed; | 56 | return req->len - req->processed; |
54 | |||
55 | len = (0xffffffff * req->len[1]) + req->len[0]; | ||
56 | processed = (0xffffffff * req->processed[1]) + req->processed[0]; | ||
57 | |||
58 | return len - processed; | ||
59 | } | 57 | } |
60 | 58 | ||
61 | static void safexcel_hash_token(struct safexcel_command_desc *cdesc, | 59 | static void safexcel_hash_token(struct safexcel_command_desc *cdesc, |
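
This hunk replaces the split 32-bit len/processed pairs, previously recombined with an odd 0xffffffff multiplier, with plain u64 counters, so the queued length collapses to a single subtraction. A minimal sketch of the simplified bookkeeping (illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

struct req_state {
        uint64_t len;           /* total bytes accepted by update() */
        uint64_t processed;     /* total bytes already sent to the engine */
};

static uint64_t queued_len(const struct req_state *r)
{
        return r->len - r->processed;
}

int main(void)
{
        struct req_state r = { .len = 192, .processed = 64 };

        printf("queued = %llu bytes\n", (unsigned long long)queued_len(&r));
        return 0;
}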
@@ -79,75 +77,104 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc, | |||
79 | 77 | ||
80 | static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, | 78 | static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, |
81 | struct safexcel_ahash_req *req, | 79 | struct safexcel_ahash_req *req, |
82 | struct safexcel_command_desc *cdesc, | 80 | struct safexcel_command_desc *cdesc) |
83 | unsigned int digestsize) | ||
84 | { | 81 | { |
85 | struct safexcel_crypto_priv *priv = ctx->priv; | 82 | struct safexcel_crypto_priv *priv = ctx->priv; |
86 | int i; | 83 | u64 count = 0; |
87 | 84 | ||
88 | cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT; | ||
89 | cdesc->control_data.control0 |= ctx->alg; | 85 | cdesc->control_data.control0 |= ctx->alg; |
90 | cdesc->control_data.control0 |= req->digest; | 86 | |
91 | 87 | /* | |
92 | if (!req->finish) | 88 | * Copy the input digest if needed, and set up the context |
93 | cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH; | 89 | * fields. Do this now as we need it to set up the first command |
94 | 90 | * descriptor. | |
95 | if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) { | 91 | */ |
96 | if (req->processed[0] || req->processed[1]) { | 92 | if (!req->processed) { |
97 | if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5) | 93 | /* First - and possibly only - block of basic hash only */ |
98 | cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(5); | 94 | if (req->finish) { |
99 | else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1) | 95 | cdesc->control_data.control0 |= |
100 | cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6); | 96 | CONTEXT_CONTROL_TYPE_HASH_OUT | |
101 | else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 || | 97 | CONTEXT_CONTROL_RESTART_HASH | |
102 | ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256) | 98 | /* ensure its not 0! */ |
103 | cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9); | 99 | CONTEXT_CONTROL_SIZE(1); |
104 | else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384 || | ||
105 | ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512) | ||
106 | cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(17); | ||
107 | |||
108 | cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT; | ||
109 | } else { | 100 | } else { |
110 | cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH; | 101 | cdesc->control_data.control0 |= |
102 | CONTEXT_CONTROL_TYPE_HASH_OUT | | ||
103 | CONTEXT_CONTROL_RESTART_HASH | | ||
104 | CONTEXT_CONTROL_NO_FINISH_HASH | | ||
105 | /* ensure it's not 0! */ | ||
106 | CONTEXT_CONTROL_SIZE(1); | ||
111 | } | 107 | } |
108 | return; | ||
109 | } | ||
112 | 110 | ||
113 | /* | 111 | /* Hash continuation or HMAC, setup (inner) digest from state */ |
114 | * Copy the input digest if needed, and setup the context | 112 | memcpy(ctx->base.ctxr->data, req->state, req->state_sz); |
115 | * fields. Do this now as we need it to setup the first command | 113 | |
116 | * descriptor. | 114 | if (req->finish) { |
117 | */ | 115 | /* Compute digest count for hash/HMAC finish operations */ |
118 | if (req->processed[0] || req->processed[1]) { | 116 | if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) || |
119 | for (i = 0; i < digestsize / sizeof(u32); i++) | 117 | req->hmac_zlen || (req->processed != req->block_sz)) { |
120 | ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]); | 118 | count = req->processed / EIP197_COUNTER_BLOCK_SIZE; |
121 | 119 | ||
122 | if (req->finish) { | 120 | /* This is a hardware limitation, as the |
123 | u64 count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE; | 121 | * counter must fit into an u32. This represents |
124 | count += ((0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * | 122 | * a fairly big amount of input data, so we |
125 | req->processed[1]); | 123 | * shouldn't see this. |
126 | 124 | */ | |
127 | /* This is a haredware limitation, as the | 125 | if (unlikely(count & 0xffffffff00000000ULL)) { |
128 | * counter must fit into an u32. This represents | 126 | dev_warn(priv->dev, |
129 | * a farily big amount of input data, so we | 127 | "Input data is too big\n"); |
130 | * shouldn't see this. | 128 | return; |
131 | */ | ||
132 | if (unlikely(count & 0xffff0000)) { | ||
133 | dev_warn(priv->dev, | ||
134 | "Input data is too big\n"); | ||
135 | return; | ||
136 | } | ||
137 | |||
138 | ctx->base.ctxr->data[i] = cpu_to_le32(count); | ||
139 | } | 129 | } |
140 | } | 130 | } |
141 | } else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) { | ||
142 | cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32)); | ||
143 | 131 | ||
144 | memcpy(ctx->base.ctxr->data, ctx->ipad, req->state_sz); | 132 | if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) || |
145 | memcpy(ctx->base.ctxr->data + req->state_sz / sizeof(u32), | 133 | /* Special case: zero length HMAC */ |
146 | ctx->opad, req->state_sz); | 134 | req->hmac_zlen || |
135 | /* PE HW < 4.4 cannot do HMAC continue, fake using hash */ | ||
136 | (req->processed != req->block_sz)) { | ||
137 | /* Basic hash continue operation, need digest + cnt */ | ||
138 | cdesc->control_data.control0 |= | ||
139 | CONTEXT_CONTROL_SIZE((req->state_sz >> 2) + 1) | | ||
140 | CONTEXT_CONTROL_TYPE_HASH_OUT | | ||
141 | CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | ||
142 | /* For zero-len HMAC, don't finalize, already padded! */ | ||
143 | if (req->hmac_zlen) | ||
144 | cdesc->control_data.control0 |= | ||
145 | CONTEXT_CONTROL_NO_FINISH_HASH; | ||
146 | cdesc->control_data.control1 |= | ||
147 | CONTEXT_CONTROL_DIGEST_CNT; | ||
148 | ctx->base.ctxr->data[req->state_sz >> 2] = | ||
149 | cpu_to_le32(count); | ||
150 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | ||
151 | |||
152 | /* Clear zero-length HMAC flag for next operation! */ | ||
153 | req->hmac_zlen = false; | ||
154 | } else { /* HMAC */ | ||
155 | /* Need outer digest for HMAC finalization */ | ||
156 | memcpy(ctx->base.ctxr->data + (req->state_sz >> 2), | ||
157 | ctx->opad, req->state_sz); | ||
158 | |||
159 | /* Single pass HMAC - no digest count */ | ||
160 | cdesc->control_data.control0 |= | ||
161 | CONTEXT_CONTROL_SIZE(req->state_sz >> 1) | | ||
162 | CONTEXT_CONTROL_TYPE_HASH_OUT | | ||
163 | CONTEXT_CONTROL_DIGEST_HMAC; | ||
164 | } | ||
165 | } else { /* Hash continuation, do not finish yet */ | ||
166 | cdesc->control_data.control0 |= | ||
167 | CONTEXT_CONTROL_SIZE(req->state_sz >> 2) | | ||
168 | CONTEXT_CONTROL_DIGEST_PRECOMPUTED | | ||
169 | CONTEXT_CONTROL_TYPE_HASH_OUT | | ||
170 | CONTEXT_CONTROL_NO_FINISH_HASH; | ||
147 | } | 171 | } |
148 | } | 172 | } |
149 | 173 | ||
150 | static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring, | 174 | static int safexcel_ahash_enqueue(struct ahash_request *areq); |
175 | |||
176 | static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, | ||
177 | int ring, | ||
151 | struct crypto_async_request *async, | 178 | struct crypto_async_request *async, |
152 | bool *should_complete, int *ret) | 179 | bool *should_complete, int *ret) |
153 | { | 180 | { |
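
For scale on the "Input data is too big" warning above: assuming the 64-byte EIP197_COUNTER_BLOCK_SIZE defined alongside this driver, the 32-bit digest counter only overflows after 2^32 * 64 = 2^38 bytes, i.e. 256 GiB hashed in a single request, so the check should be unreachable in practice, exactly as the comment says.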
@@ -155,6 +182,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin | |||
155 | struct ahash_request *areq = ahash_request_cast(async); | 182 | struct ahash_request *areq = ahash_request_cast(async); |
156 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | 183 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); |
157 | struct safexcel_ahash_req *sreq = ahash_request_ctx(areq); | 184 | struct safexcel_ahash_req *sreq = ahash_request_ctx(areq); |
185 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
158 | u64 cache_len; | 186 | u64 cache_len; |
159 | 187 | ||
160 | *ret = 0; | 188 | *ret = 0; |
@@ -188,9 +216,31 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin | |||
188 | sreq->cache_sz = 0; | 216 | sreq->cache_sz = 0; |
189 | } | 217 | } |
190 | 218 | ||
191 | if (sreq->finish) | 219 | if (sreq->finish) { |
220 | if (sreq->hmac && | ||
221 | (sreq->digest != CONTEXT_CONTROL_DIGEST_HMAC)) { | ||
222 | /* Faking HMAC using hash - need to do outer hash */ | ||
223 | memcpy(sreq->cache, sreq->state, | ||
224 | crypto_ahash_digestsize(ahash)); | ||
225 | |||
226 | memcpy(sreq->state, ctx->opad, sreq->state_sz); | ||
227 | |||
228 | sreq->len = sreq->block_sz + | ||
229 | crypto_ahash_digestsize(ahash); | ||
230 | sreq->processed = sreq->block_sz; | ||
231 | sreq->hmac = 0; | ||
232 | |||
233 | ctx->base.needs_inv = true; | ||
234 | areq->nbytes = 0; | ||
235 | safexcel_ahash_enqueue(areq); | ||
236 | |||
237 | *should_complete = false; /* Not done yet */ | ||
238 | return 1; | ||
239 | } | ||
240 | |||
192 | memcpy(areq->result, sreq->state, | 241 | memcpy(areq->result, sreq->state, |
193 | crypto_ahash_digestsize(ahash)); | 242 | crypto_ahash_digestsize(ahash)); |
243 | } | ||
194 | 244 | ||
195 | cache_len = safexcel_queued_len(sreq); | 245 | cache_len = safexcel_queued_len(sreq); |
196 | if (cache_len) | 246 | if (cache_len) |
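
The "faking HMAC using hash" path above relies on the HMAC construction itself: HMAC(K, m) = H((K XOR opad) || H((K XOR ipad) || m)). Once the inner hash completes, the request is restarted with the precomputed opad digest loaded as state, counted as one already processed block, and the inner digest fed in as the remaining data, which yields the outer hash on engines that cannot natively continue an HMAC.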
@@ -205,7 +255,6 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, | |||
205 | int *commands, int *results) | 255 | int *commands, int *results) |
206 | { | 256 | { |
207 | struct ahash_request *areq = ahash_request_cast(async); | 257 | struct ahash_request *areq = ahash_request_cast(async); |
208 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | ||
209 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | 258 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
210 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | 259 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); |
211 | struct safexcel_crypto_priv *priv = ctx->priv; | 260 | struct safexcel_crypto_priv *priv = ctx->priv; |
@@ -213,33 +262,25 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, | |||
213 | struct safexcel_result_desc *rdesc; | 262 | struct safexcel_result_desc *rdesc; |
214 | struct scatterlist *sg; | 263 | struct scatterlist *sg; |
215 | int i, extra = 0, n_cdesc = 0, ret = 0; | 264 | int i, extra = 0, n_cdesc = 0, ret = 0; |
216 | u64 queued, len, cache_len, cache_max; | 265 | u64 queued, len, cache_len; |
217 | |||
218 | cache_max = crypto_ahash_blocksize(ahash); | ||
219 | if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) | ||
220 | cache_max <<= 1; | ||
221 | 266 | ||
222 | queued = len = safexcel_queued_len(req); | 267 | queued = len = safexcel_queued_len(req); |
223 | if (queued <= cache_max) | 268 | if (queued <= HASH_CACHE_SIZE) |
224 | cache_len = queued; | 269 | cache_len = queued; |
225 | else | 270 | else |
226 | cache_len = queued - areq->nbytes; | 271 | cache_len = queued - areq->nbytes; |
227 | 272 | ||
228 | if (!req->last_req) { | 273 | if (!req->finish && !req->last_req) { |
229 | /* If this is not the last request and the queued data does not | 274 | /* If this is not the last request and the queued data does not |
230 | * fit into full blocks, cache it for the next send() call. | 275 | * fit into full cache blocks, cache it for the next send call. |
231 | */ | 276 | */ |
232 | extra = queued & (crypto_ahash_blocksize(ahash) - 1); | 277 | extra = queued & (HASH_CACHE_SIZE - 1); |
233 | |||
234 | if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC && | ||
235 | extra < crypto_ahash_blocksize(ahash)) | ||
236 | extra += crypto_ahash_blocksize(ahash); | ||
237 | 278 | ||
238 | /* If this is not the last request and the queued data | 279 | /* If this is not the last request and the queued data |
239 | * is a multiple of a block, cache the last one for now. | 280 | * is a multiple of a block, cache the last one for now. |
240 | */ | 281 | */ |
241 | if (!extra) | 282 | if (!extra) |
242 | extra = crypto_ahash_blocksize(ahash); | 283 | extra = HASH_CACHE_SIZE; |
243 | 284 | ||
244 | sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), | 285 | sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), |
245 | req->cache_next, extra, | 286 | req->cache_next, extra, |
@@ -247,6 +288,12 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, | |||
247 | 288 | ||
248 | queued -= extra; | 289 | queued -= extra; |
249 | len -= extra; | 290 | len -= extra; |
291 | |||
292 | if (!queued) { | ||
293 | *commands = 0; | ||
294 | *results = 0; | ||
295 | return 0; | ||
296 | } | ||
250 | } | 297 | } |
251 | 298 | ||
252 | /* Add a command descriptor for the cached data, if any */ | 299 | /* Add a command descriptor for the cached data, if any */ |
@@ -272,8 +319,14 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, | |||
272 | goto send_command; | 319 | goto send_command; |
273 | } | 320 | } |
274 | 321 | ||
322 | /* Skip descriptor generation for zero-length requests */ | ||
323 | if (!areq->nbytes) | ||
324 | goto send_command; | ||
325 | |||
275 | /* Now handle the current ahash request buffer(s) */ | 326 | /* Now handle the current ahash request buffer(s) */ |
276 | req->nents = dma_map_sg(priv->dev, areq->src, sg_nents(areq->src), | 327 | req->nents = dma_map_sg(priv->dev, areq->src, |
328 | sg_nents_for_len(areq->src, | ||
329 | areq->nbytes), | ||
277 | DMA_TO_DEVICE); | 330 | DMA_TO_DEVICE); |
278 | if (!req->nents) { | 331 | if (!req->nents) { |
279 | ret = -ENOMEM; | 332 | ret = -ENOMEM; |
@@ -288,7 +341,8 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, | |||
288 | sglen = queued; | 341 | sglen = queued; |
289 | 342 | ||
290 | cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, | 343 | cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, |
291 | !(queued - sglen), sg_dma_address(sg), | 344 | !(queued - sglen), |
345 | sg_dma_address(sg), | ||
292 | sglen, len, ctx->base.ctxr_dma); | 346 | sglen, len, ctx->base.ctxr_dma); |
293 | if (IS_ERR(cdesc)) { | 347 | if (IS_ERR(cdesc)) { |
294 | ret = PTR_ERR(cdesc); | 348 | ret = PTR_ERR(cdesc); |
@@ -306,7 +360,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, | |||
306 | 360 | ||
307 | send_command: | 361 | send_command: |
308 | /* Setup the context options */ | 362 | /* Setup the context options */ |
309 | safexcel_context_control(ctx, req, first_cdesc, req->state_sz); | 363 | safexcel_context_control(ctx, req, first_cdesc); |
310 | 364 | ||
311 | /* Add the token */ | 365 | /* Add the token */ |
312 | safexcel_hash_token(first_cdesc, len, req->state_sz); | 366 | safexcel_hash_token(first_cdesc, len, req->state_sz); |
@@ -328,9 +382,7 @@ send_command: | |||
328 | 382 | ||
329 | safexcel_rdr_req_set(priv, ring, rdesc, &areq->base); | 383 | safexcel_rdr_req_set(priv, ring, rdesc, &areq->base); |
330 | 384 | ||
331 | req->processed[0] += len; | 385 | req->processed += len; |
332 | if (req->processed[0] < len) | ||
333 | req->processed[1]++; | ||
334 | 386 | ||
335 | *commands = n_cdesc; | 387 | *commands = n_cdesc; |
336 | *results = 1; | 388 | *results = 1; |
@@ -355,27 +407,6 @@ unmap_cache: | |||
355 | return ret; | 407 | return ret; |
356 | } | 408 | } |
357 | 409 | ||
358 | static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq) | ||
359 | { | ||
360 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | ||
361 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | ||
362 | unsigned int state_w_sz = req->state_sz / sizeof(u32); | ||
363 | u64 processed; | ||
364 | int i; | ||
365 | |||
366 | processed = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE; | ||
367 | processed += (0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * req->processed[1]; | ||
368 | |||
369 | for (i = 0; i < state_w_sz; i++) | ||
370 | if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i])) | ||
371 | return true; | ||
372 | |||
373 | if (ctx->base.ctxr->data[state_w_sz] != cpu_to_le32(processed)) | ||
374 | return true; | ||
375 | |||
376 | return false; | ||
377 | } | ||
378 | |||
379 | static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, | 410 | static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, |
380 | int ring, | 411 | int ring, |
381 | struct crypto_async_request *async, | 412 | struct crypto_async_request *async, |
@@ -523,30 +554,25 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) | |||
523 | /* safexcel_ahash_cache: cache data until at least one request can be sent to | 554 | /* safexcel_ahash_cache: cache data until at least one request can be sent to |
524 | * the engine, aka. when there is at least 1 block size in the pipe. | 555 | * the engine, aka. when there is at least 1 block size in the pipe. |
525 | */ | 556 | */ |
526 | static int safexcel_ahash_cache(struct ahash_request *areq, u32 cache_max) | 557 | static int safexcel_ahash_cache(struct ahash_request *areq) |
527 | { | 558 | { |
528 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | 559 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
529 | u64 queued, cache_len; | 560 | u64 cache_len; |
530 | 561 | ||
531 | /* queued: everything accepted by the driver which will be handled by | ||
532 | * the next send() calls. | ||
533 | * tot sz handled by update() - tot sz handled by send() | ||
534 | */ | ||
535 | queued = safexcel_queued_len(req); | ||
536 | /* cache_len: everything accepted by the driver but not sent yet, | 562 | /* cache_len: everything accepted by the driver but not sent yet, |
537 | * tot sz handled by update() - last req sz - tot sz handled by send() | 563 | * tot sz handled by update() - last req sz - tot sz handled by send() |
538 | */ | 564 | */ |
539 | cache_len = queued - areq->nbytes; | 565 | cache_len = safexcel_queued_len(req); |
540 | 566 | ||
541 | /* | 567 | /* |
542 | * In case there aren't enough bytes to proceed (less than a | 568 | |
543 | * block size), cache the data until we have enough. | 569 | * block size), cache the data until we have enough. |
544 | */ | 570 | */ |
545 | if (cache_len + areq->nbytes <= cache_max) { | 571 | if (cache_len + areq->nbytes <= HASH_CACHE_SIZE) { |
546 | sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), | 572 | sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), |
547 | req->cache + cache_len, | 573 | req->cache + cache_len, |
548 | areq->nbytes, 0); | 574 | areq->nbytes, 0); |
549 | return areq->nbytes; | 575 | return 0; |
550 | } | 576 | } |
551 | 577 | ||
552 | /* We couldn't cache all the data */ | 578 | /* We couldn't cache all the data */ |
@@ -564,14 +590,25 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq) | |||
564 | 590 | ||
565 | if (ctx->base.ctxr) { | 591 | if (ctx->base.ctxr) { |
566 | if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv && | 592 | if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv && |
567 | (req->processed[0] || req->processed[1]) && | 593 | req->processed && |
568 | req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) | 594 | (/* invalidate for basic hash continuation finish */ |
569 | /* We're still setting needs_inv here, even though it is | 595 | (req->finish && |
596 | (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)) || | ||
597 | /* invalidate if (i)digest changed */ | ||
598 | memcmp(ctx->base.ctxr->data, req->state, req->state_sz) || | ||
599 | /* invalidate for HMAC continuation finish */ | ||
600 | (req->finish && (req->processed != req->block_sz)) || | ||
601 | /* invalidate for HMAC finish with odigest changed */ | ||
602 | (req->finish && | ||
603 | memcmp(ctx->base.ctxr->data + (req->state_sz>>2), | ||
604 | ctx->opad, req->state_sz)))) | ||
605 | /* | ||
606 | * We're still setting needs_inv here, even though it is | ||
570 | * cleared right away, because the needs_inv flag can be | 607 | * cleared right away, because the needs_inv flag can be |
571 | * set in other functions and we want to keep the same | 608 | * set in other functions and we want to keep the same |
572 | * logic. | 609 | * logic. |
573 | */ | 610 | */ |
574 | ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq); | 611 | ctx->base.needs_inv = true; |
575 | 612 | ||
576 | if (ctx->base.needs_inv) { | 613 | if (ctx->base.needs_inv) { |
577 | ctx->base.needs_inv = false; | 614 | ctx->base.needs_inv = false; |
@@ -601,35 +638,23 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq) | |||
601 | static int safexcel_ahash_update(struct ahash_request *areq) | 638 | static int safexcel_ahash_update(struct ahash_request *areq) |
602 | { | 639 | { |
603 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | 640 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
604 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | 641 | int ret; |
605 | u32 cache_max; | ||
606 | 642 | ||
607 | /* If the request is 0 length, do nothing */ | 643 | /* If the request is 0 length, do nothing */ |
608 | if (!areq->nbytes) | 644 | if (!areq->nbytes) |
609 | return 0; | 645 | return 0; |
610 | 646 | ||
611 | req->len[0] += areq->nbytes; | 647 | /* Add request to the cache if it fits */ |
612 | if (req->len[0] < areq->nbytes) | 648 | ret = safexcel_ahash_cache(areq); |
613 | req->len[1]++; | ||
614 | |||
615 | cache_max = crypto_ahash_blocksize(ahash); | ||
616 | if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) | ||
617 | cache_max <<= 1; | ||
618 | 649 | ||
619 | safexcel_ahash_cache(areq, cache_max); | 650 | /* Update total request length */ |
651 | req->len += areq->nbytes; | ||
620 | 652 | ||
621 | /* | 653 | /* If not all data could fit into the cache, go process the excess. |
622 | * We're not doing partial updates when performing an hmac request. | 654 | * Also go process immediately for an HMAC IV precompute, which |
623 | * Everything will be handled by the final() call. | 655 | * will never be finished at all, but needs to be processed anyway. |
624 | */ | 656 | */ |
625 | if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) | 657 | if ((ret && !req->finish) || req->last_req) |
626 | return 0; | ||
627 | |||
628 | if (req->hmac) | ||
629 | return safexcel_ahash_enqueue(areq); | ||
630 | |||
631 | if (!req->last_req && | ||
632 | safexcel_queued_len(req) > cache_max) | ||
633 | return safexcel_ahash_enqueue(areq); | 658 | return safexcel_ahash_enqueue(areq); |
634 | 659 | ||
635 | return 0; | 660 | return 0; |
@@ -640,11 +665,14 @@ static int safexcel_ahash_final(struct ahash_request *areq) | |||
640 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | 665 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
641 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | 666 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); |
642 | 667 | ||
643 | req->last_req = true; | ||
644 | req->finish = true; | 668 | req->finish = true; |
645 | 669 | ||
646 | /* If we have an overall 0 length request */ | 670 | if (unlikely(!req->len && !areq->nbytes)) { |
647 | if (!req->len[0] && !req->len[1] && !areq->nbytes) { | 671 | /* |
672 | * If we have an overall 0 length *hash* request: | ||
673 | * The HW cannot do 0 length hash, so we provide the correct | ||
674 | * result directly here. | ||
675 | */ | ||
648 | if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5) | 676 | if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5) |
649 | memcpy(areq->result, md5_zero_message_hash, | 677 | memcpy(areq->result, md5_zero_message_hash, |
650 | MD5_DIGEST_SIZE); | 678 | MD5_DIGEST_SIZE); |
@@ -665,6 +693,43 @@ static int safexcel_ahash_final(struct ahash_request *areq) | |||
665 | SHA512_DIGEST_SIZE); | 693 | SHA512_DIGEST_SIZE); |
666 | 694 | ||
667 | return 0; | 695 | return 0; |
696 | } else if (unlikely(req->hmac && | ||
697 | (req->len == req->block_sz) && | ||
698 | !areq->nbytes)) { | ||
699 | /* | ||
700 | * If we have an overall 0 length *HMAC* request: | ||
701 | * For HMAC, we need to finalize the inner digest | ||
702 | * and then perform the outer hash. | ||
703 | */ | ||
704 | |||
705 | /* generate pad block in the cache */ | ||
706 | /* start with a hash block of all zeroes */ | ||
707 | memset(req->cache, 0, req->block_sz); | ||
708 | /* set the first byte to 0x80 to 'append a 1 bit' */ | ||
709 | req->cache[0] = 0x80; | ||
710 | /* add the length in bits to the length field (2 bytes suffice) */ | ||
711 | if (req->len_is_le) { | ||
712 | /* Little endian length word (e.g. MD5) */ | ||
713 | req->cache[req->block_sz-8] = (req->block_sz << 3) & | ||
714 | 255; | ||
715 | req->cache[req->block_sz-7] = (req->block_sz >> 5); | ||
716 | } else { | ||
717 | /* Big endian length word (e.g. any SHA) */ | ||
718 | req->cache[req->block_sz-2] = (req->block_sz >> 5); | ||
719 | req->cache[req->block_sz-1] = (req->block_sz << 3) & | ||
720 | 255; | ||
721 | } | ||
722 | |||
723 | req->len += req->block_sz; /* plus 1 hash block */ | ||
724 | |||
725 | /* Set special zero-length HMAC flag */ | ||
726 | req->hmac_zlen = true; | ||
727 | |||
728 | /* Finalize HMAC */ | ||
729 | req->digest = CONTEXT_CONTROL_DIGEST_HMAC; | ||
730 | } else if (req->hmac) { | ||
731 | /* Finalize HMAC */ | ||
732 | req->digest = CONTEXT_CONTROL_DIGEST_HMAC; | ||
668 | } | 733 | } |
669 | 734 | ||
670 | return safexcel_ahash_enqueue(areq); | 735 | return safexcel_ahash_enqueue(areq); |
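
To make the pad block above concrete: for a zero-length HMAC over SHA-1 (block_sz = 64), the inner message is exactly the one key^ipad block already counted as processed, so the length field must encode 64 * 8 = 512 bits. A standalone sketch reproducing the big-endian branch:

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char cache[64];
        unsigned int block_sz = 64;     /* SHA-1 block size */

        memset(cache, 0, block_sz);     /* hash block of all zeroes */
        cache[0] = 0x80;                /* 'append a 1 bit' */
        cache[block_sz - 2] = block_sz >> 5;            /* high byte: 0x02 */
        cache[block_sz - 1] = (block_sz << 3) & 255;    /* low byte: 0x00 */

        /* prints "02 00", i.e. 0x0200 = 512 bits big-endian */
        printf("length field: %02x %02x (= %u bits)\n",
               cache[62], cache[63], block_sz * 8);
        return 0;
}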
@@ -674,7 +739,6 @@ static int safexcel_ahash_finup(struct ahash_request *areq) | |||
674 | { | 739 | { |
675 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | 740 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
676 | 741 | ||
677 | req->last_req = true; | ||
678 | req->finish = true; | 742 | req->finish = true; |
679 | 743 | ||
680 | safexcel_ahash_update(areq); | 744 | safexcel_ahash_update(areq); |
@@ -683,52 +747,36 @@ static int safexcel_ahash_finup(struct ahash_request *areq) | |||
683 | 747 | ||
684 | static int safexcel_ahash_export(struct ahash_request *areq, void *out) | 748 | static int safexcel_ahash_export(struct ahash_request *areq, void *out) |
685 | { | 749 | { |
686 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | ||
687 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | 750 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
688 | struct safexcel_ahash_export_state *export = out; | 751 | struct safexcel_ahash_export_state *export = out; |
689 | u32 cache_sz; | ||
690 | 752 | ||
691 | cache_sz = crypto_ahash_blocksize(ahash); | 753 | export->len = req->len; |
692 | if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) | 754 | export->processed = req->processed; |
693 | cache_sz <<= 1; | ||
694 | |||
695 | export->len[0] = req->len[0]; | ||
696 | export->len[1] = req->len[1]; | ||
697 | export->processed[0] = req->processed[0]; | ||
698 | export->processed[1] = req->processed[1]; | ||
699 | 755 | ||
700 | export->digest = req->digest; | 756 | export->digest = req->digest; |
701 | 757 | ||
702 | memcpy(export->state, req->state, req->state_sz); | 758 | memcpy(export->state, req->state, req->state_sz); |
703 | memcpy(export->cache, req->cache, cache_sz); | 759 | memcpy(export->cache, req->cache, HASH_CACHE_SIZE); |
704 | 760 | ||
705 | return 0; | 761 | return 0; |
706 | } | 762 | } |
707 | 763 | ||
708 | static int safexcel_ahash_import(struct ahash_request *areq, const void *in) | 764 | static int safexcel_ahash_import(struct ahash_request *areq, const void *in) |
709 | { | 765 | { |
710 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | ||
711 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | 766 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
712 | const struct safexcel_ahash_export_state *export = in; | 767 | const struct safexcel_ahash_export_state *export = in; |
713 | u32 cache_sz; | ||
714 | int ret; | 768 | int ret; |
715 | 769 | ||
716 | ret = crypto_ahash_init(areq); | 770 | ret = crypto_ahash_init(areq); |
717 | if (ret) | 771 | if (ret) |
718 | return ret; | 772 | return ret; |
719 | 773 | ||
720 | cache_sz = crypto_ahash_blocksize(ahash); | 774 | req->len = export->len; |
721 | if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) | 775 | req->processed = export->processed; |
722 | cache_sz <<= 1; | ||
723 | |||
724 | req->len[0] = export->len[0]; | ||
725 | req->len[1] = export->len[1]; | ||
726 | req->processed[0] = export->processed[0]; | ||
727 | req->processed[1] = export->processed[1]; | ||
728 | 776 | ||
729 | req->digest = export->digest; | 777 | req->digest = export->digest; |
730 | 778 | ||
731 | memcpy(req->cache, export->cache, cache_sz); | 779 | memcpy(req->cache, export->cache, HASH_CACHE_SIZE); |
732 | memcpy(req->state, export->state, req->state_sz); | 780 | memcpy(req->state, export->state, req->state_sz); |
733 | 781 | ||
734 | return 0; | 782 | return 0; |
@@ -757,15 +805,10 @@ static int safexcel_sha1_init(struct ahash_request *areq) | |||
757 | 805 | ||
758 | memset(req, 0, sizeof(*req)); | 806 | memset(req, 0, sizeof(*req)); |
759 | 807 | ||
760 | req->state[0] = SHA1_H0; | ||
761 | req->state[1] = SHA1_H1; | ||
762 | req->state[2] = SHA1_H2; | ||
763 | req->state[3] = SHA1_H3; | ||
764 | req->state[4] = SHA1_H4; | ||
765 | |||
766 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1; | 808 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1; |
767 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | 809 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; |
768 | req->state_sz = SHA1_DIGEST_SIZE; | 810 | req->state_sz = SHA1_DIGEST_SIZE; |
811 | req->block_sz = SHA1_BLOCK_SIZE; | ||
769 | 812 | ||
770 | return 0; | 813 | return 0; |
771 | } | 814 | } |
@@ -802,7 +845,7 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm) | |||
802 | 845 | ||
803 | struct safexcel_alg_template safexcel_alg_sha1 = { | 846 | struct safexcel_alg_template safexcel_alg_sha1 = { |
804 | .type = SAFEXCEL_ALG_TYPE_AHASH, | 847 | .type = SAFEXCEL_ALG_TYPE_AHASH, |
805 | .engines = EIP97IES | EIP197B | EIP197D, | 848 | .algo_mask = SAFEXCEL_ALG_SHA1, |
806 | .alg.ahash = { | 849 | .alg.ahash = { |
807 | .init = safexcel_sha1_init, | 850 | .init = safexcel_sha1_init, |
808 | .update = safexcel_ahash_update, | 851 | .update = safexcel_ahash_update, |
@@ -817,7 +860,7 @@ struct safexcel_alg_template safexcel_alg_sha1 = { | |||
817 | .base = { | 860 | .base = { |
818 | .cra_name = "sha1", | 861 | .cra_name = "sha1", |
819 | .cra_driver_name = "safexcel-sha1", | 862 | .cra_driver_name = "safexcel-sha1", |
820 | .cra_priority = 300, | 863 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
821 | .cra_flags = CRYPTO_ALG_ASYNC | | 864 | .cra_flags = CRYPTO_ALG_ASYNC | |
822 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 865 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
823 | .cra_blocksize = SHA1_BLOCK_SIZE, | 866 | .cra_blocksize = SHA1_BLOCK_SIZE, |
@@ -832,10 +875,23 @@ struct safexcel_alg_template safexcel_alg_sha1 = { | |||
832 | 875 | ||
833 | static int safexcel_hmac_sha1_init(struct ahash_request *areq) | 876 | static int safexcel_hmac_sha1_init(struct ahash_request *areq) |
834 | { | 877 | { |
878 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | ||
835 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | 879 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
836 | 880 | ||
837 | safexcel_sha1_init(areq); | 881 | memset(req, 0, sizeof(*req)); |
838 | req->digest = CONTEXT_CONTROL_DIGEST_HMAC; | 882 | |
883 | /* Start from ipad precompute */ | ||
884 | memcpy(req->state, ctx->ipad, SHA1_DIGEST_SIZE); | ||
885 | /* Already processed the key^ipad part now! */ | ||
886 | req->len = SHA1_BLOCK_SIZE; | ||
887 | req->processed = SHA1_BLOCK_SIZE; | ||
888 | |||
889 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1; | ||
890 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | ||
891 | req->state_sz = SHA1_DIGEST_SIZE; | ||
892 | req->block_sz = SHA1_BLOCK_SIZE; | ||
893 | req->hmac = true; | ||
894 | |||
839 | return 0; | 895 | return 0; |
840 | } | 896 | } |
841 | 897 | ||
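The reworked init above captures the driver's general HMAC scheme: req->state is seeded with the precomputed inner digest of K XOR ipad, and that block is counted in both len and processed, so update() continues the inner hash directly. The later req->processed == req->block_sz test is what detects a request where nothing beyond the key block was hashed, triggering the zero-length special case.
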
@@ -1004,21 +1060,16 @@ static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
1004 | struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); | 1060 | struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); |
1005 | struct safexcel_crypto_priv *priv = ctx->priv; | 1061 | struct safexcel_crypto_priv *priv = ctx->priv; |
1006 | struct safexcel_ahash_export_state istate, ostate; | 1062 | struct safexcel_ahash_export_state istate, ostate; |
1007 | int ret, i; | 1063 | int ret; |
1008 | 1064 | ||
1009 | ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate); | 1065 | ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate); |
1010 | if (ret) | 1066 | if (ret) |
1011 | return ret; | 1067 | return ret; |
1012 | 1068 | ||
1013 | if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr) { | 1069 | if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr && |
1014 | for (i = 0; i < state_sz / sizeof(u32); i++) { | 1070 | (memcmp(ctx->ipad, istate.state, state_sz) || |
1015 | if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || | 1071 | memcmp(ctx->opad, ostate.state, state_sz))) |
1016 | ctx->opad[i] != le32_to_cpu(ostate.state[i])) { | 1072 | ctx->base.needs_inv = true; |
1017 | ctx->base.needs_inv = true; | ||
1018 | break; | ||
1019 | } | ||
1020 | } | ||
1021 | } | ||
1022 | 1073 | ||
1023 | memcpy(ctx->ipad, &istate.state, state_sz); | 1074 | memcpy(ctx->ipad, &istate.state, state_sz); |
1024 | memcpy(ctx->opad, &ostate.state, state_sz); | 1075 | memcpy(ctx->opad, &ostate.state, state_sz); |
@@ -1035,7 +1086,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
1035 | 1086 | ||
1036 | struct safexcel_alg_template safexcel_alg_hmac_sha1 = { | 1087 | struct safexcel_alg_template safexcel_alg_hmac_sha1 = { |
1037 | .type = SAFEXCEL_ALG_TYPE_AHASH, | 1088 | .type = SAFEXCEL_ALG_TYPE_AHASH, |
1038 | .engines = EIP97IES | EIP197B | EIP197D, | 1089 | .algo_mask = SAFEXCEL_ALG_SHA1, |
1039 | .alg.ahash = { | 1090 | .alg.ahash = { |
1040 | .init = safexcel_hmac_sha1_init, | 1091 | .init = safexcel_hmac_sha1_init, |
1041 | .update = safexcel_ahash_update, | 1092 | .update = safexcel_ahash_update, |
@@ -1051,7 +1102,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha1 = { | |||
1051 | .base = { | 1102 | .base = { |
1052 | .cra_name = "hmac(sha1)", | 1103 | .cra_name = "hmac(sha1)", |
1053 | .cra_driver_name = "safexcel-hmac-sha1", | 1104 | .cra_driver_name = "safexcel-hmac-sha1", |
1054 | .cra_priority = 300, | 1105 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1055 | .cra_flags = CRYPTO_ALG_ASYNC | | 1106 | .cra_flags = CRYPTO_ALG_ASYNC | |
1056 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1107 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1057 | .cra_blocksize = SHA1_BLOCK_SIZE, | 1108 | .cra_blocksize = SHA1_BLOCK_SIZE, |
@@ -1071,18 +1122,10 @@ static int safexcel_sha256_init(struct ahash_request *areq) | |||
1071 | 1122 | ||
1072 | memset(req, 0, sizeof(*req)); | 1123 | memset(req, 0, sizeof(*req)); |
1073 | 1124 | ||
1074 | req->state[0] = SHA256_H0; | ||
1075 | req->state[1] = SHA256_H1; | ||
1076 | req->state[2] = SHA256_H2; | ||
1077 | req->state[3] = SHA256_H3; | ||
1078 | req->state[4] = SHA256_H4; | ||
1079 | req->state[5] = SHA256_H5; | ||
1080 | req->state[6] = SHA256_H6; | ||
1081 | req->state[7] = SHA256_H7; | ||
1082 | |||
1083 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256; | 1125 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256; |
1084 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | 1126 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; |
1085 | req->state_sz = SHA256_DIGEST_SIZE; | 1127 | req->state_sz = SHA256_DIGEST_SIZE; |
1128 | req->block_sz = SHA256_BLOCK_SIZE; | ||
1086 | 1129 | ||
1087 | return 0; | 1130 | return 0; |
1088 | } | 1131 | } |
@@ -1099,7 +1142,7 @@ static int safexcel_sha256_digest(struct ahash_request *areq) | |||
1099 | 1142 | ||
1100 | struct safexcel_alg_template safexcel_alg_sha256 = { | 1143 | struct safexcel_alg_template safexcel_alg_sha256 = { |
1101 | .type = SAFEXCEL_ALG_TYPE_AHASH, | 1144 | .type = SAFEXCEL_ALG_TYPE_AHASH, |
1102 | .engines = EIP97IES | EIP197B | EIP197D, | 1145 | .algo_mask = SAFEXCEL_ALG_SHA2_256, |
1103 | .alg.ahash = { | 1146 | .alg.ahash = { |
1104 | .init = safexcel_sha256_init, | 1147 | .init = safexcel_sha256_init, |
1105 | .update = safexcel_ahash_update, | 1148 | .update = safexcel_ahash_update, |
@@ -1114,7 +1157,7 @@ struct safexcel_alg_template safexcel_alg_sha256 = { | |||
1114 | .base = { | 1157 | .base = { |
1115 | .cra_name = "sha256", | 1158 | .cra_name = "sha256", |
1116 | .cra_driver_name = "safexcel-sha256", | 1159 | .cra_driver_name = "safexcel-sha256", |
1117 | .cra_priority = 300, | 1160 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1118 | .cra_flags = CRYPTO_ALG_ASYNC | | 1161 | .cra_flags = CRYPTO_ALG_ASYNC | |
1119 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1162 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1120 | .cra_blocksize = SHA256_BLOCK_SIZE, | 1163 | .cra_blocksize = SHA256_BLOCK_SIZE, |
@@ -1134,18 +1177,10 @@ static int safexcel_sha224_init(struct ahash_request *areq) | |||
1134 | 1177 | ||
1135 | memset(req, 0, sizeof(*req)); | 1178 | memset(req, 0, sizeof(*req)); |
1136 | 1179 | ||
1137 | req->state[0] = SHA224_H0; | ||
1138 | req->state[1] = SHA224_H1; | ||
1139 | req->state[2] = SHA224_H2; | ||
1140 | req->state[3] = SHA224_H3; | ||
1141 | req->state[4] = SHA224_H4; | ||
1142 | req->state[5] = SHA224_H5; | ||
1143 | req->state[6] = SHA224_H6; | ||
1144 | req->state[7] = SHA224_H7; | ||
1145 | |||
1146 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224; | 1180 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224; |
1147 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | 1181 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; |
1148 | req->state_sz = SHA256_DIGEST_SIZE; | 1182 | req->state_sz = SHA256_DIGEST_SIZE; |
1183 | req->block_sz = SHA256_BLOCK_SIZE; | ||
1149 | 1184 | ||
1150 | return 0; | 1185 | return 0; |
1151 | } | 1186 | } |
@@ -1162,7 +1197,7 @@ static int safexcel_sha224_digest(struct ahash_request *areq) | |||
1162 | 1197 | ||
1163 | struct safexcel_alg_template safexcel_alg_sha224 = { | 1198 | struct safexcel_alg_template safexcel_alg_sha224 = { |
1164 | .type = SAFEXCEL_ALG_TYPE_AHASH, | 1199 | .type = SAFEXCEL_ALG_TYPE_AHASH, |
1165 | .engines = EIP97IES | EIP197B | EIP197D, | 1200 | .algo_mask = SAFEXCEL_ALG_SHA2_256, |
1166 | .alg.ahash = { | 1201 | .alg.ahash = { |
1167 | .init = safexcel_sha224_init, | 1202 | .init = safexcel_sha224_init, |
1168 | .update = safexcel_ahash_update, | 1203 | .update = safexcel_ahash_update, |
@@ -1177,7 +1212,7 @@ struct safexcel_alg_template safexcel_alg_sha224 = { | |||
1177 | .base = { | 1212 | .base = { |
1178 | .cra_name = "sha224", | 1213 | .cra_name = "sha224", |
1179 | .cra_driver_name = "safexcel-sha224", | 1214 | .cra_driver_name = "safexcel-sha224", |
1180 | .cra_priority = 300, | 1215 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1181 | .cra_flags = CRYPTO_ALG_ASYNC | | 1216 | .cra_flags = CRYPTO_ALG_ASYNC | |
1182 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1217 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1183 | .cra_blocksize = SHA224_BLOCK_SIZE, | 1218 | .cra_blocksize = SHA224_BLOCK_SIZE, |
@@ -1199,10 +1234,23 @@ static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
1199 | 1234 | ||
1200 | static int safexcel_hmac_sha224_init(struct ahash_request *areq) | 1235 | static int safexcel_hmac_sha224_init(struct ahash_request *areq) |
1201 | { | 1236 | { |
1237 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | ||
1202 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | 1238 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
1203 | 1239 | ||
1204 | safexcel_sha224_init(areq); | 1240 | memset(req, 0, sizeof(*req)); |
1205 | req->digest = CONTEXT_CONTROL_DIGEST_HMAC; | 1241 | |
1242 | /* Start from ipad precompute */ | ||
1243 | memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE); | ||
1244 | /* Already processed the key^ipad part now! */ | ||
1245 | req->len = SHA256_BLOCK_SIZE; | ||
1246 | req->processed = SHA256_BLOCK_SIZE; | ||
1247 | |||
1248 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224; | ||
1249 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | ||
1250 | req->state_sz = SHA256_DIGEST_SIZE; | ||
1251 | req->block_sz = SHA256_BLOCK_SIZE; | ||
1252 | req->hmac = true; | ||
1253 | |||
1206 | return 0; | 1254 | return 0; |
1207 | } | 1255 | } |
1208 | 1256 | ||
@@ -1218,7 +1266,7 @@ static int safexcel_hmac_sha224_digest(struct ahash_request *areq) | |||
1218 | 1266 | ||
1219 | struct safexcel_alg_template safexcel_alg_hmac_sha224 = { | 1267 | struct safexcel_alg_template safexcel_alg_hmac_sha224 = { |
1220 | .type = SAFEXCEL_ALG_TYPE_AHASH, | 1268 | .type = SAFEXCEL_ALG_TYPE_AHASH, |
1221 | .engines = EIP97IES | EIP197B | EIP197D, | 1269 | .algo_mask = SAFEXCEL_ALG_SHA2_256, |
1222 | .alg.ahash = { | 1270 | .alg.ahash = { |
1223 | .init = safexcel_hmac_sha224_init, | 1271 | .init = safexcel_hmac_sha224_init, |
1224 | .update = safexcel_ahash_update, | 1272 | .update = safexcel_ahash_update, |
@@ -1234,7 +1282,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha224 = { | |||
1234 | .base = { | 1282 | .base = { |
1235 | .cra_name = "hmac(sha224)", | 1283 | .cra_name = "hmac(sha224)", |
1236 | .cra_driver_name = "safexcel-hmac-sha224", | 1284 | .cra_driver_name = "safexcel-hmac-sha224", |
1237 | .cra_priority = 300, | 1285 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1238 | .cra_flags = CRYPTO_ALG_ASYNC | | 1286 | .cra_flags = CRYPTO_ALG_ASYNC | |
1239 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1287 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1240 | .cra_blocksize = SHA224_BLOCK_SIZE, | 1288 | .cra_blocksize = SHA224_BLOCK_SIZE, |
@@ -1256,10 +1304,23 @@ static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
1256 | 1304 | ||
1257 | static int safexcel_hmac_sha256_init(struct ahash_request *areq) | 1305 | static int safexcel_hmac_sha256_init(struct ahash_request *areq) |
1258 | { | 1306 | { |
1307 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | ||
1259 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | 1308 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
1260 | 1309 | ||
1261 | safexcel_sha256_init(areq); | 1310 | memset(req, 0, sizeof(*req)); |
1262 | req->digest = CONTEXT_CONTROL_DIGEST_HMAC; | 1311 | |
1312 | /* Start from ipad precompute */ | ||
1313 | memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE); | ||
1314 | /* Already processed the key^ipad part now! */ | ||
1315 | req->len = SHA256_BLOCK_SIZE; | ||
1316 | req->processed = SHA256_BLOCK_SIZE; | ||
1317 | |||
1318 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256; | ||
1319 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | ||
1320 | req->state_sz = SHA256_DIGEST_SIZE; | ||
1321 | req->block_sz = SHA256_BLOCK_SIZE; | ||
1322 | req->hmac = true; | ||
1323 | |||
1263 | return 0; | 1324 | return 0; |
1264 | } | 1325 | } |
1265 | 1326 | ||
@@ -1275,7 +1336,7 @@ static int safexcel_hmac_sha256_digest(struct ahash_request *areq) | |||
1275 | 1336 | ||
1276 | struct safexcel_alg_template safexcel_alg_hmac_sha256 = { | 1337 | struct safexcel_alg_template safexcel_alg_hmac_sha256 = { |
1277 | .type = SAFEXCEL_ALG_TYPE_AHASH, | 1338 | .type = SAFEXCEL_ALG_TYPE_AHASH, |
1278 | .engines = EIP97IES | EIP197B | EIP197D, | 1339 | .algo_mask = SAFEXCEL_ALG_SHA2_256, |
1279 | .alg.ahash = { | 1340 | .alg.ahash = { |
1280 | .init = safexcel_hmac_sha256_init, | 1341 | .init = safexcel_hmac_sha256_init, |
1281 | .update = safexcel_ahash_update, | 1342 | .update = safexcel_ahash_update, |
@@ -1291,7 +1352,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha256 = { | |||
1291 | .base = { | 1352 | .base = { |
1292 | .cra_name = "hmac(sha256)", | 1353 | .cra_name = "hmac(sha256)", |
1293 | .cra_driver_name = "safexcel-hmac-sha256", | 1354 | .cra_driver_name = "safexcel-hmac-sha256", |
1294 | .cra_priority = 300, | 1355 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1295 | .cra_flags = CRYPTO_ALG_ASYNC | | 1356 | .cra_flags = CRYPTO_ALG_ASYNC | |
1296 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1357 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1297 | .cra_blocksize = SHA256_BLOCK_SIZE, | 1358 | .cra_blocksize = SHA256_BLOCK_SIZE, |
@@ -1311,26 +1372,10 @@ static int safexcel_sha512_init(struct ahash_request *areq) | |||
1311 | 1372 | ||
1312 | memset(req, 0, sizeof(*req)); | 1373 | memset(req, 0, sizeof(*req)); |
1313 | 1374 | ||
1314 | req->state[0] = lower_32_bits(SHA512_H0); | ||
1315 | req->state[1] = upper_32_bits(SHA512_H0); | ||
1316 | req->state[2] = lower_32_bits(SHA512_H1); | ||
1317 | req->state[3] = upper_32_bits(SHA512_H1); | ||
1318 | req->state[4] = lower_32_bits(SHA512_H2); | ||
1319 | req->state[5] = upper_32_bits(SHA512_H2); | ||
1320 | req->state[6] = lower_32_bits(SHA512_H3); | ||
1321 | req->state[7] = upper_32_bits(SHA512_H3); | ||
1322 | req->state[8] = lower_32_bits(SHA512_H4); | ||
1323 | req->state[9] = upper_32_bits(SHA512_H4); | ||
1324 | req->state[10] = lower_32_bits(SHA512_H5); | ||
1325 | req->state[11] = upper_32_bits(SHA512_H5); | ||
1326 | req->state[12] = lower_32_bits(SHA512_H6); | ||
1327 | req->state[13] = upper_32_bits(SHA512_H6); | ||
1328 | req->state[14] = lower_32_bits(SHA512_H7); | ||
1329 | req->state[15] = upper_32_bits(SHA512_H7); | ||
1330 | |||
1331 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512; | 1375 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512; |
1332 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | 1376 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; |
1333 | req->state_sz = SHA512_DIGEST_SIZE; | 1377 | req->state_sz = SHA512_DIGEST_SIZE; |
1378 | req->block_sz = SHA512_BLOCK_SIZE; | ||
1334 | 1379 | ||
1335 | return 0; | 1380 | return 0; |
1336 | } | 1381 | } |
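The sixteen deleted assignments interleaved each 64-bit SHA-512 initial value into the 32-bit state array, low word first. Written as a loop, the removed seeding was equivalent to this sketch:

	/* Equivalent of the deleted SHA-512 state seeding (sketch only). */
	static const u64 sha512_ivs[8] = {
		SHA512_H0, SHA512_H1, SHA512_H2, SHA512_H3,
		SHA512_H4, SHA512_H5, SHA512_H6, SHA512_H7,
	};
	int i;

	for (i = 0; i < 8; i++) {
		req->state[2 * i]     = lower_32_bits(sha512_ivs[i]);
		req->state[2 * i + 1] = upper_32_bits(sha512_ivs[i]);
	}

The SHA-384 hunk below drops the analogous block for SHA384_H0..H7.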
@@ -1347,7 +1392,7 @@ static int safexcel_sha512_digest(struct ahash_request *areq) | |||
1347 | 1392 | ||
1348 | struct safexcel_alg_template safexcel_alg_sha512 = { | 1393 | struct safexcel_alg_template safexcel_alg_sha512 = { |
1349 | .type = SAFEXCEL_ALG_TYPE_AHASH, | 1394 | .type = SAFEXCEL_ALG_TYPE_AHASH, |
1350 | .engines = EIP97IES | EIP197B | EIP197D, | 1395 | .algo_mask = SAFEXCEL_ALG_SHA2_512, |
1351 | .alg.ahash = { | 1396 | .alg.ahash = { |
1352 | .init = safexcel_sha512_init, | 1397 | .init = safexcel_sha512_init, |
1353 | .update = safexcel_ahash_update, | 1398 | .update = safexcel_ahash_update, |
@@ -1362,7 +1407,7 @@ struct safexcel_alg_template safexcel_alg_sha512 = { | |||
1362 | .base = { | 1407 | .base = { |
1363 | .cra_name = "sha512", | 1408 | .cra_name = "sha512", |
1364 | .cra_driver_name = "safexcel-sha512", | 1409 | .cra_driver_name = "safexcel-sha512", |
1365 | .cra_priority = 300, | 1410 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1366 | .cra_flags = CRYPTO_ALG_ASYNC | | 1411 | .cra_flags = CRYPTO_ALG_ASYNC | |
1367 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1412 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1368 | .cra_blocksize = SHA512_BLOCK_SIZE, | 1413 | .cra_blocksize = SHA512_BLOCK_SIZE, |
@@ -1382,26 +1427,10 @@ static int safexcel_sha384_init(struct ahash_request *areq) | |||
1382 | 1427 | ||
1383 | memset(req, 0, sizeof(*req)); | 1428 | memset(req, 0, sizeof(*req)); |
1384 | 1429 | ||
1385 | req->state[0] = lower_32_bits(SHA384_H0); | ||
1386 | req->state[1] = upper_32_bits(SHA384_H0); | ||
1387 | req->state[2] = lower_32_bits(SHA384_H1); | ||
1388 | req->state[3] = upper_32_bits(SHA384_H1); | ||
1389 | req->state[4] = lower_32_bits(SHA384_H2); | ||
1390 | req->state[5] = upper_32_bits(SHA384_H2); | ||
1391 | req->state[6] = lower_32_bits(SHA384_H3); | ||
1392 | req->state[7] = upper_32_bits(SHA384_H3); | ||
1393 | req->state[8] = lower_32_bits(SHA384_H4); | ||
1394 | req->state[9] = upper_32_bits(SHA384_H4); | ||
1395 | req->state[10] = lower_32_bits(SHA384_H5); | ||
1396 | req->state[11] = upper_32_bits(SHA384_H5); | ||
1397 | req->state[12] = lower_32_bits(SHA384_H6); | ||
1398 | req->state[13] = upper_32_bits(SHA384_H6); | ||
1399 | req->state[14] = lower_32_bits(SHA384_H7); | ||
1400 | req->state[15] = upper_32_bits(SHA384_H7); | ||
1401 | |||
1402 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384; | 1430 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384; |
1403 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | 1431 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; |
1404 | req->state_sz = SHA512_DIGEST_SIZE; | 1432 | req->state_sz = SHA512_DIGEST_SIZE; |
1433 | req->block_sz = SHA512_BLOCK_SIZE; | ||
1405 | 1434 | ||
1406 | return 0; | 1435 | return 0; |
1407 | } | 1436 | } |
@@ -1418,7 +1447,7 @@ static int safexcel_sha384_digest(struct ahash_request *areq) | |||
1418 | 1447 | ||
1419 | struct safexcel_alg_template safexcel_alg_sha384 = { | 1448 | struct safexcel_alg_template safexcel_alg_sha384 = { |
1420 | .type = SAFEXCEL_ALG_TYPE_AHASH, | 1449 | .type = SAFEXCEL_ALG_TYPE_AHASH, |
1421 | .engines = EIP97IES | EIP197B | EIP197D, | 1450 | .algo_mask = SAFEXCEL_ALG_SHA2_512, |
1422 | .alg.ahash = { | 1451 | .alg.ahash = { |
1423 | .init = safexcel_sha384_init, | 1452 | .init = safexcel_sha384_init, |
1424 | .update = safexcel_ahash_update, | 1453 | .update = safexcel_ahash_update, |
@@ -1433,7 +1462,7 @@ struct safexcel_alg_template safexcel_alg_sha384 = { | |||
1433 | .base = { | 1462 | .base = { |
1434 | .cra_name = "sha384", | 1463 | .cra_name = "sha384", |
1435 | .cra_driver_name = "safexcel-sha384", | 1464 | .cra_driver_name = "safexcel-sha384", |
1436 | .cra_priority = 300, | 1465 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1437 | .cra_flags = CRYPTO_ALG_ASYNC | | 1466 | .cra_flags = CRYPTO_ALG_ASYNC | |
1438 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1467 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1439 | .cra_blocksize = SHA384_BLOCK_SIZE, | 1468 | .cra_blocksize = SHA384_BLOCK_SIZE, |
@@ -1455,10 +1484,23 @@ static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
1455 | 1484 | ||
1456 | static int safexcel_hmac_sha512_init(struct ahash_request *areq) | 1485 | static int safexcel_hmac_sha512_init(struct ahash_request *areq) |
1457 | { | 1486 | { |
1487 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | ||
1458 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | 1488 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
1459 | 1489 | ||
1460 | safexcel_sha512_init(areq); | 1490 | memset(req, 0, sizeof(*req)); |
1461 | req->digest = CONTEXT_CONTROL_DIGEST_HMAC; | 1491 | |
1492 | /* Start from ipad precompute */ | ||
1493 | memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE); | ||
1494 | /* Already processed the key^ipad part now! */ | ||
1495 | req->len = SHA512_BLOCK_SIZE; | ||
1496 | req->processed = SHA512_BLOCK_SIZE; | ||
1497 | |||
1498 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512; | ||
1499 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | ||
1500 | req->state_sz = SHA512_DIGEST_SIZE; | ||
1501 | req->block_sz = SHA512_BLOCK_SIZE; | ||
1502 | req->hmac = true; | ||
1503 | |||
1462 | return 0; | 1504 | return 0; |
1463 | } | 1505 | } |
1464 | 1506 | ||
@@ -1474,7 +1516,7 @@ static int safexcel_hmac_sha512_digest(struct ahash_request *areq) | |||
1474 | 1516 | ||
1475 | struct safexcel_alg_template safexcel_alg_hmac_sha512 = { | 1517 | struct safexcel_alg_template safexcel_alg_hmac_sha512 = { |
1476 | .type = SAFEXCEL_ALG_TYPE_AHASH, | 1518 | .type = SAFEXCEL_ALG_TYPE_AHASH, |
1477 | .engines = EIP97IES | EIP197B | EIP197D, | 1519 | .algo_mask = SAFEXCEL_ALG_SHA2_512, |
1478 | .alg.ahash = { | 1520 | .alg.ahash = { |
1479 | .init = safexcel_hmac_sha512_init, | 1521 | .init = safexcel_hmac_sha512_init, |
1480 | .update = safexcel_ahash_update, | 1522 | .update = safexcel_ahash_update, |
@@ -1490,7 +1532,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha512 = { | |||
1490 | .base = { | 1532 | .base = { |
1491 | .cra_name = "hmac(sha512)", | 1533 | .cra_name = "hmac(sha512)", |
1492 | .cra_driver_name = "safexcel-hmac-sha512", | 1534 | .cra_driver_name = "safexcel-hmac-sha512", |
1493 | .cra_priority = 300, | 1535 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1494 | .cra_flags = CRYPTO_ALG_ASYNC | | 1536 | .cra_flags = CRYPTO_ALG_ASYNC | |
1495 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1537 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1496 | .cra_blocksize = SHA512_BLOCK_SIZE, | 1538 | .cra_blocksize = SHA512_BLOCK_SIZE, |
@@ -1512,10 +1554,23 @@ static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
1512 | 1554 | ||
1513 | static int safexcel_hmac_sha384_init(struct ahash_request *areq) | 1555 | static int safexcel_hmac_sha384_init(struct ahash_request *areq) |
1514 | { | 1556 | { |
1557 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | ||
1515 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | 1558 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
1516 | 1559 | ||
1517 | safexcel_sha384_init(areq); | 1560 | memset(req, 0, sizeof(*req)); |
1518 | req->digest = CONTEXT_CONTROL_DIGEST_HMAC; | 1561 | |
1562 | /* Start from ipad precompute */ | ||
1563 | memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE); | ||
1564 | /* Already processed the key^ipad part now! */ | ||
1565 | req->len = SHA512_BLOCK_SIZE; | ||
1566 | req->processed = SHA512_BLOCK_SIZE; | ||
1567 | |||
1568 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384; | ||
1569 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | ||
1570 | req->state_sz = SHA512_DIGEST_SIZE; | ||
1571 | req->block_sz = SHA512_BLOCK_SIZE; | ||
1572 | req->hmac = true; | ||
1573 | |||
1519 | return 0; | 1574 | return 0; |
1520 | } | 1575 | } |
1521 | 1576 | ||
@@ -1531,7 +1586,7 @@ static int safexcel_hmac_sha384_digest(struct ahash_request *areq) | |||
1531 | 1586 | ||
1532 | struct safexcel_alg_template safexcel_alg_hmac_sha384 = { | 1587 | struct safexcel_alg_template safexcel_alg_hmac_sha384 = { |
1533 | .type = SAFEXCEL_ALG_TYPE_AHASH, | 1588 | .type = SAFEXCEL_ALG_TYPE_AHASH, |
1534 | .engines = EIP97IES | EIP197B | EIP197D, | 1589 | .algo_mask = SAFEXCEL_ALG_SHA2_512, |
1535 | .alg.ahash = { | 1590 | .alg.ahash = { |
1536 | .init = safexcel_hmac_sha384_init, | 1591 | .init = safexcel_hmac_sha384_init, |
1537 | .update = safexcel_ahash_update, | 1592 | .update = safexcel_ahash_update, |
@@ -1547,7 +1602,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha384 = { | |||
1547 | .base = { | 1602 | .base = { |
1548 | .cra_name = "hmac(sha384)", | 1603 | .cra_name = "hmac(sha384)", |
1549 | .cra_driver_name = "safexcel-hmac-sha384", | 1604 | .cra_driver_name = "safexcel-hmac-sha384", |
1550 | .cra_priority = 300, | 1605 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1551 | .cra_flags = CRYPTO_ALG_ASYNC | | 1606 | .cra_flags = CRYPTO_ALG_ASYNC | |
1552 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1607 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1553 | .cra_blocksize = SHA384_BLOCK_SIZE, | 1608 | .cra_blocksize = SHA384_BLOCK_SIZE, |
@@ -1567,14 +1622,10 @@ static int safexcel_md5_init(struct ahash_request *areq) | |||
1567 | 1622 | ||
1568 | memset(req, 0, sizeof(*req)); | 1623 | memset(req, 0, sizeof(*req)); |
1569 | 1624 | ||
1570 | req->state[0] = MD5_H0; | ||
1571 | req->state[1] = MD5_H1; | ||
1572 | req->state[2] = MD5_H2; | ||
1573 | req->state[3] = MD5_H3; | ||
1574 | |||
1575 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5; | 1625 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5; |
1576 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | 1626 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; |
1577 | req->state_sz = MD5_DIGEST_SIZE; | 1627 | req->state_sz = MD5_DIGEST_SIZE; |
1628 | req->block_sz = MD5_HMAC_BLOCK_SIZE; | ||
1578 | 1629 | ||
1579 | return 0; | 1630 | return 0; |
1580 | } | 1631 | } |
@@ -1591,7 +1642,7 @@ static int safexcel_md5_digest(struct ahash_request *areq) | |||
1591 | 1642 | ||
1592 | struct safexcel_alg_template safexcel_alg_md5 = { | 1643 | struct safexcel_alg_template safexcel_alg_md5 = { |
1593 | .type = SAFEXCEL_ALG_TYPE_AHASH, | 1644 | .type = SAFEXCEL_ALG_TYPE_AHASH, |
1594 | .engines = EIP97IES | EIP197B | EIP197D, | 1645 | .algo_mask = SAFEXCEL_ALG_MD5, |
1595 | .alg.ahash = { | 1646 | .alg.ahash = { |
1596 | .init = safexcel_md5_init, | 1647 | .init = safexcel_md5_init, |
1597 | .update = safexcel_ahash_update, | 1648 | .update = safexcel_ahash_update, |
@@ -1606,7 +1657,7 @@ struct safexcel_alg_template safexcel_alg_md5 = { | |||
1606 | .base = { | 1657 | .base = { |
1607 | .cra_name = "md5", | 1658 | .cra_name = "md5", |
1608 | .cra_driver_name = "safexcel-md5", | 1659 | .cra_driver_name = "safexcel-md5", |
1609 | .cra_priority = 300, | 1660 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1610 | .cra_flags = CRYPTO_ALG_ASYNC | | 1661 | .cra_flags = CRYPTO_ALG_ASYNC | |
1611 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1662 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1612 | .cra_blocksize = MD5_HMAC_BLOCK_SIZE, | 1663 | .cra_blocksize = MD5_HMAC_BLOCK_SIZE, |
@@ -1621,10 +1672,24 @@ struct safexcel_alg_template safexcel_alg_md5 = { | |||
1621 | 1672 | ||
1622 | static int safexcel_hmac_md5_init(struct ahash_request *areq) | 1673 | static int safexcel_hmac_md5_init(struct ahash_request *areq) |
1623 | { | 1674 | { |
1675 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | ||
1624 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | 1676 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
1625 | 1677 | ||
1626 | safexcel_md5_init(areq); | 1678 | memset(req, 0, sizeof(*req)); |
1627 | req->digest = CONTEXT_CONTROL_DIGEST_HMAC; | 1679 | |
1680 | /* Start from ipad precompute */ | ||
1681 | memcpy(req->state, ctx->ipad, MD5_DIGEST_SIZE); | ||
1682 | /* Already processed the key^ipad part now! */ | ||
1683 | req->len = MD5_HMAC_BLOCK_SIZE; | ||
1684 | req->processed = MD5_HMAC_BLOCK_SIZE; | ||
1685 | |||
1686 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5; | ||
1687 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | ||
1688 | req->state_sz = MD5_DIGEST_SIZE; | ||
1689 | req->block_sz = MD5_HMAC_BLOCK_SIZE; | ||
1690 | req->len_is_le = true; /* MD5 is little endian! ... */ | ||
1691 | req->hmac = true; | ||
1692 | |||
1628 | return 0; | 1693 | return 0; |
1629 | } | 1694 | } |
1630 | 1695 | ||
@@ -1647,7 +1712,7 @@ static int safexcel_hmac_md5_digest(struct ahash_request *areq) | |||
1647 | 1712 | ||
1648 | struct safexcel_alg_template safexcel_alg_hmac_md5 = { | 1713 | struct safexcel_alg_template safexcel_alg_hmac_md5 = { |
1649 | .type = SAFEXCEL_ALG_TYPE_AHASH, | 1714 | .type = SAFEXCEL_ALG_TYPE_AHASH, |
1650 | .engines = EIP97IES | EIP197B | EIP197D, | 1715 | .algo_mask = SAFEXCEL_ALG_MD5, |
1651 | .alg.ahash = { | 1716 | .alg.ahash = { |
1652 | .init = safexcel_hmac_md5_init, | 1717 | .init = safexcel_hmac_md5_init, |
1653 | .update = safexcel_ahash_update, | 1718 | .update = safexcel_ahash_update, |
@@ -1663,7 +1728,7 @@ struct safexcel_alg_template safexcel_alg_hmac_md5 = { | |||
1663 | .base = { | 1728 | .base = { |
1664 | .cra_name = "hmac(md5)", | 1729 | .cra_name = "hmac(md5)", |
1665 | .cra_driver_name = "safexcel-hmac-md5", | 1730 | .cra_driver_name = "safexcel-hmac-md5", |
1666 | .cra_priority = 300, | 1731 | .cra_priority = SAFEXCEL_CRA_PRIORITY, |
1667 | .cra_flags = CRYPTO_ALG_ASYNC | | 1732 | .cra_flags = CRYPTO_ALG_ASYNC | |
1668 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1733 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1669 | .cra_blocksize = MD5_HMAC_BLOCK_SIZE, | 1734 | .cra_blocksize = MD5_HMAC_BLOCK_SIZE, |
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c index 142bc3f5c45c..0f269b89cfd4 100644 --- a/drivers/crypto/inside-secure/safexcel_ring.c +++ b/drivers/crypto/inside-secure/safexcel_ring.c | |||
@@ -137,7 +137,13 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr | |||
137 | struct safexcel_token *token = | 137 | struct safexcel_token *token = |
138 | (struct safexcel_token *)cdesc->control_data.token; | 138 | (struct safexcel_token *)cdesc->control_data.token; |
139 | 139 | ||
140 | cdesc->control_data.packet_length = full_data_len; | 140 | /* |
141 | * Note that the length here MUST be >0 or else the EIP(1)97 | ||
142 | * may hang. Newer EIP197 firmware actually incorporates this | ||
143 | * fix already, but that doesn't help the EIP97 and we may | ||
144 | * also be running older firmware. | ||
145 | */ | ||
146 | cdesc->control_data.packet_length = full_data_len ?: 1; | ||
141 | cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE | | 147 | cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE | |
142 | EIP197_OPTION_64BIT_CTX | | 148 | EIP197_OPTION_64BIT_CTX | |
143 | EIP197_OPTION_CTX_CTRL_IN_CMD; | 149 | EIP197_OPTION_CTX_CTRL_IN_CMD; |
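full_data_len ?: 1 uses the GNU C conditional with an omitted middle operand: x ?: y yields x when x is nonzero and y otherwise, evaluating x only once. Spelled out:

	/* Equivalent without the GNU ?: extension: */
	cdesc->control_data.packet_length = full_data_len ? full_data_len : 1;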
@@ -145,7 +151,8 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr | |||
145 | (lower_32_bits(context) & GENMASK(31, 2)) >> 2; | 151 | (lower_32_bits(context) & GENMASK(31, 2)) >> 2; |
146 | cdesc->control_data.context_hi = upper_32_bits(context); | 152 | cdesc->control_data.context_hi = upper_32_bits(context); |
147 | 153 | ||
148 | if (priv->version == EIP197B || priv->version == EIP197D) | 154 | if (priv->version == EIP197B_MRVL || |
155 | priv->version == EIP197D_MRVL) | ||
149 | cdesc->control_data.options |= EIP197_OPTION_RC_AUTO; | 156 | cdesc->control_data.options |= EIP197_OPTION_RC_AUTO; |
150 | 157 | ||
151 | /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */ | 158 | /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */ |
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index acedafe3fa98..9181523ba760 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | 18 | ||
19 | #include <crypto/ctr.h> | 19 | #include <crypto/ctr.h> |
20 | #include <crypto/des.h> | 20 | #include <crypto/internal/des.h> |
21 | #include <crypto/aes.h> | 21 | #include <crypto/aes.h> |
22 | #include <crypto/hmac.h> | 22 | #include <crypto/hmac.h> |
23 | #include <crypto/sha.h> | 23 | #include <crypto/sha.h> |
@@ -756,10 +756,7 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt, | |||
756 | } | 756 | } |
757 | cipher_cfg |= keylen_cfg; | 757 | cipher_cfg |= keylen_cfg; |
758 | } else { | 758 | } else { |
759 | u32 tmp[DES_EXPKEY_WORDS]; | 759 | crypto_des_verify_key(tfm, key); |
760 | if (des_ekey(tmp, key) == 0) { | ||
761 | *flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
762 | } | ||
763 | } | 760 | } |
764 | /* write cfg word to cryptinfo */ | 761 | /* write cfg word to cryptinfo */ |
765 | *(u32*)cinfo = cpu_to_be32(cipher_cfg); | 762 | *(u32*)cinfo = cpu_to_be32(cipher_cfg); |
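crypto_des_verify_key() is one of the new helpers from <crypto/internal/des.h> that this series switches drivers to. The gist, lightly abridged as a sketch of the helper from this era: a weak key only fails when the user explicitly forbade weak keys.

	static inline int crypto_des_verify_key(struct crypto_tfm *tfm,
						const u8 *key)
	{
		struct des_ctx tmp;
		int err;

		err = des_expand_key(&tmp, key, DES_KEY_SIZE);
		if (err == -ENOKEY) {	/* weak key detected */
			if (crypto_tfm_get_flags(tfm) &
			    CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)
				err = -EINVAL;
			else
				err = 0;
		}
		memzero_explicit(&tmp, sizeof(tmp));
		return err;
	}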
@@ -851,14 +848,8 @@ out: | |||
851 | static int ablk_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | 848 | static int ablk_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key, |
852 | unsigned int key_len) | 849 | unsigned int key_len) |
853 | { | 850 | { |
854 | u32 flags = crypto_ablkcipher_get_flags(tfm); | 851 | return verify_ablkcipher_des3_key(tfm, key) ?: |
855 | int err; | 852 | ablk_setkey(tfm, key, key_len); |
856 | |||
857 | err = __des3_verify_key(&flags, key); | ||
858 | if (unlikely(err)) | ||
859 | crypto_ablkcipher_set_flags(tfm, flags); | ||
860 | |||
861 | return ablk_setkey(tfm, key, key_len); | ||
862 | } | 853 | } |
863 | 854 | ||
864 | static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | 855 | static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key, |
@@ -1181,7 +1172,6 @@ static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
1181 | unsigned int keylen) | 1172 | unsigned int keylen) |
1182 | { | 1173 | { |
1183 | struct ixp_ctx *ctx = crypto_aead_ctx(tfm); | 1174 | struct ixp_ctx *ctx = crypto_aead_ctx(tfm); |
1184 | u32 flags = CRYPTO_TFM_RES_BAD_KEY_LEN; | ||
1185 | struct crypto_authenc_keys keys; | 1175 | struct crypto_authenc_keys keys; |
1186 | int err; | 1176 | int err; |
1187 | 1177 | ||
@@ -1193,12 +1183,8 @@ static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
1193 | if (keys.authkeylen > sizeof(ctx->authkey)) | 1183 | if (keys.authkeylen > sizeof(ctx->authkey)) |
1194 | goto badkey; | 1184 | goto badkey; |
1195 | 1185 | ||
1196 | if (keys.enckeylen != DES3_EDE_KEY_SIZE) | 1186 | err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen); |
1197 | goto badkey; | 1187 | if (err) |
1198 | |||
1199 | flags = crypto_aead_get_flags(tfm); | ||
1200 | err = __des3_verify_key(&flags, keys.enckey); | ||
1201 | if (unlikely(err)) | ||
1202 | goto badkey; | 1188 | goto badkey; |
1203 | 1189 | ||
1204 | memcpy(ctx->authkey, keys.authkey, keys.authkeylen); | 1190 | memcpy(ctx->authkey, keys.authkey, keys.authkeylen); |
@@ -1209,7 +1195,6 @@ static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
1209 | memzero_explicit(&keys, sizeof(keys)); | 1195 | memzero_explicit(&keys, sizeof(keys)); |
1210 | return aead_setup(tfm, crypto_aead_authsize(tfm)); | 1196 | return aead_setup(tfm, crypto_aead_authsize(tfm)); |
1211 | badkey: | 1197 | badkey: |
1212 | crypto_aead_set_flags(tfm, flags); | ||
1213 | memzero_explicit(&keys, sizeof(keys)); | 1198 | memzero_explicit(&keys, sizeof(keys)); |
1214 | return err; | 1199 | return err; |
1215 | } | 1200 | } |
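verify_aead_des3_key() bundles the length check the removed lines did by hand (keylen must equal DES3_EDE_KEY_SIZE) with the 3DES key-uniqueness rule. The core of that rule, abridged as a sketch from the <crypto/internal/des.h> helpers: reject keys where K1 == K2 or K2 == K3, since those degrade 3DES to single DES.

	/* Sketch of the 3DES distinctness check (abridged). */
	u32 K[6];

	memcpy(K, key, DES3_EDE_KEY_SIZE);
	if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||	/* K1 == K2 */
	    !((K[2] ^ K[4]) | (K[3] ^ K[5])))	/* K2 == K3 */
		return -EINVAL;			/* -ENOKEY outside FIPS mode */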
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index f4321f3c0777..84ceddfee76b 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c | |||
@@ -10,7 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <crypto/aes.h> | 12 | #include <crypto/aes.h> |
13 | #include <crypto/des.h> | 13 | #include <crypto/internal/des.h> |
14 | 14 | ||
15 | #include "cesa.h" | 15 | #include "cesa.h" |
16 | 16 | ||
@@ -254,7 +254,7 @@ static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, | |||
254 | int ret; | 254 | int ret; |
255 | int i; | 255 | int i; |
256 | 256 | ||
257 | ret = crypto_aes_expand_key(&ctx->aes, key, len); | 257 | ret = aes_expandkey(&ctx->aes, key, len); |
258 | if (ret) { | 258 | if (ret) { |
259 | crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | 259 | crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); |
260 | return ret; | 260 | return ret; |
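aes_expandkey() is the key-schedule routine of the shared AES library (lib/crypto/aes.c) that replaces crypto_aes_expand_key(); it validates the key length itself. Usage sketch matching the call above:

	struct crypto_aes_ctx aes;
	int ret;

	ret = aes_expandkey(&aes, key, len); /* -EINVAL unless len is 16/24/32 */
	if (ret)
		return ret;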
@@ -272,21 +272,12 @@ static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, | |||
272 | static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key, | 272 | static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key, |
273 | unsigned int len) | 273 | unsigned int len) |
274 | { | 274 | { |
275 | struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); | 275 | struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher); |
276 | struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm); | 276 | int err; |
277 | u32 tmp[DES_EXPKEY_WORDS]; | ||
278 | int ret; | ||
279 | |||
280 | if (len != DES_KEY_SIZE) { | ||
281 | crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
282 | return -EINVAL; | ||
283 | } | ||
284 | 277 | ||
285 | ret = des_ekey(tmp, key); | 278 | err = verify_skcipher_des_key(cipher, key); |
286 | if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { | 279 | if (err) |
287 | tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; | 280 | return err; |
288 | return -EINVAL; | ||
289 | } | ||
290 | 281 | ||
291 | memcpy(ctx->key, key, DES_KEY_SIZE); | 282 | memcpy(ctx->key, key, DES_KEY_SIZE); |
292 | 283 | ||
@@ -299,8 +290,8 @@ static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher, | |||
299 | struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher); | 290 | struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher); |
300 | int err; | 291 | int err; |
301 | 292 | ||
302 | err = des3_verify_key(cipher, key); | 293 | err = verify_skcipher_des3_key(cipher, key); |
303 | if (unlikely(err)) | 294 | if (err) |
304 | return err; | 295 | return err; |
305 | 296 | ||
306 | memcpy(ctx->key, key, DES3_EDE_KEY_SIZE); | 297 | memcpy(ctx->key, key, DES3_EDE_KEY_SIZE); |
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index 0f0ac851f4eb..a2b35fb0fb89 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c | |||
@@ -1148,8 +1148,7 @@ static int mv_cesa_ahmac_pad_init(struct ahash_request *req, | |||
1148 | } | 1148 | } |
1149 | 1149 | ||
1150 | /* Set the memory region to 0 to avoid any leak. */ | 1150 | /* Set the memory region to 0 to avoid any leak. */ |
1151 | memset(keydup, 0, keylen); | 1151 | kzfree(keydup); |
1152 | kfree(keydup); | ||
1153 | 1152 | ||
1154 | if (ret) | 1153 | if (ret) |
1155 | return ret; | 1154 | return ret; |
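kzfree() zeroes an allocation before freeing it, which is what the two replaced lines did by hand, except that kzfree() wipes the whole ksize() of the allocation rather than just keylen bytes. Its core at this point in time, sketched from mm/slab_common.c (the helper was later renamed kfree_sensitive()):

	void kzfree(const void *p)
	{
		size_t ks;
		void *mem = (void *)p;

		if (unlikely(ZERO_OR_NULL_PTR(mem)))
			return;
		ks = ksize(mem);
		memset(mem, 0, ks);
		kfree(mem);
	}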
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c index b7477ee32ca0..90c9644fb8a8 100644 --- a/drivers/crypto/mediatek/mtk-aes.c +++ b/drivers/crypto/mediatek/mtk-aes.c | |||
@@ -23,7 +23,7 @@ | |||
23 | 23 | ||
24 | #define AES_CT_CTRL_HDR cpu_to_le32(0x00220000) | 24 | #define AES_CT_CTRL_HDR cpu_to_le32(0x00220000) |
25 | 25 | ||
26 | /* AES-CBC/ECB/CTR command token */ | 26 | /* AES-CBC/ECB/CTR/OFB/CFB command token */ |
27 | #define AES_CMD0 cpu_to_le32(0x05000000) | 27 | #define AES_CMD0 cpu_to_le32(0x05000000) |
28 | #define AES_CMD1 cpu_to_le32(0x2d060000) | 28 | #define AES_CMD1 cpu_to_le32(0x2d060000) |
29 | #define AES_CMD2 cpu_to_le32(0xe4a63806) | 29 | #define AES_CMD2 cpu_to_le32(0xe4a63806) |
@@ -50,6 +50,8 @@ | |||
50 | /* AES transform information word 1 fields */ | 50 | /* AES transform information word 1 fields */ |
51 | #define AES_TFM_ECB cpu_to_le32(0x0 << 0) | 51 | #define AES_TFM_ECB cpu_to_le32(0x0 << 0) |
52 | #define AES_TFM_CBC cpu_to_le32(0x1 << 0) | 52 | #define AES_TFM_CBC cpu_to_le32(0x1 << 0) |
53 | #define AES_TFM_OFB cpu_to_le32(0x4 << 0) | ||
54 | #define AES_TFM_CFB128 cpu_to_le32(0x5 << 0) | ||
53 | #define AES_TFM_CTR_INIT cpu_to_le32(0x2 << 0) /* init counter to 1 */ | 55 | #define AES_TFM_CTR_INIT cpu_to_le32(0x2 << 0) /* init counter to 1 */ |
54 | #define AES_TFM_CTR_LOAD cpu_to_le32(0x6 << 0) /* load/reuse counter */ | 56 | #define AES_TFM_CTR_LOAD cpu_to_le32(0x6 << 0) /* load/reuse counter */ |
55 | #define AES_TFM_3IV cpu_to_le32(0x7 << 5) /* using IV 0-2 */ | 57 | #define AES_TFM_3IV cpu_to_le32(0x7 << 5) /* using IV 0-2 */ |
@@ -58,13 +60,15 @@ | |||
58 | #define AES_TFM_ENC_HASH cpu_to_le32(0x1 << 17) | 60 | #define AES_TFM_ENC_HASH cpu_to_le32(0x1 << 17) |
59 | 61 | ||
60 | /* AES flags */ | 62 | /* AES flags */ |
61 | #define AES_FLAGS_CIPHER_MSK GENMASK(2, 0) | 63 | #define AES_FLAGS_CIPHER_MSK GENMASK(4, 0) |
62 | #define AES_FLAGS_ECB BIT(0) | 64 | #define AES_FLAGS_ECB BIT(0) |
63 | #define AES_FLAGS_CBC BIT(1) | 65 | #define AES_FLAGS_CBC BIT(1) |
64 | #define AES_FLAGS_CTR BIT(2) | 66 | #define AES_FLAGS_CTR BIT(2) |
65 | #define AES_FLAGS_GCM BIT(3) | 67 | #define AES_FLAGS_OFB BIT(3) |
66 | #define AES_FLAGS_ENCRYPT BIT(4) | 68 | #define AES_FLAGS_CFB128 BIT(4) |
67 | #define AES_FLAGS_BUSY BIT(5) | 69 | #define AES_FLAGS_GCM BIT(5) |
70 | #define AES_FLAGS_ENCRYPT BIT(6) | ||
71 | #define AES_FLAGS_BUSY BIT(7) | ||
68 | 72 | ||
69 | #define AES_AUTH_TAG_ERR cpu_to_le32(BIT(26)) | 73 | #define AES_AUTH_TAG_ERR cpu_to_le32(BIT(26)) |
70 | 74 | ||
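OFB and CFB both turn the block cipher into a stream-style mode built on encrypting a chained IV, which is why the new algs registered below advertise cra_blocksize = 1; the two modes differ only in what feeds the next block's IV. One block step, illustrated with the generic AES library (a sketch, not driver code; the EIP-97 engine does this internally):

	#include <crypto/aes.h>

	static void aes_ofb_cfb_block(const struct crypto_aes_ctx *ctx, u8 *iv,
				      const u8 *in, u8 *out, bool cfb, bool enc)
	{
		u8 ks[AES_BLOCK_SIZE];
		int i;

		aes_encrypt(ctx, ks, iv);	/* both modes encrypt the IV chain */
		for (i = 0; i < AES_BLOCK_SIZE; i++)
			out[i] = in[i] ^ ks[i];
		if (cfb)			/* CFB chains the ciphertext */
			memcpy(iv, enc ? out : in, AES_BLOCK_SIZE);
		else				/* OFB chains the keystream */
			memcpy(iv, ks, AES_BLOCK_SIZE);
	}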
@@ -101,6 +105,7 @@ struct mtk_aes_reqctx { | |||
101 | struct mtk_aes_base_ctx { | 105 | struct mtk_aes_base_ctx { |
102 | struct mtk_cryp *cryp; | 106 | struct mtk_cryp *cryp; |
103 | u32 keylen; | 107 | u32 keylen; |
108 | __le32 key[12]; | ||
104 | __le32 keymode; | 109 | __le32 keymode; |
105 | 110 | ||
106 | mtk_aes_fn start; | 111 | mtk_aes_fn start; |
@@ -405,7 +410,7 @@ exit: | |||
405 | return mtk_aes_complete(cryp, aes, -EINVAL); | 410 | return mtk_aes_complete(cryp, aes, -EINVAL); |
406 | } | 411 | } |
407 | 412 | ||
408 | /* Initialize transform information of CBC/ECB/CTR mode */ | 413 | /* Initialize transform information of CBC/ECB/CTR/OFB/CFB mode */ |
409 | static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes, | 414 | static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes, |
410 | size_t len) | 415 | size_t len) |
411 | { | 416 | { |
@@ -434,7 +439,12 @@ static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes, | |||
434 | case AES_FLAGS_CTR: | 439 | case AES_FLAGS_CTR: |
435 | info->tfm[1] = AES_TFM_CTR_LOAD; | 440 | info->tfm[1] = AES_TFM_CTR_LOAD; |
436 | goto ctr; | 441 | goto ctr; |
437 | 442 | case AES_FLAGS_OFB: | |
443 | info->tfm[1] = AES_TFM_OFB; | ||
444 | break; | ||
445 | case AES_FLAGS_CFB128: | ||
446 | info->tfm[1] = AES_TFM_CFB128; | ||
447 | break; | ||
438 | default: | 448 | default: |
439 | /* Should not happen... */ | 449 | /* Should not happen... */ |
440 | return; | 450 | return; |
@@ -525,6 +535,8 @@ static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id, | |||
525 | backlog->complete(backlog, -EINPROGRESS); | 535 | backlog->complete(backlog, -EINPROGRESS); |
526 | 536 | ||
527 | ctx = crypto_tfm_ctx(areq->tfm); | 537 | ctx = crypto_tfm_ctx(areq->tfm); |
538 | /* Write key into state buffer */ | ||
539 | memcpy(ctx->info.state, ctx->key, sizeof(ctx->key)); | ||
528 | 540 | ||
529 | aes->areq = areq; | 541 | aes->areq = areq; |
530 | aes->ctx = ctx; | 542 | aes->ctx = ctx; |
@@ -644,21 +656,26 @@ static int mtk_aes_setkey(struct crypto_ablkcipher *tfm, | |||
644 | } | 656 | } |
645 | 657 | ||
646 | ctx->keylen = SIZE_IN_WORDS(keylen); | 658 | ctx->keylen = SIZE_IN_WORDS(keylen); |
647 | mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen); | 659 | mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen); |
648 | 660 | ||
649 | return 0; | 661 | return 0; |
650 | } | 662 | } |
651 | 663 | ||
652 | static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode) | 664 | static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode) |
653 | { | 665 | { |
654 | struct mtk_aes_base_ctx *ctx; | 666 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); |
667 | struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); | ||
655 | struct mtk_aes_reqctx *rctx; | 668 | struct mtk_aes_reqctx *rctx; |
669 | struct mtk_cryp *cryp; | ||
670 | |||
671 | cryp = mtk_aes_find_dev(ctx); | ||
672 | if (!cryp) | ||
673 | return -ENODEV; | ||
656 | 674 | ||
657 | ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); | ||
658 | rctx = ablkcipher_request_ctx(req); | 675 | rctx = ablkcipher_request_ctx(req); |
659 | rctx->mode = mode; | 676 | rctx->mode = mode; |
660 | 677 | ||
661 | return mtk_aes_handle_queue(ctx->cryp, !(mode & AES_FLAGS_ENCRYPT), | 678 | return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT), |
662 | &req->base); | 679 | &req->base); |
663 | } | 680 | } |
664 | 681 | ||
@@ -692,16 +709,29 @@ static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req) | |||
692 | return mtk_aes_crypt(req, AES_FLAGS_CTR); | 709 | return mtk_aes_crypt(req, AES_FLAGS_CTR); |
693 | } | 710 | } |
694 | 711 | ||
712 | static int mtk_aes_ofb_encrypt(struct ablkcipher_request *req) | ||
713 | { | ||
714 | return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB); | ||
715 | } | ||
716 | |||
717 | static int mtk_aes_ofb_decrypt(struct ablkcipher_request *req) | ||
718 | { | ||
719 | return mtk_aes_crypt(req, AES_FLAGS_OFB); | ||
720 | } | ||
721 | |||
722 | static int mtk_aes_cfb_encrypt(struct ablkcipher_request *req) | ||
723 | { | ||
724 | return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128); | ||
725 | } | ||
726 | |||
727 | static int mtk_aes_cfb_decrypt(struct ablkcipher_request *req) | ||
728 | { | ||
729 | return mtk_aes_crypt(req, AES_FLAGS_CFB128); | ||
730 | } | ||
731 | |||
695 | static int mtk_aes_cra_init(struct crypto_tfm *tfm) | 732 | static int mtk_aes_cra_init(struct crypto_tfm *tfm) |
696 | { | 733 | { |
697 | struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm); | 734 | struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm); |
698 | struct mtk_cryp *cryp = NULL; | ||
699 | |||
700 | cryp = mtk_aes_find_dev(&ctx->base); | ||
701 | if (!cryp) { | ||
702 | pr_err("can't find crypto device\n"); | ||
703 | return -ENODEV; | ||
704 | } | ||
705 | 735 | ||
706 | tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx); | 736 | tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx); |
707 | ctx->base.start = mtk_aes_start; | 737 | ctx->base.start = mtk_aes_start; |
@@ -711,13 +741,6 @@ static int mtk_aes_cra_init(struct crypto_tfm *tfm) | |||
711 | static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm) | 741 | static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm) |
712 | { | 742 | { |
713 | struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm); | 743 | struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm); |
714 | struct mtk_cryp *cryp = NULL; | ||
715 | |||
716 | cryp = mtk_aes_find_dev(&ctx->base); | ||
717 | if (!cryp) { | ||
718 | pr_err("can't find crypto device\n"); | ||
719 | return -ENODEV; | ||
720 | } | ||
721 | 744 | ||
722 | tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx); | 745 | tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx); |
723 | ctx->base.start = mtk_aes_ctr_start; | 746 | ctx->base.start = mtk_aes_ctr_start; |
@@ -787,6 +810,48 @@ static struct crypto_alg aes_algs[] = { | |||
787 | .decrypt = mtk_aes_ctr_decrypt, | 810 | .decrypt = mtk_aes_ctr_decrypt, |
788 | } | 811 | } |
789 | }, | 812 | }, |
813 | { | ||
814 | .cra_name = "ofb(aes)", | ||
815 | .cra_driver_name = "ofb-aes-mtk", | ||
816 | .cra_priority = 400, | ||
817 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
818 | CRYPTO_ALG_ASYNC, | ||
819 | .cra_init = mtk_aes_cra_init, | ||
820 | .cra_blocksize = 1, | ||
821 | .cra_ctxsize = sizeof(struct mtk_aes_ctx), | ||
822 | .cra_alignmask = 0xf, | ||
823 | .cra_type = &crypto_ablkcipher_type, | ||
824 | .cra_module = THIS_MODULE, | ||
825 | .cra_u.ablkcipher = { | ||
826 | .min_keysize = AES_MIN_KEY_SIZE, | ||
827 | .max_keysize = AES_MAX_KEY_SIZE, | ||
828 | .ivsize = AES_BLOCK_SIZE, | ||
829 | .setkey = mtk_aes_setkey, | ||
830 | .encrypt = mtk_aes_ofb_encrypt, | ||
831 | .decrypt = mtk_aes_ofb_decrypt, | ||
832 | } | ||
833 | }, | ||
834 | { | ||
835 | .cra_name = "cfb(aes)", | ||
836 | .cra_driver_name = "cfb-aes-mtk", | ||
837 | .cra_priority = 400, | ||
838 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
839 | CRYPTO_ALG_ASYNC, | ||
840 | .cra_init = mtk_aes_cra_init, | ||
841 | .cra_blocksize = 1, | ||
842 | .cra_ctxsize = sizeof(struct mtk_aes_ctx), | ||
843 | .cra_alignmask = 0xf, | ||
844 | .cra_type = &crypto_ablkcipher_type, | ||
845 | .cra_module = THIS_MODULE, | ||
846 | .cra_u.ablkcipher = { | ||
847 | .min_keysize = AES_MIN_KEY_SIZE, | ||
848 | .max_keysize = AES_MAX_KEY_SIZE, | ||
849 | .ivsize = AES_BLOCK_SIZE, | ||
850 | .setkey = mtk_aes_setkey, | ||
851 | .encrypt = mtk_aes_cfb_encrypt, | ||
852 | .decrypt = mtk_aes_cfb_decrypt, | ||
853 | } | ||
854 | }, | ||
790 | }; | 855 | }; |
791 | 856 | ||
792 | static inline struct mtk_aes_gcm_ctx * | 857 | static inline struct mtk_aes_gcm_ctx * |
@@ -905,14 +970,11 @@ static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) | |||
905 | aes->resume = mtk_aes_transfer_complete; | 970 | aes->resume = mtk_aes_transfer_complete; |
906 | /* Compute total process length. */ | 971 | /* Compute total process length. */ |
907 | aes->total = len + gctx->authsize; | 972 | aes->total = len + gctx->authsize; |
908 | /* Compute text length. */ | ||
909 | gctx->textlen = req->cryptlen; | ||
910 | /* Hardware will append authenticated tag to output buffer */ | 973 | /* Hardware will append authenticated tag to output buffer */ |
911 | scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1); | 974 | scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1); |
912 | } else { | 975 | } else { |
913 | aes->resume = mtk_aes_gcm_tag_verify; | 976 | aes->resume = mtk_aes_gcm_tag_verify; |
914 | aes->total = len; | 977 | aes->total = len; |
915 | gctx->textlen = req->cryptlen - gctx->authsize; | ||
916 | } | 978 | } |
917 | 979 | ||
918 | return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len); | 980 | return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len); |
@@ -923,6 +985,15 @@ static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode) | |||
923 | struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | 985 | struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); |
924 | struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx); | 986 | struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx); |
925 | struct mtk_aes_reqctx *rctx = aead_request_ctx(req); | 987 | struct mtk_aes_reqctx *rctx = aead_request_ctx(req); |
988 | struct mtk_cryp *cryp; | ||
989 | bool enc = !!(mode & AES_FLAGS_ENCRYPT); | ||
990 | |||
991 | cryp = mtk_aes_find_dev(ctx); | ||
992 | if (!cryp) | ||
993 | return -ENODEV; | ||
994 | |||
995 | /* Compute text length. */ | ||
996 | gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize); | ||
926 | 997 | ||
927 | /* Empty messages are not supported yet */ | 998 | /* Empty messages are not supported yet */ |
928 | if (!gctx->textlen && !req->assoclen) | 999 | if (!gctx->textlen && !req->assoclen) |
@@ -930,8 +1001,7 @@ static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode) | |||
930 | 1001 | ||
931 | rctx->mode = AES_FLAGS_GCM | mode; | 1002 | rctx->mode = AES_FLAGS_GCM | mode; |
932 | 1003 | ||
933 | return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT), | 1004 | return mtk_aes_handle_queue(cryp, enc, &req->base); |
934 | &req->base); | ||
935 | } | 1005 | } |
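GCM's plaintext length depends on direction: req->cryptlen on encryption, but req->cryptlen minus the appended tag on decryption. Computing it here in the request path, before the empty-message check, keeps that check correct for both directions instead of relying on a value only filled in later by the start routine. Isolated as a sketch:

	/* Plaintext length for GCM in either direction (sketch). */
	bool enc = !!(mode & AES_FLAGS_ENCRYPT);
	u32 textlen = req->cryptlen - (enc ? 0 : gctx->authsize);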
936 | 1006 | ||
937 | /* | 1007 | /* |
@@ -1003,10 +1073,8 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, | |||
1003 | if (err) | 1073 | if (err) |
1004 | goto out; | 1074 | goto out; |
1005 | 1075 | ||
1006 | /* Write key into state buffer */ | 1076 | mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen); |
1007 | mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen); | 1077 | mtk_aes_write_state_be(ctx->key + ctx->keylen, data->hash, |
1008 | /* Write key(H) into state buffer */ | ||
1009 | mtk_aes_write_state_be(ctx->info.state + ctx->keylen, data->hash, | ||
1010 | AES_BLOCK_SIZE); | 1078 | AES_BLOCK_SIZE); |
1011 | out: | 1079 | out: |
1012 | kzfree(data); | 1080 | kzfree(data); |
@@ -1046,13 +1114,6 @@ static int mtk_aes_gcm_decrypt(struct aead_request *req) | |||
1046 | static int mtk_aes_gcm_init(struct crypto_aead *aead) | 1114 | static int mtk_aes_gcm_init(struct crypto_aead *aead) |
1047 | { | 1115 | { |
1048 | struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead); | 1116 | struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead); |
1049 | struct mtk_cryp *cryp = NULL; | ||
1050 | |||
1051 | cryp = mtk_aes_find_dev(&ctx->base); | ||
1052 | if (!cryp) { | ||
1053 | pr_err("can't find crypto device\n"); | ||
1054 | return -ENODEV; | ||
1055 | } | ||
1056 | 1117 | ||
1057 | ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0, | 1118 | ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0, |
1058 | CRYPTO_ALG_ASYNC); | 1119 | CRYPTO_ALG_ASYNC); |
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c index 125318a88cd4..7e3ad085b5bd 100644 --- a/drivers/crypto/mediatek/mtk-platform.c +++ b/drivers/crypto/mediatek/mtk-platform.c | |||
@@ -481,7 +481,6 @@ err_cleanup: | |||
481 | 481 | ||
482 | static int mtk_crypto_probe(struct platform_device *pdev) | 482 | static int mtk_crypto_probe(struct platform_device *pdev) |
483 | { | 483 | { |
484 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
485 | struct mtk_cryp *cryp; | 484 | struct mtk_cryp *cryp; |
486 | int i, err; | 485 | int i, err; |
487 | 486 | ||
@@ -489,16 +488,14 @@ static int mtk_crypto_probe(struct platform_device *pdev) | |||
489 | if (!cryp) | 488 | if (!cryp) |
490 | return -ENOMEM; | 489 | return -ENOMEM; |
491 | 490 | ||
492 | cryp->base = devm_ioremap_resource(&pdev->dev, res); | 491 | cryp->base = devm_platform_ioremap_resource(pdev, 0); |
493 | if (IS_ERR(cryp->base)) | 492 | if (IS_ERR(cryp->base)) |
494 | return PTR_ERR(cryp->base); | 493 | return PTR_ERR(cryp->base); |
495 | 494 | ||
496 | for (i = 0; i < MTK_IRQ_NUM; i++) { | 495 | for (i = 0; i < MTK_IRQ_NUM; i++) { |
497 | cryp->irq[i] = platform_get_irq(pdev, i); | 496 | cryp->irq[i] = platform_get_irq(pdev, i); |
498 | if (cryp->irq[i] < 0) { | 497 | if (cryp->irq[i] < 0) |
499 | dev_err(cryp->dev, "no IRQ:%d resource info\n", i); | ||
500 | return cryp->irq[i]; | 498 | return cryp->irq[i]; |
501 | } | ||
502 | } | 499 | } |
503 | 500 | ||
504 | cryp->clk_cryp = devm_clk_get(&pdev->dev, "cryp"); | 501 | cryp->clk_cryp = devm_clk_get(&pdev->dev, "cryp"); |
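devm_platform_ioremap_resource() folds the lookup-and-map pair into one call; its body in drivers/base/platform.c is essentially the code it replaces here:

	void __iomem *
	devm_platform_ioremap_resource(struct platform_device *pdev,
				       unsigned int index)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		return devm_ioremap_resource(&pdev->dev, res);
	}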
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c index f03b0f06fb2f..9e9f48bb7f85 100644 --- a/drivers/crypto/mediatek/mtk-sha.c +++ b/drivers/crypto/mediatek/mtk-sha.c | |||
@@ -778,7 +778,9 @@ static int mtk_sha_finup(struct ahash_request *req) | |||
778 | ctx->flags |= SHA_FLAGS_FINUP; | 778 | ctx->flags |= SHA_FLAGS_FINUP; |
779 | 779 | ||
780 | err1 = mtk_sha_update(req); | 780 | err1 = mtk_sha_update(req); |
781 | if (err1 == -EINPROGRESS || err1 == -EBUSY) | 781 | if (err1 == -EINPROGRESS || |
782 | (err1 == -EBUSY && (ahash_request_flags(req) & | ||
783 | CRYPTO_TFM_REQ_MAY_BACKLOG))) | ||
782 | return err1; | 784 | return err1; |
783 | /* | 785 | /* |
784 | * final() has to be always called to cleanup resources | 786 | * final() has to be always called to cleanup resources |
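The widened condition distinguishes the two meanings of -EBUSY from the crypto queue of this era: with CRYPTO_TFM_REQ_MAY_BACKLOG set, -EBUSY means the request was placed on the backlog and will still complete, so finup() must return and let the caller wait; without the flag, -EBUSY means the request was rejected, and falling through to final() for cleanup is the right move. As a commented sketch:

	if (err1 == -EINPROGRESS ||		/* accepted, in flight */
	    (err1 == -EBUSY && (ahash_request_flags(req) &
				CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err1;			/* engine still owns the request */
	/* plain -EBUSY: request was dropped, clean up via final() below */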
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index f1fa637cb029..bf8d2197bc11 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c | |||
@@ -994,16 +994,12 @@ static int mxs_dcp_probe(struct platform_device *pdev) | |||
994 | } | 994 | } |
995 | 995 | ||
996 | dcp_vmi_irq = platform_get_irq(pdev, 0); | 996 | dcp_vmi_irq = platform_get_irq(pdev, 0); |
997 | if (dcp_vmi_irq < 0) { | 997 | if (dcp_vmi_irq < 0) |
998 | dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq); | ||
999 | return dcp_vmi_irq; | 998 | return dcp_vmi_irq; |
1000 | } | ||
1001 | 999 | ||
1002 | dcp_irq = platform_get_irq(pdev, 1); | 1000 | dcp_irq = platform_get_irq(pdev, 1); |
1003 | if (dcp_irq < 0) { | 1001 | if (dcp_irq < 0) |
1004 | dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_irq); | ||
1005 | return dcp_irq; | 1002 | return dcp_irq; |
1006 | } | ||
1007 | 1003 | ||
1008 | sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL); | 1004 | sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL); |
1009 | if (!sdcp) | 1005 | if (!sdcp) |
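These dev_err() calls became redundant once platform_get_irq() started printing its own diagnostic on failure (the core logs something along the lines of "IRQ index %u not found"), so callers only propagate the error code. The resulting idiom, applied throughout this series:

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* core already logged the failure */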
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 760e72a5893b..dc15b06e96ab 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <crypto/md5.h> | 17 | #include <crypto/md5.h> |
18 | #include <crypto/sha.h> | 18 | #include <crypto/sha.h> |
19 | #include <crypto/aes.h> | 19 | #include <crypto/aes.h> |
20 | #include <crypto/des.h> | 20 | #include <crypto/internal/des.h> |
21 | #include <linux/mutex.h> | 21 | #include <linux/mutex.h> |
22 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
23 | #include <linux/sched.h> | 23 | #include <linux/sched.h> |
@@ -760,21 +760,13 @@ static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | |||
760 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | 760 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); |
761 | struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); | 761 | struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); |
762 | struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); | 762 | struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); |
763 | u32 tmp[DES_EXPKEY_WORDS]; | ||
764 | int err; | 763 | int err; |
765 | 764 | ||
766 | ctx->enc_type = n2alg->enc_type; | 765 | err = verify_ablkcipher_des_key(cipher, key); |
767 | 766 | if (err) | |
768 | if (keylen != DES_KEY_SIZE) { | 767 | return err; |
769 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
770 | return -EINVAL; | ||
771 | } | ||
772 | 768 | ||
773 | err = des_ekey(tmp, key); | 769 | ctx->enc_type = n2alg->enc_type; |
774 | if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { | ||
775 | tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
776 | return -EINVAL; | ||
777 | } | ||
778 | 770 | ||
779 | ctx->key_len = keylen; | 771 | ctx->key_len = keylen; |
780 | memcpy(ctx->key.des, key, keylen); | 772 | memcpy(ctx->key.des, key, keylen); |
@@ -787,15 +779,11 @@ static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | |||
787 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | 779 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); |
788 | struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); | 780 | struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); |
789 | struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); | 781 | struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); |
790 | u32 flags; | ||
791 | int err; | 782 | int err; |
792 | 783 | ||
793 | flags = crypto_ablkcipher_get_flags(cipher); | 784 | err = verify_ablkcipher_des3_key(cipher, key); |
794 | err = __des3_verify_key(&flags, key); | 785 | if (err) |
795 | if (unlikely(err)) { | ||
796 | crypto_ablkcipher_set_flags(cipher, flags); | ||
797 | return err; | 786 | return err; |
798 | } | ||
799 | 787 | ||
800 | ctx->enc_type = n2alg->enc_type; | 788 | ctx->enc_type = n2alg->enc_type; |
801 | 789 | ||
@@ -1295,20 +1283,20 @@ struct n2_hash_tmpl { | |||
1295 | u8 hmac_type; | 1283 | u8 hmac_type; |
1296 | }; | 1284 | }; |
1297 | 1285 | ||
1298 | static const u32 md5_init[MD5_HASH_WORDS] = { | 1286 | static const u32 n2_md5_init[MD5_HASH_WORDS] = { |
1299 | cpu_to_le32(MD5_H0), | 1287 | cpu_to_le32(MD5_H0), |
1300 | cpu_to_le32(MD5_H1), | 1288 | cpu_to_le32(MD5_H1), |
1301 | cpu_to_le32(MD5_H2), | 1289 | cpu_to_le32(MD5_H2), |
1302 | cpu_to_le32(MD5_H3), | 1290 | cpu_to_le32(MD5_H3), |
1303 | }; | 1291 | }; |
1304 | static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { | 1292 | static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = { |
1305 | SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, | 1293 | SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, |
1306 | }; | 1294 | }; |
1307 | static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { | 1295 | static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = { |
1308 | SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, | 1296 | SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, |
1309 | SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, | 1297 | SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, |
1310 | }; | 1298 | }; |
1311 | static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { | 1299 | static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = { |
1312 | SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, | 1300 | SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, |
1313 | SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, | 1301 | SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, |
1314 | }; | 1302 | }; |
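The n2_ prefixes avoid name collisions: with SHA-256 moving into a shared library in this same development cycle, symbols such as sha256_init() become globally visible and would clash with these file-local arrays. Assumed shape of the colliding declarations of that era (sketch):

	/* Library symbols the old static array names would shadow. */
	int sha256_init(struct sha256_state *sctx);
	int sha224_init(struct sha256_state *sctx);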
@@ -1316,7 +1304,7 @@ static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { | |||
1316 | static const struct n2_hash_tmpl hash_tmpls[] = { | 1304 | static const struct n2_hash_tmpl hash_tmpls[] = { |
1317 | { .name = "md5", | 1305 | { .name = "md5", |
1318 | .hash_zero = md5_zero_message_hash, | 1306 | .hash_zero = md5_zero_message_hash, |
1319 | .hash_init = md5_init, | 1307 | .hash_init = n2_md5_init, |
1320 | .auth_type = AUTH_TYPE_MD5, | 1308 | .auth_type = AUTH_TYPE_MD5, |
1321 | .hmac_type = AUTH_TYPE_HMAC_MD5, | 1309 | .hmac_type = AUTH_TYPE_HMAC_MD5, |
1322 | .hw_op_hashsz = MD5_DIGEST_SIZE, | 1310 | .hw_op_hashsz = MD5_DIGEST_SIZE, |
@@ -1324,7 +1312,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { | |||
1324 | .block_size = MD5_HMAC_BLOCK_SIZE }, | 1312 | .block_size = MD5_HMAC_BLOCK_SIZE }, |
1325 | { .name = "sha1", | 1313 | { .name = "sha1", |
1326 | .hash_zero = sha1_zero_message_hash, | 1314 | .hash_zero = sha1_zero_message_hash, |
1327 | .hash_init = sha1_init, | 1315 | .hash_init = n2_sha1_init, |
1328 | .auth_type = AUTH_TYPE_SHA1, | 1316 | .auth_type = AUTH_TYPE_SHA1, |
1329 | .hmac_type = AUTH_TYPE_HMAC_SHA1, | 1317 | .hmac_type = AUTH_TYPE_HMAC_SHA1, |
1330 | .hw_op_hashsz = SHA1_DIGEST_SIZE, | 1318 | .hw_op_hashsz = SHA1_DIGEST_SIZE, |
@@ -1332,7 +1320,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { | |||
1332 | .block_size = SHA1_BLOCK_SIZE }, | 1320 | .block_size = SHA1_BLOCK_SIZE }, |
1333 | { .name = "sha256", | 1321 | { .name = "sha256", |
1334 | .hash_zero = sha256_zero_message_hash, | 1322 | .hash_zero = sha256_zero_message_hash, |
1335 | .hash_init = sha256_init, | 1323 | .hash_init = n2_sha256_init, |
1336 | .auth_type = AUTH_TYPE_SHA256, | 1324 | .auth_type = AUTH_TYPE_SHA256, |
1337 | .hmac_type = AUTH_TYPE_HMAC_SHA256, | 1325 | .hmac_type = AUTH_TYPE_HMAC_SHA256, |
1338 | .hw_op_hashsz = SHA256_DIGEST_SIZE, | 1326 | .hw_op_hashsz = SHA256_DIGEST_SIZE, |
@@ -1340,7 +1328,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { | |||
1340 | .block_size = SHA256_BLOCK_SIZE }, | 1328 | .block_size = SHA256_BLOCK_SIZE }, |
1341 | { .name = "sha224", | 1329 | { .name = "sha224", |
1342 | .hash_zero = sha224_zero_message_hash, | 1330 | .hash_zero = sha224_zero_message_hash, |
1343 | .hash_init = sha224_init, | 1331 | .hash_init = n2_sha224_init, |
1344 | .auth_type = AUTH_TYPE_SHA256, | 1332 | .auth_type = AUTH_TYPE_SHA256, |
1345 | .hmac_type = AUTH_TYPE_RESERVED, | 1333 | .hmac_type = AUTH_TYPE_RESERVED, |
1346 | .hw_op_hashsz = SHA256_DIGEST_SIZE, | 1334 | .hw_op_hashsz = SHA256_DIGEST_SIZE, |
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c index e78ff5c65ed6..c037a2403b82 100644 --- a/drivers/crypto/nx/nx-842-powernv.c +++ b/drivers/crypto/nx/nx-842-powernv.c | |||
@@ -1020,6 +1020,7 @@ static __init int nx842_powernv_init(void) | |||
1020 | ret = nx842_powernv_probe_vas(dn); | 1020 | ret = nx842_powernv_probe_vas(dn); |
1021 | if (ret) { | 1021 | if (ret) { |
1022 | nx842_delete_coprocs(); | 1022 | nx842_delete_coprocs(); |
1023 | of_node_put(dn); | ||
1023 | return ret; | 1024 | return ret; |
1024 | } | 1025 | } |
1025 | } | 1026 | } |
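for_each_compatible_node() and its relatives take a reference on each node they hand out and drop it at the top of the next iteration; breaking out of the loop early therefore leaks a reference unless the exit path drops it itself, which is the leak fixed here. The pattern, assuming the surrounding loop in nx842_powernv_init():

	for_each_compatible_node(dn, NULL, "ibm,power-nx") {
		ret = nx842_powernv_probe_vas(dn);
		if (ret) {
			nx842_delete_coprocs();
			of_node_put(dn);	/* early exit: drop the held ref */
			return ret;
		}
	}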
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h index c6b5a3be02be..7ecca168f8c4 100644 --- a/drivers/crypto/nx/nx.h +++ b/drivers/crypto/nx/nx.h | |||
@@ -9,9 +9,6 @@ | |||
9 | #define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver" | 9 | #define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver" |
10 | #define NX_VERSION "1.0" | 10 | #define NX_VERSION "1.0" |
11 | 11 | ||
12 | static const char nx_driver_string[] = NX_STRING; | ||
13 | static const char nx_driver_version[] = NX_VERSION; | ||
14 | |||
15 | /* a scatterlist in the format PHYP is expecting */ | 12 | /* a scatterlist in the format PHYP is expecting */ |
16 | struct nx_sg { | 13 | struct nx_sg { |
17 | u64 addr; | 14 | u64 addr; |
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 45a4647f7030..2f53fbb74100 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c | |||
@@ -1180,7 +1180,6 @@ static int omap_aes_probe(struct platform_device *pdev) | |||
1180 | 1180 | ||
1181 | irq = platform_get_irq(pdev, 0); | 1181 | irq = platform_get_irq(pdev, 0); |
1182 | if (irq < 0) { | 1182 | if (irq < 0) { |
1183 | dev_err(dev, "can't get IRQ resource\n"); | ||
1184 | err = irq; | 1183 | err = irq; |
1185 | goto err_irq; | 1184 | goto err_irq; |
1186 | } | 1185 | } |
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 1ee69a979677..b19d7e5d55ec 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #include <linux/crypto.h> | 33 | #include <linux/crypto.h> |
34 | #include <linux/interrupt.h> | 34 | #include <linux/interrupt.h> |
35 | #include <crypto/scatterwalk.h> | 35 | #include <crypto/scatterwalk.h> |
36 | #include <crypto/des.h> | 36 | #include <crypto/internal/des.h> |
37 | #include <crypto/algapi.h> | 37 | #include <crypto/algapi.h> |
38 | #include <crypto/engine.h> | 38 | #include <crypto/engine.h> |
39 | 39 | ||
@@ -650,20 +650,13 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | |||
650 | unsigned int keylen) | 650 | unsigned int keylen) |
651 | { | 651 | { |
652 | struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher); | 652 | struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
653 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | 653 | int err; |
654 | 654 | ||
655 | pr_debug("enter, keylen: %d\n", keylen); | 655 | pr_debug("enter, keylen: %d\n", keylen); |
656 | 656 | ||
657 | /* Do we need to test against weak key? */ | 657 | err = verify_ablkcipher_des_key(cipher, key); |
658 | if (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) { | 658 | if (err) |
659 | u32 tmp[DES_EXPKEY_WORDS]; | 659 | return err; |
660 | int ret = des_ekey(tmp, key); | ||
661 | |||
662 | if (!ret) { | ||
663 | tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
664 | return -EINVAL; | ||
665 | } | ||
666 | } | ||
667 | 660 | ||
668 | memcpy(ctx->key, key, keylen); | 661 | memcpy(ctx->key, key, keylen); |
669 | ctx->keylen = keylen; | 662 | ctx->keylen = keylen; |
@@ -672,20 +665,16 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | |||
672 | } | 665 | } |
673 | 666 | ||
674 | static int omap_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | 667 | static int omap_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, |
675 | unsigned int keylen) | 668 | unsigned int keylen) |
676 | { | 669 | { |
677 | struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher); | 670 | struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
678 | u32 flags; | ||
679 | int err; | 671 | int err; |
680 | 672 | ||
681 | pr_debug("enter, keylen: %d\n", keylen); | 673 | pr_debug("enter, keylen: %d\n", keylen); |
682 | 674 | ||
683 | flags = crypto_ablkcipher_get_flags(cipher); | 675 | err = verify_ablkcipher_des3_key(cipher, key); |
684 | err = __des3_verify_key(&flags, key); | 676 | if (err) |
685 | if (unlikely(err)) { | ||
686 | crypto_ablkcipher_set_flags(cipher, flags); | ||
687 | return err; | 677 | return err; |
688 | } | ||
689 | 678 | ||
690 | memcpy(ctx->key, key, keylen); | 679 | memcpy(ctx->key, key, keylen); |
691 | ctx->keylen = keylen; | 680 | ctx->keylen = keylen; |
@@ -1049,7 +1038,6 @@ static int omap_des_probe(struct platform_device *pdev) | |||
1049 | 1038 | ||
1050 | irq = platform_get_irq(pdev, 0); | 1039 | irq = platform_get_irq(pdev, 0); |
1051 | if (irq < 0) { | 1040 | if (irq < 0) { |
1052 | dev_err(dev, "can't get IRQ resource: %d\n", irq); | ||
1053 | err = irq; | 1041 | err = irq; |
1054 | goto err_irq; | 1042 | goto err_irq; |
1055 | } | 1043 | } |
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index e8e2907bd9f4..ac80bc6af093 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c | |||
@@ -1989,7 +1989,6 @@ static int omap_sham_get_res_pdev(struct omap_sham_dev *dd, | |||
1989 | /* Get the IRQ */ | 1989 | /* Get the IRQ */ |
1990 | dd->irq = platform_get_irq(pdev, 0); | 1990 | dd->irq = platform_get_irq(pdev, 0); |
1991 | if (dd->irq < 0) { | 1991 | if (dd->irq < 0) { |
1992 | dev_err(dev, "no IRQ resource info\n"); | ||
1993 | err = dd->irq; | 1992 | err = dd->irq; |
1994 | goto err; | 1993 | goto err; |
1995 | } | 1994 | } |
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index ad020133da19..8a0661250078 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c | |||
@@ -145,7 +145,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, | |||
145 | ctx->cword.encrypt.keygen = 1; | 145 | ctx->cword.encrypt.keygen = 1; |
146 | ctx->cword.decrypt.keygen = 1; | 146 | ctx->cword.decrypt.keygen = 1; |
147 | 147 | ||
148 | if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) { | 148 | if (aes_expandkey(&gen_aes, in_key, key_len)) { |
149 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; | 149 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; |
150 | return -EINVAL; | 150 | return -EINVAL; |
151 | } | 151 | } |
@@ -300,7 +300,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, | |||
300 | return iv; | 300 | return iv; |
301 | } | 301 | } |
302 | 302 | ||
303 | static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) | 303 | static void padlock_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) |
304 | { | 304 | { |
305 | struct aes_ctx *ctx = aes_ctx(tfm); | 305 | struct aes_ctx *ctx = aes_ctx(tfm); |
306 | 306 | ||
@@ -309,7 +309,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) | |||
309 | padlock_store_cword(&ctx->cword.encrypt); | 309 | padlock_store_cword(&ctx->cword.encrypt); |
310 | } | 310 | } |
311 | 311 | ||
312 | static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) | 312 | static void padlock_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) |
313 | { | 313 | { |
314 | struct aes_ctx *ctx = aes_ctx(tfm); | 314 | struct aes_ctx *ctx = aes_ctx(tfm); |
315 | 315 | ||
@@ -332,8 +332,8 @@ static struct crypto_alg aes_alg = { | |||
332 | .cia_min_keysize = AES_MIN_KEY_SIZE, | 332 | .cia_min_keysize = AES_MIN_KEY_SIZE, |
333 | .cia_max_keysize = AES_MAX_KEY_SIZE, | 333 | .cia_max_keysize = AES_MAX_KEY_SIZE, |
334 | .cia_setkey = aes_set_key, | 334 | .cia_setkey = aes_set_key, |
335 | .cia_encrypt = aes_encrypt, | 335 | .cia_encrypt = padlock_aes_encrypt, |
336 | .cia_decrypt = aes_decrypt, | 336 | .cia_decrypt = padlock_aes_decrypt, |
337 | } | 337 | } |
338 | } | 338 | } |
339 | }; | 339 | }; |
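The padlock changes track the move of the generic AES code into lib/crypto: crypto_aes_expand_key() is now aes_expandkey(), and since the library header also declares generic aes_encrypt()/aes_decrypt() functions, the driver's identically named static helpers take a padlock_ prefix to avoid the clash. The expandkey call keeps its shape:

    #include <crypto/aes.h>

    struct crypto_aes_ctx gen_aes;

    /* aes_expandkey() returns -EINVAL for key lengths other than
     * 16, 24 or 32 bytes, 0 otherwise. */
    if (aes_expandkey(&gen_aes, in_key, key_len))
            return -EINVAL;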
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index b985cb85c9bc..3cbefb41b099 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c | |||
@@ -6,7 +6,7 @@ | |||
6 | #include <crypto/aes.h> | 6 | #include <crypto/aes.h> |
7 | #include <crypto/algapi.h> | 7 | #include <crypto/algapi.h> |
8 | #include <crypto/authenc.h> | 8 | #include <crypto/authenc.h> |
9 | #include <crypto/des.h> | 9 | #include <crypto/internal/des.h> |
10 | #include <crypto/md5.h> | 10 | #include <crypto/md5.h> |
11 | #include <crypto/sha.h> | 11 | #include <crypto/sha.h> |
12 | #include <crypto/internal/skcipher.h> | 12 | #include <crypto/internal/skcipher.h> |
@@ -736,16 +736,12 @@ static void spacc_aead_cra_exit(struct crypto_aead *tfm) | |||
736 | static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | 736 | static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, |
737 | unsigned int len) | 737 | unsigned int len) |
738 | { | 738 | { |
739 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | 739 | struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
740 | struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); | 740 | int err; |
741 | u32 tmp[DES_EXPKEY_WORDS]; | ||
742 | 741 | ||
743 | if (unlikely(!des_ekey(tmp, key)) && | 742 | err = verify_ablkcipher_des_key(cipher, key); |
744 | (crypto_ablkcipher_get_flags(cipher) & | 743 | if (err) |
745 | CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { | 744 | return err; |
746 | tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
747 | return -EINVAL; | ||
748 | } | ||
749 | 745 | ||
750 | memcpy(ctx->key, key, len); | 746 | memcpy(ctx->key, key, len); |
751 | ctx->key_len = len; | 747 | ctx->key_len = len; |
@@ -761,15 +757,11 @@ static int spacc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | |||
761 | unsigned int len) | 757 | unsigned int len) |
762 | { | 758 | { |
763 | struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher); | 759 | struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
764 | u32 flags; | ||
765 | int err; | 760 | int err; |
766 | 761 | ||
767 | flags = crypto_ablkcipher_get_flags(cipher); | 762 | err = verify_ablkcipher_des3_key(cipher, key); |
768 | err = __des3_verify_key(&flags, key); | 763 | if (err) |
769 | if (unlikely(err)) { | ||
770 | crypto_ablkcipher_set_flags(cipher, flags); | ||
771 | return err; | 764 | return err; |
772 | } | ||
773 | 765 | ||
774 | memcpy(ctx->key, key, len); | 766 | memcpy(ctx->key, key, len); |
775 | ctx->key_len = len; | 767 | ctx->key_len = len; |
@@ -1624,7 +1616,7 @@ MODULE_DEVICE_TABLE(of, spacc_of_id_table); | |||
1624 | static int spacc_probe(struct platform_device *pdev) | 1616 | static int spacc_probe(struct platform_device *pdev) |
1625 | { | 1617 | { |
1626 | int i, err, ret; | 1618 | int i, err, ret; |
1627 | struct resource *mem, *irq; | 1619 | struct resource *irq; |
1628 | struct device_node *np = pdev->dev.of_node; | 1620 | struct device_node *np = pdev->dev.of_node; |
1629 | struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine), | 1621 | struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine), |
1630 | GFP_KERNEL); | 1622 | GFP_KERNEL); |
@@ -1653,8 +1645,7 @@ static int spacc_probe(struct platform_device *pdev) | |||
1653 | 1645 | ||
1654 | engine->name = dev_name(&pdev->dev); | 1646 | engine->name = dev_name(&pdev->dev); |
1655 | 1647 | ||
1656 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1648 | engine->regs = devm_platform_ioremap_resource(pdev, 0); |
1657 | engine->regs = devm_ioremap_resource(&pdev->dev, mem); | ||
1658 | if (IS_ERR(engine->regs)) | 1649 | if (IS_ERR(engine->regs)) |
1659 | return PTR_ERR(engine->regs); | 1650 | return PTR_ERR(engine->regs); |
1660 | 1651 | ||
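The MMIO change here is the first of several identical conversions in this section (qce, qcom-rng, rk3288, stm32, sun4i-ss): devm_platform_ioremap_resource() fuses the platform_get_resource()/devm_ioremap_resource() pair into one call:

    /* before */
    mem  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    base = devm_ioremap_resource(&pdev->dev, mem);

    /* after: same semantics, one call, ERR_PTR-encoded failure */
    base = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(base))
            return PTR_ERR(base);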
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index 5c4c0a253129..d78f8d5c89c3 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h | |||
@@ -95,7 +95,7 @@ struct service_hndl { | |||
95 | 95 | ||
96 | static inline int get_current_node(void) | 96 | static inline int get_current_node(void) |
97 | { | 97 | { |
98 | return topology_physical_package_id(smp_processor_id()); | 98 | return topology_physical_package_id(raw_smp_processor_id()); |
99 | } | 99 | } |
100 | 100 | ||
101 | int adf_service_register(struct service_hndl *service); | 101 | int adf_service_register(struct service_hndl *service); |
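get_current_node() only derives a NUMA hint for allocations, so a stale CPU number is harmless; switching to raw_smp_processor_id() avoids the CONFIG_DEBUG_PREEMPT warning that smp_processor_id() raises when called from preemptible context. Annotated:

    static inline int get_current_node(void)
    {
            /* raw_ variant: no preemption-safety check; the result is
             * advisory (node locality only) and may be stale by the
             * time it is used, which is acceptable here. */
            return topology_physical_package_id(raw_smp_processor_id());
    }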
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c index a976210ba41c..7a98bf5cc967 100644 --- a/drivers/crypto/qce/ablkcipher.c +++ b/drivers/crypto/qce/ablkcipher.c | |||
@@ -7,7 +7,7 @@ | |||
7 | #include <linux/interrupt.h> | 7 | #include <linux/interrupt.h> |
8 | #include <linux/types.h> | 8 | #include <linux/types.h> |
9 | #include <crypto/aes.h> | 9 | #include <crypto/aes.h> |
10 | #include <crypto/des.h> | 10 | #include <crypto/internal/des.h> |
11 | #include <crypto/internal/skcipher.h> | 11 | #include <crypto/internal/skcipher.h> |
12 | 12 | ||
13 | #include "cipher.h" | 13 | #include "cipher.h" |
@@ -154,27 +154,17 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key, | |||
154 | { | 154 | { |
155 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk); | 155 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk); |
156 | struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | 156 | struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); |
157 | unsigned long flags = to_cipher_tmpl(tfm)->alg_flags; | ||
158 | int ret; | 157 | int ret; |
159 | 158 | ||
160 | if (!key || !keylen) | 159 | if (!key || !keylen) |
161 | return -EINVAL; | 160 | return -EINVAL; |
162 | 161 | ||
163 | if (IS_AES(flags)) { | 162 | switch (keylen) { |
164 | switch (keylen) { | 163 | case AES_KEYSIZE_128: |
165 | case AES_KEYSIZE_128: | 164 | case AES_KEYSIZE_256: |
166 | case AES_KEYSIZE_256: | 165 | break; |
167 | break; | 166 | default: |
168 | default: | 167 | goto fallback; |
169 | goto fallback; | ||
170 | } | ||
171 | } else if (IS_DES(flags)) { | ||
172 | u32 tmp[DES_EXPKEY_WORDS]; | ||
173 | |||
174 | ret = des_ekey(tmp, key); | ||
175 | if (!ret && (crypto_ablkcipher_get_flags(ablk) & | ||
176 | CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) | ||
177 | goto weakkey; | ||
178 | } | 168 | } |
179 | 169 | ||
180 | ctx->enc_keylen = keylen; | 170 | ctx->enc_keylen = keylen; |
@@ -185,24 +175,32 @@ fallback: | |||
185 | if (!ret) | 175 | if (!ret) |
186 | ctx->enc_keylen = keylen; | 176 | ctx->enc_keylen = keylen; |
187 | return ret; | 177 | return ret; |
188 | weakkey: | 178 | } |
189 | crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY); | 179 | |
190 | return -EINVAL; | 180 | static int qce_des_setkey(struct crypto_ablkcipher *ablk, const u8 *key, |
181 | unsigned int keylen) | ||
182 | { | ||
183 | struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk); | ||
184 | int err; | ||
185 | |||
186 | err = verify_ablkcipher_des_key(ablk, key); | ||
187 | if (err) | ||
188 | return err; | ||
189 | |||
190 | ctx->enc_keylen = keylen; | ||
191 | memcpy(ctx->enc_key, key, keylen); | ||
192 | return 0; | ||
191 | } | 193 | } |
192 | 194 | ||
193 | static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key, | 195 | static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key, |
194 | unsigned int keylen) | 196 | unsigned int keylen) |
195 | { | 197 | { |
196 | struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk); | 198 | struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk); |
197 | u32 flags; | ||
198 | int err; | 199 | int err; |
199 | 200 | ||
200 | flags = crypto_ablkcipher_get_flags(ablk); | 201 | err = verify_ablkcipher_des3_key(ablk, key); |
201 | err = __des3_verify_key(&flags, key); | 202 | if (err) |
202 | if (unlikely(err)) { | ||
203 | crypto_ablkcipher_set_flags(ablk, flags); | ||
204 | return err; | 203 | return err; |
205 | } | ||
206 | 204 | ||
207 | ctx->enc_keylen = keylen; | 205 | ctx->enc_keylen = keylen; |
208 | memcpy(ctx->enc_key, key, keylen); | 206 | memcpy(ctx->enc_key, key, keylen); |
@@ -374,8 +372,9 @@ static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def, | |||
374 | alg->cra_ablkcipher.ivsize = def->ivsize; | 372 | alg->cra_ablkcipher.ivsize = def->ivsize; |
375 | alg->cra_ablkcipher.min_keysize = def->min_keysize; | 373 | alg->cra_ablkcipher.min_keysize = def->min_keysize; |
376 | alg->cra_ablkcipher.max_keysize = def->max_keysize; | 374 | alg->cra_ablkcipher.max_keysize = def->max_keysize; |
377 | alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ? | 375 | alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ? qce_des3_setkey : |
378 | qce_des3_setkey : qce_ablkcipher_setkey; | 376 | IS_DES(def->flags) ? qce_des_setkey : |
377 | qce_ablkcipher_setkey; | ||
379 | alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt; | 378 | alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt; |
380 | alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt; | 379 | alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt; |
381 | 380 | ||
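With DES validation moved into its own qce_des_setkey(), qce_ablkcipher_setkey() is left handling AES only: 128- and 256-bit keys stay on the hardware path and any other length (notably 192-bit) drops to the software fallback. The setkey is then chosen per algorithm at registration time by the chained conditional shown above, which is equivalent to:

    if (IS_3DES(def->flags))
            alg->cra_ablkcipher.setkey = qce_des3_setkey;
    else if (IS_DES(def->flags))
            alg->cra_ablkcipher.setkey = qce_des_setkey;
    else
            alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;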
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c index ef1d74e8ddb2..08d4ce3bfddf 100644 --- a/drivers/crypto/qce/core.c +++ b/drivers/crypto/qce/core.c | |||
@@ -167,7 +167,6 @@ static int qce_crypto_probe(struct platform_device *pdev) | |||
167 | { | 167 | { |
168 | struct device *dev = &pdev->dev; | 168 | struct device *dev = &pdev->dev; |
169 | struct qce_device *qce; | 169 | struct qce_device *qce; |
170 | struct resource *res; | ||
171 | int ret; | 170 | int ret; |
172 | 171 | ||
173 | qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL); | 172 | qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL); |
@@ -177,8 +176,7 @@ static int qce_crypto_probe(struct platform_device *pdev) | |||
177 | qce->dev = dev; | 176 | qce->dev = dev; |
178 | platform_set_drvdata(pdev, qce); | 177 | platform_set_drvdata(pdev, qce); |
179 | 178 | ||
180 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 179 | qce->base = devm_platform_ioremap_resource(pdev, 0); |
181 | qce->base = devm_ioremap_resource(&pdev->dev, res); | ||
182 | if (IS_ERR(qce->base)) | 180 | if (IS_ERR(qce->base)) |
183 | return PTR_ERR(qce->base); | 181 | return PTR_ERR(qce->base); |
184 | 182 | ||
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c index e54249ccc009..4730f84b646d 100644 --- a/drivers/crypto/qcom-rng.c +++ b/drivers/crypto/qcom-rng.c | |||
@@ -153,7 +153,6 @@ static struct rng_alg qcom_rng_alg = { | |||
153 | 153 | ||
154 | static int qcom_rng_probe(struct platform_device *pdev) | 154 | static int qcom_rng_probe(struct platform_device *pdev) |
155 | { | 155 | { |
156 | struct resource *res; | ||
157 | struct qcom_rng *rng; | 156 | struct qcom_rng *rng; |
158 | int ret; | 157 | int ret; |
159 | 158 | ||
@@ -164,8 +163,7 @@ static int qcom_rng_probe(struct platform_device *pdev) | |||
164 | platform_set_drvdata(pdev, rng); | 163 | platform_set_drvdata(pdev, rng); |
165 | mutex_init(&rng->lock); | 164 | mutex_init(&rng->lock); |
166 | 165 | ||
167 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 166 | rng->base = devm_platform_ioremap_resource(pdev, 0); |
168 | rng->base = devm_ioremap_resource(&pdev->dev, res); | ||
169 | if (IS_ERR(rng->base)) | 167 | if (IS_ERR(rng->base)) |
170 | return PTR_ERR(rng->base); | 168 | return PTR_ERR(rng->base); |
171 | 169 | ||
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c index 8d7e2545e65a..e5714ef24bf2 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.c +++ b/drivers/crypto/rockchip/rk3288_crypto.c | |||
@@ -311,7 +311,6 @@ MODULE_DEVICE_TABLE(of, crypto_of_id_table); | |||
311 | 311 | ||
312 | static int rk_crypto_probe(struct platform_device *pdev) | 312 | static int rk_crypto_probe(struct platform_device *pdev) |
313 | { | 313 | { |
314 | struct resource *res; | ||
315 | struct device *dev = &pdev->dev; | 314 | struct device *dev = &pdev->dev; |
316 | struct rk_crypto_info *crypto_info; | 315 | struct rk_crypto_info *crypto_info; |
317 | int err = 0; | 316 | int err = 0; |
@@ -339,8 +338,7 @@ static int rk_crypto_probe(struct platform_device *pdev) | |||
339 | 338 | ||
340 | spin_lock_init(&crypto_info->lock); | 339 | spin_lock_init(&crypto_info->lock); |
341 | 340 | ||
342 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 341 | crypto_info->reg = devm_platform_ioremap_resource(pdev, 0); |
343 | crypto_info->reg = devm_ioremap_resource(&pdev->dev, res); | ||
344 | if (IS_ERR(crypto_info->reg)) { | 342 | if (IS_ERR(crypto_info->reg)) { |
345 | err = PTR_ERR(crypto_info->reg); | 343 | err = PTR_ERR(crypto_info->reg); |
346 | goto err_crypto; | 344 | goto err_crypto; |
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h index 54ee5b3ed9db..18e2b3f29336 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.h +++ b/drivers/crypto/rockchip/rk3288_crypto.h | |||
@@ -3,7 +3,7 @@ | |||
3 | #define __RK3288_CRYPTO_H__ | 3 | #define __RK3288_CRYPTO_H__ |
4 | 4 | ||
5 | #include <crypto/aes.h> | 5 | #include <crypto/aes.h> |
6 | #include <crypto/des.h> | 6 | #include <crypto/internal/des.h> |
7 | #include <crypto/algapi.h> | 7 | #include <crypto/algapi.h> |
8 | #include <linux/interrupt.h> | 8 | #include <linux/interrupt.h> |
9 | #include <linux/delay.h> | 9 | #include <linux/delay.h> |
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c index 96078aaa2098..d0f4b2d18059 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c | |||
@@ -46,15 +46,12 @@ static int rk_aes_setkey(struct crypto_ablkcipher *cipher, | |||
46 | static int rk_des_setkey(struct crypto_ablkcipher *cipher, | 46 | static int rk_des_setkey(struct crypto_ablkcipher *cipher, |
47 | const u8 *key, unsigned int keylen) | 47 | const u8 *key, unsigned int keylen) |
48 | { | 48 | { |
49 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | 49 | struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
50 | struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | 50 | int err; |
51 | u32 tmp[DES_EXPKEY_WORDS]; | ||
52 | 51 | ||
53 | if (!des_ekey(tmp, key) && | 52 | err = verify_ablkcipher_des_key(cipher, key); |
54 | (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { | 53 | if (err) |
55 | tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; | 54 | return err; |
56 | return -EINVAL; | ||
57 | } | ||
58 | 55 | ||
59 | ctx->keylen = keylen; | 56 | ctx->keylen = keylen; |
60 | memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); | 57 | memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); |
@@ -65,15 +62,11 @@ static int rk_tdes_setkey(struct crypto_ablkcipher *cipher, | |||
65 | const u8 *key, unsigned int keylen) | 62 | const u8 *key, unsigned int keylen) |
66 | { | 63 | { |
67 | struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher); | 64 | struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
68 | u32 flags; | ||
69 | int err; | 65 | int err; |
70 | 66 | ||
71 | flags = crypto_ablkcipher_get_flags(cipher); | 67 | err = verify_ablkcipher_des3_key(cipher, key); |
72 | err = __des3_verify_key(&flags, key); | 68 | if (err) |
73 | if (unlikely(err)) { | ||
74 | crypto_ablkcipher_set_flags(cipher, flags); | ||
75 | return err; | 69 | return err; |
76 | } | ||
77 | 70 | ||
78 | ctx->keylen = keylen; | 71 | ctx->keylen = keylen; |
79 | memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); | 72 | memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); |
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 9ef25230c199..010f1bb20dad 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c | |||
@@ -2056,9 +2056,12 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode) | |||
2056 | struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | 2056 | struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); |
2057 | struct s5p_aes_dev *dev = ctx->dev; | 2057 | struct s5p_aes_dev *dev = ctx->dev; |
2058 | 2058 | ||
2059 | if (!req->nbytes) | ||
2060 | return 0; | ||
2061 | |||
2059 | if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE) && | 2062 | if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE) && |
2060 | ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) { | 2063 | ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) { |
2061 | dev_err(dev->dev, "request size is not exact amount of AES blocks\n"); | 2064 | dev_dbg(dev->dev, "request size is not exact amount of AES blocks\n"); |
2062 | return -EINVAL; | 2065 | return -EINVAL; |
2063 | } | 2066 | } |
2064 | 2067 | ||
@@ -2170,7 +2173,7 @@ static struct crypto_alg algs[] = { | |||
2170 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | 2173 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
2171 | CRYPTO_ALG_ASYNC | | 2174 | CRYPTO_ALG_ASYNC | |
2172 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 2175 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
2173 | .cra_blocksize = AES_BLOCK_SIZE, | 2176 | .cra_blocksize = 1, |
2174 | .cra_ctxsize = sizeof(struct s5p_aes_ctx), | 2177 | .cra_ctxsize = sizeof(struct s5p_aes_ctx), |
2175 | .cra_alignmask = 0x0f, | 2178 | .cra_alignmask = 0x0f, |
2176 | .cra_type = &crypto_ablkcipher_type, | 2179 | .cra_type = &crypto_ablkcipher_type, |
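Three small behavioural fixes in s5p-sss: zero-length requests now succeed immediately instead of touching the hardware, the alignment failure is demoted to dev_dbg() (it is user-triggerable, e.g. via AF_ALG, so it should not flood the log), and ctr(aes) registers with cra_blocksize = 1 because CTR is a stream mode that must accept arbitrary request lengths. Annotated:

    if (!req->nbytes)
            return 0;       /* nothing to process */

    if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE) &&
        ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
            /* reachable from userspace, hence dbg rather than err */
            dev_dbg(dev->dev, "request size is not exact amount of AES blocks\n");
            return -EINVAL;
    }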
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index b0b8e3d48aef..8ac8ec6decd5 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c | |||
@@ -1403,10 +1403,8 @@ static int sahara_probe(struct platform_device *pdev) | |||
1403 | 1403 | ||
1404 | /* Get the IRQ */ | 1404 | /* Get the IRQ */ |
1405 | irq = platform_get_irq(pdev, 0); | 1405 | irq = platform_get_irq(pdev, 0); |
1406 | if (irq < 0) { | 1406 | if (irq < 0) |
1407 | dev_err(&pdev->dev, "failed to get irq resource\n"); | ||
1408 | return irq; | 1407 | return irq; |
1409 | } | ||
1410 | 1408 | ||
1411 | err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler, | 1409 | err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler, |
1412 | 0, dev_name(&pdev->dev), dev); | 1410 | 0, dev_name(&pdev->dev), dev); |
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig index d6576280fc9b..1aba9372cd23 100644 --- a/drivers/crypto/stm32/Kconfig +++ b/drivers/crypto/stm32/Kconfig | |||
@@ -25,7 +25,7 @@ config CRYPTO_DEV_STM32_CRYP | |||
25 | depends on ARCH_STM32 | 25 | depends on ARCH_STM32 |
26 | select CRYPTO_HASH | 26 | select CRYPTO_HASH |
27 | select CRYPTO_ENGINE | 27 | select CRYPTO_ENGINE |
28 | select CRYPTO_DES | 28 | select CRYPTO_LIB_DES |
29 | help | 29 | help |
30 | This enables support for the CRYP (AES/DES/TDES) hw accelerator which | 30 | This enables support for the CRYP (AES/DES/TDES) hw accelerator which |
31 | can be found on STMicroelectronics STM32 SOC. | 31 | can be found on STMicroelectronics STM32 SOC. |
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index 440c9f1bd006..9e11c3480353 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c | |||
@@ -255,7 +255,6 @@ static int stm32_crc_probe(struct platform_device *pdev) | |||
255 | { | 255 | { |
256 | struct device *dev = &pdev->dev; | 256 | struct device *dev = &pdev->dev; |
257 | struct stm32_crc *crc; | 257 | struct stm32_crc *crc; |
258 | struct resource *res; | ||
259 | int ret; | 258 | int ret; |
260 | 259 | ||
261 | crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL); | 260 | crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL); |
@@ -264,8 +263,7 @@ static int stm32_crc_probe(struct platform_device *pdev) | |||
264 | 263 | ||
265 | crc->dev = dev; | 264 | crc->dev = dev; |
266 | 265 | ||
267 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 266 | crc->regs = devm_platform_ioremap_resource(pdev, 0); |
268 | crc->regs = devm_ioremap_resource(dev, res); | ||
269 | if (IS_ERR(crc->regs)) { | 267 | if (IS_ERR(crc->regs)) { |
270 | dev_err(dev, "Cannot map CRC IO\n"); | 268 | dev_err(dev, "Cannot map CRC IO\n"); |
271 | return PTR_ERR(crc->regs); | 269 | return PTR_ERR(crc->regs); |
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index 98ae02826e8f..ba5ea6434f9c 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/reset.h> | 15 | #include <linux/reset.h> |
16 | 16 | ||
17 | #include <crypto/aes.h> | 17 | #include <crypto/aes.h> |
18 | #include <crypto/des.h> | 18 | #include <crypto/internal/des.h> |
19 | #include <crypto/engine.h> | 19 | #include <crypto/engine.h> |
20 | #include <crypto/scatterwalk.h> | 20 | #include <crypto/scatterwalk.h> |
21 | #include <crypto/internal/aead.h> | 21 | #include <crypto/internal/aead.h> |
@@ -767,35 +767,15 @@ static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | |||
767 | static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | 767 | static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, |
768 | unsigned int keylen) | 768 | unsigned int keylen) |
769 | { | 769 | { |
770 | u32 tmp[DES_EXPKEY_WORDS]; | 770 | return verify_ablkcipher_des_key(tfm, key) ?: |
771 | 771 | stm32_cryp_setkey(tfm, key, keylen); | |
772 | if (keylen != DES_KEY_SIZE) | ||
773 | return -EINVAL; | ||
774 | |||
775 | if ((crypto_ablkcipher_get_flags(tfm) & | ||
776 | CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) && | ||
777 | unlikely(!des_ekey(tmp, key))) { | ||
778 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); | ||
779 | return -EINVAL; | ||
780 | } | ||
781 | |||
782 | return stm32_cryp_setkey(tfm, key, keylen); | ||
783 | } | 772 | } |
784 | 773 | ||
785 | static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | 774 | static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, |
786 | unsigned int keylen) | 775 | unsigned int keylen) |
787 | { | 776 | { |
788 | u32 flags; | 777 | return verify_ablkcipher_des3_key(tfm, key) ?: |
789 | int err; | 778 | stm32_cryp_setkey(tfm, key, keylen); |
790 | |||
791 | flags = crypto_ablkcipher_get_flags(tfm); | ||
792 | err = __des3_verify_key(&flags, key); | ||
793 | if (unlikely(err)) { | ||
794 | crypto_ablkcipher_set_flags(tfm, flags); | ||
795 | return err; | ||
796 | } | ||
797 | |||
798 | return stm32_cryp_setkey(tfm, key, keylen); | ||
799 | } | 779 | } |
800 | 780 | ||
801 | static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key, | 781 | static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key, |
@@ -1955,7 +1935,6 @@ static int stm32_cryp_probe(struct platform_device *pdev) | |||
1955 | { | 1935 | { |
1956 | struct device *dev = &pdev->dev; | 1936 | struct device *dev = &pdev->dev; |
1957 | struct stm32_cryp *cryp; | 1937 | struct stm32_cryp *cryp; |
1958 | struct resource *res; | ||
1959 | struct reset_control *rst; | 1938 | struct reset_control *rst; |
1960 | int irq, ret; | 1939 | int irq, ret; |
1961 | 1940 | ||
@@ -1969,16 +1948,13 @@ static int stm32_cryp_probe(struct platform_device *pdev) | |||
1969 | 1948 | ||
1970 | cryp->dev = dev; | 1949 | cryp->dev = dev; |
1971 | 1950 | ||
1972 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1951 | cryp->regs = devm_platform_ioremap_resource(pdev, 0); |
1973 | cryp->regs = devm_ioremap_resource(dev, res); | ||
1974 | if (IS_ERR(cryp->regs)) | 1952 | if (IS_ERR(cryp->regs)) |
1975 | return PTR_ERR(cryp->regs); | 1953 | return PTR_ERR(cryp->regs); |
1976 | 1954 | ||
1977 | irq = platform_get_irq(pdev, 0); | 1955 | irq = platform_get_irq(pdev, 0); |
1978 | if (irq < 0) { | 1956 | if (irq < 0) |
1979 | dev_err(dev, "Cannot get IRQ resource\n"); | ||
1980 | return irq; | 1957 | return irq; |
1981 | } | ||
1982 | 1958 | ||
1983 | ret = devm_request_threaded_irq(dev, irq, stm32_cryp_irq, | 1959 | ret = devm_request_threaded_irq(dev, irq, stm32_cryp_irq, |
1984 | stm32_cryp_irq_thread, IRQF_ONESHOT, | 1960 | stm32_cryp_irq_thread, IRQF_ONESHOT, |
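The stm32 setkeys use the GNU a ?: b ("elvis") extension, which yields a when a is non-zero and b otherwise, evaluating a only once. The one-liner above is therefore shorthand for:

    int err = verify_ablkcipher_des_key(tfm, key);

    if (err)
            return err;
    return stm32_cryp_setkey(tfm, key, keylen);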
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 2b70d8796f25..cfc8e0e37bee 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c | |||
@@ -1450,10 +1450,8 @@ static int stm32_hash_probe(struct platform_device *pdev) | |||
1450 | return ret; | 1450 | return ret; |
1451 | 1451 | ||
1452 | irq = platform_get_irq(pdev, 0); | 1452 | irq = platform_get_irq(pdev, 0); |
1453 | if (irq < 0) { | 1453 | if (irq < 0) |
1454 | dev_err(dev, "Cannot get IRQ resource\n"); | ||
1455 | return irq; | 1454 | return irq; |
1456 | } | ||
1457 | 1455 | ||
1458 | ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler, | 1456 | ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler, |
1459 | stm32_hash_irq_thread, IRQF_ONESHOT, | 1457 | stm32_hash_irq_thread, IRQF_ONESHOT, |
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c index 6f7cbf6c2b55..6536fd4bee65 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | |||
@@ -542,25 +542,11 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key, | |||
542 | unsigned int keylen) | 542 | unsigned int keylen) |
543 | { | 543 | { |
544 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); | 544 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
545 | struct sun4i_ss_ctx *ss = op->ss; | 545 | int err; |
546 | u32 flags; | ||
547 | u32 tmp[DES_EXPKEY_WORDS]; | ||
548 | int ret; | ||
549 | |||
550 | if (unlikely(keylen != DES_KEY_SIZE)) { | ||
551 | dev_err(ss->dev, "Invalid keylen %u\n", keylen); | ||
552 | crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
553 | return -EINVAL; | ||
554 | } | ||
555 | |||
556 | flags = crypto_skcipher_get_flags(tfm); | ||
557 | 546 | ||
558 | ret = des_ekey(tmp, key); | 547 | err = verify_skcipher_des_key(tfm, key); |
559 | if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { | 548 | if (err) |
560 | crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); | 549 | return err; |
561 | dev_dbg(ss->dev, "Weak key %u\n", keylen); | ||
562 | return -EINVAL; | ||
563 | } | ||
564 | 550 | ||
565 | op->keylen = keylen; | 551 | op->keylen = keylen; |
566 | memcpy(op->key, key, keylen); | 552 | memcpy(op->key, key, keylen); |
@@ -578,8 +564,8 @@ int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, | |||
578 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); | 564 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
579 | int err; | 565 | int err; |
580 | 566 | ||
581 | err = des3_verify_key(tfm, key); | 567 | err = verify_skcipher_des3_key(tfm, key); |
582 | if (unlikely(err)) | 568 | if (err) |
583 | return err; | 569 | return err; |
584 | 570 | ||
585 | op->keylen = keylen; | 571 | op->keylen = keylen; |
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c index 2e8704271f45..9aa6fe081a27 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c | |||
@@ -225,7 +225,6 @@ static struct sun4i_ss_alg_template ss_algs[] = { | |||
225 | 225 | ||
226 | static int sun4i_ss_probe(struct platform_device *pdev) | 226 | static int sun4i_ss_probe(struct platform_device *pdev) |
227 | { | 227 | { |
228 | struct resource *res; | ||
229 | u32 v; | 228 | u32 v; |
230 | int err, i; | 229 | int err, i; |
231 | unsigned long cr; | 230 | unsigned long cr; |
@@ -240,8 +239,7 @@ static int sun4i_ss_probe(struct platform_device *pdev) | |||
240 | if (!ss) | 239 | if (!ss) |
241 | return -ENOMEM; | 240 | return -ENOMEM; |
242 | 241 | ||
243 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 242 | ss->base = devm_platform_ioremap_resource(pdev, 0); |
244 | ss->base = devm_ioremap_resource(&pdev->dev, res); | ||
245 | if (IS_ERR(ss->base)) { | 243 | if (IS_ERR(ss->base)) { |
246 | dev_err(&pdev->dev, "Cannot request MMIO\n"); | 244 | dev_err(&pdev->dev, "Cannot request MMIO\n"); |
247 | return PTR_ERR(ss->base); | 245 | return PTR_ERR(ss->base); |
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h index 8654d48aedc0..35a27a7145f8 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss.h +++ b/drivers/crypto/sunxi-ss/sun4i-ss.h | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <crypto/internal/hash.h> | 29 | #include <crypto/internal/hash.h> |
30 | #include <crypto/internal/skcipher.h> | 30 | #include <crypto/internal/skcipher.h> |
31 | #include <crypto/aes.h> | 31 | #include <crypto/aes.h> |
32 | #include <crypto/des.h> | 32 | #include <crypto/internal/des.h> |
33 | #include <crypto/internal/rng.h> | 33 | #include <crypto/internal/rng.h> |
34 | #include <crypto/rng.h> | 34 | #include <crypto/rng.h> |
35 | 35 | ||
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index c9d686a0e805..cb6c10b1bf36 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -30,7 +30,7 @@ | |||
30 | 30 | ||
31 | #include <crypto/algapi.h> | 31 | #include <crypto/algapi.h> |
32 | #include <crypto/aes.h> | 32 | #include <crypto/aes.h> |
33 | #include <crypto/des.h> | 33 | #include <crypto/internal/des.h> |
34 | #include <crypto/sha.h> | 34 | #include <crypto/sha.h> |
35 | #include <crypto/md5.h> | 35 | #include <crypto/md5.h> |
36 | #include <crypto/internal/aead.h> | 36 | #include <crypto/internal/aead.h> |
@@ -925,7 +925,6 @@ static int aead_des3_setkey(struct crypto_aead *authenc, | |||
925 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 925 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
926 | struct device *dev = ctx->dev; | 926 | struct device *dev = ctx->dev; |
927 | struct crypto_authenc_keys keys; | 927 | struct crypto_authenc_keys keys; |
928 | u32 flags; | ||
929 | int err; | 928 | int err; |
930 | 929 | ||
931 | err = crypto_authenc_extractkeys(&keys, key, keylen); | 930 | err = crypto_authenc_extractkeys(&keys, key, keylen); |
@@ -936,15 +935,9 @@ static int aead_des3_setkey(struct crypto_aead *authenc, | |||
936 | if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE) | 935 | if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE) |
937 | goto badkey; | 936 | goto badkey; |
938 | 937 | ||
939 | if (keys.enckeylen != DES3_EDE_KEY_SIZE) | 938 | err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen); |
940 | goto badkey; | 939 | if (err) |
941 | |||
942 | flags = crypto_aead_get_flags(authenc); | ||
943 | err = __des3_verify_key(&flags, keys.enckey); | ||
944 | if (unlikely(err)) { | ||
945 | crypto_aead_set_flags(authenc, flags); | ||
946 | goto out; | 940 | goto out; |
947 | } | ||
948 | 941 | ||
949 | if (ctx->keylen) | 942 | if (ctx->keylen) |
950 | dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE); | 943 | dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE); |
@@ -1517,32 +1510,15 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, | |||
1517 | static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher, | 1510 | static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher, |
1518 | const u8 *key, unsigned int keylen) | 1511 | const u8 *key, unsigned int keylen) |
1519 | { | 1512 | { |
1520 | u32 tmp[DES_EXPKEY_WORDS]; | 1513 | return verify_ablkcipher_des_key(cipher, key) ?: |
1521 | 1514 | ablkcipher_setkey(cipher, key, keylen); | |
1522 | if (unlikely(crypto_ablkcipher_get_flags(cipher) & | ||
1523 | CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) && | ||
1524 | !des_ekey(tmp, key)) { | ||
1525 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY); | ||
1526 | return -EINVAL; | ||
1527 | } | ||
1528 | |||
1529 | return ablkcipher_setkey(cipher, key, keylen); | ||
1530 | } | 1515 | } |
1531 | 1516 | ||
1532 | static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher, | 1517 | static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher, |
1533 | const u8 *key, unsigned int keylen) | 1518 | const u8 *key, unsigned int keylen) |
1534 | { | 1519 | { |
1535 | u32 flags; | 1520 | return verify_ablkcipher_des3_key(cipher, key) ?: |
1536 | int err; | 1521 | ablkcipher_setkey(cipher, key, keylen); |
1537 | |||
1538 | flags = crypto_ablkcipher_get_flags(cipher); | ||
1539 | err = __des3_verify_key(&flags, key); | ||
1540 | if (unlikely(err)) { | ||
1541 | crypto_ablkcipher_set_flags(cipher, flags); | ||
1542 | return err; | ||
1543 | } | ||
1544 | |||
1545 | return ablkcipher_setkey(cipher, key, keylen); | ||
1546 | } | 1522 | } |
1547 | 1523 | ||
1548 | static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher, | 1524 | static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher, |
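For the talitos AEAD path the same idea applies to authenc() keys: split the blob first, then let verify_aead_des3_key() validate the cipher half (it also checks that enckeylen equals DES3_EDE_KEY_SIZE, which is why the explicit length test could go). The resulting shape:

    struct crypto_authenc_keys keys;
    int err;

    err = crypto_authenc_extractkeys(&keys, key, keylen);
    if (err)
            goto badkey;

    err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
    if (err)
            goto out;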
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig index 349d34eaac13..b1c6f739f77b 100644 --- a/drivers/crypto/ux500/Kconfig +++ b/drivers/crypto/ux500/Kconfig | |||
@@ -9,7 +9,7 @@ config CRYPTO_DEV_UX500_CRYP | |||
9 | depends on CRYPTO_DEV_UX500 | 9 | depends on CRYPTO_DEV_UX500 |
10 | select CRYPTO_ALGAPI | 10 | select CRYPTO_ALGAPI |
11 | select CRYPTO_BLKCIPHER | 11 | select CRYPTO_BLKCIPHER |
12 | select CRYPTO_DES | 12 | select CRYPTO_LIB_DES |
13 | help | 13 | help |
14 | This selects the crypto driver for the UX500_CRYP hardware. It supports | 14 | This selects the crypto driver for the UX500_CRYP hardware. It supports |
15 | AES-ECB, CBC and CTR with keys sizes of 128, 192 and 256 bit sizes. | 15 | AES-ECB, CBC and CTR with keys sizes of 128, 192 and 256 bit sizes. |
diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h index bd89504e8167..8da7f87b339b 100644 --- a/drivers/crypto/ux500/cryp/cryp.h +++ b/drivers/crypto/ux500/cryp/cryp.h | |||
@@ -241,12 +241,12 @@ struct cryp_device_data { | |||
241 | struct clk *clk; | 241 | struct clk *clk; |
242 | struct regulator *pwr_regulator; | 242 | struct regulator *pwr_regulator; |
243 | int power_status; | 243 | int power_status; |
244 | struct spinlock ctx_lock; | 244 | spinlock_t ctx_lock; |
245 | struct cryp_ctx *current_ctx; | 245 | struct cryp_ctx *current_ctx; |
246 | struct klist_node list_node; | 246 | struct klist_node list_node; |
247 | struct cryp_dma dma; | 247 | struct cryp_dma dma; |
248 | bool power_state; | 248 | bool power_state; |
249 | struct spinlock power_state_spinlock; | 249 | spinlock_t power_state_spinlock; |
250 | bool restore_dev_ctx; | 250 | bool restore_dev_ctx; |
251 | }; | 251 | }; |
252 | 252 | ||
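The ux500 lock fields (here and in hash_alg.h below) switch from the spelled-out struct spinlock to the spinlock_t typedef, the type every spin_lock_*() API is declared against; naming the underlying struct directly only works by coincidence on configurations where the typedef happens to map straight onto it. Usage is unchanged:

    spinlock_t ctx_lock;            /* declared via the typedef */

    spin_lock_init(&device_data->ctx_lock);
    spin_lock(&device_data->ctx_lock);
    /* critical section */
    spin_unlock(&device_data->ctx_lock);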
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index b4beb54c0dbe..1628ae7a1467 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <crypto/aes.h> | 29 | #include <crypto/aes.h> |
30 | #include <crypto/algapi.h> | 30 | #include <crypto/algapi.h> |
31 | #include <crypto/ctr.h> | 31 | #include <crypto/ctr.h> |
32 | #include <crypto/des.h> | 32 | #include <crypto/internal/des.h> |
33 | #include <crypto/scatterwalk.h> | 33 | #include <crypto/scatterwalk.h> |
34 | 34 | ||
35 | #include <linux/platform_data/crypto-ux500.h> | 35 | #include <linux/platform_data/crypto-ux500.h> |
@@ -528,9 +528,9 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx, | |||
528 | 528 | ||
529 | dev_dbg(ctx->device->dev, "[%s]: ", __func__); | 529 | dev_dbg(ctx->device->dev, "[%s]: ", __func__); |
530 | 530 | ||
531 | if (unlikely(!IS_ALIGNED((u32)sg, 4))) { | 531 | if (unlikely(!IS_ALIGNED((unsigned long)sg, 4))) { |
532 | dev_err(ctx->device->dev, "[%s]: Data in sg list isn't " | 532 | dev_err(ctx->device->dev, "[%s]: Data in sg list isn't " |
533 | "aligned! Addr: 0x%08x", __func__, (u32)sg); | 533 | "aligned! Addr: 0x%08lx", __func__, (unsigned long)sg); |
534 | return -EFAULT; | 534 | return -EFAULT; |
535 | } | 535 | } |
536 | 536 | ||
@@ -763,9 +763,9 @@ static int hw_crypt_noxts(struct cryp_ctx *ctx, | |||
763 | 763 | ||
764 | ctx->outlen = ctx->datalen; | 764 | ctx->outlen = ctx->datalen; |
765 | 765 | ||
766 | if (unlikely(!IS_ALIGNED((u32)indata, 4))) { | 766 | if (unlikely(!IS_ALIGNED((unsigned long)indata, 4))) { |
767 | pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: " | 767 | pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: " |
768 | "0x%08x", __func__, (u32)indata); | 768 | "0x%08lx", __func__, (unsigned long)indata); |
769 | return -EINVAL; | 769 | return -EINVAL; |
770 | } | 770 | } |
771 | 771 | ||
@@ -987,26 +987,13 @@ static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher, | |||
987 | const u8 *key, unsigned int keylen) | 987 | const u8 *key, unsigned int keylen) |
988 | { | 988 | { |
989 | struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); | 989 | struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
990 | u32 *flags = &cipher->base.crt_flags; | 990 | int err; |
991 | u32 tmp[DES_EXPKEY_WORDS]; | ||
992 | int ret; | ||
993 | 991 | ||
994 | pr_debug(DEV_DBG_NAME " [%s]", __func__); | 992 | pr_debug(DEV_DBG_NAME " [%s]", __func__); |
995 | if (keylen != DES_KEY_SIZE) { | ||
996 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; | ||
997 | pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN", | ||
998 | __func__); | ||
999 | return -EINVAL; | ||
1000 | } | ||
1001 | 993 | ||
1002 | ret = des_ekey(tmp, key); | 994 | err = verify_ablkcipher_des_key(cipher, key); |
1003 | if (unlikely(ret == 0) && | 995 | if (err) |
1004 | (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { | 996 | return err; |
1005 | *flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
1006 | pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY", | ||
1007 | __func__); | ||
1008 | return -EINVAL; | ||
1009 | } | ||
1010 | 997 | ||
1011 | memcpy(ctx->key, key, keylen); | 998 | memcpy(ctx->key, key, keylen); |
1012 | ctx->keylen = keylen; | 999 | ctx->keylen = keylen; |
@@ -1019,17 +1006,13 @@ static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher, | |||
1019 | const u8 *key, unsigned int keylen) | 1006 | const u8 *key, unsigned int keylen) |
1020 | { | 1007 | { |
1021 | struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); | 1008 | struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
1022 | u32 flags; | ||
1023 | int err; | 1009 | int err; |
1024 | 1010 | ||
1025 | pr_debug(DEV_DBG_NAME " [%s]", __func__); | 1011 | pr_debug(DEV_DBG_NAME " [%s]", __func__); |
1026 | 1012 | ||
1027 | flags = crypto_ablkcipher_get_flags(cipher); | 1013 | err = verify_ablkcipher_des3_key(cipher, key); |
1028 | err = __des3_verify_key(&flags, key); | 1014 | if (err) |
1029 | if (unlikely(err)) { | ||
1030 | crypto_ablkcipher_set_flags(cipher, flags); | ||
1031 | return err; | 1015 | return err; |
1032 | } | ||
1033 | 1016 | ||
1034 | memcpy(ctx->key, key, keylen); | 1017 | memcpy(ctx->key, key, keylen); |
1035 | ctx->keylen = keylen; | 1018 | ctx->keylen = keylen; |
diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h index ab2bd00c1c36..7c9bcc15125f 100644 --- a/drivers/crypto/ux500/hash/hash_alg.h +++ b/drivers/crypto/ux500/hash/hash_alg.h | |||
@@ -366,10 +366,10 @@ struct hash_device_data { | |||
366 | phys_addr_t phybase; | 366 | phys_addr_t phybase; |
367 | struct klist_node list_node; | 367 | struct klist_node list_node; |
368 | struct device *dev; | 368 | struct device *dev; |
369 | struct spinlock ctx_lock; | 369 | spinlock_t ctx_lock; |
370 | struct hash_ctx *current_ctx; | 370 | struct hash_ctx *current_ctx; |
371 | bool power_state; | 371 | bool power_state; |
372 | struct spinlock power_state_lock; | 372 | spinlock_t power_state_lock; |
373 | struct regulator *regulator; | 373 | struct regulator *regulator; |
374 | struct clk *clk; | 374 | struct clk *clk; |
375 | bool restore_dev_state; | 375 | bool restore_dev_state; |
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index f1ebc3dfa21e..c172a6953477 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c | |||
@@ -806,7 +806,7 @@ static int hash_process_data(struct hash_device_data *device_data, | |||
806 | * HW peripheral, otherwise we first copy data | 806 | * HW peripheral, otherwise we first copy data |
807 | * to a local buffer | 807 | * to a local buffer |
808 | */ | 808 | */ |
809 | if ((0 == (((u32)data_buffer) % 4)) && | 809 | if (IS_ALIGNED((unsigned long)data_buffer, 4) && |
810 | (0 == *index)) | 810 | (0 == *index)) |
811 | hash_processblock(device_data, | 811 | hash_processblock(device_data, |
812 | (const u32 *)data_buffer, | 812 | (const u32 *)data_buffer, |
@@ -864,7 +864,8 @@ static int hash_dma_final(struct ahash_request *req) | |||
864 | if (ret) | 864 | if (ret) |
865 | return ret; | 865 | return ret; |
866 | 866 | ||
867 | dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx); | 867 | dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__, |
868 | (unsigned long)ctx); | ||
868 | 869 | ||
869 | if (req_ctx->updated) { | 870 | if (req_ctx->updated) { |
870 | ret = hash_resume_state(device_data, &device_data->state); | 871 | ret = hash_resume_state(device_data, &device_data->state); |
@@ -969,7 +970,8 @@ static int hash_hw_final(struct ahash_request *req) | |||
969 | if (ret) | 970 | if (ret) |
970 | return ret; | 971 | return ret; |
971 | 972 | ||
972 | dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx); | 973 | dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__, |
974 | (unsigned long)ctx); | ||
973 | 975 | ||
974 | if (req_ctx->updated) { | 976 | if (req_ctx->updated) { |
975 | ret = hash_resume_state(device_data, &device_data->state); | 977 | ret = hash_resume_state(device_data, &device_data->state); |
@@ -1272,8 +1274,8 @@ void hash_get_digest(struct hash_device_data *device_data, | |||
1272 | else | 1274 | else |
1273 | loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32); | 1275 | loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32); |
1274 | 1276 | ||
1275 | dev_dbg(device_data->dev, "%s: digest array:(0x%x)\n", | 1277 | dev_dbg(device_data->dev, "%s: digest array:(0x%lx)\n", |
1276 | __func__, (u32) digest); | 1278 | __func__, (unsigned long)digest); |
1277 | 1279 | ||
1278 | /* Copy result into digest array */ | 1280 | /* Copy result into digest array */ |
1279 | for (count = 0; count < loop_ctr; count++) { | 1281 | for (count = 0; count < loop_ctr; count++) { |
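The pointer-cast hunks in cryp_core.c and hash_core.c above are 64-bit cleanliness fixes: casting a pointer to u32 truncates it on 64-bit builds, so alignment tests and debug prints go through unsigned long instead (with IS_ALIGNED() also replacing the hand-rolled % 4 test):

    /* pointer alignment check, width-safe */
    if (!IS_ALIGNED((unsigned long)data_buffer, 4))
            return -EINVAL;

    /* printing: %lx with an unsigned long cast, never %x with (u32) */
    dev_dbg(dev, "%s: (ctx=0x%lx)\n", __func__, (unsigned long)ctx);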
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index 10f266d462d6..42d19205166b 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c | |||
@@ -129,13 +129,11 @@ static int virtio_crypto_alg_ablkcipher_init_session( | |||
129 | * Avoid to do DMA from the stack, switch to using | 129 | * Avoid to do DMA from the stack, switch to using |
130 | * dynamically-allocated for the key | 130 | * dynamically-allocated for the key |
131 | */ | 131 | */ |
132 | uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC); | 132 | uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC); |
133 | 133 | ||
134 | if (!cipher_key) | 134 | if (!cipher_key) |
135 | return -ENOMEM; | 135 | return -ENOMEM; |
136 | 136 | ||
137 | memcpy(cipher_key, key, keylen); | ||
138 | |||
139 | spin_lock(&vcrypto->ctrl_lock); | 137 | spin_lock(&vcrypto->ctrl_lock); |
140 | /* Pad ctrl header */ | 138 | /* Pad ctrl header */ |
141 | vcrypto->ctrl.header.opcode = | 139 | vcrypto->ctrl.header.opcode = |
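kmemdup() is the single-call form of the allocate-then-copy idiom the virtio driver used, with identical GFP semantics:

    /* was: kmalloc(keylen, GFP_ATOMIC) followed by
     * memcpy(cipher_key, key, keylen) */
    uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);

    if (!cipher_key)
            return -ENOMEM;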
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index 49f7258045fa..d59e736882f6 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c | |||
@@ -84,7 +84,7 @@ static int p8_aes_xts_crypt(struct skcipher_request *req, int enc) | |||
84 | u8 tweak[AES_BLOCK_SIZE]; | 84 | u8 tweak[AES_BLOCK_SIZE]; |
85 | int ret; | 85 | int ret; |
86 | 86 | ||
87 | if (!crypto_simd_usable()) { | 87 | if (!crypto_simd_usable() || (req->cryptlen % XTS_BLOCK_SIZE) != 0) { |
88 | struct skcipher_request *subreq = skcipher_request_ctx(req); | 88 | struct skcipher_request *subreq = skcipher_request_ctx(req); |
89 | 89 | ||
90 | *subreq = *req; | 90 | *subreq = *req; |
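The vmx change widens the fallback condition: the VSX fast path only handles whole 16-byte blocks, so requests whose length is not a multiple of XTS_BLOCK_SIZE (i.e. those requiring ciphertext stealing) are now routed through the generic fallback together with the no-SIMD case. Schematically, with a hypothetical helper name standing in for the fallback dispatch:

    if (!crypto_simd_usable() || (req->cryptlen % XTS_BLOCK_SIZE) != 0)
            return run_on_fallback_skcipher(req);   /* hypothetical */

    /* otherwise the whole-blocks VSX path runs as before */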